/*
 * Copyright (c) 1998-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91  Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98  cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitDebug.h>

mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;


/*
 * Static variables for this module.
 */

static IOThreadFunc threadArgFcn;
static void *       threadArgArg;
static lock_t *     threadArgLock;

static queue_head_t gIOMallocContiguousEntries;
static mutex_t *    gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 16 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 64 * 1024 * 1024 };

typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32    count;
    UInt32    hint;
    IOMapData maps[ kIOMaxPageableMaps ];
    mutex_t * lock;
} gIOKitPageableSpace;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    threadArgLock = lock_alloc( true, NULL, NULL );

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                        &gIOKitPageableSpace.maps[0].address,
                        kIOPageableMapSize,
                        TRUE,
                        TRUE,
                        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    gIOKitPageableSpace.lock        = mutex_alloc( 0 );
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock  = mutex_alloc( 0 );
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * We pass an argument to a new thread by saving fcn and arg in some
 * locked variables and starting the thread at ioThreadStart(). This
 * function retrieves fcn and arg and makes the appropriate call.
 *
 */

static void ioThreadStart( void )
{
    IOThreadFunc fcn;
    void *       arg;

    fcn = threadArgFcn;
    arg = threadArgArg;
    lock_done( threadArgLock);

    (*fcn)(arg);

    IOExitThread();
}

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    IOThread thread;

    lock_write( threadArgLock);
    threadArgFcn = fcn;
    threadArgArg = arg;

    thread = kernel_thread( kernel_task, ioThreadStart);

    return(thread);
}
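
/*
 * Illustrative usage sketch; myWorkFunc and myContext are hypothetical names.
 * The write lock taken in IOCreateThread() is released by ioThreadStart()
 * once the new thread has copied fcn/arg, which is what serializes
 * concurrent callers.
 *
 *      static void myWorkFunc(void * arg)  { ... do work ... }
 *      ...
 *      IOThread t = IOCreateThread(&myWorkFunc, (void *) myContext);
 */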


volatile void IOExitThread()
{
    (void) thread_terminate(current_act());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */


void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif
    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree((vm_offset_t)address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}
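
/*
 * Note: IOFree() must be passed the same size that was given to IOMalloc(),
 * since kfree() and the IOALLOCDEBUG accounting both rely on it.  A minimal,
 * hypothetical usage sketch:
 *
 *      void * buf = IOMalloc(1024);
 *      if (buf) {
 *          // ... use buf ...
 *          IOFree(buf, 1024);
 *      }
 */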

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

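/*
 * Allocations whose adjusted size reaches a page are satisfied directly by
 * kernel_memory_allocate() with the alignment mask.  Smaller requests
 * over-allocate (from kalloc() or kernel_memory_allocate(), depending on the
 * adjusted size), return an aligned pointer inside that block, and stash two
 * words just below the returned pointer: the adjusted size and the original
 * allocation address.  IOFreeAligned() reads those two words back to release
 * the right block.
 */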
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                        + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                        & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                            - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                            - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, (vm_address_t) allocationAddress, adjustedSize);
        else
            kfree((vm_offset_t) allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry
{
    void *        virtual;
    ppnum_t       ioBase;
    queue_chain_t link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
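
/*
 * An entry is queued on gIOMallocContiguousEntries only when
 * IOMallocContiguous() also allocated an I/O virtual range from the system
 * IOMapper.  IOFreeContiguous() looks the address up here so it can release
 * that IOVM range (ioBase) along with the memory.
 */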

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;
    ppnum_t       pagenum;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size)
    {
        adjustedSize = size;
        if (adjustedSize > page_size)
        {
            kr = kmem_alloc_contig(kernel_map, &address, size,
                                   alignMask, 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &address,
                                        size, alignMask, 0);
        }
        if (KERN_SUCCESS != kr)
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                        + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                        & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page_32(address);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    /* Do we want a physical address? */
    if (address && physicalAddress)
    {
        do
        {
            /* Get the physical page */
            pagenum = pmap_find_phys(kernel_pmap, (addr64_t) address);
            if(pagenum)
            {
                IOByteCount offset;
                ppnum_t base;

                base = IOMapperIOVMAlloc((size + PAGE_MASK) >> PAGE_SHIFT);
                if (base)
                {
                    _IOMallocContiguousEntry *
                        entry = IONew(_IOMallocContiguousEntry, 1);
                    if (!entry)
                    {
                        IOFreeContiguous((void *) address, size);
                        address = 0;
                        break;
                    }
                    entry->virtual = (void *) address;
                    entry->ioBase  = base;
                    mutex_lock(gIOMallocContiguousEntriesLock);
                    queue_enter( &gIOMallocContiguousEntries, entry,
                                 _IOMallocContiguousEntry *, link );
                    mutex_unlock(gIOMallocContiguousEntriesLock);

                    *physicalAddress = (IOPhysicalAddress)((base << PAGE_SHIFT) | (address & PAGE_MASK));
                    for (offset = 0; offset < ((size + PAGE_MASK) >> PAGE_SHIFT); offset++, pagenum++)
                        IOMapperInsertPage( base, offset, pagenum );
                }
                else
                    *physicalAddress = (IOPhysicalAddress)((pagenum << PAGE_SHIFT) | (address & PAGE_MASK));
            }
            else
                /* Did not find, return 0 */
                *physicalAddress = (IOPhysicalAddress) 0;
        }
        while (false);
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
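
/*
 * Illustrative usage sketch (names and sizes are hypothetical).  Passing a
 * non-NULL physicalAddress also requests the bus/physical address of the
 * first page:
 *
 *      IOPhysicalAddress phys;
 *      void * buf = IOMallocContiguous(4096, 4096, &phys);
 *      if (buf) {
 *          // ... program DMA with phys, use buf ...
 *          IOFreeContiguous(buf, 4096);
 *      }
 */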

void IOFreeContiguous(void * address, vm_size_t size)
{
    vm_address_t               allocationAddress;
    vm_size_t                  adjustedSize;
    _IOMallocContiguousEntry * entry;
    ppnum_t                    base = 0;

    if( !address)
        return;

    assert(size);

    mutex_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if( entry->virtual == address ) {
            base = entry->ioBase;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    mutex_unlock(gIOMallocContiguousEntriesLock);

    if (base)
    {
        IOMapperIOVMFree(base, (size + PAGE_MASK) >> PAGE_SHIFT);
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }

    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                            - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                            - sizeof(vm_address_t) ));

        kfree((vm_offset_t) allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);
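
/*
 * IOIteratePageableMaps() tries the callback against the existing pageable
 * submaps, starting at the cached hint and working backwards.  If every map
 * fails, it takes the lock and, up to the kIOMaxPageableMaps limit, creates
 * another submap of at least kIOPageableMapSize (or the request size, if
 * larger) and retries.
 */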

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        mutex_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            mutex_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    TRUE,
                    &map);
        if( KERN_SUCCESS != kr) {
            mutex_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        mutex_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t    size;
};
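
/*
 * IOMallocPageable() allocates pageable kernel memory out of the shared
 * IOKit pageable submaps by handing IOMallocPageableCallback() to
 * IOIteratePageableMaps(); IOFreePageable() finds the owning submap with
 * IOPageableMapForAddress() before freeing.
 */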

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomalloc_size += round_page_32(size);
#endif

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}

void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomalloc_size -= round_page_32(size);
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa,
                                vm_size_t length, unsigned int options);
extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length);
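
/*
 * IOSetProcessorCacheMode() only operates on kernel_task mappings.  It rounds
 * the range to whole pages and, one page at a time, looks up the physical
 * page and remaps it with the requested cache mode encoded in the
 * kIOMapCacheMask bits.
 */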

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );

    length = round_page_32(address + length) - trunc_page_32( address );
    address = trunc_page_32( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, pagenum << PAGE_SHIFT, page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

#if __ppc__
    flush_dcache64( (addr64_t) address, (unsigned) length, false );
#endif

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

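/*
 * The computation below assumes kernel stacks are KERNEL_STACK_SIZE-sized,
 * KERNEL_STACK_SIZE-aligned blocks that grow downward; under that assumption
 * the offset of a local variable within the block approximates the number of
 * stack bytes still unused.
 */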
SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOSleep(unsigned milliseconds)
{
    wait_result_t wait_result;

    wait_result = assert_wait_timeout(milliseconds, THREAD_UNINT);
    assert(wait_result == THREAD_WAITING);

    wait_result = thread_block(THREAD_CONTINUE_NULL);
    assert(wait_result == THREAD_TIMED_OUT);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    extern void delay(int usec);

    delay(microseconds);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup();

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}

void IOPanic(const char *reason)
{
    panic(reason);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];    // that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    sprintf(noValue, "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
    const IONamedValue *regValueArray,
    int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

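/*
 * IOSizeToAlignment() returns the bit position of the highest set bit in
 * size (so for a power of two it is exactly log2(size)), and
 * IOAlignmentToSize() inverts that by returning 1 << align.  For example,
 * IOSizeToAlignment(4096) is 12 and IOAlignmentToSize(12) is 4096.
 */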
IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}

IOReturn IONDRVLibrariesInitialize( void )
{
    return( kIOReturnUnsupported );
}