/*
 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98    cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;


/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

/* LP64todo - these need to expand */
typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                        &gIOKitPageableSpace.maps[0].address,
                        kIOPageableMapSize,
                        TRUE,
                        VM_FLAGS_ANYWHERE,
                        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock        = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock  = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


volatile void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}
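
/*
 * Usage sketch (illustrative only; the worker function, its argument type
 * and the surrounding start() routine are hypothetical): a driver can spin
 * up a helper with IOCreateThread() and have it call IOExitThread() when
 * the deferred work is finished.
 *
 *     static void myWorker(void * arg)
 *     {
 *         MyDriverState * state = (MyDriverState *) arg;
 *         // ... perform deferred work with state ...
 *         IOExitThread();                       // does not return
 *     }
 *
 *     // e.g. from a start() routine:
 *     IOThread t = IOCreateThread(&myWorker, state);
 *     if (t == NULL)
 *         ; // thread creation failed
 */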

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */


void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif
    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}
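
/*
 * Usage sketch (illustrative; MyRecord is a hypothetical type): IOFree()
 * must be given the same size that was passed to IOMalloc(), because the
 * allocation is handed straight to kfree() with that size and no size is
 * recorded alongside the block.
 *
 *     MyRecord * rec = (MyRecord *) IOMalloc(sizeof(MyRecord));
 *     if (rec) {
 *         // ... use rec ...
 *         IOFree(rec, sizeof(MyRecord));
 *     }
 */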

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                       + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                      & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
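
/*
 * Note on the sub-page path above: for requests smaller than a page,
 * IOMallocAligned() over-allocates by alignMask plus room for two hidden
 * header words, rounds the returned pointer up to the requested alignment,
 * and stores the adjusted size and the real allocation base immediately
 * below the returned address:
 *
 *     allocationAddress                          address (returned, aligned)
 *     |                                          |
 *     v                                          v
 *     [ ...padding... ][adjustedSize][allocationAddress][ caller's buffer ]
 *
 * IOFreeAligned() reads those two words back to recover the original
 * allocation and its size, which is why the pointer and size passed to it
 * must match exactly what IOMallocAligned() handed out.
 */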

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry
{
    void *        virtual;
    ppnum_t       ioBase;
    queue_chain_t link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;
    ppnum_t       pagenum;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size)
    {
        adjustedSize = size;
        if (adjustedSize > page_size)
        {
            kr = kmem_alloc_contig(kernel_map, &address, size,
                                   alignMask, 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &address,
                                        size, alignMask, 0);
        }
        if (KERN_SUCCESS != kr)
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                       + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                      & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page_32(address);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                = allocationAddress;
        } else
            address = 0;
    }

    /* Do we want a physical address? */
    if (address && physicalAddress)
    {
        do
        {
            /* Get the physical page */
            pagenum = pmap_find_phys(kernel_pmap, (addr64_t) address);
            if(pagenum)
            {
                IOByteCount offset;
                ppnum_t     base;

                base = IOMapperIOVMAlloc((size + PAGE_MASK) >> PAGE_SHIFT);
                if (base)
                {
                    _IOMallocContiguousEntry *
                        entry = IONew(_IOMallocContiguousEntry, 1);
                    if (!entry)
                    {
                        IOFreeContiguous((void *) address, size);
                        address = 0;
                        break;
                    }
                    entry->virtual = (void *) address;
                    entry->ioBase  = base;
                    lck_mtx_lock(gIOMallocContiguousEntriesLock);
                    queue_enter( &gIOMallocContiguousEntries, entry,
                                 _IOMallocContiguousEntry *, link );
                    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

                    *physicalAddress = (IOPhysicalAddress)((base << PAGE_SHIFT) | (address & PAGE_MASK));
                    for (offset = 0; offset < ((size + PAGE_MASK) >> PAGE_SHIFT); offset++, pagenum++)
                        IOMapperInsertPage( base, offset, pagenum );
                }
                else
                    *physicalAddress = (IOPhysicalAddress)((pagenum << PAGE_SHIFT) | (address & PAGE_MASK));
            }
            else
                /* Did not find, return 0 */
                *physicalAddress = (IOPhysicalAddress) 0;
        }
        while (false);
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
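
/*
 * Usage sketch (illustrative; the size, alignment and device programming
 * step are hypothetical): a physically contiguous, page-aligned buffer for
 * DMA descriptors, released later with the matching size.
 *
 *     IOPhysicalAddress phys;
 *     void * buf = IOMallocContiguous(4096, 4096, &phys);
 *     if (buf) {
 *         // hand phys to the device, touch the buffer through buf
 *         IOFreeContiguous(buf, 4096);
 *     }
 */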

void IOFreeContiguous(void * address, vm_size_t size)
{
    vm_address_t               allocationAddress;
    vm_size_t                  adjustedSize;
    _IOMallocContiguousEntry * entry;
    ppnum_t                    base = 0;

    if( !address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if( entry->virtual == address ) {
            base = entry->ioBase;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (base)
    {
        IOMapperIOVMFree(base, (size + PAGE_MASK) >> PAGE_SHIFT);
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }

    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t IOIteratePageableMaps(vm_size_t size,
                                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                           &min,
                           segSize,
                           TRUE,
                           VM_FLAGS_ANYWHERE,
                           &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}
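
/*
 * How the pageable-map pool above grows: the callback is first offered the
 * existing submaps, starting at the most recently successful one (the hint)
 * and walking backwards through the array. Only when every existing map has
 * refused the request is a new kmem_suballoc() submap of at least
 * kIOPageableMapSize (96 MB) appended, up to kIOMaxPageableMaps entries,
 * after which the loop retries the callback against the new map.
 */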

struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t    size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomallocpageable_size += round_page_32(size);
#endif

    return( (void *) ref.address );
}
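
/*
 * Usage sketch (illustrative; the size is arbitrary): pageable memory suits
 * larger buffers that are only touched from contexts where page faults can
 * be taken; it must be released with IOFreePageable() and the same size.
 *
 *     void * table = IOMallocPageable(128 * 1024, page_size);
 *     if (table) {
 *         // ... fill in table from a context that may block ...
 *         IOFreePageable(table, 128 * 1024);
 *     }
 */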

vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}

void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomallocpageable_size -= round_page_32(size);
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );

    length = round_page_32(address + length) - trunc_page_32( address );
    address = trunc_page_32( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, pagenum << PAGE_SHIFT, page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}
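
/*
 * Usage sketch (illustrative; buf and bufLen are hypothetical, and it is
 * assumed here that the unshifted cache-mode constants from IOTypes.h,
 * e.g. kIOInhibitCache, are the intended argument, since the routine
 * shifts cacheMode by kIOMapCacheShift itself): disable caching on an
 * existing kernel-task mapping.
 *
 *     IOReturn r = IOSetProcessorCacheMode(kernel_task,
 *                                          (IOVirtualAddress) buf,
 *                                          bufLen,
 *                                          kIOInhibitCache);
 *     if (kIOReturnSuccess != r)
 *         IOLog("cache mode change failed: %x\n", r);
 */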


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

#if __ppc__
    flush_dcache64( (addr64_t) address, (unsigned) length, false );
#endif

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup(void);

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}

void IOPanic(const char *reason)
{
    panic(reason);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80]; // that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    sprintf(noValue, "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
                            const IONamedValue *regValueArray,
                            int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}
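
/*
 * Usage sketch (illustrative; the table contents are hypothetical): both
 * helpers walk an IONamedValue table terminated by an entry whose name is
 * NULL, mapping values to names and back.
 *
 *     static const IONamedValue gPowerStates[] = {
 *         { .value = 0, .name = "off"    },
 *         { .value = 1, .name = "idle"   },
 *         { .value = 2, .name = "active" },
 *         { .value = 0, .name = NULL     }   // terminator
 *     };
 *
 *     const char * name = IOFindNameForValue(2, gPowerStates);   // "active"
 *     int          state;
 *     if (kIOReturnSuccess == IOFindValueForName("idle", gPowerStates, &state))
 *         ;                                                      // state == 1
 */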

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
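
/*
 * Worked example for the two conversions above: IOSizeToAlignment() returns
 * the position of the highest set bit, i.e. floor(log2(size)), and
 * IOAlignmentToSize() is the inverse power of two.
 *
 *     IOSizeToAlignment(0x1000) == 12        // 4 KB
 *     IOSizeToAlignment(0x1800) == 12        // non-powers of two round down
 *     IOAlignmentToSize(12)     == 0x1000
 *     IOSizeToAlignment(0)      == 0
 */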