/*
 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98    cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;


/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

/* LP64todo - these need to expand */
typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                &gIOKitPageableSpace.maps[0].address,
                kIOPageableMapSize,
                TRUE,
                VM_FLAGS_ANYWHERE,
                &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock        = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


volatile void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

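/*
 * Illustrative sketch (not part of the original file): a minimal use of
 * IOCreateThread/IOExitThread. The entry point, its argument type, and the
 * variable names are assumptions for the example only. Note that the routine
 * above calls thread_deallocate() before returning, so the returned IOThread
 * is only a hint and may already be gone by the time the caller looks at it.
 *
 *     static void myWorkerEntry(void *arg)          // hypothetical entry point
 *     {
 *         struct MyDriverState *state = (struct MyDriverState *) arg;
 *         // ... long-running work on behalf of the driver ...
 *         IOExitThread();                           // terminates this thread
 *     }
 *
 *     IOThread t = IOCreateThread(&myWorkerEntry, state);
 *     if (t == NULL)
 *         IOLog("worker thread creation failed\n");
 */
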
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */


void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif
    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}

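/*
 * Illustrative sketch (not part of the original file): callers of IOMalloc
 * must track the allocation size themselves, because IOFree passes the size
 * straight through to kfree. The buffer name and size below are assumptions
 * for the example only.
 *
 *     vm_size_t bufSize = 256;
 *     UInt8 *   buf     = (UInt8 *) IOMalloc(bufSize);
 *     if (buf) {
 *         // ... use buf ...
 *         IOFree(buf, bufSize);       // must pass the original size
 *     }
 */
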
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                        + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                      & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

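/*
 * Illustrative sketch (not part of the original file): for sub-page requests
 * IOMallocAligned stashes the real allocation size and start address just
 * below the pointer it returns, so IOFreeAligned must be called with the
 * same size that was originally requested. The values below are assumptions
 * for the example only.
 *
 *     vm_size_t bufSize = 4096;
 *     void *    buf     = IOMallocAligned(bufSize, 4096);   // page-aligned
 *     if (buf) {
 *         assert(0 == ((vm_address_t) buf & (4096 - 1)));
 *         // ... use the aligned buffer ...
 *         IOFreeAligned(buf, bufSize);
 *     }
 */
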
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry
{
    void *        virtual;
    ppnum_t       ioBase;
    queue_chain_t link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;
    ppnum_t       pagenum;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size)
    {
        adjustedSize = size;
        if (adjustedSize > page_size)
        {
            kr = kmem_alloc_contig(kernel_map, &address, size,
                                   alignMask, 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &address,
                                        size, alignMask, 0);
        }
        if (KERN_SUCCESS != kr)
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                        + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                      & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page_32(address);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    /* Do we want a physical address? */
    if (address && physicalAddress)
    {
        do
        {
            /* Get the physical page */
            pagenum = pmap_find_phys(kernel_pmap, (addr64_t) address);
            if(pagenum)
            {
                IOByteCount offset;
                ppnum_t     base;

                base = IOMapperIOVMAlloc((size + PAGE_MASK) >> PAGE_SHIFT);
                if (base)
                {
                    _IOMallocContiguousEntry *
                        entry = IONew(_IOMallocContiguousEntry, 1);
                    if (!entry)
                    {
                        IOFreeContiguous((void *) address, size);
                        address = 0;
                        break;
                    }
                    entry->virtual = (void *) address;
                    entry->ioBase  = base;
                    lck_mtx_lock(gIOMallocContiguousEntriesLock);
                    queue_enter( &gIOMallocContiguousEntries, entry,
                                 _IOMallocContiguousEntry *, link );
                    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

                    *physicalAddress = (IOPhysicalAddress)((base << PAGE_SHIFT) | (address & PAGE_MASK));
                    for (offset = 0; offset < ((size + PAGE_MASK) >> PAGE_SHIFT); offset++, pagenum++)
                        IOMapperInsertPage( base, offset, pagenum );
                }
                else
                    *physicalAddress = (IOPhysicalAddress)((pagenum << PAGE_SHIFT) | (address & PAGE_MASK));
            }
            else
                /* Did not find, return 0 */
                *physicalAddress = (IOPhysicalAddress) 0;
        }
        while (false);
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}

void IOFreeContiguous(void * address, vm_size_t size)
{
    vm_address_t               allocationAddress;
    vm_size_t                  adjustedSize;
    _IOMallocContiguousEntry * entry;
    ppnum_t                    base = 0;

    if( !address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if( entry->virtual == address ) {
            base = entry->ioBase;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (base)
    {
        IOMapperIOVMFree(base, (size + PAGE_MASK) >> PAGE_SHIFT);
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }

    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

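/*
 * Illustrative sketch (not part of the original file): IOMallocContiguous
 * optionally reports a physical (or mapper/IOVM) address for the start of
 * the buffer, which a driver can hand to a DMA engine while the CPU uses the
 * returned virtual pointer. The names and sizes below are assumptions for
 * the example only; later IOKit releases steer callers toward
 * IOBufferMemoryDescriptor for this kind of allocation.
 *
 *     IOPhysicalAddress phys = 0;
 *     vm_size_t         len  = 8192;
 *     void *            cbuf = IOMallocContiguous(len, page_size, &phys);
 *     if (cbuf && phys) {
 *         // ... program the DMA engine with 'phys', fill 'cbuf' from the CPU ...
 *         IOFreeContiguous(cbuf, len);
 *     }
 */
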
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t IOIteratePageableMaps(vm_size_t size,
                                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t    size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomallocpageable_size += round_page_32(size);
#endif

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}

void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomallocpageable_size -= round_page_32(size);
#endif
}

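/*
 * Illustrative sketch (not part of the original file): pageable allocations
 * come from the gIOKitPageableSpace submaps rather than wired kernel memory,
 * so the buffer may fault when touched and must only be accessed from thread
 * context where blocking is legal (never at interrupt level). The size below
 * is an assumption for the example only.
 *
 *     vm_size_t tblSize = 64 * 1024;
 *     void *    tbl     = IOMallocPageable(tblSize, sizeof(UInt32));
 *     if (tbl) {
 *         // ... touch only where page faults are acceptable ...
 *         IOFreePageable(tbl, tblSize);
 *     }
 */
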
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );

    length = round_page_32(address + length) - trunc_page_32( address );
    address = trunc_page_32( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, pagenum << PAGE_SHIFT, page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}


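/*
 * Illustrative sketch (not part of the original file): remapping a wired
 * kernel-virtual range with a different cache mode. Because the routine
 * above shifts cacheMode by kIOMapCacheShift itself, the unshifted kIO*Cache
 * constants from IOTypes.h appear to be what callers pass; the register
 * block address and length below are assumptions for the example only.
 *
 *     IOReturn r = IOSetProcessorCacheMode(kernel_task,
 *                                          (IOVirtualAddress) regBase,
 *                                          regLength,
 *                                          kIOInhibitCache);
 *     if (kIOReturnSuccess != r)
 *         IOLog("IOSetProcessorCacheMode failed: %x\n", r);
 */
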
IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

#if __ppc__
    flush_dcache64( (addr64_t) address, (unsigned) length, false );
#endif

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

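/*
 * Illustrative sketch (not part of the original file): IOSleep blocks the
 * calling thread and so may only be used where blocking is legal, while
 * IODelay is meant for short waits (here both are built on
 * delay_for_interval). The delay values below are assumptions for the
 * example only.
 *
 *     IODelay(10);      // wait ~10 microseconds for a register to settle
 *     IOSleep(100);     // give the device ~100 milliseconds to power up
 */
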
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup(void);

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}

void IOPanic(const char *reason)
{
    panic(reason);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80]; // that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    sprintf(noValue, "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
                            const IONamedValue *regValueArray,
                            int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

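/*
 * Illustrative sketch (not part of the original file): the IONamedValue table
 * below is an assumption for the example only (layout assumed to be value
 * followed by name, per IOTypes.h). Both lookup routines stop at the first
 * entry whose name is NULL, so the array must end with such a terminator.
 *
 *     static const IONamedValue gPowerStateNames[] = {
 *         { 0, "off"  },
 *         { 1, "doze" },
 *         { 2, "on"   },
 *         { 0, NULL   }           // terminator
 *     };
 *
 *     const char *name = IOFindNameForValue(2, gPowerStateNames);   // "on"
 *     int         state;
 *     if (kIOReturnSuccess == IOFindValueForName("doze", gPowerStateNames, &state))
 *         IOLog("doze is state %d\n", state);
 */
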
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int    intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
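
/*
 * Illustrative sketch (not part of the original file): an IOAlignment encodes
 * an alignment as a power-of-two shift, so IOSizeToAlignment returns the
 * position of the highest set bit and IOAlignmentToSize reverses it. The
 * values below are worked examples, not part of the original code.
 *
 *     IOAlignment  a = IOSizeToAlignment(4096);   // 4096 == 1 << 12, so a == 12
 *     unsigned int s = IOAlignmentToSize(a);      // s == 4096
 *
 *     // For a non-power-of-two size the highest set bit wins:
 *     // IOSizeToAlignment(12288) == 13, and IOAlignmentToSize(13) == 8192.
 */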