apple/xnu (xnu-792.6.76) - iokit/Kernel/IOLib.c
/*
 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98    cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;


/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

/* LP64todo - these need to expand */
typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                    &gIOKitPageableSpace.maps[0].address,
                    kIOPageableMapSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock        = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock  = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t   result;
    thread_t        thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


volatile void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

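/*
 * Minimal usage sketch (compiled out; the worker function and argument
 * struct below are hypothetical, not part of this file). IOCreateThread()
 * wraps kernel_thread_start() and drops the thread reference, so the caller
 * cannot join the thread and must arrange its own completion signalling;
 * the worker ends itself with IOExitThread().
 */
#if 0
struct MyWorkArgs { int unit; };                /* hypothetical argument block */

static void MyWorker(void * arg)
{
    struct MyWorkArgs * work = (struct MyWorkArgs *) arg;

    IOLog("worker started for unit %d\n", work->unit);
    /* ... do the work ... */
    IOFree(work, sizeof(*work));
    IOExitThread();                             /* terminate the calling thread */
}

static void StartWorker(void)
{
    struct MyWorkArgs * work = (struct MyWorkArgs *) IOMalloc(sizeof(*work));
    if (work) {
        work->unit = 0;
        (void) IOCreateThread(&MyWorker, work);
    }
}
#endif
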
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */


void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif
    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}

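/*
 * Minimal usage sketch (compiled out, illustrative only): IOFree() must be
 * given the same size that was passed to IOMalloc(), since kalloc/kfree
 * need the allocation size and no size lookup is kept here.
 */
#if 0
static void IOMallocExample(void)
{
    vm_size_t size = 256;
    UInt8 *   buf  = (UInt8 *) IOMalloc(size);

    if (buf) {
        bzero(buf, size);
        /* ... use buf ... */
        IOFree(buf, size);                      /* size must match the allocation */
    }
}
#endif
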
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                        size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                            adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                        + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                        & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

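/*
 * Minimal usage sketch (compiled out, illustrative only): requesting a
 * page-aligned buffer. Requests of at least page_size come straight from
 * kernel_memory_allocate(); smaller requests are over-allocated with
 * kalloc() and the real allocation address and size are stashed just below
 * the returned pointer, which is why IOFreeAligned() also needs the
 * original size. The alignment must be a power of two.
 */
#if 0
static void IOMallocAlignedExample(void)
{
    vm_size_t size      = 8192;
    vm_size_t alignment = 4096;
    void *    buf       = IOMallocAligned(size, alignment);

    if (buf) {
        assert(0 == ((vm_address_t) buf & (alignment - 1)));
        /* ... use buf ... */
        IOFreeAligned(buf, size);               /* same size as the allocation */
    }
}
#endif
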
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry
{
    void *        virtual;
    ppnum_t       ioBase;
    queue_chain_t link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                           IOPhysicalAddress * physicalAddress)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;
    ppnum_t       pagenum;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size)
    {
        adjustedSize = size;
        if (adjustedSize > page_size)
        {
            kr = kmem_alloc_contig(kernel_map, &address, size,
                                   alignMask, 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &address,
                                        size, alignMask, 0);
        }
        if (KERN_SUCCESS != kr)
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                        + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                        & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page_32(address);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    /* Do we want a physical address? */
    if (address && physicalAddress)
    {
        do
        {
            /* Get the physical page */
            pagenum = pmap_find_phys(kernel_pmap, (addr64_t) address);
            if(pagenum)
            {
                IOByteCount offset;
                ppnum_t base;

                base = IOMapperIOVMAlloc((size + PAGE_MASK) >> PAGE_SHIFT);
                if (base)
                {
                    _IOMallocContiguousEntry *
                        entry = IONew(_IOMallocContiguousEntry, 1);
                    if (!entry)
                    {
                        IOFreeContiguous((void *) address, size);
                        address = 0;
                        break;
                    }
                    entry->virtual = (void *) address;
                    entry->ioBase  = base;
                    lck_mtx_lock(gIOMallocContiguousEntriesLock);
                    queue_enter( &gIOMallocContiguousEntries, entry,
                                _IOMallocContiguousEntry *, link );
                    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

                    *physicalAddress = (IOPhysicalAddress)((base << PAGE_SHIFT) | (address & PAGE_MASK));
                    for (offset = 0; offset < ((size + PAGE_MASK) >> PAGE_SHIFT); offset++, pagenum++)
                        IOMapperInsertPage( base, offset, pagenum );
                }
                else
                    *physicalAddress = (IOPhysicalAddress)((pagenum << PAGE_SHIFT) | (address & PAGE_MASK));
            }
            else
                /* Did not find, return 0 */
                *physicalAddress = (IOPhysicalAddress) 0;
        }
        while (false);
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}

void IOFreeContiguous(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;
    _IOMallocContiguousEntry * entry;
    ppnum_t      base = 0;

    if( !address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                    _IOMallocContiguousEntry *, link )
    {
        if( entry->virtual == address ) {
            base = entry->ioBase;
            queue_remove( &gIOMallocContiguousEntries, entry,
                            _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (base)
    {
        IOMapperIOVMFree(base, (size + PAGE_MASK) >> PAGE_SHIFT);
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }

    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

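/*
 * Minimal usage sketch (compiled out, illustrative only): allocating a
 * small physically contiguous buffer and retrieving its bus address for
 * DMA. When a system mapper is active the returned address is an I/O-space
 * address backed by IOMapperIOVMAlloc(); either way the buffer is released
 * with IOFreeContiguous() using the original size. Newer kexts would
 * normally use IOBufferMemoryDescriptor instead.
 */
#if 0
static void IOMallocContiguousExample(void)
{
    IOPhysicalAddress physAddr = 0;
    vm_size_t         size     = 4096;
    void *            buf      = IOMallocContiguous(size, 4096, &physAddr);

    if (buf) {
        IOLog("contiguous buffer at %p, physical 0x%lx\n", buf, (unsigned long) physAddr);
        /* ... program the device with physAddr, run the DMA ... */
        IOFreeContiguous(buf, size);
    }
}
#endif
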
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t    size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomallocpageable_size += round_page_32(size);
#endif

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}

void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomallocpageable_size -= round_page_32(size);
#endif
}

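/*
 * Minimal usage sketch (compiled out, illustrative only): pageable
 * allocations come from one of the gIOKitPageableSpace submaps rather than
 * kernel_map, so the memory may be paged out; it must not be touched at
 * interrupt time or handed to hardware without wiring it first.
 */
#if 0
static void IOMallocPageableExample(void)
{
    vm_size_t size = 16 * 1024;
    void *    buf  = IOMallocPageable(size, page_size);

    if (buf) {
        /* ... touch only from a context that may take page faults ... */
        IOFreePageable(buf, size);
    }
}
#endif
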
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );

    length = round_page_32(address + length) - trunc_page_32( address );
    address = trunc_page_32( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, pagenum << PAGE_SHIFT, page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                  IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

#if __ppc__
    flush_dcache64( (addr64_t) address, (unsigned) length, false );
#endif

    return( kIOReturnSuccess );
}

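/*
 * Minimal usage sketch (compiled out, illustrative only): marking a
 * kernel-mapped buffer cache-inhibited before handing it to a device, then
 * flushing the data cache once descriptors have been written. The constant
 * kIOInhibitCache is one of the IOKit cache-mode values; as the code above
 * shows, only kernel_task mappings are supported here.
 */
#if 0
static void CacheModeExample(IOVirtualAddress bufAddr, IOByteCount bufLen)
{
    IOReturn ret;

    ret = IOSetProcessorCacheMode(kernel_task, bufAddr, bufLen, kIOInhibitCache);
    if (kIOReturnSuccess != ret)
        IOLog("IOSetProcessorCacheMode failed: 0x%x\n", ret);

    /* ... fill in descriptors at bufAddr ... */

    (void) IOFlushProcessorCache(kernel_task, bufAddr, bufLen);
}
#endif
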
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

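/*
 * Minimal usage sketch (compiled out, illustrative only): IOSleep() blocks
 * the calling thread and is only legal where blocking is allowed, while
 * IODelay() is meant for short waits on the order of microseconds, such as
 * giving a device register time to settle.
 */
#if 0
static void DelayExample(void)
{
    IOSleep(10);        /* wait ~10 ms; not for interrupt context */
    IODelay(50);        /* wait ~50 us while polling hardware */
}
#endif
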
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup(void);

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}

void IOPanic(const char *reason)
{
    panic(reason);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80]; // that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    sprintf(noValue, "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
    const IONamedValue *regValueArray,
    int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

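/*
 * Minimal usage sketch (compiled out; the table below is hypothetical).
 * Both lookup routines walk the IONamedValue array until they hit an entry
 * whose name is NULL, so the table must end with such a terminator.
 */
#if 0
static const IONamedValue gPowerStateNames[] = {
    { 0, "off"     },
    { 1, "standby" },
    { 2, "on"      },
    { 0, NULL      }                    /* terminator */
};

static void NamedValueExample(void)
{
    int value;

    IOLog("state 2 is \"%s\"\n", IOFindNameForValue(2, gPowerStateNames));

    if (kIOReturnSuccess == IOFindValueForName("standby", gPowerStateNames, &value))
        IOLog("\"standby\" is state %d\n", value);
}
#endif
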
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
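
/*
 * Minimal usage sketch (compiled out, illustrative only): IOSizeToAlignment()
 * returns the bit index of the highest set bit, so a non-power-of-two size
 * rounds down; IOAlignmentToSize() converts back.
 */
#if 0
static void AlignmentExample(void)
{
    IOAlignment a = IOSizeToAlignment(4096);            /* 12 */

    assert(IOAlignmentToSize(a) == 4096);
    assert(IOAlignmentToSize(IOSizeToAlignment(3000)) == 2048);
}
#endif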