/*
 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91   Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98   cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

extern "C"
{


mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern kern_return_t kmem_suballoc(
    vm_map_t    parent,
    vm_offset_t *addr,
    vm_size_t   size,
    boolean_t   pageable,
    boolean_t   anywhere,
    vm_map_t    *new_map);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;


/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

/* LP64todo - these need to expand */
typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

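/*
 * IOLibInit: one-time setup for this file. It carves the first pageable
 * submap (kIOPageableMapSize bytes) out of kernel_map, creates the IOKit
 * lock group, and initializes the mutexes and queue used to track
 * IOMallocContiguous allocations. Subsequent calls return immediately.
 */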
void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                        &gIOKitPageableSpace.maps[0].address,
                        kIOPageableMapSize,
                        TRUE,
                        VM_FLAGS_ANYWHERE,
                        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint  = 0;
    gIOKitPageableSpace.count = 1;

    gIOMallocContiguousEntriesLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

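/*
 * IOCreateThread starts a kernel thread running fcn(arg) and immediately
 * drops the reference returned by kernel_thread_start, so the returned
 * thread_t is not an owning reference. The thread typically ends itself
 * by calling IOExitThread.
 *
 * Illustrative use (myWorker and myContext are hypothetical, not part of
 * this file):
 *
 *     static void myWorker(void * arg) { / * ... * / IOExitThread(); }
 *     IOThread t = IOCreateThread(&myWorker, myContext);
 *     if (!t)
 *         ;   // thread creation failed
 */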
IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */


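/*
 * IOMalloc/IOFree are thin wrappers around kalloc/kfree plus optional
 * allocation accounting. Note that IOFree must be passed the same size
 * that was handed to IOMalloc; the allocation size is not recorded here.
 */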
void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif
    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

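/*
 * IOMallocAligned: requests whose padded size reaches a page are satisfied
 * directly by kernel_memory_allocate with the alignment mask. Smaller
 * requests are over-allocated (size + header + alignment slop), and the
 * real allocation size and base address are stashed in a hidden header
 * immediately below the aligned pointer returned to the caller, so
 * IOFreeAligned can recover them later.
 */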
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                        + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                        & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}

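/*
 * IOFreeAligned must be called with the original request size so it can
 * tell whether the block came straight from kernel_memory_allocate (freed
 * with kmem_free) or carries the hidden header written by IOMallocAligned
 * (freed via the stored size and base address).
 */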
void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

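/*
 * IOKernelFreeContiguous is the counterpart of IOKernelAllocateContiguous
 * below. As with IOFreeAligned, the padded-size computation tells it which
 * path the allocation took: page-sized and larger blocks go back through
 * kmem_free, smaller ones are released via the hidden header and kfree.
 */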
void
IOKernelFreeContiguous(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {

        adjustedSize = *((mach_vm_size_t *)
                        (address - sizeof(mach_vm_address_t) - sizeof(mach_vm_size_t)));
        allocationAddress = *((mach_vm_address_t *)
                        (address - sizeof(mach_vm_address_t) ));
        kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

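/*
 * IOKernelAllocateContiguous: requests of a page or more are physically
 * contiguous by construction (kmem_alloc_contig, or a single page from
 * kernel_memory_allocate). Sub-page requests are padded to twice the size
 * so that, if the aligned pointer would straddle a page boundary, it can
 * be rounded up to the next page and still fit; the hidden header again
 * records the kalloc size and base address for the free path.
 */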
mach_vm_address_t
IOKernelAllocateContiguous(mach_vm_size_t size, mach_vm_size_t alignment)
{
    kern_return_t     kr;
    mach_vm_address_t address;
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;
    mach_vm_address_t alignMask;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);

    if (adjustedSize >= page_size)
    {
        vm_offset_t virt;
        adjustedSize = size;
        if (adjustedSize > page_size)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, 0);
        }
        if (KERN_SUCCESS == kr)
            address = virt;
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                        + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
                        & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page_32(address);

            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
                            - sizeof(mach_vm_address_t))) = adjustedSize;
            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif

    return (address);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

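/*
 * IOMallocContiguous: when the caller does not need the physical address,
 * the request is forwarded to IOKernelAllocateContiguous. When it does,
 * the memory is obtained from an IOBufferMemoryDescriptor created with
 * kIOMemoryPhysicallyContiguous and a physical mask derived from the
 * requested alignment; each descriptor is kept on gIOMallocContiguousEntries,
 * keyed by its mapped virtual address, so IOFreeContiguous can locate and
 * release it later.
 */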
struct _IOMallocContiguousEntry
{
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateContiguous(size, alignment);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask = alignment - 1;
        physicalMask = 0xFFFFFFFF ^ (alignMask & PAGE_MASK);
        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry *
        entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md          = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                     _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address          = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}

void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if( !address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreeContiguous((mach_vm_address_t) address, size);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

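/*
 * IOIteratePageableMaps: invokes the callback on each existing pageable
 * submap, starting at the most recently successful one (the hint). If
 * every map fails, and the pool is not yet at kIOMaxPageableMaps, a new
 * submap of at least kIOPageableMapSize is carved out of kernel_map under
 * the pool lock and the loop retries.
 */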
kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index    = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t    size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

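/*
 * IOMallocPageable allocates pageable kernel memory out of the shared
 * IOKit pageable map pool via IOIteratePageableMaps; alignments larger
 * than a page and sizes above kIOPageableMaxMapSize are rejected.
 * IOFreePageable must be passed the same size that was allocated.
 */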
void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomallocpageable_size += round_page_32(size);
#endif

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}

void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomallocpageable_size -= round_page_32(size);
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

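/*
 * IOSetProcessorCacheMode: for kernel_task only, re-maps each page of the
 * (page-rounded) range in place with the requested cache mode, by looking
 * up the physical page with pmap_find_phys and replacing the mapping via
 * IOUnmapPages/IOMapPages. Fails with kIOReturnVMError if a page in the
 * range has no valid mapping.
 */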
IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );

    length = round_page_32(address + length) - trunc_page_32( address );
    address = trunc_page_32( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

#if __ppc__
    flush_dcache64( (addr64_t) address, (unsigned) length, false );
#endif

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

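/*
 * OSKernelStackRemaining returns the offset of a local variable within the
 * current kernel stack (the low bits of its address, assuming stacks are
 * KERNEL_STACK_SIZE-sized and aligned). With a downward-growing stack this
 * approximates the number of bytes still available.
 */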
SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

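/*
 * IOLog formats its arguments with the kernel's _doprnt and emits each
 * character through conslog_putc, which feeds the console and the system
 * log. IOPanic simply forwards its reason string to panic().
 */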
void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup(void);

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}

void IOPanic(const char *reason)
{
    panic(reason);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];   // that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    sprintf(noValue, "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
                            const IONamedValue *regValueArray,
                            int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

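/*
 * IOSizeToAlignment returns the bit position of the highest set bit of
 * size, i.e. the largest power-of-two alignment (expressed as an exponent)
 * not exceeding size; IOAlignmentToSize is the inverse, computing 1 << align.
 * For example, IOSizeToAlignment(4096) == 12 and IOAlignmentToSize(12) == 4096.
 */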
IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}

} /* extern "C" */