iokit/Kernel/IOLib.cpp
/*
 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98    cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

extern "C"
{


mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern kern_return_t kmem_suballoc(
    vm_map_t    parent,
    vm_offset_t *addr,
    vm_size_t   size,
    boolean_t   pageable,
    boolean_t   anywhere,
    vm_map_t    *new_map);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1 = NULL;
void *_giDebugReserved2 = NULL;


/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

/* LP64todo - these need to expand */
typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                        &gIOKitPageableSpace.maps[0].address,
                        kIOPageableMapSize,
                        TRUE,
                        VM_FLAGS_ANYWHERE,
                        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock            = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end     = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint            = 0;
    gIOKitPageableSpace.count           = 1;

    gIOMallocContiguousEntriesLock      = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}
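/*
 * Note (added commentary): IOLibInit() carves the first pageable submap
 * (kIOPageableMapSize, 96 MB) out of kernel_map and records it in
 * gIOKitPageableSpace.  Further submaps, up to kIOMaxPageableMaps, are
 * created lazily by IOIteratePageableMaps() below when an allocation cannot
 * be satisfied from the existing ones.
 */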

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t   result;
    thread_t        thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}
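/*
 * Usage sketch (illustrative only, not part of this file): a driver work
 * thread created with IOCreateThread().  The type and function names below
 * are hypothetical.  Note that IOCreateThread() has already dropped its
 * reference with thread_deallocate(), so the returned IOThread is mainly
 * useful as a success/failure indication.
 *
 *   static void myWorkThread(void * arg)
 *   {
 *       MyDriverState * state = (MyDriverState *) arg;    // hypothetical
 *       // ... do deferred work ...
 *       IOExitThread();                                    // does not return
 *   }
 *
 *   if (!IOCreateThread(&myWorkThread, state))
 *       IOLog("IOCreateThread failed\n");
 */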

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */


void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif
    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}
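/*
 * Usage sketch (illustrative only): IOFree() must be passed the same size
 * that was given to IOMalloc(), since kfree() needs the original allocation
 * size.  The structure name is hypothetical.
 *
 *   MyRecord * rec = (MyRecord *) IOMalloc(sizeof(MyRecord));
 *   if (rec) {
 *       bzero(rec, sizeof(MyRecord));
 *       // ... use rec ...
 *       IOFree(rec, sizeof(MyRecord));
 *   }
 */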

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t   kr;
    vm_address_t    address;
    vm_address_t    allocationAddress;
    vm_size_t       adjustedSize;
    vm_offset_t     alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                        + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                        & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t    allocationAddress;
    vm_size_t       adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}
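/*
 * Layout note (added commentary) for the sub-page path of IOMallocAligned()
 * and IOFreeAligned(): the underlying block is over-allocated by alignMask
 * plus room for two bookkeeping words, which are stored immediately before
 * the aligned pointer handed back to the caller:
 *
 *   allocationAddress                                   address (returned)
 *   |                                                   |
 *   v                                                   v
 *   [ padding ][ adjustedSize ][ allocationAddress ][ caller's data ... ]
 *
 * IOFreeAligned() reads those two words back from just below the pointer to
 * recover the original allocation address and size before freeing it.
 */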

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOKernelFreeContiguous(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {

        adjustedSize = *((mach_vm_size_t *)
                        (address - sizeof(mach_vm_address_t) - sizeof(mach_vm_size_t)));
        allocationAddress = *((mach_vm_address_t *)
                        (address - sizeof(mach_vm_address_t) ));
        kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

mach_vm_address_t
IOKernelAllocateContiguous(mach_vm_size_t size, mach_vm_size_t alignment)
{
    kern_return_t       kr;
    mach_vm_address_t   address;
    mach_vm_address_t   allocationAddress;
    mach_vm_size_t      adjustedSize;
    mach_vm_address_t   alignMask;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);

    if (adjustedSize >= page_size)
    {
        vm_offset_t virt;
        adjustedSize = size;
        if (adjustedSize > page_size)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, 0);
        }
        if (KERN_SUCCESS == kr)
            address = virt;
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                        + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
                        & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page_32(address);

            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
                            - sizeof(mach_vm_address_t))) = adjustedSize;
            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif

    return (address);
}
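/*
 * Note (added commentary): for sub-page requests, IOKernelAllocateContiguous()
 * uses the same stash-before-pointer scheme as IOMallocAligned(), but it
 * reserves 2 * size so the aligned pointer can be slid forward to a page
 * boundary (the atop_32 check above) when the block would otherwise straddle
 * two physical pages.
 */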

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry
{
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateContiguous(size, alignment);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask = alignment - 1;
        physicalMask = 0xFFFFFFFF ^ (alignMask & PAGE_MASK);
        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry *
        entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md          = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                     _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address          = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}

void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if( !address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreeContiguous((mach_vm_address_t) address, size);
    }
}
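/*
 * Usage sketch (illustrative only): allocating a physically contiguous
 * buffer and obtaining its physical address for a DMA engine.  The size and
 * variable names are hypothetical.  When a physical address is requested,
 * the allocation is backed by an IOBufferMemoryDescriptor and tracked on
 * gIOMallocContiguousEntries so that IOFreeContiguous() can find and release
 * the descriptor.
 *
 *   IOPhysicalAddress phys;
 *   void * buf = IOMallocContiguous(4096, 4096, &phys);
 *   if (buf) {
 *       // program the hardware with 'phys' ...
 *       IOFreeContiguous(buf, 4096);
 *   }
 */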

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                           &min,
                           segSize,
                           TRUE,
                           VM_FLAGS_ANYWHERE,
                           &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t    size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomallocpageable_size += round_page_32(size);
#endif

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}

void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomallocpageable_size -= round_page_32(size);
#endif
}
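/*
 * Usage sketch (illustrative only): pageable allocations come out of the
 * gIOKitPageableSpace submaps, are accounted in whole pages, and can be
 * paged out, so they must not be touched at interrupt time or handed
 * directly to hardware.  The size below is hypothetical.
 *
 *   void * table = IOMallocPageable(128 * 1024, page_size);
 *   if (table) {
 *       // ... use table in a pageable context ...
 *       IOFreePageable(table, 128 * 1024);
 *   }
 */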

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );

    length = round_page_32(address + length) - trunc_page_32( address );
    address = trunc_page_32( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}
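/*
 * Usage sketch (illustrative only): marking a kernel buffer write-through
 * cached.  Only kernel_task is supported; the range is expanded to whole
 * pages and each page is remapped with the requested cache mode.  The
 * variable names are hypothetical.
 *
 *   IOReturn rc = IOSetProcessorCacheMode(kernel_task,
 *                                         (IOVirtualAddress) buf,
 *                                         bufLen,
 *                                         kIOWriteThruCache);
 *   if (kIOReturnSuccess != rc)
 *       IOLog("IOSetProcessorCacheMode failed: 0x%x\n", rc);
 */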


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

#if __ppc__
    flush_dcache64( (addr64_t) address, (unsigned) length, false );
#endif

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup(void);

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}

void IOPanic(const char *reason)
{
    panic(reason);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];	// that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    sprintf(noValue, "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
                            const IONamedValue *regValueArray,
                            int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}
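/*
 * Usage sketch (illustrative only): an IONamedValue table maps integer codes
 * to names and must be terminated by an entry whose name is NULL.  The table
 * contents below are hypothetical.
 *
 *   static const IONamedValue powerStateNames[] = {
 *       { 0, "off"  },     // { value, name }
 *       { 1, "doze" },
 *       { 2, "on"   },
 *       { 0, NULL   }      // terminator
 *   };
 *
 *   IOLog("state: %s\n", IOFindNameForValue(state, powerStateNames));
 *
 * Note that IOFindNameForValue() formats unknown values into a static buffer
 * (noValue), so concurrent callers can stomp on each other's result.
 */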

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
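/*
 * Worked example (added commentary): IOSizeToAlignment() returns the bit
 * position of the highest set bit in 'size' (effectively floor(log2(size))),
 * and IOAlignmentToSize() is its inverse, returning 1 << align.  For
 * instance, IOSizeToAlignment(4096) == 12 and IOAlignmentToSize(12) == 4096,
 * while IOSizeToAlignment(3) == 1, since only power-of-two alignments are
 * representable.
 */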

} /* extern "C" */