/* apple/xnu xnu-1699.24.23: iokit/Kernel/IOLib.cpp */
/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91	Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98	cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <vm/vm_kern.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/OSDebug.h>
#include <sys/sysctl.h>
#endif

#include "libkern/OSAtomic.h"
#include <libkern/c++/OSKext.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <sys/msgbuf.h>

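/*
 * When IOKit statistics are enabled, every allocator in this file reports
 * through IOStatistics::countAlloc(); otherwise the macro compiles away.
 * The do { ... } while (0) wrapper keeps the expansion safe inside
 * unbraced if/else bodies.
 */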
#if IOKITSTATS

#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)

#else

#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */

extern "C"
{


mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern int
__doprnt(
    const char *fmt,
    va_list    argp,
    void       (*putc)(int, void *),
    void       *arg,
    int        radix);

extern void cons_putc_locked(char);
extern void bsd_log_lock(void);
extern void bsd_log_unlock(void);


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;


/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

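/*
 * One-time IOKit library setup: carve the first pageable submap out of
 * kernel_map, create the lock group and mutexes, and initialize the list
 * used to track IOMallocContiguous() allocations. Safe to call more than
 * once; only the first call does any work.
 */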
void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if (libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                &gIOKitPageableSpace.maps[0].address,
                kIOPageableMapSize,
                TRUE,
                VM_FLAGS_ANYWHERE,
                &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint  = 0;
    gIOKitPageableSpace.count = 1;

    gIOMallocContiguousEntriesLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

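/*
 * Thread helpers. Note that IOCreateThread() drops the reference obtained
 * from kernel_thread_start() before returning, so the returned IOThread is
 * only an identifier, not a retained handle.
 */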
IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

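/*
 * IOMalloc()/IOFree() are thin wrappers over the kalloc zone allocator;
 * the caller must pass the original size back to IOFree(). Both keep the
 * IOALLOCDEBUG byte count and IOKit statistics in sync.
 */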
void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
    if ( address ) {
#if IOALLOCDEBUG
        debug_iomalloc_size += size;
#endif
        IOStatisticsAlloc(kIOStatisticsMalloc, size);
    }

    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
        IOStatisticsAlloc(kIOStatisticsFree, size);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

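/*
 * Aligned allocation. Requests of a page or more go straight to
 * kernel_memory_allocate() with the alignment mask. Smaller requests are
 * over-allocated and the result is aligned by hand; two header words are
 * stashed immediately below the returned pointer so IOFreeAligned() can
 * recover the real allocation:
 *
 *   allocationAddress ... [adjustedSize][allocationAddress][returned block]
 *                                                          ^ aligned address
 */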
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_offset_t   address;
    vm_offset_t   allocationAddress;
    vm_size_t     adjustedSize;
    uintptr_t     alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                        + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                        & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t) - sizeof(vm_address_t)))
                        = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                        = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

    if( address) {
#if IOALLOCDEBUG
        debug_iomalloc_size += size;
#endif
        IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
    }

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif

    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

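/*
 * Counterpart to IOKernelAllocateWithPhysicalRestrict() below; it uses the
 * same header-word scheme to find the underlying kalloc block for small
 * allocations, and kmem_free() for page-sized ones.
 */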
void
IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {

        adjustedSize = *((mach_vm_size_t *)
                        (address - sizeof(mach_vm_address_t) - sizeof(mach_vm_size_t)));
        allocationAddress = *((mach_vm_address_t *)
                        (address - sizeof(mach_vm_address_t) ));
        kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

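/*
 * Physically constrained allocation. Contiguous or physically limited
 * requests go to kmem_alloc_contig(); requests that merely need to stay
 * under 4GB use kernel_memory_allocate() with KMA_LOMEM. Small
 * unconstrained requests fall back to kalloc(), over-allocating by 2x so
 * the block can be rounded up when it would straddle a page boundary,
 * with the same below-the-pointer header words used by IOMallocAligned().
 */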
mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
                                     mach_vm_size_t alignment, bool contiguous)
{
    kern_return_t     kr;
    mach_vm_address_t address;
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;
    mach_vm_address_t alignMask;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);

    contiguous = (contiguous && (adjustedSize > page_size))
                   || (alignment > page_size);

    if (contiguous || maxPhys)
    {
        int options = 0;
        vm_offset_t virt;

        adjustedSize = size;
        contiguous = (contiguous && (adjustedSize > page_size))
                           || (alignment > page_size);

        if ((!contiguous) && (maxPhys <= 0xFFFFFFFF))
        {
            maxPhys = 0;
            options |= KMA_LOMEM;
        }

        if (contiguous || maxPhys)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, atop(maxPhys), atop(alignMask), 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, options);
        }
        if (KERN_SUCCESS == kr)
            address = virt;
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                        + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
                        & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page(address);

            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
                            - sizeof(mach_vm_address_t))) = adjustedSize;
            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

#if IOALLOCDEBUG
    if (address) {
        debug_iomalloc_size += size;
    }
#endif

    return (address);
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

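/*
 * IOMallocContiguous() has two backing stores: allocations that do not
 * report a physical address come from IOKernelAllocateWithPhysicalRestrict(),
 * while those that do are backed by an IOBufferMemoryDescriptor. The
 * descriptor-backed ones are remembered on a locked queue so that
 * IOFreeContiguous() can find and release the descriptor later.
 */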
struct _IOMallocContiguousEntry
{
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask = alignment - 1;
        physicalMask = (0xFFFFFFFF ^ alignMask);

        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry *
        entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md          = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                     _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address          = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    if (address) {
        IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
    }

    return (void *) address;
}

void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if( !address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                    _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreePhysical((mach_vm_address_t) address, size);
    }

    IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

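/*
 * Run a callback against the pageable submaps, starting at the most
 * recently successful one (the hint). If every existing map fails, grow
 * the pool by suballocating a fresh submap from kernel_map, up to
 * kIOMaxPageableMaps, then retry.
 */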
kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

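/*
 * IOMallocPageable() allocates from whichever pageable submap has room,
 * by passing a kmem_alloc_pageable() callback to IOIteratePageableMaps().
 * IOFreePageable() locates the owning submap by address before freeing.
 */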
struct IOMallocPageableRef
{
    vm_offset_t address;
    vm_size_t   size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

    if( ref.address) {
#if IOALLOCDEBUG
        debug_iomallocpageable_size += round_page(size);
#endif
        IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
    }

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( uintptr_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        panic("IOPageableMapForAddress: null");

    return( map );
}

void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomallocpageable_size -= round_page(size);
#endif

    IOStatisticsAlloc(kIOStatisticsFreePageable, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

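/*
 * Change the cache mode of already-mapped kernel pages by unmapping and
 * remapping each page with the requested mode encoded in the map flags.
 * Only page-aligned ranges in the kernel task are supported.
 */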
IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );
    if ((address | length) & PAGE_MASK)
    {
//      OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
        return( kIOReturnUnsupported );
    }
    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                  IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_offset_t OSKernelStackRemaining( void )
{
    return (ml_stack_remaining());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Sleep, blocking the calling thread, for the indicated number of
 * milliseconds.
 */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/*
 * Spin for indicated number of nanoseconds.
 */
void IOPause(unsigned nanoseconds)
{
    delay_for_interval(nanoseconds, kNanosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

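/*
 * IOLog() formats its output twice: once into the BSD system log under the
 * log lock, and once to the console. The va_list is consumed by the first
 * pass, hence the va_copy() for the second.
 */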
static void _iolog_consputc(int ch, void *arg __unused)
{
    cons_putc_locked(ch);
}

static void _iolog_logputc(int ch, void *arg __unused)
{
    log_putc_locked(ch);
}

void IOLog(const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    IOLogv(format, ap);
    va_end(ap);
}

void IOLogv(const char *format, va_list ap)
{
    va_list ap2;

    va_copy(ap2, ap);

    bsd_log_lock();
    __doprnt(format, ap, _iolog_logputc, NULL, 16);
    bsd_log_unlock();

    __doprnt(format, ap2, _iolog_consputc, NULL, 16);
    va_end(ap2);    // every va_copy() must be paired with a va_end()
}

#if !__LP64__
void IOPanic(const char *reason)
{
    panic("%s", reason);
}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];        // shared fallback buffer; not thread-safe

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
        const IONamedValue *regValueArray,
        int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

OSString * IOCopyLogNameForPID(int pid)
{
    char   buf[128];
    size_t len;
    snprintf(buf, sizeof(buf), "pid %d, ", pid);
    len = strlen(buf);
    proc_name(pid, buf + len, sizeof(buf) - len);
    return (OSString::withCString(buf));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

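/*
 * IOSizeToAlignment() returns the position of the highest set bit, i.e.
 * floor(log2(size)), and IOAlignmentToSize() is its inverse for powers of
 * two. For example, IOSizeToAlignment(4096) == 12 and
 * IOAlignmentToSize(12) == 4096.
 */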
IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}

} /* extern "C" */