/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98    cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <vm/vm_kern.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/OSDebug.h>
#include <sys/sysctl.h>
#endif

#include "libkern/OSAtomic.h"
#include <libkern/c++/OSKext.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <sys/msgbuf.h>

#if IOKITSTATS

#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)

#else

#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */

extern "C"
{


mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern int
__doprnt(
    const char *fmt,
    va_list argp,
    void (*putc)(int, void *),
    void *arg,
    int radix);

extern void cons_putc_locked(char);
extern void bsd_log_lock(void);
extern void bsd_log_unlock(void);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1 = NULL;
void *_giDebugReserved2 = NULL;


/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t * gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps = 16 };
enum { kIOPageableMapSize = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

typedef struct {
    vm_map_t map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32 count;
    UInt32 hint;
    IOMapData maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                    &gIOKitPageableSpace.maps[0].address,
                    kIOPageableMapSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint = 0;
    gIOKitPageableSpace.count = 1;

    gIOMallocContiguousEntriesLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}
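
/*
 * Illustrative usage sketch for IOCreateThread()/IOExitThread(); the entry
 * function and argument below are hypothetical, not declared in this file:
 *
 *     static void myWorkerEntry(void * arg)
 *     {
 *         // ... do work with arg ...
 *         IOExitThread();    // terminate the calling thread when finished
 *     }
 *
 *     IOThread t = IOCreateThread(&myWorkerEntry, myArg);
 *     if (!t)
 *         ;   // the thread could not be started
 */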

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */


void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
    if ( address ) {
#if IOALLOCDEBUG
        debug_iomalloc_size += size;
#endif
        IOStatisticsAlloc(kIOStatisticsMalloc, size);
    }

    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
        IOStatisticsAlloc(kIOStatisticsFree, size);
    }
}
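
/*
 * Illustrative usage sketch: IOMalloc() returns wired kernel memory or NULL,
 * and the caller must pass the same size back to IOFree(), since kalloc/kfree
 * use the size to locate the allocation. Hypothetical example:
 *
 *     void * buf = IOMalloc(1024);
 *     if (buf) {
 *         // ... use the 1024-byte buffer ...
 *         IOFree(buf, 1024);
 *     }
 */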

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_offset_t address;
    vm_offset_t allocationAddress;
    vm_size_t adjustedSize;
    uintptr_t alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                        size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                            adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t) - sizeof(vm_address_t)))
                    = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                    = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

    if( address) {
#if IOALLOCDEBUG
        debug_iomalloc_size += size;
#endif
        IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
    }

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                        - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                        - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif

    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}
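
/*
 * Illustrative usage sketch: an allocation with a 64-byte alignment
 * constraint. As with IOFree(), the original size must be supplied on free
 * so the bookkeeping stored ahead of small allocations can be recovered.
 * Hypothetical example:
 *
 *     void * desc = IOMallocAligned(512, 64);
 *     if (desc) {
 *         // ... 512 bytes starting on a 64-byte boundary ...
 *         IOFreeAligned(desc, 512);
 *     }
 */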

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t allocationAddress;
    mach_vm_size_t adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {

        adjustedSize = *((mach_vm_size_t *)
                        (address - sizeof(mach_vm_address_t) - sizeof(mach_vm_size_t)));
        allocationAddress = *((mach_vm_address_t *)
                        (address - sizeof(mach_vm_address_t) ));
        kfree((void *)allocationAddress, adjustedSize);
    }

    IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
                    mach_vm_size_t alignment, bool contiguous)
{
    kern_return_t kr;
    mach_vm_address_t address;
    mach_vm_address_t allocationAddress;
    mach_vm_size_t adjustedSize;
    mach_vm_address_t alignMask;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);

    contiguous = (contiguous && (adjustedSize > page_size))
                || (alignment > page_size);

    if (contiguous || maxPhys)
    {
        int options = 0;
        vm_offset_t virt;

        adjustedSize = size;
        contiguous = (contiguous && (adjustedSize > page_size))
                    || (alignment > page_size);

        if (!contiguous)
        {
            if (maxPhys <= 0xFFFFFFFF)
            {
                maxPhys = 0;
                options |= KMA_LOMEM;
            }
            else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage))
            {
                maxPhys = 0;
            }
        }
        if (contiguous || maxPhys)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                        alignMask, atop(maxPhys), atop(alignMask), 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                        size, alignMask, options);
        }
        if (KERN_SUCCESS == kr)
            address = virt;
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
                    & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page(address);

            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
                            - sizeof(mach_vm_address_t))) = adjustedSize;
            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    if (address) {
        IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
        debug_iomalloc_size += size;
#endif
    }

    return (address);
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry
{
    mach_vm_address_t virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t physicalMask;
        vm_offset_t alignMask;

        alignMask = alignment - 1;
        physicalMask = (0xFFFFFFFF ^ alignMask);

        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry *
        entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                    _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}

void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor * md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if( !address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                    _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                        _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreePhysical((mach_vm_address_t) address, size);
    }
}
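
/*
 * Illustrative usage sketch: passing a physicalAddress pointer makes
 * IOMallocContiguous() back the buffer with an IOBufferMemoryDescriptor and
 * return the physical address of the start of the buffer; passing NULL takes
 * the IOKernelAllocateWithPhysicalRestrict() path. Hypothetical example:
 *
 *     IOPhysicalAddress phys;
 *     void * ring = IOMallocContiguous(8192, page_size, &phys);
 *     if (ring) {
 *         // ... program a (hypothetical) device with phys ...
 *         IOFreeContiguous(ring, 8192);
 *     }
 */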

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t segSize;
    UInt32 attempts;
    UInt32 index;
    vm_offset_t min;
    vm_map_t map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end = min + segSize;
        gIOKitPageableSpace.hint = index;
        gIOKitPageableSpace.count = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

struct IOMallocPageableRef
{
    vm_offset_t address;
    vm_size_t size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

    if( ref.address) {
#if IOALLOCDEBUG
        debug_iomallocpageable_size += round_page(size);
#endif
        IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
    }

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( uintptr_t address )
{
    vm_map_t map = 0;
    UInt32 index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
            && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        panic("IOPageableMapForAddress: null");

    return( map );
}

void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomallocpageable_size -= round_page(size);
#endif

    IOStatisticsAlloc(kIOStatisticsFreePageable, size);
}
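
/*
 * Illustrative usage sketch: pageable allocations come from the
 * gIOKitPageableSpace submaps, so alignment may not exceed page_size and
 * size may not exceed kIOPageableMaxMapSize. Hypothetical example:
 *
 *     void * cache = IOMallocPageable(64 * 1024, sizeof(void *));
 *     if (cache) {
 *         // ... contents may be paged out; not suitable for DMA or interrupt context ...
 *         IOFreePageable(cache, 64 * 1024);
 *     }
 */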

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                    IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );
    if ((address | length) & PAGE_MASK)
    {
//      OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
        return( kIOReturnUnsupported );
    }
    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                    IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_offset_t OSKernelStackRemaining( void )
{
    return (ml_stack_remaining());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Sleep the calling thread for the indicated number of milliseconds.
 */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for the indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/*
 * Spin for the indicated number of nanoseconds.
 */
void IOPause(unsigned nanoseconds)
{
    delay_for_interval(nanoseconds, kNanosecondScale);
}
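
/*
 * Illustrative usage sketch: choose the routine by time scale, e.g.
 *
 *     IOSleep(10);    // let the calling thread sleep for ~10 ms
 *     IODelay(50);    // short ~50 us delay
 *     IOPause(100);   // very short ~100 ns delay
 */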

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void _iolog_consputc(int ch, void *arg __unused)
{
    cons_putc_locked(ch);
}

static void _iolog_logputc(int ch, void *arg __unused)
{
    log_putc_locked(ch);
}

void IOLog(const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    IOLogv(format, ap);
    va_end(ap);
}

void IOLogv(const char *format, va_list ap)
{
    va_list ap2;

    va_copy(ap2, ap);

    bsd_log_lock();
    __doprnt(format, ap, _iolog_logputc, NULL, 16);
    bsd_log_unlock();

    __doprnt(format, ap2, _iolog_consputc, NULL, 16);
}
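
/*
 * Illustrative usage sketch: IOLog() takes printf-style arguments and, via
 * the two __doprnt() passes in IOLogv() above, writes the formatted text to
 * both the system log and the console. The driver name and values below are
 * hypothetical:
 *
 *     IOLog("%s: request %d failed (status 0x%08x)\n", "MyDriver", 3, status);
 */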

#if !__LP64__
void IOPanic(const char *reason)
{
    panic("%s", reason);
}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];    // that's pretty lame but it's only debugging

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
    const IONamedValue *regValueArray,
    int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}
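
/*
 * Illustrative usage sketch: both routines walk a table terminated by an
 * entry whose name is NULL. The table and names below are hypothetical and
 * assume IONamedValue's { value, name } field order:
 *
 *     static const IONamedValue gStateNames[] = {
 *         { 0, "off"    },
 *         { 1, "idle"   },
 *         { 2, "active" },
 *         { 0, NULL     }     // terminator: name == NULL
 *     };
 *
 *     const char * name = IOFindNameForValue(2, gStateNames);               // "active"
 *     int value;
 *     IOReturn ok = IOFindValueForName("idle", gStateNames, &value);        // value == 1
 */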

OSString * IOCopyLogNameForPID(int pid)
{
    char buf[128];
    size_t len;
    snprintf(buf, sizeof(buf), "pid %d, ", pid);
    len = strlen(buf);
    proc_name(pid, buf + len, sizeof(buf) - len);
    return (OSString::withCString(buf));
}
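
/*
 * Illustrative usage sketch: the returned OSString follows the "Copy" naming
 * rule, so the caller releases it. The pid below is hypothetical:
 *
 *     OSString * who = IOCopyLogNameForPID(1234);
 *     if (who) {
 *         IOLog("request from %s\n", who->getCStringNoCopy());
 *         who->release();
 *     }
 */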

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
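
/*
 * Illustrative round trip: IOSizeToAlignment() returns the bit index of the
 * most significant set bit in the size (so a power-of-two size maps to its
 * log2), and IOAlignmentToSize() converts an alignment back to 1 << align:
 *
 *     IOAlignment  a  = IOSizeToAlignment(4096);   // 12
 *     unsigned int sz = IOAlignmentToSize(a);      // 4096
 */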

} /* extern "C" */