/* apple/xnu: iokit/Kernel/IOLib.cpp (blob 886176acf7b3510f54197c1e6642dce70316193c) */
/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91   Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98   cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <vm/vm_kern.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/OSDebug.h>
#include <sys/sysctl.h>
#endif

#include "libkern/OSAtomic.h"
#include <libkern/c++/OSKext.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <sys/msgbuf.h>

#if IOKITSTATS

#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)

#else

#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */

extern "C"
{


mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern int
__doprnt(
    const char *fmt,
    va_list    argp,
    void       (*putc)(int, void *),
    void       *arg,
    int        radix);

extern void cons_putc_locked(char);
extern void bsd_log_lock(void);
extern void bsd_log_unlock(void);


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1 = NULL;
void *_giDebugReserved2 = NULL;


/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

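/*
 * Bookkeeping for the pageable-allocation submaps: up to kIOMaxPageableMaps
 * submaps, each at least kIOPageableMapSize (96 MB) of kernel_map space, are
 * created by IOLibInit() and grown on demand by IOIteratePageableMaps() below,
 * and recorded here.
 */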
typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                    &gIOKitPageableSpace.maps[0].address,
                    kIOPageableMapSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint = 0;
    gIOKitPageableSpace.count = 1;

    gIOMallocContiguousEntriesLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

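/*
 * Note: kernel_thread_start() hands back the new thread with an extra
 * reference, which is dropped here before returning, so the IOThread handle
 * given to the caller is not a reference the caller owns.
 */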
IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */


void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
    if ( address ) {
#if IOALLOCDEBUG
        debug_iomalloc_size += size;
#endif
        IOStatisticsAlloc(kIOStatisticsMalloc, size);
    }

    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
        IOStatisticsAlloc(kIOStatisticsFree, size);
    }
}
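
/*
 * Illustrative usage (not part of the original source); MyState is an example
 * type. IOFree() must be passed the same size that was allocated, since the
 * underlying kalloc()/kfree() pair requires it:
 *
 *     MyState * state = (MyState *) IOMalloc(sizeof(MyState));
 *     if (state) {
 *         bzero(state, sizeof(MyState));
 *         // ... use state ...
 *         IOFree(state, sizeof(MyState));
 *     }
 */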

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

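/*
 * IOMallocAligned() satisfies page-sized or larger requests directly from
 * kernel_map. Smaller requests are padded and carved out of a kalloc() block;
 * the block's adjusted size and start address are stashed in the bytes
 * immediately below the returned aligned pointer so that IOFreeAligned()
 * can recover them.
 */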
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_offset_t   address;
    vm_offset_t   allocationAddress;
    vm_size_t     adjustedSize;
    uintptr_t     alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (size > adjustedSize) {
        address = 0;    /* overflow detected */
    }
    else if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                        size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                            adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t) - sizeof(vm_address_t)))
                    = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                    = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

    if( address) {
#if IOALLOCDEBUG
        debug_iomalloc_size += size;
#endif
        IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
    }

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                            - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                            - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif

    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {

        adjustedSize = *((mach_vm_size_t *)
                    (address - sizeof(mach_vm_address_t) - sizeof(mach_vm_size_t)));
        allocationAddress = *((mach_vm_address_t *)
                    (address - sizeof(mach_vm_address_t) ));
        kfree((void *)allocationAddress, adjustedSize);
    }

    IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

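/*
 * Physically contiguous or physically constrained (maxPhys) requests are
 * served page-aligned from kernel_map via kmem_alloc_contig() or
 * kernel_memory_allocate(); small unconstrained requests fall back to a
 * kalloc() block with the size and start address stashed below the returned
 * pointer, the same hidden-header scheme used by IOMallocAligned() and
 * undone by IOKernelFreePhysical() above.
 */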
mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
                                     mach_vm_size_t alignment, bool contiguous)
{
    kern_return_t     kr;
    mach_vm_address_t address;
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;
    mach_vm_address_t alignMask;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);

    contiguous = (contiguous && (adjustedSize > page_size))
                   || (alignment > page_size);

    if (contiguous || maxPhys)
    {
        int options = 0;
        vm_offset_t virt;

        adjustedSize = size;
        contiguous = (contiguous && (adjustedSize > page_size))
                       || (alignment > page_size);

        if (!contiguous)
        {
            if (maxPhys <= 0xFFFFFFFF)
            {
                maxPhys = 0;
                options |= KMA_LOMEM;
            }
            else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage))
            {
                maxPhys = 0;
            }
        }
        if (contiguous || maxPhys)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, atop(maxPhys), atop(alignMask), 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, options);
        }
        if (KERN_SUCCESS == kr)
            address = virt;
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
                    & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page(address);

            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
                            - sizeof(mach_vm_address_t))) = adjustedSize;
            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    if (address) {
        IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
        debug_iomalloc_size += size;
#endif
    }

    return (address);
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry
{
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

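/*
 * When the caller wants the physical address back, the allocation is made
 * through an IOBufferMemoryDescriptor and remembered on
 * gIOMallocContiguousEntries so that IOFreeContiguous() can find and release
 * the descriptor; otherwise IOKernelAllocateWithPhysicalRestrict() is used
 * and IOFreeContiguous() falls through to IOKernelFreePhysical().
 */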
void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask = alignment - 1;
        physicalMask = (0xFFFFFFFF ^ alignMask);

        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry *
        entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md          = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                     _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address          = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}

void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if( !address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                    _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                            _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreePhysical((mach_vm_address_t) address, size);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

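/*
 * Try the callback against each existing pageable submap, starting at the
 * last successful one (the hint); if every map is exhausted, grow
 * gIOKitPageableSpace with a new kmem_suballoc() of at least
 * kIOPageableMapSize, up to the kIOMaxPageableMaps limit.
 */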
kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

struct IOMallocPageableRef
{
    vm_offset_t address;
    vm_size_t   size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

    if( ref.address) {
#if IOALLOCDEBUG
        debug_iomallocpageable_size += round_page(size);
#endif
        IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
    }

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( uintptr_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        panic("IOPageableMapForAddress: null");

    return( map );
}

void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomallocpageable_size -= round_page(size);
#endif

    IOStatisticsAlloc(kIOStatisticsFreePageable, size);
}
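
/*
 * Illustrative usage (not part of the original source); `count` is an example
 * byte count. Pageable memory may fault, so it must only be touched in
 * contexts where page-ins are safe:
 *
 *     void * buf = IOMallocPageable(count, sizeof(uint32_t));
 *     if (buf) {
 *         // ... use buf ...
 *         IOFreePageable(buf, count);
 *     }
 */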

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

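/*
 * Change the cache mode of an existing, page-aligned kernel mapping by
 * remapping it one page at a time with the requested kIOMapCache* mode;
 * only kernel_task mappings are supported.
 */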
IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );
    if ((address | length) & PAGE_MASK)
    {
//      OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
        return( kIOReturnUnsupported );
    }
    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_offset_t OSKernelStackRemaining( void )
{
    return (ml_stack_remaining());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Sleep the calling thread for the indicated number of milliseconds.
 */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/*
 * Spin for indicated number of nanoseconds.
 */
void IOPause(unsigned nanoseconds)
{
    delay_for_interval(nanoseconds, kNanosecondScale);
}
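
/*
 * Illustrative usage (not part of the original source): a bounded poll of a
 * device status register. readReg32(), kStatusReg and kReadyBit are
 * hypothetical driver definitions.
 *
 *     for (int i = 0; i < 100 && !(readReg32(kStatusReg) & kReadyBit); i++)
 *         IODelay(10);        // at most ~1 ms of total delay
 */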

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void _iolog_consputc(int ch, void *arg __unused)
{
    cons_putc_locked(ch);
}

static void _iolog_logputc(int ch, void *arg __unused)
{
    log_putc_locked(ch);
}

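/*
 * Illustrative usage (not part of the original source); "MyDriver" and
 * `status` are placeholders. Output goes both to the system log buffer and
 * to the console:
 *
 *     IOLog("MyDriver: request failed (status 0x%08x)\n", status);
 */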
void IOLog(const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    IOLogv(format, ap);
    va_end(ap);
}

void IOLogv(const char *format, va_list ap)
{
    va_list ap2;

    va_copy(ap2, ap);

    bsd_log_lock();
    __doprnt(format, ap, _iolog_logputc, NULL, 16);
    bsd_log_unlock();

    __doprnt(format, ap2, _iolog_consputc, NULL, 16);
    va_end(ap2);
}

#if !__LP64__
void IOPanic(const char *reason)
{
    panic("%s", reason);
}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];    // shared fallback buffer for unrecognized values (not thread safe)

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
                            const IONamedValue *regValueArray,
                            int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

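/*
 * Illustrative usage (not part of the original source); the table name and
 * entries are examples only. The array must end with an entry whose name is
 * NULL, since both lookup functions stop at a NULL name:
 *
 *     static const IONamedValue gPowerStateNames[] = {
 *         { 0, "off"    },
 *         { 1, "idle"   },
 *         { 2, "active" },
 *         { 0, NULL     }
 *     };
 *     const char * name = IOFindNameForValue(2, gPowerStateNames);   // "active"
 */
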
OSString * IOCopyLogNameForPID(int pid)
{
    char   buf[128];
    size_t len;
    snprintf(buf, sizeof(buf), "pid %d, ", pid);
    len = strlen(buf);
    proc_name(pid, buf + len, sizeof(buf) - len);
    return (OSString::withCString(buf));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
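
/*
 * Worked example: IOSizeToAlignment() returns the bit position of the highest
 * set bit, i.e. floor(log2(size)), and IOAlignmentToSize() is its inverse for
 * exact powers of two:
 *
 *     IOSizeToAlignment(4096) == 12       // 4096 = 2^12
 *     IOAlignmentToSize(12)   == 4096
 */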

} /* extern "C" */