apple/xnu.git blob e40d20afc2be6dd869de850f1b72c1e24f8c8171 - iokit/Kernel/IOLib.cpp
/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98    cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <vm/vm_kern.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/OSDebug.h>
#include <sys/sysctl.h>
#endif

extern "C"
{


mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

int
__doprnt(
    const char *fmt,
    va_list argp,
    void (*putc)(int, void *),
    void *arg,
    int radix);

extern void conslog_putc(char);


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1 = NULL;
void *_giDebugReserved2 = NULL;


/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t * gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps = 16 };
enum { kIOPageableMapSize = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

typedef struct {
    vm_map_t map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32 count;
    UInt32 hint;
    IOMapData maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                        &gIOKitPageableSpace.maps[0].address,
                        kIOPageableMapSize,
                        TRUE,
                        VM_FLAGS_ANYWHERE,
                        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint = 0;
    gIOKitPageableSpace.count = 1;

    gIOMallocContiguousEntriesLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */


void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address) {
        debug_iomalloc_size += size;
    }
#endif
    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

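/*
 * IOMallocAligned / IOFreeAligned
 *
 * Requests whose padded size reaches page_size are satisfied directly by
 * kernel_memory_allocate() with the requested alignment.  Smaller requests
 * are over-allocated (from kalloc, or from the kernel map once the padding
 * pushes them past a page) and two hidden fields are stored just below the
 * returned pointer: the adjusted allocation size and the original, unaligned
 * allocation address.  IOFreeAligned() reads those fields back, so it must
 * be called with the same size that was allocated.  Illustrative use only:
 *
 *     void * buf = IOMallocAligned(1024, 64);
 *     if (buf) {
 *         // ... use buf ...
 *         IOFreeAligned(buf, 1024);
 *     }
 */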
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_offset_t address;
    vm_offset_t allocationAddress;
    vm_size_t adjustedSize;
    uintptr_t alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                       + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                      & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t) - sizeof(vm_address_t)))
                = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address) {
        debug_iomalloc_size += size;
    }
#endif

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOKernelFreeContiguous(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t allocationAddress;
    mach_vm_size_t adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {

        adjustedSize = *((mach_vm_size_t *)
                        (address - sizeof(mach_vm_address_t) - sizeof(mach_vm_size_t)));
        allocationAddress = *((mach_vm_address_t *)
                        (address - sizeof(mach_vm_address_t) ));
        kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

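/*
 * IOKernelAllocateContiguous
 *
 * Requests whose padded size reaches page_size are allocated from the kernel
 * map: kmem_alloc_contig() when a physical limit, a size above one page, or
 * an alignment above one page forces a truly contiguous grab, and
 * kernel_memory_allocate() otherwise.  Smaller requests are over-allocated
 * (2 * size plus bookkeeping) from kalloc and, if the aligned buffer would
 * straddle a page boundary, the address is rounded up to the next page so the
 * buffer stays within a single page.  As with IOMallocAligned, the adjusted
 * size and original allocation address are stashed just below the returned
 * address for IOKernelFreeContiguous().
 */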
mach_vm_address_t
IOKernelAllocateContiguous(mach_vm_size_t size, mach_vm_address_t maxPhys,
                           mach_vm_size_t alignment)
{
    kern_return_t kr;
    mach_vm_address_t address;
    mach_vm_address_t allocationAddress;
    mach_vm_size_t adjustedSize;
    mach_vm_address_t alignMask;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);

    if (adjustedSize >= page_size)
    {
        vm_offset_t virt;
        adjustedSize = size;
        if ((adjustedSize > page_size) || (alignment > page_size) || maxPhys)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, atop(maxPhys), atop(alignMask), 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, 0);
        }
        if (KERN_SUCCESS == kr)
            address = virt;
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                       + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
                      & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page(address);

            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
                                 - sizeof(mach_vm_address_t))) = adjustedSize;
            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
                = allocationAddress;
        } else
            address = 0;
    }

#if IOALLOCDEBUG
    if (address) {
        debug_iomalloc_size += size;
    }
#endif

    return (address);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

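/*
 * IOMallocContiguous / IOFreeContiguous
 *
 * When the caller does not need the physical address, the request is passed
 * straight to IOKernelAllocateContiguous().  When it does, the memory is
 * backed by an IOBufferMemoryDescriptor created with
 * kIOMemoryPhysicallyContiguous, and the descriptor is remembered on the
 * gIOMallocContiguousEntries queue, keyed by its virtual address, so that
 * IOFreeContiguous() can later find and release it.  The physical mask handed
 * to the descriptor combines a 32-bit limit with the requested alignment.
 */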
struct _IOMallocContiguousEntry
{
    mach_vm_address_t virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateContiguous(size, 0 /*maxPhys*/, alignment);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t physicalMask;
        vm_offset_t alignMask;

        alignMask = alignment - 1;
        physicalMask = (0xFFFFFFFF ^ alignMask);

        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry *
        entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                     _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}

void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor * md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if( !address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreeContiguous((mach_vm_address_t) address, size);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

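/*
 * Pageable allocations come from a small set of pageable submaps
 * (gIOKitPageableSpace).  IOIteratePageableMaps() runs the supplied callback
 * against the existing maps, starting at the most recently successful one,
 * and creates an additional submap of kIOPageableMapSize (96 MB) when every
 * existing map refuses the request, bounded by kIOMaxPageableMaps.
 */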
kern_return_t IOIteratePageableMaps(vm_size_t size,
                                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t segSize;
    UInt32 attempts;
    UInt32 index;
    vm_offset_t min;
    vm_map_t map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                           &min,
                           segSize,
                           TRUE,
                           VM_FLAGS_ANYWHERE,
                           &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end = min + segSize;
        gIOKitPageableSpace.hint = index;
        gIOKitPageableSpace.count = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

struct IOMallocPageableRef
{
    vm_offset_t address;
    vm_size_t size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomallocpageable_size += round_page(size);
#endif

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( uintptr_t address )
{
    vm_map_t map = 0;
    UInt32 index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        panic("IOPageableMapForAddress: null");

    return( map );
}

void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomallocpageable_size -= round_page(size);
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

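/*
 * IOSetProcessorCacheMode changes the cache mode of an existing, page-aligned
 * kernel mapping by looking up the physical page behind each virtual page and
 * re-entering the mapping with the requested cache bits.  Only kernel_task
 * mappings are supported here.
 */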
IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );
    if ((address | length) & PAGE_MASK)
    {
        // OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
        return( kIOReturnUnsupported );
    }
    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_offset_t OSKernelStackRemaining( void )
{
    return (ml_stack_remaining());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Sleep for indicated number of milliseconds.
 */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/*
 * Spin for indicated number of nanoseconds.
 */
void IOPause(unsigned nanoseconds)
{
    delay_for_interval(nanoseconds, kNanosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void _iolog_putc(int ch, void *arg __unused)
{
    conslog_putc(ch);
}

void IOLog(const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    __doprnt(format, ap, _iolog_putc, NULL, 16);
    va_end(ap);
}

void IOLogv(const char *format, va_list ap)
{
    __doprnt(format, ap, _iolog_putc, NULL, 16);
}

#if !__LP64__
void IOPanic(const char *reason)
{
    panic("%s", reason);
}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80]; // shared scratch buffer for undefined values

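/*
 * IOFindNameForValue returns the name from an IONamedValue table terminated
 * by an entry with a NULL name; unknown values are formatted into the shared
 * noValue buffer, so that result is overwritten by the next unmatched lookup
 * and is not reentrant.
 */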
const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
                            const IONamedValue *regValueArray,
                            int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

OSString * IOCopyLogNameForPID(int pid)
{
    char buf[128];
    size_t len;
    snprintf(buf, sizeof(buf), "pid %d, ", pid);
    len = strlen(buf);
    proc_name(pid, buf + len, sizeof(buf) - len);
    return (OSString::withCString(buf));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

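/*
 * IOSizeToAlignment returns the bit index of the most significant set bit of
 * size, i.e. log2 of the largest power of two not greater than size, and
 * IOAlignmentToSize is its inverse for exact powers of two.  For example,
 * IOSizeToAlignment(4096) == 12 and IOAlignmentToSize(12) == 4096, while
 * IOSizeToAlignment(4097) is still 12.
 */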
IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}

} /* extern "C" */