/*
 * Copyright (c) 1998-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91   Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98   cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitDebug.h>

mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1 = NULL;
void *_giDebugReserved2 = NULL;


/*
 * Static variables for this module.
 */

static IOThreadFunc threadArgFcn;
static void * threadArgArg;
static lock_t * threadArgLock;

static queue_head_t gIOMallocContiguousEntries;
static mutex_t * gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps = 16 };
enum { kIOPageableMapSize = 16 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32    count;
    UInt32    hint;
    IOMapData maps[ kIOMaxPageableMaps ];
    mutex_t * lock;
} gIOKitPageableSpace;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

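/*
 * One-time setup for this file: create the thread-argument lock, the first
 * pageable submap of kernel_map (kIOPageableMapSize bytes), and the lock
 * and list used to track IOMallocContiguous() allocations mapped through
 * an IOMapper.
 */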
void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    threadArgLock = lock_alloc( true, NULL, NULL );

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                    &gIOKitPageableSpace.maps[0].address,
                    kIOPageableMapSize,
                    TRUE,
                    TRUE,
                    &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    gIOKitPageableSpace.lock    = mutex_alloc( 0 );
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint    = 0;
    gIOKitPageableSpace.count   = 1;

    gIOMallocContiguousEntriesLock = mutex_alloc( 0 );
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * We pass an argument to a new thread by saving fcn and arg in some
 * locked variables and starting the thread at ioThreadStart(). This
 * function retrieves fcn and arg and makes the appropriate call.
 *
 */

static void ioThreadStart( void )
{
    IOThreadFunc fcn;
    void *       arg;

    fcn = threadArgFcn;
    arg = threadArgArg;
    lock_done( threadArgLock);

    (*fcn)(arg);

    IOExitThread();
}

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    IOThread thread;

    lock_write( threadArgLock);
    threadArgFcn = fcn;
    threadArgArg = arg;

    thread = kernel_thread( kernel_task, ioThreadStart);

    return(thread);
}


volatile void IOExitThread()
{
    (void) thread_terminate(current_act());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

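/*
 * IOMalloc/IOFree: general purpose kernel allocations, backed directly by
 * kalloc()/kfree(); the caller must pass the original size to IOFree().
 */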
void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif
    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree((vm_offset_t)address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

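/*
 * IOMallocAligned: requests of at least a page pass the alignment straight
 * to kernel_memory_allocate() as a mask.  Smaller requests are over-allocated
 * (from kalloc or kmem) and the original allocation size and address are
 * stashed immediately below the aligned pointer so that IOFreeAligned()
 * can recover them.
 */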
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                        size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                            adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, (vm_address_t) allocationAddress, adjustedSize);
        else
            kfree((vm_offset_t) allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

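/*
 * Contiguous allocations that were also given an IOMapper I/O virtual
 * address are remembered on this list, keyed by kernel virtual address,
 * so that IOFreeContiguous() can release the IOVM range as well.
 */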
struct _IOMallocContiguousEntry
{
    void *        virtual;
    ppnum_t       ioBase;
    queue_chain_t link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

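/*
 * IOMallocContiguous: requests larger than a page use kmem_alloc_contig();
 * requests of roughly half a page up to a page use kernel_memory_allocate()
 * (a single page is trivially contiguous); smaller requests come from
 * kalloc() with the same size/address header as IOMallocAligned().  When the
 * caller asks for the physical address, the page is found with
 * pmap_find_phys(); if an IOMapper range can be allocated, the pages are
 * inserted into it and the I/O virtual address is returned instead.
 */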
void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                           IOPhysicalAddress * physicalAddress)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;
    ppnum_t       pagenum;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size)
    {
        adjustedSize = size;
        if (adjustedSize > page_size)
        {
            kr = kmem_alloc_contig(kernel_map, &address, size,
                                   alignMask, 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &address,
                                        size, alignMask, 0);
        }
        if (KERN_SUCCESS != kr)
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page_32(address);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    /* Do we want a physical address? */
    if (address && physicalAddress)
    {
        do
        {
            /* Get the physical page */
            pagenum = pmap_find_phys(kernel_pmap, (addr64_t) address);
            if(pagenum)
            {
                IOByteCount offset;
                ppnum_t     base;

                base = IOMapperIOVMAlloc((size + PAGE_MASK) >> PAGE_SHIFT);
                if (base)
                {
                    _IOMallocContiguousEntry *
                        entry = IONew(_IOMallocContiguousEntry, 1);
                    if (!entry)
                    {
                        IOFreeContiguous((void *) address, size);
                        address = 0;
                        break;
                    }
                    entry->virtual = (void *) address;
                    entry->ioBase  = base;
                    mutex_lock(gIOMallocContiguousEntriesLock);
                    queue_enter( &gIOMallocContiguousEntries, entry,
                                _IOMallocContiguousEntry *, link );
                    mutex_unlock(gIOMallocContiguousEntriesLock);

                    *physicalAddress = (IOPhysicalAddress)((base << PAGE_SHIFT) | (address & PAGE_MASK));
                    for (offset = 0; offset < ((size + PAGE_MASK) >> PAGE_SHIFT); offset++, pagenum++)
                        IOMapperInsertPage( base, offset, pagenum );
                }
                else
                    *physicalAddress = (IOPhysicalAddress)((pagenum << PAGE_SHIFT) | (address & PAGE_MASK));
            }
            else
                /* Did not find, return 0 */
                *physicalAddress = (IOPhysicalAddress) 0;
        }
        while (false);
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}

void IOFreeContiguous(void * address, vm_size_t size)
{
    vm_address_t               allocationAddress;
    vm_size_t                  adjustedSize;
    _IOMallocContiguousEntry * entry;
    ppnum_t                    base = 0;

    if( !address)
        return;

    assert(size);

    mutex_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                    _IOMallocContiguousEntry *, link )
    {
        if( entry->virtual == address ) {
            base = entry->ioBase;
            queue_remove( &gIOMallocContiguousEntries, entry,
                            _IOMallocContiguousEntry *, link );
            break;
        }
    }
    mutex_unlock(gIOMallocContiguousEntriesLock);

    if (base)
    {
        IOMapperIOVMFree(base, (size + PAGE_MASK) >> PAGE_SHIFT);
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }

    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        kfree((vm_offset_t) allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);

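/*
 * IOIteratePageableMaps: invoke the callback on each existing pageable
 * submap, starting at the most recently successful one (the hint).  If every
 * map fails, grow gIOKitPageableSpace with a new submap of at least
 * kIOPageableMapSize bytes and retry, up to kIOMaxPageableMaps maps.
 */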
kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        mutex_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            mutex_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    TRUE,
                    &map);
        if( KERN_SUCCESS != kr) {
            mutex_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        mutex_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t    size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

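/*
 * IOMallocPageable / IOFreePageable: pageable allocations are made from the
 * gIOKitPageableSpace submaps via IOIteratePageableMaps(); the free path
 * locates the owning submap by address with IOPageableMapForAddress().
 */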
void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomalloc_size += round_page_32(size);
#endif

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}

void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomalloc_size -= round_page_32(size);
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa,
                        vm_size_t length, unsigned int options);
extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length);

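/*
 * IOSetProcessorCacheMode: change the cache mode of already-mapped kernel
 * memory one page at a time, by unmapping each page and remapping it with
 * the requested cache bits.  Only the kernel task is supported.
 */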
IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );

    length = round_page_32(address + length) - trunc_page_32( address );
    address = trunc_page_32( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, pagenum << PAGE_SHIFT, page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

#if __ppc__
    flush_dcache64( (addr64_t) address, (unsigned) length, false );
#endif

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

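/*
 * OSKernelStackRemaining: estimate the space left on the current kernel
 * stack from the offset of a local variable within the stack-size-aligned
 * stack region.
 */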
SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

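/*
 * Sleep the calling thread for the given number of milliseconds
 * (an uninterruptible timed wait).
 */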
void IOSleep(unsigned milliseconds)
{
    wait_result_t wait_result;

    wait_result = assert_wait_timeout(milliseconds, THREAD_UNINT);
    assert(wait_result == THREAD_WAITING);

    wait_result = thread_block(THREAD_CONTINUE_NULL);
    assert(wait_result == THREAD_TIMED_OUT);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    extern void delay(int usec);

    delay(microseconds);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

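/*
 * IOLog: printf-style output to the kernel console/log via _doprnt().
 */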
void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup();

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}

void IOPanic(const char *reason)
{
    panic(reason);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80]; // that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    sprintf(noValue, "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
                            const IONamedValue *regValueArray,
                            int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

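/*
 * IOSizeToAlignment returns the alignment as a power-of-two exponent:
 * the bit position of the highest set bit in size (floor(log2(size))).
 * IOAlignmentToSize is the inverse, returning 1 << align.
 */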
IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}

IOReturn IONDRVLibrariesInitialize( void )
{
    return( kIOReturnUnsupported );
}