/*
 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98    cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;


/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

/* LP64todo - these need to expand */
typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if (libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                    &gIOKitPageableSpace.maps[0].address,
                    kIOPageableMapSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock           = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end    = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint           = 0;
    gIOKitPageableSpace.count          = 1;

    gIOMallocContiguousEntriesLock     = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


volatile void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

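/*
 * Usage sketch (illustrative only, not part of this file): spawning a
 * worker thread from a driver with IOCreateThread. The function and
 * argument names here are hypothetical. The spawned function should finish
 * by calling IOExitThread(); the thread_t reference is already dropped
 * inside IOCreateThread, so the caller must not thread_deallocate() it.
 *
 *    static void myWorkerFunc(void *arg)
 *    {
 *        // ... perform deferred work on arg ...
 *        IOExitThread();
 *    }
 *
 *    IOThread thread = IOCreateThread(&myWorkerFunc, myArg);
 *    if (!thread)
 *        IOLog("MyDriver: failed to create worker thread\n");
 */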
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */


void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif
    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}

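/*
 * Usage sketch (illustrative only, not part of this file): a
 * general-purpose allocation with IOMalloc. Because kfree() needs the
 * size, the caller must remember it and pass the same value to IOFree;
 * the buffer name below is hypothetical.
 *
 *    vm_size_t  len = 256;
 *    UInt8     *buf = (UInt8 *) IOMalloc(len);
 *    if (buf) {
 *        bzero(buf, len);
 *        // ... use buf ...
 *        IOFree(buf, len);    // size must match the IOMalloc size
 *    }
 */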
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                        size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                            adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

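/*
 * Usage sketch (illustrative only, not part of this file): allocating a
 * buffer aligned to a 4096-byte boundary. The alignment must be a power of
 * two; for sub-page requests the two header words stashed just below the
 * returned address (adjustedSize and allocationAddress above) let
 * IOFreeAligned recover the real allocation.
 *
 *    vm_size_t  size = 2 * 4096;
 *    void      *buf  = IOMallocAligned(size, 4096);
 *    if (buf) {
 *        assert(0 == (((vm_address_t) buf) & (4096 - 1)));
 *        // ... use buf ...
 *        IOFreeAligned(buf, size);    // same size as the allocation
 *    }
 */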
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry
{
    void *        virtual;
    ppnum_t       ioBase;
    queue_chain_t link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                           IOPhysicalAddress * physicalAddress)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;
    ppnum_t       pagenum;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size)
    {
        adjustedSize = size;
        if (adjustedSize > page_size)
        {
            kr = kmem_alloc_contig(kernel_map, &address, size,
                                   alignMask, 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &address,
                                        size, alignMask, 0);
        }
        if (KERN_SUCCESS != kr)
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page_32(address);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    /* Do we want a physical address? */
    if (address && physicalAddress)
    {
        do
        {
            /* Get the physical page */
            pagenum = pmap_find_phys(kernel_pmap, (addr64_t) address);
            if(pagenum)
            {
                IOByteCount offset;
                ppnum_t base;

                base = IOMapperIOVMAlloc((size + PAGE_MASK) >> PAGE_SHIFT);
                if (base)
                {
                    _IOMallocContiguousEntry *
                        entry = IONew(_IOMallocContiguousEntry, 1);
                    if (!entry)
                    {
                        IOFreeContiguous((void *) address, size);
                        address = 0;
                        break;
                    }
                    entry->virtual = (void *) address;
                    entry->ioBase  = base;
                    lck_mtx_lock(gIOMallocContiguousEntriesLock);
                    queue_enter( &gIOMallocContiguousEntries, entry,
                                _IOMallocContiguousEntry *, link );
                    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

                    *physicalAddress = (IOPhysicalAddress)((base << PAGE_SHIFT) | (address & PAGE_MASK));
                    for (offset = 0; offset < ((size + PAGE_MASK) >> PAGE_SHIFT); offset++, pagenum++)
                        IOMapperInsertPage( base, offset, pagenum );
                }
                else
                    *physicalAddress = (IOPhysicalAddress)((pagenum << PAGE_SHIFT) | (address & PAGE_MASK));
            }
            else
                /* Did not find, return 0 */
                *physicalAddress = (IOPhysicalAddress) 0;
        }
        while (false);
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}

void IOFreeContiguous(void * address, vm_size_t size)
{
    vm_address_t               allocationAddress;
    vm_size_t                  adjustedSize;
    _IOMallocContiguousEntry * entry;
    ppnum_t                    base = 0;

    if( !address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                    _IOMallocContiguousEntry *, link )
    {
        if( entry->virtual == address ) {
            base = entry->ioBase;
            queue_remove( &gIOMallocContiguousEntries, entry,
                            _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (base)
    {
        IOMapperIOVMFree(base, (size + PAGE_MASK) >> PAGE_SHIFT);
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }

    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

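/*
 * Usage sketch (illustrative only, not part of this file): allocating a
 * physically contiguous buffer and retrieving an address suitable for
 * programming a DMA engine. The device-programming step is hypothetical;
 * note that on systems with an IOMapper the address returned through
 * physicalAddress is an I/O-space (mapper) address rather than a raw
 * physical address.
 *
 *    IOPhysicalAddress  phys;
 *    vm_size_t          size = 8192;
 *    void              *buf  = IOMallocContiguous(size, page_size, &phys);
 *    if (buf && phys) {
 *        // ... program the device with phys, run the transfer ...
 *        IOFreeContiguous(buf, size);    // same size as the allocation
 *    }
 */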
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t    size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomallocpageable_size += round_page_32(size);
#endif

    return( (void *) ref.address );
}

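/*
 * Usage sketch (illustrative only, not part of this file): a pageable
 * allocation for data that may be paged out, and which therefore must only
 * be touched from a context where page faults are legal (never at
 * interrupt time or while holding a spinning lock). Freed with
 * IOFreePageable, defined below.
 *
 *    vm_size_t  size = 16 * page_size;
 *    void      *buf  = IOMallocPageable(size, page_size);
 *    if (buf) {
 *        // ... touch buf only from a blockable context ...
 *        IOFreePageable(buf, size);
 *    }
 */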
vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}

void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomallocpageable_size -= round_page_32(size);
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );

    length = round_page_32(address + length) - trunc_page_32( address );
    address = trunc_page_32( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, pagenum << PAGE_SHIFT, page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                  IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

#if __ppc__
    flush_dcache64( (addr64_t) address, (unsigned) length, false );
#endif

    return( kIOReturnSuccess );
}

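/*
 * Usage sketch (illustrative only, not part of this file): marking a
 * kernel buffer write-through before handing it to a non-coherent device,
 * then flushing the data cache after CPU writes. The buf/size names are
 * hypothetical. Since this routine shifts cacheMode into kIOMapCacheMask
 * itself, the argument takes the unshifted kIO*Cache values (for example
 * kIOInhibitCache or kIOWriteThruCache), not the pre-shifted kIOMap*Cache
 * options. Note IOFlushProcessorCache acts only on __ppc__ in this revision.
 *
 *    IOReturn ret;
 *    ret = IOSetProcessorCacheMode(kernel_task, (IOVirtualAddress) buf,
 *                                  size, kIOWriteThruCache);
 *    // ... CPU writes to buf ...
 *    ret = IOFlushProcessorCache(kernel_task, (IOVirtualAddress) buf, size);
 */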
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

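/*
 * Usage sketch (illustrative only, not part of this file): IOSleep blocks
 * the calling thread and may only be used where blocking is legal, while
 * IODelay is intended for short busy-waits, e.g. after poking a device
 * register from a non-blockable context.
 *
 *    IODelay(50);    // wait ~50 microseconds for hardware to settle
 *    IOSleep(10);    // put this thread to sleep for ~10 milliseconds
 */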
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup(void);

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}

void IOPanic(const char *reason)
{
    panic(reason);
}

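/*
 * Usage sketch (illustrative only, not part of this file): IOLog takes
 * printf-style arguments and writes to the console/system log; the status
 * variable below is hypothetical. It should not be called from interrupt
 * context.
 *
 *    IOLog("MyDriver: read failed (status 0x%08x)\n", status);
 */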
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];	// that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    sprintf(noValue, "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
    const IONamedValue *regValueArray,
    int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

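/*
 * Usage sketch (illustrative only, not part of this file): a
 * NULL-name-terminated IONamedValue table mapping driver state constants
 * (hypothetical here) to strings for debug output.
 *
 *    static const IONamedValue myStateNames[] = {
 *        { kMyStateIdle,   "Idle"   },    // hypothetical constants
 *        { kMyStateActive, "Active" },
 *        { 0,              NULL     }     // terminator: name == NULL
 *    };
 *
 *    IOLog("state = %s\n", IOFindNameForValue(state, myStateNames));
 *
 * Note that IOFindNameForValue formats unknown values into a single static
 * buffer (noValue above), so it is not safe against concurrent callers.
 */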
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
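
/*
 * Worked example (illustrative only, not part of this file):
 * IOSizeToAlignment returns the position of the highest set bit of size,
 * i.e. the alignment expressed as a power-of-two exponent, and
 * IOAlignmentToSize converts back.
 *
 *    IOSizeToAlignment(4096) == 12;    // 4096 == 1 << 12
 *    IOAlignmentToSize(12)   == 4096;
 *    IOSizeToAlignment(4097) == 12;    // only the highest set bit counts
 */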