iokit/Kernel/IOLib.c (xnu-792.6.22)
/*
 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91	Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98	cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal	= NULL;
void *_giDebugLogDataInternal	= NULL;
void *_giDebugReserved1		= NULL;
void *_giDebugReserved2		= NULL;

/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

/* LP64todo - these need to expand */
typedef struct {
    vm_map_t	map;
    vm_offset_t	address;
    vm_offset_t	end;
} IOMapData;

static struct {
    UInt32	count;
    UInt32	hint;
    IOMapData	maps[ kIOMaxPageableMaps ];
    lck_mtx_t *	lock;
} gIOKitPageableSpace;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if (libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                    &gIOKitPageableSpace.maps[0].address,
                    kIOPageableMapSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint = 0;
    gIOKitPageableSpace.count = 1;

    gIOMallocContiguousEntriesLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t	result;
    thread_t		thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


volatile void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

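/*
 * IOMalloc/IOFree are thin wrappers around kalloc/kfree; the caller must
 * pass the original allocation size back to IOFree, since kfree() requires
 * it to locate the right zone.
 */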

void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif
    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

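/*
 * For sub-page allocations, IOMallocAligned over-allocates and stashes the
 * adjusted size and the true allocation address in the bytes immediately
 * below the aligned pointer it hands out:
 *
 *   allocationAddress                                       returned address
 *   |-- padding --|-- vm_size_t adjustedSize --|-- vm_address_t --|-- data --|
 *
 * IOFreeAligned reads these two fields back to release the block.
 */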
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t	kr;
    vm_address_t	address;
    vm_address_t	allocationAddress;
    vm_size_t		adjustedSize;
    vm_offset_t		alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t	allocationAddress;
    vm_size_t		adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

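/*
 * Contiguous allocations that are remapped through the system IOMapper are
 * tracked on gIOMallocContiguousEntries so that IOFreeContiguous can locate
 * the IOVM range (ioBase) to release along with the memory itself.
 */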
struct _IOMallocContiguousEntry
{
    void *		virtual;
    ppnum_t		ioBase;
    queue_chain_t	link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    kern_return_t	kr;
    vm_address_t	address;
    vm_address_t	allocationAddress;
    vm_size_t		adjustedSize;
    vm_offset_t		alignMask;
    ppnum_t		pagenum;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size)
    {
        adjustedSize = size;
        if (adjustedSize > page_size)
        {
            kr = kmem_alloc_contig(kernel_map, &address, size,
                                   alignMask, 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &address,
                                        size, alignMask, 0);
        }
        if (KERN_SUCCESS != kr)
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page_32(address);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    /* Do we want a physical address? */
    if (address && physicalAddress)
    {
	do
	{
	    /* Get the physical page */
	    pagenum = pmap_find_phys(kernel_pmap, (addr64_t) address);
	    if (pagenum)
	    {
		IOByteCount offset;
		ppnum_t base;

		base = IOMapperIOVMAlloc((size + PAGE_MASK) >> PAGE_SHIFT);
		if (base)
		{
		    _IOMallocContiguousEntry *
			entry = IONew(_IOMallocContiguousEntry, 1);
		    if (!entry)
		    {
			IOFreeContiguous((void *) address, size);
			address = 0;
			break;
		    }
		    entry->virtual = (void *) address;
		    entry->ioBase  = base;
		    lck_mtx_lock(gIOMallocContiguousEntriesLock);
		    queue_enter( &gIOMallocContiguousEntries, entry,
				 _IOMallocContiguousEntry *, link );
		    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

		    *physicalAddress = (IOPhysicalAddress)((base << PAGE_SHIFT) | (address & PAGE_MASK));
		    for (offset = 0; offset < ((size + PAGE_MASK) >> PAGE_SHIFT); offset++, pagenum++)
			IOMapperInsertPage( base, offset, pagenum );
		}
		else
		    *physicalAddress = (IOPhysicalAddress)((pagenum << PAGE_SHIFT) | (address & PAGE_MASK));
	    }
	    else
		/* Did not find, return 0 */
		*physicalAddress = (IOPhysicalAddress) 0;
	}
	while (false);
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}

void IOFreeContiguous(void * address, vm_size_t size)
{
    vm_address_t	       allocationAddress;
    vm_size_t		       adjustedSize;
    _IOMallocContiguousEntry * entry;
    ppnum_t		       base = 0;

    if( !address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if( entry->virtual == address ) {
            base = entry->ioBase;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (base)
    {
        IOMapperIOVMFree(base, (size + PAGE_MASK) >> PAGE_SHIFT);
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }

    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

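/*
 * The pageable space is grown lazily: the callback is retried against each
 * existing submap (starting at the most recently successful hint), and a
 * new kIOPageableMapSize submap is created only when every current map has
 * failed, up to the kIOMaxPageableMaps limit.
 */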
kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t	kr = kIOReturnNotReady;
    vm_size_t		segSize;
    UInt32		attempts;
    UInt32		index;
    vm_offset_t		min;
    vm_map_t		map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t	 size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t	         kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t	       kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomallocpageable_size += round_page_32(size);
#endif

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t	map = 0;
    UInt32	index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}

void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomallocpageable_size -= round_page_32(size);
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

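/*
 * IOSetProcessorCacheMode changes the cache mode of an existing kernel
 * mapping one page at a time, by unmapping each page and remapping it with
 * the new mode encoded in the kIOMapCacheMask bits.
 */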
IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn	ret = kIOReturnSuccess;
    ppnum_t	pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );

    length = round_page_32(address + length) - trunc_page_32( address );
    address = trunc_page_32( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, pagenum << PAGE_SHIFT, page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

#if __ppc__
    flush_dcache64( (addr64_t) address, (unsigned) length, false );
#endif

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

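/*
 * Kernel stacks are KERNEL_STACK_SIZE-aligned, so masking the address of a
 * stack-resident local with (KERNEL_STACK_SIZE - 1) yields its offset from
 * the stack's base, i.e. approximately the bytes still available to the
 * downward-growing stack.
 */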
SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup(void);

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}

void IOPanic(const char *reason)
{
    panic(reason);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];	// that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    sprintf(noValue, "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
        const IONamedValue *regValueArray,
        int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

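/*
 * IOSizeToAlignment returns the bit position of the most significant set
 * bit of size, i.e. log2 of the largest power of two not exceeding size;
 * IOAlignmentToSize(align) is its inverse, returning 1 << align.
 */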
IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}