/* apple/xnu, xnu-3789.31.2: iokit/Kernel/IOLib.cpp */
/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98    cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <vm/vm_kern.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/OSDebug.h>
#include <sys/sysctl.h>
#endif

#include "libkern/OSAtomic.h"
#include <libkern/c++/OSKext.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <os/log_private.h>
#include <sys/msgbuf.h>

#if IOKITSTATS

#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)

#else

#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */

#define TRACK_ALLOC     (IOTRACKING && (kIOTracking & gIOKitDebug))


extern "C"
{


mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern int
__doprnt(
    const char  *fmt,
    va_list     argp,
    void        (*putc)(int, void *),
    void        *arg,
    int         radix,
    int         is_log);

extern void cons_putc_locked(char);
extern void bsd_log_lock(void);
extern void bsd_log_unlock(void);
extern void logwakeup();


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;

iopa_t gIOBMDPageAllocator;

/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

#if __x86_64__
enum { kIOMaxPageableMaps    = 8 };
enum { kIOPageableMapSize    = 512 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
#else
enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
#endif

typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;

static iopa_t gIOPageablePageAllocator;

uint32_t gIOPageAllocChunkBytes;

#if IOTRACKING
IOTrackingQueue * gIOMallocTracking;
IOTrackingQueue * gIOWireTracking;
IOTrackingQueue * gIOMapTracking;
#endif /* IOTRACKING */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

#if IOTRACKING
    IOTrackingInit();
    gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0,
                                             kIOTrackingQueueTypeAlloc,
                                             37);
    gIOWireTracking   = IOTrackingQueueAlloc(kIOWireTrackingName, 0, 0, page_size, 0, 0);

    size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024*1024);
    gIOMapTracking = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize,
                                          kIOTrackingQueueTypeDefaultOn
                                            | kIOTrackingQueueTypeMap
                                            | kIOTrackingQueueTypeUser,
                                          0);
#endif

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                        &gIOKitPageableSpace.maps[0].address,
                        kIOPageableMapSize,
                        TRUE,
                        VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IOKIT),
                        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    gIOKitPageableSpace.lock        = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock  = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    gIOPageAllocChunkBytes = PAGE_SIZE/64;
    assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
    iopa_init(&gIOBMDPageAllocator);
    iopa_init(&gIOPageablePageAllocator);

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
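
/*
 * Round up to the next power-of-two exponent: log2up(x) == ceil(log2(x)),
 * e.g. log2up(1) == 0, log2up(5) == 3, log2up(4096) == 12.
 */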
static uint32_t
log2up(uint32_t size)
{
    if (size <= 1) size = 0;
    else size = 32 - __builtin_clz(size - 1);
    return (size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if IOTRACKING
struct IOLibMallocHeader
{
    IOTrackingAddress tracking;
};
#endif

#if IOTRACKING
#define sizeofIOLibMallocHeader (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibMallocHeader (0)
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
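
/*
 * With allocation tracking enabled, IOMalloc prefixes each allocation with
 * an IOLibMallocHeader and returns the address just past it:
 *
 *     [ IOLibMallocHeader | caller-visible bytes ... ]
 *
 * IOFree undoes the same offset before calling kfree(), so both sides must
 * agree on sizeofIOLibMallocHeader.
 */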
void * IOMalloc(vm_size_t size)
{
    void    * address;
    vm_size_t allocSize;

    allocSize = size + sizeofIOLibMallocHeader;
#if IOTRACKING
    if (sizeofIOLibMallocHeader && (allocSize <= size)) return (NULL);  // overflow
#endif
    address = kalloc_tag_bt(allocSize, VM_KERN_MEMORY_IOKIT);

    if ( address ) {
#if IOTRACKING
        if (TRACK_ALLOC) {
            IOLibMallocHeader * hdr;
            hdr = (typeof(hdr)) address;
            bzero(&hdr->tracking, sizeof(hdr->tracking));
            hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
            hdr->tracking.size    = size;
            IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true);
        }
#endif
        address = (typeof(address)) (((uintptr_t) address) + sizeofIOLibMallocHeader);

#if IOALLOCDEBUG
        OSAddAtomic(size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMalloc, size);
    }

    return address;
}

void IOFree(void * inAddress, vm_size_t size)
{
    void * address;

    if ((address = inAddress))
    {
        address = (typeof(address)) (((uintptr_t) address) - sizeofIOLibMallocHeader);

#if IOTRACKING
        if (TRACK_ALLOC)
        {
            IOLibMallocHeader * hdr;
            struct ptr_reference { void * ptr; };
            volatile struct ptr_reference ptr;

            // we're about to block in IOTrackingRemove(), make sure the original pointer
            // exists in memory or a register for leak scanning to find
            ptr.ptr = inAddress;

            hdr = (typeof(hdr)) address;
            if (size != hdr->tracking.size)
            {
                OSReportWithBacktrace("bad IOFree size 0x%lx should be 0x%lx", size, hdr->tracking.size);
                size = hdr->tracking.size;
            }
            IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
            ptr.ptr = NULL;
        }
#endif

        kfree(address, size + sizeofIOLibMallocHeader);
#if IOALLOCDEBUG
        OSAddAtomic(-size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsFree, size);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
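
/*
 * Pick a VM tag for an allocation: user maps get the generic IOKit tag;
 * for kernel maps, try to attribute the memory to the calling kext via
 * backtrace (vm_tag_bt), falling back to VM_KERN_MEMORY_IOKIT.
 */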
vm_tag_t
IOMemoryTag(vm_map_t map)
{
    vm_tag_t tag;

    if (!vm_kernel_map_is_kernel(map)) return (VM_MEMORY_IOKIT);

    tag = vm_tag_bt();
    if (tag == VM_KERN_MEMORY_NONE) tag = VM_KERN_MEMORY_IOKIT;

    return (tag);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct IOLibPageMallocHeader
{
    mach_vm_size_t    allocationSize;
    mach_vm_address_t allocationAddress;
#if IOTRACKING
    IOTrackingAddress tracking;
#endif
};

#if IOTRACKING
#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader))
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
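
/*
 * IOMallocAligned: page-sized (or larger) requests go straight to
 * kernel_memory_allocate() with the alignment mask. Smaller requests are
 * over-allocated by alignMask + header bytes, and the original allocation
 * address and size are stashed in an IOLibPageMallocHeader just below the
 * aligned pointer so IOFreeAligned can find them.
 */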
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t           kr;
    vm_offset_t             address;
    vm_offset_t             allocationAddress;
    vm_size_t               adjustedSize;
    uintptr_t               alignMask;
    IOLibPageMallocHeader * hdr;

    if (size == 0)
        return 0;

    alignment = (1UL << log2up(alignment));
    alignMask = alignment - 1;
    adjustedSize = size + sizeofIOLibPageMallocHeader;

    if (size > adjustedSize) {
        address = 0;    /* overflow detected */
    }
    else if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0, IOMemoryTag(kernel_map));
        if (KERN_SUCCESS != kr) address = 0;
#if IOTRACKING
        else if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
#endif

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0, IOMemoryTag(kernel_map));
            if (KERN_SUCCESS != kr) allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);

        if (allocationAddress) {
            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                      & (~alignMask);

            hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
            hdr->allocationSize    = adjustedSize;
            hdr->allocationAddress = allocationAddress;
#if IOTRACKING
            if (TRACK_ALLOC) {
                bzero(&hdr->tracking, sizeof(hdr->tracking));
                hdr->tracking.address = ~address;
                hdr->tracking.size    = size;
                IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true);
            }
#endif
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

    if( address) {
#if IOALLOCDEBUG
        OSAddAtomic(size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
    }

    return (void *) address;
}
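
/*
 * Illustrative usage (a sketch, not code from this file): alignment is
 * rounded up to a power of two, and the same size must be passed back to
 * IOFreeAligned:
 *
 *     void * buf = IOMallocAligned(3 * 1024, 4096);    // 4KB-aligned
 *     if (buf) {
 *         // ... use buf ...
 *         IOFreeAligned(buf, 3 * 1024);
 *     }
 */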

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t            allocationAddress;
    vm_size_t               adjustedSize;
    IOLibPageMallocHeader * hdr;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeofIOLibPageMallocHeader;
    if (adjustedSize >= page_size) {
#if IOTRACKING
        if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);
#endif
        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {
        hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
        adjustedSize      = hdr->allocationSize;
        allocationAddress = hdr->allocationAddress;

#if IOTRACKING
        if (TRACK_ALLOC)
        {
            if (size != hdr->tracking.size)
            {
                OSReportWithBacktrace("bad IOFreeAligned size 0x%lx should be 0x%lx", size, hdr->tracking.size);
                size = hdr->tracking.size;
            }
            IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
        }
#endif
        if (adjustedSize >= page_size) {
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        } else {
            kfree((void *)allocationAddress, adjustedSize);
        }
    }

#if IOALLOCDEBUG
    OSAddAtomic(-size, &debug_iomalloc_size);
#endif

    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
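
/*
 * IOKernelAllocateWithPhysicalRestrict / IOKernelFreePhysical are a pair:
 * constrained requests (physically contiguous, or limited to pages below
 * maxPhys) come from kmem_alloc_contig() or kernel_memory_allocate() (with
 * KMA_LOMEM keeping pages under 4GB); unconstrained sub-page requests are
 * over-allocated with kalloc and described by an IOLibPageMallocHeader.
 */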
void
IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t       allocationAddress;
    mach_vm_size_t          adjustedSize;
    IOLibPageMallocHeader * hdr;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
    if (adjustedSize >= page_size) {
#if IOTRACKING
        if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, address, size);
#endif
        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {

        hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
        adjustedSize      = hdr->allocationSize;
        allocationAddress = hdr->allocationAddress;
#if IOTRACKING
        if (TRACK_ALLOC) IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
#endif
        kfree((void *)allocationAddress, adjustedSize);
    }

    IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
    OSAddAtomic(-size, &debug_iomalloc_size);
#endif
}


mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
                                     mach_vm_size_t alignment, bool contiguous)
{
    kern_return_t           kr;
    mach_vm_address_t       address;
    mach_vm_address_t       allocationAddress;
    mach_vm_size_t          adjustedSize;
    mach_vm_address_t       alignMask;
    IOLibPageMallocHeader * hdr;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
    if (adjustedSize < size) return (0);

    contiguous = (contiguous && (adjustedSize > page_size))
                 || (alignment > page_size);

    if (contiguous || maxPhys)
    {
        int options = 0;
        vm_offset_t virt;

        adjustedSize = size;
        contiguous = (contiguous && (adjustedSize > page_size))
                     || (alignment > page_size);

        if (!contiguous)
        {
            if (maxPhys <= 0xFFFFFFFF)
            {
                maxPhys = 0;
                options |= KMA_LOMEM;
            }
            else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage))
            {
                maxPhys = 0;
            }
        }
        if (contiguous || maxPhys)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, atop(maxPhys), atop(alignMask), 0, IOMemoryTag(kernel_map));
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, options, IOMemoryTag(kernel_map));
        }
        if (KERN_SUCCESS == kr)
        {
            address = virt;
#if IOTRACKING
            if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
#endif
        }
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        if (adjustedSize < size) return (0);
        allocationAddress = (mach_vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);

        if (allocationAddress) {

            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                      & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page(address);

            hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
            hdr->allocationSize    = adjustedSize;
            hdr->allocationAddress = allocationAddress;
#if IOTRACKING
            if (TRACK_ALLOC) {
                bzero(&hdr->tracking, sizeof(hdr->tracking));
                hdr->tracking.address = ~address;
                hdr->tracking.size    = size;
                IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true);
            }
#endif
        } else
            address = 0;
    }

    if (address) {
        IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
        OSAddAtomic(size, &debug_iomalloc_size);
#endif
    }

    return (address);
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
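
/*
 * When a caller of IOMallocContiguous also asks for the physical address,
 * the memory comes from an IOBufferMemoryDescriptor. This list remembers
 * the descriptor behind each such virtual address so IOFreeContiguous can
 * release it later.
 */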
struct _IOMallocContiguousEntry
{
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask = alignment - 1;
        physicalMask = (0xFFFFFFFF ^ alignMask);

        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry *
        entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md          = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                     _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address          = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}

void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if( !address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreePhysical((mach_vm_address_t) address, size);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
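
/*
 * The pageable allocators draw from up to kIOMaxPageableMaps submaps of
 * the kernel map. IOIteratePageableMaps retries the callback on each
 * existing map, starting at the last successful hint; on KERN_NO_SPACE it
 * grows the pool with a fresh kmem_suballoc of at least kIOPageableMapSize.
 */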

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index    = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if (KERN_NO_SPACE != kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IOKIT),
                    &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

struct IOMallocPageableRef
{
    vm_offset_t address;
    vm_size_t   size;
    vm_tag_t    tag;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size, ref->tag );

    return( kr );
}

static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    ref.tag  = tag;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( uintptr_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        panic("IOPageableMapForAddress: null");

    return( map );
}

static void IOFreePageablePages(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);
}

static uintptr_t IOMallocOnePageablePage(iopa_t * a)
{
    return ((uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT));
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    void * addr;

    if (size >= (page_size - 4*gIOPageAllocChunkBytes)) addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
    else addr = ((void * ) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment));

    if (addr) {
#if IOALLOCDEBUG
        OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
    }

    return (addr);
}

void IOFreePageable(void * address, vm_size_t size)
{
#if IOALLOCDEBUG
    OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
    IOStatisticsAlloc(kIOStatisticsFreePageable, size);

    if (size < (page_size - 4*gIOPageAllocChunkBytes))
    {
        address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
        size = page_size;
    }
    if (address) IOFreePageablePages(address, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
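
/*
 * iopa_t is a simple sub-page allocator: each page is split into 64 chunks
 * of gIOPageAllocChunkBytes (PAGE_SIZE/64), tracked by a 64-bit 'avail'
 * bitmask in an iopa_page_t that lives in the page's last chunk (which is
 * why that chunk is never handed out). IOMallocPageable uses it for
 * requests well under a page.
 */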

extern "C" void
iopa_init(iopa_t * a)
{
    bzero(a, sizeof(*a));
    a->lock = IOLockAlloc();
    queue_init(&a->list);
}
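
/*
 * Scan 'avail' (one set bit per free chunk, MSB = chunk 0) for a run of
 * 'count' free chunks: repeatedly ANDing avail with a shifted copy of
 * itself leaves a set bit only where a run of at least 'count' bits
 * begins. 'align' then masks off start positions with the wrong alignment.
 */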
static uintptr_t
iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
{
    uint32_t n, s;
    uint64_t avail = pa->avail;

    assert(avail);

    // find strings of count 1 bits in avail
    for (n = count; n > 1; n -= s)
    {
        s = n >> 1;
        avail = avail & (avail << s);
    }
    // and aligned
    avail &= align;

    if (avail)
    {
        n = __builtin_clzll(avail);
        pa->avail &= ~((-1ULL << (64 - count)) >> n);
        if (!pa->avail && pa->link.next)
        {
            remque(&pa->link);
            pa->link.next = 0;
        }
        return (n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa));
    }

    return (0);
}
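
/*
 * align_masks[i] has a bit set at every position whose chunk index is a
 * multiple of 2^i (e.g. 0xAAAA... is every second chunk), so indexing by
 * log2up(balign / chunkBytes) restricts candidate run starts to suitably
 * aligned chunks.
 */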
uintptr_t
iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
{
    static const uint64_t align_masks[] = {
        0xFFFFFFFFFFFFFFFF,
        0xAAAAAAAAAAAAAAAA,
        0x8888888888888888,
        0x8080808080808080,
        0x8000800080008000,
        0x8000000080000000,
        0x8000000000000000,
    };
    iopa_page_t * pa;
    uintptr_t     addr = 0;
    uint32_t      count;
    uint64_t      align;

    if (!bytes) bytes = 1;
    count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
    align = align_masks[log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes)];

    IOLockLock(a->lock);
    __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_first(&a->list));
    while (!queue_end(&a->list, &pa->link))
    {
        addr = iopa_allocinpage(pa, count, align);
        if (addr)
        {
            a->bytecount += bytes;
            break;
        }
        __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_next(&pa->link));
    }
    IOLockUnlock(a->lock);

    if (!addr)
    {
        addr = alloc(a);
        if (addr)
        {
            pa = (typeof(pa)) (addr + page_size - gIOPageAllocChunkBytes);
            pa->signature = kIOPageAllocSignature;
            pa->avail     = -2ULL;

            addr = iopa_allocinpage(pa, count, align);
            IOLockLock(a->lock);
            if (pa->avail) enqueue_head(&a->list, &pa->link);
            a->pagecount++;
            if (addr) a->bytecount += bytes;
            IOLockUnlock(a->lock);
        }
    }

    assert((addr & ((1 << log2up(balign)) - 1)) == 0);
    return (addr);
}
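
/*
 * iopa_free returns the page's base address once every chunk is free again
 * (avail back to -2ULL), signalling the caller to return the whole page to
 * the underlying allocator; otherwise it returns 0.
 */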
uintptr_t
iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
{
    iopa_page_t * pa;
    uint32_t      count;
    uintptr_t     chunk;

    if (!bytes) bytes = 1;

    chunk = (addr & page_mask);
    assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));

    pa = (typeof(pa)) (addr | (page_size - gIOPageAllocChunkBytes));
    assert(kIOPageAllocSignature == pa->signature);

    count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
    chunk /= gIOPageAllocChunkBytes;

    IOLockLock(a->lock);
    if (!pa->avail)
    {
        assert(!pa->link.next);
        enqueue_tail(&a->list, &pa->link);
    }
    pa->avail |= ((-1ULL << (64 - count)) >> chunk);
    if (pa->avail != -2ULL) pa = 0;
    else
    {
        remque(&pa->link);
        pa->link.next = 0;
        pa->signature = 0;
        a->pagecount--;
        // page to free
        pa = (typeof(pa)) trunc_page(pa);
    }
    a->bytecount -= bytes;
    IOLockUnlock(a->lock);

    return ((uintptr_t) pa);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
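
/*
 * Cache mode changes are applied by remapping each page in place: look up
 * the physical page with pmap_find_phys, unmap the virtual page, then map
 * it again with the new cache bits folded into the map mode.
 */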
IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );
    if ((address | length) & PAGE_MASK)
    {
//      OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
        return( kIOReturnUnsupported );
    }
    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
vm_offset_t OSKernelStackRemaining( void )
{
    return (ml_stack_remaining());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Sleep for the indicated number of milliseconds.
 */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Sleep for the indicated number of milliseconds, and potentially an
 * additional number of milliseconds up to the leeway value.
 */
void IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
{
    delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);
}

/*
 * Spin for the indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/*
 * Spin for the indicated number of nanoseconds.
 */
void IOPause(unsigned nanoseconds)
{
    delay_for_interval(nanoseconds, kNanosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
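
/*
 * IOLog sends each message down two paths: os_log_with_args() for the
 * unified logging system, and __doprnt() with a console putc for the
 * traditional kernel console.
 */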
static void _iolog_consputc(int ch, void *arg __unused)
{
    cons_putc_locked(ch);
}

static void _IOLogv(const char *format, va_list ap, void *caller);

__attribute__((noinline,not_tail_called))
void IOLog(const char *format, ...)
{
    void *caller = __builtin_return_address(0);
    va_list ap;

    va_start(ap, format);
    _IOLogv(format, ap, caller);
    va_end(ap);
}

__attribute__((noinline,not_tail_called))
void IOLogv(const char *format, va_list ap)
{
    void *caller = __builtin_return_address(0);
    _IOLogv(format, ap, caller);
}

void _IOLogv(const char *format, va_list ap, void *caller)
{
    va_list ap2;

    /* Ideally not called at interrupt context or with interrupts disabled. Needs further validation. */
    /* assert(TRUE == ml_get_interrupts_enabled()); */

    va_copy(ap2, ap);

    os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller);

    __doprnt(format, ap2, _iolog_consputc, NULL, 16, TRUE);
    va_end(ap2);
}

#if !__LP64__
void IOPanic(const char *reason)
{
    panic("%s", reason);
}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];    // that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
                            const IONamedValue *regValueArray,
                            int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

OSString * IOCopyLogNameForPID(int pid)
{
    char buf[128];
    size_t len;
    snprintf(buf, sizeof(buf), "pid %d, ", pid);
    len = strlen(buf);
    proc_name(pid, buf + len, sizeof(buf) - len);
    return (OSString::withCString(buf));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
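
/*
 * IOAlignment encodes an alignment as its log2: IOSizeToAlignment returns
 * floor(log2(size)), e.g. 4096 -> 12, and IOAlignmentToSize inverts it,
 * e.g. 12 -> 4096.
 */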
IOAlignment IOSizeToAlignment(unsigned int size)
{
    int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}

} /* extern "C" */