/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98    cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <vm/vm_kern.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/OSDebug.h>
#include <sys/sysctl.h>
#endif

#include "libkern/OSAtomic.h"
#include <libkern/c++/OSKext.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <os/log_private.h>
#include <sys/msgbuf.h>
#include <console/serial_protos.h>

#if IOKITSTATS

#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)

#else

#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */


#define TRACK_ALLOC     (IOTRACKING && (kIOTracking & gIOKitDebug))


extern "C"
{


mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern int
__doprnt(
    const char  *fmt,
    va_list     argp,
    void        (*putc)(int, void *),
    void        *arg,
    int         radix,
    int         is_log);

extern void cons_putc_locked(char);
extern void bsd_log_lock(void);
extern void bsd_log_unlock(void);


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;

iopa_t gIOBMDPageAllocator;

/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

#if __x86_64__
enum { kIOMaxPageableMaps    = 8 };
enum { kIOPageableMapSize    = 512 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
#else
enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
#endif

typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;

static iopa_t gIOPageablePageAllocator;

uint32_t gIOPageAllocChunkBytes;

#if IOTRACKING
IOTrackingQueue * gIOMallocTracking;
IOTrackingQueue * gIOWireTracking;
IOTrackingQueue * gIOMapTracking;
#endif /* IOTRACKING */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

#if IOTRACKING
    IOTrackingInit();
    gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0,
                                             kIOTrackingQueueTypeAlloc,
                                             37);
    gIOWireTracking   = IOTrackingQueueAlloc(kIOWireTrackingName, 0, 0, page_size, 0, 0);

    size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024*1024);
    gIOMapTracking = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize,
                                          kIOTrackingQueueTypeDefaultOn
                                          | kIOTrackingQueueTypeMap
                                          | kIOTrackingQueueTypeUser,
                                          0);
#endif

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                        &gIOKitPageableSpace.maps[0].address,
                        kIOPageableMapSize,
                        TRUE,
                        VM_FLAGS_ANYWHERE,
                        VM_MAP_KERNEL_FLAGS_NONE,
                        VM_KERN_MEMORY_IOKIT,
                        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    gIOKitPageableSpace.lock        = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock  = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    gIOPageAllocChunkBytes = PAGE_SIZE/64;
    assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
    iopa_init(&gIOBMDPageAllocator);
    iopa_init(&gIOPageablePageAllocator);


    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static uint32_t
log2up(uint32_t size)
{
    if (size <= 1) size = 0;
    else size = 32 - __builtin_clz(size - 1);
    return (size);
}
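/*
 * Note: log2up() rounds up to the next power-of-two exponent, e.g.
 * log2up(1) == 0, log2up(2) == 1, log2up(3) == 2, log2up(4) == 2,
 * log2up(5) == 3. IOMallocAligned() below uses it to round the caller's
 * alignment up to a power of two.
 */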

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if IOTRACKING
struct IOLibMallocHeader
{
    IOTrackingAddress tracking;
};
#endif

#if IOTRACKING
#define sizeofIOLibMallocHeader (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibMallocHeader (0)
#endif

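/*
 * When allocation tracking is compiled in and enabled (TRACK_ALLOC),
 * IOMalloc() prepends an IOLibMallocHeader to each allocation and hands
 * the caller the address just past that header; IOFree() steps back over
 * the header before calling kfree(). When tracking is disabled the header
 * size evaluates to zero, so the scheme adds no overhead.
 */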
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMalloc(vm_size_t size)
{
    void *    address;
    vm_size_t allocSize;

    allocSize = size + sizeofIOLibMallocHeader;
#if IOTRACKING
    if (sizeofIOLibMallocHeader && (allocSize <= size)) return (NULL);   // overflow
#endif
    address = kalloc_tag_bt(allocSize, VM_KERN_MEMORY_IOKIT);

    if ( address ) {
#if IOTRACKING
        if (TRACK_ALLOC) {
            IOLibMallocHeader * hdr;
            hdr = (typeof(hdr)) address;
            bzero(&hdr->tracking, sizeof(hdr->tracking));
            hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
            hdr->tracking.size    = size;
            IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
        }
#endif
        address = (typeof(address)) (((uintptr_t) address) + sizeofIOLibMallocHeader);

#if IOALLOCDEBUG
        OSAddAtomic(size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMalloc, size);
    }

    return address;
}

void IOFree(void * inAddress, vm_size_t size)
{
    void * address;

    if ((address = inAddress))
    {
        address = (typeof(address)) (((uintptr_t) address) - sizeofIOLibMallocHeader);

#if IOTRACKING
        if (TRACK_ALLOC)
        {
            IOLibMallocHeader * hdr;
            struct ptr_reference{ void * ptr; };
            volatile struct ptr_reference ptr;

            // we're about to block in IOTrackingRemove(), make sure the original pointer
            // exists in memory or a register for leak scanning to find
            ptr.ptr = inAddress;

            hdr = (typeof(hdr)) address;
            if (size != hdr->tracking.size)
            {
                OSReportWithBacktrace("bad IOFree size 0x%lx should be 0x%lx", size, hdr->tracking.size);
                size = hdr->tracking.size;
            }
            IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
            ptr.ptr = NULL;
        }
#endif

        kfree(address, size + sizeofIOLibMallocHeader);
#if IOALLOCDEBUG
        OSAddAtomic(-size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsFree, size);
    }
}

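/*
 * Illustrative usage: callers must pass IOFree() the same size they passed
 * to IOMalloc(); the tracking path above reports a backtrace when the two
 * disagree.
 *
 *     void * buf = IOMalloc(256);
 *     if (buf) {
 *         // ... use buf ...
 *         IOFree(buf, 256);
 *     }
 */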
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_tag_t
IOMemoryTag(vm_map_t map)
{
    vm_tag_t tag;

    if (!vm_kernel_map_is_kernel(map)) return (VM_MEMORY_IOKIT);

    tag = vm_tag_bt();
    if (tag == VM_KERN_MEMORY_NONE) tag = VM_KERN_MEMORY_IOKIT;

    return (tag);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct IOLibPageMallocHeader
{
    mach_vm_size_t    allocationSize;
    mach_vm_address_t allocationAddress;
#if IOTRACKING
    IOTrackingAddress tracking;
#endif
};

#if IOTRACKING
#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader))
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t           kr;
    vm_offset_t             address;
    vm_offset_t             allocationAddress;
    vm_size_t               adjustedSize;
    uintptr_t               alignMask;
    IOLibPageMallocHeader * hdr;

    if (size == 0)
        return 0;

    alignment = (1UL << log2up(alignment));
    alignMask = alignment - 1;
    adjustedSize = size + sizeofIOLibPageMallocHeader;

    if (size > adjustedSize) {
        address = 0;    /* overflow detected */
    }
    else if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0, IOMemoryTag(kernel_map));
        if (KERN_SUCCESS != kr) address = 0;
#if IOTRACKING
        else if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
#endif

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0, IOMemoryTag(kernel_map));
            if (KERN_SUCCESS != kr) allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);

        if (allocationAddress) {
            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                      & (~alignMask);

            hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
            hdr->allocationSize    = adjustedSize;
            hdr->allocationAddress = allocationAddress;
#if IOTRACKING
            if (TRACK_ALLOC) {
                bzero(&hdr->tracking, sizeof(hdr->tracking));
                hdr->tracking.address = ~address;
                hdr->tracking.size    = size;
                IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
            }
#endif
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

    if( address) {
#if IOALLOCDEBUG
        OSAddAtomic(size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
    }

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t            allocationAddress;
    vm_size_t               adjustedSize;
    IOLibPageMallocHeader * hdr;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeofIOLibPageMallocHeader;
    if (adjustedSize >= page_size) {
#if IOTRACKING
        if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);
#endif
        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {
        hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
        adjustedSize = hdr->allocationSize;
        allocationAddress = hdr->allocationAddress;

#if IOTRACKING
        if (TRACK_ALLOC)
        {
            if (size != hdr->tracking.size)
            {
                OSReportWithBacktrace("bad IOFreeAligned size 0x%lx should be 0x%lx", size, hdr->tracking.size);
                size = hdr->tracking.size;
            }
            IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
        }
#endif
        if (adjustedSize >= page_size) {
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        } else {
            kfree((void *)allocationAddress, adjustedSize);
        }
    }

#if IOALLOCDEBUG
    OSAddAtomic(-size, &debug_iomalloc_size);
#endif

    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}

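/*
 * For sub-page requests IOMallocAligned() over-allocates, places an
 * IOLibPageMallocHeader immediately below the aligned pointer it returns,
 * and records the real allocation base and size there so IOFreeAligned()
 * can recover them. Illustrative usage (pointer and size must match the
 * original request):
 *
 *     void * buf = IOMallocAligned(1024, 256);
 *     if (buf) {
 *         // ... use buf ...
 *         IOFreeAligned(buf, 1024);
 *     }
 */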
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t       allocationAddress;
    mach_vm_size_t          adjustedSize;
    IOLibPageMallocHeader * hdr;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
    if (adjustedSize >= page_size) {
#if IOTRACKING
        if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, address, size);
#endif
        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {

        hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
        adjustedSize = hdr->allocationSize;
        allocationAddress = hdr->allocationAddress;
#if IOTRACKING
        if (TRACK_ALLOC) IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
#endif
        kfree((void *)allocationAddress, adjustedSize);
    }

    IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
    OSAddAtomic(-size, &debug_iomalloc_size);
#endif
}

#if __arm__ || __arm64__
extern unsigned long gPhysBase, gPhysSize;
#endif

mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
                                     mach_vm_size_t alignment, bool contiguous)
{
    kern_return_t           kr;
    mach_vm_address_t       address;
    mach_vm_address_t       allocationAddress;
    mach_vm_size_t          adjustedSize;
    mach_vm_address_t       alignMask;
    IOLibPageMallocHeader * hdr;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;

    if (os_mul_and_add_overflow(2, size, sizeofIOLibPageMallocHeader, &adjustedSize)) return (0);

    contiguous = (contiguous && (adjustedSize > page_size))
               || (alignment > page_size);

    if (contiguous || maxPhys)
    {
        int options = 0;
        vm_offset_t virt;

        adjustedSize = size;
        contiguous = (contiguous && (adjustedSize > page_size))
                   || (alignment > page_size);

        if (!contiguous)
        {
#if __arm__ || __arm64__
            if (maxPhys >= (mach_vm_address_t)(gPhysBase + gPhysSize))
            {
                maxPhys = 0;
            }
            else
#endif
            if (maxPhys <= 0xFFFFFFFF)
            {
                maxPhys = 0;
                options |= KMA_LOMEM;
            }
            else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage))
            {
                maxPhys = 0;
            }
        }
        if (contiguous || maxPhys)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, atop(maxPhys), atop(alignMask), 0, IOMemoryTag(kernel_map));
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, options, IOMemoryTag(kernel_map));
        }
        if (KERN_SUCCESS == kr)
        {
            address = virt;
#if IOTRACKING
            if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
#endif
        }
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        if (adjustedSize < size) return (0);
        allocationAddress = (mach_vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);

        if (allocationAddress) {

            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                      & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page(address);

            hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
            hdr->allocationSize    = adjustedSize;
            hdr->allocationAddress = allocationAddress;
#if IOTRACKING
            if (TRACK_ALLOC) {
                bzero(&hdr->tracking, sizeof(hdr->tracking));
                hdr->tracking.address = ~address;
                hdr->tracking.size    = size;
                IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
            }
#endif
        } else
            address = 0;
    }

    if (address) {
        IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
        OSAddAtomic(size, &debug_iomalloc_size);
#endif
    }

    return (address);
}

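/*
 * Note on the physical restriction above: when the allocation does not have
 * to be physically contiguous, a maxPhys at or below 4GB is satisfied with
 * the KMA_LOMEM option rather than a contiguous grab, and a maxPhys beyond
 * the last physical page of the system is treated as no restriction at all.
 */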

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry
{
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask = alignment - 1;
        physicalMask = (0xFFFFFFFF ^ alignMask);

        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry *
        entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md          = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                     _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address          = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}

void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if( !address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreePhysical((mach_vm_address_t) address, size);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t IOIteratePageableMaps(vm_size_t size,
                                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if (KERN_NO_SPACE != kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                           &min,
                           segSize,
                           TRUE,
                           VM_FLAGS_ANYWHERE,
                           VM_MAP_KERNEL_FLAGS_NONE,
                           VM_KERN_MEMORY_IOKIT,
                           &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

struct IOMallocPageableRef
{
    vm_offset_t address;
    vm_size_t   size;
    vm_tag_t    tag;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size, ref->tag );

    return( kr );
}

static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    ref.tag  = tag;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( uintptr_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        panic("IOPageableMapForAddress: null");

    return( map );
}

static void IOFreePageablePages(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);
}

static uintptr_t IOMallocOnePageablePage(iopa_t * a)
{
    return ((uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT));
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    void * addr;

    if (size >= (page_size - 4*gIOPageAllocChunkBytes)) addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
    else                                                addr = ((void * ) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment));

    if (addr) {
#if IOALLOCDEBUG
        OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
    }

    return (addr);
}

void IOFreePageable(void * address, vm_size_t size)
{
#if IOALLOCDEBUG
    OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
    IOStatisticsAlloc(kIOStatisticsFreePageable, size);

    if (size < (page_size - 4*gIOPageAllocChunkBytes))
    {
        address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
        size = page_size;
    }
    if (address) IOFreePageablePages(address, size);
}

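/*
 * Routing note: requests smaller than (page_size - 4*gIOPageAllocChunkBytes)
 * are carved out of shared pages by the iopa_t chunk allocator below, while
 * anything larger is taken directly from the pageable submaps;
 * IOFreePageable() applies the same size test to pick the matching release
 * path.
 */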
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern "C" void
iopa_init(iopa_t * a)
{
    bzero(a, sizeof(*a));
    a->lock = IOLockAlloc();
    queue_init(&a->list);
}

static uintptr_t
iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
{
    uint32_t n, s;
    uint64_t avail = pa->avail;

    assert(avail);

    // find strings of count 1 bits in avail
    for (n = count; n > 1; n -= s)
    {
        s = n >> 1;
        avail = avail & (avail << s);
    }
    // and aligned
    avail &= align;

    if (avail)
    {
        n = __builtin_clzll(avail);
        pa->avail &= ~((-1ULL << (64 - count)) >> n);
        if (!pa->avail && pa->link.next)
        {
            remque(&pa->link);
            pa->link.next = 0;
        }
        return (n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa));
    }

    return (0);
}

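/*
 * Illustrative walk-through of the bitmap trick above: avail has one bit per
 * gIOPageAllocChunkBytes chunk, bit 63 being the first chunk in the page.
 * Repeatedly ANDing avail with itself shifted left collapses runs of set
 * bits, so after the loop a bit remains set only where a run of at least
 * `count` free chunks begins. For example, with count == 2 and
 * avail == 0b1101...0, avail & (avail << 1) == 0b1000...0, leaving only the
 * run that starts at chunk 0. __builtin_clzll() then picks the first such
 * run, and the mask ((-1ULL << (64 - count)) >> n) clears exactly the
 * `count` bits just claimed.
 */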
uintptr_t
iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
{
    static const uint64_t align_masks[] = {
        0xFFFFFFFFFFFFFFFF,
        0xAAAAAAAAAAAAAAAA,
        0x8888888888888888,
        0x8080808080808080,
        0x8000800080008000,
        0x8000000080000000,
        0x8000000000000000,
    };
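    // align_masks[i] marks the bit positions (bit 63 == chunk 0) where an
    // allocation aligned to 2^i chunks may start: every chunk, every 2nd
    // chunk, every 4th, ... down to only chunk 0 for a 64-chunk alignment.
    // It is indexed below by log2up() of the requested alignment in chunks.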
    iopa_page_t * pa;
    uintptr_t     addr = 0;
    uint32_t      count;
    uint64_t      align;

    if (!bytes) bytes = 1;
    count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
    align = align_masks[log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes)];

    IOLockLock(a->lock);
    __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_first(&a->list));
    while (!queue_end(&a->list, &pa->link))
    {
        addr = iopa_allocinpage(pa, count, align);
        if (addr)
        {
            a->bytecount += bytes;
            break;
        }
        __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_next(&pa->link));
    }
    IOLockUnlock(a->lock);

    if (!addr)
    {
        addr = alloc(a);
        if (addr)
        {
            pa = (typeof(pa)) (addr + page_size - gIOPageAllocChunkBytes);
            pa->signature = kIOPageAllocSignature;
            pa->avail     = -2ULL;

            addr = iopa_allocinpage(pa, count, align);
            IOLockLock(a->lock);
            if (pa->avail) enqueue_head(&a->list, &pa->link);
            a->pagecount++;
            if (addr) a->bytecount += bytes;
            IOLockUnlock(a->lock);
        }
    }

    assert((addr & ((1 << log2up(balign)) - 1)) == 0);
    return (addr);
}

uintptr_t
iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
{
    iopa_page_t * pa;
    uint32_t      count;
    uintptr_t     chunk;

    if (!bytes) bytes = 1;

    chunk = (addr & page_mask);
    assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));

    pa = (typeof(pa)) (addr | (page_size - gIOPageAllocChunkBytes));
    assert(kIOPageAllocSignature == pa->signature);

    count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
    chunk /= gIOPageAllocChunkBytes;

    IOLockLock(a->lock);
    if (!pa->avail)
    {
        assert(!pa->link.next);
        enqueue_tail(&a->list, &pa->link);
    }
    pa->avail |= ((-1ULL << (64 - count)) >> chunk);
    if (pa->avail != -2ULL) pa = 0;
    else
    {
        remque(&pa->link);
        pa->link.next = 0;
        pa->signature = 0;
        a->pagecount--;
        // page to free
        pa = (typeof(pa)) trunc_page(pa);
    }
    a->bytecount -= bytes;
    IOLockUnlock(a->lock);

    return ((uintptr_t) pa);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );
    if ((address | length) & PAGE_MASK)
    {
//      OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
        return( kIOReturnUnsupported );
    }
    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_offset_t OSKernelStackRemaining( void )
{
    return (ml_stack_remaining());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Spin for indicated number of milliseconds.
 */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of milliseconds, and potentially an
 * additional number of milliseconds up to the leeway values.
 */
void IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
{
    delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/*
 * Spin for indicated number of nanoseconds.
 */
void IOPause(unsigned nanoseconds)
{
    delay_for_interval(nanoseconds, kNanosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void _IOLogv(const char *format, va_list ap, void *caller) __printflike(1,0);

__attribute__((noinline,not_tail_called))
void IOLog(const char *format, ...)
{
    void *caller = __builtin_return_address(0);
    va_list ap;

    va_start(ap, format);
    _IOLogv(format, ap, caller);
    va_end(ap);
}

__attribute__((noinline,not_tail_called))
void IOLogv(const char *format, va_list ap)
{
    void *caller = __builtin_return_address(0);
    _IOLogv(format, ap, caller);
}

void _IOLogv(const char *format, va_list ap, void *caller)
{
    va_list ap2;
    struct console_printbuf_state info_data;
    console_printbuf_state_init(&info_data, TRUE, TRUE);

    va_copy(ap2, ap);

    os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller);

    __doprnt(format, ap2, console_printbuf_putc, &info_data, 16, TRUE);
    console_printbuf_clear(&info_data);
    va_end(ap2);

    assertf(ml_get_interrupts_enabled() || ml_is_quiescing() || debug_mode_active() || !gCPUsRunning, "IOLog called with interrupts disabled");
}

#if !__LP64__
void IOPanic(const char *reason)
{
    panic("%s", reason);
}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size,
                          void (*output)(const char *format, ...))
{
    uint8_t c, chars[17];
    size_t  idx;

    output("%s(0x%x):\n", title, size);
    if (size > 4096) size = 4096;
    chars[16] = idx = 0;
    while (true) {
        if (!(idx & 15)) {
            if (idx) output(" |%s|\n", chars);
            if (idx >= size) break;
            output("%04x: ", idx);
        }
        else if (!(idx & 7)) output(" ");

        c = ((char *)buffer)[idx];
        output("%02x ", c);
        chars[idx & 15] = ((c >= 0x20) && (c <= 0x7f)) ? c : ' ';

        idx++;
        if ((idx == size) && (idx & 15)) {
            chars[idx & 15] = 0;
            while (idx & 15) {
                idx++;
                output("   ");
            }
        }
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80]; // that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
                            const IONamedValue *regValueArray,
                            int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

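/*
 * Illustrative usage: both lookups above walk an IONamedValue array until
 * they hit the entry whose name pointer is NULL, so tables must be
 * terminated that way. (gStateNames and the kState* constants below are
 * hypothetical.)
 *
 *     static const IONamedValue gStateNames[] = {
 *         { kStateOff, "off" },
 *         { kStateOn,  "on"  },
 *         { 0,         NULL  }
 *     };
 *     const char * name = IOFindNameForValue(kStateOn, gStateNames);
 */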
OSString * IOCopyLogNameForPID(int pid)
{
    char   buf[128];
    size_t len;
    snprintf(buf, sizeof(buf), "pid %d, ", pid);
    len = strlen(buf);
    proc_name(pid, buf + len, sizeof(buf) - len);
    return (OSString::withCString(buf));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
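/*
 * Note: IOSizeToAlignment() returns floor(log2(size)) as an IOAlignment and
 * IOAlignmentToSize() is its inverse, 1 << align. For example
 * IOSizeToAlignment(4096) == 12 and IOAlignmentToSize(12) == 4096;
 * IOSizeToAlignment(0) == 0.
 */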

} /* extern "C" */
