/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98    cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <vm/vm_kern.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/OSDebug.h>
#include <sys/sysctl.h>
#endif

#include "libkern/OSAtomic.h"
#include <libkern/c++/OSKext.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <sys/msgbuf.h>

#if IOKITSTATS

#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)

#else

#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */


#define TRACK_ALLOC (IOTRACKING && (kIOTracking & gIOKitDebug))


extern "C"
{


mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern int
__doprnt(
    const char  *fmt,
    va_list     argp,
    void        (*putc)(int, void *),
    void        *arg,
    int         radix,
    int         is_log);

extern void cons_putc_locked(char);
extern void bsd_log_lock(void);
extern void bsd_log_unlock(void);
extern void logwakeup();


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;

iopa_t gIOBMDPageAllocator;

/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

#if __x86_64__
enum { kIOMaxPageableMaps    = 8 };
enum { kIOPageableMapSize    = 512 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
#else
enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
#endif

typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;

static iopa_t gIOPageablePageAllocator;

uint32_t gIOPageAllocChunkBytes;

#if IOTRACKING
IOTrackingQueue * gIOMallocTracking;
IOTrackingQueue * gIOWireTracking;
IOTrackingQueue * gIOMapTracking;
#endif /* IOTRACKING */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

#if IOTRACKING
    IOTrackingInit();
    gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, true);
    gIOWireTracking   = IOTrackingQueueAlloc(kIOWireTrackingName,   0, page_size, false);
    gIOMapTracking    = IOTrackingQueueAlloc(kIOMapTrackingName,    0, page_size, false);
#endif

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                    &gIOKitPageableSpace.maps[0].address,
                    kIOPageableMapSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IOKIT),
                    &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    gIOKitPageableSpace.lock        = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock  = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    gIOPageAllocChunkBytes = PAGE_SIZE/64;
    assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
    iopa_init(&gIOBMDPageAllocator);
    iopa_init(&gIOPageablePageAllocator);


    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static uint32_t
log2up(uint32_t size)
{
    if (size <= 1) size = 0;
    else size = 32 - __builtin_clz(size - 1);
    return (size);
}
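
/*
 * Note (added for clarity): log2up() computes ceil(log2(size)) via a
 * count-leading-zeros intrinsic; for example log2up(1) == 0,
 * log2up(2) == 1, log2up(4) == 2 and log2up(5) == 3. It is used below to
 * round alignments up to a power of two and to index the iopa align_masks
 * table.
 */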

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if IOTRACKING
struct IOLibMallocHeader
{
    IOTrackingAddress tracking;
};
#endif

#if IOTRACKING
#define sizeofIOLibMallocHeader (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibMallocHeader (0)
#endif
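
/*
 * Note (added for clarity): when IOTRACKING is compiled in but the
 * kIOTracking bit is clear in gIOKitDebug, TRACK_ALLOC is false at run
 * time and sizeofIOLibMallocHeader evaluates to zero, so IOMalloc() adds
 * no per-allocation header unless tracking is actually enabled.
 */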

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMalloc(vm_size_t size)
{
    void *    address;
    vm_size_t allocSize;

    allocSize = size + sizeofIOLibMallocHeader;
#if IOTRACKING
    if (sizeofIOLibMallocHeader && (allocSize <= size)) return (NULL);  // overflow
#endif
    address = kalloc_tag_bt(allocSize, VM_KERN_MEMORY_IOKIT);

    if ( address ) {
#if IOTRACKING
        if (TRACK_ALLOC) {
            IOLibMallocHeader * hdr;
            hdr = (typeof(hdr)) address;
            bzero(&hdr->tracking, sizeof(hdr->tracking));
            hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
            hdr->tracking.size    = size;
            IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true);
        }
#endif
        address = (typeof(address)) (((uintptr_t) address) + sizeofIOLibMallocHeader);

#if IOALLOCDEBUG
        OSAddAtomic(size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMalloc, size);
    }

    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {

        address = (typeof(address)) (((uintptr_t) address) - sizeofIOLibMallocHeader);

#if IOTRACKING
        if (TRACK_ALLOC) {
            IOLibMallocHeader * hdr;
            hdr = (typeof(hdr)) address;
            if (size != hdr->tracking.size)
            {
                OSReportWithBacktrace("bad IOFree size 0x%lx should be 0x%lx", size, hdr->tracking.size);
                size = hdr->tracking.size;
            }
            IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
        }
#endif

        kfree(address, size + sizeofIOLibMallocHeader);
#if IOALLOCDEBUG
        OSAddAtomic(-size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsFree, size);
    }
}
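
/*
 * Usage sketch (illustrative, not part of the original source). The caller
 * owns the size: IOFree() must be passed the same size that was given to
 * IOMalloc(), since the underlying kalloc/kfree pair selects a zone by size.
 *
 *     void * buf = IOMalloc(1024);
 *     if (buf) {
 *         // ... use the 1024-byte buffer ...
 *         IOFree(buf, 1024);
 *     }
 */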

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_tag_t
IOMemoryTag(vm_map_t map)
{
    vm_tag_t tag;

    if (!vm_kernel_map_is_kernel(map)) return (VM_MEMORY_IOKIT);

    tag = vm_tag_bt();
    if (tag == VM_KERN_MEMORY_NONE) tag = VM_KERN_MEMORY_IOKIT;

    return (tag);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct IOLibPageMallocHeader
{
    mach_vm_size_t    allocationSize;
    mach_vm_address_t allocationAddress;
#if IOTRACKING
    IOTrackingAddress tracking;
#endif
};

#if IOTRACKING
#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader))
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t           kr;
    vm_offset_t             address;
    vm_offset_t             allocationAddress;
    vm_size_t               adjustedSize;
    uintptr_t               alignMask;
    IOLibPageMallocHeader * hdr;

    if (size == 0)
        return 0;

    alignment = (1UL << log2up(alignment));
    alignMask = alignment - 1;
    adjustedSize = size + sizeofIOLibPageMallocHeader;

    if (size > adjustedSize) {
        address = 0;    /* overflow detected */
    }
    else if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0, IOMemoryTag(kernel_map));
        if (KERN_SUCCESS != kr) address = 0;
#if IOTRACKING
        else if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
#endif

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0, IOMemoryTag(kernel_map));
            if (KERN_SUCCESS != kr) allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);

        if (allocationAddress) {
            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                        & (~alignMask);

            hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
            hdr->allocationSize    = adjustedSize;
            hdr->allocationAddress = allocationAddress;
#if IOTRACKING
            if (TRACK_ALLOC) {
                bzero(&hdr->tracking, sizeof(hdr->tracking));
                hdr->tracking.address = ~address;
                hdr->tracking.size    = size;
                IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true);
            }
#endif
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

    if( address) {
#if IOALLOCDEBUG
        OSAddAtomic(size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
    }

    return (void *) address;
}

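/*
 * Layout sketch for the sub-page case in IOMallocAligned() above
 * (illustrative): the returned pointer is aligned within a larger
 * allocation, and an IOLibPageMallocHeader recording the true base and
 * size sits immediately below it so IOFreeAligned() can recover both.
 *
 *     allocationAddress                       address (aligned)
 *     v                                       v
 *     | ...pad... | IOLibPageMallocHeader     | caller's data ... |
 *                 ^ address - sizeofIOLibPageMallocHeader
 */
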
void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t            allocationAddress;
    vm_size_t               adjustedSize;
    IOLibPageMallocHeader * hdr;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeofIOLibPageMallocHeader;
    if (adjustedSize >= page_size) {
#if IOTRACKING
        if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);
#endif
        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {
        hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
        adjustedSize = hdr->allocationSize;
        allocationAddress = hdr->allocationAddress;

#if IOTRACKING
        if (TRACK_ALLOC)
        {
            if (size != hdr->tracking.size)
            {
                OSReportWithBacktrace("bad IOFreeAligned size 0x%lx should be 0x%lx", size, hdr->tracking.size);
                size = hdr->tracking.size;
            }
            IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
        }
#endif
        if (adjustedSize >= page_size) {
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        } else {
            kfree((void *)allocationAddress, adjustedSize);
        }
    }

#if IOALLOCDEBUG
    OSAddAtomic(-size, &debug_iomalloc_size);
#endif

    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t       allocationAddress;
    mach_vm_size_t          adjustedSize;
    IOLibPageMallocHeader * hdr;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
    if (adjustedSize >= page_size) {
#if IOTRACKING
        if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, address, size);
#endif
        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {

        hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
        adjustedSize = hdr->allocationSize;
        allocationAddress = hdr->allocationAddress;
#if IOTRACKING
        if (TRACK_ALLOC) IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
#endif
        kfree((void *)allocationAddress, adjustedSize);
    }

    IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
    OSAddAtomic(-size, &debug_iomalloc_size);
#endif
}
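
/*
 * Note (added for clarity): the (2 * size) + header threshold above mirrors
 * the adjustedSize computed by IOKernelAllocateWithPhysicalRestrict() below,
 * so a free takes the same kmem_free/kfree branch as its matching
 * allocation.
 */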


mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
                                     mach_vm_size_t alignment, bool contiguous)
{
    kern_return_t           kr;
    mach_vm_address_t       address;
    mach_vm_address_t       allocationAddress;
    mach_vm_size_t          adjustedSize;
    mach_vm_address_t       alignMask;
    IOLibPageMallocHeader * hdr;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
    if (adjustedSize < size) return (0);

    contiguous = (contiguous && (adjustedSize > page_size))
                   || (alignment > page_size);

    if (contiguous || maxPhys)
    {
        int options = 0;
        vm_offset_t virt;

        adjustedSize = size;
        contiguous = (contiguous && (adjustedSize > page_size))
                   || (alignment > page_size);

        if (!contiguous)
        {
            if (maxPhys <= 0xFFFFFFFF)
            {
                maxPhys = 0;
                options |= KMA_LOMEM;
            }
            else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage))
            {
                maxPhys = 0;
            }
        }
        if (contiguous || maxPhys)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, atop(maxPhys), atop(alignMask), 0, IOMemoryTag(kernel_map));
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, options, IOMemoryTag(kernel_map));
        }
        if (KERN_SUCCESS == kr)
        {
            address = virt;
#if IOTRACKING
            if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
#endif
        }
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        if (adjustedSize < size) return (0);
        allocationAddress = (mach_vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);

        if (allocationAddress) {


            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                        & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page(address);

            hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
            hdr->allocationSize    = adjustedSize;
            hdr->allocationAddress = allocationAddress;
#if IOTRACKING
            if (TRACK_ALLOC) {
                bzero(&hdr->tracking, sizeof(hdr->tracking));
                hdr->tracking.address = ~address;
                hdr->tracking.size    = size;
                IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true);
            }
#endif
        } else
            address = 0;
    }

    if (address) {
        IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
        OSAddAtomic(size, &debug_iomalloc_size);
#endif
    }

    return (address);
}
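
/*
 * Usage sketch (illustrative, not part of the original source): a caller
 * needing physically contiguous memory below 4GB might use
 *
 *     mach_vm_address_t va = IOKernelAllocateWithPhysicalRestrict(
 *                                4096, 0xFFFFFFFFULL, 4096, true);
 *     if (va) IOKernelFreePhysical(va, 4096);
 *
 * Kexts should normally reach this functionality through
 * IOBufferMemoryDescriptor::inTaskWithPhysicalMask() rather than this
 * private interface.
 */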


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry
{
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask = alignment - 1;
        physicalMask = (0xFFFFFFFF ^ alignMask);

        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry *
        entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md          = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                     _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address          = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}

void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if( !address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                    _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                            _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreePhysical((mach_vm_address_t) address, size);
    }
}
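
/*
 * Usage sketch (illustrative): when a physical address is requested, the
 * allocation above is backed by an IOBufferMemoryDescriptor tracked on
 * gIOMallocContiguousEntries so IOFreeContiguous() can find and release it.
 *
 *     IOPhysicalAddress phys;
 *     void * va = IOMallocContiguous(4096, 4096, &phys);
 *     if (va) IOFreeContiguous(va, 4096);
 *
 * IOLib.h documents these calls as deprecated in favor of
 * IOBufferMemoryDescriptor.
 */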

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IOKIT),
                    &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}
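
/*
 * Note (added for clarity): IOIteratePageableMaps() first retries the
 * existing submaps round-robin, starting from the last successful hint;
 * only when every map fails does it take the lock and grow
 * gIOKitPageableSpace with a fresh kmem_suballoc() submap, up to
 * kIOMaxPageableMaps.
 */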

struct IOMallocPageableRef
{
    vm_offset_t address;
    vm_size_t   size;
    vm_tag_t    tag;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size, ref->tag );

    return( kr );
}

static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    ref.tag  = tag;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( uintptr_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        panic("IOPageableMapForAddress: null");

    return( map );
}

static void IOFreePageablePages(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);
}

static uintptr_t IOMallocOnePageablePage(iopa_t * a)
{
    return ((uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT));
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    void * addr;

    if (size >= (page_size - 4*gIOPageAllocChunkBytes)) addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
    else                                                addr = ((void * ) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment));

    if (addr) {
#if IOALLOCDEBUG
        OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
    }

    return (addr);
}

void IOFreePageable(void * address, vm_size_t size)
{
#if IOALLOCDEBUG
    OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
    IOStatisticsAlloc(kIOStatisticsFreePageable, size);

    if (size < (page_size - 4*gIOPageAllocChunkBytes))
    {
        address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
        size = page_size;
    }
    if (address) IOFreePageablePages(address, size);
}
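
/*
 * Note (added for clarity): with gIOPageAllocChunkBytes == PAGE_SIZE/64,
 * requests smaller than page_size minus four chunks are carved out of
 * shared pages by the iopa_t chunk allocator below; larger requests go
 * straight to the pageable submaps. IOFreePageable() applies the same size
 * test to pick the matching release path.
 */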

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern "C" void
iopa_init(iopa_t * a)
{
    bzero(a, sizeof(*a));
    a->lock = IOLockAlloc();
    queue_init(&a->list);
}

static uintptr_t
iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
{
    uint32_t n, s;
    uint64_t avail = pa->avail;

    assert(avail);

    // find strings of count 1 bits in avail
    for (n = count; n > 1; n -= s)
    {
        s = n >> 1;
        avail = avail & (avail << s);
    }
    // and aligned
    avail &= align;

    if (avail)
    {
        n = __builtin_clzll(avail);
        pa->avail &= ~((-1ULL << (64 - count)) >> n);
        if (!pa->avail && pa->link.next)
        {
            remque(&pa->link);
            pa->link.next = 0;
        }
        return (n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa));
    }

    return (0);
}

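/*
 * Worked example (added for clarity): pa->avail holds one bit per
 * gIOPageAllocChunkBytes chunk, MSB first, 1 == free. For count == 3 the
 * fold above performs avail &= (avail << 1) twice, leaving a 1 only at
 * positions that start a run of three free chunks; 'align' then masks off
 * unaligned starts, __builtin_clzll() picks the first remaining run, and
 * the result is that chunk's byte offset within the page.
 */
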
uintptr_t
iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
{
    static const uint64_t align_masks[] = {
        0xFFFFFFFFFFFFFFFF,
        0xAAAAAAAAAAAAAAAA,
        0x8888888888888888,
        0x8080808080808080,
        0x8000800080008000,
        0x8000000080000000,
        0x8000000000000000,
    };
    iopa_page_t * pa;
    uintptr_t     addr = 0;
    uint32_t      count;
    uint64_t      align;

    if (!bytes) bytes = 1;
    count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
    align = align_masks[log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes)];

    IOLockLock(a->lock);
    __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_first(&a->list));
    while (!queue_end(&a->list, &pa->link))
    {
        addr = iopa_allocinpage(pa, count, align);
        if (addr)
        {
            a->bytecount += bytes;
            break;
        }
        __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_next(&pa->link));
    }
    IOLockUnlock(a->lock);

    if (!addr)
    {
        addr = alloc(a);
        if (addr)
        {
            pa = (typeof(pa)) (addr + page_size - gIOPageAllocChunkBytes);
            pa->signature = kIOPageAllocSignature;
            pa->avail     = -2ULL;

            addr = iopa_allocinpage(pa, count, align);
            IOLockLock(a->lock);
            if (pa->avail) enqueue_head(&a->list, &pa->link);
            a->pagecount++;
            if (addr) a->bytecount += bytes;
            IOLockUnlock(a->lock);
        }
    }

    assert((addr & ((1 << log2up(balign)) - 1)) == 0);
    return (addr);
}

uintptr_t
iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
{
    iopa_page_t * pa;
    uint32_t      count;
    uintptr_t     chunk;

    if (!bytes) bytes = 1;

    chunk = (addr & page_mask);
    assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));

    pa = (typeof(pa)) (addr | (page_size - gIOPageAllocChunkBytes));
    assert(kIOPageAllocSignature == pa->signature);

    count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
    chunk /= gIOPageAllocChunkBytes;

    IOLockLock(a->lock);
    if (!pa->avail)
    {
        assert(!pa->link.next);
        enqueue_tail(&a->list, &pa->link);
    }
    pa->avail |= ((-1ULL << (64 - count)) >> chunk);
    if (pa->avail != -2ULL) pa = 0;
    else
    {
        remque(&pa->link);
        pa->link.next = 0;
        pa->signature = 0;
        a->pagecount--;
        // page to free
        pa = (typeof(pa)) trunc_page(pa);
    }
    a->bytecount -= bytes;
    IOLockUnlock(a->lock);

    return ((uintptr_t) pa);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );
    if ((address | length) & PAGE_MASK)
    {
//      OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
        return( kIOReturnUnsupported );
    }
    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_offset_t OSKernelStackRemaining( void )
{
    return (ml_stack_remaining());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Sleep the calling thread for the indicated number of milliseconds.
 */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Sleep the calling thread for the indicated number of milliseconds, and
 * potentially an additional number of milliseconds up to the leeway value.
 */
void IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
{
    delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/*
 * Spin for indicated number of nanoseconds.
 */
void IOPause(unsigned nanoseconds)
{
    delay_for_interval(nanoseconds, kNanosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void _iolog_consputc(int ch, void *arg __unused)
{
    cons_putc_locked(ch);
}

static void _iolog_logputc(int ch, void *arg __unused)
{
    log_putc_locked(ch);
}

void IOLog(const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    IOLogv(format, ap);
    va_end(ap);
}

void IOLogv(const char *format, va_list ap)
{
    va_list ap2;

    va_copy(ap2, ap);

    bsd_log_lock();
    __doprnt(format, ap, _iolog_logputc, NULL, 16, TRUE);
    bsd_log_unlock();
    logwakeup();

    __doprnt(format, ap2, _iolog_consputc, NULL, 16, TRUE);
    va_end(ap2);
}

#if !__LP64__
void IOPanic(const char *reason)
{
    panic("%s", reason);
}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];    // that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
    const IONamedValue *regValueArray,
    int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

OSString * IOCopyLogNameForPID(int pid)
{
    char   buf[128];
    size_t len;
    snprintf(buf, sizeof(buf), "pid %d, ", pid);
    len = strlen(buf);
    proc_name(pid, buf + len, sizeof(buf) - len);
    return (OSString::withCString(buf));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
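
/*
 * Example values (added for clarity): IOSizeToAlignment() returns the bit
 * index of the highest set bit, i.e. floor(log2(size)), so
 * IOSizeToAlignment(4096) == 12; IOAlignmentToSize() is its inverse for
 * exact powers of two, e.g. IOAlignmentToSize(12) == 4096.
 */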

} /* extern "C" */