iokit/Kernel/IOLib.cpp
/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91   Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98   cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <vm/vm_kern.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/OSDebug.h>
#include <sys/sysctl.h>
#endif

#include "libkern/OSAtomic.h"
#include <libkern/c++/OSKext.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <os/log_private.h>
#include <sys/msgbuf.h>
#include <console/serial_protos.h>

#if IOKITSTATS

#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)

#else

#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */


#define TRACK_ALLOC  (IOTRACKING && (kIOTracking & gIOKitDebug))


extern "C"
{


mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern int
__doprnt(
    const char *fmt,
    va_list     argp,
    void        (*putc)(int, void *),
    void       *arg,
    int         radix,
    int         is_log);

extern void cons_putc_locked(char);
extern void bsd_log_lock(void);
extern void bsd_log_unlock(void);


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;

iopa_t gIOBMDPageAllocator;

/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

#if __x86_64__
enum { kIOMaxPageableMaps    = 8 };
enum { kIOPageableMapSize    = 512 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
#else
enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
#endif

typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;

static iopa_t gIOPageablePageAllocator;

uint32_t gIOPageAllocChunkBytes;

#if IOTRACKING
IOTrackingQueue * gIOMallocTracking;
IOTrackingQueue * gIOWireTracking;
IOTrackingQueue * gIOMapTracking;
#endif /* IOTRACKING */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if (libInitialized)
        return;

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

#if IOTRACKING
    IOTrackingInit();
    gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0,
                                             kIOTrackingQueueTypeAlloc,
                                             37);
    gIOWireTracking   = IOTrackingQueueAlloc(kIOWireTrackingName, 0, 0, page_size, 0, 0);

    size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024*1024);
    gIOMapTracking = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize,
                                          kIOTrackingQueueTypeDefaultOn
                                          | kIOTrackingQueueTypeMap
                                          | kIOTrackingQueueTypeUser,
                                          0);
#endif

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                        &gIOKitPageableSpace.maps[0].address,
                        kIOPageableMapSize,
                        TRUE,
                        VM_FLAGS_ANYWHERE,
                        VM_MAP_KERNEL_FLAGS_NONE,
                        VM_KERN_MEMORY_IOKIT,
                        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    gIOKitPageableSpace.lock        = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock  = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    gIOPageAllocChunkBytes = PAGE_SIZE/64;
    assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
    iopa_init(&gIOBMDPageAllocator);
    iopa_init(&gIOPageablePageAllocator);


    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static uint32_t
log2up(uint32_t size)
{
    if (size <= 1) size = 0;
    else size = 32 - __builtin_clz(size - 1);
    return (size);
}
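
/*
 * Illustrative note (not in the original source): log2up() returns
 * ceil(log2(size)) for size >= 1, e.g.
 *
 *     log2up(1) == 0,  log2up(2) == 1,  log2up(3) == 2,  log2up(4096) == 12
 *
 * so callers below use (1UL << log2up(alignment)) to round an alignment
 * request up to the next power of two.
 */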

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}
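
/*
 * Illustrative sketch (not part of the original file): typical use of the
 * thread helpers above from a kext. The names myWorker and myContext are
 * hypothetical.
 *
 *     static void myWorker(void * arg)    // matches IOThreadFunc
 *     {
 *         // ... perform work ...
 *         IOExitThread();                 // terminate this thread
 *     }
 *
 *     IOThread t = IOCreateThread(&myWorker, myContext);
 *     if (!t) {
 *         // thread could not be created
 *     }
 *
 * Because IOCreateThread() drops its reference with thread_deallocate()
 * before returning, the returned IOThread may already be invalid by the time
 * the caller sees it and should not be used to manipulate the thread.
 */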

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if IOTRACKING
struct IOLibMallocHeader
{
    IOTrackingAddress tracking;
};
#endif

#if IOTRACKING
#define sizeofIOLibMallocHeader (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibMallocHeader (0)
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMalloc(vm_size_t size)
{
    void *    address;
    vm_size_t allocSize;

    allocSize = size + sizeofIOLibMallocHeader;
#if IOTRACKING
    if (sizeofIOLibMallocHeader && (allocSize <= size)) return (NULL);  // overflow
#endif
    address = kalloc_tag_bt(allocSize, VM_KERN_MEMORY_IOKIT);

    if ( address ) {
#if IOTRACKING
        if (TRACK_ALLOC) {
            IOLibMallocHeader * hdr;
            hdr = (typeof(hdr)) address;
            bzero(&hdr->tracking, sizeof(hdr->tracking));
            hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
            hdr->tracking.size = size;
            IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
        }
#endif
        address = (typeof(address)) (((uintptr_t) address) + sizeofIOLibMallocHeader);

#if IOALLOCDEBUG
        OSAddAtomic(size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMalloc, size);
    }

    return address;
}

void IOFree(void * inAddress, vm_size_t size)
{
    void * address;

    if ((address = inAddress))
    {
        address = (typeof(address)) (((uintptr_t) address) - sizeofIOLibMallocHeader);

#if IOTRACKING
        if (TRACK_ALLOC)
        {
            IOLibMallocHeader * hdr;
            struct ptr_reference { void * ptr; };
            volatile struct ptr_reference ptr;

            // we're about to block in IOTrackingRemove(), make sure the original pointer
            // exists in memory or a register for leak scanning to find
            ptr.ptr = inAddress;

            hdr = (typeof(hdr)) address;
            if (size != hdr->tracking.size)
            {
                OSReportWithBacktrace("bad IOFree size 0x%lx should be 0x%lx", size, hdr->tracking.size);
                size = hdr->tracking.size;
            }
            IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
            ptr.ptr = NULL;
        }
#endif

        kfree(address, size + sizeofIOLibMallocHeader);
#if IOALLOCDEBUG
        OSAddAtomic(-size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsFree, size);
    }
}
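
/*
 * Illustrative sketch (not part of the original file): IOMalloc()/IOFree()
 * are a malloc/free pair for small, wired kernel allocations. The caller must
 * pass the same size to IOFree() that it passed to IOMalloc(), since outside
 * of the IOTRACKING debug path the size is not recorded anywhere.
 *
 *     struct MyState * state = (struct MyState *) IOMalloc(sizeof(*state));
 *     if (state) {
 *         bzero(state, sizeof(*state));
 *         // ... use state ...
 *         IOFree(state, sizeof(*state));
 *     }
 *
 * (MyState is a placeholder for whatever structure the calling driver defines.)
 */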

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_tag_t
IOMemoryTag(vm_map_t map)
{
    vm_tag_t tag;

    if (!vm_kernel_map_is_kernel(map)) return (VM_MEMORY_IOKIT);

    tag = vm_tag_bt();
    if (tag == VM_KERN_MEMORY_NONE) tag = VM_KERN_MEMORY_IOKIT;

    return (tag);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct IOLibPageMallocHeader
{
    mach_vm_size_t    allocationSize;
    mach_vm_address_t allocationAddress;
#if IOTRACKING
    IOTrackingAddress tracking;
#endif
};

#if IOTRACKING
#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader))
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t           kr;
    vm_offset_t             address;
    vm_offset_t             allocationAddress;
    vm_size_t               adjustedSize;
    uintptr_t               alignMask;
    IOLibPageMallocHeader * hdr;

    if (size == 0)
        return 0;

    alignment = (1UL << log2up(alignment));
    alignMask = alignment - 1;
    adjustedSize = size + sizeofIOLibPageMallocHeader;

    if (size > adjustedSize) {
        address = 0;    /* overflow detected */
    }
    else if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0, IOMemoryTag(kernel_map));
        if (KERN_SUCCESS != kr) address = 0;
#if IOTRACKING
        else if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
#endif

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0, IOMemoryTag(kernel_map));
            if (KERN_SUCCESS != kr) allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);

        if (allocationAddress) {
            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                        & (~alignMask);

            hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
            hdr->allocationSize    = adjustedSize;
            hdr->allocationAddress = allocationAddress;
#if IOTRACKING
            if (TRACK_ALLOC) {
                bzero(&hdr->tracking, sizeof(hdr->tracking));
                hdr->tracking.address = ~address;
                hdr->tracking.size    = size;
                IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
            }
#endif
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

    if( address) {
#if IOALLOCDEBUG
        OSAddAtomic(size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
    }

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t            allocationAddress;
    vm_size_t               adjustedSize;
    IOLibPageMallocHeader * hdr;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeofIOLibPageMallocHeader;
    if (adjustedSize >= page_size) {
#if IOTRACKING
        if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);
#endif
        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {
        hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
        adjustedSize      = hdr->allocationSize;
        allocationAddress = hdr->allocationAddress;

#if IOTRACKING
        if (TRACK_ALLOC)
        {
            if (size != hdr->tracking.size)
            {
                OSReportWithBacktrace("bad IOFreeAligned size 0x%lx should be 0x%lx", size, hdr->tracking.size);
                size = hdr->tracking.size;
            }
            IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
        }
#endif
        if (adjustedSize >= page_size) {
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        } else {
            kfree((void *)allocationAddress, adjustedSize);
        }
    }

#if IOALLOCDEBUG
    OSAddAtomic(-size, &debug_iomalloc_size);
#endif

    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}
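
/*
 * Illustrative sketch (not part of the original file): IOMallocAligned()
 * rounds the requested alignment up to a power of two; requests of a page or
 * more come straight from kernel_memory_allocate(), while smaller requests
 * are typically carved out of a kalloc block with an IOLibPageMallocHeader
 * stashed just below the returned pointer. As with IOMalloc(), the original
 * size must be passed back to IOFreeAligned().
 *
 *     void * buf = IOMallocAligned(0x2000, 0x1000);   // 8KB, page aligned
 *     if (buf) {
 *         // ... use buf ...
 *         IOFreeAligned(buf, 0x2000);
 *     }
 */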

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t       allocationAddress;
    mach_vm_size_t          adjustedSize;
    IOLibPageMallocHeader * hdr;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
    if (adjustedSize >= page_size) {
#if IOTRACKING
        if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, address, size);
#endif
        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {

        hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
        adjustedSize      = hdr->allocationSize;
        allocationAddress = hdr->allocationAddress;
#if IOTRACKING
        if (TRACK_ALLOC) IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
#endif
        kfree((void *)allocationAddress, adjustedSize);
    }

    IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
    OSAddAtomic(-size, &debug_iomalloc_size);
#endif
}

#if __arm__ || __arm64__
extern unsigned long gPhysBase, gPhysSize;
#endif

mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
                                     mach_vm_size_t alignment, bool contiguous)
{
    kern_return_t           kr;
    mach_vm_address_t       address;
    mach_vm_address_t       allocationAddress;
    mach_vm_size_t          adjustedSize;
    mach_vm_address_t       alignMask;
    IOLibPageMallocHeader * hdr;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
    if (adjustedSize < size) return (0);

    contiguous = (contiguous && (adjustedSize > page_size))
                   || (alignment > page_size);

    if (contiguous || maxPhys)
    {
        int options = 0;
        vm_offset_t virt;

        adjustedSize = size;
        contiguous = (contiguous && (adjustedSize > page_size))
                       || (alignment > page_size);

        if (!contiguous)
        {
#if __arm__ || __arm64__
            if (maxPhys >= (mach_vm_address_t)(gPhysBase + gPhysSize))
            {
                maxPhys = 0;
            }
            else
#endif
            if (maxPhys <= 0xFFFFFFFF)
            {
                maxPhys = 0;
                options |= KMA_LOMEM;
            }
            else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage))
            {
                maxPhys = 0;
            }
        }
        if (contiguous || maxPhys)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, atop(maxPhys), atop(alignMask), 0, IOMemoryTag(kernel_map));
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, options, IOMemoryTag(kernel_map));
        }
        if (KERN_SUCCESS == kr)
        {
            address = virt;
#if IOTRACKING
            if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
#endif
        }
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        if (adjustedSize < size) return (0);
        allocationAddress = (mach_vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);

        if (allocationAddress) {

            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                        & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page(address);

            hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
            hdr->allocationSize    = adjustedSize;
            hdr->allocationAddress = allocationAddress;
#if IOTRACKING
            if (TRACK_ALLOC) {
                bzero(&hdr->tracking, sizeof(hdr->tracking));
                hdr->tracking.address = ~address;
                hdr->tracking.size    = size;
                IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
            }
#endif
        } else
            address = 0;
    }

    if (address) {
        IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
        OSAddAtomic(size, &debug_iomalloc_size);
#endif
    }

    return (address);
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry
{
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask = alignment - 1;
        physicalMask = (0xFFFFFFFF ^ alignMask);

        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry *
        entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md          = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                     _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address          = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}

void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if( !address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreePhysical((mach_vm_address_t) address, size);
    }
}
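
/*
 * Illustrative note (not in the original source): when a physical address is
 * requested, IOMallocContiguous() is already implemented above in terms of
 * IOBufferMemoryDescriptor, which is what a new driver would typically use
 * directly. A minimal sketch (the mask value is an example choice for 32-bit
 * addressable, page-aligned memory):
 *
 *     IOBufferMemoryDescriptor * bmd =
 *         IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
 *             kIODirectionInOut | kIOMemoryPhysicallyContiguous,
 *             size, 0x00000000FFFFF000ULL);
 *     if (bmd) {
 *         void *            va = bmd->getBytesNoCopy();
 *         IOPhysicalAddress pa = bmd->getPhysicalAddress();
 *         // ... program the device with pa, fill va ...
 *         bmd->release();
 *     }
 */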

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t IOIteratePageableMaps(vm_size_t size,
                                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index    = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if (KERN_NO_SPACE != kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                           &min,
                           segSize,
                           TRUE,
                           VM_FLAGS_ANYWHERE,
                           VM_MAP_KERNEL_FLAGS_NONE,
                           VM_KERN_MEMORY_IOKIT,
                           &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

struct IOMallocPageableRef
{
    vm_offset_t address;
    vm_size_t   size;
    vm_tag_t    tag;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size, ref->tag );

    return( kr );
}

static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    ref.tag  = tag;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( uintptr_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        panic("IOPageableMapForAddress: null");

    return( map );
}

static void IOFreePageablePages(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);
}

static uintptr_t IOMallocOnePageablePage(iopa_t * a)
{
    return ((uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT));
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    void * addr;

    if (size >= (page_size - 4*gIOPageAllocChunkBytes)) addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
    else addr = ((void *) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment));

    if (addr) {
#if IOALLOCDEBUG
        OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
    }

    return (addr);
}

void IOFreePageable(void * address, vm_size_t size)
{
#if IOALLOCDEBUG
    OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
    IOStatisticsAlloc(kIOStatisticsFreePageable, size);

    if (size < (page_size - 4*gIOPageAllocChunkBytes))
    {
        address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
        size = page_size;
    }
    if (address) IOFreePageablePages(address, size);
}
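
/*
 * Illustrative note (not in the original source): IOMallocPageable() returns
 * kernel memory that may be paged out, so it must only be touched from a
 * context that can take a page fault (never from interrupt context). Requests
 * near a page or larger go straight to the pageable maps; smaller requests
 * are sub-allocated from shared pages by the iopa allocator below. tableSize
 * here is a placeholder.
 *
 *     void * tbl = IOMallocPageable(tableSize, sizeof(uint64_t));
 *     if (tbl) {
 *         // ... populate the table from a blockable context ...
 *         IOFreePageable(tbl, tableSize);
 *     }
 */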

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern "C" void
iopa_init(iopa_t * a)
{
    bzero(a, sizeof(*a));
    a->lock = IOLockAlloc();
    queue_init(&a->list);
}

static uintptr_t
iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
{
    uint32_t n, s;
    uint64_t avail = pa->avail;

    assert(avail);

    // find strings of count 1 bits in avail
    for (n = count; n > 1; n -= s)
    {
        s = n >> 1;
        avail = avail & (avail << s);
    }
    // and aligned
    avail &= align;

    if (avail)
    {
        n = __builtin_clzll(avail);
        pa->avail &= ~((-1ULL << (64 - count)) >> n);
        if (!pa->avail && pa->link.next)
        {
            remque(&pa->link);
            pa->link.next = 0;
        }
        return (n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa));
    }

    return (0);
}
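
/*
 * Illustrative note (not in the original source): pa->avail is a 64-bit
 * bitmap with one bit per gIOPageAllocChunkBytes chunk; bit 63 (the MSB)
 * stands for the first chunk of the page, and the last chunk is reserved for
 * the iopa_page_t header itself (hence avail is initialized to -2ULL below).
 * The halving loop above is the standard trick for finding a run of `count`
 * consecutive set bits: once it completes, each remaining set bit marks the
 * end of such a run, __builtin_clzll() selects the run closest to the start
 * of the page, and ((-1ULL << (64 - count)) >> n) builds the mask of exactly
 * those `count` bits so they can be cleared (marked allocated).
 */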

uintptr_t
iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
{
    static const uint64_t align_masks[] = {
        0xFFFFFFFFFFFFFFFF,
        0xAAAAAAAAAAAAAAAA,
        0x8888888888888888,
        0x8080808080808080,
        0x8000800080008000,
        0x8000000080000000,
        0x8000000000000000,
    };
    iopa_page_t * pa;
    uintptr_t     addr = 0;
    uint32_t      count;
    uint64_t      align;

    if (!bytes) bytes = 1;
    count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
    align = align_masks[log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes)];

    IOLockLock(a->lock);
    __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_first(&a->list));
    while (!queue_end(&a->list, &pa->link))
    {
        addr = iopa_allocinpage(pa, count, align);
        if (addr)
        {
            a->bytecount += bytes;
            break;
        }
        __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_next(&pa->link));
    }
    IOLockUnlock(a->lock);

    if (!addr)
    {
        addr = alloc(a);
        if (addr)
        {
            pa = (typeof(pa)) (addr + page_size - gIOPageAllocChunkBytes);
            pa->signature = kIOPageAllocSignature;
            pa->avail     = -2ULL;

            addr = iopa_allocinpage(pa, count, align);
            IOLockLock(a->lock);
            if (pa->avail) enqueue_head(&a->list, &pa->link);
            a->pagecount++;
            if (addr) a->bytecount += bytes;
            IOLockUnlock(a->lock);
        }
    }

    assert((addr & ((1 << log2up(balign)) - 1)) == 0);
    return (addr);
}

uintptr_t
iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
{
    iopa_page_t * pa;
    uint32_t      count;
    uintptr_t     chunk;

    if (!bytes) bytes = 1;

    chunk = (addr & page_mask);
    assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));

    pa = (typeof(pa)) (addr | (page_size - gIOPageAllocChunkBytes));
    assert(kIOPageAllocSignature == pa->signature);

    count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
    chunk /= gIOPageAllocChunkBytes;

    IOLockLock(a->lock);
    if (!pa->avail)
    {
        assert(!pa->link.next);
        enqueue_tail(&a->list, &pa->link);
    }
    pa->avail |= ((-1ULL << (64 - count)) >> chunk);
    if (pa->avail != -2ULL) pa = 0;
    else
    {
        remque(&pa->link);
        pa->link.next = 0;
        pa->signature = 0;
        a->pagecount--;
        // page to free
        pa = (typeof(pa)) trunc_page(pa);
    }
    a->bytecount -= bytes;
    IOLockUnlock(a->lock);

    return ((uintptr_t) pa);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );
    if ((address | length) & PAGE_MASK)
    {
//      OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
        return( kIOReturnUnsupported );
    }
    length  = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length  -= page_size;
    }

    return( ret );
}


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_offset_t OSKernelStackRemaining( void )
{
    return (ml_stack_remaining());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Sleep the calling thread for the indicated number of milliseconds.
 */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Sleep the calling thread for the indicated number of milliseconds, plus
 * potentially an additional number of milliseconds up to the leeway value.
 */
void IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
{
    delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/*
 * Spin for indicated number of nanoseconds.
 */
void IOPause(unsigned nanoseconds)
{
    delay_for_interval(nanoseconds, kNanosecondScale);
}
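
/*
 * Illustrative note (not in the original source): all four of these helpers
 * ultimately call delay_for_interval()/delay_for_interval_with_leeway(); the
 * difference is the time scale and the intended use. IOSleep() is meant for
 * millisecond-scale waits from a context that can block, while IODelay() and
 * IOPause() are meant for short microsecond/nanosecond-scale delays, e.g.
 * briefly waiting for a device register to settle with IODelay(10).
 */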

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void _IOLogv(const char *format, va_list ap, void *caller) __printflike(1,0);

__attribute__((noinline,not_tail_called))
void IOLog(const char *format, ...)
{
    void *caller = __builtin_return_address(0);
    va_list ap;

    va_start(ap, format);
    _IOLogv(format, ap, caller);
    va_end(ap);
}

__attribute__((noinline,not_tail_called))
void IOLogv(const char *format, va_list ap)
{
    void *caller = __builtin_return_address(0);
    _IOLogv(format, ap, caller);
}

void _IOLogv(const char *format, va_list ap, void *caller)
{
    va_list ap2;
    struct console_printbuf_state info_data;
    console_printbuf_state_init(&info_data, TRUE, TRUE);

    va_copy(ap2, ap);

    os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller);

    __doprnt(format, ap2, console_printbuf_putc, &info_data, 16, TRUE);
    console_printbuf_clear(&info_data);
    va_end(ap2);

    assertf(ml_get_interrupts_enabled() || ml_is_quiescing() || debug_mode_active() || !gCPUsRunning, "IOLog called with interrupts disabled");
}
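
/*
 * Illustrative sketch (not part of the original file): IOLog() accepts
 * printf-style formats and forwards the message both to os_log and to the
 * console print buffer; as the assertion above documents, it must not be
 * called with interrupts disabled.
 *
 *     IOLog("%s: start failed, status 0x%08x\n", getName(), status);
 *
 * (getName() and status are placeholders for whatever the calling driver has
 * in scope.)
 */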

#if !__LP64__
void IOPanic(const char *reason)
{
    panic("%s", reason);
}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];    // shared formatting buffer for undefined values; not thread safe

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
                            const IONamedValue *regValueArray,
                            int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}
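
/*
 * Illustrative sketch (not part of the original file): callers pass a table
 * of IONamedValue entries terminated by a NULL name. The state names below
 * are hypothetical.
 *
 *     static const IONamedValue gPowerStateNames[] = {
 *         { kMyOffState, "off" },
 *         { kMyOnState,  "on"  },
 *         { 0,           NULL  }
 *     };
 *
 *     IOLog("power state: %s\n", IOFindNameForValue(state, gPowerStateNames));
 *
 * Note that undefined values are formatted into the shared static buffer
 * above, so that string should be consumed immediately and is not safe
 * against concurrent callers.
 */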

OSString * IOCopyLogNameForPID(int pid)
{
    char   buf[128];
    size_t len;
    snprintf(buf, sizeof(buf), "pid %d, ", pid);
    len = strlen(buf);
    proc_name(pid, buf + len, sizeof(buf) - len);
    return (OSString::withCString(buf));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
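
/*
 * Illustrative note (not in the original source): IOSizeToAlignment() returns
 * the index of the highest set bit of size (i.e. floor(log2(size))) as an
 * IOAlignment, and IOAlignmentToSize() computes the inverse, 1 << align.
 * For example:
 *
 *     IOSizeToAlignment(4096)  == 12       IOAlignmentToSize(12) == 4096
 *     IOSizeToAlignment(12288) == 13       (12288 == 0x3000, highest bit 2^13)
 *     IOSizeToAlignment(1)     == 0        IOAlignmentToSize(0)  == 1
 */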

} /* extern "C" */