apple/xnu (xnu-6153.141.1): iokit/Kernel/IOLib.cpp
1 /*
2 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * HISTORY
30 *
31 * 17-Apr-91 Portions from libIO.m, Doug Mitchell at NeXT.
32 * 17-Nov-98 cpp
33 *
34 */
35
36 #include <IOKit/system.h>
37 #include <mach/sync_policy.h>
38 #include <machine/machine_routines.h>
39 #include <vm/vm_kern.h>
40 #include <libkern/c++/OSCPPDebug.h>
41
42 #include <IOKit/assert.h>
43
44 #include <IOKit/IOReturn.h>
45 #include <IOKit/IOLib.h>
46 #include <IOKit/IOLocks.h>
47 #include <IOKit/IOMapper.h>
48 #include <IOKit/IOBufferMemoryDescriptor.h>
49 #include <IOKit/IOKitDebug.h>
50
51 #include "IOKitKernelInternal.h"
52
53 #ifdef IOALLOCDEBUG
54 #include <libkern/OSDebug.h>
55 #include <sys/sysctl.h>
56 #endif
57
58 #include "libkern/OSAtomic.h"
59 #include <libkern/c++/OSKext.h>
60 #include <IOKit/IOStatisticsPrivate.h>
61 #include <os/log_private.h>
62 #include <sys/msgbuf.h>
63 #include <console/serial_protos.h>
64
65 #if IOKITSTATS
66
67 #define IOStatisticsAlloc(type, size) \
68 do { \
69 IOStatistics::countAlloc(type, size); \
70 } while (0)
71
72 #else
73
74 #define IOStatisticsAlloc(type, size)
75
76 #endif /* IOKITSTATS */
77
78
79 #define TRACK_ALLOC (IOTRACKING && (kIOTracking & gIOKitDebug))
80
81
82 extern "C"
83 {
84 mach_timespec_t IOZeroTvalspec = { 0, 0 };
85
86 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
87
88 extern int
89 __doprnt(
90 const char *fmt,
91 va_list argp,
92 void (*putc)(int, void *),
93 void *arg,
94 int radix,
95 int is_log);
96
97 extern void cons_putc_locked(char);
98 extern void bsd_log_lock(void);
99 extern void bsd_log_unlock(void);
100
101
102 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
103
104 lck_grp_t *IOLockGroup;
105
106 /*
107 * Global variables for use by iLogger
108 * These symbols are for use only by Apple diagnostic code.
109 * Binary compatibility is not guaranteed for kexts that reference these symbols.
110 */
111
112 void *_giDebugLogInternal = NULL;
113 void *_giDebugLogDataInternal = NULL;
114 void *_giDebugReserved1 = NULL;
115 void *_giDebugReserved2 = NULL;
116
117 iopa_t gIOBMDPageAllocator;
118
119 /*
120 * Static variables for this module.
121 */
122
123 static queue_head_t gIOMallocContiguousEntries;
124 static lck_mtx_t * gIOMallocContiguousEntriesLock;
125
126 #if __x86_64__
127 enum { kIOMaxPageableMaps = 8 };
128 enum { kIOPageableMapSize = 512 * 1024 * 1024 };
129 enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
130 #else
131 enum { kIOMaxPageableMaps = 16 };
132 enum { kIOPageableMapSize = 96 * 1024 * 1024 };
133 enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
134 #endif
135
136 typedef struct {
137 vm_map_t map;
138 vm_offset_t address;
139 vm_offset_t end;
140 } IOMapData;
141
142 static struct {
143 UInt32 count;
144 UInt32 hint;
145 IOMapData maps[kIOMaxPageableMaps];
146 lck_mtx_t * lock;
147 } gIOKitPageableSpace;
148
149 static iopa_t gIOPageablePageAllocator;
150
151 uint32_t gIOPageAllocChunkBytes;
152
153 #if IOTRACKING
154 IOTrackingQueue * gIOMallocTracking;
155 IOTrackingQueue * gIOWireTracking;
156 IOTrackingQueue * gIOMapTracking;
157 #endif /* IOTRACKING */
158
159 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
160
161 void
162 IOLibInit(void)
163 {
164 kern_return_t ret;
165
166 static bool libInitialized;
167
168 if (libInitialized) {
169 return;
170 }
171
172 IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);
173
174 #if IOTRACKING
175 IOTrackingInit();
176 gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0,
177 kIOTrackingQueueTypeAlloc,
178 37);
179 gIOWireTracking = IOTrackingQueueAlloc(kIOWireTrackingName, 0, 0, page_size, 0, 0);
180
181 size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024 * 1024);
182 gIOMapTracking = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize,
183 kIOTrackingQueueTypeDefaultOn
184 | kIOTrackingQueueTypeMap
185 | kIOTrackingQueueTypeUser,
186 0);
187 #endif
188
189 gIOKitPageableSpace.maps[0].address = 0;
190 ret = kmem_suballoc(kernel_map,
191 &gIOKitPageableSpace.maps[0].address,
192 kIOPageableMapSize,
193 TRUE,
194 VM_FLAGS_ANYWHERE,
195 VM_MAP_KERNEL_FLAGS_NONE,
196 VM_KERN_MEMORY_IOKIT,
197 &gIOKitPageableSpace.maps[0].map);
198 if (ret != KERN_SUCCESS) {
199 panic("failed to allocate iokit pageable map\n");
200 }
201
202 gIOKitPageableSpace.lock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
203 gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
204 gIOKitPageableSpace.hint = 0;
205 gIOKitPageableSpace.count = 1;
206
207 gIOMallocContiguousEntriesLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
208 queue_init( &gIOMallocContiguousEntries );
209
210 gIOPageAllocChunkBytes = PAGE_SIZE / 64;
211 assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
212 iopa_init(&gIOBMDPageAllocator);
213 iopa_init(&gIOPageablePageAllocator);
214
215
216 libInitialized = true;
217 }
218
219 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
220
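/*
 * log2up: ceiling of log2(size); returns 0 for size <= 1. Used below to round
 * alignments and chunk counts up to the next power of two.
 */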
221 static uint32_t
222 log2up(uint32_t size)
223 {
224 if (size <= 1) {
225 size = 0;
226 } else {
227 size = 32 - __builtin_clz(size - 1);
228 }
229 return size;
230 }
231
232 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
233
234 IOThread
235 IOCreateThread(IOThreadFunc fcn, void *arg)
236 {
237 kern_return_t result;
238 thread_t thread;
239
240 result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
241 if (result != KERN_SUCCESS) {
242 return NULL;
243 }
244
245 thread_deallocate(thread);
246
247 return thread;
248 }
249
250
251 void
252 IOExitThread(void)
253 {
254 (void) thread_terminate(current_thread());
255 }
256
257 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
258
259 void *
260 IOMallocZero(vm_size_t size)
261 {
262 void * result;
263 result = IOMalloc(size);
264 if (result) {
265 bzero(result, size);
266 }
267 return result;
268 }
269
270 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
271
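/*
 * When allocation tracking is enabled (TRACK_ALLOC), IOMalloc() prepends this
 * header to each allocation so the block can be registered with
 * gIOMallocTracking; sizeofIOLibMallocHeader evaluates to 0 when tracking is off.
 */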
272 #if IOTRACKING
273 struct IOLibMallocHeader {
274 IOTrackingAddress tracking;
275 };
276 #endif
277
278 #if IOTRACKING
279 #define sizeofIOLibMallocHeader (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
280 #else
281 #define sizeofIOLibMallocHeader (0)
282 #endif
283
284 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
285
286 void *
287 IOMalloc(vm_size_t size)
288 {
289 void * address;
290 vm_size_t allocSize;
291
292 allocSize = size + sizeofIOLibMallocHeader;
293 #if IOTRACKING
294 if (sizeofIOLibMallocHeader && (allocSize <= size)) {
295 return NULL; // overflow
296 }
297 #endif
298 address = kalloc_tag_bt(allocSize, VM_KERN_MEMORY_IOKIT);
299
300 if (address) {
301 #if IOTRACKING
302 if (TRACK_ALLOC) {
303 IOLibMallocHeader * hdr;
304 hdr = (typeof(hdr))address;
305 bzero(&hdr->tracking, sizeof(hdr->tracking));
306 hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
307 hdr->tracking.size = size;
308 IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
309 }
310 #endif
311 address = (typeof(address))(((uintptr_t) address) + sizeofIOLibMallocHeader);
312
313 #if IOALLOCDEBUG
314 OSAddAtomic(size, &debug_iomalloc_size);
315 #endif
316 IOStatisticsAlloc(kIOStatisticsMalloc, size);
317 }
318
319 return address;
320 }
321
322 void
323 IOFree(void * inAddress, vm_size_t size)
324 {
325 void * address;
326
327 if ((address = inAddress)) {
328 address = (typeof(address))(((uintptr_t) address) - sizeofIOLibMallocHeader);
329
330 #if IOTRACKING
331 if (TRACK_ALLOC) {
332 IOLibMallocHeader * hdr;
333 struct ptr_reference { void * ptr; };
334 volatile struct ptr_reference ptr;
335
336 // we're about to block in IOTrackingRemove(), make sure the original pointer
337 // exists in memory or a register for leak scanning to find
338 ptr.ptr = inAddress;
339
340 hdr = (typeof(hdr))address;
341 if (size != hdr->tracking.size) {
342 OSReportWithBacktrace("bad IOFree size 0x%lx should be 0x%lx", size, hdr->tracking.size);
343 size = hdr->tracking.size;
344 }
345 IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
346 ptr.ptr = NULL;
347 }
348 #endif
349
350 kfree(address, size + sizeofIOLibMallocHeader);
351 #if IOALLOCDEBUG
352 OSAddAtomic(-size, &debug_iomalloc_size);
353 #endif
354 IOStatisticsAlloc(kIOStatisticsFree, size);
355 }
356 }
357
358 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
359
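/*
 * Pick a VM tag for memory accounting: allocations on behalf of user maps are
 * tagged VM_MEMORY_IOKIT; kernel-map allocations use the caller's backtrace
 * tag when one is found, falling back to VM_KERN_MEMORY_IOKIT.
 */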
360 vm_tag_t
361 IOMemoryTag(vm_map_t map)
362 {
363 vm_tag_t tag;
364
365 if (!vm_kernel_map_is_kernel(map)) {
366 return VM_MEMORY_IOKIT;
367 }
368
369 tag = vm_tag_bt();
370 if (tag == VM_KERN_MEMORY_NONE) {
371 tag = VM_KERN_MEMORY_IOKIT;
372 }
373
374 return tag;
375 }
376
377 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
378
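/*
 * For sub-page allocations, IOMallocAligned() and
 * IOKernelAllocateWithPhysicalRestrict() over-allocate and store this header
 * immediately below the aligned address they return, recording the real
 * allocation base and size so the matching free can release it.
 */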
379 struct IOLibPageMallocHeader {
380 mach_vm_size_t allocationSize;
381 mach_vm_address_t allocationAddress;
382 #if IOTRACKING
383 IOTrackingAddress tracking;
384 #endif
385 };
386
387 #if IOTRACKING
388 #define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
389 #else
390 #define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader))
391 #endif
392
393 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
394
395 void *
396 IOMallocAligned(vm_size_t size, vm_size_t alignment)
397 {
398 kern_return_t kr;
399 vm_offset_t address;
400 vm_offset_t allocationAddress;
401 vm_size_t adjustedSize;
402 uintptr_t alignMask;
403 IOLibPageMallocHeader * hdr;
404
405 if (size == 0) {
406 return NULL;
407 }
408
409 alignment = (1UL << log2up(alignment));
410 alignMask = alignment - 1;
411 adjustedSize = size + sizeofIOLibPageMallocHeader;
412
413 if (size > adjustedSize) {
414 address = 0; /* overflow detected */
415 } else if (adjustedSize >= page_size) {
416 kr = kernel_memory_allocate(kernel_map, &address,
417 size, alignMask, 0, IOMemoryTag(kernel_map));
418 if (KERN_SUCCESS != kr) {
419 address = 0;
420 }
421 #if IOTRACKING
422 else if (TRACK_ALLOC) {
423 IOTrackingAlloc(gIOMallocTracking, address, size);
424 }
425 #endif
426 } else {
427 adjustedSize += alignMask;
428
429 if (adjustedSize >= page_size) {
430 kr = kernel_memory_allocate(kernel_map, &allocationAddress,
431 adjustedSize, 0, 0, IOMemoryTag(kernel_map));
432 if (KERN_SUCCESS != kr) {
433 allocationAddress = 0;
434 }
435 } else {
436 allocationAddress = (vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);
437 }
438
439 if (allocationAddress) {
440 address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
441 & (~alignMask);
442
443 hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
444 hdr->allocationSize = adjustedSize;
445 hdr->allocationAddress = allocationAddress;
446 #if IOTRACKING
447 if (TRACK_ALLOC) {
448 bzero(&hdr->tracking, sizeof(hdr->tracking));
449 hdr->tracking.address = ~address;
450 hdr->tracking.size = size;
451 IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
452 }
453 #endif
454 } else {
455 address = 0;
456 }
457 }
458
459 assert(0 == (address & alignMask));
460
461 if (address) {
462 #if IOALLOCDEBUG
463 OSAddAtomic(size, &debug_iomalloc_size);
464 #endif
465 IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
466 }
467
468 return (void *) address;
469 }
470
471 void
472 IOFreeAligned(void * address, vm_size_t size)
473 {
474 vm_address_t allocationAddress;
475 vm_size_t adjustedSize;
476 IOLibPageMallocHeader * hdr;
477
478 if (!address) {
479 return;
480 }
481
482 assert(size);
483
484 adjustedSize = size + sizeofIOLibPageMallocHeader;
485 if (adjustedSize >= page_size) {
486 #if IOTRACKING
487 if (TRACK_ALLOC) {
488 IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);
489 }
490 #endif
491 kmem_free( kernel_map, (vm_offset_t) address, size);
492 } else {
493 hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
494 adjustedSize = hdr->allocationSize;
495 allocationAddress = hdr->allocationAddress;
496
497 #if IOTRACKING
498 if (TRACK_ALLOC) {
499 if (size != hdr->tracking.size) {
500 OSReportWithBacktrace("bad IOFreeAligned size 0x%lx should be 0x%lx", size, hdr->tracking.size);
501 size = hdr->tracking.size;
502 }
503 IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
504 }
505 #endif
506 if (adjustedSize >= page_size) {
507 kmem_free( kernel_map, allocationAddress, adjustedSize);
508 } else {
509 kfree(allocationAddress, adjustedSize);
510 }
511 }
512
513 #if IOALLOCDEBUG
514 OSAddAtomic(-size, &debug_iomalloc_size);
515 #endif
516
517 IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
518 }
519
520 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
521
522 void
523 IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
524 {
525 mach_vm_address_t allocationAddress;
526 mach_vm_size_t adjustedSize;
527 IOLibPageMallocHeader * hdr;
528
529 if (!address) {
530 return;
531 }
532
533 assert(size);
534
535 adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
536 if (adjustedSize >= page_size) {
537 #if IOTRACKING
538 if (TRACK_ALLOC) {
539 IOTrackingFree(gIOMallocTracking, address, size);
540 }
541 #endif
542 kmem_free( kernel_map, (vm_offset_t) address, size);
543 } else {
544 hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
545 adjustedSize = hdr->allocationSize;
546 allocationAddress = hdr->allocationAddress;
547 #if IOTRACKING
548 if (TRACK_ALLOC) {
549 IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
550 }
551 #endif
552 kfree(allocationAddress, adjustedSize);
553 }
554
555 IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
556 #if IOALLOCDEBUG
557 OSAddAtomic(-size, &debug_iomalloc_size);
558 #endif
559 }
560
561 #if __arm__ || __arm64__
562 extern unsigned long gPhysBase, gPhysSize;
563 #endif
564
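/*
 * Allocate wired kernel memory with an optional physical address ceiling
 * (maxPhys) and alignment. Contiguous or physically restricted requests use
 * kmem_alloc_contig()/kernel_memory_allocate(); the remaining small,
 * unrestricted requests are carved from a kalloc block, with an
 * IOLibPageMallocHeader stored just below the returned address.
 */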
565 mach_vm_address_t
566 IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
567 mach_vm_size_t alignment, bool contiguous)
568 {
569 kern_return_t kr;
570 mach_vm_address_t address;
571 mach_vm_address_t allocationAddress;
572 mach_vm_size_t adjustedSize;
573 mach_vm_address_t alignMask;
574 IOLibPageMallocHeader * hdr;
575
576 if (size == 0) {
577 return 0;
578 }
579 if (alignment == 0) {
580 alignment = 1;
581 }
582
583 alignMask = alignment - 1;
584
585 if (os_mul_and_add_overflow(2, size, sizeofIOLibPageMallocHeader, &adjustedSize)) {
586 return 0;
587 }
588
589 contiguous = (contiguous && (adjustedSize > page_size))
590 || (alignment > page_size);
591
592 if (contiguous || maxPhys) {
593 int options = 0;
594 vm_offset_t virt;
595
596 adjustedSize = size;
597 contiguous = (contiguous && (adjustedSize > page_size))
598 || (alignment > page_size);
599
600 if (!contiguous) {
601 #if __arm__ || __arm64__
602 if (maxPhys >= (mach_vm_address_t)(gPhysBase + gPhysSize)) {
603 maxPhys = 0;
604 } else
605 #endif
606 if (maxPhys <= 0xFFFFFFFF) {
607 maxPhys = 0;
608 options |= KMA_LOMEM;
609 } else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage)) {
610 maxPhys = 0;
611 }
612 }
613 if (contiguous || maxPhys) {
614 kr = kmem_alloc_contig(kernel_map, &virt, size,
615 alignMask, atop(maxPhys), atop(alignMask), 0, IOMemoryTag(kernel_map));
616 } else {
617 kr = kernel_memory_allocate(kernel_map, &virt,
618 size, alignMask, options, IOMemoryTag(kernel_map));
619 }
620 if (KERN_SUCCESS == kr) {
621 address = virt;
622 #if IOTRACKING
623 if (TRACK_ALLOC) {
624 IOTrackingAlloc(gIOMallocTracking, address, size);
625 }
626 #endif
627 } else {
628 address = 0;
629 }
630 } else {
631 adjustedSize += alignMask;
632 if (adjustedSize < size) {
633 return 0;
634 }
635 allocationAddress = (mach_vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);
636
637 if (allocationAddress) {
638 address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
639 & (~alignMask);
640
641 if (atop_32(address) != atop_32(address + size - 1)) {
642 address = round_page(address);
643 }
644
645 hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
646 hdr->allocationSize = adjustedSize;
647 hdr->allocationAddress = allocationAddress;
648 #if IOTRACKING
649 if (TRACK_ALLOC) {
650 bzero(&hdr->tracking, sizeof(hdr->tracking));
651 hdr->tracking.address = ~address;
652 hdr->tracking.size = size;
653 IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
654 }
655 #endif
656 } else {
657 address = 0;
658 }
659 }
660
661 if (address) {
662 IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
663 #if IOALLOCDEBUG
664 OSAddAtomic(size, &debug_iomalloc_size);
665 #endif
666 }
667
668 return address;
669 }
670
671
672 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
673
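/*
 * When IOMallocContiguous() is asked for a physical address it allocates via
 * an IOBufferMemoryDescriptor and records the mapping on
 * gIOMallocContiguousEntries so that IOFreeContiguous() can later find and
 * release the descriptor.
 */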
674 struct _IOMallocContiguousEntry {
675 mach_vm_address_t virtualAddr;
676 IOBufferMemoryDescriptor * md;
677 queue_chain_t link;
678 };
679 typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
680
681 void *
682 IOMallocContiguous(vm_size_t size, vm_size_t alignment,
683 IOPhysicalAddress * physicalAddress)
684 {
685 mach_vm_address_t address = 0;
686
687 if (size == 0) {
688 return NULL;
689 }
690 if (alignment == 0) {
691 alignment = 1;
692 }
693
694 /* Do we want a physical address? */
695 if (!physicalAddress) {
696 address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
697 } else {
698 do {
699 IOBufferMemoryDescriptor * bmd;
700 mach_vm_address_t physicalMask;
701 vm_offset_t alignMask;
702
703 alignMask = alignment - 1;
704 physicalMask = (0xFFFFFFFF ^ alignMask);
705
706 bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
707 kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
708 if (!bmd) {
709 break;
710 }
711
712 _IOMallocContiguousEntry *
713 entry = IONew(_IOMallocContiguousEntry, 1);
714 if (!entry) {
715 bmd->release();
716 break;
717 }
718 entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
719 entry->md = bmd;
720 lck_mtx_lock(gIOMallocContiguousEntriesLock);
721 queue_enter( &gIOMallocContiguousEntries, entry,
722 _IOMallocContiguousEntry *, link );
723 lck_mtx_unlock(gIOMallocContiguousEntriesLock);
724
725 address = (mach_vm_address_t) entry->virtualAddr;
726 *physicalAddress = bmd->getPhysicalAddress();
727 			} while (false);
728 }
729
730 return (void *) address;
731 }
732
733 void
734 IOFreeContiguous(void * _address, vm_size_t size)
735 {
736 _IOMallocContiguousEntry * entry;
737 IOMemoryDescriptor * md = NULL;
738
739 mach_vm_address_t address = (mach_vm_address_t) _address;
740
741 if (!address) {
742 return;
743 }
744
745 assert(size);
746
747 lck_mtx_lock(gIOMallocContiguousEntriesLock);
748 queue_iterate( &gIOMallocContiguousEntries, entry,
749 _IOMallocContiguousEntry *, link )
750 {
751 if (entry->virtualAddr == address) {
752 md = entry->md;
753 queue_remove( &gIOMallocContiguousEntries, entry,
754 _IOMallocContiguousEntry *, link );
755 break;
756 }
757 }
758 lck_mtx_unlock(gIOMallocContiguousEntriesLock);
759
760 if (md) {
761 md->release();
762 IODelete(entry, _IOMallocContiguousEntry, 1);
763 } else {
764 IOKernelFreePhysical((mach_vm_address_t) address, size);
765 }
766 }
767
768 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
769
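/*
 * Walk the existing pageable submaps (starting at the last successful hint)
 * and invoke the callback on each until one succeeds. If every map reports
 * KERN_NO_SPACE, grow the pool with a new kmem_suballoc() submap, up to the
 * kIOMaxPageableMaps limit.
 */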
770 kern_return_t
771 IOIteratePageableMaps(vm_size_t size,
772 IOIteratePageableMapsCallback callback, void * ref)
773 {
774 kern_return_t kr = kIOReturnNotReady;
775 vm_size_t segSize;
776 UInt32 attempts;
777 UInt32 index;
778 vm_offset_t min;
779 vm_map_t map;
780
781 if (size > kIOPageableMaxMapSize) {
782 return kIOReturnBadArgument;
783 }
784
785 do {
786 index = gIOKitPageableSpace.hint;
787 attempts = gIOKitPageableSpace.count;
788 while (attempts--) {
789 kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
790 if (KERN_SUCCESS == kr) {
791 gIOKitPageableSpace.hint = index;
792 break;
793 }
794 if (index) {
795 index--;
796 } else {
797 index = gIOKitPageableSpace.count - 1;
798 }
799 }
800 if (KERN_NO_SPACE != kr) {
801 break;
802 }
803
804 lck_mtx_lock( gIOKitPageableSpace.lock );
805
806 index = gIOKitPageableSpace.count;
807 if (index >= (kIOMaxPageableMaps - 1)) {
808 lck_mtx_unlock( gIOKitPageableSpace.lock );
809 break;
810 }
811
812 if (size < kIOPageableMapSize) {
813 segSize = kIOPageableMapSize;
814 } else {
815 segSize = size;
816 }
817
818 min = 0;
819 kr = kmem_suballoc(kernel_map,
820 &min,
821 segSize,
822 TRUE,
823 VM_FLAGS_ANYWHERE,
824 VM_MAP_KERNEL_FLAGS_NONE,
825 VM_KERN_MEMORY_IOKIT,
826 &map);
827 if (KERN_SUCCESS != kr) {
828 lck_mtx_unlock( gIOKitPageableSpace.lock );
829 break;
830 }
831
832 gIOKitPageableSpace.maps[index].map = map;
833 gIOKitPageableSpace.maps[index].address = min;
834 gIOKitPageableSpace.maps[index].end = min + segSize;
835 gIOKitPageableSpace.hint = index;
836 gIOKitPageableSpace.count = index + 1;
837
838 lck_mtx_unlock( gIOKitPageableSpace.lock );
839 } while (true);
840
841 return kr;
842 }
843
844 struct IOMallocPageableRef {
845 vm_offset_t address;
846 vm_size_t size;
847 vm_tag_t tag;
848 };
849
850 static kern_return_t
851 IOMallocPageableCallback(vm_map_t map, void * _ref)
852 {
853 struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
854 kern_return_t kr;
855
856 kr = kmem_alloc_pageable( map, &ref->address, ref->size, ref->tag );
857
858 return kr;
859 }
860
861 static void *
862 IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
863 {
864 kern_return_t kr = kIOReturnNotReady;
865 struct IOMallocPageableRef ref;
866
867 if (alignment > page_size) {
868 return NULL;
869 }
870 if (size > kIOPageableMaxMapSize) {
871 return NULL;
872 }
873
874 ref.size = size;
875 ref.tag = tag;
876 kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
877 if (kIOReturnSuccess != kr) {
878 ref.address = 0;
879 }
880
881 return (void *) ref.address;
882 }
883
884 vm_map_t
885 IOPageableMapForAddress( uintptr_t address )
886 {
887 vm_map_t map = NULL;
888 UInt32 index;
889
890 for (index = 0; index < gIOKitPageableSpace.count; index++) {
891 if ((address >= gIOKitPageableSpace.maps[index].address)
892 && (address < gIOKitPageableSpace.maps[index].end)) {
893 map = gIOKitPageableSpace.maps[index].map;
894 break;
895 }
896 }
897 if (!map) {
898 panic("IOPageableMapForAddress: null");
899 }
900
901 return map;
902 }
903
904 static void
905 IOFreePageablePages(void * address, vm_size_t size)
906 {
907 vm_map_t map;
908
909 map = IOPageableMapForAddress((vm_address_t) address);
910 if (map) {
911 kmem_free( map, (vm_offset_t) address, size);
912 }
913 }
914
915 static uintptr_t
916 IOMallocOnePageablePage(iopa_t * a)
917 {
918 return (uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT);
919 }
920
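/*
 * Pageable allocations smaller than (page_size - 4 * gIOPageAllocChunkBytes)
 * are packed into shared pages by the iopa allocator; requests at or above
 * that size get whole pages from the pageable submaps.
 */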
921 void *
922 IOMallocPageable(vm_size_t size, vm_size_t alignment)
923 {
924 void * addr;
925
926 if (size >= (page_size - 4 * gIOPageAllocChunkBytes)) {
927 addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
928 } else {
929 addr = ((void *) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment));
930 }
931
932 if (addr) {
933 #if IOALLOCDEBUG
934 OSAddAtomicLong(size, &debug_iomallocpageable_size);
935 #endif
936 IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
937 }
938
939 return addr;
940 }
941
942 void
943 IOFreePageable(void * address, vm_size_t size)
944 {
945 #if IOALLOCDEBUG
946 OSAddAtomicLong(-size, &debug_iomallocpageable_size);
947 #endif
948 IOStatisticsAlloc(kIOStatisticsFreePageable, size);
949
950 if (size < (page_size - 4 * gIOPageAllocChunkBytes)) {
951 address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
952 size = page_size;
953 }
954 if (address) {
955 IOFreePageablePages(address, size);
956 }
957 }
958
959 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
960
961 extern "C" void
962 iopa_init(iopa_t * a)
963 {
964 bzero(a, sizeof(*a));
965 a->lock = IOLockAlloc();
966 queue_init(&a->list);
967 }
968
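/*
 * Each iopa page is divided into 64 chunks of gIOPageAllocChunkBytes; the
 * iopa_page_t bookkeeping lives in the last chunk and its 64-bit 'avail'
 * bitmap marks free chunks (bit 63 = chunk 0). iopa_allocinpage() searches
 * that bitmap for a suitably aligned run of 'count' free chunks.
 */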
969 static uintptr_t
970 iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
971 {
972 uint32_t n, s;
973 uint64_t avail = pa->avail;
974
975 assert(avail);
976
977 	// find runs of count consecutive 1 bits in avail
978 for (n = count; n > 1; n -= s) {
979 s = n >> 1;
980 avail = avail & (avail << s);
981 }
982 // and aligned
983 avail &= align;
984
985 if (avail) {
986 n = __builtin_clzll(avail);
987 pa->avail &= ~((-1ULL << (64 - count)) >> n);
988 if (!pa->avail && pa->link.next) {
989 remque(&pa->link);
990 pa->link.next = NULL;
991 }
992 return n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa);
993 }
994
995 return 0;
996 }
997
998 uintptr_t
999 iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
1000 {
1001 static const uint64_t align_masks[] = {
1002 0xFFFFFFFFFFFFFFFF,
1003 0xAAAAAAAAAAAAAAAA,
1004 0x8888888888888888,
1005 0x8080808080808080,
1006 0x8000800080008000,
1007 0x8000000080000000,
1008 0x8000000000000000,
1009 };
1010 iopa_page_t * pa;
1011 uintptr_t addr = 0;
1012 uint32_t count;
1013 uint64_t align;
1014
1015 if (!bytes) {
1016 bytes = 1;
1017 }
1018 count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
1019 align = align_masks[log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes)];
1020
1021 IOLockLock(a->lock);
1022 __IGNORE_WCASTALIGN(pa = (typeof(pa))queue_first(&a->list));
1023 while (!queue_end(&a->list, &pa->link)) {
1024 addr = iopa_allocinpage(pa, count, align);
1025 if (addr) {
1026 a->bytecount += bytes;
1027 break;
1028 }
1029 __IGNORE_WCASTALIGN(pa = (typeof(pa))queue_next(&pa->link));
1030 }
1031 IOLockUnlock(a->lock);
1032
1033 if (!addr) {
1034 addr = alloc(a);
1035 if (addr) {
1036 pa = (typeof(pa))(addr + page_size - gIOPageAllocChunkBytes);
1037 pa->signature = kIOPageAllocSignature;
1038 pa->avail = -2ULL;
1039
1040 addr = iopa_allocinpage(pa, count, align);
1041 IOLockLock(a->lock);
1042 if (pa->avail) {
1043 enqueue_head(&a->list, &pa->link);
1044 }
1045 a->pagecount++;
1046 if (addr) {
1047 a->bytecount += bytes;
1048 }
1049 IOLockUnlock(a->lock);
1050 }
1051 }
1052
1053 assert((addr & ((1 << log2up(balign)) - 1)) == 0);
1054 return addr;
1055 }
1056
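/*
 * Return the chunks to their page's bitmap. If the page becomes completely
 * free (avail back to -2ULL) it is removed from the allocator and its base
 * address is returned so the caller can free the whole page; otherwise
 * returns 0.
 */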
1057 uintptr_t
1058 iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
1059 {
1060 iopa_page_t * pa;
1061 uint32_t count;
1062 uintptr_t chunk;
1063
1064 if (!bytes) {
1065 bytes = 1;
1066 }
1067
1068 chunk = (addr & page_mask);
1069 assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));
1070
1071 pa = (typeof(pa))(addr | (page_size - gIOPageAllocChunkBytes));
1072 assert(kIOPageAllocSignature == pa->signature);
1073
1074 count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
1075 chunk /= gIOPageAllocChunkBytes;
1076
1077 IOLockLock(a->lock);
1078 if (!pa->avail) {
1079 assert(!pa->link.next);
1080 enqueue_tail(&a->list, &pa->link);
1081 }
1082 pa->avail |= ((-1ULL << (64 - count)) >> chunk);
1083 if (pa->avail != -2ULL) {
1084 pa = NULL;
1085 } else {
1086 remque(&pa->link);
1087 pa->link.next = NULL;
1088 pa->signature = 0;
1089 a->pagecount--;
1090 // page to free
1091 pa = (typeof(pa))trunc_page(pa);
1092 }
1093 a->bytecount -= bytes;
1094 IOLockUnlock(a->lock);
1095
1096 return (uintptr_t) pa;
1097 }
1098
1099 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1100
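/*
 * Change the cache mode of an existing, page-aligned kernel mapping by
 * unmapping and remapping each page with the requested kIOMapCache* mode.
 * Only supported for kernel_task.
 */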
1101 IOReturn
1102 IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
1103 IOByteCount length, IOOptionBits cacheMode )
1104 {
1105 IOReturn ret = kIOReturnSuccess;
1106 ppnum_t pagenum;
1107
1108 if (task != kernel_task) {
1109 return kIOReturnUnsupported;
1110 }
1111 if ((address | length) & PAGE_MASK) {
1112 // OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
1113 return kIOReturnUnsupported;
1114 }
1115 length = round_page(address + length) - trunc_page( address );
1116 address = trunc_page( address );
1117
1118 // make map mode
1119 cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;
1120
1121 while ((kIOReturnSuccess == ret) && (length > 0)) {
1122 // Get the physical page number
1123 pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
1124 if (pagenum) {
1125 ret = IOUnmapPages( get_task_map(task), address, page_size );
1126 ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
1127 } else {
1128 ret = kIOReturnVMError;
1129 }
1130
1131 address += page_size;
1132 length -= page_size;
1133 }
1134
1135 return ret;
1136 }
1137
1138
1139 IOReturn
1140 IOFlushProcessorCache( task_t task, IOVirtualAddress address,
1141 IOByteCount length )
1142 {
1143 if (task != kernel_task) {
1144 return kIOReturnUnsupported;
1145 }
1146
1147 flush_dcache64((addr64_t) address, (unsigned) length, false );
1148
1149 return kIOReturnSuccess;
1150 }
1151
1152 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1153
1154 vm_offset_t
1155 OSKernelStackRemaining( void )
1156 {
1157 return ml_stack_remaining();
1158 }
1159
1160 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1161
1162 /*
1163  * Sleep the calling thread for the indicated number of milliseconds.
1164 */
1165 void
1166 IOSleep(unsigned milliseconds)
1167 {
1168 delay_for_interval(milliseconds, kMillisecondScale);
1169 }
1170
1171 /*
1172  * Sleep the calling thread for the indicated number of milliseconds, and
1173  * potentially an additional number of milliseconds up to the leeway value.
1174 */
1175 void
1176 IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
1177 {
1178 delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);
1179 }
1180
1181 /*
1182 * Spin for indicated number of microseconds.
1183 */
1184 void
1185 IODelay(unsigned microseconds)
1186 {
1187 delay_for_interval(microseconds, kMicrosecondScale);
1188 }
1189
1190 /*
1191 * Spin for indicated number of nanoseconds.
1192 */
1193 void
1194 IOPause(unsigned nanoseconds)
1195 {
1196 delay_for_interval(nanoseconds, kNanosecondScale);
1197 }
1198
1199 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1200
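/*
 * IOLog()/IOLogv() capture their caller's return address and hand the message
 * to _IOLogv(), which sends it both to os_log and, via __doprnt(), to the
 * console print buffer. The assertf() below flags calls made with interrupts
 * disabled.
 */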
1201 static void _IOLogv(const char *format, va_list ap, void *caller) __printflike(1, 0);
1202
1203 __attribute__((noinline, not_tail_called))
1204 void
1205 IOLog(const char *format, ...)
1206 {
1207 void *caller = __builtin_return_address(0);
1208 va_list ap;
1209
1210 va_start(ap, format);
1211 _IOLogv(format, ap, caller);
1212 va_end(ap);
1213 }
1214
1215 __attribute__((noinline, not_tail_called))
1216 void
1217 IOLogv(const char *format, va_list ap)
1218 {
1219 void *caller = __builtin_return_address(0);
1220 _IOLogv(format, ap, caller);
1221 }
1222
1223 void
1224 _IOLogv(const char *format, va_list ap, void *caller)
1225 {
1226 va_list ap2;
1227 struct console_printbuf_state info_data;
1228 console_printbuf_state_init(&info_data, TRUE, TRUE);
1229
1230 va_copy(ap2, ap);
1231
1232 os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller);
1233
1234 __doprnt(format, ap2, console_printbuf_putc, &info_data, 16, TRUE);
1235 console_printbuf_clear(&info_data);
1236 va_end(ap2);
1237
1238 assertf(ml_get_interrupts_enabled() || ml_is_quiescing() || debug_mode_active() || !gCPUsRunning, "IOLog called with interrupts disabled");
1239 }
1240
1241 #if !__LP64__
1242 void
1243 IOPanic(const char *reason)
1244 {
1245 panic("%s", reason);
1246 }
1247 #endif
1248
1249 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1250
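/*
 * Dump up to 4KB of a buffer through the supplied output function as a hex
 * table, 16 bytes per row, with a printable-ASCII column on the right.
 */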
1251 void
1252 IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size,
1253 void (*output)(const char *format, ...))
1254 {
1255 size_t idx, linestart;
1256 enum { bytelen = (sizeof("0xZZ, ") - 1) };
1257 char hex[(bytelen * 16) + 1];
1258 uint8_t c, chars[17];
1259
1260 output("%s(0x%lx):\n", title, size);
1261 output(" 0 1 2 3 4 5 6 7 8 9 A B C D E F\n");
1262 if (size > 4096) {
1263 size = 4096;
1264 }
1265 chars[16] = 0;
1266 for (idx = 0, linestart = 0; idx < size;) {
1267 c = ((char *)buffer)[idx];
1268 snprintf(&hex[bytelen * (idx & 15)], bytelen + 1, "0x%02x, ", c);
1269 chars[idx & 15] = ((c >= 0x20) && (c <= 0x7f)) ? c : ' ';
1270 idx++;
1271 if ((idx == size) || !(idx & 15)) {
1272 if (idx & 15) {
1273 chars[idx & 15] = 0;
1274 }
1275 output("/* %04lx: */ %-96s /* |%-16s| */\n", linestart, hex, chars);
1276 linestart += 16;
1277 }
1278 }
1279 }
1280
1281 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1282
1283 /*
1284 * Convert a integer constant (typically a #define or enum) to a string.
1285  * Convert an integer constant (typically a #define or enum) to a string.
1286 static char noValue[80]; // that's pretty
1287
1288 const char *
1289 IOFindNameForValue(int value, const IONamedValue *regValueArray)
1290 {
1291 for (; regValueArray->name; regValueArray++) {
1292 if (regValueArray->value == value) {
1293 return regValueArray->name;
1294 }
1295 }
1296 snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
1297 return (const char *)noValue;
1298 }
1299
1300 IOReturn
1301 IOFindValueForName(const char *string,
1302 const IONamedValue *regValueArray,
1303 int *value)
1304 {
1305 for (; regValueArray->name; regValueArray++) {
1306 if (!strcmp(regValueArray->name, string)) {
1307 *value = regValueArray->value;
1308 return kIOReturnSuccess;
1309 }
1310 }
1311 return kIOReturnBadArgument;
1312 }
1313
1314 OSString *
1315 IOCopyLogNameForPID(int pid)
1316 {
1317 char buf[128];
1318 size_t len;
1319 snprintf(buf, sizeof(buf), "pid %d, ", pid);
1320 len = strlen(buf);
1321 proc_name(pid, buf + len, sizeof(buf) - len);
1322 return OSString::withCString(buf);
1323 }
1324
1325 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1326
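/*
 * IOSizeToAlignment() returns the log2 of the highest power of two not
 * exceeding size (the position of size's most significant set bit);
 * IOAlignmentToSize() is its inverse, returning 1 << align.
 */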
1327 IOAlignment
1328 IOSizeToAlignment(unsigned int size)
1329 {
1330 int shift;
1331 const int intsize = sizeof(unsigned int) * 8;
1332
1333 for (shift = 1; shift < intsize; shift++) {
1334 if (size & 0x80000000) {
1335 return (IOAlignment)(intsize - shift);
1336 }
1337 size <<= 1;
1338 }
1339 return 0;
1340 }
1341
1342 unsigned int
1343 IOAlignmentToSize(IOAlignment align)
1344 {
1345 unsigned int size;
1346
1347 for (size = 1; align; align--) {
1348 size <<= 1;
1349 }
1350 return size;
1351 }
1352 } /* extern "C" */