1/*
2 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * HISTORY
30 *
31 * 17-Apr-91 Portions from libIO.m, Doug Mitchell at NeXT.
32 * 17-Nov-98 cpp
33 *
34 */
35
36#include <IOKit/system.h>
37#include <mach/sync_policy.h>
38#include <machine/machine_routines.h>
39#include <vm/vm_kern.h>
40#include <libkern/c++/OSCPPDebug.h>
41
42#include <IOKit/assert.h>
43
44#include <IOKit/IOReturn.h>
45#include <IOKit/IOLib.h>
46#include <IOKit/IOLocks.h>
47#include <IOKit/IOMapper.h>
48#include <IOKit/IOBufferMemoryDescriptor.h>
49#include <IOKit/IOKitDebug.h>
50
51#include "IOKitKernelInternal.h"
52
53#ifdef IOALLOCDEBUG
54#include <libkern/OSDebug.h>
55#include <sys/sysctl.h>
56#endif
57
58#include "libkern/OSAtomic.h"
59#include <libkern/c++/OSKext.h>
60#include <IOKit/IOStatisticsPrivate.h>
61#include <os/log_private.h>
62#include <sys/msgbuf.h>
63#include <console/serial_protos.h>
64
65#if IOKITSTATS
66
67#define IOStatisticsAlloc(type, size) \
68do { \
69 IOStatistics::countAlloc(type, size); \
70} while (0)
71
72#else
73
74#define IOStatisticsAlloc(type, size)
75
76#endif /* IOKITSTATS */
77
78
79#define TRACK_ALLOC (IOTRACKING && (kIOTracking & gIOKitDebug))
80
81
82extern "C"
83{
84mach_timespec_t IOZeroTvalspec = { 0, 0 };
85
86extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
87
88extern int
89__doprnt(
90 const char *fmt,
91 va_list argp,
92 void (*putc)(int, void *),
93 void *arg,
94 int radix,
95 int is_log);
96
97extern void cons_putc_locked(char);
98extern bool bsd_log_lock(bool);
99extern void bsd_log_unlock(void);
100
101
102/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
103
104lck_grp_t *IOLockGroup;
105
106/*
107 * Global variables for use by iLogger
108 * These symbols are for use only by Apple diagnostic code.
109 * Binary compatibility is not guaranteed for kexts that reference these symbols.
110 */
111
112void *_giDebugLogInternal = NULL;
113void *_giDebugLogDataInternal = NULL;
114void *_giDebugReserved1 = NULL;
115void *_giDebugReserved2 = NULL;
116
117iopa_t gIOBMDPageAllocator;
118
119/*
120 * Static variables for this module.
121 */
122
123static queue_head_t gIOMallocContiguousEntries;
124static lck_mtx_t * gIOMallocContiguousEntriesLock;
125
126#if __x86_64__
127enum { kIOMaxPageableMaps = 8 };
128enum { kIOPageableMapSize = 512 * 1024 * 1024 };
129enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
130#else
131enum { kIOMaxPageableMaps = 16 };
132enum { kIOPageableMapSize = 96 * 1024 * 1024 };
133enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
134#endif
135
136typedef struct {
137 vm_map_t map;
138 vm_offset_t address;
139 vm_offset_t end;
140} IOMapData;
141
142static struct {
143 UInt32 count;
144 UInt32 hint;
145 IOMapData maps[kIOMaxPageableMaps];
146 lck_mtx_t * lock;
147} gIOKitPageableSpace;
148
149static iopa_t gIOPageablePageAllocator;
150
151uint32_t gIOPageAllocChunkBytes;
152
153#if IOTRACKING
154IOTrackingQueue * gIOMallocTracking;
155IOTrackingQueue * gIOWireTracking;
156IOTrackingQueue * gIOMapTracking;
157#endif /* IOTRACKING */
158
159/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
160
161void
162IOLibInit(void)
163{
164 kern_return_t ret;
165
166 static bool libInitialized;
167
168 if (libInitialized) {
169 return;
170 }
171
172 IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);
173
174#if IOTRACKING
175 IOTrackingInit();
176 gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0,
177 kIOTrackingQueueTypeAlloc,
178 37);
179 gIOWireTracking = IOTrackingQueueAlloc(kIOWireTrackingName, 0, 0, page_size, 0, 0);
180
181 size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024 * 1024);
182 gIOMapTracking = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize,
183 kIOTrackingQueueTypeDefaultOn
184 | kIOTrackingQueueTypeMap
185 | kIOTrackingQueueTypeUser,
186 0);
187#endif
188
189 gIOKitPageableSpace.maps[0].address = 0;
190 ret = kmem_suballoc(kernel_map,
191 &gIOKitPageableSpace.maps[0].address,
192 kIOPageableMapSize,
193 TRUE,
194 VM_FLAGS_ANYWHERE,
195 VM_MAP_KERNEL_FLAGS_NONE,
196 VM_KERN_MEMORY_IOKIT,
197 &gIOKitPageableSpace.maps[0].map);
198 if (ret != KERN_SUCCESS) {
199 panic("failed to allocate iokit pageable map\n");
200 }
201
202 gIOKitPageableSpace.lock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
203 gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
204 gIOKitPageableSpace.hint = 0;
205 gIOKitPageableSpace.count = 1;
206
207 gIOMallocContiguousEntriesLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
208 queue_init( &gIOMallocContiguousEntries );
209
210 gIOPageAllocChunkBytes = PAGE_SIZE / 64;
211 assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
212 iopa_init(&gIOBMDPageAllocator);
213 iopa_init(&gIOPageablePageAllocator);
214
215
216 libInitialized = true;
217}
218
219/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
220
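/*
 * log2up() returns ceil(log2(size)): log2up(1) == 0, log2up(3) == 2,
 * log2up(4096) == 12. It is used below to round alignments up to the next
 * power of two and to index the iopa align_masks table.
 */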
221static vm_size_t
222log2up(vm_size_t size)
223{
224 if (size <= 1) {
225 size = 0;
226 } else {
227#if __LP64__
228 size = 64 - __builtin_clzl(size - 1);
229#else
230 size = 32 - __builtin_clzl(size - 1);
231#endif
232 }
233 return size;
234}
235
236/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
237
238IOThread
239IOCreateThread(IOThreadFunc fcn, void *arg)
240{
241 kern_return_t result;
242 thread_t thread;
243
244 result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
245 if (result != KERN_SUCCESS) {
246 return NULL;
247 }
248
249 thread_deallocate(thread);
250
251 return thread;
252}
253
254
255void
256IOExitThread(void)
257{
258 (void) thread_terminate(current_thread());
259}
260
261void *
262IOMalloc_external(
263 vm_size_t size);
264void *
265IOMalloc_external(
266 vm_size_t size)
267{
268 return IOMalloc_internal(KHEAP_KEXT, size);
269}
270
271void *
272IOMallocZero_external(
273 vm_size_t size);
274void *
275IOMallocZero_external(
276 vm_size_t size)
277{
278 return IOMallocZero_internal(KHEAP_KEXT, size);
279}
280
281/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
282
283void *
284IOMallocZero_internal(struct kalloc_heap *kalloc_heap_cfg, vm_size_t size)
285{
286 void * result;
287 result = IOMalloc_internal(kalloc_heap_cfg, size);
288 if (result) {
289 bzero(result, size);
290 }
291 return result;
292}
293
294/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
295
296#if IOTRACKING
297struct IOLibMallocHeader {
298 IOTrackingAddress tracking;
299};
300#endif
301
302#if IOTRACKING
303#define sizeofIOLibMallocHeader (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
304#else
305#define sizeofIOLibMallocHeader (0)
306#endif
307
308/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
309
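/*
 * Note: when IOTRACKING is enabled and the kIOTracking bit is set in
 * gIOKitDebug, IOMalloc_internal() prepends an IOLibMallocHeader to every
 * allocation so the leak tracker can record it; the pointer handed back to
 * the caller is advanced past that header, and IOFree() steps back over it
 * before calling kfree(). With tracking off, the header size collapses to
 * zero and the result is a plain kheap allocation.
 */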
310void *
311IOMalloc_internal(struct kalloc_heap *kheap, vm_size_t size)
312{
313 void * address;
314 vm_size_t allocSize;
315
316 allocSize = size + sizeofIOLibMallocHeader;
317#if IOTRACKING
318 if (sizeofIOLibMallocHeader && (allocSize <= size)) {
319 return NULL; // overflow
320 }
321#endif
322 address = kheap_alloc_tag_bt(kheap, allocSize, Z_WAITOK, VM_KERN_MEMORY_IOKIT);
323
324 if (address) {
325#if IOTRACKING
326 if (TRACK_ALLOC) {
327 IOLibMallocHeader * hdr;
328 hdr = (typeof(hdr))address;
329 bzero(&hdr->tracking, sizeof(hdr->tracking));
330 hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
331 hdr->tracking.size = size;
332 IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
333 }
334#endif
335 address = (typeof(address))(((uintptr_t) address) + sizeofIOLibMallocHeader);
336
337#if IOALLOCDEBUG
338 OSAddAtomicLong(size, &debug_iomalloc_size);
339#endif
340 IOStatisticsAlloc(kIOStatisticsMalloc, size);
341 }
342
343 return address;
344}
345
346void
347IOFree(void * inAddress, vm_size_t size)
348{
349 void * address;
350
351 if ((address = inAddress)) {
352 address = (typeof(address))(((uintptr_t) address) - sizeofIOLibMallocHeader);
353
354#if IOTRACKING
355 if (TRACK_ALLOC) {
356 IOLibMallocHeader * hdr;
357 struct ptr_reference { void * ptr; };
358 volatile struct ptr_reference ptr;
359
360 // we're about to block in IOTrackingRemove(), make sure the original pointer
361 // exists in memory or a register for leak scanning to find
362 ptr.ptr = inAddress;
363
364 hdr = (typeof(hdr))address;
365 if (size != hdr->tracking.size) {
366 OSReportWithBacktrace("bad IOFree size 0x%lx should be 0x%lx", size, hdr->tracking.size);
367 size = hdr->tracking.size;
368 }
369 IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
370 ptr.ptr = NULL;
371 }
372#endif
373
374 kfree(address, size + sizeofIOLibMallocHeader);
375#if IOALLOCDEBUG
376 OSAddAtomicLong(-size, &debug_iomalloc_size);
377#endif
378 IOStatisticsAlloc(kIOStatisticsFree, size);
379 }
380}
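/*
 * Illustrative usage only (the buffer size here is an arbitrary example, not
 * something this file defines): callers pair IOMalloc() with IOFree() and
 * must pass the original allocation size to IOFree(), since that size feeds
 * kfree() and is cross-checked against the tracking header when tracking is
 * active.
 *
 *     void * buf = IOMalloc(1024);
 *     if (buf != NULL) {
 *         // ... use the 1024-byte buffer ...
 *         IOFree(buf, 1024);    // must match the size passed to IOMalloc()
 *     }
 */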
381
382/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
383
384vm_tag_t
385IOMemoryTag(vm_map_t map)
386{
387 vm_tag_t tag;
388
389 if (!vm_kernel_map_is_kernel(map)) {
390 return VM_MEMORY_IOKIT;
391 }
392
393 tag = vm_tag_bt();
394 if (tag == VM_KERN_MEMORY_NONE) {
395 tag = VM_KERN_MEMORY_IOKIT;
396 }
397
398 return tag;
399}
400
401/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
402
403struct IOLibPageMallocHeader {
404 mach_vm_size_t allocationSize;
405 mach_vm_address_t allocationAddress;
406#if IOTRACKING
407 IOTrackingAddress tracking;
408#endif
409};
410
411#if IOTRACKING
412#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
413#else
414#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader))
415#endif
416
417/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
418void *
419IOMallocAligned_external(
420 vm_size_t size, vm_size_t alignment);
421void *
422IOMallocAligned_external(
423 vm_size_t size, vm_size_t alignment)
424{
425 return IOMallocAligned_internal(KHEAP_KEXT, size, alignment);
426}
427
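/*
 * Sketch of what IOMallocAligned_internal() does for sub-page requests: the
 * allocation is padded by alignMask plus the header size, the returned
 * pointer is rounded up to the requested alignment, and an
 * IOLibPageMallocHeader recording the real allocation size and start address
 * sits immediately before it so IOFreeAligned() can undo the adjustment:
 *
 *     [ padding | IOLibPageMallocHeader | user data (returned, aligned) ]
 */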
428void *
429IOMallocAligned_internal(struct kalloc_heap *kheap, vm_size_t size,
430 vm_size_t alignment)
431{
432 kern_return_t kr;
433 vm_offset_t address;
434 vm_offset_t allocationAddress;
435 vm_size_t adjustedSize;
436 uintptr_t alignMask;
437 IOLibPageMallocHeader * hdr;
438
439 if (size == 0) {
440 return NULL;
441 }
442 if (((uint32_t) alignment) != alignment) {
443 return NULL;
444 }
445
446 alignment = (1UL << log2up((uint32_t) alignment));
447 alignMask = alignment - 1;
448 adjustedSize = size + sizeofIOLibPageMallocHeader;
449
450 if (size > adjustedSize) {
451 address = 0; /* overflow detected */
452 } else if (adjustedSize >= page_size) {
453 kr = kernel_memory_allocate(kernel_map, &address,
454 size, alignMask, KMA_NONE, IOMemoryTag(kernel_map));
455 if (KERN_SUCCESS != kr) {
456 address = 0;
457 }
458#if IOTRACKING
459 else if (TRACK_ALLOC) {
460 IOTrackingAlloc(gIOMallocTracking, address, size);
461 }
462#endif
463 } else {
464 adjustedSize += alignMask;
465
466 if (adjustedSize >= page_size) {
467 kr = kernel_memory_allocate(kernel_map, &allocationAddress,
468 adjustedSize, 0, KMA_NONE, IOMemoryTag(kernel_map));
469 if (KERN_SUCCESS != kr) {
470 allocationAddress = 0;
471 }
472 } else {
473 allocationAddress = (vm_address_t) kheap_alloc_tag_bt(kheap,
474 adjustedSize, Z_WAITOK, VM_KERN_MEMORY_IOKIT);
475 }
476
477 if (allocationAddress) {
478 address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
479 & (~alignMask);
480
481 hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
482 hdr->allocationSize = adjustedSize;
483 hdr->allocationAddress = allocationAddress;
484#if IOTRACKING
485 if (TRACK_ALLOC) {
486 bzero(&hdr->tracking, sizeof(hdr->tracking));
487 hdr->tracking.address = ~address;
488 hdr->tracking.size = size;
489 IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
490 }
491#endif
492 } else {
493 address = 0;
494 }
495 }
496
497 assert(0 == (address & alignMask));
498
499 if (address) {
500#if IOALLOCDEBUG
501 OSAddAtomicLong(size, &debug_iomalloc_size);
502#endif
503 IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
504 }
505
506 return (void *) address;
507}
508
509void
510IOFreeAligned(void * address, vm_size_t size)
511{
512 vm_address_t allocationAddress;
513 vm_size_t adjustedSize;
514 IOLibPageMallocHeader * hdr;
515
516 if (!address) {
517 return;
518 }
519
520 assert(size);
521
522 adjustedSize = size + sizeofIOLibPageMallocHeader;
523 if (adjustedSize >= page_size) {
524#if IOTRACKING
525 if (TRACK_ALLOC) {
526 IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);
527 }
528#endif
529 kmem_free( kernel_map, (vm_offset_t) address, size);
530 } else {
531 hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
532 adjustedSize = hdr->allocationSize;
533 allocationAddress = hdr->allocationAddress;
534
535#if IOTRACKING
536 if (TRACK_ALLOC) {
537 if (size != hdr->tracking.size) {
538 OSReportWithBacktrace("bad IOFreeAligned size 0x%lx should be 0x%lx", size, hdr->tracking.size);
539 size = hdr->tracking.size;
540 }
541 IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
542 }
543#endif
544 if (adjustedSize >= page_size) {
545 kmem_free( kernel_map, allocationAddress, adjustedSize);
546 } else {
547 kfree(allocationAddress, adjustedSize);
548 }
549 }
550
551#if IOALLOCDEBUG
552 OSAddAtomicLong(-size, &debug_iomalloc_size);
553#endif
554
555 IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
556}
557
558/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
559
560void
561IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
562{
563 vm_address_t allocationAddress;
564 vm_size_t adjustedSize;
565 IOLibPageMallocHeader * hdr;
566
567 if (!address) {
568 return;
569 }
570
571 assert(size);
572
573 adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
574 if (adjustedSize >= page_size) {
575#if IOTRACKING
576 if (TRACK_ALLOC) {
577 IOTrackingFree(gIOMallocTracking, address, size);
578 }
579#endif
580 kmem_free( kernel_map, (vm_offset_t) address, size);
581 } else {
582 hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
583 adjustedSize = hdr->allocationSize;
584 allocationAddress = hdr->allocationAddress;
585#if IOTRACKING
586 if (TRACK_ALLOC) {
587 IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
588 }
589#endif
590 kfree(allocationAddress, adjustedSize);
591 }
592
593 IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
594#if IOALLOCDEBUG
595 OSAddAtomicLong(-size, &debug_iomalloc_size);
596#endif
597}
598
599#if __arm__ || __arm64__
600extern unsigned long gPhysBase, gPhysSize;
601#endif
602
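/*
 * IOKernelAllocateWithPhysicalRestrict() takes one of three routes:
 * physically contiguous requests, and physical restrictions that KMA_LOMEM
 * alone cannot satisfy, go to kmem_alloc_contig(); a simple below-4GB
 * restriction becomes a kernel_memory_allocate() call with KMA_LOMEM; and
 * unrestricted, non-contiguous requests are carved out of KHEAP_KEXT with
 * the same header-before-pointer layout as IOMallocAligned_internal().
 */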
603mach_vm_address_t
604IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
605 mach_vm_size_t alignment, bool contiguous)
606{
607 kern_return_t kr;
608 mach_vm_address_t address;
609 mach_vm_address_t allocationAddress;
610 mach_vm_size_t adjustedSize;
611 mach_vm_address_t alignMask;
612 IOLibPageMallocHeader * hdr;
613
614 if (size == 0) {
615 return 0;
616 }
617 if (alignment == 0) {
618 alignment = 1;
619 }
620
621 alignMask = alignment - 1;
622
623 if (os_mul_and_add_overflow(2, size, sizeofIOLibPageMallocHeader, &adjustedSize)) {
624 return 0;
625 }
626
627 contiguous = (contiguous && (adjustedSize > page_size))
628 || (alignment > page_size);
629
630 if (contiguous || maxPhys) {
631 kma_flags_t options = KMA_NONE;
632 vm_offset_t virt;
633
634 adjustedSize = size;
635 contiguous = (contiguous && (adjustedSize > page_size))
636 || (alignment > page_size);
637
638 if (!contiguous) {
639#if __arm__ || __arm64__
640 if (maxPhys >= (mach_vm_address_t)(gPhysBase + gPhysSize)) {
641 maxPhys = 0;
642 } else
643#endif
644 if (maxPhys <= 0xFFFFFFFF) {
645 maxPhys = 0;
646 options = (kma_flags_t)(options | KMA_LOMEM);
647 } else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage)) {
648 maxPhys = 0;
649 }
650 }
651 if (contiguous || maxPhys) {
652 kr = kmem_alloc_contig(kernel_map, &virt, size,
653 alignMask, (ppnum_t) atop(maxPhys), (ppnum_t) atop(alignMask),
654 KMA_NONE, IOMemoryTag(kernel_map));
655 } else {
656 kr = kernel_memory_allocate(kernel_map, &virt,
657 size, alignMask, options, IOMemoryTag(kernel_map));
658 }
659 if (KERN_SUCCESS == kr) {
660 address = virt;
661#if IOTRACKING
662 if (TRACK_ALLOC) {
663 IOTrackingAlloc(gIOMallocTracking, address, size);
664 }
665#endif
666 } else {
667 address = 0;
668 }
669 } else {
670 adjustedSize += alignMask;
671 if (adjustedSize < size) {
672 return 0;
673 }
674 allocationAddress = (mach_vm_address_t) kheap_alloc_tag_bt(KHEAP_KEXT,
675 adjustedSize, Z_WAITOK, VM_KERN_MEMORY_IOKIT);
676
677 if (allocationAddress) {
678 address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
679 & (~alignMask);
680
681 if (atop_32(address) != atop_32(address + size - 1)) {
682 address = round_page(address);
683 }
684
685 hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
686 hdr->allocationSize = adjustedSize;
687 hdr->allocationAddress = allocationAddress;
688#if IOTRACKING
689 if (TRACK_ALLOC) {
690 bzero(&hdr->tracking, sizeof(hdr->tracking));
691 hdr->tracking.address = ~address;
692 hdr->tracking.size = size;
693 IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
694 }
695#endif
696 } else {
697 address = 0;
698 }
699 }
700
701 if (address) {
702 IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
703#if IOALLOCDEBUG
704 OSAddAtomicLong(size, &debug_iomalloc_size);
705#endif
706 }
707
708 return address;
709}
710
711
712/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
713
714struct _IOMallocContiguousEntry {
715 mach_vm_address_t virtualAddr;
716 IOBufferMemoryDescriptor * md;
717 queue_chain_t link;
718};
719typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
720
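/*
 * IOMallocContiguous() has two paths: without a physicalAddress out-param it
 * defers to IOKernelAllocateWithPhysicalRestrict(); with one, it builds an
 * IOBufferMemoryDescriptor restricted to 32-bit physical addresses and
 * remembers it on gIOMallocContiguousEntries so IOFreeContiguous() can tell
 * which of the two paths produced the pointer it is given.
 */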
721void *
722IOMallocContiguous(vm_size_t size, vm_size_t alignment,
723 IOPhysicalAddress * physicalAddress)
724{
725 mach_vm_address_t address = 0;
726
727 if (size == 0) {
728 return NULL;
729 }
730 if (alignment == 0) {
731 alignment = 1;
732 }
733
734 /* Do we want a physical address? */
735 if (!physicalAddress) {
736 address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
737 } else {
738 do {
739 IOBufferMemoryDescriptor * bmd;
740 mach_vm_address_t physicalMask;
741 vm_offset_t alignMask;
742
743 alignMask = alignment - 1;
744 physicalMask = (0xFFFFFFFF ^ alignMask);
745
746 bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
747 kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
748 if (!bmd) {
749 break;
750 }
751
752 _IOMallocContiguousEntry *
753 entry = IONew(_IOMallocContiguousEntry, 1);
754 if (!entry) {
755 bmd->release();
756 break;
757 }
758 entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
759 entry->md = bmd;
760 lck_mtx_lock(gIOMallocContiguousEntriesLock);
761 queue_enter( &gIOMallocContiguousEntries, entry,
762 _IOMallocContiguousEntry *, link );
763 lck_mtx_unlock(gIOMallocContiguousEntriesLock);
764
765 address = (mach_vm_address_t) entry->virtualAddr;
766 *physicalAddress = bmd->getPhysicalAddress();
 767 } while (false);
768 }
769
770 return (void *) address;
771}
772
773void
774IOFreeContiguous(void * _address, vm_size_t size)
775{
776 _IOMallocContiguousEntry * entry;
777 IOMemoryDescriptor * md = NULL;
778
779 mach_vm_address_t address = (mach_vm_address_t) _address;
780
781 if (!address) {
782 return;
783 }
784
785 assert(size);
786
787 lck_mtx_lock(gIOMallocContiguousEntriesLock);
788 queue_iterate( &gIOMallocContiguousEntries, entry,
789 _IOMallocContiguousEntry *, link )
790 {
791 if (entry->virtualAddr == address) {
792 md = entry->md;
793 queue_remove( &gIOMallocContiguousEntries, entry,
794 _IOMallocContiguousEntry *, link );
795 break;
796 }
797 }
798 lck_mtx_unlock(gIOMallocContiguousEntriesLock);
799
800 if (md) {
801 md->release();
802 IODelete(entry, _IOMallocContiguousEntry, 1);
803 } else {
804 IOKernelFreePhysical((mach_vm_address_t) address, size);
805 }
806}
807
808/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
809
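/*
 * Pageable allocations are served from a small array of pageable submaps in
 * gIOKitPageableSpace. IOIteratePageableMaps() runs the callback against the
 * existing maps, starting with the last one that succeeded (the hint), and
 * only when every map reports KERN_NO_SPACE does it kmem_suballoc() another
 * kIOPageableMapSize submap, up to the kIOMaxPageableMaps limit.
 */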
810kern_return_t
811IOIteratePageableMaps(vm_size_t size,
812 IOIteratePageableMapsCallback callback, void * ref)
813{
814 kern_return_t kr = kIOReturnNotReady;
815 vm_size_t segSize;
816 UInt32 attempts;
817 UInt32 index;
818 vm_offset_t min;
819 vm_map_t map;
820
821 if (size > kIOPageableMaxMapSize) {
822 return kIOReturnBadArgument;
823 }
824
825 do {
826 index = gIOKitPageableSpace.hint;
827 attempts = gIOKitPageableSpace.count;
828 while (attempts--) {
829 kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
830 if (KERN_SUCCESS == kr) {
831 gIOKitPageableSpace.hint = index;
832 break;
833 }
834 if (index) {
835 index--;
836 } else {
837 index = gIOKitPageableSpace.count - 1;
838 }
839 }
840 if (KERN_NO_SPACE != kr) {
841 break;
842 }
843
844 lck_mtx_lock( gIOKitPageableSpace.lock );
845
846 index = gIOKitPageableSpace.count;
847 if (index >= (kIOMaxPageableMaps - 1)) {
848 lck_mtx_unlock( gIOKitPageableSpace.lock );
849 break;
850 }
851
852 if (size < kIOPageableMapSize) {
853 segSize = kIOPageableMapSize;
854 } else {
855 segSize = size;
856 }
857
858 min = 0;
859 kr = kmem_suballoc(kernel_map,
860 &min,
861 segSize,
862 TRUE,
863 VM_FLAGS_ANYWHERE,
864 VM_MAP_KERNEL_FLAGS_NONE,
865 VM_KERN_MEMORY_IOKIT,
866 &map);
867 if (KERN_SUCCESS != kr) {
868 lck_mtx_unlock( gIOKitPageableSpace.lock );
869 break;
870 }
871
872 gIOKitPageableSpace.maps[index].map = map;
873 gIOKitPageableSpace.maps[index].address = min;
874 gIOKitPageableSpace.maps[index].end = min + segSize;
875 gIOKitPageableSpace.hint = index;
876 gIOKitPageableSpace.count = index + 1;
877
878 lck_mtx_unlock( gIOKitPageableSpace.lock );
879 } while (true);
880
881 return kr;
882}
883
884struct IOMallocPageableRef {
885 vm_offset_t address;
886 vm_size_t size;
887 vm_tag_t tag;
888};
889
890static kern_return_t
891IOMallocPageableCallback(vm_map_t map, void * _ref)
892{
893 struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
894 kern_return_t kr;
895
896 kr = kmem_alloc_pageable( map, &ref->address, ref->size, ref->tag );
897
898 return kr;
899}
900
901static void *
902IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
903{
904 kern_return_t kr = kIOReturnNotReady;
905 struct IOMallocPageableRef ref;
906
907 if (alignment > page_size) {
908 return NULL;
909 }
910 if (size > kIOPageableMaxMapSize) {
911 return NULL;
912 }
913
914 ref.size = size;
915 ref.tag = tag;
916 kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
917 if (kIOReturnSuccess != kr) {
918 ref.address = 0;
919 }
920
921 return (void *) ref.address;
922}
923
924vm_map_t
925IOPageableMapForAddress( uintptr_t address )
926{
927 vm_map_t map = NULL;
928 UInt32 index;
929
930 for (index = 0; index < gIOKitPageableSpace.count; index++) {
931 if ((address >= gIOKitPageableSpace.maps[index].address)
932 && (address < gIOKitPageableSpace.maps[index].end)) {
933 map = gIOKitPageableSpace.maps[index].map;
934 break;
935 }
936 }
937 if (!map) {
938 panic("IOPageableMapForAddress: null");
939 }
940
941 return map;
942}
943
944static void
945IOFreePageablePages(void * address, vm_size_t size)
946{
947 vm_map_t map;
948
949 map = IOPageableMapForAddress((vm_address_t) address);
950 if (map) {
951 kmem_free( map, (vm_offset_t) address, size);
952 }
953}
954
955static uintptr_t
956IOMallocOnePageablePage(iopa_t * a)
957{
958 return (uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT);
959}
960
961static void *
962IOMallocPageableInternal(vm_size_t size, vm_size_t alignment, bool zeroed)
963{
964 void * addr;
965
966 if (((uint32_t) alignment) != alignment) {
967 return NULL;
968 }
969 if (size >= (page_size - 4 * gIOPageAllocChunkBytes) ||
970 alignment > page_size) {
971 addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
972 /* Memory allocated this way will already be zeroed. */
973 } else {
974 addr = ((void *) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, (uint32_t) alignment));
 975 if (addr && zeroed) {
976 bzero(addr, size);
977 }
978 }
979
980 if (addr) {
981#if IOALLOCDEBUG
982 OSAddAtomicLong(size, &debug_iomallocpageable_size);
983#endif
984 IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
985 }
986
987 return addr;
988}
989
990void *
991IOMallocPageable(vm_size_t size, vm_size_t alignment)
992{
993 return IOMallocPageableInternal(size, alignment, /*zeroed*/ false);
994}
995
996void *
997IOMallocPageableZero(vm_size_t size, vm_size_t alignment)
998{
999 return IOMallocPageableInternal(size, alignment, /*zeroed*/ true);
1000}
1001
1002void
1003IOFreePageable(void * address, vm_size_t size)
1004{
1005#if IOALLOCDEBUG
1006 OSAddAtomicLong(-size, &debug_iomallocpageable_size);
1007#endif
1008 IOStatisticsAlloc(kIOStatisticsFreePageable, size);
1009
1010 if (size < (page_size - 4 * gIOPageAllocChunkBytes)) {
1011 address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
1012 size = page_size;
1013 }
1014 if (address) {
1015 IOFreePageablePages(address, size);
1016 }
1017}
1018
1019/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1020
1021extern "C" void
1022iopa_init(iopa_t * a)
1023{
1024 bzero(a, sizeof(*a));
1025 a->lock = IOLockAlloc();
1026 queue_init(&a->list);
1027}
1028
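/*
 * The iopa_t allocator hands out gIOPageAllocChunkBytes-sized chunks (64 per
 * page) and keeps, per page, a 64-bit bitmap pa->avail in which the most
 * significant bit stands for chunk 0. iopa_allocinpage() finds `count`
 * consecutive free chunks with the classic shift-and-AND trick: after the
 * loop below, a bit survives only where a full run of `count` set bits
 * exists, the AND with `align` keeps only acceptably aligned starting
 * chunks, and __builtin_clzll() turns the first surviving bit into the run's
 * starting chunk index within the page.
 */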
1029static uintptr_t
1030iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
1031{
1032 uint32_t n, s;
1033 uint64_t avail = pa->avail;
1034
1035 assert(avail);
1036
 1037 // find runs of `count` consecutive 1 bits in avail
1038 for (n = count; n > 1; n -= s) {
1039 s = n >> 1;
1040 avail = avail & (avail << s);
1041 }
1042 // and aligned
1043 avail &= align;
1044
1045 if (avail) {
1046 n = __builtin_clzll(avail);
1047 pa->avail &= ~((-1ULL << (64 - count)) >> n);
1048 if (!pa->avail && pa->link.next) {
1049 remque(&pa->link);
1050 pa->link.next = NULL;
1051 }
1052 return n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa);
1053 }
1054
1055 return 0;
1056}
1057
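/*
 * align_masks[k] has a bit set at every chunk index that is a multiple of
 * 2^k (chunk 0 being the most significant bit), so it is exactly the `align`
 * argument iopa_allocinpage() expects: index 0 allows any start, index 1
 * only even chunks, index 6 only chunk 0 (whole-page alignment).
 */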
1058uintptr_t
1059iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, vm_size_t balign)
1060{
1061 static const uint64_t align_masks[] = {
1062 0xFFFFFFFFFFFFFFFF,
1063 0xAAAAAAAAAAAAAAAA,
1064 0x8888888888888888,
1065 0x8080808080808080,
1066 0x8000800080008000,
1067 0x8000000080000000,
1068 0x8000000000000000,
1069 };
1070 iopa_page_t * pa;
1071 uintptr_t addr = 0;
1072 uint32_t count;
1073 uint64_t align;
1074 vm_size_t align_masks_idx;
1075
1076 if (((uint32_t) bytes) != bytes) {
1077 return 0;
1078 }
1079 if (!bytes) {
1080 bytes = 1;
1081 }
1082 count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
1083
1084 align_masks_idx = log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes);
1085 assert(align_masks_idx < sizeof(align_masks) / sizeof(*align_masks));
1086 align = align_masks[align_masks_idx];
1087
1088 IOLockLock(a->lock);
1089 __IGNORE_WCASTALIGN(pa = (typeof(pa))queue_first(&a->list));
1090 while (!queue_end(&a->list, &pa->link)) {
1091 addr = iopa_allocinpage(pa, count, align);
1092 if (addr) {
1093 a->bytecount += bytes;
1094 break;
1095 }
1096 __IGNORE_WCASTALIGN(pa = (typeof(pa))queue_next(&pa->link));
1097 }
1098 IOLockUnlock(a->lock);
1099
1100 if (!addr) {
1101 addr = alloc(a);
1102 if (addr) {
1103 pa = (typeof(pa))(addr + page_size - gIOPageAllocChunkBytes);
1104 pa->signature = kIOPageAllocSignature;
1105 pa->avail = -2ULL;
1106
1107 addr = iopa_allocinpage(pa, count, align);
1108 IOLockLock(a->lock);
1109 if (pa->avail) {
1110 enqueue_head(&a->list, &pa->link);
1111 }
1112 a->pagecount++;
1113 if (addr) {
1114 a->bytecount += bytes;
1115 }
1116 IOLockUnlock(a->lock);
1117 }
1118 }
1119
1120 assert((addr & ((1 << log2up(balign)) - 1)) == 0);
1121 return addr;
1122}
1123
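/*
 * iopa_free() locates the page's iopa_page_t, which lives in the page's last
 * chunk (hence a fresh page starts with avail == -2ULL: every bit set except
 * the one for that metadata chunk), marks the freed chunks available again,
 * and returns the page's base address once avail is back to -2ULL so the
 * caller can release the whole page; otherwise it returns 0.
 */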
1124uintptr_t
1125iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
1126{
1127 iopa_page_t * pa;
1128 uint32_t count;
1129 uintptr_t chunk;
1130
1131 if (((uint32_t) bytes) != bytes) {
1132 return 0;
1133 }
1134 if (!bytes) {
1135 bytes = 1;
1136 }
1137
1138 chunk = (addr & page_mask);
1139 assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));
1140
1141 pa = (typeof(pa))(addr | (page_size - gIOPageAllocChunkBytes));
1142 assert(kIOPageAllocSignature == pa->signature);
1143
1144 count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
1145 chunk /= gIOPageAllocChunkBytes;
1146
1147 IOLockLock(a->lock);
1148 if (!pa->avail) {
1149 assert(!pa->link.next);
1150 enqueue_tail(&a->list, &pa->link);
1151 }
1152 pa->avail |= ((-1ULL << (64 - count)) >> chunk);
1153 if (pa->avail != -2ULL) {
1154 pa = NULL;
1155 } else {
1156 remque(&pa->link);
1157 pa->link.next = NULL;
1158 pa->signature = 0;
1159 a->pagecount--;
1160 // page to free
1161 pa = (typeof(pa))trunc_page(pa);
1162 }
1163 a->bytecount -= bytes;
1164 IOLockUnlock(a->lock);
1165
1166 return (uintptr_t) pa;
1167}
1168
1169/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1170
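/*
 * IOSetProcessorCacheMode() only accepts page-aligned ranges in kernel_task:
 * each page is looked up in the kernel pmap, unmapped, and remapped through
 * IOMapPages() with the requested cache mode shifted into the kIOMapCacheMask
 * bits.
 */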
1171IOReturn
1172IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
1173 IOByteCount length, IOOptionBits cacheMode )
1174{
1175 IOReturn ret = kIOReturnSuccess;
1176 ppnum_t pagenum;
1177
1178 if (task != kernel_task) {
1179 return kIOReturnUnsupported;
1180 }
1181 if ((address | length) & PAGE_MASK) {
1182// OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
1183 return kIOReturnUnsupported;
1184 }
1185 length = round_page(address + length) - trunc_page( address );
1186 address = trunc_page( address );
1187
 1188 // fold the cache mode into the kIOMapCacheMask bits expected by IOMapPages()
1189 cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;
1190
1191 while ((kIOReturnSuccess == ret) && (length > 0)) {
1192 // Get the physical page number
1193 pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
1194 if (pagenum) {
1195 ret = IOUnmapPages( get_task_map(task), address, page_size );
1196 ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
1197 } else {
1198 ret = kIOReturnVMError;
1199 }
1200
1201 address += page_size;
1202 length -= page_size;
1203 }
1204
1205 return ret;
1206}
1207
1208
1209IOReturn
1210IOFlushProcessorCache( task_t task, IOVirtualAddress address,
1211 IOByteCount length )
1212{
1213 if (task != kernel_task) {
1214 return kIOReturnUnsupported;
1215 }
1216
1217 flush_dcache64((addr64_t) address, (unsigned) length, false );
1218
1219 return kIOReturnSuccess;
1220}
1221
1222/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1223
1224vm_offset_t
1225OSKernelStackRemaining( void )
1226{
1227 return ml_stack_remaining();
1228}
1229
1230/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1231
1232/*
1233 * Sleep (block) the calling thread for the indicated number of milliseconds.
1234 */
1235void
1236IOSleep(unsigned milliseconds)
1237{
1238 delay_for_interval(milliseconds, kMillisecondScale);
1239}
1240
1241/*
1242 * Sleep (block) the calling thread for the indicated number of milliseconds,
1243 * plus up to an additional leewayMilliseconds at the scheduler's discretion.
1244 */
1245void
1246IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
1247{
1248 delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);
1249}
1250
1251/*
1252 * Spin-wait for the indicated number of microseconds.
1253 */
1254void
1255IODelay(unsigned microseconds)
1256{
1257 delay_for_interval(microseconds, kMicrosecondScale);
1258}
1259
1260/*
1261 * Spin-wait for the indicated number of nanoseconds.
1262 */
1263void
1264IOPause(unsigned nanoseconds)
1265{
1266 delay_for_interval(nanoseconds, kNanosecondScale);
1267}
1268
1269/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1270
1271static void _IOLogv(const char *format, va_list ap, void *caller) __printflike(1, 0);
1272
1273__attribute__((noinline, not_tail_called))
1274void
1275IOLog(const char *format, ...)
1276{
1277 void *caller = __builtin_return_address(0);
1278 va_list ap;
1279
1280 va_start(ap, format);
1281 _IOLogv(format, ap, caller);
1282 va_end(ap);
1283}
1284
1285__attribute__((noinline, not_tail_called))
1286void
1287IOLogv(const char *format, va_list ap)
1288{
1289 void *caller = __builtin_return_address(0);
1290 _IOLogv(format, ap, caller);
1291}
1292
1293void
1294_IOLogv(const char *format, va_list ap, void *caller)
1295{
1296 va_list ap2;
1297 struct console_printbuf_state info_data;
1298 console_printbuf_state_init(&info_data, TRUE, TRUE);
1299
1300 va_copy(ap2, ap);
1301
1302 os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller);
1303
1304 __doprnt(format, ap2, console_printbuf_putc, &info_data, 16, TRUE);
1305 console_printbuf_clear(&info_data);
1306 va_end(ap2);
1307
1308 assertf(ml_get_interrupts_enabled() || ml_is_quiescing() || debug_mode_active() || !gCPUsRunning, "IOLog called with interrupts disabled");
1309}
1310
1311#if !__LP64__
1312void
1313IOPanic(const char *reason)
1314{
1315 panic("%s", reason);
1316}
1317#endif
1318
1319/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1320
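/*
 * IOKitKernelLogBuffer() hex-dumps at most 4096 bytes of `buffer` through the
 * caller-supplied printf-like `output` routine, 16 bytes per line, each line
 * carrying the offset, the "0xNN, " byte values and a printable-ASCII column.
 */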
1321void
1322IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size,
1323 void (*output)(const char *format, ...))
1324{
1325 size_t idx, linestart;
1326 enum { bytelen = (sizeof("0xZZ, ") - 1) };
1327 char hex[(bytelen * 16) + 1];
1328 uint8_t c, chars[17];
1329
1330 output("%s(0x%lx):\n", title, size);
1331 output(" 0 1 2 3 4 5 6 7 8 9 A B C D E F\n");
1332 if (size > 4096) {
1333 size = 4096;
1334 }
1335 chars[16] = 0;
1336 for (idx = 0, linestart = 0; idx < size;) {
1337 c = ((char *)buffer)[idx];
1338 snprintf(&hex[bytelen * (idx & 15)], bytelen + 1, "0x%02x, ", c);
1339 chars[idx & 15] = ((c >= 0x20) && (c <= 0x7f)) ? c : ' ';
1340 idx++;
1341 if ((idx == size) || !(idx & 15)) {
1342 if (idx & 15) {
1343 chars[idx & 15] = 0;
1344 }
1345 output("/* %04lx: */ %-96s /* |%-16s| */\n", linestart, hex, chars);
1346 linestart += 16;
1347 }
1348 }
1349}
1350
1351/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1352
1353/*
1354 * Convert an integer constant (typically a #define or enum) to a string.
1355 */
1356static char noValue[80]; // shared static scratch buffer; not thread-safe
1357
1358const char *
1359IOFindNameForValue(int value, const IONamedValue *regValueArray)
1360{
1361 for (; regValueArray->name; regValueArray++) {
1362 if (regValueArray->value == value) {
1363 return regValueArray->name;
1364 }
1365 }
1366 snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
1367 return (const char *)noValue;
1368}
1369
1370IOReturn
1371IOFindValueForName(const char *string,
1372 const IONamedValue *regValueArray,
1373 int *value)
1374{
1375 for (; regValueArray->name; regValueArray++) {
1376 if (!strcmp(regValueArray->name, string)) {
1377 *value = regValueArray->value;
1378 return kIOReturnSuccess;
1379 }
1380 }
1381 return kIOReturnBadArgument;
1382}
1383
1384OSString *
1385IOCopyLogNameForPID(int pid)
1386{
1387 char buf[128];
1388 size_t len;
1389 snprintf(buf, sizeof(buf), "pid %d, ", pid);
1390 len = strlen(buf);
1391 proc_name(pid, buf + len, (int) (sizeof(buf) - len));
1392 return OSString::withCString(buf);
1393}
1394
1395/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1396
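/*
 * IOSizeToAlignment() returns the index of the highest set bit in `size`,
 * i.e. floor(log2(size)); IOAlignmentToSize() is its inverse for powers of
 * two. For example, IOSizeToAlignment(4096) == 12 and
 * IOAlignmentToSize(12) == 4096.
 */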
1397IOAlignment
1398IOSizeToAlignment(unsigned int size)
1399{
1400 int shift;
1401 const int intsize = sizeof(unsigned int) * 8;
1402
1403 for (shift = 1; shift < intsize; shift++) {
1404 if (size & 0x80000000) {
1405 return (IOAlignment)(intsize - shift);
1406 }
1407 size <<= 1;
1408 }
1409 return 0;
1410}
1411
1412unsigned int
1413IOAlignmentToSize(IOAlignment align)
1414{
1415 unsigned int size;
1416
1417 for (size = 1; align; align--) {
1418 size <<= 1;
1419 }
1420 return size;
1421}
1422} /* extern "C" */