1 /*
2 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * HISTORY
30 *
31 * 17-Apr-91 Portions from libIO.m, Doug Mitchell at NeXT.
32 * 17-Nov-98 cpp
33 *
34 */
35
36 #include <IOKit/system.h>
37 #include <mach/sync_policy.h>
38 #include <machine/machine_routines.h>
39 #include <vm/vm_kern.h>
40 #include <libkern/c++/OSCPPDebug.h>
41
42 #include <IOKit/assert.h>
43
44 #include <IOKit/IOReturn.h>
45 #include <IOKit/IOLib.h>
46 #include <IOKit/IOLocks.h>
47 #include <IOKit/IOMapper.h>
48 #include <IOKit/IOBufferMemoryDescriptor.h>
49 #include <IOKit/IOKitDebug.h>
50
51 #include "IOKitKernelInternal.h"
52
53 #ifdef IOALLOCDEBUG
54 #include <libkern/OSDebug.h>
55 #include <sys/sysctl.h>
56 #endif
57
58 #include "libkern/OSAtomic.h"
59 #include <libkern/c++/OSKext.h>
60 #include <IOKit/IOStatisticsPrivate.h>
61 #include <os/log_private.h>
62 #include <sys/msgbuf.h>
63 #include <console/serial_protos.h>
64
65 #if IOKITSTATS
66
67 #define IOStatisticsAlloc(type, size) \
68 do { \
69 IOStatistics::countAlloc(type, size); \
70 } while (0)
71
72 #else
73
74 #define IOStatisticsAlloc(type, size)
75
76 #endif /* IOKITSTATS */
77
78
79 #define TRACK_ALLOC (IOTRACKING && (kIOTracking & gIOKitDebug))
80
81
82 extern "C"
83 {
84
85
86 mach_timespec_t IOZeroTvalspec = { 0, 0 };
87
88 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
89
90 extern int
91 __doprnt(
92 const char *fmt,
93 va_list argp,
94 void (*putc)(int, void *),
95 void *arg,
96 int radix,
97 int is_log);
98
99 extern void cons_putc_locked(char);
100 extern void bsd_log_lock(void);
101 extern void bsd_log_unlock(void);
102 extern void logwakeup();
103
104
105 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
106
107 lck_grp_t *IOLockGroup;
108
109 /*
110 * Global variables for use by iLogger
111 * These symbols are for use only by Apple diagnostic code.
112 * Binary compatibility is not guaranteed for kexts that reference these symbols.
113 */
114
115 void *_giDebugLogInternal = NULL;
116 void *_giDebugLogDataInternal = NULL;
117 void *_giDebugReserved1 = NULL;
118 void *_giDebugReserved2 = NULL;
119
120 iopa_t gIOBMDPageAllocator;
121
122 /*
123 * Static variables for this module.
124 */
125
126 static queue_head_t gIOMallocContiguousEntries;
127 static lck_mtx_t * gIOMallocContiguousEntriesLock;
128
129 #if __x86_64__
130 enum { kIOMaxPageableMaps = 8 };
131 enum { kIOPageableMapSize = 512 * 1024 * 1024 };
132 enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
133 #else
134 enum { kIOMaxPageableMaps = 16 };
135 enum { kIOPageableMapSize = 96 * 1024 * 1024 };
136 enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
137 #endif
138
139 typedef struct {
140 vm_map_t map;
141 vm_offset_t address;
142 vm_offset_t end;
143 } IOMapData;
144
145 static struct {
146 UInt32 count;
147 UInt32 hint;
148 IOMapData maps[ kIOMaxPageableMaps ];
149 lck_mtx_t * lock;
150 } gIOKitPageableSpace;
151
152 static iopa_t gIOPageablePageAllocator;
153
154 uint32_t gIOPageAllocChunkBytes;
155
156 #if IOTRACKING
157 IOTrackingQueue * gIOMallocTracking;
158 IOTrackingQueue * gIOWireTracking;
159 IOTrackingQueue * gIOMapTracking;
160 #endif /* IOTRACKING */
161
162 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
163
164 void IOLibInit(void)
165 {
166 kern_return_t ret;
167
168 static bool libInitialized;
169
170 if(libInitialized)
171 return;
172
173 IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);
174
175 #if IOTRACKING
176 IOTrackingInit();
177 gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0,
178 kIOTrackingQueueTypeAlloc,
179 37);
180 gIOWireTracking = IOTrackingQueueAlloc(kIOWireTrackingName, 0, 0, page_size, 0, 0);
181
182 size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024*1024);
183 gIOMapTracking = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize,
184 kIOTrackingQueueTypeDefaultOn
185 | kIOTrackingQueueTypeMap
186 | kIOTrackingQueueTypeUser,
187 0);
188 #endif
189
190 gIOKitPageableSpace.maps[0].address = 0;
191 ret = kmem_suballoc(kernel_map,
192 &gIOKitPageableSpace.maps[0].address,
193 kIOPageableMapSize,
194 TRUE,
195 VM_FLAGS_ANYWHERE,
196 VM_MAP_KERNEL_FLAGS_NONE,
197 VM_KERN_MEMORY_IOKIT,
198 &gIOKitPageableSpace.maps[0].map);
199 if (ret != KERN_SUCCESS)
200 panic("failed to allocate iokit pageable map\n");
201
202 gIOKitPageableSpace.lock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
203 gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
204 gIOKitPageableSpace.hint = 0;
205 gIOKitPageableSpace.count = 1;
206
207 gIOMallocContiguousEntriesLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
208 queue_init( &gIOMallocContiguousEntries );
209
210 gIOPageAllocChunkBytes = PAGE_SIZE/64;
211 assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
212 iopa_init(&gIOBMDPageAllocator);
213 iopa_init(&gIOPageablePageAllocator);
214
215
216 libInitialized = true;
217 }
218
219 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
220
221 static uint32_t
222 log2up(uint32_t size)
223 {
224 if (size <= 1) size = 0;
225 else size = 32 - __builtin_clz(size - 1);
226 return (size);
227 }
228
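/*
 * Illustration (not part of the original file): log2up() returns the
 * exponent of the smallest power of two that is >= size, e.g.
 *
 *     log2up(1) == 0, log2up(2) == 1, log2up(5) == 3, log2up(4096) == 12
 *
 * IOMallocAligned() and iopa_alloc() below use it to round alignment
 * requests up to a power of two.
 */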
229 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
230
231 IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
232 {
233 kern_return_t result;
234 thread_t thread;
235
236 result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
237 if (result != KERN_SUCCESS)
238 return (NULL);
239
240 thread_deallocate(thread);
241
242 return (thread);
243 }
244
245
246 void IOExitThread(void)
247 {
248 (void) thread_terminate(current_thread());
249 }
250
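/*
 * Usage sketch (illustrative only; myWorker and myContext are placeholders):
 * the returned IOThread is not a retained reference -- IOCreateThread()
 * calls thread_deallocate() itself -- so the new thread simply runs the
 * function and calls IOExitThread() when it is finished.
 *
 *     static void myWorker(void * arg)
 *     {
 *         // ... do work with arg ...
 *         IOExitThread();
 *     }
 *
 *     IOThread t = IOCreateThread(&myWorker, myContext);
 *     if (!t) {
 *         // kernel_thread_start() failed
 *     }
 */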
251 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
252
253 #if IOTRACKING
254 struct IOLibMallocHeader
255 {
256 IOTrackingAddress tracking;
257 };
258 #endif
259
260 #if IOTRACKING
261 #define sizeofIOLibMallocHeader (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
262 #else
263 #define sizeofIOLibMallocHeader (0)
264 #endif
265
266 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
267
268 void * IOMalloc(vm_size_t size)
269 {
270 void * address;
271 vm_size_t allocSize;
272
273 allocSize = size + sizeofIOLibMallocHeader;
274 #if IOTRACKING
275 if (sizeofIOLibMallocHeader && (allocSize <= size)) return (NULL); // overflow
276 #endif
277 address = kalloc_tag_bt(allocSize, VM_KERN_MEMORY_IOKIT);
278
279 if ( address ) {
280 #if IOTRACKING
281 if (TRACK_ALLOC) {
282 IOLibMallocHeader * hdr;
283 hdr = (typeof(hdr)) address;
284 bzero(&hdr->tracking, sizeof(hdr->tracking));
285 hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
286 hdr->tracking.size = size;
287 IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
288 }
289 #endif
290 address = (typeof(address)) (((uintptr_t) address) + sizeofIOLibMallocHeader);
291
292 #if IOALLOCDEBUG
293 OSAddAtomic(size, &debug_iomalloc_size);
294 #endif
295 IOStatisticsAlloc(kIOStatisticsMalloc, size);
296 }
297
298 return address;
299 }
300
301 void IOFree(void * inAddress, vm_size_t size)
302 {
303 void * address;
304
305 if ((address = inAddress))
306 {
307 address = (typeof(address)) (((uintptr_t) address) - sizeofIOLibMallocHeader);
308
309 #if IOTRACKING
310 if (TRACK_ALLOC)
311 {
312 IOLibMallocHeader * hdr;
313 struct ptr_reference{ void * ptr; };
314 volatile struct ptr_reference ptr;
315
316 // we're about to block in IOTrackingRemove(), make sure the original pointer
317 // exists in memory or a register for leak scanning to find
318 ptr.ptr = inAddress;
319
320 hdr = (typeof(hdr)) address;
321 if (size != hdr->tracking.size)
322 {
323 OSReportWithBacktrace("bad IOFree size 0x%lx should be 0x%lx", size, hdr->tracking.size);
324 size = hdr->tracking.size;
325 }
326 IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
327 ptr.ptr = NULL;
328 }
329 #endif
330
331 kfree(address, size + sizeofIOLibMallocHeader);
332 #if IOALLOCDEBUG
333 OSAddAtomic(-size, &debug_iomalloc_size);
334 #endif
335 IOStatisticsAlloc(kIOStatisticsFree, size);
336 }
337 }
338
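/*
 * Usage sketch (illustrative only): callers must pass the same size to
 * IOFree() that they passed to IOMalloc(); with IOTRACKING enabled the
 * header records the size and a mismatch is reported via
 * OSReportWithBacktrace() above.
 *
 *     vm_size_t len = 1024;
 *     void * buf = IOMalloc(len);
 *     if (buf) {
 *         bzero(buf, len);
 *         // ... use buf ...
 *         IOFree(buf, len);
 *     }
 */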
339 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
340
341 vm_tag_t
342 IOMemoryTag(vm_map_t map)
343 {
344 vm_tag_t tag;
345
346 if (!vm_kernel_map_is_kernel(map)) return (VM_MEMORY_IOKIT);
347
348 tag = vm_tag_bt();
349 if (tag == VM_KERN_MEMORY_NONE) tag = VM_KERN_MEMORY_IOKIT;
350
351 return (tag);
352 }
353
354 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
355
356 struct IOLibPageMallocHeader
357 {
358 mach_vm_size_t allocationSize;
359 mach_vm_address_t allocationAddress;
360 #if IOTRACKING
361 IOTrackingAddress tracking;
362 #endif
363 };
364
365 #if IOTRACKING
366 #define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
367 #else
368 #define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader))
369 #endif
370
371 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
372
373 void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
374 {
375 kern_return_t kr;
376 vm_offset_t address;
377 vm_offset_t allocationAddress;
378 vm_size_t adjustedSize;
379 uintptr_t alignMask;
380 IOLibPageMallocHeader * hdr;
381
382 if (size == 0)
383 return 0;
384
385 alignment = (1UL << log2up(alignment));
386 alignMask = alignment - 1;
387 adjustedSize = size + sizeofIOLibPageMallocHeader;
388
389 if (size > adjustedSize) {
390 address = 0; /* overflow detected */
391 }
392 else if (adjustedSize >= page_size) {
393
394 kr = kernel_memory_allocate(kernel_map, &address,
395 size, alignMask, 0, IOMemoryTag(kernel_map));
396 if (KERN_SUCCESS != kr) address = 0;
397 #if IOTRACKING
398 else if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
399 #endif
400
401 } else {
402
403 adjustedSize += alignMask;
404
405 if (adjustedSize >= page_size) {
406
407 kr = kernel_memory_allocate(kernel_map, &allocationAddress,
408 adjustedSize, 0, 0, IOMemoryTag(kernel_map));
409 if (KERN_SUCCESS != kr) allocationAddress = 0;
410
411 } else
412 allocationAddress = (vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);
413
414 if (allocationAddress) {
415 address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
416 & (~alignMask);
417
418 hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
419 hdr->allocationSize = adjustedSize;
420 hdr->allocationAddress = allocationAddress;
421 #if IOTRACKING
422 if (TRACK_ALLOC) {
423 bzero(&hdr->tracking, sizeof(hdr->tracking));
424 hdr->tracking.address = ~address;
425 hdr->tracking.size = size;
426 IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
427 }
428 #endif
429 } else
430 address = 0;
431 }
432
433 assert(0 == (address & alignMask));
434
435 if( address) {
436 #if IOALLOCDEBUG
437 OSAddAtomic(size, &debug_iomalloc_size);
438 #endif
439 IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
440 }
441
442 return (void *) address;
443 }
444
445 void IOFreeAligned(void * address, vm_size_t size)
446 {
447 vm_address_t allocationAddress;
448 vm_size_t adjustedSize;
449 IOLibPageMallocHeader * hdr;
450
451 if( !address)
452 return;
453
454 assert(size);
455
456 adjustedSize = size + sizeofIOLibPageMallocHeader;
457 if (adjustedSize >= page_size) {
458 #if IOTRACKING
459 if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);
460 #endif
461 kmem_free( kernel_map, (vm_offset_t) address, size);
462
463 } else {
464 hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
465 adjustedSize = hdr->allocationSize;
466 allocationAddress = hdr->allocationAddress;
467
468 #if IOTRACKING
469 if (TRACK_ALLOC)
470 {
471 if (size != hdr->tracking.size)
472 {
473 OSReportWithBacktrace("bad IOFreeAligned size 0x%lx should be 0x%lx", size, hdr->tracking.size);
474 size = hdr->tracking.size;
475 }
476 IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
477 }
478 #endif
479 if (adjustedSize >= page_size) {
480 kmem_free( kernel_map, allocationAddress, adjustedSize);
481 } else {
482 kfree((void *)allocationAddress, adjustedSize);
483 }
484 }
485
486 #if IOALLOCDEBUG
487 OSAddAtomic(-size, &debug_iomalloc_size);
488 #endif
489
490 IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
491 }
492
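/*
 * Usage sketch (illustrative only): the alignment is rounded up to a power
 * of two via log2up().  Sub-page requests are carved from a kalloc block
 * with an IOLibPageMallocHeader stored just below the returned address;
 * requests of a page or more go straight to kernel_memory_allocate().
 *
 *     void * p = IOMallocAligned(512, 256);   // 256-byte aligned
 *     if (p) {
 *         // ... use p ...
 *         IOFreeAligned(p, 512);              // same size as the allocation
 *     }
 */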
493 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
494
495 void
496 IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
497 {
498 mach_vm_address_t allocationAddress;
499 mach_vm_size_t adjustedSize;
500 IOLibPageMallocHeader * hdr;
501
502 if (!address)
503 return;
504
505 assert(size);
506
507 adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
508 if (adjustedSize >= page_size) {
509 #if IOTRACKING
510 if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, address, size);
511 #endif
512 kmem_free( kernel_map, (vm_offset_t) address, size);
513
514 } else {
515
516 hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
517 adjustedSize = hdr->allocationSize;
518 allocationAddress = hdr->allocationAddress;
519 #if IOTRACKING
520 if (TRACK_ALLOC) IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
521 #endif
522 kfree((void *)allocationAddress, adjustedSize);
523 }
524
525 IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
526 #if IOALLOCDEBUG
527 OSAddAtomic(-size, &debug_iomalloc_size);
528 #endif
529 }
530
531 #if __arm__ || __arm64__
532 extern unsigned long gPhysBase, gPhysSize;
533 #endif
534
535 mach_vm_address_t
536 IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
537 mach_vm_size_t alignment, bool contiguous)
538 {
539 kern_return_t kr;
540 mach_vm_address_t address;
541 mach_vm_address_t allocationAddress;
542 mach_vm_size_t adjustedSize;
543 mach_vm_address_t alignMask;
544 IOLibPageMallocHeader * hdr;
545
546 if (size == 0)
547 return (0);
548 if (alignment == 0)
549 alignment = 1;
550
551 alignMask = alignment - 1;
552 adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
553 if (adjustedSize < size) return (0);
554
555 contiguous = (contiguous && (adjustedSize > page_size))
556 || (alignment > page_size);
557
558 if (contiguous || maxPhys)
559 {
560 int options = 0;
561 vm_offset_t virt;
562
563 adjustedSize = size;
564 contiguous = (contiguous && (adjustedSize > page_size))
565 || (alignment > page_size);
566
567 if (!contiguous)
568 {
569 #if __arm__ || __arm64__
570 if (maxPhys >= (mach_vm_address_t)(gPhysBase + gPhysSize))
571 {
572 maxPhys = 0;
573 }
574 else
575 #endif
576 if (maxPhys <= 0xFFFFFFFF)
577 {
578 maxPhys = 0;
579 options |= KMA_LOMEM;
580 }
581 else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage))
582 {
583 maxPhys = 0;
584 }
585 }
586 if (contiguous || maxPhys)
587 {
588 kr = kmem_alloc_contig(kernel_map, &virt, size,
589 alignMask, atop(maxPhys), atop(alignMask), 0, IOMemoryTag(kernel_map));
590 }
591 else
592 {
593 kr = kernel_memory_allocate(kernel_map, &virt,
594 size, alignMask, options, IOMemoryTag(kernel_map));
595 }
596 if (KERN_SUCCESS == kr)
597 {
598 address = virt;
599 #if IOTRACKING
600 if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
601 #endif
602 }
603 else
604 address = 0;
605 }
606 else
607 {
608 adjustedSize += alignMask;
609 if (adjustedSize < size) return (0);
610 allocationAddress = (mach_vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);
611
612 if (allocationAddress) {
613
614
615 address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
616 & (~alignMask);
617
618 if (atop_32(address) != atop_32(address + size - 1))
619 address = round_page(address);
620
621 hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
622 hdr->allocationSize = adjustedSize;
623 hdr->allocationAddress = allocationAddress;
624 #if IOTRACKING
625 if (TRACK_ALLOC) {
626 bzero(&hdr->tracking, sizeof(hdr->tracking));
627 hdr->tracking.address = ~address;
628 hdr->tracking.size = size;
629 IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
630 }
631 #endif
632 } else
633 address = 0;
634 }
635
636 if (address) {
637 IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
638 #if IOALLOCDEBUG
639 OSAddAtomic(size, &debug_iomalloc_size);
640 #endif
641 }
642
643 return (address);
644 }
645
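/*
 * Illustration (not part of the original file): IOMallocContiguous() below
 * calls this routine when no physical address is requested.  Assuming
 * 4 KB pages, a direct call such as
 *
 *     mach_vm_address_t addr =
 *         IOKernelAllocateWithPhysicalRestrict(8192,            // size
 *                                              0xFFFFFFFFULL,   // maxPhys
 *                                              4096,            // alignment
 *                                              true);           // contiguous
 *
 * spans more than one page and so takes the kmem_alloc_contig() branch
 * above; the matching release is IOKernelFreePhysical(addr, 8192).
 */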
646
647 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
648
649 struct _IOMallocContiguousEntry
650 {
651 mach_vm_address_t virtualAddr;
652 IOBufferMemoryDescriptor * md;
653 queue_chain_t link;
654 };
655 typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
656
657 void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
658 IOPhysicalAddress * physicalAddress)
659 {
660 mach_vm_address_t address = 0;
661
662 if (size == 0)
663 return 0;
664 if (alignment == 0)
665 alignment = 1;
666
667 /* Do we want a physical address? */
668 if (!physicalAddress)
669 {
670 address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
671 }
672 else do
673 {
674 IOBufferMemoryDescriptor * bmd;
675 mach_vm_address_t physicalMask;
676 vm_offset_t alignMask;
677
678 alignMask = alignment - 1;
679 physicalMask = (0xFFFFFFFF ^ alignMask);
680
681 bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
682 kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
683 if (!bmd)
684 break;
685
686 _IOMallocContiguousEntry *
687 entry = IONew(_IOMallocContiguousEntry, 1);
688 if (!entry)
689 {
690 bmd->release();
691 break;
692 }
693 entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
694 entry->md = bmd;
695 lck_mtx_lock(gIOMallocContiguousEntriesLock);
696 queue_enter( &gIOMallocContiguousEntries, entry,
697 _IOMallocContiguousEntry *, link );
698 lck_mtx_unlock(gIOMallocContiguousEntriesLock);
699
700 address = (mach_vm_address_t) entry->virtualAddr;
701 *physicalAddress = bmd->getPhysicalAddress();
702 }
703 while (false);
704
705 return (void *) address;
706 }
707
708 void IOFreeContiguous(void * _address, vm_size_t size)
709 {
710 _IOMallocContiguousEntry * entry;
711 IOMemoryDescriptor * md = NULL;
712
713 mach_vm_address_t address = (mach_vm_address_t) _address;
714
715 if( !address)
716 return;
717
718 assert(size);
719
720 lck_mtx_lock(gIOMallocContiguousEntriesLock);
721 queue_iterate( &gIOMallocContiguousEntries, entry,
722 _IOMallocContiguousEntry *, link )
723 {
724 if( entry->virtualAddr == address ) {
725 md = entry->md;
726 queue_remove( &gIOMallocContiguousEntries, entry,
727 _IOMallocContiguousEntry *, link );
728 break;
729 }
730 }
731 lck_mtx_unlock(gIOMallocContiguousEntriesLock);
732
733 if (md)
734 {
735 md->release();
736 IODelete(entry, _IOMallocContiguousEntry, 1);
737 }
738 else
739 {
740 IOKernelFreePhysical((mach_vm_address_t) address, size);
741 }
742 }
743
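/*
 * Usage sketch (illustrative only): with a physicalAddress pointer the
 * memory comes from an IOBufferMemoryDescriptor whose physical mask
 * constrains it below 4 GB, and the descriptor is parked on
 * gIOMallocContiguousEntries so IOFreeContiguous() can find and release it;
 * without one, the request goes through IOKernelAllocateWithPhysicalRestrict().
 *
 *     IOPhysicalAddress phys = 0;
 *     void * buf = IOMallocContiguous(4096, 4096, &phys);
 *     if (buf) {
 *         // buf is the kernel virtual address, phys the physical address
 *         IOFreeContiguous(buf, 4096);
 *     }
 *
 * Newer code generally allocates an IOBufferMemoryDescriptor directly
 * instead of using this wrapper.
 */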
744 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
745
746 kern_return_t IOIteratePageableMaps(vm_size_t size,
747 IOIteratePageableMapsCallback callback, void * ref)
748 {
749 kern_return_t kr = kIOReturnNotReady;
750 vm_size_t segSize;
751 UInt32 attempts;
752 UInt32 index;
753 vm_offset_t min;
754 vm_map_t map;
755
756 if (size > kIOPageableMaxMapSize)
757 return( kIOReturnBadArgument );
758
759 do {
760 index = gIOKitPageableSpace.hint;
761 attempts = gIOKitPageableSpace.count;
762 while( attempts--) {
763 kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
764 if( KERN_SUCCESS == kr) {
765 gIOKitPageableSpace.hint = index;
766 break;
767 }
768 if( index)
769 index--;
770 else
771 index = gIOKitPageableSpace.count - 1;
772 }
773 if (KERN_NO_SPACE != kr)
774 break;
775
776 lck_mtx_lock( gIOKitPageableSpace.lock );
777
778 index = gIOKitPageableSpace.count;
779 if( index >= (kIOMaxPageableMaps - 1)) {
780 lck_mtx_unlock( gIOKitPageableSpace.lock );
781 break;
782 }
783
784 if( size < kIOPageableMapSize)
785 segSize = kIOPageableMapSize;
786 else
787 segSize = size;
788
789 min = 0;
790 kr = kmem_suballoc(kernel_map,
791 &min,
792 segSize,
793 TRUE,
794 VM_FLAGS_ANYWHERE,
795 VM_MAP_KERNEL_FLAGS_NONE,
796 VM_KERN_MEMORY_IOKIT,
797 &map);
798 if( KERN_SUCCESS != kr) {
799 lck_mtx_unlock( gIOKitPageableSpace.lock );
800 break;
801 }
802
803 gIOKitPageableSpace.maps[index].map = map;
804 gIOKitPageableSpace.maps[index].address = min;
805 gIOKitPageableSpace.maps[index].end = min + segSize;
806 gIOKitPageableSpace.hint = index;
807 gIOKitPageableSpace.count = index + 1;
808
809 lck_mtx_unlock( gIOKitPageableSpace.lock );
810
811 } while( true );
812
813 return kr;
814 }
815
816 struct IOMallocPageableRef
817 {
818 vm_offset_t address;
819 vm_size_t size;
820 vm_tag_t tag;
821 };
822
823 static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
824 {
825 struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
826 kern_return_t kr;
827
828 kr = kmem_alloc_pageable( map, &ref->address, ref->size, ref->tag );
829
830 return( kr );
831 }
832
833 static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
834 {
835 kern_return_t kr = kIOReturnNotReady;
836 struct IOMallocPageableRef ref;
837
838 if (alignment > page_size)
839 return( 0 );
840 if (size > kIOPageableMaxMapSize)
841 return( 0 );
842
843 ref.size = size;
844 ref.tag = tag;
845 kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
846 if( kIOReturnSuccess != kr)
847 ref.address = 0;
848
849 return( (void *) ref.address );
850 }
851
852 vm_map_t IOPageableMapForAddress( uintptr_t address )
853 {
854 vm_map_t map = 0;
855 UInt32 index;
856
857 for( index = 0; index < gIOKitPageableSpace.count; index++) {
858 if( (address >= gIOKitPageableSpace.maps[index].address)
859 && (address < gIOKitPageableSpace.maps[index].end) ) {
860 map = gIOKitPageableSpace.maps[index].map;
861 break;
862 }
863 }
864 if( !map)
865 panic("IOPageableMapForAddress: null");
866
867 return( map );
868 }
869
870 static void IOFreePageablePages(void * address, vm_size_t size)
871 {
872 vm_map_t map;
873
874 map = IOPageableMapForAddress( (vm_address_t) address);
875 if( map)
876 kmem_free( map, (vm_offset_t) address, size);
877 }
878
879 static uintptr_t IOMallocOnePageablePage(iopa_t * a)
880 {
881 return ((uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT));
882 }
883
884 void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
885 {
886 void * addr;
887
888 if (size >= (page_size - 4*gIOPageAllocChunkBytes)) addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
889 else addr = ((void * ) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment));
890
891 if (addr) {
892 #if IOALLOCDEBUG
893 OSAddAtomicLong(size, &debug_iomallocpageable_size);
894 #endif
895 IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
896 }
897
898 return (addr);
899 }
900
901 void IOFreePageable(void * address, vm_size_t size)
902 {
903 #if IOALLOCDEBUG
904 OSAddAtomicLong(-size, &debug_iomallocpageable_size);
905 #endif
906 IOStatisticsAlloc(kIOStatisticsFreePageable, size);
907
908 if (size < (page_size - 4*gIOPageAllocChunkBytes))
909 {
910 address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
911 size = page_size;
912 }
913 if (address) IOFreePageablePages(address, size);
914 }
915
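/*
 * Illustration (not part of the original file): the pageable path has two
 * tiers.  With 4 KB pages gIOPageAllocChunkBytes is 64, so requests smaller
 * than page_size - 256 bytes are carved out of shared pages by
 * gIOPageablePageAllocator, while larger requests get whole pages from one
 * of the gIOKitPageableSpace submaps.
 *
 *     void * small = IOMallocPageable(512, 64);          // iopa_alloc() path
 *     void * big   = IOMallocPageable(64 * 1024, 4096);  // pageable map path
 *     if (small) IOFreePageable(small, 512);
 *     if (big)   IOFreePageable(big, 64 * 1024);
 */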
916 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
917
918 extern "C" void
919 iopa_init(iopa_t * a)
920 {
921 bzero(a, sizeof(*a));
922 a->lock = IOLockAlloc();
923 queue_init(&a->list);
924 }
925
926 static uintptr_t
927 iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
928 {
929 uint32_t n, s;
930 uint64_t avail = pa->avail;
931
932 assert(avail);
933
934 	    // find runs of count contiguous 1 bits in avail
935 for (n = count; n > 1; n -= s)
936 {
937 s = n >> 1;
938 avail = avail & (avail << s);
939 }
940 // and aligned
941 avail &= align;
942
943 if (avail)
944 {
945 n = __builtin_clzll(avail);
946 pa->avail &= ~((-1ULL << (64 - count)) >> n);
947 if (!pa->avail && pa->link.next)
948 {
949 remque(&pa->link);
950 pa->link.next = 0;
951 }
952 return (n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa));
953 }
954
955 return (0);
956 }
957
958 uintptr_t
959 iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
960 {
961 static const uint64_t align_masks[] = {
962 0xFFFFFFFFFFFFFFFF,
963 0xAAAAAAAAAAAAAAAA,
964 0x8888888888888888,
965 0x8080808080808080,
966 0x8000800080008000,
967 0x8000000080000000,
968 0x8000000000000000,
969 };
970 iopa_page_t * pa;
971 uintptr_t addr = 0;
972 uint32_t count;
973 uint64_t align;
974
975 if (!bytes) bytes = 1;
976 count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
977 align = align_masks[log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes)];
978
979 IOLockLock(a->lock);
980 __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_first(&a->list));
981 while (!queue_end(&a->list, &pa->link))
982 {
983 addr = iopa_allocinpage(pa, count, align);
984 if (addr)
985 {
986 a->bytecount += bytes;
987 break;
988 }
989 __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_next(&pa->link));
990 }
991 IOLockUnlock(a->lock);
992
993 if (!addr)
994 {
995 addr = alloc(a);
996 if (addr)
997 {
998 pa = (typeof(pa)) (addr + page_size - gIOPageAllocChunkBytes);
999 pa->signature = kIOPageAllocSignature;
1000 pa->avail = -2ULL;
1001
1002 addr = iopa_allocinpage(pa, count, align);
1003 IOLockLock(a->lock);
1004 if (pa->avail) enqueue_head(&a->list, &pa->link);
1005 a->pagecount++;
1006 if (addr) a->bytecount += bytes;
1007 IOLockUnlock(a->lock);
1008 }
1009 }
1010
1011 assert((addr & ((1 << log2up(balign)) - 1)) == 0);
1012 return (addr);
1013 }
1014
1015 uintptr_t
1016 iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
1017 {
1018 iopa_page_t * pa;
1019 uint32_t count;
1020 uintptr_t chunk;
1021
1022 if (!bytes) bytes = 1;
1023
1024 chunk = (addr & page_mask);
1025 assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));
1026
1027 pa = (typeof(pa)) (addr | (page_size - gIOPageAllocChunkBytes));
1028 assert(kIOPageAllocSignature == pa->signature);
1029
1030 count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
1031 chunk /= gIOPageAllocChunkBytes;
1032
1033 IOLockLock(a->lock);
1034 if (!pa->avail)
1035 {
1036 assert(!pa->link.next);
1037 enqueue_tail(&a->list, &pa->link);
1038 }
1039 pa->avail |= ((-1ULL << (64 - count)) >> chunk);
1040 if (pa->avail != -2ULL) pa = 0;
1041 else
1042 {
1043 remque(&pa->link);
1044 pa->link.next = 0;
1045 pa->signature = 0;
1046 a->pagecount--;
1047 // page to free
1048 pa = (typeof(pa)) trunc_page(pa);
1049 }
1050 a->bytecount -= bytes;
1051 IOLockUnlock(a->lock);
1052
1053 return ((uintptr_t) pa);
1054 }
1055
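/*
 * Worked example (illustrative only, assuming 4 KB pages): each backing
 * page is split into 64 chunks of gIOPageAllocChunkBytes (64) bytes, and
 * pa->avail tracks them MSB-first, i.e. chunk 0 is bit 63.  A fresh page
 * starts at -2ULL because the last chunk holds the iopa_page_t bookkeeping
 * itself.  A 200-byte request needs count = 4 chunks, so iopa_allocinpage()
 * ANDs shifted copies of avail to find a run of four free chunks, then
 * clears ((-1ULL << 60) >> n) to claim chunks n..n+3.  iopa_free() sets
 * those bits back, and once avail returns to -2ULL the whole page is
 * returned to the caller to be freed.
 */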
1056 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1057
1058 IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
1059 IOByteCount length, IOOptionBits cacheMode )
1060 {
1061 IOReturn ret = kIOReturnSuccess;
1062 ppnum_t pagenum;
1063
1064 if( task != kernel_task)
1065 return( kIOReturnUnsupported );
1066 if ((address | length) & PAGE_MASK)
1067 {
1068 // OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
1069 return( kIOReturnUnsupported );
1070 }
1071 length = round_page(address + length) - trunc_page( address );
1072 address = trunc_page( address );
1073
1074 // make map mode
1075 cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;
1076
1077 while( (kIOReturnSuccess == ret) && (length > 0) ) {
1078
1079 // Get the physical page number
1080 pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
1081 if( pagenum) {
1082 ret = IOUnmapPages( get_task_map(task), address, page_size );
1083 ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
1084 } else
1085 ret = kIOReturnVMError;
1086
1087 address += page_size;
1088 length -= page_size;
1089 }
1090
1091 return( ret );
1092 }
1093
1094
1095 IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
1096 IOByteCount length )
1097 {
1098 if( task != kernel_task)
1099 return( kIOReturnUnsupported );
1100
1101 flush_dcache64( (addr64_t) address, (unsigned) length, false );
1102
1103 return( kIOReturnSuccess );
1104 }
1105
1106 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1107
1108 vm_offset_t OSKernelStackRemaining( void )
1109 {
1110 return (ml_stack_remaining());
1111 }
1112
1113 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1114
1115 /*
1116  * Sleep the calling thread for the indicated number of milliseconds.
1117 */
1118 void IOSleep(unsigned milliseconds)
1119 {
1120 delay_for_interval(milliseconds, kMillisecondScale);
1121 }
1122
1123 /*
1124  * Sleep the calling thread for the indicated number of milliseconds, and
1125  * potentially an additional number of milliseconds up to the leeway value.
1126 */
1127 void IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
1128 {
1129 delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);
1130 }
1131
1132 /*
1133 * Spin for indicated number of microseconds.
1134 */
1135 void IODelay(unsigned microseconds)
1136 {
1137 delay_for_interval(microseconds, kMicrosecondScale);
1138 }
1139
1140 /*
1141 * Spin for indicated number of nanoseconds.
1142 */
1143 void IOPause(unsigned nanoseconds)
1144 {
1145 delay_for_interval(nanoseconds, kNanosecondScale);
1146 }
1147
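/*
 * Usage note (illustrative only): IOSleep()/IOSleepWithLeeway() block the
 * calling thread, while IODelay() and IOPause() busy-wait and are meant
 * for short intervals.
 *
 *     IOSleep(100);   // let hardware settle for ~100 ms
 *     IODelay(50);    // 50 us spin between register accesses
 */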
1148 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1149
1150 static void _IOLogv(const char *format, va_list ap, void *caller) __printflike(1,0);
1151
1152 __attribute__((noinline,not_tail_called))
1153 void IOLog(const char *format, ...)
1154 {
1155 void *caller = __builtin_return_address(0);
1156 va_list ap;
1157
1158 va_start(ap, format);
1159 _IOLogv(format, ap, caller);
1160 va_end(ap);
1161 }
1162
1163 __attribute__((noinline,not_tail_called))
1164 void IOLogv(const char *format, va_list ap)
1165 {
1166 void *caller = __builtin_return_address(0);
1167 _IOLogv(format, ap, caller);
1168 }
1169
1170 void _IOLogv(const char *format, va_list ap, void *caller)
1171 {
1172 va_list ap2;
1173 struct console_printbuf_state info_data;
1174 console_printbuf_state_init(&info_data, TRUE, TRUE);
1175
1176 va_copy(ap2, ap);
1177
1178 os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller);
1179
1180 __doprnt(format, ap2, console_printbuf_putc, &info_data, 16, TRUE);
1181 console_printbuf_clear(&info_data);
1182 va_end(ap2);
1183
1184 assertf(ml_get_interrupts_enabled() || ml_is_quiescing() || debug_mode_active() || !gCPUsRunning, "IOLog called with interrupts disabled");
1185 }
1186
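/*
 * Usage sketch (illustrative only; requestID and status are placeholders):
 * IOLog()/IOLogv() feed both os_log and the console ring buffer, and per
 * the assertf() above should not be called with interrupts disabled.
 *
 *     IOLog("MyDriver: request 0x%08x completed, status %d\n",
 *           (uint32_t) requestID, (int) status);
 */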
1187 #if !__LP64__
1188 void IOPanic(const char *reason)
1189 {
1190 panic("%s", reason);
1191 }
1192 #endif
1193
1194 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1195
1196 /*
1197  * Convert an integer constant (typically a #define or enum) to a string.
1198 */
1199 static char noValue[80]; // that's pretty
1200
1201 const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
1202 {
1203 for( ; regValueArray->name; regValueArray++) {
1204 if(regValueArray->value == value)
1205 return(regValueArray->name);
1206 }
1207 snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
1208 return((const char *)noValue);
1209 }
1210
1211 IOReturn IOFindValueForName(const char *string,
1212 const IONamedValue *regValueArray,
1213 int *value)
1214 {
1215 for( ; regValueArray->name; regValueArray++) {
1216 if(!strcmp(regValueArray->name, string)) {
1217 *value = regValueArray->value;
1218 return kIOReturnSuccess;
1219 }
1220 }
1221 return kIOReturnBadArgument;
1222 }
1223
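/*
 * Usage sketch (illustrative only; assumes IONamedValue's { value, name }
 * layout from IOTypes.h): both lookups walk a caller-supplied table
 * terminated by a NULL name.
 *
 *     static const IONamedValue gPowerStateNames[] = {
 *         { 0, "off"   },
 *         { 1, "sleep" },
 *         { 2, "on"    },
 *         { 0, NULL    }
 *     };
 *
 *     const char * name = IOFindNameForValue(2, gPowerStateNames);   // "on"
 *     int value;
 *     IOReturn ret = IOFindValueForName("sleep", gPowerStateNames, &value); // value == 1
 */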
1224 OSString * IOCopyLogNameForPID(int pid)
1225 {
1226 char buf[128];
1227 size_t len;
1228 snprintf(buf, sizeof(buf), "pid %d, ", pid);
1229 len = strlen(buf);
1230 proc_name(pid, buf + len, sizeof(buf) - len);
1231 return (OSString::withCString(buf));
1232 }
1233
1234 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1235
1236 IOAlignment IOSizeToAlignment(unsigned int size)
1237 {
1238 int shift;
1239 const int intsize = sizeof(unsigned int) * 8;
1240
1241 for (shift = 1; shift < intsize; shift++) {
1242 if (size & 0x80000000)
1243 return (IOAlignment)(intsize - shift);
1244 size <<= 1;
1245 }
1246 return 0;
1247 }
1248
1249 unsigned int IOAlignmentToSize(IOAlignment align)
1250 {
1251 unsigned int size;
1252
1253 for (size = 1; align; align--) {
1254 size <<= 1;
1255 }
1256 return size;
1257 }
1258
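/*
 * Illustration (not part of the original file): IOSizeToAlignment() returns
 * the bit index of the highest set bit in size, and IOAlignmentToSize()
 * inverts it for power-of-two sizes, e.g.
 *
 *     IOSizeToAlignment(4096) == 12        IOAlignmentToSize(12) == 4096
 *     IOSizeToAlignment(4097) == 12        (non powers of two round down)
 */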
1259 } /* extern "C" */
1260
1261
1262