/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91   Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98   cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <vm/vm_kern.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/OSDebug.h>
#include <sys/sysctl.h>
#endif

#include "libkern/OSAtomic.h"
#include <libkern/c++/OSKext.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <os/log_private.h>
#include <sys/msgbuf.h>

#if IOKITSTATS

#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)

#else

#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */


#define TRACK_ALLOC (IOTRACKING && (kIOTracking & gIOKitDebug))
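/*
 * TRACK_ALLOC (above) is non-zero only when IOTRACKING support is compiled in
 * and the kIOTracking bit is set in gIOKitDebug (typically via the IOKit debug
 * boot-arg), so the per-allocation tracking headers below cost nothing in the
 * normal case.
 */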


extern "C"
{


mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern int
__doprnt(
    const char *fmt,
    va_list     argp,
    void        (*putc)(int, void *),
    void       *arg,
    int         radix,
    int         is_log);

extern void cons_putc_locked(char);
extern void bsd_log_lock(void);
extern void bsd_log_unlock(void);
extern void logwakeup();


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;

iopa_t gIOBMDPageAllocator;

/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

#if __x86_64__
enum { kIOMaxPageableMaps    = 8 };
enum { kIOPageableMapSize    = 512 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
#else
enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
#endif

typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;

static iopa_t gIOPageablePageAllocator;

uint32_t gIOPageAllocChunkBytes;

#if IOTRACKING
IOTrackingQueue * gIOMallocTracking;
IOTrackingQueue * gIOWireTracking;
IOTrackingQueue * gIOMapTracking;
#endif /* IOTRACKING */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if (libInitialized)
        return;

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

#if IOTRACKING
    IOTrackingInit();
    gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0,
                                             kIOTrackingQueueTypeAlloc,
                                             37);
    gIOWireTracking = IOTrackingQueueAlloc(kIOWireTrackingName, 0, 0, page_size, 0, 0);

    size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024*1024);
    gIOMapTracking = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize,
                                          kIOTrackingQueueTypeDefaultOn
                                          | kIOTrackingQueueTypeMap
                                          | kIOTrackingQueueTypeUser,
                                          0);
#endif

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                        &gIOKitPageableSpace.maps[0].address,
                        kIOPageableMapSize,
                        TRUE,
                        VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IOKIT),
                        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    gIOKitPageableSpace.lock        = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock  = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    gIOPageAllocChunkBytes = PAGE_SIZE/64;
    assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
    iopa_init(&gIOBMDPageAllocator);
    iopa_init(&gIOPageablePageAllocator);


    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static uint32_t
log2up(uint32_t size)
{
    if (size <= 1) size = 0;
    else size = 32 - __builtin_clz(size - 1);
    return (size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if IOTRACKING
struct IOLibMallocHeader
{
    IOTrackingAddress tracking;
};
#endif

#if IOTRACKING
#define sizeofIOLibMallocHeader (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibMallocHeader (0)
#endif
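
/*
 * IOMalloc() prepends this header (zero bytes unless tracking is active) to each
 * allocation; IOFree() subtracts the same sizeofIOLibMallocHeader before calling
 * kfree(), so callers always see and free the unpadded pointer and size.
 */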

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMalloc(vm_size_t size)
{
    void *    address;
    vm_size_t allocSize;

    allocSize = size + sizeofIOLibMallocHeader;
#if IOTRACKING
    if (sizeofIOLibMallocHeader && (allocSize <= size)) return (NULL);    // overflow
#endif
    address = kalloc_tag_bt(allocSize, VM_KERN_MEMORY_IOKIT);

    if ( address ) {
#if IOTRACKING
        if (TRACK_ALLOC) {
            IOLibMallocHeader * hdr;
            hdr = (typeof(hdr)) address;
            bzero(&hdr->tracking, sizeof(hdr->tracking));
            hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
            hdr->tracking.size = size;
            IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true);
        }
#endif
        address = (typeof(address)) (((uintptr_t) address) + sizeofIOLibMallocHeader);

#if IOALLOCDEBUG
        OSAddAtomic(size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMalloc, size);
    }

    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {

        address = (typeof(address)) (((uintptr_t) address) - sizeofIOLibMallocHeader);

#if IOTRACKING
        if (TRACK_ALLOC) {
            IOLibMallocHeader * hdr;
            hdr = (typeof(hdr)) address;
            if (size != hdr->tracking.size)
            {
                OSReportWithBacktrace("bad IOFree size 0x%lx should be 0x%lx", size, hdr->tracking.size);
                size = hdr->tracking.size;
            }
            IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
        }
#endif

        kfree(address, size + sizeofIOLibMallocHeader);
#if IOALLOCDEBUG
        OSAddAtomic(-size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsFree, size);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_tag_t
IOMemoryTag(vm_map_t map)
{
    vm_tag_t tag;

    if (!vm_kernel_map_is_kernel(map)) return (VM_MEMORY_IOKIT);

    tag = vm_tag_bt();
    if (tag == VM_KERN_MEMORY_NONE) tag = VM_KERN_MEMORY_IOKIT;

    return (tag);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct IOLibPageMallocHeader
{
    mach_vm_size_t    allocationSize;
    mach_vm_address_t allocationAddress;
#if IOTRACKING
    IOTrackingAddress tracking;
#endif
};

#if IOTRACKING
#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader))
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t           kr;
    vm_offset_t             address;
    vm_offset_t             allocationAddress;
    vm_size_t               adjustedSize;
    uintptr_t               alignMask;
    IOLibPageMallocHeader * hdr;

    if (size == 0)
        return 0;

    alignment = (1UL << log2up(alignment));
    alignMask = alignment - 1;
    adjustedSize = size + sizeofIOLibPageMallocHeader;

    if (size > adjustedSize) {
        address = 0;    /* overflow detected */
    }
    else if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0, IOMemoryTag(kernel_map));
        if (KERN_SUCCESS != kr) address = 0;
#if IOTRACKING
        else if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
#endif

    } else {
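        /*
         * Sub-page case: over-allocate by the alignment mask plus header, round
         * the returned pointer up to the requested alignment, and stash the true
         * allocation address and size in the header immediately below that
         * pointer so IOFreeAligned() can recover them.
         */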

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0, IOMemoryTag(kernel_map));
            if (KERN_SUCCESS != kr) allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);

        if (allocationAddress) {
            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                      & (~alignMask);

            hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
            hdr->allocationSize    = adjustedSize;
            hdr->allocationAddress = allocationAddress;
#if IOTRACKING
            if (TRACK_ALLOC) {
                bzero(&hdr->tracking, sizeof(hdr->tracking));
                hdr->tracking.address = ~address;
                hdr->tracking.size    = size;
                IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true);
            }
#endif
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

    if( address) {
#if IOALLOCDEBUG
        OSAddAtomic(size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
    }

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t            allocationAddress;
    vm_size_t               adjustedSize;
    IOLibPageMallocHeader * hdr;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeofIOLibPageMallocHeader;
    if (adjustedSize >= page_size) {
#if IOTRACKING
        if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);
#endif
        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {
        hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
        adjustedSize      = hdr->allocationSize;
        allocationAddress = hdr->allocationAddress;

#if IOTRACKING
        if (TRACK_ALLOC)
        {
            if (size != hdr->tracking.size)
            {
                OSReportWithBacktrace("bad IOFreeAligned size 0x%lx should be 0x%lx", size, hdr->tracking.size);
                size = hdr->tracking.size;
            }
            IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
        }
#endif
        if (adjustedSize >= page_size) {
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        } else {
            kfree((void *)allocationAddress, adjustedSize);
        }
    }

#if IOALLOCDEBUG
    OSAddAtomic(-size, &debug_iomalloc_size);
#endif

    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t       allocationAddress;
    mach_vm_size_t          adjustedSize;
    IOLibPageMallocHeader * hdr;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
    if (adjustedSize >= page_size) {
#if IOTRACKING
        if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, address, size);
#endif
        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {

        hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
        adjustedSize      = hdr->allocationSize;
        allocationAddress = hdr->allocationAddress;
#if IOTRACKING
        if (TRACK_ALLOC) IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
#endif
        kfree((void *)allocationAddress, adjustedSize);
    }

    IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
    OSAddAtomic(-size, &debug_iomalloc_size);
#endif
}


mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
                                     mach_vm_size_t alignment, bool contiguous)
{
    kern_return_t           kr;
    mach_vm_address_t       address;
    mach_vm_address_t       allocationAddress;
    mach_vm_size_t          adjustedSize;
    mach_vm_address_t       alignMask;
    IOLibPageMallocHeader * hdr;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask    = alignment - 1;
    adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
    if (adjustedSize < size) return (0);

    contiguous = (contiguous && (adjustedSize > page_size))
                 || (alignment > page_size);

    if (contiguous || maxPhys)
    {
        int         options = 0;
        vm_offset_t virt;

        adjustedSize = size;
        contiguous = (contiguous && (adjustedSize > page_size))
                     || (alignment > page_size);

        if (!contiguous)
        {
            if (maxPhys <= 0xFFFFFFFF)
            {
                maxPhys = 0;
                options |= KMA_LOMEM;
            }
            else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage))
            {
                maxPhys = 0;
            }
        }
        if (contiguous || maxPhys)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, atop(maxPhys), atop(alignMask), 0, IOMemoryTag(kernel_map));
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, options, IOMemoryTag(kernel_map));
        }
        if (KERN_SUCCESS == kr)
        {
            address = virt;
#if IOTRACKING
            if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
#endif
        }
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        if (adjustedSize < size) return (0);
        allocationAddress = (mach_vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);

        if (allocationAddress) {

            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                      & (~alignMask);

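            /*
             * adjustedSize reserved 2 * size above, so if the aligned block would
             * straddle a page boundary it can be pushed up to the next page start,
             * keeping a sub-page allocation within a single physical page.
             */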
            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page(address);

            hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
            hdr->allocationSize    = adjustedSize;
            hdr->allocationAddress = allocationAddress;
#if IOTRACKING
            if (TRACK_ALLOC) {
                bzero(&hdr->tracking, sizeof(hdr->tracking));
                hdr->tracking.address = ~address;
                hdr->tracking.size    = size;
                IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true);
            }
#endif
        } else
            address = 0;
    }

    if (address) {
        IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
        OSAddAtomic(size, &debug_iomalloc_size);
#endif
    }

    return (address);
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry
{
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask = alignment - 1;
        physicalMask = (0xFFFFFFFF ^ alignMask);

        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry * entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md          = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                     _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address          = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}

void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if( !address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreePhysical((mach_vm_address_t) address, size);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t IOIteratePageableMaps(vm_size_t size,
                                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

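    /*
     * Try the existing pageable submaps first, starting at the last successful
     * index (the hint); if every map rejects the request, take the lock and grow
     * the pool by carving a new submap out of kernel_map, up to kIOMaxPageableMaps.
     */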
    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                           &min,
                           segSize,
                           TRUE,
                           VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IOKIT),
                           &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

struct IOMallocPageableRef
{
    vm_offset_t address;
    vm_size_t   size;
    vm_tag_t    tag;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size, ref->tag );

    return( kr );
}

static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    ref.tag  = tag;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( uintptr_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        panic("IOPageableMapForAddress: null");

    return( map );
}

static void IOFreePageablePages(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);
}

static uintptr_t IOMallocOnePageablePage(iopa_t * a)
{
    return ((uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT));
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    void * addr;

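    /*
     * Requests within a few chunks of a full page go straight to the pageable
     * maps; smaller ones are sub-allocated from shared pages by the iopa
     * allocator. IOFreePageable() applies the same threshold to pick the
     * matching free path.
     */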
    if (size >= (page_size - 4*gIOPageAllocChunkBytes)) addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
    else addr = ((void *) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment));

    if (addr) {
#if IOALLOCDEBUG
        OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
    }

    return (addr);
}

void IOFreePageable(void * address, vm_size_t size)
{
#if IOALLOCDEBUG
    OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
    IOStatisticsAlloc(kIOStatisticsFreePageable, size);

    if (size < (page_size - 4*gIOPageAllocChunkBytes))
    {
        address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
        size = page_size;
    }
    if (address) IOFreePageablePages(address, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern "C" void
iopa_init(iopa_t * a)
{
    bzero(a, sizeof(*a));
    a->lock = IOLockAlloc();
    queue_init(&a->list);
}

static uintptr_t
iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
{
    uint32_t n, s;
    uint64_t avail = pa->avail;

    assert(avail);

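    /*
     * avail is a bitmap with one bit per gIOPageAllocChunkBytes chunk of the page,
     * most-significant bit first. The shift-and-AND loop below leaves a bit set
     * only at positions where `count` consecutive free chunks begin.
     */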
    // find strings of count 1 bits in avail
    for (n = count; n > 1; n -= s)
    {
        s = n >> 1;
        avail = avail & (avail << s);
    }
    // and aligned
    avail &= align;

    if (avail)
    {
        n = __builtin_clzll(avail);
        pa->avail &= ~((-1ULL << (64 - count)) >> n);
        if (!pa->avail && pa->link.next)
        {
            remque(&pa->link);
            pa->link.next = 0;
        }
        return (n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa));
    }

    return (0);
}

uintptr_t
iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
{
    static const uint64_t align_masks[] = {
        0xFFFFFFFFFFFFFFFF,
        0xAAAAAAAAAAAAAAAA,
        0x8888888888888888,
        0x8080808080808080,
        0x8000800080008000,
        0x8000000080000000,
        0x8000000000000000,
    };
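    /*
     * align_masks[i] marks the chunk positions whose index is a multiple of 2^i,
     * so indexing by log2 of the requested alignment (in chunks) restricts the
     * search to suitably aligned starting chunks.
     */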
    iopa_page_t * pa;
    uintptr_t     addr = 0;
    uint32_t      count;
    uint64_t      align;

    if (!bytes) bytes = 1;
    count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
    align = align_masks[log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes)];

    IOLockLock(a->lock);
    __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_first(&a->list));
    while (!queue_end(&a->list, &pa->link))
    {
        addr = iopa_allocinpage(pa, count, align);
        if (addr)
        {
            a->bytecount += bytes;
            break;
        }
        __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_next(&pa->link));
    }
    IOLockUnlock(a->lock);

    if (!addr)
    {
        addr = alloc(a);
        if (addr)
        {
            pa = (typeof(pa)) (addr + page_size - gIOPageAllocChunkBytes);
            pa->signature = kIOPageAllocSignature;
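            /*
             * -2ULL sets every bit except the least significant one: all chunks
             * are free except the last, which holds this iopa_page_t bookkeeping
             * record.
             */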
            pa->avail = -2ULL;

            addr = iopa_allocinpage(pa, count, align);
            IOLockLock(a->lock);
            if (pa->avail) enqueue_head(&a->list, &pa->link);
            a->pagecount++;
            if (addr) a->bytecount += bytes;
            IOLockUnlock(a->lock);
        }
    }

    assert((addr & ((1 << log2up(balign)) - 1)) == 0);
    return (addr);
}

uintptr_t
iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
{
    iopa_page_t * pa;
    uint32_t      count;
    uintptr_t     chunk;

    if (!bytes) bytes = 1;

    chunk = (addr & page_mask);
    assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));

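    /*
     * The iopa_page_t bookkeeping record lives in the last chunk of the page, so
     * it can be located from any chunk address within that page.
     */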
    pa = (typeof(pa)) (addr | (page_size - gIOPageAllocChunkBytes));
    assert(kIOPageAllocSignature == pa->signature);

    count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
    chunk /= gIOPageAllocChunkBytes;

    IOLockLock(a->lock);
    if (!pa->avail)
    {
        assert(!pa->link.next);
        enqueue_tail(&a->list, &pa->link);
    }
    pa->avail |= ((-1ULL << (64 - count)) >> chunk);
    if (pa->avail != -2ULL) pa = 0;
    else
    {
        remque(&pa->link);
        pa->link.next = 0;
        pa->signature = 0;
        a->pagecount--;
        // page to free
        pa = (typeof(pa)) trunc_page(pa);
    }
    a->bytecount -= bytes;
    IOLockUnlock(a->lock);

    return ((uintptr_t) pa);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );
    if ((address | length) & PAGE_MASK)
    {
//      OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
        return( kIOReturnUnsupported );
    }
    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length  -= page_size;
    }

    return( ret );
}


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_offset_t OSKernelStackRemaining( void )
{
    return (ml_stack_remaining());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Sleep the calling thread for the indicated number of milliseconds.
 */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Sleep the calling thread for the indicated number of milliseconds, plus
 * potentially an additional number of milliseconds up to the leeway value.
 */
void IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
{
    delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);
}

/*
 * Spin for the indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/*
 * Spin for the indicated number of nanoseconds.
 */
void IOPause(unsigned nanoseconds)
{
    delay_for_interval(nanoseconds, kNanosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void _iolog_consputc(int ch, void *arg __unused)
{
    cons_putc_locked(ch);
}

static void _IOLogv(const char *format, va_list ap, void *caller);

__attribute__((noinline,not_tail_called))
void IOLog(const char *format, ...)
{
    void *caller = __builtin_return_address(0);
    va_list ap;

    va_start(ap, format);
    _IOLogv(format, ap, caller);
    va_end(ap);
}

__attribute__((noinline,not_tail_called))
void IOLogv(const char *format, va_list ap)
{
    void *caller = __builtin_return_address(0);
    _IOLogv(format, ap, caller);
}

void _IOLogv(const char *format, va_list ap, void *caller)
{
    va_list ap2;

    /* Ideally not called at interrupt context or with interrupts disabled; needs further validation. */
    /* assert(TRUE == ml_get_interrupts_enabled()); */

    va_copy(ap2, ap);

    os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller);

    __doprnt(format, ap2, _iolog_consputc, NULL, 16, TRUE);
    va_end(ap2);
}

#if !__LP64__
void IOPanic(const char *reason)
{
    panic("%s", reason);
}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];    // shared scratch buffer for unmatched values; not thread-safe

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
                            const IONamedValue *regValueArray,
                            int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

OSString * IOCopyLogNameForPID(int pid)
{
    char   buf[128];
    size_t len;
    snprintf(buf, sizeof(buf), "pid %d, ", pid);
    len = strlen(buf);
    proc_name(pid, buf + len, sizeof(buf) - len);
    return (OSString::withCString(buf));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}

} /* extern "C" */