/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98    cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <vm/vm_kern.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/OSDebug.h>
#include <sys/sysctl.h>
#endif

#include "libkern/OSAtomic.h"
#include <libkern/c++/OSKext.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <sys/msgbuf.h>

#if IOKITSTATS

#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)

#else

#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */

extern "C"
{


mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern int
__doprnt(
    const char  *fmt,
    va_list     argp,
    void        (*putc)(int, void *),
    void        *arg,
    int         radix);

extern void cons_putc_locked(char);
extern void bsd_log_lock(void);
extern void bsd_log_unlock(void);
extern void logwakeup();


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;

iopa_t gIOBMDPageAllocator;

/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;

static iopa_t gIOPageablePageAllocator;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if (libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                        &gIOKitPageableSpace.maps[0].address,
                        kIOPageableMapSize,
                        TRUE,
                        VM_FLAGS_ANYWHERE,
                        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint = 0;
    gIOKitPageableSpace.count = 1;

    gIOMallocContiguousEntriesLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    iopa_init(&gIOBMDPageAllocator);
    iopa_init(&gIOPageablePageAllocator);

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

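/*
 * Illustrative sketch (not part of the original file): a caller passes
 * IOCreateThread() a function matching IOThreadFunc, i.e. taking a single
 * void * argument, and the started thread normally finishes by calling
 * IOExitThread(). myWorker and myContext below are hypothetical names.
 *
 *     static void myWorker(void *arg)
 *     {
 *         // ... do work with arg ...
 *         IOExitThread();
 *     }
 *
 *     IOThread t = IOCreateThread(&myWorker, myContext);
 *     if (t == NULL) {
 *         // thread could not be started
 *     }
 */
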
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */


void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
    if (address) {
#if IOALLOCDEBUG
        debug_iomalloc_size += size;
#endif
        IOStatisticsAlloc(kIOStatisticsMalloc, size);
    }

    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
        IOStatisticsAlloc(kIOStatisticsFree, size);
    }
}

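/*
 * Illustrative sketch (not part of the original file): because IOMalloc()
 * and IOFree() are thin wrappers over kalloc()/kfree(), the caller must
 * remember the allocation size and pass the identical value to IOFree().
 *
 *     void *buf = IOMalloc(1024);
 *     if (buf != NULL) {
 *         // ... use the 1024-byte buffer ...
 *         IOFree(buf, 1024);   // size must match the IOMalloc() request
 *     }
 */
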
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_offset_t   address;
    vm_offset_t   allocationAddress;
    vm_size_t     adjustedSize;
    uintptr_t     alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (size > adjustedSize) {
        address = 0;    /* overflow detected */
    }
    else if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                       + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                      & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t) - sizeof(vm_address_t)))
                = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

    if (address) {
#if IOALLOCDEBUG
        debug_iomalloc_size += size;
#endif
        IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
    }

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif

    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}

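/*
 * Illustrative sketch (not part of the original file): for sub-page
 * requests, IOMallocAligned() stashes the adjusted allocation size and the
 * unaligned base address in the words just below the pointer it returns,
 * which is why IOFreeAligned() must be given the same size that was
 * originally requested.
 *
 *     // a 4096-byte buffer aligned to a 4096-byte boundary
 *     void *buf = IOMallocAligned(4096, 4096);
 *     if (buf != NULL) {
 *         // ... use buf ...
 *         IOFreeAligned(buf, 4096);
 *     }
 */
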
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {

        adjustedSize = *((mach_vm_size_t *)
                        (address - sizeof(mach_vm_address_t) - sizeof(mach_vm_size_t)));
        allocationAddress = *((mach_vm_address_t *)
                        (address - sizeof(mach_vm_address_t) ));
        kfree((void *)allocationAddress, adjustedSize);
    }

    IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
                                     mach_vm_size_t alignment, bool contiguous)
{
    kern_return_t     kr;
    mach_vm_address_t address;
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;
    mach_vm_address_t alignMask;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);

    contiguous = (contiguous && (adjustedSize > page_size))
                 || (alignment > page_size);

    if (contiguous || maxPhys)
    {
        int         options = 0;
        vm_offset_t virt;

        adjustedSize = size;
        contiguous = (contiguous && (adjustedSize > page_size))
                     || (alignment > page_size);

        if (!contiguous)
        {
            if (maxPhys <= 0xFFFFFFFF)
            {
                maxPhys = 0;
                options |= KMA_LOMEM;
            }
            else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage))
            {
                maxPhys = 0;
            }
        }
        if (contiguous || maxPhys)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, atop(maxPhys), atop(alignMask), 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, options);
        }
        if (KERN_SUCCESS == kr)
            address = virt;
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                       + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
                      & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page(address);

            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
                            - sizeof(mach_vm_address_t))) = adjustedSize;
            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    if (address) {
        IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
        debug_iomalloc_size += size;
#endif
    }

    return (address);
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry
{
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask = alignment - 1;
        physicalMask = (0xFFFFFFFF ^ alignMask);

        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry *
        entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md          = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                     _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address          = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}

void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if (!address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if (entry->virtualAddr == address) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreePhysical((mach_vm_address_t) address, size);
    }
}

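/*
 * Illustrative sketch (not part of the original file): when the caller
 * supplies a physicalAddress pointer, the allocation is backed by an
 * IOBufferMemoryDescriptor restricted to 32-bit physical addresses, and the
 * buffer's physical address is returned through that pointer.
 *
 *     IOPhysicalAddress phys;
 *     void *buf = IOMallocContiguous(8192, 4096, &phys);
 *     if (buf != NULL) {
 *         // ... program hardware with 'phys', touch the buffer via 'buf' ...
 *         IOFreeContiguous(buf, 8192);
 *     }
 */
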
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while (attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if (KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if (index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if (KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if (index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if (size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                           &min,
                           segSize,
                           TRUE,
                           VM_FLAGS_ANYWHERE,
                           &map);
        if (KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while (true);

    return kr;
}

struct IOMallocPageableRef
{
    vm_offset_t address;
    vm_size_t   size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if (kIOReturnSuccess != kr)
        ref.address = 0;

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( uintptr_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for (index = 0; index < gIOKitPageableSpace.count; index++) {
        if ((address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end)) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if (!map)
        panic("IOPageableMapForAddress: null");

    return( map );
}

static void IOFreePageablePages(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if (map)
        kmem_free( map, (vm_offset_t) address, size);
}

static uintptr_t IOMallocOnePageablePage(iopa_t * a)
{
    return ((uintptr_t) IOMallocPageablePages(page_size, page_size));
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    void * addr;

    if (size >= (page_size - 4*kIOPageAllocChunkBytes))
        addr = IOMallocPageablePages(size, alignment);
    else
        addr = ((void *) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment));

    if (addr) {
#if IOALLOCDEBUG
        debug_iomallocpageable_size += size;
#endif
        IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
    }

    return (addr);
}

void IOFreePageable(void * address, vm_size_t size)
{
#if IOALLOCDEBUG
    debug_iomallocpageable_size -= size;
#endif
    IOStatisticsAlloc(kIOStatisticsFreePageable, size);

    if (size < (page_size - 4*kIOPageAllocChunkBytes))
    {
        address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
        size = page_size;
    }
    if (address) IOFreePageablePages(address, size);
}

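/*
 * Illustrative sketch (not part of the original file): small pageable
 * requests are carved out of shared pages by the iopa allocator, while
 * larger ones get whole pages from the pageable submaps, so IOFreePageable()
 * must be called with the same size that was passed to IOMallocPageable().
 *
 *     void *scratch = IOMallocPageable(512, sizeof(uint64_t));
 *     if (scratch != NULL) {
 *         // ... use only in contexts that can tolerate page faults ...
 *         IOFreePageable(scratch, 512);
 *     }
 */
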
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if 0
#undef assert
#define assert(ex)  \
    ((ex) ? (void)0 : Assert(__FILE__, __LINE__, # ex))
#endif

/* compile-time check: the per-page allocator header must fit in one chunk */
typedef char iopa_page_t_assert[(sizeof(iopa_page_t) <= kIOPageAllocChunkBytes) ? 1 : -1];

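/*
 * Note (editorial summary, not part of the original file): each page handed
 * to the iopa allocator is split into 64 chunks of kIOPageAllocChunkBytes.
 * The iopa_page_t header lives in the last chunk of the page, and pa->avail
 * is a bitmap of free chunks with the most significant bit standing for
 * chunk 0; a fresh page therefore starts with avail == -2ULL (everything
 * free except the header chunk), and a page whose avail returns to -2ULL in
 * iopa_free() is completely empty and can be released.
 */
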
extern "C" void
iopa_init(iopa_t * a)
{
    bzero(a, sizeof(*a));
    a->lock = IOLockAlloc();
    queue_init(&a->list);
}

static uintptr_t
iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
{
    uint32_t n, s;
    uint64_t avail = pa->avail;

    assert(avail);

    // find runs of 'count' consecutive 1 bits in avail
    for (n = count; n > 1; n -= s)
    {
        s = n >> 1;
        avail = avail & (avail << s);
    }
    // and aligned
    avail &= align;

    if (avail)
    {
        n = __builtin_clzll(avail);
        pa->avail &= ~((-1ULL << (64 - count)) >> n);
        if (!pa->avail && pa->link.next)
        {
            remque(&pa->link);
            pa->link.next = 0;
        }
        return (n * kIOPageAllocChunkBytes + trunc_page((uintptr_t) pa));
    }

    return (0);
}

static uint32_t
log2up(uint32_t size)
{
    if (size <= 1) size = 0;
    else           size = 32 - __builtin_clz(size - 1);
    return (size);
}

uintptr_t
iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
{
    // entry k of align_masks marks the chunk positions that satisfy an
    // alignment of 2^k chunks
    static const uint64_t align_masks[] = {
        0xFFFFFFFFFFFFFFFF,
        0xAAAAAAAAAAAAAAAA,
        0x8888888888888888,
        0x8080808080808080,
        0x8000800080008000,
        0x8000000080000000,
        0x8000000000000000,
    };
    iopa_page_t * pa;
    uintptr_t     addr = 0;
    uint32_t      count;
    uint64_t      align;

    if (!bytes) bytes = 1;
    count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
    align = align_masks[log2up((balign + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes)];

    IOLockLock(a->lock);
    pa = (typeof(pa)) queue_first(&a->list);
    while (!queue_end(&a->list, &pa->link))
    {
        addr = iopa_allocinpage(pa, count, align);
        if (addr)
        {
            a->bytecount += bytes;
            break;
        }
        pa = (typeof(pa)) queue_next(&pa->link);
    }
    IOLockUnlock(a->lock);

    if (!addr)
    {
        addr = alloc(a);
        if (addr)
        {
            pa = (typeof(pa)) (addr + page_size - kIOPageAllocChunkBytes);
            pa->signature = kIOPageAllocSignature;
            pa->avail     = -2ULL;

            addr = iopa_allocinpage(pa, count, align);
            IOLockLock(a->lock);
            if (pa->avail) enqueue_head(&a->list, &pa->link);
            a->pagecount++;
            if (addr) a->bytecount += bytes;
            IOLockUnlock(a->lock);
        }
    }

    assert((addr & ((1 << log2up(balign)) - 1)) == 0);
    return (addr);
}

uintptr_t
iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
{
    iopa_page_t * pa;
    uint32_t      count;
    uintptr_t     chunk;

    if (!bytes) bytes = 1;

    chunk = (addr & page_mask);
    assert(0 == (chunk & (kIOPageAllocChunkBytes - 1)));

    pa = (typeof(pa)) (addr | (page_size - kIOPageAllocChunkBytes));
    assert(kIOPageAllocSignature == pa->signature);

    count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
    chunk /= kIOPageAllocChunkBytes;

    IOLockLock(a->lock);
    if (!pa->avail)
    {
        assert(!pa->link.next);
        enqueue_tail(&a->list, &pa->link);
    }
    pa->avail |= ((-1ULL << (64 - count)) >> chunk);
    if (pa->avail != -2ULL) pa = 0;
    else
    {
        remque(&pa->link);
        pa->link.next = 0;
        pa->signature = 0;
        a->pagecount--;
        // page to free
        pa = (typeof(pa)) trunc_page(pa);
    }
    a->bytecount -= bytes;
    IOLockUnlock(a->lock);

    return ((uintptr_t) pa);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if (task != kernel_task)
        return( kIOReturnUnsupported );
    if ((address | length) & PAGE_MASK)
    {
//      OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
        return( kIOReturnUnsupported );
    }
    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while ((kIOReturnSuccess == ret) && (length > 0)) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if (pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}

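/*
 * Illustrative sketch (not part of the original file): both the address and
 * the length must be page aligned, and only kernel_task mappings are
 * supported. 'vaddr' below stands for a page-aligned kernel virtual address
 * that is already mapped; kIOInhibitCache is one of the standard IOKit
 * cache-mode constants.
 *
 *     IOReturn r = IOSetProcessorCacheMode(kernel_task, vaddr,
 *                                          page_size, kIOInhibitCache);
 *     if (r != kIOReturnSuccess) {
 *         // mapping could not be changed
 *     }
 */
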

IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if (task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_offset_t OSKernelStackRemaining( void )
{
    return (ml_stack_remaining());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Sleep the calling thread for the indicated number of milliseconds.
 */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/*
 * Spin for indicated number of nanoseconds.
 */
void IOPause(unsigned nanoseconds)
{
    delay_for_interval(nanoseconds, kNanosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void _iolog_consputc(int ch, void *arg __unused)
{
    cons_putc_locked(ch);
}

static void _iolog_logputc(int ch, void *arg __unused)
{
    log_putc_locked(ch);
}

void IOLog(const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    IOLogv(format, ap);
    va_end(ap);
}

void IOLogv(const char *format, va_list ap)
{
    va_list ap2;

    va_copy(ap2, ap);

    bsd_log_lock();
    __doprnt(format, ap, _iolog_logputc, NULL, 16);
    bsd_log_unlock();
    logwakeup();

    __doprnt(format, ap2, _iolog_consputc, NULL, 16);
}

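/*
 * Illustrative sketch (not part of the original file): IOLog() accepts a
 * printf-style format string and routes the output to both the system log
 * and the console via __doprnt(). 'status' below is a hypothetical variable
 * used only for the example.
 *
 *     IOLog("MyDriver: start failed, status 0x%08x\n", status);
 */
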
#if !__LP64__
void IOPanic(const char *reason)
{
    panic("%s", reason);
}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];    // that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for ( ; regValueArray->name; regValueArray++) {
        if (regValueArray->value == value)
            return(regValueArray->name);
    }
    snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
                            const IONamedValue *regValueArray,
                            int *value)
{
    for ( ; regValueArray->name; regValueArray++) {
        if (!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

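/*
 * Illustrative sketch (not part of the original file): lookup tables for
 * IOFindNameForValue()/IOFindValueForName() are arrays of IONamedValue
 * terminated by an entry whose name is NULL, since both loops stop on a
 * NULL name. gPowerStateNames is a hypothetical table used only here.
 *
 *     static const IONamedValue gPowerStateNames[] = {
 *         { 0, "off"   },
 *         { 1, "sleep" },
 *         { 2, "on"    },
 *         { 0, NULL    }       // terminator
 *     };
 *
 *     const char *name = IOFindNameForValue(2, gPowerStateNames);  // "on"
 */
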
OSString * IOCopyLogNameForPID(int pid)
{
    char   buf[128];
    size_t len;
    snprintf(buf, sizeof(buf), "pid %d, ", pid);
    len = strlen(buf);
    proc_name(pid, buf + len, sizeof(buf) - len);
    return (OSString::withCString(buf));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int    intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
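
/*
 * Worked example (editorial note, not part of the original file): the two
 * routines are inverses for power-of-two sizes. IOSizeToAlignment() returns
 * the bit position of the highest set bit, so IOSizeToAlignment(4096) == 12,
 * and IOAlignmentToSize(12) == 1 << 12 == 4096.
 */
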

} /* extern "C" */