/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98    cpp
 *
 */
35 | ||
36 | #include <IOKit/system.h> | |
37 | #include <mach/sync_policy.h> | |
38 | #include <machine/machine_routines.h> | |
39 | #include <vm/vm_kern.h> | |
40 | #include <libkern/c++/OSCPPDebug.h> | |
41 | ||
42 | #include <IOKit/assert.h> | |
43 | ||
44 | #include <IOKit/IOReturn.h> | |
45 | #include <IOKit/IOLib.h> | |
46 | #include <IOKit/IOLocks.h> | |
47 | #include <IOKit/IOMapper.h> | |
48 | #include <IOKit/IOBufferMemoryDescriptor.h> | |
49 | #include <IOKit/IOKitDebug.h> | |
50 | ||
51 | #include "IOKitKernelInternal.h" | |
52 | ||
53 | #ifdef IOALLOCDEBUG | |
54 | #include <libkern/OSDebug.h> | |
55 | #include <sys/sysctl.h> | |
56 | #endif | |
57 | ||
58 | #include "libkern/OSAtomic.h" | |
59 | #include <libkern/c++/OSKext.h> | |
60 | #include <IOKit/IOStatisticsPrivate.h> | |
61 | #include <os/log_private.h> | |
62 | #include <sys/msgbuf.h> | |
63 | ||

#if IOKITSTATS

#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)

#else

#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */


#define TRACK_ALLOC    (IOTRACKING && (kIOTracking & gIOKitDebug))


extern "C"
{


mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern int
__doprnt(
    const char  *fmt,
    va_list     argp,
    void        (*putc)(int, void *),
    void        *arg,
    int         radix,
    int         is_log);

extern void cons_putc_locked(char);
extern void bsd_log_lock(void);
extern void bsd_log_unlock(void);
extern void logwakeup();
103 | ||
104 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
105 | ||
106 | lck_grp_t *IOLockGroup; | |
107 | ||
108 | /* | |
109 | * Global variables for use by iLogger | |
110 | * These symbols are for use only by Apple diagnostic code. | |
111 | * Binary compatibility is not guaranteed for kexts that reference these symbols. | |
112 | */ | |
113 | ||
114 | void *_giDebugLogInternal = NULL; | |
115 | void *_giDebugLogDataInternal = NULL; | |
116 | void *_giDebugReserved1 = NULL; | |
117 | void *_giDebugReserved2 = NULL; | |
118 | ||
119 | iopa_t gIOBMDPageAllocator; | |
120 | ||
121 | /* | |
122 | * Static variables for this module. | |
123 | */ | |
124 | ||
125 | static queue_head_t gIOMallocContiguousEntries; | |
126 | static lck_mtx_t * gIOMallocContiguousEntriesLock; | |
127 | ||
128 | #if __x86_64__ | |
129 | enum { kIOMaxPageableMaps = 8 }; | |
130 | enum { kIOPageableMapSize = 512 * 1024 * 1024 }; | |
131 | enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 }; | |
132 | #else | |
133 | enum { kIOMaxPageableMaps = 16 }; | |
134 | enum { kIOPageableMapSize = 96 * 1024 * 1024 }; | |
135 | enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 }; | |
136 | #endif | |
137 | ||
138 | typedef struct { | |
139 | vm_map_t map; | |
140 | vm_offset_t address; | |
141 | vm_offset_t end; | |
142 | } IOMapData; | |
143 | ||
144 | static struct { | |
145 | UInt32 count; | |
146 | UInt32 hint; | |
147 | IOMapData maps[ kIOMaxPageableMaps ]; | |
148 | lck_mtx_t * lock; | |
149 | } gIOKitPageableSpace; | |
150 | ||
151 | static iopa_t gIOPageablePageAllocator; | |
152 | ||
153 | uint32_t gIOPageAllocChunkBytes; | |
154 | ||
155 | #if IOTRACKING | |
156 | IOTrackingQueue * gIOMallocTracking; | |
157 | IOTrackingQueue * gIOWireTracking; | |
158 | IOTrackingQueue * gIOMapTracking; | |
159 | #endif /* IOTRACKING */ | |
160 | ||

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if (libInitialized)
        return;

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

#if IOTRACKING
    IOTrackingInit();
    gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0,
                                             kIOTrackingQueueTypeAlloc,
                                             37);
    gIOWireTracking   = IOTrackingQueueAlloc(kIOWireTrackingName, 0, 0, page_size, 0, 0);

    size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024*1024);
    gIOMapTracking = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize,
                                          kIOTrackingQueueTypeDefaultOn
                                           | kIOTrackingQueueTypeMap
                                           | kIOTrackingQueueTypeUser,
                                          0);
#endif

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                        &gIOKitPageableSpace.maps[0].address,
                        kIOPageableMapSize,
                        TRUE,
                        VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IOKIT),
                        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    gIOKitPageableSpace.lock         = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end  = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint         = 0;
    gIOKitPageableSpace.count        = 1;

    gIOMallocContiguousEntriesLock   = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    gIOPageAllocChunkBytes = PAGE_SIZE/64;
    assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
    iopa_init(&gIOBMDPageAllocator);
    iopa_init(&gIOPageablePageAllocator);


    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static uint32_t
log2up(uint32_t size)
{
    if (size <= 1) size = 0;
    else size = 32 - __builtin_clz(size - 1);
    return (size);
}
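
/*
 * Worked examples: log2up() computes the ceiling of log2, i.e. the
 * smallest power-of-two exponent that covers "size":
 *
 *    log2up(1)    == 0
 *    log2up(2)    == 1
 *    log2up(3)    == 2
 *    log2up(4)    == 2
 *    log2up(1000) == 10
 */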
225 | ||
226 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
227 | ||
228 | IOThread IOCreateThread(IOThreadFunc fcn, void *arg) | |
229 | { | |
230 | kern_return_t result; | |
231 | thread_t thread; | |
232 | ||
233 | result = kernel_thread_start((thread_continue_t)fcn, arg, &thread); | |
234 | if (result != KERN_SUCCESS) | |
235 | return (NULL); | |
236 | ||
237 | thread_deallocate(thread); | |
238 | ||
239 | return (thread); | |
240 | } | |
241 | ||
242 | ||
243 | void IOExitThread(void) | |
244 | { | |
245 | (void) thread_terminate(current_thread()); | |
246 | } | |
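
/*
 * Minimal usage sketch (the entry point and argument names below are
 * illustrative, not part of this file): the new thread runs fcn(arg)
 * and should terminate itself with IOExitThread().
 *
 *    static void myThreadEntry(void * arg)
 *    {
 *        MyDriverState * state = (MyDriverState *) arg;
 *        // ... do work ...
 *        IOExitThread();
 *    }
 *
 *    IOThread t = IOCreateThread(&myThreadEntry, state);
 *    if (!t) {
 *        // kernel_thread_start() failed
 *    }
 */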
247 | ||
248 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
249 | ||
250 | #if IOTRACKING | |
251 | struct IOLibMallocHeader | |
252 | { | |
253 | IOTrackingAddress tracking; | |
254 | }; | |
255 | #endif | |
256 | ||
257 | #if IOTRACKING | |
258 | #define sizeofIOLibMallocHeader (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress))) | |
259 | #else | |
260 | #define sizeofIOLibMallocHeader (0) | |
261 | #endif | |
262 | ||
263 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
264 | ||
265 | void * IOMalloc(vm_size_t size) | |
266 | { | |
267 | void * address; | |
268 | vm_size_t allocSize; | |
269 | ||
270 | allocSize = size + sizeofIOLibMallocHeader; | |
271 | #if IOTRACKING | |
272 | if (sizeofIOLibMallocHeader && (allocSize <= size)) return (NULL); // overflow | |
273 | #endif | |
274 | address = kalloc_tag_bt(allocSize, VM_KERN_MEMORY_IOKIT); | |
275 | ||
276 | if ( address ) { | |
277 | #if IOTRACKING | |
278 | if (TRACK_ALLOC) { | |
279 | IOLibMallocHeader * hdr; | |
280 | hdr = (typeof(hdr)) address; | |
281 | bzero(&hdr->tracking, sizeof(hdr->tracking)); | |
282 | hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader); | |
283 | hdr->tracking.size = size; | |
284 | IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true); | |
285 | } | |
286 | #endif | |
287 | address = (typeof(address)) (((uintptr_t) address) + sizeofIOLibMallocHeader); | |
288 | ||
289 | #if IOALLOCDEBUG | |
290 | OSAddAtomic(size, &debug_iomalloc_size); | |
291 | #endif | |
292 | IOStatisticsAlloc(kIOStatisticsMalloc, size); | |
293 | } | |
294 | ||
295 | return address; | |
296 | } | |
297 | ||
298 | void IOFree(void * inAddress, vm_size_t size) | |
299 | { | |
300 | void * address; | |
301 | ||
302 | if ((address = inAddress)) | |
303 | { | |
304 | address = (typeof(address)) (((uintptr_t) address) - sizeofIOLibMallocHeader); | |
305 | ||
306 | #if IOTRACKING | |
307 | if (TRACK_ALLOC) | |
308 | { | |
309 | IOLibMallocHeader * hdr; | |
310 | struct ptr_reference{ void * ptr; }; | |
311 | volatile struct ptr_reference ptr; | |
312 | ||
313 | // we're about to block in IOTrackingRemove(), make sure the original pointer | |
314 | // exists in memory or a register for leak scanning to find | |
315 | ptr.ptr = inAddress; | |
316 | ||
317 | hdr = (typeof(hdr)) address; | |
318 | if (size != hdr->tracking.size) | |
319 | { | |
320 | OSReportWithBacktrace("bad IOFree size 0x%lx should be 0x%lx", size, hdr->tracking.size); | |
321 | size = hdr->tracking.size; | |
322 | } | |
323 | IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size); | |
324 | ptr.ptr = NULL; | |
325 | } | |
326 | #endif | |
327 | ||
328 | kfree(address, size + sizeofIOLibMallocHeader); | |
329 | #if IOALLOCDEBUG | |
330 | OSAddAtomic(-size, &debug_iomalloc_size); | |
331 | #endif | |
332 | IOStatisticsAlloc(kIOStatisticsFree, size); | |
333 | } | |
334 | } | |
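
/*
 * Minimal usage sketch: IOFree() must receive exactly the size that was
 * passed to IOMalloc().
 *
 *    void * buf = IOMalloc(1024);
 *    if (buf) {
 *        // ... use buf ...
 *        IOFree(buf, 1024);
 *    }
 */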
335 | ||
336 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
337 | ||
338 | vm_tag_t | |
339 | IOMemoryTag(vm_map_t map) | |
340 | { | |
341 | vm_tag_t tag; | |
342 | ||
343 | if (!vm_kernel_map_is_kernel(map)) return (VM_MEMORY_IOKIT); | |
344 | ||
345 | tag = vm_tag_bt(); | |
346 | if (tag == VM_KERN_MEMORY_NONE) tag = VM_KERN_MEMORY_IOKIT; | |
347 | ||
348 | return (tag); | |
349 | } | |
350 | ||
351 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
352 | ||
353 | struct IOLibPageMallocHeader | |
354 | { | |
355 | mach_vm_size_t allocationSize; | |
356 | mach_vm_address_t allocationAddress; | |
357 | #if IOTRACKING | |
358 | IOTrackingAddress tracking; | |
359 | #endif | |
360 | }; | |
361 | ||
362 | #if IOTRACKING | |
363 | #define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress))) | |
364 | #else | |
365 | #define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader)) | |
366 | #endif | |
367 | ||
368 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
369 | ||
370 | void * IOMallocAligned(vm_size_t size, vm_size_t alignment) | |
371 | { | |
372 | kern_return_t kr; | |
373 | vm_offset_t address; | |
374 | vm_offset_t allocationAddress; | |
375 | vm_size_t adjustedSize; | |
376 | uintptr_t alignMask; | |
377 | IOLibPageMallocHeader * hdr; | |
378 | ||
379 | if (size == 0) | |
380 | return 0; | |
381 | ||
382 | alignment = (1UL << log2up(alignment)); | |
383 | alignMask = alignment - 1; | |
384 | adjustedSize = size + sizeofIOLibPageMallocHeader; | |
385 | ||
386 | if (size > adjustedSize) { | |
387 | address = 0; /* overflow detected */ | |
388 | } | |
389 | else if (adjustedSize >= page_size) { | |
390 | ||
391 | kr = kernel_memory_allocate(kernel_map, &address, | |
392 | size, alignMask, 0, IOMemoryTag(kernel_map)); | |
393 | if (KERN_SUCCESS != kr) address = 0; | |
394 | #if IOTRACKING | |
395 | else if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size); | |
396 | #endif | |
397 | ||
398 | } else { | |
399 | ||
400 | adjustedSize += alignMask; | |
401 | ||
402 | if (adjustedSize >= page_size) { | |
403 | ||
404 | kr = kernel_memory_allocate(kernel_map, &allocationAddress, | |
405 | adjustedSize, 0, 0, IOMemoryTag(kernel_map)); | |
406 | if (KERN_SUCCESS != kr) allocationAddress = 0; | |
407 | ||
408 | } else | |
409 | allocationAddress = (vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT); | |
410 | ||
411 | if (allocationAddress) { | |
412 | address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader) | |
413 | & (~alignMask); | |
414 | ||
415 | hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader); | |
416 | hdr->allocationSize = adjustedSize; | |
417 | hdr->allocationAddress = allocationAddress; | |
418 | #if IOTRACKING | |
419 | if (TRACK_ALLOC) { | |
420 | bzero(&hdr->tracking, sizeof(hdr->tracking)); | |
421 | hdr->tracking.address = ~address; | |
422 | hdr->tracking.size = size; | |
423 | IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true); | |
424 | } | |
425 | #endif | |
426 | } else | |
427 | address = 0; | |
428 | } | |
429 | ||
430 | assert(0 == (address & alignMask)); | |
431 | ||
432 | if( address) { | |
433 | #if IOALLOCDEBUG | |
434 | OSAddAtomic(size, &debug_iomalloc_size); | |
435 | #endif | |
436 | IOStatisticsAlloc(kIOStatisticsMallocAligned, size); | |
437 | } | |
438 | ||
439 | return (void *) address; | |
440 | } | |
441 | ||
442 | void IOFreeAligned(void * address, vm_size_t size) | |
443 | { | |
444 | vm_address_t allocationAddress; | |
445 | vm_size_t adjustedSize; | |
446 | IOLibPageMallocHeader * hdr; | |
447 | ||
448 | if( !address) | |
449 | return; | |
450 | ||
451 | assert(size); | |
452 | ||
453 | adjustedSize = size + sizeofIOLibPageMallocHeader; | |
454 | if (adjustedSize >= page_size) { | |
455 | #if IOTRACKING | |
456 | if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size); | |
457 | #endif | |
458 | kmem_free( kernel_map, (vm_offset_t) address, size); | |
459 | ||
460 | } else { | |
461 | hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader); | |
462 | adjustedSize = hdr->allocationSize; | |
463 | allocationAddress = hdr->allocationAddress; | |
464 | ||
465 | #if IOTRACKING | |
466 | if (TRACK_ALLOC) | |
467 | { | |
468 | if (size != hdr->tracking.size) | |
469 | { | |
470 | OSReportWithBacktrace("bad IOFreeAligned size 0x%lx should be 0x%lx", size, hdr->tracking.size); | |
471 | size = hdr->tracking.size; | |
472 | } | |
473 | IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size); | |
474 | } | |
475 | #endif | |
476 | if (adjustedSize >= page_size) { | |
477 | kmem_free( kernel_map, allocationAddress, adjustedSize); | |
478 | } else { | |
479 | kfree((void *)allocationAddress, adjustedSize); | |
480 | } | |
481 | } | |
482 | ||
483 | #if IOALLOCDEBUG | |
484 | OSAddAtomic(-size, &debug_iomalloc_size); | |
485 | #endif | |
486 | ||
487 | IOStatisticsAlloc(kIOStatisticsFreeAligned, size); | |
488 | } | |
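
/*
 * Minimal usage sketch: alignment is rounded up to a power of two, and
 * the size passed to IOFreeAligned() must match the original request.
 *
 *    void * buf = IOMallocAligned(0x2000, 0x1000);    // 8KB, page-aligned
 *    if (buf) {
 *        assert(0 == (((uintptr_t) buf) & 0xFFF));
 *        IOFreeAligned(buf, 0x2000);
 *    }
 */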
489 | ||
490 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
491 | ||
492 | void | |
493 | IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size) | |
494 | { | |
495 | mach_vm_address_t allocationAddress; | |
496 | mach_vm_size_t adjustedSize; | |
497 | IOLibPageMallocHeader * hdr; | |
498 | ||
499 | if (!address) | |
500 | return; | |
501 | ||
502 | assert(size); | |
503 | ||
504 | adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader; | |
505 | if (adjustedSize >= page_size) { | |
506 | #if IOTRACKING | |
507 | if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, address, size); | |
508 | #endif | |
509 | kmem_free( kernel_map, (vm_offset_t) address, size); | |
510 | ||
511 | } else { | |
512 | ||
513 | hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader); | |
514 | adjustedSize = hdr->allocationSize; | |
515 | allocationAddress = hdr->allocationAddress; | |
516 | #if IOTRACKING | |
517 | if (TRACK_ALLOC) IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size); | |
518 | #endif | |
519 | kfree((void *)allocationAddress, adjustedSize); | |
520 | } | |
521 | ||
522 | IOStatisticsAlloc(kIOStatisticsFreeContiguous, size); | |
523 | #if IOALLOCDEBUG | |
524 | OSAddAtomic(-size, &debug_iomalloc_size); | |
525 | #endif | |
526 | } | |
527 | ||
528 | ||
529 | mach_vm_address_t | |
530 | IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys, | |
531 | mach_vm_size_t alignment, bool contiguous) | |
532 | { | |
533 | kern_return_t kr; | |
534 | mach_vm_address_t address; | |
535 | mach_vm_address_t allocationAddress; | |
536 | mach_vm_size_t adjustedSize; | |
537 | mach_vm_address_t alignMask; | |
538 | IOLibPageMallocHeader * hdr; | |
539 | ||
540 | if (size == 0) | |
541 | return (0); | |
542 | if (alignment == 0) | |
543 | alignment = 1; | |
544 | ||
545 | alignMask = alignment - 1; | |
546 | adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader; | |
547 | if (adjustedSize < size) return (0); | |
548 | ||
549 | contiguous = (contiguous && (adjustedSize > page_size)) | |
550 | || (alignment > page_size); | |
551 | ||
552 | if (contiguous || maxPhys) | |
553 | { | |
554 | int options = 0; | |
555 | vm_offset_t virt; | |
556 | ||
557 | adjustedSize = size; | |
558 | contiguous = (contiguous && (adjustedSize > page_size)) | |
559 | || (alignment > page_size); | |
560 | ||
561 | if (!contiguous) | |
562 | { | |
563 | if (maxPhys <= 0xFFFFFFFF) | |
564 | { | |
565 | maxPhys = 0; | |
566 | options |= KMA_LOMEM; | |
567 | } | |
568 | else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage)) | |
569 | { | |
570 | maxPhys = 0; | |
571 | } | |
572 | } | |
573 | if (contiguous || maxPhys) | |
574 | { | |
575 | kr = kmem_alloc_contig(kernel_map, &virt, size, | |
576 | alignMask, atop(maxPhys), atop(alignMask), 0, IOMemoryTag(kernel_map)); | |
577 | } | |
578 | else | |
579 | { | |
580 | kr = kernel_memory_allocate(kernel_map, &virt, | |
581 | size, alignMask, options, IOMemoryTag(kernel_map)); | |
582 | } | |
583 | if (KERN_SUCCESS == kr) | |
584 | { | |
585 | address = virt; | |
586 | #if IOTRACKING | |
587 | if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size); | |
588 | #endif | |
589 | } | |
590 | else | |
591 | address = 0; | |
592 | } | |
593 | else | |
594 | { | |
595 | adjustedSize += alignMask; | |
596 | if (adjustedSize < size) return (0); | |
597 | allocationAddress = (mach_vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT); | |
598 | ||
599 | if (allocationAddress) { | |
600 | ||
601 | ||
602 | address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader) | |
603 | & (~alignMask); | |
604 | ||
605 | if (atop_32(address) != atop_32(address + size - 1)) | |
606 | address = round_page(address); | |
607 | ||
608 | hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader); | |
609 | hdr->allocationSize = adjustedSize; | |
610 | hdr->allocationAddress = allocationAddress; | |
611 | #if IOTRACKING | |
612 | if (TRACK_ALLOC) { | |
613 | bzero(&hdr->tracking, sizeof(hdr->tracking)); | |
614 | hdr->tracking.address = ~address; | |
615 | hdr->tracking.size = size; | |
616 | IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true); | |
617 | } | |
618 | #endif | |
619 | } else | |
620 | address = 0; | |
621 | } | |
622 | ||
623 | if (address) { | |
624 | IOStatisticsAlloc(kIOStatisticsMallocContiguous, size); | |
625 | #if IOALLOCDEBUG | |
626 | OSAddAtomic(size, &debug_iomalloc_size); | |
627 | #endif | |
628 | } | |
629 | ||
630 | return (address); | |
631 | } | |
632 | ||
633 | ||
634 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
635 | ||
636 | struct _IOMallocContiguousEntry | |
637 | { | |
638 | mach_vm_address_t virtualAddr; | |
639 | IOBufferMemoryDescriptor * md; | |
640 | queue_chain_t link; | |
641 | }; | |
642 | typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry; | |
643 | ||
644 | void * IOMallocContiguous(vm_size_t size, vm_size_t alignment, | |
645 | IOPhysicalAddress * physicalAddress) | |
646 | { | |
647 | mach_vm_address_t address = 0; | |
648 | ||
649 | if (size == 0) | |
650 | return 0; | |
651 | if (alignment == 0) | |
652 | alignment = 1; | |
653 | ||
654 | /* Do we want a physical address? */ | |
655 | if (!physicalAddress) | |
656 | { | |
657 | address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true); | |
658 | } | |
659 | else do | |
660 | { | |
661 | IOBufferMemoryDescriptor * bmd; | |
662 | mach_vm_address_t physicalMask; | |
663 | vm_offset_t alignMask; | |
664 | ||
665 | alignMask = alignment - 1; | |
666 | physicalMask = (0xFFFFFFFF ^ alignMask); | |
667 | ||
668 | bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask( | |
669 | kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask); | |
670 | if (!bmd) | |
671 | break; | |
672 | ||
673 | _IOMallocContiguousEntry * | |
674 | entry = IONew(_IOMallocContiguousEntry, 1); | |
675 | if (!entry) | |
676 | { | |
677 | bmd->release(); | |
678 | break; | |
679 | } | |
680 | entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy(); | |
681 | entry->md = bmd; | |
682 | lck_mtx_lock(gIOMallocContiguousEntriesLock); | |
683 | queue_enter( &gIOMallocContiguousEntries, entry, | |
684 | _IOMallocContiguousEntry *, link ); | |
685 | lck_mtx_unlock(gIOMallocContiguousEntriesLock); | |
686 | ||
687 | address = (mach_vm_address_t) entry->virtualAddr; | |
688 | *physicalAddress = bmd->getPhysicalAddress(); | |
689 | } | |
690 | while (false); | |
691 | ||
692 | return (void *) address; | |
693 | } | |
694 | ||
695 | void IOFreeContiguous(void * _address, vm_size_t size) | |
696 | { | |
697 | _IOMallocContiguousEntry * entry; | |
698 | IOMemoryDescriptor * md = NULL; | |
699 | ||
700 | mach_vm_address_t address = (mach_vm_address_t) _address; | |
701 | ||
702 | if( !address) | |
703 | return; | |
704 | ||
705 | assert(size); | |
706 | ||
707 | lck_mtx_lock(gIOMallocContiguousEntriesLock); | |
708 | queue_iterate( &gIOMallocContiguousEntries, entry, | |
709 | _IOMallocContiguousEntry *, link ) | |
710 | { | |
711 | if( entry->virtualAddr == address ) { | |
712 | md = entry->md; | |
713 | queue_remove( &gIOMallocContiguousEntries, entry, | |
714 | _IOMallocContiguousEntry *, link ); | |
715 | break; | |
716 | } | |
717 | } | |
718 | lck_mtx_unlock(gIOMallocContiguousEntriesLock); | |
719 | ||
720 | if (md) | |
721 | { | |
722 | md->release(); | |
723 | IODelete(entry, _IOMallocContiguousEntry, 1); | |
724 | } | |
725 | else | |
726 | { | |
727 | IOKernelFreePhysical((mach_vm_address_t) address, size); | |
728 | } | |
729 | } | |
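
/*
 * Minimal usage sketch: when a physical address is requested, the
 * allocation is backed by an IOBufferMemoryDescriptor constrained below
 * 4GB; free with the same size.
 *
 *    IOPhysicalAddress phys;
 *    void * buf = IOMallocContiguous(8192, 4096, &phys);
 *    if (buf) {
 *        // ... program DMA hardware with phys ...
 *        IOFreeContiguous(buf, 8192);
 *    }
 */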
730 | ||
731 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
732 | ||
733 | kern_return_t IOIteratePageableMaps(vm_size_t size, | |
734 | IOIteratePageableMapsCallback callback, void * ref) | |
735 | { | |
736 | kern_return_t kr = kIOReturnNotReady; | |
737 | vm_size_t segSize; | |
738 | UInt32 attempts; | |
739 | UInt32 index; | |
740 | vm_offset_t min; | |
741 | vm_map_t map; | |
742 | ||
743 | if (size > kIOPageableMaxMapSize) | |
744 | return( kIOReturnBadArgument ); | |
745 | ||
746 | do { | |
747 | index = gIOKitPageableSpace.hint; | |
748 | attempts = gIOKitPageableSpace.count; | |
749 | while( attempts--) { | |
750 | kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref); | |
751 | if( KERN_SUCCESS == kr) { | |
752 | gIOKitPageableSpace.hint = index; | |
753 | break; | |
754 | } | |
755 | if( index) | |
756 | index--; | |
757 | else | |
758 | index = gIOKitPageableSpace.count - 1; | |
759 | } | |
760 | if (KERN_NO_SPACE != kr) | |
761 | break; | |
762 | ||
763 | lck_mtx_lock( gIOKitPageableSpace.lock ); | |
764 | ||
765 | index = gIOKitPageableSpace.count; | |
766 | if( index >= (kIOMaxPageableMaps - 1)) { | |
767 | lck_mtx_unlock( gIOKitPageableSpace.lock ); | |
768 | break; | |
769 | } | |
770 | ||
771 | if( size < kIOPageableMapSize) | |
772 | segSize = kIOPageableMapSize; | |
773 | else | |
774 | segSize = size; | |
775 | ||
776 | min = 0; | |
777 | kr = kmem_suballoc(kernel_map, | |
778 | &min, | |
779 | segSize, | |
780 | TRUE, | |
781 | VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IOKIT), | |
782 | &map); | |
783 | if( KERN_SUCCESS != kr) { | |
784 | lck_mtx_unlock( gIOKitPageableSpace.lock ); | |
785 | break; | |
786 | } | |
787 | ||
788 | gIOKitPageableSpace.maps[index].map = map; | |
789 | gIOKitPageableSpace.maps[index].address = min; | |
790 | gIOKitPageableSpace.maps[index].end = min + segSize; | |
791 | gIOKitPageableSpace.hint = index; | |
792 | gIOKitPageableSpace.count = index + 1; | |
793 | ||
794 | lck_mtx_unlock( gIOKitPageableSpace.lock ); | |
795 | ||
796 | } while( true ); | |
797 | ||
798 | return kr; | |
799 | } | |
800 | ||
801 | struct IOMallocPageableRef | |
802 | { | |
803 | vm_offset_t address; | |
804 | vm_size_t size; | |
805 | vm_tag_t tag; | |
806 | }; | |
807 | ||
808 | static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref) | |
809 | { | |
810 | struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref; | |
811 | kern_return_t kr; | |
812 | ||
813 | kr = kmem_alloc_pageable( map, &ref->address, ref->size, ref->tag ); | |
814 | ||
815 | return( kr ); | |
816 | } | |
817 | ||
818 | static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag) | |
819 | { | |
820 | kern_return_t kr = kIOReturnNotReady; | |
821 | struct IOMallocPageableRef ref; | |
822 | ||
823 | if (alignment > page_size) | |
824 | return( 0 ); | |
825 | if (size > kIOPageableMaxMapSize) | |
826 | return( 0 ); | |
827 | ||
828 | ref.size = size; | |
829 | ref.tag = tag; | |
830 | kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref ); | |
831 | if( kIOReturnSuccess != kr) | |
832 | ref.address = 0; | |
833 | ||
834 | return( (void *) ref.address ); | |
835 | } | |
836 | ||
837 | vm_map_t IOPageableMapForAddress( uintptr_t address ) | |
838 | { | |
839 | vm_map_t map = 0; | |
840 | UInt32 index; | |
841 | ||
842 | for( index = 0; index < gIOKitPageableSpace.count; index++) { | |
843 | if( (address >= gIOKitPageableSpace.maps[index].address) | |
844 | && (address < gIOKitPageableSpace.maps[index].end) ) { | |
845 | map = gIOKitPageableSpace.maps[index].map; | |
846 | break; | |
847 | } | |
848 | } | |
849 | if( !map) | |
850 | panic("IOPageableMapForAddress: null"); | |
851 | ||
852 | return( map ); | |
853 | } | |
854 | ||
855 | static void IOFreePageablePages(void * address, vm_size_t size) | |
856 | { | |
857 | vm_map_t map; | |
858 | ||
859 | map = IOPageableMapForAddress( (vm_address_t) address); | |
860 | if( map) | |
861 | kmem_free( map, (vm_offset_t) address, size); | |
862 | } | |
863 | ||
864 | static uintptr_t IOMallocOnePageablePage(iopa_t * a) | |
865 | { | |
866 | return ((uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT)); | |
867 | } | |
868 | ||
869 | void * IOMallocPageable(vm_size_t size, vm_size_t alignment) | |
870 | { | |
871 | void * addr; | |
872 | ||
873 | if (size >= (page_size - 4*gIOPageAllocChunkBytes)) addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map)); | |
874 | else addr = ((void * ) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment)); | |
875 | ||
876 | if (addr) { | |
877 | #if IOALLOCDEBUG | |
878 | OSAddAtomicLong(size, &debug_iomallocpageable_size); | |
879 | #endif | |
880 | IOStatisticsAlloc(kIOStatisticsMallocPageable, size); | |
881 | } | |
882 | ||
883 | return (addr); | |
884 | } | |
885 | ||
886 | void IOFreePageable(void * address, vm_size_t size) | |
887 | { | |
888 | #if IOALLOCDEBUG | |
889 | OSAddAtomicLong(-size, &debug_iomallocpageable_size); | |
890 | #endif | |
891 | IOStatisticsAlloc(kIOStatisticsFreePageable, size); | |
892 | ||
893 | if (size < (page_size - 4*gIOPageAllocChunkBytes)) | |
894 | { | |
895 | address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size); | |
896 | size = page_size; | |
897 | } | |
898 | if (address) IOFreePageablePages(address, size); | |
899 | } | |
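
/*
 * Minimal usage sketch: pageable allocations may fault and so must only
 * be touched from thread context, never at interrupt time.  Small
 * requests are carved out of shared pages by the iopa allocator below;
 * larger ones come straight from the pageable maps.
 *
 *    void * table = IOMallocPageable(64 * 1024, 8);
 *    if (table) {
 *        // ... thread-context use only ...
 *        IOFreePageable(table, 64 * 1024);
 *    }
 */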
900 | ||
901 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
902 | ||
903 | extern "C" void | |
904 | iopa_init(iopa_t * a) | |
905 | { | |
906 | bzero(a, sizeof(*a)); | |
907 | a->lock = IOLockAlloc(); | |
908 | queue_init(&a->list); | |
909 | } | |
910 | ||
911 | static uintptr_t | |
912 | iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align) | |
913 | { | |
914 | uint32_t n, s; | |
915 | uint64_t avail = pa->avail; | |
916 | ||
917 | assert(avail); | |
918 | ||
919 | // find strings of count 1 bits in avail | |
920 | for (n = count; n > 1; n -= s) | |
921 | { | |
922 | s = n >> 1; | |
923 | avail = avail & (avail << s); | |
924 | } | |
925 | // and aligned | |
926 | avail &= align; | |
927 | ||
928 | if (avail) | |
929 | { | |
930 | n = __builtin_clzll(avail); | |
931 | pa->avail &= ~((-1ULL << (64 - count)) >> n); | |
932 | if (!pa->avail && pa->link.next) | |
933 | { | |
934 | remque(&pa->link); | |
935 | pa->link.next = 0; | |
936 | } | |
937 | return (n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa)); | |
938 | } | |
939 | ||
940 | return (0); | |
941 | } | |
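
/*
 * How the search above works: each pass of the folding loop ANDs "avail"
 * with itself shifted left by s = n >> 1, so a bit survives only if the
 * bit s positions below it was also set; after the loop a set bit marks
 * a free run of "count" chunks.  Bit 63 corresponds to chunk 0 at the
 * start of the page, so __builtin_clzll() selects the lowest-addressed
 * run, and the mask ((-1ULL << (64 - count)) >> n) then clears exactly
 * the bits for chunks n .. n + count - 1.
 */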
942 | ||
943 | uintptr_t | |
944 | iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign) | |
945 | { | |
946 | static const uint64_t align_masks[] = { | |
947 | 0xFFFFFFFFFFFFFFFF, | |
948 | 0xAAAAAAAAAAAAAAAA, | |
949 | 0x8888888888888888, | |
950 | 0x8080808080808080, | |
951 | 0x8000800080008000, | |
952 | 0x8000000080000000, | |
953 | 0x8000000000000000, | |
954 | }; | |
955 | iopa_page_t * pa; | |
956 | uintptr_t addr = 0; | |
957 | uint32_t count; | |
958 | uint64_t align; | |
959 | ||
960 | if (!bytes) bytes = 1; | |
961 | count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes; | |
962 | align = align_masks[log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes)]; | |
963 | ||
964 | IOLockLock(a->lock); | |
965 | __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_first(&a->list)); | |
966 | while (!queue_end(&a->list, &pa->link)) | |
967 | { | |
968 | addr = iopa_allocinpage(pa, count, align); | |
969 | if (addr) | |
970 | { | |
971 | a->bytecount += bytes; | |
972 | break; | |
973 | } | |
974 | __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_next(&pa->link)); | |
975 | } | |
976 | IOLockUnlock(a->lock); | |
977 | ||
978 | if (!addr) | |
979 | { | |
980 | addr = alloc(a); | |
981 | if (addr) | |
982 | { | |
983 | pa = (typeof(pa)) (addr + page_size - gIOPageAllocChunkBytes); | |
984 | pa->signature = kIOPageAllocSignature; | |
985 | pa->avail = -2ULL; | |
986 | ||
987 | addr = iopa_allocinpage(pa, count, align); | |
988 | IOLockLock(a->lock); | |
989 | if (pa->avail) enqueue_head(&a->list, &pa->link); | |
990 | a->pagecount++; | |
991 | if (addr) a->bytecount += bytes; | |
992 | IOLockUnlock(a->lock); | |
993 | } | |
994 | } | |
995 | ||
996 | assert((addr & ((1 << log2up(balign)) - 1)) == 0); | |
997 | return (addr); | |
998 | } | |
999 | ||
1000 | uintptr_t | |
1001 | iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes) | |
1002 | { | |
1003 | iopa_page_t * pa; | |
1004 | uint32_t count; | |
1005 | uintptr_t chunk; | |
1006 | ||
1007 | if (!bytes) bytes = 1; | |
1008 | ||
1009 | chunk = (addr & page_mask); | |
1010 | assert(0 == (chunk & (gIOPageAllocChunkBytes - 1))); | |
1011 | ||
1012 | pa = (typeof(pa)) (addr | (page_size - gIOPageAllocChunkBytes)); | |
1013 | assert(kIOPageAllocSignature == pa->signature); | |
1014 | ||
1015 | count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes; | |
1016 | chunk /= gIOPageAllocChunkBytes; | |
1017 | ||
1018 | IOLockLock(a->lock); | |
1019 | if (!pa->avail) | |
1020 | { | |
1021 | assert(!pa->link.next); | |
1022 | enqueue_tail(&a->list, &pa->link); | |
1023 | } | |
1024 | pa->avail |= ((-1ULL << (64 - count)) >> chunk); | |
1025 | if (pa->avail != -2ULL) pa = 0; | |
1026 | else | |
1027 | { | |
1028 | remque(&pa->link); | |
1029 | pa->link.next = 0; | |
1030 | pa->signature = 0; | |
1031 | a->pagecount--; | |
1032 | // page to free | |
1033 | pa = (typeof(pa)) trunc_page(pa); | |
1034 | } | |
1035 | a->bytecount -= bytes; | |
1036 | IOLockUnlock(a->lock); | |
1037 | ||
1038 | return ((uintptr_t) pa); | |
1039 | } | |
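
/*
 * Worked example for the bookkeeping above, assuming 4KB pages so that
 * gIOPageAllocChunkBytes == 64: freeing 96 bytes at page offset 0x80
 * yields chunk == 2 and count == 2, so bits 61 and 60 of "avail" are
 * set again.  Once avail returns to -2ULL (everything but the header
 * chunk free) the page address itself is returned, and the caller hands
 * it back to the underlying page allocator.
 */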
1040 | ||
1041 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
1042 | ||
1043 | IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address, | |
1044 | IOByteCount length, IOOptionBits cacheMode ) | |
1045 | { | |
1046 | IOReturn ret = kIOReturnSuccess; | |
1047 | ppnum_t pagenum; | |
1048 | ||
1049 | if( task != kernel_task) | |
1050 | return( kIOReturnUnsupported ); | |
1051 | if ((address | length) & PAGE_MASK) | |
1052 | { | |
1053 | // OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode); | |
1054 | return( kIOReturnUnsupported ); | |
1055 | } | |
1056 | length = round_page(address + length) - trunc_page( address ); | |
1057 | address = trunc_page( address ); | |
1058 | ||
1059 | // make map mode | |
1060 | cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask; | |
1061 | ||
1062 | while( (kIOReturnSuccess == ret) && (length > 0) ) { | |
1063 | ||
1064 | // Get the physical page number | |
1065 | pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address); | |
1066 | if( pagenum) { | |
1067 | ret = IOUnmapPages( get_task_map(task), address, page_size ); | |
1068 | ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode ); | |
1069 | } else | |
1070 | ret = kIOReturnVMError; | |
1071 | ||
1072 | address += page_size; | |
1073 | length -= page_size; | |
1074 | } | |
1075 | ||
1076 | return( ret ); | |
1077 | } | |
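
/*
 * Minimal usage sketch (kernel_task only; both address and length must
 * be page-aligned).  The cache-mode constant comes from IOTypes.h.
 *
 *    IOReturn r = IOSetProcessorCacheMode(kernel_task,
 *                    (IOVirtualAddress) buf, page_size,
 *                    kIOInhibitCache);
 */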
1078 | ||
1079 | ||
1080 | IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address, | |
1081 | IOByteCount length ) | |
1082 | { | |
1083 | if( task != kernel_task) | |
1084 | return( kIOReturnUnsupported ); | |
1085 | ||
1086 | flush_dcache64( (addr64_t) address, (unsigned) length, false ); | |
1087 | ||
1088 | return( kIOReturnSuccess ); | |
1089 | } | |
1090 | ||
1091 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
1092 | ||
1093 | vm_offset_t OSKernelStackRemaining( void ) | |
1094 | { | |
1095 | return (ml_stack_remaining()); | |
1096 | } | |
1097 | ||
1098 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
1099 | ||
1100 | /* | |
1101 | * Spin for indicated number of milliseconds. | |
1102 | */ | |
1103 | void IOSleep(unsigned milliseconds) | |
1104 | { | |
1105 | delay_for_interval(milliseconds, kMillisecondScale); | |
1106 | } | |
1107 | ||
1108 | /* | |
1109 | * Spin for indicated number of milliseconds, and potentially an | |
1110 | * additional number of milliseconds up to the leeway values. | |
1111 | */ | |
1112 | void IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds) | |
1113 | { | |
1114 | delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale); | |
1115 | } | |
1116 | ||
1117 | /* | |
1118 | * Spin for indicated number of microseconds. | |
1119 | */ | |
1120 | void IODelay(unsigned microseconds) | |
1121 | { | |
1122 | delay_for_interval(microseconds, kMicrosecondScale); | |
1123 | } | |
1124 | ||
1125 | /* | |
1126 | * Spin for indicated number of nanoseconds. | |
1127 | */ | |
1128 | void IOPause(unsigned nanoseconds) | |
1129 | { | |
1130 | delay_for_interval(nanoseconds, kNanosecondScale); | |
1131 | } | |
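
/*
 * Usage sketch: prefer IOSleep() wherever blocking is acceptable, and
 * reserve IODelay()/IOPause() for short busy-waits.  The register-read
 * helper and ready bit below are hypothetical.
 *
 *    // poll a status register for up to 100 microseconds
 *    for (int i = 0; i < 10; i++) {
 *        if (readStatusReg() & kReadyBit) break;
 *        IODelay(10);
 *    }
 */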
1132 | ||
1133 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
1134 | ||
1135 | static void _iolog_consputc(int ch, void *arg __unused) | |
1136 | { | |
1137 | cons_putc_locked(ch); | |
1138 | } | |
1139 | ||
1140 | static void _IOLogv(const char *format, va_list ap, void *caller); | |
1141 | ||
1142 | __attribute__((noinline,not_tail_called)) | |
1143 | void IOLog(const char *format, ...) | |
1144 | { | |
1145 | void *caller = __builtin_return_address(0); | |
1146 | va_list ap; | |
1147 | ||
1148 | va_start(ap, format); | |
1149 | _IOLogv(format, ap, caller); | |
1150 | va_end(ap); | |
1151 | } | |
1152 | ||
1153 | __attribute__((noinline,not_tail_called)) | |
1154 | void IOLogv(const char *format, va_list ap) | |
1155 | { | |
1156 | void *caller = __builtin_return_address(0); | |
1157 | _IOLogv(format, ap, caller); | |
1158 | } | |
1159 | ||
1160 | void _IOLogv(const char *format, va_list ap, void *caller) | |
1161 | { | |
1162 | va_list ap2; | |
1163 | ||
1164 | /* Ideally not called at interrupt context or with interrupts disabled. Needs further validate */ | |
1165 | /* assert(TRUE == ml_get_interrupts_enabled()); */ | |
1166 | ||
1167 | va_copy(ap2, ap); | |
1168 | ||
1169 | os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller); | |
1170 | ||
1171 | __doprnt(format, ap2, _iolog_consputc, NULL, 16, TRUE); | |
1172 | va_end(ap2); | |
1173 | } | |
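
/*
 * Minimal usage sketch: IOLog() is printf-like; output goes both to
 * os_log and, via __doprnt(), to the console.  Avoid calling it from
 * interrupt context.
 *
 *    IOLog("mapped %u pages at 0x%llx\n", pageCount, (uint64_t) addr);
 */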
1174 | ||
1175 | #if !__LP64__ | |
1176 | void IOPanic(const char *reason) | |
1177 | { | |
1178 | panic("%s", reason); | |
1179 | } | |
1180 | #endif | |
1181 | ||
1182 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
1183 | ||
1184 | /* | |
1185 | * Convert a integer constant (typically a #define or enum) to a string. | |
1186 | */ | |
1187 | static char noValue[80]; // that's pretty | |
1188 | ||
1189 | const char *IOFindNameForValue(int value, const IONamedValue *regValueArray) | |
1190 | { | |
1191 | for( ; regValueArray->name; regValueArray++) { | |
1192 | if(regValueArray->value == value) | |
1193 | return(regValueArray->name); | |
1194 | } | |
1195 | snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value); | |
1196 | return((const char *)noValue); | |
1197 | } | |
1198 | ||
1199 | IOReturn IOFindValueForName(const char *string, | |
1200 | const IONamedValue *regValueArray, | |
1201 | int *value) | |
1202 | { | |
1203 | for( ; regValueArray->name; regValueArray++) { | |
1204 | if(!strcmp(regValueArray->name, string)) { | |
1205 | *value = regValueArray->value; | |
1206 | return kIOReturnSuccess; | |
1207 | } | |
1208 | } | |
1209 | return kIOReturnBadArgument; | |
1210 | } | |
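
/*
 * Minimal usage sketch; the table below is illustrative.  The array is
 * terminated by an entry whose name is NULL.
 *
 *    static const IONamedValue powerStates[] = {
 *        { 0, "off"   },
 *        { 1, "sleep" },
 *        { 2, "on"    },
 *        { 0, NULL    }
 *    };
 *
 *    const char * name = IOFindNameForValue(2, powerStates);           // "on"
 *    int value;
 *    IOReturn r = IOFindValueForName("sleep", powerStates, &value);    // value == 1
 */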
1211 | ||
1212 | OSString * IOCopyLogNameForPID(int pid) | |
1213 | { | |
1214 | char buf[128]; | |
1215 | size_t len; | |
1216 | snprintf(buf, sizeof(buf), "pid %d, ", pid); | |
1217 | len = strlen(buf); | |
1218 | proc_name(pid, buf + len, sizeof(buf) - len); | |
1219 | return (OSString::withCString(buf)); | |
1220 | } | |
1221 | ||
1222 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
1223 | ||
1224 | IOAlignment IOSizeToAlignment(unsigned int size) | |
1225 | { | |
1226 | int shift; | |
1227 | const int intsize = sizeof(unsigned int) * 8; | |
1228 | ||
1229 | for (shift = 1; shift < intsize; shift++) { | |
1230 | if (size & 0x80000000) | |
1231 | return (IOAlignment)(intsize - shift); | |
1232 | size <<= 1; | |
1233 | } | |
1234 | return 0; | |
1235 | } | |
1236 | ||
1237 | unsigned int IOAlignmentToSize(IOAlignment align) | |
1238 | { | |
1239 | unsigned int size; | |
1240 | ||
1241 | for (size = 1; align; align--) { | |
1242 | size <<= 1; | |
1243 | } | |
1244 | return size; | |
1245 | } | |
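
/*
 * Worked examples: IOSizeToAlignment() returns the log2 of the largest
 * power of two that does not exceed size; IOAlignmentToSize() is its
 * inverse for exact powers of two.
 *
 *    IOSizeToAlignment(4096) == 12
 *    IOSizeToAlignment(6000) == 12
 *    IOAlignmentToSize(12)   == 4096
 */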
1246 | ||
1247 | } /* extern "C" */ | |
1248 | ||
1249 | ||
1250 |