/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98    cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <vm/vm_kern.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/OSDebug.h>
#include <sys/sysctl.h>
#endif

#include "libkern/OSAtomic.h"
#include <libkern/c++/OSKext.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <sys/msgbuf.h>

#if IOKITSTATS

#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)

#else

#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */

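/*
 * Note: the do { ... } while (0) wrapper lets IOStatisticsAlloc() behave
 * as a single statement (e.g. in an unbraced if/else) in both
 * configurations; with IOKITSTATS off it expands to nothing.
 */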
extern "C"
{


mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern int
__doprnt(
    const char *fmt,
    va_list argp,
    void (*putc)(int, void *),
    void *arg,
    int radix);

extern void cons_putc_locked(char);
extern void bsd_log_lock(void);
extern void bsd_log_unlock(void);


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1 = NULL;
void *_giDebugReserved2 = NULL;


/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps = 16 };
enum { kIOPageableMapSize = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                    &gIOKitPageableSpace.maps[0].address,
                    kIOPageableMapSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock           = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end    = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint           = 0;
    gIOKitPageableSpace.count          = 1;

    gIOMallocContiguousEntriesLock     = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}
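/*
 * Note: kernel_thread_start() returns the new thread with an extra
 * reference held; thread_deallocate() drops it here, so the IOThread
 * handle returned to the caller is not a retained reference.
 */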


void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */


void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
    if ( address ) {
#if IOALLOCDEBUG
        debug_iomalloc_size += size;
#endif
        IOStatisticsAlloc(kIOStatisticsMalloc, size);
    }

    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
        IOStatisticsAlloc(kIOStatisticsFree, size);
    }
}
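/*
 * Since kfree() requires the allocation size, callers must pass IOFree()
 * the same size they gave IOMalloc(). A minimal usage sketch
 * (illustrative only; the 'MyRecord' type is hypothetical):
 *
 *    MyRecord * rec = (MyRecord *) IOMalloc(sizeof(MyRecord));
 *    if (rec) {
 *        // ... use rec ...
 *        IOFree(rec, sizeof(MyRecord));   // size must match the IOMalloc
 *    }
 */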

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_offset_t   address;
    vm_offset_t   allocationAddress;
    vm_size_t     adjustedSize;
    uintptr_t     alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t) - sizeof(vm_address_t)))
                            = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

    if( address) {
#if IOALLOCDEBUG
        debug_iomalloc_size += size;
#endif
        IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
    }

    return (void *) address;
}
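/*
 * For sub-page allocations, IOMallocAligned() over-allocates and stashes
 * bookkeeping immediately below the pointer it returns:
 *
 *   allocationAddress -> [ padding ][ adjustedSize ][ allocationAddress ][ aligned block ]
 *                                                                        ^ returned address
 *
 * IOFreeAligned() reads those two words back to recover the original
 * allocation, which is why it must be given the same size as the
 * matching IOMallocAligned() call.
 */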

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif

    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {

        adjustedSize = *((mach_vm_size_t *)
                        (address - sizeof(mach_vm_address_t) - sizeof(mach_vm_size_t)));
        allocationAddress = *((mach_vm_address_t *)
                        (address - sizeof(mach_vm_address_t) ));
        kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}
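/*
 * Note: the (2 * size) term mirrors the over-allocation done by
 * IOKernelAllocateWithPhysicalRestrict() below for its small (kalloc'd)
 * path, so the same size threshold selects the matching free path here.
 */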

mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
                                     mach_vm_size_t alignment, bool contiguous)
{
    kern_return_t     kr;
    mach_vm_address_t address;
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;
    mach_vm_address_t alignMask;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);

    contiguous = (contiguous && (adjustedSize > page_size))
                   || (alignment > page_size);

    if (contiguous || maxPhys)
    {
        int options = 0;
        vm_offset_t virt;

        adjustedSize = size;
        contiguous = (contiguous && (adjustedSize > page_size))
                       || (alignment > page_size);

        if ((!contiguous) && (maxPhys <= 0xFFFFFFFF))
        {
            maxPhys = 0;
            options |= KMA_LOMEM;
        }

        if (contiguous || maxPhys)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, atop(maxPhys), atop(alignMask), 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, options);
        }
        if (KERN_SUCCESS == kr)
            address = virt;
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
                    & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page(address);

            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
                            - sizeof(mach_vm_address_t))) = adjustedSize;
            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

#if IOALLOCDEBUG
    if (address) {
        debug_iomalloc_size += size;
    }
#endif

    return (address);
}
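/*
 * Allocation strategy, as implemented above:
 *  - contiguous or physically-restricted requests go to the VM: either
 *    kmem_alloc_contig() (contiguous, or a maxPhys restriction above
 *    4GB) or kernel_memory_allocate() with KMA_LOMEM when a <= 4GB
 *    restriction can be satisfied from the low-memory pool;
 *  - otherwise the block comes from kalloc(), over-allocated by 2x so
 *    the aligned block can be pushed to the next page boundary if it
 *    would straddle one, with the size and raw pointer stashed just
 *    below the returned address (see IOKernelFreePhysical()).
 */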


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry
{
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask = alignment - 1;
        physicalMask = (0xFFFFFFFF ^ alignMask);

        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry *
        entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md          = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                     _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address          = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    if (address) {
        IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
    }

    return (void *) address;
}
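/*
 * When a physical address is requested, the allocation is backed by an
 * IOBufferMemoryDescriptor tracked on gIOMallocContiguousEntries so that
 * IOFreeContiguous() can find and release it. A minimal usage sketch
 * (illustrative only):
 *
 *    IOPhysicalAddress phys;
 *    void * buf = IOMallocContiguous(4096, 4096, &phys);
 *    if (buf) {
 *        // ... hand 'phys' to a DMA engine, say ...
 *        IOFreeContiguous(buf, 4096);
 *    }
 */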

void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if( !address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                    _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreePhysical((mach_vm_address_t) address, size);
    }

    IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}
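/*
 * IOIteratePageableMaps() first retries the callback against every
 * existing pageable submap, starting at the last successful one (the
 * hint); only if all of them fail does it take the lock and grow the
 * pool with a fresh kmem_suballoc() submap, up to kIOMaxPageableMaps.
 */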

struct IOMallocPageableRef
{
    vm_offset_t address;
    vm_size_t   size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

    if( ref.address) {
#if IOALLOCDEBUG
        debug_iomallocpageable_size += round_page(size);
#endif
        IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
    }

    return( (void *) ref.address );
}
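/*
 * Memory returned by IOMallocPageable() can be paged out, so in general
 * it must not be touched from contexts that cannot take a page fault
 * (interrupt context, or while holding a simple lock); wire it first if
 * that is a possibility.
 */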

vm_map_t IOPageableMapForAddress( uintptr_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        panic("IOPageableMapForAddress: null");

    return( map );
}

void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomallocpageable_size -= round_page(size);
#endif

    IOStatisticsAlloc(kIOStatisticsFreePageable, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );
    if ((address | length) & PAGE_MASK)
    {
//      OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
        return( kIOReturnUnsupported );
    }
    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}
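/*
 * Note: only page-aligned, page-multiple kernel ranges are supported;
 * the cache mode is applied by unmapping and remapping each page in
 * turn with the new mapping flags.
 */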


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_offset_t OSKernelStackRemaining( void )
{
    return (ml_stack_remaining());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Sleep the calling thread for the indicated number of milliseconds.
 */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for the indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/*
 * Spin for the indicated number of nanoseconds.
 */
void IOPause(unsigned nanoseconds)
{
    delay_for_interval(nanoseconds, kNanosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void _iolog_consputc(int ch, void *arg __unused)
{
    cons_putc_locked(ch);
}

static void _iolog_logputc(int ch, void *arg __unused)
{
    log_putc_locked(ch);
}

void IOLog(const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    IOLogv(format, ap);
    va_end(ap);
}

void IOLogv(const char *format, va_list ap)
{
    va_list ap2;

    va_copy(ap2, ap);

    bsd_log_lock();
    __doprnt(format, ap, _iolog_logputc, NULL, 16);
    bsd_log_unlock();

    __doprnt(format, ap2, _iolog_consputc, NULL, 16);
}
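/*
 * IOLogv() emits the formatted text twice: once into the kernel message
 * buffer (under bsd_log_lock(), for dmesg/syslog) and once to the
 * console. The va_copy() is needed because each __doprnt() pass consumes
 * its argument list; the final 16 is __doprnt()'s radix argument.
 */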

#if !__LP64__
void IOPanic(const char *reason)
{
    panic("%s", reason);
}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80]; // that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}
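/*
 * Note: the fallback string is formatted into the static noValue buffer,
 * so IOFindNameForValue() is not reentrant for unmatched values; two
 * concurrent callers can see each other's "0x... (UNDEFINED)" text.
 */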

IOReturn IOFindValueForName(const char *string,
        const IONamedValue *regValueArray,
        int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

OSString * IOCopyLogNameForPID(int pid)
{
    char   buf[128];
    size_t len;
    snprintf(buf, sizeof(buf), "pid %d, ", pid);
    len = strlen(buf);
    proc_name(pid, buf + len, sizeof(buf) - len);
    return (OSString::withCString(buf));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
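/*
 * These helpers round trip through log2: IOSizeToAlignment() returns the
 * bit position of the highest set bit (so IOSizeToAlignment(4096) == 12,
 * and any size in 4096..8191 also maps to 12), and IOAlignmentToSize()
 * inverts it (IOAlignmentToSize(12) == 4096).
 */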

} /* extern "C" */