/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98    cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <vm/vm_kern.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/OSDebug.h>
#include <sys/sysctl.h>
#endif

#include "libkern/OSAtomic.h"
#include <libkern/c++/OSKext.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <sys/msgbuf.h>

#if IOKITSTATS

#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)

#else

#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */

extern "C"
{


mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern int
__doprnt(
    const char *fmt,
    va_list argp,
    void (*putc)(int, void *),
    void *arg,
    int radix);

extern void cons_putc_locked(char);
extern void bsd_log_lock(void);
extern void bsd_log_unlock(void);
extern void logwakeup();


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;

iopa_t gIOBMDPageAllocator;

/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

#if __x86_64__
enum { kIOMaxPageableMaps    = 8 };
enum { kIOPageableMapSize    = 512 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
#else
enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
#endif

typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;
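
/*
 * IOKit pageable allocations are carved out of up to kIOMaxPageableMaps
 * submaps of the kernel map, tracked in this table; IOIteratePageableMaps()
 * below grows the table on demand once the existing maps are exhausted.
 */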

static iopa_t gIOPageablePageAllocator;

uint32_t gIOPageAllocChunkBytes;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                    &gIOKitPageableSpace.maps[0].address,
                    kIOPageableMapSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock        = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock  = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    gIOPageAllocChunkBytes = PAGE_SIZE/64;
    assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
    iopa_init(&gIOBMDPageAllocator);
    iopa_init(&gIOPageablePageAllocator);

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */


void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
    if ( address ) {
#if IOALLOCDEBUG
        debug_iomalloc_size += size;
#endif
        IOStatisticsAlloc(kIOStatisticsMalloc, size);
    }

    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
        IOStatisticsAlloc(kIOStatisticsFree, size);
    }
}
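
/*
 * Usage sketch (illustrative only, not part of this file): kalloc()/kfree()
 * need the original allocation size, so callers of IOMalloc()/IOFree() must
 * remember it themselves.
 *
 *     int * table = (int *) IOMalloc(16 * sizeof(int));
 *     if (table) {
 *         // ... use table ...
 *         IOFree(table, 16 * sizeof(int));   // size must match the IOMalloc()
 *     }
 */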

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_offset_t   address;
    vm_offset_t   allocationAddress;
    vm_size_t     adjustedSize;
    uintptr_t     alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (size > adjustedSize) {
        address = 0;    /* overflow detected */
    }
    else if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                    size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                        adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

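        // Stash the block's true size and start address in the two words
        // just below the aligned pointer so IOFreeAligned() can recover them.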
        if (allocationAddress) {
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t) - sizeof(vm_address_t)))
                    = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                    = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

    if( address) {
#if IOALLOCDEBUG
        debug_iomalloc_size += size;
#endif
        IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
    }

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif

    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}
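
/*
 * Note that both paths assume 'alignment' is a power of two (alignMask is
 * computed as alignment - 1). A brief sketch (illustrative only):
 *
 *     void * buf = IOMallocAligned(1024, 64);    // 64-byte aligned
 *     if (buf) IOFreeAligned(buf, 1024);         // same size as allocated
 */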

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {

        adjustedSize = *((mach_vm_size_t *)
                        (address - sizeof(mach_vm_address_t) - sizeof(mach_vm_size_t)));
        allocationAddress = *((mach_vm_address_t *)
                        (address - sizeof(mach_vm_address_t) ));
        kfree((void *)allocationAddress, adjustedSize);
    }

    IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}


mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
                                     mach_vm_size_t alignment, bool contiguous)
{
    kern_return_t     kr;
    mach_vm_address_t address;
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;
    mach_vm_address_t alignMask;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);

    contiguous = (contiguous && (adjustedSize > page_size))
                   || (alignment > page_size);

    if (contiguous || maxPhys)
    {
        int         options = 0;
        vm_offset_t virt;

        adjustedSize = size;
        contiguous = (contiguous && (adjustedSize > page_size))
                       || (alignment > page_size);

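        // A non-contiguous allocation below 4GB physical can be satisfied
        // with KMA_LOMEM; a limit at or beyond the last physical page
        // (gIOLastPage) is no restriction at all.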
        if (!contiguous)
        {
            if (maxPhys <= 0xFFFFFFFF)
            {
                maxPhys = 0;
                options |= KMA_LOMEM;
            }
            else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage))
            {
                maxPhys = 0;
            }
        }
        if (contiguous || maxPhys)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, atop(maxPhys), atop(alignMask), 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, options);
        }
        if (KERN_SUCCESS == kr)
            address = virt;
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
                    & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page(address);

            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
                            - sizeof(mach_vm_address_t))) = adjustedSize;
            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    if (address) {
        IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
        debug_iomalloc_size += size;
#endif
    }

    return (address);
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry
{
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask = alignment - 1;
        physicalMask = (0xFFFFFFFF ^ alignMask);

        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
            kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry *
        entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md          = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                     _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address          = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}
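
/*
 * Usage sketch (illustrative only): when the caller wants the physical
 * address, the memory is backed by an IOBufferMemoryDescriptor kept in
 * gIOMallocContiguousEntries until IOFreeContiguous() is called.
 *
 *     IOPhysicalAddress phys;
 *     void * buf = IOMallocContiguous(8192, 4096, &phys);
 *     if (buf) {
 *         // ... program a device with 'phys' ...
 *         IOFreeContiguous(buf, 8192);
 *     }
 */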

void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if( !address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreePhysical((mach_vm_address_t) address, size);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t IOIteratePageableMaps(vm_size_t size,
                                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index    = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

struct IOMallocPageableRef
{
    vm_offset_t address;
    vm_size_t   size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( uintptr_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        panic("IOPageableMapForAddress: null");

    return( map );
}

static void IOFreePageablePages(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);
}

static uintptr_t IOMallocOnePageablePage(iopa_t * a)
{
    return ((uintptr_t) IOMallocPageablePages(page_size, page_size));
}

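/*
 * Requests of nearly a page or more go straight to the pageable maps;
 * smaller requests are packed into shared pages by the iopa allocator,
 * with IOMallocOnePageablePage() supplying fresh pages as needed.
 */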
void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    void * addr;

    if (size >= (page_size - 4*gIOPageAllocChunkBytes)) addr = IOMallocPageablePages(size, alignment);
    else                                                addr = ((void * ) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment));

    if (addr) {
#if IOALLOCDEBUG
        debug_iomallocpageable_size += size;
#endif
        IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
    }

    return (addr);
}

void IOFreePageable(void * address, vm_size_t size)
{
#if IOALLOCDEBUG
    debug_iomallocpageable_size -= size;
#endif
    IOStatisticsAlloc(kIOStatisticsFreePageable, size);

    if (size < (page_size - 4*gIOPageAllocChunkBytes))
    {
        address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
        size = page_size;
    }
    if (address) IOFreePageablePages(address, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern "C" void
iopa_init(iopa_t * a)
{
    bzero(a, sizeof(*a));
    a->lock = IOLockAlloc();
    queue_init(&a->list);
}

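/*
 * Try to carve 'count' contiguous chunks out of one page. pa->avail is a
 * bitmap of free chunks with bit 63 for the page's first chunk; the
 * shift-and-AND loop below leaves a bit set only where a run of 'count'
 * free chunks begins, and the align mask then discards runs that start
 * at misaligned chunk indices.
 */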
static uintptr_t
iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
{
    uint32_t n, s;
    uint64_t avail = pa->avail;

    assert(avail);

    // find strings of count 1 bits in avail
    for (n = count; n > 1; n -= s)
    {
        s = n >> 1;
        avail = avail & (avail << s);
    }
    // and aligned
    avail &= align;

    if (avail)
    {
        n = __builtin_clzll(avail);
        pa->avail &= ~((-1ULL << (64 - count)) >> n);
        if (!pa->avail && pa->link.next)
        {
            remque(&pa->link);
            pa->link.next = 0;
        }
        return (n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa));
    }

    return (0);
}

static uint32_t
log2up(uint32_t size)
{
    if (size <= 1) size = 0;
    else           size = 32 - __builtin_clz(size - 1);
    return (size);
}
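
// Examples: log2up(1) == 0, log2up(5) == 3, log2up(8) == 3.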

uintptr_t
iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
{
    static const uint64_t align_masks[] = {
        0xFFFFFFFFFFFFFFFF,
        0xAAAAAAAAAAAAAAAA,
        0x8888888888888888,
        0x8080808080808080,
        0x8000800080008000,
        0x8000000080000000,
        0x8000000000000000,
    };
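    // align_masks[k] keeps only bits whose chunk index is a multiple of
    // 2^k (bit 63 == chunk 0), e.g. 0xAAAA... allows 2-chunk alignment.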
    iopa_page_t * pa;
    uintptr_t     addr = 0;
    uint32_t      count;
    uint64_t      align;

    if (!bytes) bytes = 1;
    count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
    align = align_masks[log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes)];

    IOLockLock(a->lock);
    pa = (typeof(pa)) queue_first(&a->list);
    while (!queue_end(&a->list, &pa->link))
    {
        addr = iopa_allocinpage(pa, count, align);
        if (addr)
        {
            a->bytecount += bytes;
            break;
        }
        pa = (typeof(pa)) queue_next(&pa->link);
    }
    IOLockUnlock(a->lock);

    if (!addr)
    {
        addr = alloc(a);
        if (addr)
        {
            pa = (typeof(pa)) (addr + page_size - gIOPageAllocChunkBytes);
            pa->signature = kIOPageAllocSignature;
            pa->avail     = -2ULL;

            addr = iopa_allocinpage(pa, count, align);
            IOLockLock(a->lock);
            if (pa->avail) enqueue_head(&a->list, &pa->link);
            a->pagecount++;
            if (addr) a->bytecount += bytes;
            IOLockUnlock(a->lock);
        }
    }

    assert((addr & ((1 << log2up(balign)) - 1)) == 0);
    return (addr);
}

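/*
 * Release 'bytes' at 'addr' back to its page's bitmap. Returns the page's
 * base address once every chunk in the page has been freed (the caller then
 * releases the page itself), or 0 while the page is still partially in use.
 */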
uintptr_t
iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
{
    iopa_page_t * pa;
    uint32_t      count;
    uintptr_t     chunk;

    if (!bytes) bytes = 1;

    chunk = (addr & page_mask);
    assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));

    pa = (typeof(pa)) (addr | (page_size - gIOPageAllocChunkBytes));
    assert(kIOPageAllocSignature == pa->signature);

    count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
    chunk /= gIOPageAllocChunkBytes;

    IOLockLock(a->lock);
    if (!pa->avail)
    {
        assert(!pa->link.next);
        enqueue_tail(&a->list, &pa->link);
    }
    pa->avail |= ((-1ULL << (64 - count)) >> chunk);
    if (pa->avail != -2ULL) pa = 0;
    else
    {
        remque(&pa->link);
        pa->link.next = 0;
        pa->signature = 0;
        a->pagecount--;
        // page to free
        pa = (typeof(pa)) trunc_page(pa);
    }
    a->bytecount -= bytes;
    IOLockUnlock(a->lock);

    return ((uintptr_t) pa);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );
    if ((address | length) & PAGE_MASK)
    {
//      OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
        return( kIOReturnUnsupported );
    }
    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length  -= page_size;
    }

    return( ret );
}


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_offset_t OSKernelStackRemaining( void )
{
    return (ml_stack_remaining());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Sleep the calling thread for the indicated number of milliseconds.
 */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/*
 * Spin for indicated number of nanoseconds.
 */
void IOPause(unsigned nanoseconds)
{
    delay_for_interval(nanoseconds, kNanosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void _iolog_consputc(int ch, void *arg __unused)
{
    cons_putc_locked(ch);
}

static void _iolog_logputc(int ch, void *arg __unused)
{
    log_putc_locked(ch);
}

void IOLog(const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    IOLogv(format, ap);
    va_end(ap);
}

void IOLogv(const char *format, va_list ap)
{
    va_list ap2;

    va_copy(ap2, ap);

    bsd_log_lock();
    __doprnt(format, ap, _iolog_logputc, NULL, 16);
    bsd_log_unlock();
    logwakeup();

    __doprnt(format, ap2, _iolog_consputc, NULL, 16);
    va_end(ap2);    // match the va_copy() above
}
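
/*
 * Usage note: IOLog()/IOLogv() format with the kernel's __doprnt() and write
 * to both the system log and the console, e.g. (illustrative):
 *
 *     IOLog("%s: read %u bytes\n", "MyDriver", byteCount);
 *
 * They take the BSD log lock, and per IOLib.h should not be called from
 * interrupt context.
 */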

#if !__LP64__
void IOPanic(const char *reason)
{
    panic("%s", reason);
}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];    // shared scratch buffer; IOFindNameForValue() is not reentrant

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
                            const IONamedValue *regValueArray,
                            int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}
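
/*
 * Example (illustrative): an IONamedValue table is terminated by an entry
 * whose name is NULL.
 *
 *     static const IONamedValue gPowerStates[] = {
 *         { 0, "off" }, { 1, "doze" }, { 2, "on" },
 *         { 0, NULL }
 *     };
 *     const char * name = IOFindNameForValue(2, gPowerStates);   // "on"
 */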

OSString * IOCopyLogNameForPID(int pid)
{
    char   buf[128];
    size_t len;

    snprintf(buf, sizeof(buf), "pid %d, ", pid);
    len = strlen(buf);
    proc_name(pid, buf + len, sizeof(buf) - len);
    return (OSString::withCString(buf));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int    intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
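
// Examples: IOSizeToAlignment(4096) == 12 and IOAlignmentToSize(12) == 4096;
// IOSizeToAlignment() returns log2 of the largest power of two <= size.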

} /* extern "C" */