/* apple/xnu (xnu-344.49): iokit/Kernel/IOLib.c */
/*
 * Copyright (c) 1998-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91	Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98	cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOKitDebug.h>

mach_timespec_t IOZeroTvalspec = { 0, 0 };


/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void * _giDebugLogInternal     = NULL;
void * _giDebugLogDataInternal = NULL;
void * _giDebugReserved1       = NULL;
void * _giDebugReserved2       = NULL;


/*
 * Static variables for this module.
 */

static IOThreadFunc threadArgFcn;
static void *       threadArgArg;
static lock_t *     threadArgLock;


enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 16 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 64 * 1024 * 1024 };

typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32    count;
    UInt32    hint;
    IOMapData maps[ kIOMaxPageableMaps ];
    mutex_t * lock;
} gIOKitPageableSpace;


void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if (libInitialized)
        return;

    threadArgLock = lock_alloc( true, NULL, NULL );

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                        &gIOKitPageableSpace.maps[0].address,
                        kIOPageableMapSize,
                        TRUE,
                        TRUE,
                        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    gIOKitPageableSpace.lock        = mutex_alloc( 0 );
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * We pass an argument to a new thread by saving fcn and arg in some
 * locked variables and starting the thread at ioThreadStart().  This
 * function retrieves fcn and arg and makes the appropriate call.
 */

static void ioThreadStart( void )
{
    IOThreadFunc fcn;
    void *       arg;

    fcn = threadArgFcn;
    arg = threadArgArg;
    lock_done( threadArgLock );

    (*fcn)(arg);

    IOExitThread();
}

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    IOThread thread;

    lock_write( threadArgLock );
    threadArgFcn = fcn;
    threadArgArg = arg;

    thread = kernel_thread( kernel_task, ioThreadStart );

    return (thread);
}
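
/*
 * Example (hypothetical, not part of the original file): a driver can
 * spawn a worker with IOCreateThread and let it exit via IOExitThread.
 * 'myWorker', 'doWork' and 'state' are illustrative names only.
 *
 *	static void myWorker(void * arg)
 *	{
 *	    doWork(arg);	// hypothetical helper
 *	    IOExitThread();	// never returns
 *	}
 *
 *	IOThread thread = IOCreateThread(&myWorker, state);
 *
 * Because fcn/arg are handed over through the threadArgLock-protected
 * globals above, concurrent calls to IOCreateThread are serialized until
 * ioThreadStart() has copied them out.
 */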

volatile void IOExitThread()
{
    (void) thread_terminate(current_act());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */


void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *) kalloc(size);
#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif
    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree((vm_offset_t) address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                       + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                      & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
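
/*
 * Layout of a sub-page IOMallocAligned allocation, as implemented above.
 * The returned pointer is rounded up inside a larger block, and the two
 * words immediately preceding it record the bookkeeping IOFreeAligned
 * needs to locate and release the real allocation:
 *
 *	allocationAddress                             address (returned)
 *	v                                             v
 *	| ...padding... | vm_size_t  | vm_address_t   | caller data ... |
 *	                  adjusted     allocation
 *	                  size         address
 */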

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size );

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, (vm_address_t) allocationAddress, adjustedSize );
        else
            kfree((vm_offset_t) allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kmem_alloc_contig(kernel_map, &address, size,
                               alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;
        allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                       + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                      & (~alignMask);

            if (atop(address) != atop(address + size - 1))
                address = round_page(address);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    if (address && physicalAddress)
        *physicalAddress = (IOPhysicalAddress) pmap_extract( kernel_pmap,
                                                             address );

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
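
/*
 * Example (hypothetical): allocating a page-aligned, physically contiguous
 * buffer and retrieving its physical address, e.g. for a DMA descriptor.
 * 'buf' and 'phys' are illustrative names only.
 *
 *	IOPhysicalAddress phys;
 *	void * buf = IOMallocContiguous(page_size, page_size, &phys);
 *	if (buf) {
 *	    // program the device with 'phys'; access 'buf' from the CPU
 *	    IOFreeContiguous(buf, page_size);
 *	}
 */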

void IOFreeContiguous(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size );

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        kfree((vm_offset_t) allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

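/*
 * Pageable allocations come from a small set of submaps: up to
 * kIOMaxPageableMaps maps, each at least kIOPageableMapSize bytes (the
 * first is created in IOLibInit).  IOIteratePageableMaps runs the callback
 * against the existing maps, starting from the last successful one (the
 * hint) and walking backwards; only when every map fails does it grow the
 * set by suballocating a new map from kernel_map, serialized by
 * gIOKitPageableSpace.lock.
 */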
typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);

kern_return_t IOIteratePageableMaps(vm_size_t size,
                                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index    = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while (attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if (KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if (index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if (KERN_SUCCESS == kr)
            break;

        mutex_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if (index >= (kIOMaxPageableMaps - 1)) {
            mutex_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if (size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                           &min,
                           segSize,
                           TRUE,
                           TRUE,
                           &map);
        if (KERN_SUCCESS != kr) {
            mutex_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        mutex_unlock( gIOKitPageableSpace.lock );

    } while (true);

    return kr;
}

struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t    size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if (kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if (ref.address)
        debug_iomalloc_size += round_page(size);
#endif

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for (index = 0; index < gIOKitPageableSpace.count; index++) {
        if ((address >= gIOKitPageableSpace.maps[index].address)
         && (address <  gIOKitPageableSpace.maps[index].end)) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if (!map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}

void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address );
    if (map)
        kmem_free( map, (vm_offset_t) address, size );

#if IOALLOCDEBUG
    debug_iomalloc_size -= round_page(size);
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa,
                                vm_size_t length, unsigned int options);

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn    ret = kIOReturnSuccess;
    vm_offset_t physAddr;

    if (task != kernel_task)
        return( kIOReturnUnsupported );

    length  = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while ((kIOReturnSuccess == ret) && (length > 0)) {

        physAddr = pmap_extract( kernel_pmap, address );
        if (physAddr)
            ret = IOMapPages( get_task_map(task), address, physAddr, page_size, cacheMode );
        else
            ret = kIOReturnVMError;

        // step to the next page; without this the loop remaps the same page
        address += page_size;
        length  -= page_size;
    }

    return( ret );
}
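
/*
 * Example (hypothetical): marking a kernel buffer write-through.  The
 * cacheMode argument is an unshifted kIO*Cache value (IOTypes.h); the
 * routine shifts it into the kIOMapCacheMask field that IOMapPages
 * expects.  'buf' and 'size' are illustrative names only.
 *
 *	ret = IOSetProcessorCacheMode(kernel_task,
 *	                              (IOVirtualAddress) buf, size,
 *	                              kIOWriteThruCache);
 */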

IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if (task != kernel_task)
        return( kIOReturnUnsupported );

#if __ppc__
    flush_dcache( (vm_offset_t) address, (unsigned) length, false );
#endif

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

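/*
 * The function below infers the remaining stack space from the address of
 * a local variable: kernel stacks are KERNEL_STACK_SIZE bytes and aligned
 * to that size, so masking a stack address with (KERNEL_STACK_SIZE - 1)
 * gives its offset from the base of the stack region, which is the space
 * still available to a downward-growing stack.
 */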
SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOSleep(unsigned milliseconds)
{
    wait_result_t wait_result;

    wait_result = assert_wait_timeout(milliseconds, THREAD_UNINT);
    assert(wait_result == THREAD_WAITING);

    wait_result = thread_block(THREAD_CONTINUE_NULL);
    assert(wait_result == THREAD_TIMED_OUT);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    extern void delay(int usec);

    delay(microseconds);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup();

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}

void IOPanic(const char *reason)
{
    panic(reason);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];	// that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for ( ; regValueArray->name; regValueArray++) {
        if (regValueArray->value == value)
            return (regValueArray->name);
    }
    sprintf(noValue, "0x%x (UNDEFINED)", value);
    return ((const char *) noValue);
}

IOReturn IOFindValueForName(const char *string,
                            const IONamedValue *regValueArray,
                            int *value)
{
    for ( ; regValueArray->name; regValueArray++) {
        if (!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int    intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
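
/*
 * Worked example: IOSizeToAlignment returns the bit index of the highest
 * set bit (floor(log2(size))), and IOAlignmentToSize(align) returns
 * 1 << align, so the two are inverses for exact powers of two:
 *
 *	IOSizeToAlignment(4096) == 12	IOAlignmentToSize(12) == 4096
 *	IOSizeToAlignment(6000) == 12	(rounds down to 2^12)
 *	IOSizeToAlignment(0)    == 0
 */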

IOReturn IONDRVLibrariesInitialize( void )
{
    return( kIOReturnUnsupported );
}