/*
 * Copyright (c) 1998-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91	Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98	cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOKitDebug.h>

mach_timespec_t IOZeroTvalspec = { 0, 0 };


/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void * _giDebugLogInternal     = NULL;
void * _giDebugLogDataInternal = NULL;
void * _giDebugReserved1       = NULL;
void * _giDebugReserved2       = NULL;


/*
 * Static variables for this module.
 */

static IOThreadFunc threadArgFcn;
static void *       threadArgArg;
static lock_t *     threadArgLock;


enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 16 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 64 * 1024 * 1024 };

typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    mutex_t *   lock;
} gIOKitPageableSpace;


void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    threadArgLock = lock_alloc( true, NULL, NULL );

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                        &gIOKitPageableSpace.maps[0].address,
                        kIOPageableMapSize,
                        TRUE,
                        TRUE,
                        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    gIOKitPageableSpace.lock        = mutex_alloc( 0 );
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    libInitialized = true;
}
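
/*
 * Illustrative note: IOLibInit() carves an initial pageable submap of
 * kIOPageableMapSize (16MB) out of kernel_map with kmem_suballoc().
 * IOIteratePageableMaps() below grows gIOKitPageableSpace on demand, one
 * submap at a time, up to kIOMaxPageableMaps (16) entries. Every pageable
 * allocation therefore lands inside one of the tracked [address, end)
 * ranges, which is what lets IOPageableMapForAddress() recover the owning
 * map when the memory is freed.
 */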

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * We pass an argument to a new thread by saving fcn and arg in some
 * locked variables and starting the thread at ioThreadStart(). This
 * function retrieves fcn and arg and makes the appropriate call.
 *
 */

static void ioThreadStart( void )
{
    IOThreadFunc fcn;
    void *       arg;

    fcn = threadArgFcn;
    arg = threadArgArg;
    lock_done( threadArgLock);

    (*fcn)(arg);

    IOExitThread();
}

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    IOThread thread;

    lock_write( threadArgLock);
    threadArgFcn = fcn;
    threadArgArg = arg;

    thread = kernel_thread( kernel_task, ioThreadStart);

    return(thread);
}
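
/*
 * Illustrative usage (hypothetical caller): the fcn/arg pair is handed to
 * the new thread through the threadArgLock-guarded globals above, so the
 * write lock taken in IOCreateThread() is only released once
 * ioThreadStart() has copied both values out.
 *
 *	static void myWorker(void * arg)
 *	{
 *	    MyDriverState * state = (MyDriverState *) arg;  // hypothetical type
 *	    // ... do work ...
 *	    // returning from fcn falls through to IOExitThread()
 *	}
 *
 *	IOThread t = IOCreateThread(&myWorker, state);
 */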


volatile void IOExitThread()
{
    (void) thread_terminate(current_act());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */


void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif
    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree((vm_offset_t)address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}
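
/*
 * Note: like kalloc/kfree underneath them, IOMalloc/IOFree require the
 * caller to pass the original allocation size to IOFree; no size header
 * is stored. A minimal (hypothetical) pairing:
 *
 *	UInt8 * buf = (UInt8 *) IOMalloc(256);
 *	if (buf) {
 *	    // ... use buf ...
 *	    IOFree(buf, 256);	// must match the IOMalloc size
 *	}
 */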

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
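
/*
 * Layout sketch for the sub-page case above: the returned pointer is
 * aligned up from the raw kalloc()/kernel_memory_allocate() block, and the
 * two words just below it let IOFreeAligned() recover the raw allocation:
 *
 *	allocationAddress                                   address
 *	v                                                   v
 *	| ...padding... | adjustedSize | allocationAddress | user data... |
 *	                  (vm_size_t)    (vm_address_t)
 *
 * Requests whose adjustedSize reaches page_size skip the header entirely
 * and are plain page allocations freed with kmem_free() directly.
 */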

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, (vm_address_t) allocationAddress, adjustedSize);
        else
            kfree((vm_offset_t) allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kmem_alloc_contig(kernel_map, &address, size,
                               alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;
        allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            if (atop(address) != atop(address + size - 1))
                address = round_page(address);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    if( address && physicalAddress)
        *physicalAddress = (IOPhysicalAddress) pmap_extract( kernel_pmap,
                                                             address );

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
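
/*
 * Illustrative usage (hypothetical DMA buffer): the optional out-parameter
 * saves a separate pmap_extract() call when the physical address of the
 * contiguous buffer is needed for programming hardware:
 *
 *	IOPhysicalAddress phys;
 *	void * buf = IOMallocContiguous(4096, 4096, &phys);
 *	if (buf) {
 *	    // program the device with phys; use buf from the CPU side
 *	    IOFreeContiguous(buf, 4096);
 *	}
 */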

void IOFreeContiguous(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        kfree((vm_offset_t) allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        mutex_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            mutex_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                           &min,
                           segSize,
                           TRUE,
                           TRUE,
                           &map);
        if( KERN_SUCCESS != kr) {
            mutex_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        mutex_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}
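
/*
 * Algorithm note: the callback is first tried against the existing maps,
 * walking backwards from the last successful map (the hint). Only if every
 * existing map fails (typically for lack of space) is a new submap of at
 * least kIOPageableMapSize created under the mutex and the loop retried,
 * until kIOMaxPageableMaps is reached.
 */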

struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t    size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomalloc_size += round_page(size);
#endif

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}

void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomalloc_size -= round_page(size);
#endif
}
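
/*
 * Illustrative pairing (hypothetical caller): IOFreePageable() locates the
 * owning submap by address, so the buffer must have come from
 * IOMallocPageable() and the original size must be passed back. Being
 * pageable, the memory can fault and must not be touched from contexts
 * that cannot block:
 *
 *	void * scratch = IOMallocPageable(64 * 1024, sizeof(void *));
 *	if (scratch) {
 *	    // ... use as pageable scratch memory ...
 *	    IOFreePageable(scratch, 64 * 1024);
 *	}
 */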

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa,
                                vm_size_t length, unsigned int options);

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn    ret = kIOReturnSuccess;
    vm_offset_t physAddr;

    if( task != kernel_task)
        return( kIOReturnUnsupported );

    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        physAddr = pmap_extract( kernel_pmap, address );
        if( physAddr)
            ret = IOMapPages( get_task_map(task), address, physAddr, page_size, cacheMode );
        else
            ret = kIOReturnVMError;

        address += page_size;	// advance to the next page of the range
        length  -= page_size;
    }

    return( ret );
}
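
/*
 * Illustrative usage (hypothetical framebuffer driver), assuming the raw
 * cache-mode constants from IOTypes.h such as kIOInhibitCache or
 * kIOWriteThruCache, which the function encodes via kIOMapCacheShift:
 *
 *	ret = IOSetProcessorCacheMode(kernel_task, vramBase, vramLength,
 *	                              kIOInhibitCache);
 *
 * Only kernel_task mappings are supported; the range is rounded out to
 * whole pages before the per-page remap loop runs.
 */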


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

#if __ppc__
    flush_dcache( (vm_offset_t) address, (unsigned) length, false );
#endif

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}
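
/*
 * Note: this relies on kernel stacks being KERNEL_STACK_SIZE-sized and
 * aligned, so the address of a local variable masked with
 * (KERNEL_STACK_SIZE - 1) is the current offset into the stack, i.e. the
 * bytes left before the base is hit on a descending stack.
 */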

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOSleep(unsigned milliseconds)
{
    wait_result_t wait_result;

    wait_result = assert_wait_timeout(milliseconds, THREAD_UNINT);
    assert(wait_result == THREAD_WAITING);

    wait_result = thread_block(THREAD_CONTINUE_NULL);
    assert(wait_result == THREAD_TIMED_OUT);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    extern void delay(int usec);

    delay(microseconds);
}
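
/*
 * Note on IOSleep vs IODelay: IOSleep() blocks the calling thread for at
 * least the requested time, so it may only be used where sleeping is
 * legal; IODelay() busy-waits on the processor and is the one to use for
 * short waits in contexts that cannot block.
 */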

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup();

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}

void IOPanic(const char *reason)
{
    panic(reason);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];	// that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    sprintf(noValue, "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
                            const IONamedValue *regValueArray,
                            int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}
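
/*
 * Illustrative table (hypothetical values), assuming the value-then-name
 * field order of IONamedValue as used by the two lookups above; the array
 * is terminated by a NULL name:
 *
 *	static const IONamedValue powerStateNames[] = {
 *	    { 0, "off"  },
 *	    { 1, "idle" },
 *	    { 2, "on"   },
 *	    { 0, NULL   }
 *	};
 *
 *	IOFindNameForValue(2, powerStateNames);	// returns "on"
 *	// IOFindValueForName("idle", powerStateNames, &v) stores 1 and
 *	// returns kIOReturnSuccess
 */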

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
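
/*
 * Worked example: IOSizeToAlignment() returns the log2 of the highest set
 * bit, and IOAlignmentToSize() inverts it for exact powers of two:
 *
 *	IOSizeToAlignment(4096);	// == 12 (4096 == 1 << 12)
 *	IOSizeToAlignment(4097);	// == 12 as well (rounds down)
 *	IOAlignmentToSize(12);		// == 4096
 */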

IOReturn IONDRVLibrariesInitialize( void )
{
    return( kIOReturnUnsupported );
}