/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 * 17-Apr-91   Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98   cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOKitDebug.h>

mach_timespec_t IOZeroTvalspec = { 0, 0 };

/*
 * Static variables for this module.
 */

static IOThreadFunc threadArgFcn;
static void *       threadArgArg;
static lock_t *     threadArgLock;


enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 16 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 64 * 1024 * 1024 };

typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    mutex_t *   lock;
} gIOKitPageableSpace;
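
/*
 * Descriptive note: gIOKitPageableSpace tracks the submaps used for pageable
 * IOKit allocations. "count" is the number of submaps currently in use,
 * "hint" remembers the index of the last map that satisfied a request, and
 * "lock" serializes growth of the table in IOIteratePageableMaps().
 */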

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    threadArgLock = lock_alloc( true, NULL, NULL );

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                    &gIOKitPageableSpace.maps[0].address,
                    kIOPageableMapSize,
                    TRUE,
                    TRUE,
                    &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    gIOKitPageableSpace.lock           = mutex_alloc( 0 );
    gIOKitPageableSpace.maps[0].end    = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint           = 0;
    gIOKitPageableSpace.count          = 1;

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * We pass an argument to a new thread by saving fcn and arg in some
 * locked variables and starting the thread at ioThreadStart(). This
 * function retrieves fcn and arg and makes the appropriate call.
 *
 */

static void ioThreadStart( void )
{
    IOThreadFunc fcn;
    void *       arg;

    fcn = threadArgFcn;
    arg = threadArgArg;
    lock_done( threadArgLock);

    (*fcn)(arg);

    IOExitThread();
}

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    IOThread thread;

    lock_write( threadArgLock);
    threadArgFcn = fcn;
    threadArgArg = arg;

    thread = kernel_thread( kernel_task, ioThreadStart);

    return(thread);
}
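
/*
 * Illustrative usage sketch (not part of the original file): a client would
 * typically do
 *
 *     static void myWorker(void * arg) { ... }        // hypothetical worker
 *     IOThread t = IOCreateThread(&myWorker, myArg);  // myArg is hypothetical
 *
 * IOCreateThread() takes the write side of threadArgLock, stashes fcn/arg in
 * the globals above, and starts the thread at ioThreadStart(), which copies
 * them out, releases the lock, and calls the function.
 */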

volatile void IOExitThread()
{
    (void) thread_terminate(current_act());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */


void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif
    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree((vm_offset_t)address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t  kr;
    vm_address_t   address;
    vm_address_t   allocationAddress;
    vm_size_t      adjustedSize;
    vm_offset_t    alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                        size, alignMask, KMA_KOBJECT);
        if (KERN_SUCCESS != kr) {
            IOLog("Failed %08x, %08x\n", size, alignment);
            address = 0;
        }

    } else {

        adjustedSize += alignMask;
        allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                        + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
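
/*
 * Descriptive note: for sub-page requests IOMallocAligned() over-allocates
 * with kalloc() and stores two words immediately below the aligned pointer it
 * returns: the adjusted allocation size and the original kalloc() address.
 * IOFreeAligned() reads those two words back to release the underlying block.
 */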

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        kfree((vm_offset_t) allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                           IOPhysicalAddress * physicalAddress)
{
    kern_return_t  kr;
    vm_address_t   address;
    vm_address_t   allocationAddress;
    vm_size_t      adjustedSize;
    vm_offset_t    alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kmem_alloc_contig(kernel_map, &address, size,
                                alignMask, KMA_KOBJECT);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;
        allocationAddress = (vm_address_t)
                                kalloc(adjustedSize);
        if (allocationAddress) {

            address = (allocationAddress + alignMask
                        + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            if (atop(address) != atop(address + size - 1))
                address = round_page(address);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    if( address && physicalAddress)
        *physicalAddress = (IOPhysicalAddress) pmap_extract( kernel_pmap,
                                                               address );

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
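
/*
 * Descriptive note: in the sub-page case above, adjustedSize reserves twice
 * the requested size so that the candidate block can be pushed up to the next
 * page boundary (round_page) whenever it would otherwise straddle two pages;
 * keeping the block within a single page is what makes the kalloc()-backed
 * memory trivially physically contiguous. The same header words as in
 * IOMallocAligned() are stored below the returned pointer so that
 * IOFreeContiguous() can locate and free the original kalloc() block.
 */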

void IOFreeContiguous(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        kfree((vm_offset_t) allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        mutex_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            mutex_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    TRUE,
                    &map);
        if( KERN_SUCCESS != kr) {
            mutex_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        mutex_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}
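
/*
 * Descriptive note: IOIteratePageableMaps() first offers the request to the
 * existing submaps, starting at the hint and walking backwards through the
 * table. If every map rejects it, the function takes the lock, suballocates a
 * new pageable submap (kIOPageableMapSize, or the request size if larger),
 * records it in gIOKitPageableSpace, and loops to retry.
 */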

struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t    size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomalloc_size += round_page(size);
#endif

    return( (void *) ref.address );
}
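
/*
 * Descriptive note: pageable allocations are page granular (the debug counter
 * tracks round_page(size)), and IOFreePageable() must be passed the same size;
 * it locates the owning submap with IOPageableMapForAddress() before handing
 * the memory back to that map.
 */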

vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}

void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomalloc_size -= round_page(size);
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa,
                        vm_size_t length, unsigned int options);

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn    ret = kIOReturnSuccess;
    vm_offset_t physAddr;

    if( task != kernel_task)
        return( kIOReturnUnsupported );

    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        physAddr = pmap_extract( kernel_pmap, address );
        if( physAddr)
            ret = IOMapPages( get_task_map(task), address, physAddr, page_size, cacheMode );
        else
            ret = kIOReturnVMError;

        address += page_size;   // advance to the next page; otherwise the loop remaps the same page
        length -= page_size;
    }

    return( ret );
}
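
/*
 * Descriptive note: IOSetProcessorCacheMode() only operates on kernel_task
 * mappings. It rounds the range out to whole pages, converts the cache mode
 * into IOMapPages() option bits, then walks the range a page at a time,
 * looking up each page's physical address and remapping it with the new
 * cache mode.
 */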


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                  IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

#if __ppc__
    flush_dcache( (vm_offset_t) address, (unsigned) length, false );
#endif

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}
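
/*
 * Descriptive note: this estimates remaining stack by masking the address of
 * a local variable with (KERNEL_STACK_SIZE - 1), i.e. its offset within the
 * KERNEL_STACK_SIZE-aligned kernel stack. Assuming the stack grows downward,
 * that offset approximates the number of bytes still available.
 */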

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOSleep(unsigned milliseconds)
{
    int wait_result;

    assert_wait_timeout(milliseconds, THREAD_INTERRUPTIBLE);
    wait_result = thread_block((void (*)(void))0);
    if (wait_result != THREAD_TIMED_OUT)
        thread_cancel_timer();
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    extern void delay(int usec);

    delay(microseconds);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup();

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}

void IOPanic(const char *reason)
{
    panic(reason);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];    // that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    sprintf(noValue, "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}
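
/*
 * Descriptive note: noValue is a single static buffer, so the fallback string
 * returned for an unknown value may be overwritten by a later call to
 * IOFindNameForValue(); callers should copy it if they need to keep it.
 */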

IOReturn IOFindValueForName(const char *string,
    const IONamedValue *regValueArray,
    int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
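
/*
 * Worked example (illustrative): IOSizeToAlignment() returns the bit position
 * of the highest set bit, i.e. floor(log2(size)), and IOAlignmentToSize() is
 * its inverse for powers of two: IOSizeToAlignment(4096) == 12,
 * IOAlignmentToSize(12) == 4096, and IOSizeToAlignment(4097) == 12 since the
 * result rounds down.
 */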

IOReturn IONDRVLibrariesInitialize( void )
{
    return( kIOReturnUnsupported );
}