// apple/xnu: iokit/Kernel/IOBufferMemoryDescriptor.cpp (blob 4821b81dc39980046bbf1d7c1b5d564ebad7eb3f)
/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

vm_map_t IOPageableMapForAddress( vm_address_t address );
__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

volatile ppnum_t gIOHighestAllocatedPage;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

bool IOBufferMemoryDescriptor::initWithAddress(
    void *      /* address       */ ,
    IOByteCount /* withLength    */ ,
    IODirection /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithAddress(
    vm_address_t /* address       */ ,
    IOByteCount  /* withLength    */ ,
    IODirection  /* withDirection */ ,
    task_t       /* withTask      */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
    IOPhysicalAddress /* address       */ ,
    IOByteCount       /* withLength    */ ,
    IODirection       /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
    IOPhysicalRange * /* ranges        */ ,
    UInt32            /* withCount     */ ,
    IODirection       /* withDirection */ ,
    bool              /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithRanges(
    IOVirtualRange * /* ranges        */ ,
    UInt32           /* withCount     */ ,
    IODirection      /* withDirection */ ,
    task_t           /* withTask      */ ,
    bool             /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}

bool IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t alignment,
    mach_vm_address_t physicalMask)
{
    kern_return_t kr;
    addr64_t      lastIOAddr;
    vm_map_t      vmmap = 0;
    IOOptionBits  iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;

    if (!capacity)
        return false;

    _options      = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer       = 0;

    // Grab the direction and the Auto Prepare bits from the Buffer MD options
    iomdOptions |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    if (physicalMask && (alignment <= 1))
        alignment = ((physicalMask ^ PAGE_MASK) & PAGE_MASK) + 1;

    if ((options & kIOMemoryPhysicallyContiguous) && !physicalMask)
        physicalMask = 0xFFFFFFFF;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        ipc_port_t sharedMem;
        vm_size_t  size = round_page_32(capacity);

        // must create the entry before any pages are allocated

        // set flags for entry + object create
        vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE
                                    | MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;

        // set memory entry cache mode
        switch (options & kIOMapCacheMask)
        {
            case kIOMapInhibitCache:
                SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                break;

            case kIOMapWriteThruCache:
                SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                break;

            case kIOMapWriteCombineCache:
                SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                break;

            case kIOMapCopybackCache:
                SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                break;

            case kIOMapDefaultCache:
            default:
                SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                break;
        }

        kr = mach_make_memory_entry( vmmap,
                                     &size, 0,
                                     memEntryCacheMode, &sharedMem,
                                     NULL );

        if( (KERN_SUCCESS == kr) && (size != round_page_32(capacity))) {
            ipc_port_release_send( sharedMem );
            kr = kIOReturnVMError;
        }
        if( KERN_SUCCESS != kr)
            return( false );

        _memEntry = (void *) sharedMem;
#if IOALLOCDEBUG
        debug_iomallocpageable_size += size;
#endif
        if (NULL == inTask)
            inTask = kernel_task;
        else if (inTask == kernel_task)
        {
            vmmap = kernel_map;
        }
        else
        {
            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    return( false );
            }
            vmmap = get_task_map(inTask);
            vm_map_reference(vmmap);
            reserved->map = vmmap;
        }
    }
    else
    {
        if (IOMapper::gSystem)
            // assuming mapped space is 2G
            lastIOAddr = (1UL << 31) - PAGE_SIZE;
        else
            lastIOAddr = ptoa_64(gIOHighestAllocatedPage);

        if (physicalMask && (lastIOAddr != (lastIOAddr & physicalMask)))
        {
            mach_vm_address_t address;
            iomdOptions &= ~kIOMemoryTypeVirtual;
            iomdOptions |= kIOMemoryTypePhysical;

            address = IOMallocPhysical(capacity, physicalMask);
            _buffer = (void *) address;
            if (!_buffer)
                return false;

            if (inTask == kernel_task)
            {
                vmmap = kernel_map;
            }
            else if (NULL != inTask)
            {
                if( !reserved) {
                    reserved = IONew( ExpansionData, 1 );
                    if( !reserved)
                        return( false );
                }
                vmmap = get_task_map(inTask);
                vm_map_reference(vmmap);
                reserved->map = vmmap;
            }
            inTask = 0;
        }
        else
        {
            // Buffers shouldn't auto prepare; they should be prepared explicitly.
            // But this was never enforced, so the old behaviour is preserved here.
            iomdOptions |= kIOMemoryAutoPrepare;

            /* Allocate a wired-down buffer inside kernel space. */
            if (options & kIOMemoryPhysicallyContiguous)
                _buffer = (void *) IOKernelAllocateContiguous(capacity, alignment);
            else if (alignment > 1)
                _buffer = IOMallocAligned(capacity, alignment);
            else
                _buffer = IOMalloc(capacity);
            if (!_buffer)
                return false;
        }
    }

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithOptions(&_singleRange.v, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (physicalMask && !IOMapper::gSystem)
    {
        IOMDDMACharacteristics mdSummary;

        bzero(&mdSummary, sizeof(mdSummary));
        IOReturn rtn = dmaCommandOperation(
            kIOMDGetCharacteristics,
            &mdSummary, sizeof(mdSummary));
        if (rtn)
            return false;

        if (mdSummary.fHighestPage)
        {
            ppnum_t highest;
            while (mdSummary.fHighestPage > (highest = gIOHighestAllocatedPage))
            {
                if (OSCompareAndSwap(highest, mdSummary.fHighestPage,
                                     (UInt32 *) &gIOHighestAllocatedPage))
                    break;
            }
            lastIOAddr = ptoa_64(mdSummary.fHighestPage);
        }
        else
            lastIOAddr = ptoa_64(gIOLastPage);

        if (lastIOAddr != (lastIOAddr & physicalMask))
        {
            if (kIOMemoryTypePhysical != (_flags & kIOMemoryTypeMask))
            {
                // flag a retry
                _physSegCount = 1;
            }
            return false;
        }
    }

    if (vmmap)
    {
        kr = doMap(vmmap, (IOVirtualAddress *) &_buffer, kIOMapAnywhere, 0, capacity);
        if (KERN_SUCCESS != kr)
        {
            _buffer = 0;
            return( false );
        }

        if (kIOMemoryTypeVirtual & iomdOptions)
            _singleRange.v.address = (vm_address_t) _buffer;
    }

    setLength(capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
    task_t       inTask,
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, inTask)) {
        bool retry = me->_physSegCount;
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithOptions(options, capacity, alignment, inTask))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}
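
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * a driver that wants a pageable buffer mapped into a user client's task
 * might call inTaskWithOptions() as below.  The task handle, size, and
 * direction are assumptions for the example; note the code above requires
 * kIOMemoryPageable whenever the task is not kernel_task.
 *
 *   IOBufferMemoryDescriptor * bmd =
 *       IOBufferMemoryDescriptor::inTaskWithOptions(
 *           clientTask,                              // hypothetical task to map into
 *           kIODirectionOutIn | kIOMemoryPageable,   // pageable, bidirectional
 *           65536,                                   // capacity in bytes
 *           page_size);                              // page alignment
 *   if (bmd) {
 *       // ... use the descriptor, then ...
 *       bmd->release();
 *   }
 */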

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
    {
        bool retry = me->_physSegCount;
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}
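
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * a driver whose DMA engine can only address the low 32 bits of physical
 * memory might constrain the allocation with a physical mask.  The size
 * and mask value are assumptions for the example.
 *
 *   IOBufferMemoryDescriptor * bmd =
 *       IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
 *           kernel_task,
 *           kIODirectionOutIn | kIOMemoryPhysicallyContiguous,
 *           4096,                       // capacity in bytes
 *           0x00000000FFFFF000ULL);     // pages below 4 GB, page aligned
 *   if (bmd) {
 *       // the factory retries once if the first allocation lands above the
 *       // mask (see the _physSegCount retry flag used above)
 *       bmd->release();
 *   }
 */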

bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    return( initWithOptions(options, capacity, alignment, kernel_task) );
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    return(IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, options, capacity, alignment));
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes.  The descriptor's length is initially set to the capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
                inDirection | kIOMemoryUnshared
                 | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                inCapacity, inContiguous ? inCapacity : 1 ));
}
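
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * withCapacity() is the simplest way to wrap a kernel buffer in a memory
 * descriptor.  The 1 KB size and the direction are assumptions.
 *
 *   IOBufferMemoryDescriptor * bmd =
 *       IOBufferMemoryDescriptor::withCapacity(1024, kIODirectionOutIn, false);
 *   if (bmd) {
 *       bzero(bmd->getBytesNoCopy(), bmd->getCapacity());  // length starts at capacity
 *       bmd->release();
 *   }
 */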

/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithOptions(
            inDirection | kIOMemoryUnshared
             | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
            inLength, inLength ))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous))
    {
        bool retry = me->_physSegCount;
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}
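
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * withBytes() copies an existing kernel buffer into a freshly allocated
 * descriptor; the source array is an assumption for the example.
 *
 *   static const UInt8 cmd[4] = { 0x12, 0x00, 0x00, 0x24 };
 *   IOBufferMemoryDescriptor * bmd =
 *       IOBufferMemoryDescriptor::withBytes(cmd, sizeof(cmd), kIODirectionOut, false);
 *   if (bmd) {
 *       // length and capacity are both sizeof(cmd) at this point
 *       bmd->release();
 *   }
 */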

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags     = _flags;
    IOOptionBits     options   = _options;
    vm_size_t        size      = _capacity;
    void *           buffer    = _buffer;
    IOVirtualAddress source    = _singleRange.v.address;
    vm_map_t         vmmap     = 0;
    vm_offset_t      alignment = _alignment;

    if (reserved)
    {
        vmmap = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        if (!buffer || vmmap)
            debug_iomallocpageable_size -= round_page_32(size);
#endif
        if (buffer)
        {
            if (vmmap)
                vm_deallocate(vmmap, (vm_address_t) buffer, round_page_32(size));
            else
                IOFreePageable(buffer, size);
        }
    }
    else if (buffer)
    {
        if (kIOMemoryTypePhysical == (flags & kIOMemoryTypeMask))
        {
            if (vmmap)
                vm_deallocate(vmmap, (vm_address_t) buffer, round_page_32(size));
            IOFreePhysical((mach_vm_address_t) source, size);
        }
        else if (options & kIOMemoryPhysicallyContiguous)
            IOKernelFreeContiguous((mach_vm_address_t) buffer, size);
        else if (alignment > 1)
            IOFreeAligned(buffer, size);
        else
            IOFree(buffer, size);
    }
    if (vmmap)
        vm_map_deallocate(vmmap);
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor.  When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.  The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes).  Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _singleRange.v.length = length;
}
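
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * a descriptor allocated once can be trimmed for a shorter transfer and
 * restored afterwards; bmd and the sizes are assumptions.
 *
 *   bmd->setLength(512);                  // next transfer moves only 512 bytes
 *   // ... issue the I/O ...
 *   bmd->setLength(bmd->getCapacity());   // restore full length for later reuse
 */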

/*
 * setDirection:
 *
 * Change the direction of the transfer.  This method allows one to redirect
 * the descriptor's transfer direction.  This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _direction = direction;
}
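
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * the same buffer can serve a write phase followed by a read phase by
 * flipping the direction between transfers; bmd is an assumption.
 *
 *   bmd->setDirection(kIODirectionOut);   // buffer is the source of the transfer
 *   // ... write phase ...
 *   bmd->setDirection(kIODirectionIn);    // buffer is now the destination
 */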

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer.  This method automatically
 * maintains the memory descriptor buffer length.  Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset = _length;
    _length += actualBytesToCopy;
    _singleRange.v.length += actualBytesToCopy;

    if (_task == kernel_task)
        bcopy(/* from */ bytes, (void *)(_singleRange.v.address + offset),
              actualBytesToCopy);
    else
        writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}
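
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * appendBytes() lets a caller build up the contents incrementally after
 * starting from length 0; bmd, header, and payload are assumptions.
 *
 *   bmd->setLength(0);                          // start empty
 *   bmd->appendBytes(&header, sizeof(header));  // length becomes sizeof(header)
 *   bmd->appendBytes(payload, payloadLen);      // silently truncated at capacity
 */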

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical == (_flags & kIOMemoryTypeMask))
        return _buffer;
    else
        return (void *)_singleRange.v.address;
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;
    if (kIOMemoryTypePhysical == (_flags & kIOMemoryTypeMask))
        address = (IOVirtualAddress) _buffer;
    else
        address = _singleRange.v.address;

    if (start < _length && (start + withLength) <= _length)
        return (void *)(address + start);
    return 0;
}
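
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * getBytesNoCopy() exposes the kernel virtual address of the backing buffer
 * so it can be touched directly; the ranged variant returns 0 when the
 * window falls outside the current length.  bmd and the offsets are
 * assumptions.
 *
 *   UInt8 * base  = (UInt8 *) bmd->getBytesNoCopy();
 *   void  * slice = bmd->getBytesNoCopy(16, 32);   // bytes 16..47, if within length
 *   if (slice) {
 *       // safe to read or write the 32-byte window directly
 *   }
 */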

/* DEPRECATED */ void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
/* DEPRECATED */                                                    IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
        *lengthOfSegment = _length - offset;

    return bytes;
}

OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);