/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

vm_map_t IOPageableMapForAddress( vm_address_t address );
__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

volatile ppnum_t gIOHighestAllocatedPage;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

bool IOBufferMemoryDescriptor::initWithAddress(
                                  void *      /* address       */ ,
                                  IOByteCount /* withLength    */ ,
                                  IODirection /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithAddress(
                                  vm_address_t /* address       */ ,
                                  IOByteCount  /* withLength    */ ,
                                  IODirection  /* withDirection */ ,
                                  task_t       /* withTask      */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
                                  IOPhysicalAddress /* address       */ ,
                                  IOByteCount       /* withLength    */ ,
                                  IODirection       /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
                                  IOPhysicalRange * /* ranges        */ ,
                                  UInt32            /* withCount     */ ,
                                  IODirection       /* withDirection */ ,
                                  bool              /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithRanges(
                                  IOVirtualRange * /* ranges        */ ,
                                  UInt32           /* withCount     */ ,
                                  IODirection      /* withDirection */ ,
                                  task_t           /* withTask      */ ,
                                  bool             /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment,
                                  task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}

bool IOBufferMemoryDescriptor::initWithPhysicalMask(
                                  task_t            inTask,
                                  IOOptionBits      options,
                                  mach_vm_size_t    capacity,
                                  mach_vm_address_t alignment,
                                  mach_vm_address_t physicalMask)
{
    kern_return_t kr;
    addr64_t      lastIOAddr;
    vm_map_t      vmmap = 0;
    IOOptionBits  iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;

    if (!capacity)
        return false;

    _options      = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer       = 0;

    // Grab the direction and the Auto Prepare bits from the Buffer MD options
    iomdOptions |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

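    // If only a physical mask was supplied, derive an alignment from its low
    // clear bits. For example, with 4K pages (PAGE_MASK == 0xFFF) a
    // physicalMask of 0xFFFFF000 gives ((0xFFFFF000 ^ 0xFFF) & 0xFFF) + 1
    // == 0x1000 (page alignment), and 0xFFFFFF00 gives 0x100.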
    if (physicalMask && (alignment <= 1))
        alignment = ((physicalMask ^ PAGE_MASK) & PAGE_MASK) + 1;

    if ((options & kIOMemoryPhysicallyContiguous) && !physicalMask)
        physicalMask = 0xFFFFFFFF;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        ipc_port_t sharedMem;
        vm_size_t  size = round_page_32(capacity);

        // must create the entry before any pages are allocated

        // set flags for entry + object create
        vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE
                                    | MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;

        // set memory entry cache mode
        switch (options & kIOMapCacheMask)
        {
            case kIOMapInhibitCache:
                SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                break;

            case kIOMapWriteThruCache:
                SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                break;

            case kIOMapWriteCombineCache:
                SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                break;

            case kIOMapCopybackCache:
                SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                break;

            case kIOMapDefaultCache:
            default:
                SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                break;
        }

        kr = mach_make_memory_entry( vmmap,
                                     &size, 0,
                                     memEntryCacheMode, &sharedMem,
                                     NULL );

        if( (KERN_SUCCESS == kr) && (size != round_page_32(capacity))) {
            ipc_port_release_send( sharedMem );
            kr = kIOReturnVMError;
        }
        if( KERN_SUCCESS != kr)
            return( false );

        _memEntry = (void *) sharedMem;
#if IOALLOCDEBUG
        debug_iomallocpageable_size += size;
#endif
        if (NULL == inTask)
            inTask = kernel_task;
        else if (inTask == kernel_task)
        {
            vmmap = kernel_map;
        }
        else
        {
            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    return( false );
            }
            vmmap = get_task_map(inTask);
            vm_map_reference(vmmap);
            reserved->map = vmmap;
        }
    }
    else
    {
        if (IOMapper::gSystem)
            // assuming mapped space is 2G
            lastIOAddr = (1UL << 31) - PAGE_SIZE;
        else
            lastIOAddr = ptoa_64(gIOHighestAllocatedPage);

        if (physicalMask && (lastIOAddr != (lastIOAddr & physicalMask)))
        {
            mach_vm_address_t address;
            iomdOptions &= ~kIOMemoryTypeVirtual;
            iomdOptions |= kIOMemoryTypePhysical;

            address = IOMallocPhysical(capacity, physicalMask);
            _buffer = (void *) address;
            if (!_buffer)
                return false;

            if (inTask == kernel_task)
            {
                vmmap = kernel_map;
            }
            else if (NULL != inTask)
            {
                if( !reserved) {
                    reserved = IONew( ExpansionData, 1 );
                    if( !reserved)
                        return( false );
                }
                vmmap = get_task_map(inTask);
                vm_map_reference(vmmap);
                reserved->map = vmmap;
            }
            inTask = 0;
        }
        else
        {
            // Buffers shouldn't auto-prepare; they should be prepared explicitly.
            // But that was never enforced, so what are you going to do?
            iomdOptions |= kIOMemoryAutoPrepare;

            /* Allocate a wired-down buffer inside kernel space. */
            if (options & kIOMemoryPhysicallyContiguous)
                _buffer = (void *) IOKernelAllocateContiguous(capacity, alignment);
            else if (alignment > 1)
                _buffer = IOMallocAligned(capacity, alignment);
            else
                _buffer = IOMalloc(capacity);
            if (!_buffer)
                return false;
        }
    }

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithOptions(&_singleRange.v, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (physicalMask && !IOMapper::gSystem)
    {
        IOMDDMACharacteristics mdSummary;

        bzero(&mdSummary, sizeof(mdSummary));
        IOReturn rtn = dmaCommandOperation(
                kIOMDGetCharacteristics,
                &mdSummary, sizeof(mdSummary));
        if (rtn)
            return false;

        if (mdSummary.fHighestPage)
        {
            ppnum_t highest;
            while (mdSummary.fHighestPage > (highest = gIOHighestAllocatedPage))
            {
                if (OSCompareAndSwap(highest, mdSummary.fHighestPage,
                                     (UInt32 *) &gIOHighestAllocatedPage))
                    break;
            }
            lastIOAddr = ptoa_64(mdSummary.fHighestPage);
        }
        else
            lastIOAddr = ptoa_64(gIOLastPage);

        if (lastIOAddr != (lastIOAddr & physicalMask))
        {
            if (kIOMemoryTypePhysical != (_flags & kIOMemoryTypeMask))
            {
                // flag a retry
                _physSegCount = 1;
            }
            return false;
        }
    }

    if (vmmap)
    {
        kr = doMap(vmmap, (IOVirtualAddress *) &_buffer, kIOMapAnywhere, 0, capacity);
        if (KERN_SUCCESS != kr)
        {
            _buffer = 0;
            return( false );
        }

        if (kIOMemoryTypeVirtual & iomdOptions)
            _singleRange.v.address = (vm_address_t) _buffer;
    }

    setLength(capacity);

    return true;
}

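/*
 * Note on the factory methods below: when initWithOptions / initWithPhysicalMask
 * fails only because the allocation landed above the requested physical mask,
 * it records that in _physSegCount ("flag a retry" above) before returning
 * false. Each factory checks the flag, releases the first object and
 * constructs a second one; by then gIOHighestAllocatedPage has been raised,
 * so the second attempt takes the explicitly masked IOMallocPhysical path.
 */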
IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
                                  task_t       inTask,
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, inTask)) {
        bool retry = me->_physSegCount;
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithOptions(options, capacity, alignment, inTask))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                                  task_t            inTask,
                                  IOOptionBits      options,
                                  mach_vm_size_t    capacity,
                                  mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
    {
        bool retry = me->_physSegCount;
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}
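
/*
 * Usage sketch (illustrative only; the options, size and 32-bit mask are
 * assumptions, not taken from this file): a driver needing DMA-reachable
 * memory below 4GB might allocate it like this:
 *
 *     IOBufferMemoryDescriptor * bmd =
 *         IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
 *             kernel_task, kIODirectionOutIn, 4096, 0x00000000FFFFFFFFULL);
 *     if (bmd) {
 *         bmd->prepare();     // wire the memory before starting DMA
 *         // ... program the device, perform the transfer ...
 *         bmd->complete();
 *         bmd->release();
 *     }
 */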

bool IOBufferMemoryDescriptor::initWithOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment)
{
    return( initWithOptions(options, capacity, alignment, kernel_task) );
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment)
{
    return(IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, options, capacity, alignment));
}

/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 ));
}
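
/*
 * For example (illustrative only), a 16KB scratch buffer for an outbound,
 * non-contiguous transfer could be created with:
 *
 *     IOBufferMemoryDescriptor * buf =
 *         IOBufferMemoryDescriptor::withCapacity(16384, kIODirectionOut, false);
 *
 * The returned descriptor's length starts out equal to its capacity; the
 * caller release()s it when done.
 */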

/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inLength, inLength ))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous))
    {
        bool retry = me->_physSegCount;
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}
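
/*
 * For example (illustrative only; the command block is a placeholder),
 * wrapping a small, fixed command so it can be handed to code that expects
 * an IOMemoryDescriptor:
 *
 *     static const UInt8 cmd[4] = { 0x12, 0x34, 0x56, 0x78 };
 *     IOBufferMemoryDescriptor * cmdDesc =
 *         IOBufferMemoryDescriptor::withBytes(cmd, sizeof(cmd),
 *                                             kIODirectionOut, false);
 *
 * The bytes are copied, so cmd need not stay valid after withBytes returns.
 */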

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags     = _flags;
    IOOptionBits     options   = _options;
    vm_size_t        size      = _capacity;
    void *           buffer    = _buffer;
    IOVirtualAddress source    = _singleRange.v.address;
    vm_map_t         vmmap     = 0;
    vm_offset_t      alignment = _alignment;

    if (reserved)
    {
        vmmap = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        if (!buffer || vmmap)
            debug_iomallocpageable_size -= round_page_32(size);
#endif
        if (buffer)
        {
            if (vmmap)
                vm_deallocate(vmmap, (vm_address_t) buffer, round_page_32(size));
            else
                IOFreePageable(buffer, size);
        }
    }
    else if (buffer)
    {
        if (kIOMemoryTypePhysical == (flags & kIOMemoryTypeMask))
        {
            if (vmmap)
                vm_deallocate(vmmap, (vm_address_t) buffer, round_page_32(size));
            IOFreePhysical((mach_vm_address_t) source, size);
        }
        else if (options & kIOMemoryPhysicallyContiguous)
            IOKernelFreeContiguous((mach_vm_address_t) buffer, size);
        else if (alignment > 1)
            IOFreeAligned(buffer, size);
        else
            IOFree(buffer, size);
    }
    if (vmmap)
        vm_map_deallocate(vmmap);
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _singleRange.v.length = length;
}
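
/*
 * For example (illustrative only; 'buf' stands for an existing descriptor
 * created with a larger capacity), the same buffer can be reused for a
 * shorter transfer without reallocating:
 *
 *     buf->setLength(2048);   // next transfer moves only 2048 bytes
 *     buf->prepare();
 *     // ... perform the 2048-byte I/O ...
 *     buf->complete();
 */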

/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _direction = direction;
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset   = _length;
    _length += actualBytesToCopy;
    _singleRange.v.length += actualBytesToCopy;

    if (_task == kernel_task)
        bcopy(/* from */ bytes, (void *)(_singleRange.v.address + offset),
              actualBytesToCopy);
    else
        writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}
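
/*
 * For example (illustrative only; 'desc', 'header' and 'payload' are
 * placeholders), contents can be built up incrementally, much as
 * initWithBytes does internally:
 *
 *     desc->setLength(0);                           // start empty
 *     desc->appendBytes(&header, sizeof(header));
 *     desc->appendBytes(payload, payloadLen);
 *
 * Note that appendBytes silently truncates at the descriptor's capacity.
 */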

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical == (_flags & kIOMemoryTypeMask))
        return _buffer;
    else
        return (void *)_singleRange.v.address;
}

/*
 * getBytesNoCopy:
 *
 * Return the virtual address at an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;
    if (kIOMemoryTypePhysical == (_flags & kIOMemoryTypeMask))
        address = (IOVirtualAddress) _buffer;
    else
        address = _singleRange.v.address;

    if (start < _length && (start + withLength) <= _length)
        return (void *)(address + start);
    return 0;
}
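
/*
 * For example (illustrative only; 'buf' is a placeholder descriptor), a
 * caller can take the kernel virtual address of a sub-range and write into
 * it directly:
 *
 *     void * p = buf->getBytesNoCopy(0, 512);
 *     if (p)
 *         bzero(p, 512);
 *
 * The requested range must lie within the descriptor's current length;
 * otherwise 0 is returned.
 */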

/* DEPRECATED */ void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
/* DEPRECATED */                                                    IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
        *lengthOfSegment = _length - offset;

    return bytes;
}

OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);