/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

vm_map_t IOPageableMapForAddress( vm_address_t address );
__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
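// gIOHighestAllocatedPage records the highest physical page number handed out
// to buffer allocations so far. initWithPhysicalMask() compares it (via
// ptoa_64) against a requested physical mask to decide whether the ordinary
// kernel allocators can satisfy the request or IOMallocPhysical() is needed.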
volatile ppnum_t gIOHighestAllocatedPage;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

bool IOBufferMemoryDescriptor::initWithAddress(
                                  void *      /* address       */ ,
                                  IOByteCount /* withLength    */ ,
                                  IODirection /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithAddress(
                                  vm_address_t /* address       */ ,
                                  IOByteCount  /* withLength    */ ,
                                  IODirection  /* withDirection */ ,
                                  task_t       /* withTask      */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
                                  IOPhysicalAddress /* address       */ ,
                                  IOByteCount       /* withLength    */ ,
                                  IODirection       /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
                                  IOPhysicalRange * /* ranges        */ ,
                                  UInt32            /* withCount     */ ,
                                  IODirection       /* withDirection */ ,
                                  bool              /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithRanges(
                                  IOVirtualRange * /* ranges        */ ,
                                  UInt32           /* withCount     */ ,
                                  IODirection      /* withDirection */ ,
                                  task_t           /* withTask      */ ,
                                  bool             /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}

bool IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t alignment,
    mach_vm_address_t physicalMask)
{
    kern_return_t kr;
    addr64_t      lastIOAddr;
    vm_map_t      vmmap = 0;
    IOOptionBits  iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;

    if (!capacity)
        return false;

    _options      = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer       = 0;

    // Grab the direction and the Auto Prepare bits from the Buffer MD options
    iomdOptions |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    if (physicalMask && (alignment <= 1))
        // Derive the natural alignment from the mask's low zero bits;
        // e.g. a mask of 0xFFFFF000 yields 4096-byte alignment.
        alignment = ((physicalMask ^ PAGE_MASK) & PAGE_MASK) + 1;

    if ((options & kIOMemoryPhysicallyContiguous) && !physicalMask)
        physicalMask = 0xFFFFFFFF;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        ipc_port_t sharedMem;
        vm_size_t  size = round_page_32(capacity);

        // must create the entry before any pages are allocated

        // set flags for entry + object create
        vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE
                                    | MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;

        // set memory entry cache mode
        switch (options & kIOMapCacheMask)
        {
            case kIOMapInhibitCache:
                SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                break;

            case kIOMapWriteThruCache:
                SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                break;

            case kIOMapWriteCombineCache:
                SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                break;

            case kIOMapCopybackCache:
                SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                break;

            case kIOMapDefaultCache:
            default:
                SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                break;
        }

        kr = mach_make_memory_entry( vmmap,
                    &size, 0,
                    memEntryCacheMode, &sharedMem,
                    NULL );

        if( (KERN_SUCCESS == kr) && (size != round_page_32(capacity))) {
            ipc_port_release_send( sharedMem );
            kr = kIOReturnVMError;
        }
        if( KERN_SUCCESS != kr)
            return( false );

        _memEntry = (void *) sharedMem;
#if IOALLOCDEBUG
        debug_iomallocpageable_size += size;
#endif
        if (NULL == inTask)
            inTask = kernel_task;
        else if (inTask == kernel_task)
        {
            vmmap = kernel_map;
        }
        else
        {
            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    return( false );
            }
            vmmap = get_task_map(inTask);
            vm_map_reference(vmmap);
            reserved->map = vmmap;
        }
    }
    else
    {
        if (IOMapper::gSystem)
            // assuming mapped space is 2G
            lastIOAddr = (1UL << 31) - PAGE_SIZE;
        else
            lastIOAddr = ptoa_64(gIOHighestAllocatedPage);

        if (physicalMask && (lastIOAddr != (lastIOAddr & physicalMask)))
        {
            mach_vm_address_t address;
            iomdOptions &= ~kIOMemoryTypeVirtual;
            iomdOptions |= kIOMemoryTypePhysical;

            address = IOMallocPhysical(capacity, physicalMask);
            _buffer = (void *) address;
            if (!_buffer)
                return false;

            if (inTask == kernel_task)
            {
                vmmap = kernel_map;
            }
            else if (NULL != inTask)
            {
                if( !reserved) {
                    reserved = IONew( ExpansionData, 1 );
                    if( !reserved)
                        return( false );
                }
                vmmap = get_task_map(inTask);
                vm_map_reference(vmmap);
                reserved->map = vmmap;
            }
            inTask = 0;
        }
        else
        {
            // Buffers should not auto-prepare; they should be prepared
            // explicitly. That was never enforced, however, so keep the
            // old behaviour.
            iomdOptions |= kIOMemoryAutoPrepare;

            /* Allocate a wired-down buffer inside kernel space. */
            if (options & kIOMemoryPhysicallyContiguous)
                _buffer = (void *) IOKernelAllocateContiguous(capacity, alignment);
            else if (alignment > 1)
                _buffer = IOMallocAligned(capacity, alignment);
            else
                _buffer = IOMalloc(capacity);
            if (!_buffer)
                return false;
        }
    }

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithOptions(&_singleRange.v, 1, 0,
                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (physicalMask && !IOMapper::gSystem)
    {
        IOMDDMACharacteristics mdSummary;

        bzero(&mdSummary, sizeof(mdSummary));
        IOReturn rtn = dmaCommandOperation(
                kIOMDGetCharacteristics,
                &mdSummary, sizeof(mdSummary));
        if (rtn)
            return false;

        if (mdSummary.fHighestPage)
        {
            ppnum_t highest;
            // Atomically raise gIOHighestAllocatedPage to the highest
            // physical page used by this buffer.
            while (mdSummary.fHighestPage > (highest = gIOHighestAllocatedPage))
            {
                if (OSCompareAndSwap(highest, mdSummary.fHighestPage,
                            (UInt32 *) &gIOHighestAllocatedPage))
                    break;
            }
            lastIOAddr = ptoa_64(mdSummary.fHighestPage);
        }
        else
            lastIOAddr = ptoa_64(gIOLastPage);

        if (lastIOAddr != (lastIOAddr & physicalMask))
        {
            if (kIOMemoryTypePhysical != (_flags & kIOMemoryTypeMask))
            {
                // flag a retry
                _physSegCount = 1;
            }
            return false;
        }
    }

    if (vmmap)
    {
        kr = doMap(vmmap, (IOVirtualAddress *) &_buffer, kIOMapAnywhere, 0, capacity);
        if (KERN_SUCCESS != kr)
        {
            _buffer = 0;
            return( false );
        }

        if (kIOMemoryTypeVirtual & iomdOptions)
            _singleRange.v.address = (vm_address_t) _buffer;
    }

    setLength(capacity);

    return true;
}

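/*
 * The retry path in the factory methods below: if initWithPhysicalMask()
 * discovers after the allocation that the buffer cannot honour the caller's
 * physical mask without the system mapper, it fails and sets _physSegCount = 1
 * as a "retry" flag. The factories check that flag and construct the
 * descriptor a second time; by then gIOHighestAllocatedPage has been raised,
 * so the second attempt goes down the IOMallocPhysical() path instead.
 */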
IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
                                            task_t       inTask,
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, inTask)) {
        bool retry = me->_physSegCount;
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithOptions(options, capacity, alignment, inTask))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                                            task_t            inTask,
                                            IOOptionBits      options,
                                            mach_vm_size_t    capacity,
                                            mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
    {
        bool retry = me->_physSegCount;
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}

bool IOBufferMemoryDescriptor::initWithOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment)
{
    return( initWithOptions(options, capacity, alignment, kernel_task) );
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment)
{
    return(IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, options, capacity, alignment));
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 ));
}
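/*
 * Illustrative usage sketch: a kernel-task client might allocate, use and
 * release a buffer roughly like this:
 *
 *     IOBufferMemoryDescriptor * buf =
 *         IOBufferMemoryDescriptor::withCapacity(4096, kIODirectionOutIn, false);
 *     if (buf) {
 *         void * p = buf->getBytesNoCopy();   // kernel virtual address
 *         // ... fill p, hand buf to an I/O operation ...
 *         buf->release();
 *     }
 */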

/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inLength, inLength ))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous))
    {
        bool retry = me->_physSegCount;
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags     = _flags;
    IOOptionBits     options   = _options;
    vm_size_t        size      = _capacity;
    void *           buffer    = _buffer;
    IOVirtualAddress source    = _singleRange.v.address;
    vm_map_t         vmmap     = 0;
    vm_offset_t      alignment = _alignment;

    if (reserved)
    {
        vmmap = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        if (!buffer || vmmap)
            debug_iomallocpageable_size -= round_page_32(size);
#endif
        if (buffer)
        {
            if (vmmap)
                vm_deallocate(vmmap, (vm_address_t) buffer, round_page_32(size));
            else
                IOFreePageable(buffer, size);
        }
    }
    else if (buffer)
    {
        if (kIOMemoryTypePhysical == (flags & kIOMemoryTypeMask))
        {
            if (vmmap)
                vm_deallocate(vmmap, (vm_address_t) buffer, round_page_32(size));
            IOFreePhysical((mach_vm_address_t) source, size);
        }
        else if (options & kIOMemoryPhysicallyContiguous)
            IOKernelFreeContiguous((mach_vm_address_t) buffer, size);
        else if (alignment > 1)
            IOFreeAligned(buffer, size);
        else
            IOFree(buffer, size);
    }
    if (vmmap)
        vm_map_deallocate(vmmap);
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _singleRange.v.length = length;
}
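/*
 * Reuse sketch (illustrative): a descriptor created withCapacity(4096, ...)
 * can be reused for a shorter transfer without reallocating:
 *
 *     buf->setLength(512);    // only the first 512 bytes take part in the I/O
 */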

/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _direction = direction;
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset   = _length;
    _length += actualBytesToCopy;
    _singleRange.v.length += actualBytesToCopy;

    if (_task == kernel_task)
        bcopy(/* from */ bytes, (void *)(_singleRange.v.address + offset),
              actualBytesToCopy);
    else
        writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}
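/*
 * Incremental-fill sketch (illustrative; the same pattern initWithBytes uses,
 * with hypothetical payload pieces hdr and body):
 *
 *     buf->setLength(0);                       // start empty
 *     buf->appendBytes(hdr,  sizeof(hdr));     // length grows with each append,
 *     buf->appendBytes(body, bodyLen);         // clipped to the capacity
 */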

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical == (_flags & kIOMemoryTypeMask))
        return _buffer;
    else
        return (void *)_singleRange.v.address;
}

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;
    if (kIOMemoryTypePhysical == (_flags & kIOMemoryTypeMask))
        address = (IOVirtualAddress) _buffer;
    else
        address = _singleRange.v.address;

    if (start < _length && (start + withLength) <= _length)
        return (void *)(address + start);
    return 0;
}

/* DEPRECATED */ void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
/* DEPRECATED */                                  IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
        *lengthOfSegment = _length - offset;

    return bytes;
}

OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);