/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define IOKIT_ENABLE_SHARED_PTR

#include <IOKit/assert.h>

#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>
#include <libkern/OSDebug.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"

#define MAPTYPE(type)        ((UInt) (type) & kTypeMask)
#define IS_NONCOHERENT(type) (MAPTYPE(type) == kNonCoherent)

enum {
    kWalkSyncIn       = 0x01, // bounce -> md
    kWalkSyncOut      = 0x02, // bounce <- md
    kWalkSyncAlways   = 0x04,
    kWalkPreflight    = 0x08,
    kWalkDoubleBuffer = 0x10,
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80
};


#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary


#if 1
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction)                                    \
    ((kIODirectionNone == (direction))                                    \
    || (kWalkSyncAlways & (op))                                           \
    || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)         \
    & (direction)))

#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif
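
// Informal truth table for SHOULD_COPY_DIR, for reference:
//   SHOULD_COPY_DIR(kWalkSyncOut, kIODirectionNone)                -> true   (no direction => copy both ways)
//   SHOULD_COPY_DIR(kWalkSyncIn | kWalkSyncAlways, kIODirectionOut) -> true   (sync forced)
//   SHOULD_COPY_DIR(kWalkSyncIn, kIODirectionIn)                   -> true   (bounce data must reach the md)
//   SHOULD_COPY_DIR(kWalkSyncIn, kIODirectionOut)                  -> false  (device-bound md, nothing to sync in)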

#if 0
#define DEBG(fmt, args...) { IOLog(fmt, ## args); kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...) {}
#endif

#if 0
#define LOGTAG 0x87654321
#endif

/**************************** class IODMACommand ***************************/

#undef super
#define super IOCommand
OSDefineMetaClassAndStructorsWithZone(IODMACommand, IOCommand, ZC_NONE);

OSMetaClassDefineReservedUsedX86(IODMACommand, 0);
OSMetaClassDefineReservedUsedX86(IODMACommand, 1);
OSMetaClassDefineReservedUsedX86(IODMACommand, 2);
OSMetaClassDefineReservedUsedX86(IODMACommand, 3);
OSMetaClassDefineReservedUsedX86(IODMACommand, 4);
OSMetaClassDefineReservedUsedX86(IODMACommand, 5);
OSMetaClassDefineReservedUsedX86(IODMACommand, 6);
OSMetaClassDefineReservedUnused(IODMACommand, 7);
OSMetaClassDefineReservedUnused(IODMACommand, 8);
OSMetaClassDefineReservedUnused(IODMACommand, 9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);


OSSharedPtr<IODMACommand>
IODMACommand::withRefCon(void * refCon)
{
    OSSharedPtr<IODMACommand> me = OSMakeShared<IODMACommand>();

    if (me && !me->initWithRefCon(refCon)) {
        return nullptr;
    }

    return me;
}

OSSharedPtr<IODMACommand>
IODMACommand::withSpecification(SegmentFunction outSegFunc,
    const SegmentOptions * segmentOptions,
    uint32_t               mappingOptions,
    IOMapper             * mapper,
    void                 * refCon)
{
    OSSharedPtr<IODMACommand> me = OSMakeShared<IODMACommand>();

    if (me && !me->initWithSpecification(outSegFunc, segmentOptions, mappingOptions,
        mapper, refCon)) {
        return nullptr;
    }

    return me;
}

OSSharedPtr<IODMACommand>
IODMACommand::withSpecification(SegmentFunction outSegFunc,
    UInt8           numAddressBits,
    UInt64          maxSegmentSize,
    MappingOptions  mappingOptions,
    UInt64          maxTransferSize,
    UInt32          alignment,
    IOMapper       *mapper,
    void           *refCon)
{
    OSSharedPtr<IODMACommand> me = OSMakeShared<IODMACommand>();

    if (me && !me->initWithSpecification(outSegFunc,
        numAddressBits, maxSegmentSize,
        mappingOptions, maxTransferSize,
        alignment, mapper, refCon)) {
        return nullptr;
    }

    return me;
}

OSSharedPtr<IODMACommand>
IODMACommand::cloneCommand(void *refCon)
{
    SegmentOptions segmentOptions =
    {
        .fStructSize                = sizeof(segmentOptions),
        .fNumAddressBits            = (uint8_t)fNumAddressBits,
        .fMaxSegmentSize            = fMaxSegmentSize,
        .fMaxTransferSize           = fMaxTransferSize,
        .fAlignment                 = fAlignMask + 1,
        .fAlignmentLength           = fAlignMaskLength + 1,
        .fAlignmentInternalSegments = fAlignMaskInternalSegments + 1
    };

    return IODMACommand::withSpecification(fOutSeg, &segmentOptions,
               fMappingOptions, fMapper.get(), refCon);
}
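
// A minimal creation sketch (illustrative only; the limits below are
// hypothetical driver values, not defaults taken from this file). It assumes
// the usual output-function helpers from IODMACommand.h. With a kMapped type
// and a NULL mapper, setSpecification() substitutes the system mapper.
#if 0
IODMACommand::SegmentOptions options =
{
    .fStructSize                = sizeof(options),
    .fNumAddressBits            = 32,          // hypothetical: device drives 32 address bits
    .fMaxSegmentSize            = 65536,       // hypothetical per-segment limit
    .fMaxTransferSize           = 1024 * 1024, // hypothetical per-transfer limit
    .fAlignment                 = 4,
    .fAlignmentLength           = 1,
    .fAlignmentInternalSegments = 4
};

OSSharedPtr<IODMACommand> cmd = IODMACommand::withSpecification(
    kIODMACommandOutputHost64,                 // 64-bit host-endian segments
    &options, IODMACommand::kMapped,
    /* mapper */ NULL, /* refCon */ NULL);
#endif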

#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)

bool
IODMACommand::initWithRefCon(void * refCon)
{
    if (!super::init()) {
        return false;
    }

    if (!reserved) {
        reserved = IONew(IODMACommandInternal, 1);
        if (!reserved) {
            return false;
        }
    }
    bzero(reserved, sizeof(IODMACommandInternal));
    fRefCon = refCon;

    return true;
}

bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
    const SegmentOptions * segmentOptions,
    uint32_t               mappingOptions,
    IOMapper             * mapper,
    void                 * refCon)
{
    if (!initWithRefCon(refCon)) {
        return false;
    }

    if (kIOReturnSuccess != setSpecification(outSegFunc, segmentOptions,
        mappingOptions, mapper)) {
        return false;
    }

    return true;
}

bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
    UInt8           numAddressBits,
    UInt64          maxSegmentSize,
    MappingOptions  mappingOptions,
    UInt64          maxTransferSize,
    UInt32          alignment,
    IOMapper       *mapper,
    void           *refCon)
{
    SegmentOptions segmentOptions =
    {
        .fStructSize                = sizeof(segmentOptions),
        .fNumAddressBits            = numAddressBits,
        .fMaxSegmentSize            = maxSegmentSize,
        .fMaxTransferSize           = maxTransferSize,
        .fAlignment                 = alignment,
        .fAlignmentLength           = 1,
        .fAlignmentInternalSegments = alignment
    };

    return initWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper, refCon);
}

IOReturn
IODMACommand::setSpecification(SegmentFunction outSegFunc,
    const SegmentOptions * segmentOptions,
    uint32_t               mappingOptions,
    IOMapper             * mapper)
{
    IOService * device = NULL;
    UInt8       numAddressBits;
    UInt64      maxSegmentSize;
    UInt64      maxTransferSize;
    UInt32      alignment;

    bool        is32Bit;

    if (!outSegFunc || !segmentOptions) {
        return kIOReturnBadArgument;
    }

    is32Bit = ((OutputHost32 == outSegFunc)
        || (OutputBig32 == outSegFunc)
        || (OutputLittle32 == outSegFunc));

    numAddressBits  = segmentOptions->fNumAddressBits;
    maxSegmentSize  = segmentOptions->fMaxSegmentSize;
    maxTransferSize = segmentOptions->fMaxTransferSize;
    alignment       = segmentOptions->fAlignment;
    if (is32Bit) {
        if (!numAddressBits) {
            numAddressBits = 32;
        } else if (numAddressBits > 32) {
            return kIOReturnBadArgument; // Wrong output function for bits
        }
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT)) {
        return kIOReturnBadArgument;
    }

    if (!maxSegmentSize) {
        maxSegmentSize--; // Set Max segment to -1
    }
    if (!maxTransferSize) {
        maxTransferSize--; // Set Max transfer to -1
    }
    if (mapper && !OSDynamicCast(IOMapper, mapper)) {
        device = mapper;
        mapper = NULL;
    }
    if (!mapper && (kUnmapped != MAPTYPE(mappingOptions))) {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    fNumSegments     = 0;
    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment) {
        alignment = 1;
    }
    fAlignMask = alignment - 1;

    alignment = segmentOptions->fAlignmentLength;
    if (!alignment) {
        alignment = 1;
    }
    fAlignMaskLength = alignment - 1;

    alignment = segmentOptions->fAlignmentInternalSegments;
    if (!alignment) {
        alignment = (fAlignMask + 1);
    }
    fAlignMaskInternalSegments = alignment - 1;

    switch (MAPTYPE(mappingOptions)) {
    case kMapped:      break;
    case kUnmapped:    break;
    case kNonCoherent: break;

    case kBypassed:
        if (!mapper) {
            break;
        }
        return kIOReturnBadArgument;

    default:
        return kIOReturnBadArgument;
    }

    if (mapper != fMapper) {
        fMapper.reset(mapper, OSRetain);
    }

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
    fInternalState->fDevice = device;

    return kIOReturnSuccess;
}

void
IODMACommand::free()
{
    if (reserved) {
        IODelete(reserved, IODMACommandInternal, 1);
    }

    fMapper.reset();

    // Correct use of this class when setting an IOMemoryDescriptor
    // in fMemory via setMemoryDescriptor(desc) is, for the caller, to
    // have a matching call to clearMemoryDescriptor() before releasing
    // the object. The matching call also has the effect of releasing
    // the ref taken on the IOMemoryDescriptor in setMemoryDescriptor().
    //
    // A number of "misbehaving" drivers have been found during testing
    // in which the matching call to clearMemoryDescriptor() is missing:
    //
    // rdar://59947343
    // rdar://59946968
    //
    // Both approaches taken in said drivers are wrong, but went
    // essentially unnoticed while fMemory was a regular pointer. With
    // fMemory becoming an OSSharedPtr, the IODMACommand destructor expects
    // to find either fMemory reset (through the call to
    // clearMemoryDescriptor()) or a reference held for the release.
    //
    // For this reason, this workaround of detaching fMemory is put in
    // place here, choosing the leak over the panic for misbehaving
    // drivers. Once all instances are fixed, this workaround will be
    // removed.
    //
    // Note: all well-behaved drivers that have matching calls for
    // setMemoryDescriptor() and clearMemoryDescriptor() are unaffected
    // since fMemory will be null at this point.
    fMemory.detach();

    super::free();
}
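
// Sketch of the balanced-call contract described above (illustrative only):
#if 0
cmd->setMemoryDescriptor(md);  // retains md; autoPrepare (the default) also prepares it
// ... generate segments, run the I/O ...
cmd->clearMemoryDescriptor();  // completes if still active, then drops the reference
cmd.reset();                   // free() finds fMemory already cleared: no leak, no panic
#endif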

IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    IOReturn err = kIOReturnSuccess;

    if (mem == fMemory) {
        if (!autoPrepare) {
            while (fActive) {
                complete();
            }
        }
        return kIOReturnSuccess;
    }

    if (fMemory) {
        // As we are almost certainly being called from a work loop thread,
        // if fActive is true it is probably not a good time to potentially
        // block; just test for it and return an error.
        if (fActive) {
            return kIOReturnBusy;
        }
        clearMemoryDescriptor();
    }

    if (mem) {
        bzero(&fMDSummary, sizeof(fMDSummary));
        err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)),
            &fMDSummary, sizeof(fMDSummary));
        if (err) {
            return err;
        }

        ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

        if ((kMapped == MAPTYPE(fMappingOptions))
            && fMapper) {
            fInternalState->fCheckAddressing = false;
        } else {
            fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));
        }

        fInternalState->fNewMD = true;
        fMemory.reset(const_cast<IOMemoryDescriptor *>(mem), OSRetain);
        fInternalState->fSetActiveNoMapper = (!fMapper);
        if (fInternalState->fSetActiveNoMapper) {
            mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
        }
        if (autoPrepare) {
            err = prepare();
            if (err) {
                clearMemoryDescriptor();
            }
        }
    }

    return err;
}

IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
    if (fActive && !autoComplete) {
        return kIOReturnNotReady;
    }

    if (fMemory) {
        while (fActive) {
            complete();
        }
        if (fInternalState->fSetActiveNoMapper) {
            fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
        }
        fMemory.reset();
    }

    return kIOReturnSuccess;
}

const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
    return fMemory.get();
}

IOMemoryDescriptor *
IODMACommand::getIOMemoryDescriptor() const
{
    OSSharedPtr<IOMemoryDescriptor> mem;

    mem = reserved->fCopyMD;
    if (!mem) {
        mem = fMemory;
    }

    return mem.get();
}

IOReturn
IODMACommand::segmentOp(
    void         *reference,
    IODMACommand *target,
    Segment64     segment,
    void         *segments,
    UInt32        segmentIndex)
{
    IOOptionBits op = (IOOptionBits)(uintptr_t) reference;
    addr64_t     maxPhys, address;
    uint64_t     length;
    uint32_t     numPages;
    uint32_t     mask;

    IODMACommandInternal * state = target->reserved;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperAllocValid || !target->fMapper)) {
        maxPhys = (1ULL << target->fNumAddressBits);
    } else {
        maxPhys = 0;
    }
    maxPhys--;

    address = segment.fIOVMAddr;
    length  = segment.fLength;

    assert(length);

    if (!state->fMisaligned) {
        mask = (segmentIndex ? target->fAlignMaskInternalSegments : state->fSourceAlignMask);
        state->fMisaligned |= (0 != (mask & address));
        if (state->fMisaligned) {
            DEBG("misaligned address %qx:%qx, %x\n", address, length, mask);
        }
    }
    if (!state->fMisaligned) {
        mask = target->fAlignMaskLength;
        state->fMisaligned |= (0 != (mask & length));
        if (state->fMisaligned) {
            DEBG("misaligned length %qx:%qx, %x\n", address, length, mask);
        }
    }

    if (state->fMisaligned && (kWalkPreflight & op)) {
        return kIOReturnNotAligned;
    }

    if (!state->fDoubleBuffer) {
        if ((address + length - 1) <= maxPhys) {
            length = 0;
        } else if (address <= maxPhys) {
            DEBG("tail %qx, %qx", address, length);
            length  = (address + length - maxPhys - 1);
            address = maxPhys + 1;
            DEBG("-> %qx, %qx\n", address, length);
        }
    }

    if (!length) {
        return kIOReturnSuccess;
    }

    uint64_t numPages64 = atop_64(round_page_64((address & PAGE_MASK) + length));
    if (numPages64 > UINT_MAX) {
        return kIOReturnVMError;
    }
    numPages = (typeof(numPages))numPages64;

    if (kWalkPreflight & op) {
        state->fCopyPageCount += numPages;
    } else {
        vm_page_t lastPage;
        lastPage = NULL;
        if (kWalkPrepare & op) {
            lastPage = state->fCopyNext;
            for (IOItemCount idx = 0; idx < numPages; idx++) {
                vm_page_set_offset(lastPage, atop_64(address) + idx);
                lastPage = vm_page_get_next(lastPage);
            }
        }

        if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection)) {
            lastPage = state->fCopyNext;
            for (IOItemCount idx = 0; idx < numPages; idx++) {
                if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection)) {
                    addr64_t cpuAddr = address;
                    addr64_t remapAddr;
                    uint64_t chunk;

                    if ((kMapped == MAPTYPE(target->fMappingOptions))
                        && target->fMapper) {
                        cpuAddr = target->fMapper->mapToPhysicalAddress(address);
                    }

                    remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
                    if (!state->fDoubleBuffer) {
                        remapAddr += (address & PAGE_MASK);
                    }
                    chunk = PAGE_SIZE - (address & PAGE_MASK);
                    if (chunk > length) {
                        chunk = length;
                    }
                    if (chunk > (UINT_MAX - PAGE_SIZE + 1)) {
                        chunk = (UINT_MAX - PAGE_SIZE + 1);
                    }

                    DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
                        (kWalkSyncIn & op) ? "->" : "<-",
                        address, chunk, op);

                    if (kWalkSyncIn & op) { // cppvNoModSnk
                        copypv(remapAddr, cpuAddr, (unsigned int) chunk,
                            cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
                    } else {
                        copypv(cpuAddr, remapAddr, (unsigned int) chunk,
                            cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
                    }
                    address += chunk;
                    length  -= chunk;
                }
                lastPage = vm_page_get_next(lastPage);
            }
        }
        state->fCopyNext = lastPage;
    }

    return kIOReturnSuccess;
}

OSSharedPtr<IOBufferMemoryDescriptor>
IODMACommand::createCopyBuffer(IODirection direction, UInt64 length)
{
    mach_vm_address_t mask = 0xFFFFF000; // state->fSourceAlignMask
    return IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
               direction, length, mask);
}

IOReturn
IODMACommand::walkAll(uint32_t op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn ret = kIOReturnSuccess;
    UInt32   numSegments;
    UInt64   offset;

    if (kWalkPreflight & op) {
        state->fMisaligned    = false;
        state->fDoubleBuffer  = false;
        state->fPrepared      = false;
        state->fCopyNext      = NULL;
        state->fCopyPageAlloc = NULL;
        state->fCopyPageCount = 0;
        state->fNextRemapPage = NULL;
        state->fCopyMD        = NULL;

        if (!(kWalkDoubleBuffer & op)) {
            offset = 0;
            numSegments = 0 - 1;
            ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
        }

        op &= ~kWalkPreflight;

        state->fDoubleBuffer = (state->fMisaligned || state->fForceDoubleBuffer);
        state->fForceDoubleBuffer = false;
        if (state->fDoubleBuffer) {
            state->fCopyPageCount = (typeof(state->fCopyPageCount))(atop_64(round_page(state->fPreparedLength)));
        }

        if (state->fCopyPageCount) {
            vm_page_t mapBase = NULL;

            DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

            if (!fMapper && !state->fDoubleBuffer) {
                kern_return_t kr;

                if (fMapper) {
                    panic("fMapper copying");
                }

                kr = vm_page_alloc_list(state->fCopyPageCount,
                    (kma_flags_t)(KMA_LOMEM | KMA_NOPAGEWAIT), &mapBase);
                if (KERN_SUCCESS != kr) {
                    DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr);
                    mapBase = NULL;
                }
            }

            if (mapBase) {
                state->fCopyPageAlloc = mapBase;
                state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0 - 1;
                ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
                state->fPrepared = true;
                op &= ~(kWalkSyncIn | kWalkSyncOut);
            } else {
                DEBG("alloc IOBMD\n");
                state->fCopyMD = createCopyBuffer(fMDSummary.fDirection, state->fPreparedLength);

                if (state->fCopyMD) {
                    ret = kIOReturnSuccess;
                    state->fPrepared = true;
                } else {
                    DEBG("IODMACommand !alloc IOBMD");
                    return kIOReturnNoResources;
                }
            }
        }
    }

    if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op)) {
        if (state->fCopyPageCount) {
            DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

            if (state->fCopyPageAlloc) {
                state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0 - 1;
                ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
            } else if (state->fCopyMD) {
                DEBG("sync IOBMD\n");

                if (SHOULD_COPY_DIR(op, fMDSummary.fDirection)) {
                    OSSharedPtr<IOMemoryDescriptor> poMD = fMemory;

                    IOByteCount bytes;

                    if (kWalkSyncIn & op) {
                        bytes = poMD->writeBytes(state->fPreparedOffset,
                            state->fCopyMD->getBytesNoCopy(),
                            state->fPreparedLength);
                    } else {
                        bytes = poMD->readBytes(state->fPreparedOffset,
                            state->fCopyMD->getBytesNoCopy(),
                            state->fPreparedLength);
                    }
                    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
                    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
                } else {
                    ret = kIOReturnSuccess;
                }
            }
        }
    }

    if (kWalkComplete & op) {
        if (state->fCopyPageAlloc) {
            vm_page_free_list(state->fCopyPageAlloc, FALSE);
            state->fCopyPageAlloc = NULL;
            state->fCopyPageCount = 0;
        }
        if (state->fCopyMD) {
            state->fCopyMD.reset();
        }

        state->fPrepared = false;
    }
    return ret;
}

UInt8
IODMACommand::getNumAddressBits(void)
{
    return (UInt8) fNumAddressBits;
}

UInt32
IODMACommand::getAlignment(void)
{
    return fAlignMask + 1;
}

uint32_t
IODMACommand::getAlignmentLength(void)
{
    return fAlignMaskLength + 1;
}

uint32_t
IODMACommand::getAlignmentInternalSegments(void)
{
    return fAlignMaskInternalSegments + 1;
}

IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
    const SegmentOptions * segmentOptions,
    uint32_t               mappingOptions,
    IOMapper             * mapper,
    UInt64                 offset,
    UInt64                 length,
    bool                   flushCache,
    bool                   synchronize)
{
    IOReturn ret;

    if (fActive) {
        return kIOReturnNotPermitted;
    }

    ret = setSpecification(outSegFunc, segmentOptions, mappingOptions, mapper);
    if (kIOReturnSuccess != ret) {
        return ret;
    }

    ret = prepare(offset, length, flushCache, synchronize);

    return ret;
}

IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
    UInt8           numAddressBits,
    UInt64          maxSegmentSize,
    MappingOptions  mappingOptions,
    UInt64          maxTransferSize,
    UInt32          alignment,
    IOMapper       *mapper,
    UInt64          offset,
    UInt64          length,
    bool            flushCache,
    bool            synchronize)
{
    SegmentOptions segmentOptions =
    {
        .fStructSize                = sizeof(segmentOptions),
        .fNumAddressBits            = numAddressBits,
        .fMaxSegmentSize            = maxSegmentSize,
        .fMaxTransferSize           = maxTransferSize,
        .fAlignment                 = alignment,
        .fAlignmentLength           = 1,
        .fAlignmentInternalSegments = alignment
    };

    return prepareWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper,
               offset, length, flushCache, synchronize);
}

IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    uint32_t               mappingOptions = fMappingOptions;

    // check specification has been set
    if (!fOutSeg) {
        return kIOReturnNotReady;
    }

    if (!length) {
        length = fMDSummary.fLength;
    }

    if (length > fMaxTransferSize) {
        return kIOReturnNoSpace;
    }

    if (fActive++) {
        if ((state->fPreparedOffset != offset)
            || (state->fPreparedLength != length)) {
            ret = kIOReturnNotReady;
        }
    } else {
        if (fAlignMaskLength & length) {
            return kIOReturnNotAligned;
        }

        if (atop_64(state->fPreparedLength) > UINT_MAX) {
            return kIOReturnVMError;
        }
        state->fPreparedOffset = offset;
        state->fPreparedLength = length;

        state->fMisaligned             = false;
        state->fDoubleBuffer           = false;
        state->fPrepared               = false;
        state->fCopyNext               = NULL;
        state->fCopyPageAlloc          = NULL;
        state->fCopyPageCount          = 0;
        state->fNextRemapPage          = NULL;
        state->fCopyMD                 = NULL;
        state->fLocalMapperAlloc       = 0;
        state->fLocalMapperAllocValid  = false;
        state->fLocalMapperAllocLength = 0;

        state->fSourceAlignMask = fAlignMask;
        if (fMapper) {
            state->fSourceAlignMask &= page_mask;
        }

        state->fCursor = state->fIterateOnly
            || (!state->fCheckAddressing
            && (!state->fSourceAlignMask
            || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));

        if (!state->fCursor) {
            IOOptionBits op = kWalkPrepare | kWalkPreflight;
            if (synchronize) {
                op |= kWalkSyncOut;
            }
            ret = walkAll(op);
        }

        if (IS_NONCOHERENT(mappingOptions) && flushCache) {
            if (state->fCopyMD) {
                state->fCopyMD->performOperation(kIOMemoryIncoherentIOStore, 0, length);
            } else {
                fMemory->performOperation(kIOMemoryIncoherentIOStore, offset, length);
            }
        }

        if (fMapper) {
            IOMDDMAMapArgs mapArgs;
            bzero(&mapArgs, sizeof(mapArgs));
            mapArgs.fMapper  = fMapper.get();
            mapArgs.fCommand = this;
            mapArgs.fMapSpec.device         = state->fDevice;
            mapArgs.fMapSpec.alignment      = fAlignMask + 1;
            mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? ((UInt8) fNumAddressBits) : 64;
            mapArgs.fLength = state->fPreparedLength;
            OSSharedPtr<IOMemoryDescriptor> md = state->fCopyMD;
            if (md) {
                mapArgs.fOffset = 0;
            } else {
                md = fMemory;
                mapArgs.fOffset = state->fPreparedOffset;
            }

            ret = md->dmaCommandOperation(kIOMDDMAMap, &mapArgs, sizeof(mapArgs));

            if ((kIOReturnSuccess == ret)
                && mapArgs.fAllocLength
                && (mapArgs.fAllocLength != mapArgs.fLength)) {
                do {
                    // multisegment case
                    IOMDDMAWalkSegmentState  walkState;
                    IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState;
                    IOOptionBits             mdOp;
                    uint64_t                 index;
                    IOPhysicalLength         segLen;
                    uint32_t                 segCount;
                    uint64_t                 phys, align;
                    uint64_t                 mapperPageMask;
                    uint64_t                 mapperPageShift;
                    uint64_t                 insertOffset;
                    uint32_t                 mapOptions;
                    uint64_t                 length;

                    assert(mapArgs.fAllocLength > mapArgs.fLength);

                    mapperPageMask = fMapper->getPageSize();
                    assert(mapperPageMask);
                    mapperPageMask -= 1;
                    mapperPageShift = (64 - __builtin_clzll(mapperPageMask));
                    walkArgs->fMapped = false;
                    length = state->fPreparedLength;
                    mdOp = kIOMDFirstSegment;
                    segCount = 0;
                    for (index = 0; index < length; segCount++) {
                        walkArgs->fOffset = state->fPreparedOffset + index;

                        ret = md->dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
                        mdOp = kIOMDWalkSegments;
                        assert(kIOReturnSuccess == ret);
                        if (ret != kIOReturnSuccess) {
                            panic("dmaCommandOperation");
                        }
                        segLen = walkArgs->fLength;
                        index += segLen;
                    }
                    if (ret != kIOReturnSuccess) {
                        break;
                    }

#if defined(LOGTAG)
                    if (LOGTAG == fMemory->getTag()) {
                        IOLog("DMA[%p] alloc 0x%qx, 0x%qx\n", this, mapArgs.fAlloc, mapArgs.fAllocLength);
                    }
#endif /* defined(LOGTAG) */

                    state->fMapSegments = IONewZero(IODMACommandMapSegment, segCount);
                    if (!state->fMapSegments) {
                        ret = kIOReturnNoMemory;
                        break;
                    }
                    state->fMapSegmentsCount = segCount;

                    switch (kIODirectionOutIn & fMDSummary.fDirection) {
                    case kIODirectionOut:
                        mapOptions = kIODMAMapReadAccess;
                        break;
                    case kIODirectionIn:
                        mapOptions = kIODMAMapWriteAccess;
                        break;
                    default:
                        mapOptions = kIODMAMapReadAccess | kIODMAMapWriteAccess;
                        break;
                    }

                    mdOp = kIOMDFirstSegment;
                    segCount = 0;
                    for (insertOffset = 0, index = 0; index < length; segCount++) {
                        walkArgs->fOffset = state->fPreparedOffset + index;
                        ret = md->dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
                        mdOp = kIOMDWalkSegments;
                        if (ret != kIOReturnSuccess) {
                            panic("dmaCommandOperation 0x%x", ret);
                        }
                        phys = walkArgs->fIOVMAddr;
                        segLen = walkArgs->fLength;

#if defined(LOGTAG)
                        if (LOGTAG == fMemory->getTag()) {
                            IOLog("DMA[%p] phys[%d] 0x%qx, 0x%qx\n", this, segCount, (uint64_t) phys, (uint64_t) segLen);
                        }
#endif /* defined(LOGTAG) */

                        align = (phys & mapperPageMask);

#if defined(LOGTAG)
                        if (LOGTAG == fMemory->getTag()) {
                            IOLog("DMA[%p] runs[%d] dmaoff 0x%qx, mapoff 0x%qx, align 0x%qx\n", this, segCount, index, insertOffset, align);
                        }
#endif /* defined(LOGTAG) */

                        assert(segCount < state->fMapSegmentsCount);
                        state->fMapSegments[segCount].fDMAOffset  = state->fPreparedOffset + index;
                        state->fMapSegments[segCount].fMapOffset  = insertOffset;
                        state->fMapSegments[segCount].fPageOffset = align;
                        index += segLen;

                        // segment page align
                        segLen = ((phys + segLen + mapperPageMask) & ~mapperPageMask);
                        phys -= align;
                        segLen -= phys;
                        insertOffset += segLen;
                    }
                    state->fLocalMapperAllocBase = (mapArgs.fAlloc & ~mapperPageMask);
#if defined(LOGTAG)
                    if (LOGTAG == fMemory->getTag()) {
                        IOLog("IODMACommand fMapSegmentsCount %d\n", state->fMapSegmentsCount);
                    }
#endif /* defined(LOGTAG) */
                } while (false);
            }
            if (kIOReturnSuccess == ret) {
                state->fLocalMapperAlloc       = mapArgs.fAlloc;
                state->fLocalMapperAllocValid  = true;
                state->fLocalMapperAllocLength = mapArgs.fAllocLength;
            }
        }
        if (kIOReturnSuccess == ret) {
            state->fPrepared = true;
        }
    }
    return ret;
}

IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    OSSharedPtr<IOMemoryDescriptor> copyMD;

    if (fActive < 1) {
        return kIOReturnNotReady;
    }

    if (!--fActive) {
        copyMD = state->fCopyMD;

        if (IS_NONCOHERENT(fMappingOptions) && invalidateCache) {
            if (copyMD) {
                copyMD->performOperation(kIOMemoryIncoherentIOFlush, 0, state->fPreparedLength);
            } else {
                OSSharedPtr<IOMemoryDescriptor> md = fMemory;
                md->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
            }
        }

        if (!state->fCursor) {
            IOOptionBits op = kWalkComplete;
            if (synchronize) {
                op |= kWalkSyncIn;
            }
            ret = walkAll(op);
        }

        if (state->fLocalMapperAllocValid) {
            IOMDDMAMapArgs mapArgs;
            bzero(&mapArgs, sizeof(mapArgs));
            mapArgs.fMapper      = fMapper.get();
            mapArgs.fCommand     = this;
            mapArgs.fAlloc       = state->fLocalMapperAlloc;
            mapArgs.fAllocLength = state->fLocalMapperAllocLength;
            OSSharedPtr<IOMemoryDescriptor> md = copyMD;
            if (md) {
                mapArgs.fOffset = 0;
            } else {
                md = fMemory;
                mapArgs.fOffset = state->fPreparedOffset;
            }

            ret = md->dmaCommandOperation(kIOMDDMAUnmap, &mapArgs, sizeof(mapArgs));

            state->fLocalMapperAlloc       = 0;
            state->fLocalMapperAllocValid  = false;
            state->fLocalMapperAllocLength = 0;
            if (state->fMapSegments) {
                IODelete(state->fMapSegments, IODMACommandMapSegment, state->fMapSegmentsCount);
                state->fMapSegments = NULL;
                state->fMapSegmentsCount = 0;
            }
        }

        state->fPrepared = false;
    }

    return ret;
}
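
// Pairing sketch (illustrative): fActive counts nested prepare() calls, so
// each successful prepare() must be balanced by one complete(); only the
// transition back to zero tears down the mapping and bounce state.
#if 0
if (kIOReturnSuccess == cmd->prepare(0, 0)) {  // offset 0, length 0 => whole descriptor
    // ... program the hardware, wait for completion ...
    cmd->complete();                           // flushes caches for kNonCoherent types
}
#endif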

IOReturn
IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
{
    IODMACommandInternal * state = fInternalState;
    if (fActive < 1) {
        return kIOReturnNotReady;
    }

    if (offset) {
        *offset = state->fPreparedOffset;
    }
    if (length) {
        *length = state->fPreparedLength;
    }

    return kIOReturnSuccess;
}

IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOOptionBits           op;

    if (kIODirectionOutIn == (kIODirectionOutIn & options)) {
        return kIOReturnBadArgument;
    }

    if (fActive < 1) {
        return kIOReturnNotReady;
    }

    op = 0;
    if (kForceDoubleBuffer & options) {
        if (state->fDoubleBuffer) {
            return kIOReturnSuccess;
        }
        ret = complete(false /* invalidateCache */, true /* synchronize */);
        state->fCursor = false;
        state->fForceDoubleBuffer = true;
        ret = prepare(state->fPreparedOffset, state->fPreparedLength, false /* flushCache */, true /* synchronize */);

        return ret;
    } else if (state->fCursor) {
        return kIOReturnSuccess;
    }

    if (kIODirectionIn & options) {
        op |= kWalkSyncIn | kWalkSyncAlways;
    } else if (kIODirectionOut & options) {
        op |= kWalkSyncOut | kWalkSyncAlways;
    }

    ret = walkAll(op);

    return ret;
}

struct IODMACommandTransferContext {
    void * buffer;
    UInt64 bufferOffset;
    UInt64 remaining;
    UInt32 op;
};
enum {
    kIODMACommandTransferOpReadBytes  = 1,
    kIODMACommandTransferOpWriteBytes = 2
};

IOReturn
IODMACommand::transferSegment(void *reference,
    IODMACommand *target,
    Segment64     segment,
    void         *segments,
    UInt32        segmentIndex)
{
    IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
    UInt64   length  = min(segment.fLength, context->remaining);
    addr64_t ioAddr  = segment.fIOVMAddr;
    addr64_t cpuAddr = ioAddr;

    context->remaining -= length;

    while (length) {
        UInt64 copyLen = length;
        if ((kMapped == MAPTYPE(target->fMappingOptions))
            && target->fMapper) {
            cpuAddr = target->fMapper->mapToPhysicalAddress(ioAddr);
            copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
            ioAddr += copyLen;
        }
        if (copyLen > (UINT_MAX - PAGE_SIZE + 1)) {
            copyLen = (UINT_MAX - PAGE_SIZE + 1);
        }

        switch (context->op) {
        case kIODMACommandTransferOpReadBytes:
            copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, (unsigned int) copyLen,
                cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
            break;
        case kIODMACommandTransferOpWriteBytes:
            copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, (unsigned int) copyLen,
                cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
            break;
        }
        length                -= copyLen;
        context->bufferOffset += copyLen;
    }

    return context->remaining ? kIOReturnSuccess : kIOReturnOverrun;
}

UInt64
IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
{
    IODMACommandInternal *      state = fInternalState;
    IODMACommandTransferContext context;
    Segment64                   segments[1];
    UInt32                      numSegments = 0 - 1;

    if (fActive < 1) {
        return 0;
    }

    if (offset >= state->fPreparedLength) {
        return 0;
    }
    length = min(length, state->fPreparedLength - offset);

    context.buffer       = buffer;
    context.bufferOffset = 0;
    context.remaining    = length;
    context.op           = transferOp;
    (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);

    return length - context.remaining;
}

UInt64
IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
{
    return transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length);
}

UInt64
IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
{
    return transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length);
}

IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
    void   *segmentsP,
    UInt32 *numSegmentsP)
{
    return genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
               offsetP, segmentsP, numSegmentsP);
}
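
// Client loop sketch (illustrative), assuming the command was created with
// kIODMACommandOutputHost64 so the output buffer is an array of Segment64;
// offset advances as segments are produced until it reaches the prepared length.
#if 0
UInt64 offset = 0;
UInt64 preparedLength = 0;
cmd->getPreparedOffsetAndLength(NULL, &preparedLength);
while (offset < preparedLength) {
    IODMACommand::Segment64 segs[32];
    UInt32 numSegs = 32;
    if (kIOReturnSuccess != cmd->genIOVMSegments(&offset, segs, &numSegs)) {
        break;
    }
    for (UInt32 i = 0; i < numSegs; i++) {
        // program segs[i].fIOVMAddr / segs[i].fLength into the device's
        // scatter/gather table
    }
}
#endif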

IOReturn
IODMACommand::genIOVMSegments(uint32_t op,
    InternalSegmentFunction outSegFunc,
    void   *reference,
    UInt64 *offsetP,
    void   *segmentsP,
    UInt32 *numSegmentsP)
{
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp = kIOMDWalkSegments;
    IOReturn               ret  = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive) {
        return kIOReturnNotReady;
    }

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP) {
        return kIOReturnBadArgument;
    }

    IOMDDMAWalkSegmentArgs *state =
        (IOMDDMAWalkSegmentArgs *)(void *) fState;

    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength) {
        return kIOReturnOverrun;
    }

    if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
        state->fOffset = 0;
        internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
        internalState->fNextRemapPage = NULL;
        internalState->fNewMD = false;
        mdOp = kIOMDFirstSegment;
        if (fMapper) {
            if (internalState->fLocalMapperAllocValid) {
                state->fMapped = true;
                state->fMappedBase = internalState->fLocalMapperAlloc;
            } else {
                state->fMapped = false;
            }
        }
    }

    UInt32    segIndex = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg = { 0, 0 };
    bool      curSegValid = false;
    addr64_t  maxPhys;

    if (fNumAddressBits && (fNumAddressBits < 64)) {
        maxPhys = (1ULL << fNumAddressBits);
    } else {
        maxPhys = 0;
    }
    maxPhys--;

    while (internalState->fIOVMAddrValid || (state->fOffset < memLength)) {
        // state = next seg
        if (!internalState->fIOVMAddrValid) {
            IOReturn rtn;

            state->fOffset = offset;
            state->fLength = memLength - offset;

            bool done  = false;
            bool check = false;

            if (internalState->fLocalMapperAllocValid) {
                if (!internalState->fMapSegmentsCount) {
                    state->fIOVMAddr = internalState->fLocalMapperAlloc + offset - internalState->fPreparedOffset;
                    rtn   = kIOReturnSuccess;
                    done  = true;
                    check = true;
                } else {
                    uint64_t address;
                    uint64_t length;
                    uint64_t runOffset;
                    uint64_t ind;
                    uint64_t off2Ind = internalState->fOffset2Index;

                    // Validate the previous offset
                    if (offset
                        && (offset == internalState->fNextOffset || off2Ind <= offset)) {
                        ind = internalState->fIndex;
                    } else {
                        ind = off2Ind = 0; // Start from beginning
                    }
#if defined(LOGTAG)
                    if (LOGTAG == fMemory->getTag()) {
                        IOLog("DMA[%p] offsets 0x%qx, 0x%qx, 0x%qx ind %qd\n", this, offset, internalState->fPreparedOffset, internalState->fNextOffset, ind);
                    }
#endif /* defined(LOGTAG) */

                    // Scan through iopl info blocks looking for block containing offset
                    while (ind < internalState->fMapSegmentsCount && offset >= internalState->fMapSegments[ind].fDMAOffset) {
                        ind++;
                    }
                    if (ind < internalState->fMapSegmentsCount) {
                        length = internalState->fMapSegments[ind].fDMAOffset;
                    } else {
                        length = memLength;
                    }
                    length -= offset; // Remainder within iopl

                    // Go back to actual range as search goes past it
                    ind--;
                    off2Ind = internalState->fMapSegments[ind].fDMAOffset;

                    // Subtract offset till this iopl in total list
                    runOffset = offset - off2Ind;

                    // Compute an offset relative to the mapped base

                    runOffset += internalState->fMapSegments[ind].fPageOffset;
                    address = internalState->fLocalMapperAllocBase + internalState->fMapSegments[ind].fMapOffset + runOffset;
#if defined(LOGTAG)
                    if (LOGTAG == fMemory->getTag()) {
                        IOLog("DMA[%p] addrlen 0x%qx, 0x%qx\n", this, address, length);
                    }
#endif /* defined(LOGTAG) */

                    state->fIOVMAddr = address;
                    state->fLength   = length;

                    internalState->fIndex        = ind;
                    internalState->fOffset2Index = off2Ind;
                    internalState->fNextOffset   = state->fOffset + length;

                    rtn   = kIOReturnSuccess;
                    done  = true;
                    check = true;
                }
            }

            if (!done) {
                IOMemoryDescriptor * memory =
                    internalState->fCopyMD ? internalState->fCopyMD.get() : fMemory.get();
                rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
                mdOp = kIOMDWalkSegments;
            }
#if 0
            if (check
                && !ml_at_interrupt_context()
                && (rtn == kIOReturnSuccess)
                && fMapper
                && strcmp("AppleNVMeMMU", fMapper->getName())) {
                uint64_t checkOffset;
                IOPhysicalLength segLen;
                IOMemoryDescriptor * memory =
                    internalState->fCopyMD ? internalState->fCopyMD.get() : fMemory.get();
                for (checkOffset = 0; checkOffset < state->fLength;) {
                    addr64_t phys = memory->getPhysicalSegment(offset + checkOffset, &segLen, kIOMemoryMapperNone);
                    addr64_t mapperPhys;

                    mapperPhys = fMapper->mapToPhysicalAddress(state->fIOVMAddr + checkOffset);
                    mapperPhys |= (phys & (fMapper->getPageSize() - 1));
                    if (mapperPhys != phys) {
                        panic("DMA[%p] mismatch at offset %llx + %llx, dma %llx mapperPhys %llx != %llx, len %llx\n",
                            this, offset, checkOffset,
                            state->fIOVMAddr + checkOffset, mapperPhys, phys, state->fLength);
                    }
                    checkOffset += page_size - (phys & page_mask);
                }
            }
#endif
            if (rtn == kIOReturnSuccess) {
                internalState->fIOVMAddrValid = true;
                assert(state->fLength);
                if (curSegValid && ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr)) {
                    UInt64 length = state->fLength;
                    offset += length;
                    curSeg.fLength += length;
                    internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
                }
            } else if (rtn == kIOReturnOverrun) {
                internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0; // At end
            } else {
                return rtn;
            }
        }

        // seg = state, offset = end of seg
        if (!curSegValid) {
            UInt64 length = state->fLength;
            offset += length;
            curSeg.fIOVMAddr = state->fIOVMAddr;
            curSeg.fLength   = length;
            curSegValid = true;
            internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
        }

        if (!internalState->fIOVMAddrValid) {
            // maxPhys
            if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys) {
                if (internalState->fCursor) {
                    curSegValid = curSeg.fIOVMAddr = 0;
                    ret = kIOReturnMessageTooLarge;
                    break;
                } else if (curSeg.fIOVMAddr <= maxPhys) {
                    UInt64 remain, newLength;

                    newLength = (maxPhys + 1 - curSeg.fIOVMAddr);
                    DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
                    remain = curSeg.fLength - newLength;
                    state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
                    internalState->fIOVMAddrValid = true;
                    curSeg.fLength = newLength;
                    state->fLength = remain;
                    offset -= remain;
                } else {
                    UInt64    addr = curSeg.fIOVMAddr;
                    ppnum_t   addrPage = (ppnum_t) atop_64(addr);
                    vm_page_t remap = NULL;
                    UInt64    remain, newLength;

                    DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength);

                    remap = internalState->fNextRemapPage;
                    if (remap && (addrPage == vm_page_get_offset(remap))) {
                    } else {
                        for (remap = internalState->fCopyPageAlloc;
                            remap && (addrPage != vm_page_get_offset(remap));
                            remap = vm_page_get_next(remap)) {
                        }
                    }

                    if (!remap) {
                        panic("no remap page found");
                    }

                    curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap))
                        + (addr & PAGE_MASK);
                    curSegValid = true;
                    internalState->fNextRemapPage = vm_page_get_next(remap);

                    newLength = PAGE_SIZE - (addr & PAGE_MASK);
                    if (newLength < curSeg.fLength) {
                        remain = curSeg.fLength - newLength;
                        state->fIOVMAddr = addr + newLength;
                        internalState->fIOVMAddrValid = true;
                        curSeg.fLength = newLength;
                        state->fLength = remain;
                        offset -= remain;
                    }
                    DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset);
                }
            }

            // reduce size of output segment
            uint64_t reduce, leftover = 0;

            // fMaxSegmentSize
            if (curSeg.fLength > fMaxSegmentSize) {
                leftover += curSeg.fLength - fMaxSegmentSize;
                curSeg.fLength = fMaxSegmentSize;
                state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
                internalState->fIOVMAddrValid = true;
            }

            // alignment current length

            reduce = (curSeg.fLength & fAlignMaskLength);
            if (reduce && (curSeg.fLength > reduce)) {
                leftover += reduce;
                curSeg.fLength -= reduce;
                state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
                internalState->fIOVMAddrValid = true;
            }

            // alignment next address

            reduce = (state->fIOVMAddr & fAlignMaskInternalSegments);
            if (reduce && (curSeg.fLength > reduce)) {
                leftover += reduce;
                curSeg.fLength -= reduce;
                state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
                internalState->fIOVMAddrValid = true;
            }

            if (leftover) {
                DEBG("reduce seg by 0x%llx @ 0x%llx [0x%llx, 0x%llx]\n",
                    leftover, offset,
                    curSeg.fIOVMAddr, curSeg.fLength);
                state->fLength = leftover;
                offset -= leftover;
            }

            if (internalState->fCursor) {
                bool     misaligned;
                uint32_t mask;

                mask = (segIndex ? fAlignMaskInternalSegments : internalState->fSourceAlignMask);
                misaligned = (0 != (mask & curSeg.fIOVMAddr));
                if (!misaligned) {
                    mask = fAlignMaskLength;
                    misaligned |= (0 != (mask & curSeg.fLength));
                }
                if (misaligned) {
                    DEBG("cursor misaligned %qx:%qx\n", curSeg.fIOVMAddr, curSeg.fLength);
                    curSegValid = curSeg.fIOVMAddr = 0;
                    ret = kIOReturnNotAligned;
                    break;
                }
            }

            if (offset >= memLength) {
                curSeg.fLength -= (offset - memLength);
                offset = memLength;
                internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0; // At end
                break;
            }
        }

        if (internalState->fIOVMAddrValid) {
            if (segIndex + 1 == numSegments) {
                break;
            }
#if defined(LOGTAG)
            if ((LOGTAG == fMemory->getTag()) && (kWalkClient == op)) {
                IOLog("DMA[%p] outseg 0x%qx, 0x%qx\n", this, curSeg.fIOVMAddr, curSeg.fLength);
            }
#endif /* defined(LOGTAG) */
            ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
            curSegValid = curSeg.fIOVMAddr = 0;
            if (kIOReturnSuccess != ret) {
                break;
            }
        }
    }

    if (curSegValid) {
#if defined(LOGTAG)
        if ((LOGTAG == fMemory->getTag()) && (kWalkClient == op)) {
            IOLog("DMA[%p] outseg 0x%qx, 0x%qx\n", this, curSeg.fIOVMAddr, curSeg.fLength);
        }
#endif /* defined(LOGTAG) */
        ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret) {
        state->fOffset = offset;
        *offsetP = offset - internalState->fPreparedOffset;
        *numSegmentsP = segIndex;
    }
    return ret;
}

IOReturn
IODMACommand::clientOutputSegment(
    void *reference, IODMACommand *target,
    Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    SegmentFunction segmentFunction = (SegmentFunction) reference;
    IOReturn        ret = kIOReturnSuccess;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64)
        && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
        && (target->reserved->fLocalMapperAllocValid || !target->fMapper)) {
        DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    if (!(*segmentFunction)(target, segment, vSegList, outSegIndex)) {
        DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    return ret;
}

IOReturn
IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
    UInt64 *offsetP,
    void   *segmentsP,
    UInt32 *numSegmentsP)
{
    return genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
               offsetP, segmentsP, numSegmentsP);
}

bool
IODMACommand::OutputHost32(IODMACommand *,
    Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment32 *base = (Segment32 *) vSegList;
    base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
    base[outSegIndex].fLength   = (UInt32) segment.fLength;
    return true;
}

bool
IODMACommand::OutputBig32(IODMACommand *,
    Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteBigInt32(vSegList, offLen, (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle32(IODMACommand *,
    Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteLittleInt32(vSegList, offLen, (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputHost64(IODMACommand *,
    Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment64 *base = (Segment64 *) vSegList;
    base[outSegIndex] = segment;
    return true;
}

bool
IODMACommand::OutputBig64(IODMACommand *,
    Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteBigInt64(vSegList, offLen, (UInt64) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle64(IODMACommand *,
    Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteLittleInt64(vSegList, offLen, (UInt64) segment.fLength);
    return true;
}