/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>

#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"
#define MAPTYPE(type)		((UInt) (type) & kTypeMask)
#define IS_MAPPED(type)		(MAPTYPE(type) == kMapped)
#define IS_BYPASSED(type)	(MAPTYPE(type) == kBypassed)
#define IS_NONCOHERENT(type)	(MAPTYPE(type) == kNonCoherent)
static bool gIOEnableCopyMapper = true;
enum
{
    kWalkSyncIn       = 0x01,	// bounce -> md
    kWalkSyncOut      = 0x02,	// bounce <- md
    kWalkSyncAlways   = 0x04,
    kWalkPreflight    = 0x08,
    kWalkDoubleBuffer = 0x10,
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80
};
#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary
#if 1
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction) \
	((kIODirectionNone == (direction)) \
	 || (kWalkSyncAlways & (op)) \
	 || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut) \
	     & (direction)))
#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif
#if 0
#define DEBG(fmt, args...)	{ kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)	{}
#endif
/**************************** class IODMACommand ***************************/
#define super OSObject
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);
OSMetaClassDefineReservedUsed(IODMACommand,  0);
OSMetaClassDefineReservedUsed(IODMACommand,  1);
OSMetaClassDefineReservedUnused(IODMACommand,  2);
OSMetaClassDefineReservedUnused(IODMACommand,  3);
OSMetaClassDefineReservedUnused(IODMACommand,  4);
OSMetaClassDefineReservedUnused(IODMACommand,  5);
OSMetaClassDefineReservedUnused(IODMACommand,  6);
OSMetaClassDefineReservedUnused(IODMACommand,  7);
OSMetaClassDefineReservedUnused(IODMACommand,  8);
OSMetaClassDefineReservedUnused(IODMACommand,  9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);
IODMACommand *
IODMACommand::withSpecification(SegmentFunction outSegFunc,
				UInt8           numAddressBits,
				UInt64          maxSegmentSize,
				MappingOptions  mappingOptions,
				UInt64          maxTransferSize,
				UInt32          alignment,
				IOMapper       *mapper,
				void           *refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc,
					 numAddressBits, maxSegmentSize,
					 mappingOptions, maxTransferSize,
					 alignment, mapper, refCon))
    {
	me->release();
	return 0;
    }

    return me;
}
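
// Usage sketch (illustrative only, not part of the original file): a driver
// for a 32-bit DMA engine might create its command like this.  The limit
// values below are examples, not requirements of the API.
#if 0
    IODMACommand * dmaCommand = IODMACommand::withSpecification(
	IODMACommand::OutputHost32,	// emit host-order Segment32 segments
	32,				// device address bits
	65536,				// maximum segment size
	IODMACommand::kMapped,		// map through the system mapper
	0,				// no maximum transfer size
	4,				// 4-byte segment alignment
	0,				// default (system) mapper
	0);				// refCon
#endif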
IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    return withSpecification(fOutSeg, fNumAddressBits, fMaxSegmentSize,
	    fMappingOptions, fMaxTransferSize, fAlignMask + 1, fMapper, refCon);
}
#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)
bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
				    UInt8           numAddressBits,
				    UInt64          maxSegmentSize,
				    MappingOptions  mappingOptions,
				    UInt64          maxTransferSize,
				    UInt32          alignment,
				    IOMapper       *mapper,
				    void           *refCon)
{
    if (!super::init() || !outSegFunc || !numAddressBits)
	return false;

    bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
		 || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
	if (!numAddressBits)
	    numAddressBits = 32;
	else if (numAddressBits > 32)
	    return false;	// Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
	return false;

    if (!maxSegmentSize)
	maxSegmentSize--;	// Set Max segment to -1
    if (!maxTransferSize)
	maxTransferSize--;	// Set Max transfer to -1

    if (!mapper)
    {
	IOMapper::checkForSystemMapper();
	mapper = IOMapper::gSystem;
    }

    fOutSeg	     = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
	alignment = 1;
    fAlignMask	     = alignment - 1;
    fMapper          = mapper;
    fRefCon          = refCon;

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:                   break;
    case kNonCoherent: fMapper = 0; break;
    case kBypassed:
	if (mapper && !mapper->getBypassMask(&fBypassMask))
	    return false;
	break;
    default:
	return false;
    }

    reserved = IONew(IODMACommandInternal, 1);
    if (!reserved)
	return false;
    bzero(reserved, sizeof(IODMACommandInternal));

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));

    return true;
}
void
IODMACommand::free()
{
    if (reserved)
	IODelete(reserved, IODMACommandInternal, 1);

    super::free();
}
IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    if (mem == fMemory)
    {
	if (!autoPrepare)
	{
	    while (fActive)
		complete();
	}
	return kIOReturnSuccess;
    }

    if (fMemory) {
	// As we are almost certainly being called from a work loop thread
	// if fActive is true it is probably not a good time to potentially
	// block.  Just test for it and return an error
	if (fActive)
	    return kIOReturnBusy;
	clearMemoryDescriptor();
    }

    if (mem) {
	bzero(&fMDSummary, sizeof(fMDSummary));
	IOReturn rtn = mem->dmaCommandOperation(
		kIOMDGetCharacteristics,
		&fMDSummary, sizeof(fMDSummary));
	if (rtn)
	    return rtn;

	ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

	if ((kMapped == MAPTYPE(fMappingOptions))
	    && fMapper
	    && (!fNumAddressBits || (fNumAddressBits >= 31)))
	    // assuming mapped space is 2G
	    fInternalState->fCheckAddressing = false;
	else
	    fInternalState->fCheckAddressing =
		(fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));

	fInternalState->fNewMD = true;
	mem->retain();
	fMemory = mem;

	if (autoPrepare)
	    return prepare();
    }

    return kIOReturnSuccess;
}
IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
    if (fActive && !autoComplete)
	return (kIOReturnNotReady);

    if (fMemory) {
	while (fActive)
	    complete();
	fMemory->release();
	fMemory = 0;
    }

    return (kIOReturnSuccess);
}
const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
    return fMemory;
}
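
// Lifecycle sketch (illustrative only): how a driver typically drives the
// methods above.  "md" and "dmaCommand" are hypothetical locals; error
// handling is omitted.
#if 0
    dmaCommand->setMemoryDescriptor(md, false);	// retain md, defer preparation
    dmaCommand->prepare();			// wire down and map the whole md
    // ... generate segments and run the hardware I/O ...
    dmaCommand->complete();			// unwire; sync any bounce copies
    dmaCommand->clearMemoryDescriptor();	// release the descriptor
#endif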
IOReturn
IODMACommand::segmentOp(
			void         *reference,
			IODMACommand *target,
			Segment64     segment,
			void         *segments,
			UInt32        segmentIndex)
{
    IOOptionBits op = (IOOptionBits) reference;
    addr64_t     maxPhys, address;
    addr64_t     remapAddr = 0;
    IOByteCount  length;
    UInt32       numPages;

    IODMACommandInternal * state = target->reserved;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64))
	maxPhys = (1ULL << target->fNumAddressBits);
    else
	maxPhys = 0;
    maxPhys--;

    address = segment.fIOVMAddr;
    length  = segment.fLength;

    assert(address);
    assert(length);

    if (!state->fMisaligned)
    {
	state->fMisaligned |= (0 != (target->fAlignMask & address));
	if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, target->fAlignMask);
    }

    if (state->fMisaligned && (kWalkPreflight & op))
	return (kIOReturnNotAligned);

    if (!state->fDoubleBuffer)
    {
	if ((address + length - 1) <= maxPhys)
	{
	    length = 0;
	}
	else if (address <= maxPhys)
	{
	    DEBG("tail %qx, %qx", address, length);
	    length  = (address + length - maxPhys - 1);
	    address = maxPhys + 1;
	    DEBG("-> %qx, %qx\n", address, length);
	}
    }

    if (!length)
	return (kIOReturnSuccess);

    numPages  = atop_64(round_page_64(length));
    remapAddr = state->fCopyNext;

    if (kWalkPreflight & op)
    {
	state->fCopyPageCount += numPages;
    }
    else
    {
	if (kWalkPrepare & op)
	{
	    for (IOItemCount idx = 0; idx < numPages; idx++)
		gIOCopyMapper->iovmInsert(atop_64(remapAddr), idx, atop_64(address) + idx);
	}
	if (state->fDoubleBuffer)
	    state->fCopyNext += length;
	else
	{
	    state->fCopyNext += round_page(length);
	    remapAddr += (address & PAGE_MASK);
	}

	if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
	{
	    DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
		    (kWalkSyncIn & op) ? "->" : "<-",
		    address, length, op);
	    if (kWalkSyncIn & op)
		copypv(remapAddr, address, length,
			cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc);
	    else
		copypv(address, remapAddr, length,
			cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc);
	}
    }

    return kIOReturnSuccess;
}
IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn     ret = kIOReturnSuccess;
    UInt32       numSegments;
    UInt64       offset;

    if (gIOEnableCopyMapper && (kWalkPreflight & op))
    {
	state->fCopyContig     = false;
	state->fMisaligned     = false;
	state->fDoubleBuffer   = false;
	state->fPrepared       = false;
	state->fCopyNext       = 0;
	state->fCopyPageAlloc  = 0;
	state->fCopyPageCount  = 0;
	state->fNextRemapIndex = 0;

	if (!(kWalkDoubleBuffer & op))
	{
	    offset = 0;
	    numSegments = 0-1;
	    ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
	}

	op &= ~kWalkPreflight;

	state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
	if (state->fDoubleBuffer)
	    state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));

	if (state->fCopyPageCount)
	{
	    IOMapper * mapper;
	    ppnum_t    mapBase = 0;

	    DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

	    mapper = gIOCopyMapper;
	    if (mapper)
		mapBase = mapper->iovmAlloc(state->fCopyPageCount);
	    if (mapBase)
	    {
		state->fCopyPageAlloc = mapBase;
		if (state->fCopyPageAlloc && state->fDoubleBuffer)
		{
		    DEBG("contig copy map\n");
		    state->fCopyContig = true;
		}

		state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
		offset = 0;
		numSegments = 0-1;
		ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
		state->fPrepared = true;
		op &= ~(kWalkSyncIn | kWalkSyncOut);
	    }
	    else
	    {
		DEBG("alloc IOBMD\n");
		state->fCopyMD = IOBufferMemoryDescriptor::withOptions(
			fMDSummary.fDirection, state->fPreparedLength, page_size);

		if (state->fCopyMD)
		{
		    ret = kIOReturnSuccess;
		    state->fPrepared = true;
		}
		else
		{
		    DEBG("IODMACommand !iovmAlloc");
		    return (kIOReturnNoResources);
		}
	    }
	}
    }

    if (gIOEnableCopyMapper && state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
    {
	if (state->fCopyPageCount)
	{
	    DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

	    if (state->fCopyPageAlloc)
	    {
		state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
		offset = 0;
		numSegments = 0-1;
		ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
	    }
	    else if (state->fCopyMD)
	    {
		DEBG("sync IOBMD\n");

		if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
		{
		    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

		    IOByteCount bytes;

		    if (kWalkSyncIn & op)
			bytes = poMD->writeBytes(state->fPreparedOffset,
						 state->fCopyMD->getBytesNoCopy(),
						 state->fPreparedLength);
		    else
			bytes = poMD->readBytes(state->fPreparedOffset,
						state->fCopyMD->getBytesNoCopy(),
						state->fPreparedLength);
		    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
		    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
		}
		else
		    ret = kIOReturnSuccess;
	    }
	}
    }

    if (kWalkComplete & op)
    {
	if (state->fCopyPageAlloc)
	{
	    gIOCopyMapper->iovmFree(state->fCopyPageAlloc, state->fCopyPageCount);
	    state->fCopyPageAlloc = 0;
	    state->fCopyPageCount = 0;
	}
	if (state->fCopyMD)
	{
	    state->fCopyMD->release();
	    state->fCopyMD = 0;
	}

	state->fPrepared = false;
    }
    return (ret);
}
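
// For reference, the op masks this file passes to walkAll():
//   prepare()     -> kWalkPrepare | kWalkPreflight (| kWalkSyncOut if synchronize)
//   complete()    -> kWalkComplete (| kWalkSyncIn if synchronize)
//   synchronize() -> kWalkSyncIn or kWalkSyncOut, | kWalkSyncAlways
//                    (plus kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer
//                     when kForceDoubleBuffer is requested)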
IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
				       UInt8           numAddressBits,
				       UInt64          maxSegmentSize,
				       MappingOptions  mappingOptions,
				       UInt64          maxTransferSize,
				       UInt32          alignment,
				       IOMapper       *mapper,
				       UInt64          offset,
				       UInt64          length,
				       bool            flushCache,
				       bool            synchronize)
{
    if (fActive)
	return kIOReturnNotPermitted;

    if (!outSegFunc || !numAddressBits)
	return kIOReturnBadArgument;

    bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
		 || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
	if (!numAddressBits)
	    numAddressBits = 32;
	else if (numAddressBits > 32)
	    return kIOReturnBadArgument;	// Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
	return kIOReturnBadArgument;

    if (!maxSegmentSize)
	maxSegmentSize--;	// Set Max segment to -1
    if (!maxTransferSize)
	maxTransferSize--;	// Set Max transfer to -1

    if (!mapper)
    {
	IOMapper::checkForSystemMapper();
	mapper = IOMapper::gSystem;
    }
    fMapper = mapper;

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:                   break;
    case kNonCoherent: fMapper = 0; break;
    case kBypassed:
	if (mapper && !mapper->getBypassMask(&fBypassMask))
	    return kIOReturnBadArgument;
	break;
    default:
	return kIOReturnBadArgument;
    }

    fOutSeg	     = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
	alignment = 1;
    fAlignMask	     = alignment - 1;

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));

    return prepare(offset, length, flushCache, synchronize);
}
IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    MappingOptions mappingOptions = fMappingOptions;

    if (!length)
	length = fMDSummary.fLength;

    if (length > fMaxTransferSize)
	return kIOReturnNoSpace;

    if (IS_NONCOHERENT(mappingOptions) && flushCache) {
	IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

	poMD->performOperation(kIOMemoryIncoherentIOStore, 0, fMDSummary.fLength);
    }

    if (fActive++)
    {
	if ((state->fPreparedOffset != offset)
	 || (state->fPreparedLength != length))
	    ret = kIOReturnNotReady;
    }
    else
    {
	state->fPreparedOffset = offset;
	state->fPreparedLength = length;

	state->fCopyContig     = false;
	state->fMisaligned     = false;
	state->fDoubleBuffer   = false;
	state->fPrepared       = false;
	state->fCopyNext       = 0;
	state->fCopyPageAlloc  = 0;
	state->fCopyPageCount  = 0;
	state->fNextRemapIndex = 0;

	state->fCursor = state->fIterateOnly
			|| (!state->fCheckAddressing
			    && (!fAlignMask
				|| ((fMDSummary.fPageAlign & (1 << 31))
				    && (0 == (fMDSummary.fPageAlign & fAlignMask)))));
	if (!state->fCursor)
	{
	    IOOptionBits op = kWalkPrepare | kWalkPreflight;
	    if (synchronize)
		op |= kWalkSyncOut;
	    ret = walkAll(op);
	}
	if (kIOReturnSuccess == ret)
	    state->fPrepared = true;
    }
    return ret;
}
IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    if (fActive < 1)
	return kIOReturnNotReady;

    if (!--fActive)
    {
	if (!state->fCursor)
	{
	    IOOptionBits op = kWalkComplete;
	    if (synchronize)
		op |= kWalkSyncIn;
	    ret = walkAll(op);
	}
	state->fPrepared = false;

	if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
	{
	    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

	    poMD->performOperation(kIOMemoryIncoherentIOFlush, 0, fMDSummary.fLength);
	}
    }

    return ret;
}
IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOOptionBits           op;

    if (kIODirectionOutIn == (kIODirectionOutIn & options))
	return kIOReturnBadArgument;

    if (fActive < 1)
	return kIOReturnNotReady;

    op = 0;
    if (kForceDoubleBuffer & options)
    {
	if (state->fDoubleBuffer)
	    return kIOReturnSuccess;
	if (state->fCursor)
	    state->fCursor = false;
	else
	    ret = walkAll(kWalkComplete);

	op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
    }
    else if (state->fCursor)
	return kIOReturnSuccess;

    if (kIODirectionIn & options)
	op |= kWalkSyncIn | kWalkSyncAlways;
    else if (kIODirectionOut & options)
	op |= kWalkSyncOut | kWalkSyncAlways;

    ret = walkAll(op);

    return ret;
}
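
// Usage sketch (illustrative only): with a noncoherent or bounce-buffered
// command, push device-written data back to the client's buffer before the
// driver inspects it.  "dmaCommand" is a hypothetical prepared command.
#if 0
    dmaCommand->synchronize(kIODirectionIn);
#endif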
struct IODMACommandTransferContext
{
    void *   buffer;
    UInt64   bufferOffset;
    UInt64   remaining;
    UInt32   op;
};
enum
{
    kIODMACommandTransferOpReadBytes  = 1,
    kIODMACommandTransferOpWriteBytes = 2
};
IOReturn
IODMACommand::transferSegment(void         *reference,
			      IODMACommand *target,
			      Segment64     segment,
			      void         *segments,
			      UInt32        segmentIndex)
{
    IODMACommandTransferContext * context = (IODMACommandTransferContext *) segments;
    UInt64   length  = min(segment.fLength, context->remaining);
    addr64_t ioAddr  = segment.fIOVMAddr;
    addr64_t cpuAddr = ioAddr;

    context->remaining -= length;

    while (length)
    {
	UInt64 copyLen = length;
	if ((kMapped == MAPTYPE(target->fMappingOptions))
	    && target->fMapper)
	{
	    cpuAddr = target->fMapper->mapAddr(ioAddr);
	    copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
	    ioAddr += copyLen;
	}

	switch (context->op)
	{
	    case kIODMACommandTransferOpReadBytes:
		copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen,
			cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
		break;
	    case kIODMACommandTransferOpWriteBytes:
		copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen,
			cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
		break;
	}
	length                -= copyLen;
	context->bufferOffset += copyLen;
    }

    // Returning non-success stops the segment walk; report overrun once the
    // requested byte count has been satisfied.
    return (context->remaining ? kIOReturnSuccess : kIOReturnOverrun);
}
UInt64
IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
{
    IODMACommandInternal *      state = fInternalState;
    IODMACommandTransferContext context;
    UInt32                      numSegments = 0-1;

    if (fActive < 1)
	return (0);

    if (offset >= state->fPreparedLength)
	return (0);
    length = min(length, state->fPreparedLength - offset);

    context.buffer       = buffer;
    context.bufferOffset = 0;
    context.remaining    = length;
    context.op           = transferOp;
    (void) genIOVMSegments(transferSegment, (void *) kWalkClient, &offset, &context, &numSegments);

    return (length - context.remaining);
}
UInt64
IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length));
}

UInt64
IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length));
}
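
// Usage sketch (illustrative only): peek at the start of the prepared range
// through any bounce buffering, without touching the descriptor directly.
// "dmaCommand" is a hypothetical prepared command.
#if 0
    UInt8  header[16];
    UInt64 got = dmaCommand->readBytes(0, header, sizeof(header));
    if (got != sizeof(header))
	IOLog("short copy; range not prepared?\n");
#endif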
IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
			      void   *segmentsP,
			      UInt32 *numSegmentsP)
{
    return (genIOVMSegments(clientOutputSegment, (void *) kWalkClient,
			    offsetP, segmentsP, numSegmentsP));
}
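
// Usage sketch (illustrative only): harvest the segments of a prepared
// transfer into a fixed scatter/gather table.  Assumes the command was
// created with OutputHost64; "dmaCommand" and "dmaLength" are hypothetical.
#if 0
    IODMACommand::Segment64 segs[32];
    UInt64 offset = 0;
    while (offset < dmaLength)
    {
	UInt32 numSegs = 32;
	if (kIOReturnSuccess != dmaCommand->genIOVMSegments(&offset, segs, &numSegs))
	    break;
	for (UInt32 i = 0; i < numSegs; i++)
	{
	    // program segs[i].fIOVMAddr / segs[i].fLength into the hardware
	}
    }
#endif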
IOReturn
IODMACommand::genIOVMSegments(InternalSegmentFunction outSegFunc,
			      void   *reference,
			      UInt64 *offsetP,
			      void   *segmentsP,
			      UInt32 *numSegmentsP)
{
    IOOptionBits           op = (IOOptionBits) reference;
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp = kIOMDWalkSegments;
    IOReturn               ret  = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive)
	return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
	return kIOReturnBadArgument;

    IOMDDMAWalkSegmentArgs *state =
	(IOMDDMAWalkSegmentArgs *) fState;

    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
	return kIOReturnOverrun;

    if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
	state->fOffset                 = 0;
	state->fIOVMAddr               = 0;
	internalState->fNextRemapIndex = 0;
	internalState->fNewMD          = false;
	state->fMapped                 = (IS_MAPPED(fMappingOptions) && fMapper);
	mdOp                           = kIOMDFirstSegment;
    }

    UInt64    bypassMask  = fBypassMask;
    UInt32    segIndex    = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg      = { 0, 0 };
    addr64_t  maxPhys;

    if (fNumAddressBits && (fNumAddressBits < 64))
	maxPhys = (1ULL << fNumAddressBits);
    else
	maxPhys = 0;
    maxPhys--;

    while ((state->fIOVMAddr) || state->fOffset < memLength)
    {
	if (!state->fIOVMAddr) {

	    IOReturn rtn;

	    state->fOffset = offset;
	    state->fLength = memLength - offset;

	    if (internalState->fCopyContig && (kWalkClient & op))
	    {
		state->fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc)
				    + offset - internalState->fPreparedOffset;
		rtn = kIOReturnSuccess;
	    }
	    else
	    {
		const IOMemoryDescriptor * memory =
		    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
		rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
		mdOp = kIOMDWalkSegments;
	    }

	    if (rtn == kIOReturnSuccess) {
		assert(state->fIOVMAddr);
		assert(state->fLength);
	    }
	    else if (rtn == kIOReturnOverrun)
		state->fIOVMAddr = state->fLength = 0;	// At end
	    else
		return rtn;
	}

	if (!curSeg.fIOVMAddr) {
	    UInt64 length = state->fLength;

	    offset          += length;
	    curSeg.fIOVMAddr = state->fIOVMAddr | bypassMask;
	    curSeg.fLength   = length;
	    state->fIOVMAddr = 0;
	}
	else if ((curSeg.fIOVMAddr + curSeg.fLength == state->fIOVMAddr)) {
	    UInt64 length = state->fLength;
	    offset          += length;
	    curSeg.fLength  += length;
	    state->fIOVMAddr = 0;
	}

	if (!state->fIOVMAddr)
	{
	    if (kWalkClient & op)
	    {
		if ((curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
		{
		    if (internalState->fCursor)
		    {
			curSeg.fIOVMAddr = 0;
			ret = kIOReturnMessageTooLarge;
			break;
		    }
		    else if (curSeg.fIOVMAddr <= maxPhys)
		    {
			UInt64 remain, newLength;

			newLength        = (maxPhys + 1 - curSeg.fIOVMAddr);
			DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
			remain           = curSeg.fLength - newLength;
			state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
			curSeg.fLength   = newLength;
			state->fLength   = remain;
			offset          -= remain;
		    }
		    else if (gIOCopyMapper)
		    {
			DEBG("sparse switch %qx, %qx ", curSeg.fIOVMAddr, curSeg.fLength);
			if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
				    ptoa_64(internalState->fCopyPageAlloc + internalState->fNextRemapIndex)))
			{
			    curSeg.fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc + internalState->fNextRemapIndex)
						+ (curSeg.fIOVMAddr & PAGE_MASK);
			    internalState->fNextRemapIndex += atop_64(round_page(curSeg.fLength));
			}
			else for (UInt checkRemapIndex = 0; checkRemapIndex < internalState->fCopyPageCount; checkRemapIndex++)
			{
			    if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
					ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex)))
			    {
				curSeg.fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex)
						    + (curSeg.fIOVMAddr & PAGE_MASK);
				internalState->fNextRemapIndex = checkRemapIndex + atop_64(round_page(curSeg.fLength));
				break;
			    }
			}
			DEBG("-> %qx, %qx\n", curSeg.fIOVMAddr, curSeg.fLength);
		    }
		}
	    }

	    if (curSeg.fLength > fMaxSegmentSize)
	    {
		UInt64 remain = curSeg.fLength - fMaxSegmentSize;

		state->fIOVMAddr = fMaxSegmentSize + curSeg.fIOVMAddr;
		curSeg.fLength   = fMaxSegmentSize;

		state->fLength   = remain;
		offset          -= remain;
	    }

	    if (internalState->fCursor
		&& (0 != (fAlignMask & curSeg.fIOVMAddr)))
	    {
		curSeg.fIOVMAddr = 0;
		ret = kIOReturnNotAligned;
		break;
	    }

	    if (offset >= memLength)
	    {
		curSeg.fLength  -= (offset - memLength);
		offset           = memLength;
		state->fIOVMAddr = state->fLength = 0;	// At end
		break;
	    }
	}

	if (state->fIOVMAddr) {
	    if ((segIndex + 1 == numSegments))
		break;

	    ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
	    curSeg.fIOVMAddr = 0;
	    if (kIOReturnSuccess != ret)
		break;
	}
    }

    if (curSeg.fIOVMAddr) {
	ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
	state->fOffset = offset;
	*offsetP       = offset - internalState->fPreparedOffset;
	*numSegmentsP  = segIndex;
    }
    return ret;
}
IOReturn
IODMACommand::clientOutputSegment(
	void *reference, IODMACommand *target,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    IOReturn ret = kIOReturnSuccess;

    if ((target->fNumAddressBits < 64)
	&& ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits))
    {
	DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
	ret = kIOReturnMessageTooLarge;
    }

    if (!(*target->fOutSeg)(target, segment, vSegList, outSegIndex))
    {
	DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
	ret = kIOReturnMessageTooLarge;
    }

    return (ret);
}
bool
IODMACommand::OutputHost32(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment32 *base = (Segment32 *) vSegList;
    base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
    base[outSegIndex].fLength   = (UInt32) segment.fLength;
    return true;
}
bool
IODMACommand::OutputBig32(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteBigInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}
bool
IODMACommand::OutputLittle32(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteLittleInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}
bool
IODMACommand::OutputHost64(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment64 *base = (Segment64 *) vSegList;
    base[outSegIndex] = segment;
    return true;
}
bool
IODMACommand::OutputBig64(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteBigInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}
bool
IODMACommand::OutputLittle64(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteLittleInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}
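
// Sketch of a custom SegmentFunction (illustrative only): hardware with its
// own descriptor layout can pass a function of this shape to
// withSpecification() instead of the stock Output* helpers above.
// "MyDesc" and "myOutputSegment" are hypothetical.
#if 0
struct MyDesc
{
    UInt32 addr;
    UInt32 len;
};

static bool
myOutputSegment(IODMACommand *, IODMACommand::Segment64 segment,
		void *vSegList, UInt32 outSegIndex)
{
    MyDesc * descs = (MyDesc *) vSegList;
    descs[outSegIndex].addr = (UInt32) segment.fIOVMAddr;
    descs[outSegIndex].len  = (UInt32) segment.fLength;
    return true;	// false rejects the segment (kIOReturnMessageTooLarge)
}
#endif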