/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/assert.h>

#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"

#define MAPTYPE(type)           ((UInt) (type) & kTypeMask)
#define IS_MAPPED(type)         (MAPTYPE(type) == kMapped)
#define IS_BYPASSED(type)       (MAPTYPE(type) == kBypassed)
#define IS_NONCOHERENT(type)    (MAPTYPE(type) == kNonCoherent)

enum
{
    kWalkSyncIn       = 0x01,   // bounce -> md
    kWalkSyncOut      = 0x02,   // bounce <- md
    kWalkSyncAlways   = 0x04,
    kWalkPreflight    = 0x08,
    kWalkDoubleBuffer = 0x10,
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80
};
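
/*
 * Note: the walk ops above combine over a command's lifetime; as the code
 * below shows, prepare() walks with kWalkPrepare | kWalkPreflight (plus
 * kWalkSyncOut when synchronizing), complete() with kWalkComplete (plus
 * kWalkSyncIn), and synchronize() adds kWalkSyncIn or kWalkSyncOut together
 * with kWalkSyncAlways.
 */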

#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary

#if 1
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction)                                      \
        ((kIODirectionNone == (direction))                                  \
            || (kWalkSyncAlways & (op))                                     \
            || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)   \
                                                    & (direction)))
#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif

#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif

/**************************** class IODMACommand ***************************/

#define super OSObject
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);

OSMetaClassDefineReservedUsed(IODMACommand,  0);
OSMetaClassDefineReservedUsed(IODMACommand,  1);
OSMetaClassDefineReservedUsed(IODMACommand,  2);
OSMetaClassDefineReservedUnused(IODMACommand,  3);
OSMetaClassDefineReservedUnused(IODMACommand,  4);
OSMetaClassDefineReservedUnused(IODMACommand,  5);
OSMetaClassDefineReservedUnused(IODMACommand,  6);
OSMetaClassDefineReservedUnused(IODMACommand,  7);
OSMetaClassDefineReservedUnused(IODMACommand,  8);
OSMetaClassDefineReservedUnused(IODMACommand,  9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);

IODMACommand *
IODMACommand::withSpecification(SegmentFunction outSegFunc,
                                UInt8           numAddressBits,
                                UInt64          maxSegmentSize,
                                MappingOptions  mappingOptions,
                                UInt64          maxTransferSize,
                                UInt32          alignment,
                                IOMapper       *mapper,
                                void           *refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc,
                                         numAddressBits, maxSegmentSize,
                                         mappingOptions, maxTransferSize,
                                         alignment,      mapper, refCon))
    {
        me->release();
        return 0;
    };

    return me;
}
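
/*
 * Illustrative usage sketch (not from the original source): a driver for a
 * 32-bit DMA engine producing Segment32 lists might create its command as
 * follows; "cmd" is a placeholder name.
 *
 *   IODMACommand * cmd = IODMACommand::withSpecification(
 *                            IODMACommand::OutputHost32,  // outSegFunc
 *                            32,                    // numAddressBits
 *                            65536,                 // maxSegmentSize
 *                            IODMACommand::kMapped, // mappingOptions
 *                            0,                     // maxTransferSize (none)
 *                            1,                     // alignment
 *                            0,                     // mapper: system mapper
 *                            0);                    // refCon
 */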

IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    return withSpecification(fOutSeg, fNumAddressBits, fMaxSegmentSize,
            fMappingOptions, fMaxTransferSize, fAlignMask + 1, fMapper, refCon);
}

#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)

bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
                                    UInt8           numAddressBits,
                                    UInt64          maxSegmentSize,
                                    MappingOptions  mappingOptions,
                                    UInt64          maxTransferSize,
                                    UInt32          alignment,
                                    IOMapper       *mapper,
                                    void           *refCon)
{
    if (!super::init() || !outSegFunc || !numAddressBits)
        return false;

    bool is32Bit = (OutputHost32   == outSegFunc || OutputBig32 == outSegFunc
                 || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
        if (!numAddressBits)
            numAddressBits = 32;
        else if (numAddressBits > 32)
            return false;           // Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
        return false;

    if (!maxSegmentSize)
        maxSegmentSize--;   // Set Max segment to -1
    if (!maxTransferSize)
        maxTransferSize--;  // Set Max transfer to -1

    if (!mapper)
    {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    fNumSegments     = 0;
    fBypassMask      = 0;
    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
        alignment = 1;
    fAlignMask       = alignment - 1;
    fMapper          = mapper;
    fRefCon          = refCon;

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:                   break;
    case kNonCoherent: fMapper = 0; break;
    case kBypassed:
        if (mapper && !mapper->getBypassMask(&fBypassMask))
            return false;
        break;
    default:
        return false;
    };

    reserved = IONew(IODMACommandInternal, 1);
    if (!reserved)
        return false;
    bzero(reserved, sizeof(IODMACommandInternal));

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));

    return true;
}

void
IODMACommand::free()
{
    if (reserved)
        IODelete(reserved, IODMACommandInternal, 1);

    super::free();
}

IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    if (mem == fMemory)
    {
        if (!autoPrepare)
        {
            while (fActive)
                complete();
        }
        return kIOReturnSuccess;
    }

    if (fMemory) {
        // As we are almost certainly being called from a work loop thread
        // if fActive is true it is probably not a good time to potentially
        // block.  Just test for it and return an error
        if (fActive)
            return kIOReturnBusy;
        clearMemoryDescriptor();
    };

    if (mem) {
        bzero(&fMDSummary, sizeof(fMDSummary));
        IOReturn rtn = mem->dmaCommandOperation(
                kIOMDGetCharacteristics,
                &fMDSummary, sizeof(fMDSummary));
        if (rtn)
            return rtn;

        ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

        if ((kMapped == MAPTYPE(fMappingOptions))
            && fMapper
            && (!fNumAddressBits || (fNumAddressBits >= 31)))
            // assuming mapped space is 2G
            fInternalState->fCheckAddressing = false;
        else
            fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));

        fInternalState->fNewMD = true;
        mem->retain();
        fMemory = mem;

        mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
        if (autoPrepare)
            return prepare();
    };

    return kIOReturnSuccess;
}
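
/*
 * Typical client flow (an illustrative sketch, not from the original
 * source; "cmd" and "md" are placeholder names):
 *
 *   cmd->setMemoryDescriptor(md);   // retains md, marks it DMA-active
 *   cmd->prepare();                 // preflight/prepare walk, bounce setup
 *   ...generate segments and run the I/O...
 *   cmd->complete();
 *   cmd->clearMemoryDescriptor();   // marks md inactive and releases it
 */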

IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
    if (fActive && !autoComplete)
        return (kIOReturnNotReady);

    if (fMemory) {
        while (fActive)
            complete();
        fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
        fMemory->release();
        fMemory = 0;
    }

    return (kIOReturnSuccess);
}

const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
    return fMemory;
}
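
/*
 * segmentOp: internal walk callback. During kWalkPreflight it counts the
 * pages of each segment that fall outside the addressable range; during
 * kWalkPrepare it inserts copy-mapper mappings for those pages; during
 * kWalkSyncIn/kWalkSyncOut it bounces data between the original pages and
 * the remapped or double-buffered pages with copypv().
 */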

IOReturn
IODMACommand::segmentOp(
                        void         *reference,
                        IODMACommand *target,
                        Segment64     segment,
                        void         *segments,
                        UInt32        segmentIndex)
{
    IOOptionBits op = (uintptr_t) reference;
    addr64_t     maxPhys, address;
    addr64_t     remapAddr = 0;
    uint64_t     length;
    uint32_t     numPages;

    IODMACommandInternal * state = target->reserved;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64) && !state->fLocalMapper)
        maxPhys = (1ULL << target->fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    address = segment.fIOVMAddr;
    length  = segment.fLength;

    assert(address);
    assert(length);

    if (!state->fMisaligned)
    {
        state->fMisaligned |= (0 != (state->fSourceAlignMask & address));
        if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, state->fSourceAlignMask);
    }

    if (state->fMisaligned && (kWalkPreflight & op))
        return (kIOReturnNotAligned);

    if (!state->fDoubleBuffer)
    {
        if ((address + length - 1) <= maxPhys)
        {
            length = 0;
        }
        else if (address <= maxPhys)
        {
            DEBG("tail %qx, %qx", address, length);
            length  = (address + length - maxPhys - 1);
            address = maxPhys + 1;
            DEBG("-> %qx, %qx\n", address, length);
        }
    }

    if (!length)
        return (kIOReturnSuccess);

    numPages  = atop_64(round_page_64(length));
    remapAddr = state->fCopyNext;

    if (kWalkPreflight & op)
    {
        state->fCopyPageCount += numPages;
    }
    else
    {
        if (kWalkPrepare & op)
        {
            for (IOItemCount idx = 0; idx < numPages; idx++)
                gIOCopyMapper->iovmInsert(atop_64(remapAddr), idx, atop_64(address) + idx);
        }
        if (state->fDoubleBuffer)
            state->fCopyNext += length;
        else
        {
            state->fCopyNext += round_page(length);
            remapAddr += (address & PAGE_MASK);
        }

        if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
        {
            DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
                    (kWalkSyncIn & op) ? "->" : "<-",
                    address, length, op);
            if (kWalkSyncIn & op)
            { // cppvNoModSnk
                copypv(remapAddr, address, length,
                                cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
            }
            else
            {
                copypv(address, remapAddr, length,
                                cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
            }
        }
    }

    return kIOReturnSuccess;
}
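
/*
 * walkAll: drives segmentOp over the prepared range. Preflight sizing,
 * copy-mapper or IOBufferMemoryDescriptor bounce allocation, sync copies,
 * and completion teardown are all selected by the kWalk* bits in op.
 */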

IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn     ret = kIOReturnSuccess;
    UInt32       numSegments;
    UInt64       offset;

    if (kWalkPreflight & op)
    {
        state->fMapContig            = false;
        state->fMisaligned           = false;
        state->fDoubleBuffer         = false;
        state->fPrepared             = false;
        state->fCopyNext             = 0;
        state->fCopyMapperPageAlloc  = 0;
        state->fLocalMapperPageAlloc = 0;
        state->fCopyPageCount        = 0;
        state->fNextRemapIndex       = 0;
        state->fCopyMD               = 0;

        if (!(kWalkDoubleBuffer & op))
        {
            offset = 0;
            numSegments = 0-1;
            ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
        }

        op &= ~kWalkPreflight;

        state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
        if (state->fDoubleBuffer)
            state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));

        if (state->fCopyPageCount)
        {
            IOMapper * mapper;
            ppnum_t    mapBase = 0;

            DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

            mapper = gIOCopyMapper;
            if (mapper)
                mapBase = mapper->iovmAlloc(state->fCopyPageCount);
            if (mapBase)
            {
                state->fCopyMapperPageAlloc = mapBase;
                if (state->fCopyMapperPageAlloc && state->fDoubleBuffer)
                {
                    DEBG("contig copy map\n");
                    state->fMapContig = true;
                }

                state->fCopyNext = ptoa_64(state->fCopyMapperPageAlloc);
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
                state->fPrepared = true;
                op &= ~(kWalkSyncIn | kWalkSyncOut);
            }
            else
            {
                DEBG("alloc IOBMD\n");
                state->fCopyMD = IOBufferMemoryDescriptor::withOptions(
                                    fMDSummary.fDirection, state->fPreparedLength, state->fSourceAlignMask);

                if (state->fCopyMD)
                {
                    ret = kIOReturnSuccess;
                    state->fPrepared = true;
                }
                else
                {
                    DEBG("IODMACommand !iovmAlloc");
                    return (kIOReturnNoResources);
                }
            }
        }

        if (state->fLocalMapper)
        {
            state->fLocalMapperPageCount = atop_64(round_page(
                        state->fPreparedLength + ((state->fPreparedOffset + fMDSummary.fPageAlign) & page_mask)));
            state->fLocalMapperPageAlloc = fMapper->iovmAllocDMACommand(this, state->fLocalMapperPageCount);
            state->fMapContig = true;
        }
    }

    if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
    {
        if (state->fCopyPageCount)
        {
            DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

            if (state->fCopyMapperPageAlloc)
            {
                state->fCopyNext = ptoa_64(state->fCopyMapperPageAlloc);
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
            }
            else if (state->fCopyMD)
            {
                DEBG("sync IOBMD\n");

                if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
                {
                    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

                    IOByteCount bytes;

                    if (kWalkSyncIn & op)
                        bytes = poMD->writeBytes(state->fPreparedOffset,
                                                    state->fCopyMD->getBytesNoCopy(),
                                                    state->fPreparedLength);
                    else
                        bytes = poMD->readBytes(state->fPreparedOffset,
                                                    state->fCopyMD->getBytesNoCopy(),
                                                    state->fPreparedLength);
                    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
                    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
                }
                else
                    ret = kIOReturnSuccess;
            }
        }
    }

    if (kWalkComplete & op)
    {
        if (state->fLocalMapperPageAlloc)
        {
            fMapper->iovmFreeDMACommand(this, state->fLocalMapperPageAlloc, state->fLocalMapperPageCount);
            state->fLocalMapperPageAlloc = 0;
            state->fLocalMapperPageCount = 0;
        }
        if (state->fCopyMapperPageAlloc)
        {
            gIOCopyMapper->iovmFree(state->fCopyMapperPageAlloc, state->fCopyPageCount);
            state->fCopyMapperPageAlloc = 0;
            state->fCopyPageCount = 0;
        }
        if (state->fCopyMD)
        {
            state->fCopyMD->release();
            state->fCopyMD = 0;
        }

        state->fPrepared = false;
    }

    return (ret);
}

UInt8
IODMACommand::getNumAddressBits(void)
{
    return (fNumAddressBits);
}

UInt32
IODMACommand::getAlignment(void)
{
    return (fAlignMask + 1);
}

IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction  outSegFunc,
                                       UInt8            numAddressBits,
                                       UInt64           maxSegmentSize,
                                       MappingOptions   mappingOptions,
                                       UInt64           maxTransferSize,
                                       UInt32           alignment,
                                       IOMapper        *mapper,
                                       UInt64           offset,
                                       UInt64           length,
                                       bool             flushCache,
                                       bool             synchronize)
{
    if (fActive)
        return kIOReturnNotPermitted;

    if (!outSegFunc || !numAddressBits)
        return kIOReturnBadArgument;

    bool is32Bit = (OutputHost32   == outSegFunc || OutputBig32 == outSegFunc
                 || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
        if (!numAddressBits)
            numAddressBits = 32;
        else if (numAddressBits > 32)
            return kIOReturnBadArgument;        // Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
        return kIOReturnBadArgument;

    if (!maxSegmentSize)
        maxSegmentSize--;   // Set Max segment to -1
    if (!maxTransferSize)
        maxTransferSize--;  // Set Max transfer to -1

    if (!mapper)
    {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:                   break;
    case kNonCoherent: fMapper = 0; break;
    case kBypassed:
        if (mapper && !mapper->getBypassMask(&fBypassMask))
            return kIOReturnBadArgument;
        break;
    default:
        return kIOReturnBadArgument;
    };

    fNumSegments     = 0;
    fBypassMask      = 0;
    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
        alignment = 1;
    fAlignMask       = alignment - 1;
    if (mapper != fMapper)
    {
        mapper->retain();
        fMapper->release();
        fMapper = mapper;
    }

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));

    return prepare(offset, length, flushCache, synchronize);
}

IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    MappingOptions mappingOptions = fMappingOptions;

    if (!length)
        length = fMDSummary.fLength;

    if (length > fMaxTransferSize)
        return kIOReturnNoSpace;

    if (IS_NONCOHERENT(mappingOptions) && flushCache) {
        IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

        poMD->performOperation(kIOMemoryIncoherentIOStore, offset, length);
    }
    if (fActive++)
    {
        if ((state->fPreparedOffset != offset)
          || (state->fPreparedLength != length))
            ret = kIOReturnNotReady;
    }
    else
    {
        state->fPreparedOffset = offset;
        state->fPreparedLength = length;

        state->fMapContig            = false;
        state->fMisaligned           = false;
        state->fDoubleBuffer         = false;
        state->fPrepared             = false;
        state->fCopyNext             = 0;
        state->fCopyMapperPageAlloc  = 0;
        state->fCopyPageCount        = 0;
        state->fNextRemapIndex       = 0;
        state->fCopyMD               = 0;
        state->fLocalMapperPageAlloc = 0;
        state->fLocalMapperPageCount = 0;

        state->fLocalMapper = (fMapper && (fMapper != IOMapper::gSystem));

        state->fSourceAlignMask = fAlignMask;
        if (state->fLocalMapper)
            state->fSourceAlignMask &= page_mask;

        state->fCursor = state->fIterateOnly
                        || (!state->fCheckAddressing
                            && !state->fLocalMapper
                            && (!state->fSourceAlignMask
                                || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));

        if (!state->fCursor)
        {
            IOOptionBits op = kWalkPrepare | kWalkPreflight;
            if (synchronize)
                op |= kWalkSyncOut;
            ret = walkAll(op);
        }
        if (kIOReturnSuccess == ret)
            state->fPrepared = true;
    }
    return ret;
}

IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    if (fActive < 1)
        return kIOReturnNotReady;

    if (!--fActive)
    {
        if (!state->fCursor)
        {
            IOOptionBits op = kWalkComplete;
            if (synchronize)
                op |= kWalkSyncIn;
            ret = walkAll(op);
        }
        state->fPrepared = false;

        if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
        {
            IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

            poMD->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
        }
    }

    return ret;
}

IOReturn
IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
{
    IODMACommandInternal * state = fInternalState;

    if (fActive < 1)
        return (kIOReturnNotReady);

    if (offset)
        *offset = state->fPreparedOffset;
    if (length)
        *length = state->fPreparedLength;

    return (kIOReturnSuccess);
}

IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOOptionBits           op;

    if (kIODirectionOutIn == (kIODirectionOutIn & options))
        return kIOReturnBadArgument;

    if (fActive < 1)
        return kIOReturnNotReady;

    op = 0;
    if (kForceDoubleBuffer & options)
    {
        if (state->fDoubleBuffer)
            return kIOReturnSuccess;
        if (state->fCursor)
            state->fCursor = false;
        else
            ret = walkAll(kWalkComplete);

        op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
    }
    else if (state->fCursor)
        return kIOReturnSuccess;

    if (kIODirectionIn & options)
        op |= kWalkSyncIn | kWalkSyncAlways;
    else if (kIODirectionOut & options)
        op |= kWalkSyncOut | kWalkSyncAlways;

    ret = walkAll(op);

    return ret;
}
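
/*
 * Illustrative sketch (assumed driver-side code, not from this file): after
 * a device-to-host transfer on a possibly bounce-buffered command, pull the
 * data back into the client's memory before reading it:
 *
 *   cmd->synchronize(kIODirectionIn);
 *
 * Passing IODMACommand::kForceDoubleBuffer | kIODirectionOut re-walks the
 * transfer through the copy buffer even when the cursor path would
 * otherwise have been used.
 */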

struct IODMACommandTransferContext
{
    void *   buffer;
    UInt64   bufferOffset;
    UInt64   remaining;
    UInt32   op;
};
enum
{
    kIODMACommandTransferOpReadBytes  = 1,
    kIODMACommandTransferOpWriteBytes = 2
};

IOReturn
IODMACommand::transferSegment(void   *reference,
                        IODMACommand *target,
                        Segment64     segment,
                        void         *segments,
                        UInt32        segmentIndex)
{
    IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
    UInt64   length  = min(segment.fLength, context->remaining);
    addr64_t ioAddr  = segment.fIOVMAddr;
    addr64_t cpuAddr = ioAddr;

    context->remaining -= length;

    while (length)
    {
        UInt64 copyLen = length;
        if ((kMapped == MAPTYPE(target->fMappingOptions))
            && target->fMapper)
        {
            cpuAddr = target->fMapper->mapAddr(ioAddr);
            copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
            ioAddr += copyLen;
        }

        switch (context->op)
        {
            case kIODMACommandTransferOpReadBytes:
                copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen,
                        cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
                break;
            case kIODMACommandTransferOpWriteBytes:
                copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen,
                        cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
                break;
        }
        length                -= copyLen;
        context->bufferOffset += copyLen;
    }

    return (context->remaining ? kIOReturnSuccess : kIOReturnOverrun);
}

UInt64
IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
{
    IODMACommandInternal *      state = fInternalState;
    IODMACommandTransferContext context;
    Segment64                   segments[1];
    UInt32                      numSegments = 0-1;

    if (fActive < 1)
        return (0);

    if (offset >= state->fPreparedLength)
        return (0);
    length = min(length, state->fPreparedLength - offset);

    context.buffer       = buffer;
    context.bufferOffset = 0;
    context.remaining    = length;
    context.op           = transferOp;
    (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);

    return (length - context.remaining);
}

UInt64
IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length));
}

UInt64
IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length));
}
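
/*
 * Illustrative sketch (placeholder names, not from this file): readBytes /
 * writeBytes move data between a caller buffer and the prepared range, so a
 * driver can inspect part of the transfer after prepare():
 *
 *   UInt8 status;
 *   if (sizeof(status) == cmd->readBytes(kStatusOffset, &status, sizeof(status)))
 *       ...;   // kStatusOffset is a hypothetical offset into the transfer
 */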

IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
                            offsetP, segmentsP, numSegmentsP));
}
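
/*
 * Illustrative segment-generation loop (an assumed driver-side sketch; the
 * command is presumed created with OutputHost64, and ProgramSGEntry is a
 * hypothetical hardware routine):
 *
 *   UInt64 offset = 0;
 *   while (offset < transferLength)
 *   {
 *       IODMACommand::Segment64 segs[32];
 *       UInt32 numSegs = 32;
 *       if (kIOReturnSuccess != cmd->genIOVMSegments(&offset, segs, &numSegs))
 *           break;
 *       for (UInt32 i = 0; i < numSegs; i++)
 *           ProgramSGEntry(segs[i].fIOVMAddr, segs[i].fLength);
 *   }
 */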

IOReturn
IODMACommand::genIOVMSegments(uint32_t op,
                              InternalSegmentFunction outSegFunc,
                              void   *reference,
                              UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp = kIOMDWalkSegments;
    IOReturn               ret  = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive)
        return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
        return kIOReturnBadArgument;

    IOMDDMAWalkSegmentArgs *state =
        (IOMDDMAWalkSegmentArgs *) fState;

    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
        return kIOReturnOverrun;

    if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
        state->fOffset                 = 0;
        state->fIOVMAddr               = 0;
        internalState->fNextRemapIndex = 0;
        internalState->fNewMD          = false;
        state->fMapped                 = (IS_MAPPED(fMappingOptions) && fMapper);
        mdOp                           = kIOMDFirstSegment;
    };

    UInt64    bypassMask  = fBypassMask;
    UInt32    segIndex    = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg      = { 0, 0 };
    addr64_t  maxPhys;

    if (fNumAddressBits && (fNumAddressBits < 64))
        maxPhys = (1ULL << fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    while ((state->fIOVMAddr) || state->fOffset < memLength)
    {
        if (!state->fIOVMAddr) {

            IOReturn rtn;

            state->fOffset = offset;
            state->fLength = memLength - offset;

            if (internalState->fMapContig && (kWalkClient & op))
            {
                ppnum_t pageNum = internalState->fLocalMapperPageAlloc;
                if (!pageNum)
                    pageNum = internalState->fCopyMapperPageAlloc;
                state->fIOVMAddr = ptoa_64(pageNum)
                                    + offset - internalState->fPreparedOffset;
                rtn = kIOReturnSuccess;
            }
            else
            {
                const IOMemoryDescriptor * memory =
                    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
                rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
                mdOp = kIOMDWalkSegments;
            }

            if (rtn == kIOReturnSuccess) {
                assert(state->fIOVMAddr);
                assert(state->fLength);
            }
            else if (rtn == kIOReturnOverrun)
                state->fIOVMAddr = state->fLength = 0; // At end
            else
                return rtn;
        };

        if (!curSeg.fIOVMAddr) {
            UInt64 length = state->fLength;

            offset          += length;
            curSeg.fIOVMAddr = state->fIOVMAddr | bypassMask;
            curSeg.fLength   = length;
            state->fIOVMAddr = 0;
        }
        else if ((curSeg.fIOVMAddr + curSeg.fLength == state->fIOVMAddr)) {
            UInt64 length = state->fLength;
            offset          += length;
            curSeg.fLength  += length;
            state->fIOVMAddr = 0;
        };

        if (!state->fIOVMAddr)
        {
            if (kWalkClient & op)
            {
                if ((curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
                {
                    if (internalState->fCursor)
                    {
                        curSeg.fIOVMAddr = 0;
                        ret = kIOReturnMessageTooLarge;
                        break;
                    }
                    else if (curSeg.fIOVMAddr <= maxPhys)
                    {
                        UInt64 remain, newLength;

                        newLength        = (maxPhys + 1 - curSeg.fIOVMAddr);
                        DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
                        remain           = curSeg.fLength - newLength;
                        state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
                        curSeg.fLength   = newLength;
                        state->fLength   = remain;
                        offset          -= remain;
                    }
                    else if (gIOCopyMapper)
                    {
                        DEBG("sparse switch %qx, %qx ", curSeg.fIOVMAddr, curSeg.fLength);
                        if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
                                        ptoa_64(internalState->fCopyMapperPageAlloc + internalState->fNextRemapIndex)))
                        {
                            curSeg.fIOVMAddr = ptoa_64(internalState->fCopyMapperPageAlloc + internalState->fNextRemapIndex)
                                                + (curSeg.fIOVMAddr & PAGE_MASK);
                            internalState->fNextRemapIndex += atop_64(round_page(curSeg.fLength));
                        }
                        else for (UInt checkRemapIndex = 0; checkRemapIndex < internalState->fCopyPageCount; checkRemapIndex++)
                        {
                            if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
                                        ptoa_64(internalState->fCopyMapperPageAlloc + checkRemapIndex)))
                            {
                                curSeg.fIOVMAddr = ptoa_64(internalState->fCopyMapperPageAlloc + checkRemapIndex)
                                                    + (curSeg.fIOVMAddr & PAGE_MASK);
                                internalState->fNextRemapIndex = checkRemapIndex + atop_64(round_page(curSeg.fLength));
                                break;
                            }
                        }
                        DEBG("-> %qx, %qx\n", curSeg.fIOVMAddr, curSeg.fLength);
                    }
                }
            }

            if (curSeg.fLength > fMaxSegmentSize)
            {
                UInt64 remain = curSeg.fLength - fMaxSegmentSize;

                state->fIOVMAddr = fMaxSegmentSize + curSeg.fIOVMAddr;
                curSeg.fLength   = fMaxSegmentSize;

                state->fLength   = remain;
                offset          -= remain;
            }

            if (internalState->fCursor
                && (0 != (internalState->fSourceAlignMask & curSeg.fIOVMAddr)))
            {
                curSeg.fIOVMAddr = 0;
                ret = kIOReturnNotAligned;
                break;
            }

            if (offset >= memLength)
            {
                curSeg.fLength  -= (offset - memLength);
                offset = memLength;
                state->fIOVMAddr = state->fLength = 0; // At end
                break;
            }
        }

        if (state->fIOVMAddr) {
            if ((segIndex + 1 == numSegments))
                break;

            ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
            curSeg.fIOVMAddr = 0;
            if (kIOReturnSuccess != ret)
                break;
        }
    }

    if (curSeg.fIOVMAddr) {
        ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
        state->fOffset = offset;
        *offsetP       = offset - internalState->fPreparedOffset;
        *numSegmentsP  = segIndex;
    }

    return ret;
}

IOReturn
IODMACommand::clientOutputSegment(
        void *reference, IODMACommand *target,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    SegmentFunction segmentFunction = (SegmentFunction) reference;
    IOReturn ret = kIOReturnSuccess;

    if ((target->fNumAddressBits < 64)
        && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
        && (target->reserved->fLocalMapperPageAlloc || !target->reserved->fLocalMapper))
    {
        DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    if (!(*segmentFunction)(target, segment, vSegList, outSegIndex))
    {
        DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    return (ret);
}
,
1130 UInt32
*numSegmentsP
)
1132 return (genIOVMSegments(kWalkClient
, clientOutputSegment
, (void *) segmentFunction
,
1133 offsetP
, segmentsP
, numSegmentsP
));

bool
IODMACommand::OutputHost32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment32 *base = (Segment32 *) vSegList;
    base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
    base[outSegIndex].fLength   = (UInt32) segment.fLength;
    return true;
}

bool
IODMACommand::OutputBig32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteBigInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteLittleInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputHost64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment64 *base = (Segment64 *) vSegList;
    base[outSegIndex] = segment;
    return true;
}

bool
IODMACommand::OutputBig64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteBigInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteLittleInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}