/*
 * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>

#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"
#define MAPTYPE(type)		((UInt) (type) & kTypeMask)
#define IS_MAPPED(type)		(MAPTYPE(type) == kMapped)
#define IS_BYPASSED(type)	(MAPTYPE(type) == kBypassed)
#define IS_NONCOHERENT(type)	(MAPTYPE(type) == kNonCoherent)
static bool gIOEnableCopyMapper = true;
enum
{
    kWalkSyncIn       = 0x01,	// bounce -> md
    kWalkSyncOut      = 0x02,	// bounce <- md
    kWalkSyncAlways   = 0x04,
    kWalkPreflight    = 0x08,
    kWalkDoubleBuffer = 0x10,
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80
};
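
/*
 * The kWalk* bits select the phases performed by walkAll() and segmentOp():
 * kWalkPreflight counts the bounce pages a transfer would need, kWalkPrepare
 * enters remap pages into the copy mapper, kWalkSyncIn/kWalkSyncOut copy data
 * between the bounce pages and the client's memory descriptor, and
 * kWalkComplete releases the bounce resources. kWalkClient marks walks that
 * originate from the public genIOVMSegments() entry point.
 */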
struct ExpansionData
{
    IOMDDMAWalkSegmentState fState;
    IOMDDMACharacteristics  fMDSummary;

    UInt64 fPreparedOffset;
    UInt64 fPreparedLength;

    UInt8  fCursor;
    UInt8  fCheckAddressing;
    UInt8  fIterateOnly;
    UInt8  fMisaligned;
    UInt8  fCopyContig;
    UInt8  fPrepared;
    UInt8  fDoubleBuffer;

    ppnum_t  fCopyPageAlloc;
    ppnum_t  fCopyPageCount;
    addr64_t fCopyNext;

    class IOBufferMemoryDescriptor * fCopyMD;
};
typedef ExpansionData IODMACommandInternal;
#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary
#if 1
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction)					    \
	((kIODirectionNone == (direction))				    \
	    || (kWalkSyncAlways & (op))					    \
	    || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)   \
						    & (direction)))
#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif
#if 0
#define DEBG(fmt, args...)	{ kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)	{}
#endif
/**************************** class IODMACommand ***************************/
#define super IOCommand
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);
OSMetaClassDefineReservedUnused(IODMACommand,  0);
OSMetaClassDefineReservedUnused(IODMACommand,  1);
OSMetaClassDefineReservedUnused(IODMACommand,  2);
OSMetaClassDefineReservedUnused(IODMACommand,  3);
OSMetaClassDefineReservedUnused(IODMACommand,  4);
OSMetaClassDefineReservedUnused(IODMACommand,  5);
OSMetaClassDefineReservedUnused(IODMACommand,  6);
OSMetaClassDefineReservedUnused(IODMACommand,  7);
OSMetaClassDefineReservedUnused(IODMACommand,  8);
OSMetaClassDefineReservedUnused(IODMACommand,  9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);
IODMACommand *
IODMACommand::withSpecification(SegmentFunction outSegFunc,
				UInt8           numAddressBits,
				UInt64          maxSegmentSize,
				MappingOptions  mappingOptions,
				UInt64          maxTransferSize,
				UInt32          alignment,
				IOMapper       *mapper,
				void           *refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc,
					 numAddressBits, maxSegmentSize,
					 mappingOptions, maxTransferSize,
					 alignment,      mapper, refCon))
    {
	me->release();
	return 0;
    };

    return me;
}
IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    return withSpecification(fOutSeg, fNumAddressBits, fMaxSegmentSize,
	    fMappingOptions, fMaxTransferSize, fAlignMask + 1, fMapper, refCon);
}
#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)
bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
				    UInt8           numAddressBits,
				    UInt64          maxSegmentSize,
				    MappingOptions  mappingOptions,
				    UInt64          maxTransferSize,
				    UInt32          alignment,
				    IOMapper       *mapper,
				    void           *refCon)
{
    if (!super::init() || !outSegFunc || !numAddressBits)
	return false;

    bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
		 || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
	if (!numAddressBits)
	    numAddressBits = 32;
	else if (numAddressBits > 32)
	    return false;		// Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
	return false;

    if (!maxSegmentSize)
	maxSegmentSize--;	// Set Max segment to -1
    if (!maxTransferSize)
	maxTransferSize--;	// Set Max transfer to -1

    if (!mapper)
    {
	IOMapper::checkForSystemMapper();
	mapper = IOMapper::gSystem;
    }

    fBypassMask      = 0;
    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
	alignment = 1;
    fAlignMask       = alignment - 1;
    fMapper          = mapper;
    fRefCon          = refCon;

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:                   break;
    case kNonCoherent: fMapper = 0; break;
    case kBypassed:
	if (mapper && !mapper->getBypassMask(&fBypassMask))
	    return false;
	break;
    default:
	return false;
    };

    reserved = IONew(ExpansionData, 1);
    if (!reserved)
	return false;
    bzero(reserved, sizeof(ExpansionData));

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));

    return true;
}
void
IODMACommand::free()
{
    if (reserved)
	IODelete(reserved, ExpansionData, 1);

    super::free();
}
IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem,
				  bool autoPrepare)
{
    if (mem == fMemory)
    {
	if (!autoPrepare)
	{
	    while (fActive)
		complete();
	}
	return kIOReturnSuccess;
    }

    if (fMemory) {
	// As we are almost certainly being called from a work loop thread
	// if fActive is true it is probably not a good time to potentially
	// block.  Just test for it and return an error
	if (fActive)
	    return kIOReturnBusy;
	clearMemoryDescriptor();
    };

    if (mem) {
	bzero(&fMDSummary, sizeof(fMDSummary));
	IOReturn rtn = mem->dmaCommandOperation(
		kIOMDGetCharacteristics,
		&fMDSummary, sizeof(fMDSummary));
	if (rtn)
	    return rtn;

	ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

	if ((kMapped == MAPTYPE(fMappingOptions))
	    && fMapper
	    && (!fNumAddressBits || (fNumAddressBits >= 31)))
	    // assuming mapped space is 2G
	    fInternalState->fCheckAddressing = false;
	else
	    fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));

	mem->retain();
	fMemory = mem;

	if (autoPrepare)
	    return prepare();
    };

    return kIOReturnSuccess;
}
IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
    if (fActive && !autoComplete)
	return (kIOReturnNotReady);

    if (fMemory) {
	while (fActive)
	    complete();
	fMemory->release();
	fMemory = 0;
    }

    return (kIOReturnSuccess);
}
const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
    return fMemory;
}
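
/*
 * segmentOp is the internal segment callback used by walkAll() through
 * genIOVMSegments(). Depending on the op bits it either tallies the pages of
 * each segment that falls outside the device's addressable range (preflight),
 * inserts remap pages into the copy mapper (prepare), or moves data between
 * the bounce pages and the descriptor's real pages with copypv() (sync).
 */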
IOReturn
IODMACommand::segmentOp(
			void         *reference,
			IODMACommand *target,
			Segment64     segment,
			void         *segments,
			UInt32        segmentIndex)
{
    IOOptionBits op = (IOOptionBits) reference;
    addr64_t     maxPhys, address;
    addr64_t     remapAddr = 0;
    UInt64       length;
    UInt32       numPages;

    IODMACommandInternal * state = target->reserved;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64))
	maxPhys = (1ULL << target->fNumAddressBits);
    else
	maxPhys = 0;
    maxPhys--;

    address = segment.fIOVMAddr;
    length = segment.fLength;

    assert(address);
    assert(length);

    if (!state->fMisaligned)
    {
	state->fMisaligned |= (0 != (target->fAlignMask & address));
	if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, target->fAlignMask);
    }

    if (state->fMisaligned && (kWalkPreflight & op))
	return (kIOReturnNotAligned);

    if (!state->fDoubleBuffer)
    {
	if ((address + length - 1) <= maxPhys)
	{
	    length = 0;
	}
	else if (address <= maxPhys)
	{
	    DEBG("tail %qx, %qx", address, length);
	    length = (address + length - maxPhys - 1);
	    address = maxPhys + 1;
	    DEBG("-> %qx, %qx\n", address, length);
	}
    }

    if (!length)
	return (kIOReturnSuccess);

    numPages = atop_64(round_page_64(length));
    remapAddr = state->fCopyNext;

    if (kWalkPreflight & op)
    {
	state->fCopyPageCount += numPages;
    }
    else
    {
	if (kWalkPrepare & op)
	{
	    for (IOItemCount idx = 0; idx < numPages; idx++)
		gIOCopyMapper->iovmInsert(atop_64(remapAddr), idx, atop_64(address) + idx);
	}
	if (state->fDoubleBuffer)
	    state->fCopyNext += length;
	else
	{
	    state->fCopyNext += round_page(length);
	    remapAddr += (address & PAGE_MASK);
	}

	if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
	{
	    DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
		    (kWalkSyncIn & op) ? "->" : "<-",
		    address, length, op);
	    if (kWalkSyncIn & op)
	    {
		copypv(remapAddr, address, length,
				cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
	    }
	    else
	    {
		copypv(address, remapAddr, length,
				cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
	    }
	}
    }

    return kIOReturnSuccess;
}
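
/*
 * walkAll drives one pass over the prepared range. The preflight phase
 * decides whether bounce buffering is needed (misaligned or out-of-range
 * segments); if so it allocates I/O VM space from the copy mapper, falling
 * back to a page-aligned IOBufferMemoryDescriptor when that allocation
 * fails. Sync phases move data through whichever bounce mechanism was set
 * up, and the complete phase tears it down.
 */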
IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn ret = kIOReturnSuccess;
    UInt32   numSegments;
    UInt64   offset;

    if (gIOEnableCopyMapper && (kWalkPreflight & op))
    {
	state->fCopyContig    = false;
	state->fMisaligned    = false;
	state->fDoubleBuffer  = false;
	state->fPrepared      = false;
	state->fCopyNext      = 0;
	state->fCopyPageAlloc = 0;
	state->fCopyPageCount = 0;
	state->fCopyMD        = 0;

	if (!(kWalkDoubleBuffer & op))
	{
	    offset = 0;
	    numSegments = 0-1;
	    ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
	}

	op &= ~kWalkPreflight;

	state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
	if (state->fDoubleBuffer)
	    state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));

	if (state->fCopyPageCount)
	{
	    IOMapper * mapper;
	    ppnum_t    mapBase = 0;

	    DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

	    mapper = gIOCopyMapper;
	    if (mapper)
		mapBase = mapper->iovmAlloc(state->fCopyPageCount);
	    if (mapBase)
	    {
		state->fCopyPageAlloc = mapBase;
		if (state->fCopyPageAlloc && state->fDoubleBuffer)
		{
		    DEBG("contig copy map\n");
		    state->fCopyContig = true;
		}

		state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
		offset = 0;
		numSegments = 0-1;
		ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
		state->fPrepared = true;
		op &= ~(kWalkSyncIn | kWalkSyncOut);
	    }
	    else
	    {
		DEBG("alloc IOBMD\n");
		state->fCopyMD = IOBufferMemoryDescriptor::withOptions(
				    fMDSummary.fDirection, state->fPreparedLength, page_size);

		if (state->fCopyMD)
		{
		    ret = kIOReturnSuccess;
		    state->fPrepared = true;
		}
		else
		{
		    DEBG("IODMACommand !iovmAlloc");
		    return (kIOReturnNoResources);
		}
	    }
	}
    }

    if (gIOEnableCopyMapper && state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
    {
	if (state->fCopyPageCount)
	{
	    DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

	    if (state->fCopyPageAlloc)
	    {
		state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
		offset = 0;
		numSegments = 0-1;
		ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
	    }
	    else if (state->fCopyMD)
	    {
		DEBG("sync IOBMD\n");

		if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
		{
		    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

		    IOByteCount bytes;

		    if (kWalkSyncIn & op)
			bytes = poMD->writeBytes(state->fPreparedOffset,
						 state->fCopyMD->getBytesNoCopy(),
						 state->fPreparedLength);
		    else
			bytes = poMD->readBytes(state->fPreparedOffset,
						state->fCopyMD->getBytesNoCopy(),
						state->fPreparedLength);
		    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
		    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
		}
		else
		    ret = kIOReturnSuccess;
	    }
	}
    }

    if (kWalkComplete & op)
    {
	if (state->fCopyPageAlloc)
	{
	    gIOCopyMapper->iovmFree(state->fCopyPageAlloc, state->fCopyPageCount);
	    state->fCopyPageAlloc = 0;
	    state->fCopyPageCount = 0;
	}
	if (state->fCopyMD)
	{
	    state->fCopyMD->release();
	    state->fCopyMD = 0;
	}

	state->fPrepared = false;
    }
    return (ret);
}
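
/*
 * Typical client flow, as a sketch only (the driver-side names below are
 * hypothetical and the argument values illustrative; see IODMACommand.h for
 * the default parameters assumed here):
 *
 *	IODMACommand * cmd = IODMACommand::withSpecification(
 *					OutputHost64, 32, 0);
 *	cmd->setMemoryDescriptor(md);		// retains md
 *	cmd->prepare();				// may set up bounce buffers
 *	UInt64    offset  = 0;
 *	Segment64 segs[32];
 *	UInt32    numSegs = 32;
 *	cmd->genIOVMSegments(&offset, segs, &numSegs);
 *	// ... program the hardware with segs[0 .. numSegs-1] ...
 *	cmd->complete();			// sync back, free bounce pages
 *	cmd->clearMemoryDescriptor();
 */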
IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    MappingOptions mappingOptions = fMappingOptions;

    if (!length)
	length = fMDSummary.fLength;

    if (length > fMaxTransferSize)
	return kIOReturnNoSpace;

    if (IS_NONCOHERENT(mappingOptions) && flushCache) {
	IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

	poMD->performOperation(kIOMemoryIncoherentIOStore, 0, fMDSummary.fLength);
    }

    if (fActive++)
    {
	if ((state->fPreparedOffset != offset)
	  || (state->fPreparedLength != length))
	    ret = kIOReturnNotReady;
    }
    else
    {
	state->fPreparedOffset = offset;
	state->fPreparedLength = length;

	state->fCopyContig    = false;
	state->fMisaligned    = false;
	state->fDoubleBuffer  = false;
	state->fPrepared      = false;
	state->fCopyNext      = 0;
	state->fCopyPageAlloc = 0;
	state->fCopyPageCount = 0;
	state->fCopyMD        = 0;

	state->fCursor = state->fIterateOnly
			|| (!state->fCheckAddressing
			    && (!fAlignMask
				|| ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & fAlignMask)))));
	if (!state->fCursor)
	{
	    IOOptionBits op = kWalkPrepare | kWalkPreflight;
	    if (synchronize)
		op |= kWalkSyncOut;
	    ret = walkAll(op);
	}
	if (kIOReturnSuccess == ret)
	    state->fPrepared = true;
    }
    return ret;
}
IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    if (fActive < 1)
	return kIOReturnNotReady;

    if (!--fActive)
    {
	if (!state->fCursor)
	{
	    IOOptionBits op = kWalkComplete;
	    if (synchronize)
		op |= kWalkSyncIn;
	    ret = walkAll(op);
	}
	state->fPrepared = false;

	if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
	{
	    // XXX gvdl: need invalidate before Chardonnay ships
	    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

	    poMD->performOperation(kIOMemoryIncoherentIOInvalidate, 0, fMDSummary.fLength);
	}
    }

    return ret;
}
IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOOptionBits           op;

    if (kIODirectionOutIn == (kIODirectionOutIn & options))
	return kIOReturnBadArgument;

    if (fActive < 1)
	return kIOReturnNotReady;

    op = 0;
    if (kForceDoubleBuffer & options)
    {
	if (state->fDoubleBuffer)
	    return kIOReturnSuccess;
	if (state->fCursor)
	    state->fCursor = false;
	else
	    ret = walkAll(kWalkComplete);

	op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
    }
    else if (state->fCursor)
	return kIOReturnSuccess;

    if (kIODirectionIn & options)
	op |= kWalkSyncIn | kWalkSyncAlways;
    else if (kIODirectionOut & options)
	op |= kWalkSyncOut | kWalkSyncAlways;

    ret = walkAll(op);

    return ret;
}
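
/*
 * genIOVMSegments resumes a walk at *offsetP, coalescing physically
 * contiguous runs into single Segment64 entries, splitting any run larger
 * than fMaxSegmentSize, and redirecting out-of-range addresses to their
 * bounce-buffer copies. On success *offsetP is advanced and *numSegmentsP
 * is updated to the number of segments actually emitted.
 */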
IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
			      void   *segmentsP,
			      UInt32 *numSegmentsP)
{
    return (genIOVMSegments(clientOutputSegment, (void *) kWalkClient, offsetP, segmentsP, numSegmentsP));
}
IOReturn
IODMACommand::genIOVMSegments(InternalSegmentFunction outSegFunc,
			      void   *reference,
			      UInt64 *offsetP,
			      void   *segmentsP,
			      UInt32 *numSegmentsP)
{
    IOOptionBits           op = (IOOptionBits) reference;
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp = kIOMDWalkSegments;
    IOReturn               ret  = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive)
	return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
	return kIOReturnBadArgument;

    IOMDDMAWalkSegmentArgs *state =
	(IOMDDMAWalkSegmentArgs *) fState;

    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
	return kIOReturnOverrun;

    if (!offset || offset != state->fOffset) {
	state->fOffset   = 0;
	state->fIOVMAddr = 0;
	state->fMapped   = (IS_MAPPED(fMappingOptions) && fMapper);
	mdOp             = kIOMDFirstSegment;
    };

    UInt64    bypassMask  = fBypassMask;
    UInt32    segIndex    = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg      = { 0, 0 };
    addr64_t  maxPhys;

    if (fNumAddressBits && (fNumAddressBits < 64))
	maxPhys = (1ULL << fNumAddressBits);
    else
	maxPhys = 0;
    maxPhys--;

    while ((state->fIOVMAddr) || state->fOffset < memLength)
    {
	if (!state->fIOVMAddr) {

	    IOReturn rtn;

	    state->fOffset = offset;
	    state->fLength = memLength - offset;

	    if (internalState->fCopyContig && (kWalkClient & op))
	    {
		state->fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc)
				    + offset - internalState->fPreparedOffset;
		rtn = kIOReturnSuccess;
	    }
	    else
	    {
		const IOMemoryDescriptor * memory =
		    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
		rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
		mdOp = kIOMDWalkSegments;
	    }

	    if (rtn == kIOReturnSuccess) {
		assert(state->fIOVMAddr);
		assert(state->fLength);
	    }
	    else if (rtn == kIOReturnOverrun)
		state->fIOVMAddr = state->fLength = 0;	// At end
	    else
		return rtn;
	};

	if (!curSeg.fIOVMAddr) {
	    UInt64 length = state->fLength;

	    offset          += length;
	    curSeg.fIOVMAddr = state->fIOVMAddr | bypassMask;
	    curSeg.fLength   = length;
	    state->fIOVMAddr = 0;
	}
	else if ((curSeg.fIOVMAddr + curSeg.fLength == state->fIOVMAddr)) {
	    UInt64 length = state->fLength;
	    offset          += length;
	    curSeg.fLength  += length;
	    state->fIOVMAddr = 0;
	};

	if (!state->fIOVMAddr)
	{
	    if (kWalkClient & op)
	    {
		if ((curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
		{
		    if (internalState->fCursor)
		    {
			curSeg.fIOVMAddr = 0;
			ret = kIOReturnMessageTooLarge;
			break;
		    }
		    else if (curSeg.fIOVMAddr <= maxPhys)
		    {
			UInt64 remain, newLength;

			newLength        = (maxPhys + 1 - curSeg.fIOVMAddr);
			DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
			remain           = curSeg.fLength - newLength;
			state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
			curSeg.fLength   = newLength;
			state->fLength   = remain;
			offset          -= remain;
		    }
		    else if (gIOCopyMapper)
		    {
			DEBG("sparse switch %qx, %qx ", curSeg.fIOVMAddr, curSeg.fLength);
			for (UInt checkRemapIndex = 0; checkRemapIndex < internalState->fCopyPageCount; checkRemapIndex++)
			{
			    if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
					    ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex)))
			    {
				curSeg.fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex) + (curSeg.fIOVMAddr & PAGE_MASK);
				break;
			    }
			}
			DEBG("-> %qx, %qx\n", curSeg.fIOVMAddr, curSeg.fLength);
		    }
		}
	    }

	    if (curSeg.fLength > fMaxSegmentSize)
	    {
		UInt64 remain = curSeg.fLength - fMaxSegmentSize;

		state->fIOVMAddr = fMaxSegmentSize + curSeg.fIOVMAddr;
		curSeg.fLength   = fMaxSegmentSize;

		state->fLength   = remain;
		offset          -= remain;
	    }

	    if (internalState->fCursor
		&& (0 != (fAlignMask & curSeg.fIOVMAddr)))
	    {
		curSeg.fIOVMAddr = 0;
		ret = kIOReturnNotAligned;
		break;
	    }

	    if (offset >= memLength)
	    {
		curSeg.fLength -= (offset - memLength);
		offset = memLength;
		state->fIOVMAddr = state->fLength = 0;	// At end
		break;
	    }
	}

	if (state->fIOVMAddr) {
	    if ((segIndex + 1 == numSegments))
		break;

	    ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
	    curSeg.fIOVMAddr = 0;
	    if (kIOReturnSuccess != ret)
		break;
	}
    }

    if (curSeg.fIOVMAddr) {
	ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
	state->fOffset = offset;
	*offsetP       = offset - internalState->fPreparedOffset;
	*numSegmentsP  = segIndex;
    }

    return ret;
}
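
/*
 * clientOutputSegment wraps the segment function supplied at initialization:
 * it flags any segment whose last byte does not fit in fNumAddressBits, then
 * forwards the segment to fOutSeg for packing into the caller's list.
 */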
IOReturn
IODMACommand::clientOutputSegment(
	void *reference, IODMACommand *target,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    IOReturn ret = kIOReturnSuccess;

    if ((target->fNumAddressBits < 64)
	&& ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits))
    {
	DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
	ret = kIOReturnMessageTooLarge;
    }

    if (!(*target->fOutSeg)(target, segment, vSegList, outSegIndex))
    {
	DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
	ret = kIOReturnMessageTooLarge;
    }

    return (ret);
}
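
/*
 * The Output* routines below are the stock segment emitters that clients can
 * pass to withSpecification(): each packs one (address, length) pair into the
 * caller's segment array at the named width and byte order, e.g. OutputBig32
 * emits 32-bit big-endian descriptors for hardware that consumes big-endian
 * scatter/gather tables.
 */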
bool
IODMACommand::OutputHost32(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment32 *base = (Segment32 *) vSegList;
    base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
    base[outSegIndex].fLength   = (UInt32) segment.fLength;
    return true;
}
bool
IODMACommand::OutputBig32(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteBigInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}
bool
IODMACommand::OutputLittle32(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteLittleInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}
bool
IODMACommand::OutputHost64(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment64 *base = (Segment64 *) vSegList;
    base[outSegIndex] = segment;
    return true;
}
bool
IODMACommand::OutputBig64(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteBigInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}
bool
IODMACommand::OutputLittle64(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteLittleInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}