/*
 * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>

#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"
#define MAPTYPE(type)         ((UInt) (type) & kTypeMask)
#define IS_MAPPED(type)       (MAPTYPE(type) == kMapped)
#define IS_BYPASSED(type)     (MAPTYPE(type) == kBypassed)
#define IS_NONCOHERENT(type)  (MAPTYPE(type) == kNonCoherent)
static bool gIOEnableCopyMapper = true;
enum
{
    kWalkSyncIn       = 0x01,	// bounce -> md
    kWalkSyncOut      = 0x02,	// bounce <- md
    kWalkSyncAlways   = 0x04,
    kWalkPreflight    = 0x08,
    kWalkDoubleBuffer = 0x10,
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80
};
struct ExpansionData
{
    IOMDDMAWalkSegmentState fState;
    IOMDDMACharacteristics  fMDSummary;

    UInt64  fPreparedOffset;
    UInt64  fPreparedLength;

    UInt8   fCursor;
    UInt8   fCheckAddressing;
    UInt8   fIterateOnly;
    UInt8   fMisaligned;
    UInt8   fCopyContig;
    UInt8   fPrepared;
    UInt8   fDoubleBuffer;
    UInt8   __pad[1];

    ppnum_t  fCopyPageAlloc;
    ppnum_t  fCopyPageCount;
    addr64_t fCopyNext;

    class IOBufferMemoryDescriptor * fCopyMD;
};
typedef ExpansionData IODMACommandInternal;
#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary
#if 1
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction)      \
	((kIODirectionNone == (direction))  \
	    || (kWalkSyncAlways & (op))     \
	    || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut) \
						    & (direction)))
#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif
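/*
 * For example (an illustrative evaluation, not code from this file):
 * SHOULD_COPY_DIR(kWalkSyncIn, kIODirectionIn) is true, since copying the
 * bounce buffer back to the client matches the requested direction, while
 * SHOULD_COPY_DIR(kWalkSyncIn, kIODirectionOut) is false, so the copy-in is
 * skipped for an output-only transfer. kWalkSyncAlways in the op, or
 * kIODirectionNone as the direction, forces the copy either way.
 */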
#if 0
#define DEBG(fmt, args...)	{ kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)	{}
#endif
/**************************** class IODMACommand ***************************/
#define super OSObject
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);

OSMetaClassDefineReservedUnused(IODMACommand,  0);
OSMetaClassDefineReservedUnused(IODMACommand,  1);
OSMetaClassDefineReservedUnused(IODMACommand,  2);
OSMetaClassDefineReservedUnused(IODMACommand,  3);
OSMetaClassDefineReservedUnused(IODMACommand,  4);
OSMetaClassDefineReservedUnused(IODMACommand,  5);
OSMetaClassDefineReservedUnused(IODMACommand,  6);
OSMetaClassDefineReservedUnused(IODMACommand,  7);
OSMetaClassDefineReservedUnused(IODMACommand,  8);
OSMetaClassDefineReservedUnused(IODMACommand,  9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);
IODMACommand *
IODMACommand::withSpecification(SegmentFunction outSegFunc,
				UInt8           numAddressBits,
				UInt64          maxSegmentSize,
				MappingOptions  mappingOptions,
				UInt64          maxTransferSize,
				UInt32          alignment,
				IOMapper       *mapper,
				void           *refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc,
					 numAddressBits, maxSegmentSize,
					 mappingOptions, maxTransferSize,
					 alignment, mapper, refCon))
    {
	me->release();
	return 0;
    }

    return me;
}
IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    return withSpecification(fOutSeg, fNumAddressBits, fMaxSegmentSize,
	    fMappingOptions, fMaxTransferSize, fAlignMask + 1, fMapper, refCon);
}
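/*
 * Example construction (an illustrative sketch; the 32-bit address limit,
 * 64KB segment size and 4-byte alignment are hypothetical driver choices):
 *
 *	IODMACommand *cmd = IODMACommand::withSpecification(
 *			IODMACommand::OutputHost64,	// outSegFunc
 *			32,	// numAddressBits: device addresses 32 bits
 *			65536,	// maxSegmentSize
 *			IODMACommand::kMapped,	// mappingOptions
 *			0,	// maxTransferSize: 0 means unlimited
 *			4,	// alignment in bytes
 *			0,	// mapper: 0 selects the system mapper
 *			0);	// refCon
 */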
#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)
bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
				    UInt8           numAddressBits,
				    UInt64          maxSegmentSize,
				    MappingOptions  mappingOptions,
				    UInt64          maxTransferSize,
				    UInt32          alignment,
				    IOMapper       *mapper,
				    void           *refCon)
{
    if (!super::init() || !outSegFunc || !numAddressBits)
	return false;

    bool is32Bit = (OutputHost32   == outSegFunc || OutputBig32 == outSegFunc
		 || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
	if (!numAddressBits)
	    numAddressBits = 32;
	else if (numAddressBits > 32)
	    return false;		// Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
	return false;

    if (!maxSegmentSize)
	maxSegmentSize--;	// Set Max segment to -1
    if (!maxTransferSize)
	maxTransferSize--;	// Set Max transfer to -1

    if (!mapper)
    {
	IOMapper::checkForSystemMapper();
	mapper = IOMapper::gSystem;
    }

    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
	alignment = 1;
    fAlignMask       = alignment - 1;
    fMapper          = mapper;
    fRefCon          = refCon;

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:                   break;
    case kNonCoherent: fMapper = 0; break;
    case kBypassed:
	if (mapper && !mapper->getBypassMask(&fBypassMask))
	    return false;
	break;
    default:
	return false;
    }

    reserved = IONew(ExpansionData, 1);
    if (!reserved)
	return false;
    bzero(reserved, sizeof(ExpansionData));

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));

    return true;
}
void
IODMACommand::free()
{
    if (reserved)
	IODelete(reserved, ExpansionData, 1);

    super::free();
}
IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    if (mem == fMemory)
    {
	if (!autoPrepare)
	{
	    while (fActive)
		complete();
	}
	return kIOReturnSuccess;
    }

    if (fMemory) {
	// As we are almost certainly being called from a work loop thread
	// if fActive is true it is probably not a good time to potentially
	// block.  Just test for it and return an error
	if (fActive)
	    return kIOReturnBusy;
	clearMemoryDescriptor();
    }

    if (mem) {
	bzero(&fMDSummary, sizeof(fMDSummary));
	IOReturn rtn = mem->dmaCommandOperation(
		kIOMDGetCharacteristics,
		&fMDSummary, sizeof(fMDSummary));
	if (rtn)
	    return rtn;

	ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

	if ((kMapped == MAPTYPE(fMappingOptions))
	    && (!fNumAddressBits || (fNumAddressBits >= 31)))
	    // assuming mapped space is 2G
	    fInternalState->fCheckAddressing = false;
	else
	    fInternalState->fCheckAddressing =
		(fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));

	mem->retain();
	fMemory = mem;

	if (autoPrepare)
	    return prepare();
    }

    return kIOReturnSuccess;
}
IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
    if (fActive && !autoComplete)
	return (kIOReturnNotReady);

    if (fMemory) {
	while (fActive)
	    complete();
	fMemory->release();
	fMemory = 0;
    }

    return (kIOReturnSuccess);
}
const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
    return fMemory;
}
IOReturn
IODMACommand::segmentOp(
			void         *reference,
			IODMACommand *target,
			Segment64     segment,
			void         *segments,
			UInt32        segmentIndex)
{
    IOOptionBits op = (IOOptionBits) reference;
    addr64_t     maxPhys, address;
    addr64_t     remapAddr = 0;
    IOByteCount  length;
    IOByteCount  numPages;

    IODMACommandInternal * state = target->reserved;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64))
	maxPhys = (1ULL << target->fNumAddressBits);
    else
	maxPhys = 0;
    maxPhys--;

    address = segment.fIOVMAddr;
    length  = segment.fLength;

    if (!state->fMisaligned)
    {
	state->fMisaligned |= (0 != (target->fAlignMask & address));
	if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, target->fAlignMask);
    }

    if (state->fMisaligned && (kWalkPreflight & op))
	return (kIOReturnNotAligned);

    if (!state->fDoubleBuffer)
    {
	if ((address + length - 1) <= maxPhys)
	{
	    length = 0;
	}
	else if (address <= maxPhys)
	{
	    DEBG("tail %qx, %qx", address, length);
	    length  = (address + length - maxPhys - 1);
	    address = maxPhys + 1;
	    DEBG("-> %qx, %qx\n", address, length);
	}
    }

    if (!length)
	return (kIOReturnSuccess);

    numPages  = atop_64(round_page_64(length));
    remapAddr = state->fCopyNext;

    if (kWalkPreflight & op)
    {
	state->fCopyPageCount += numPages;
    }
    else
    {
	if (kWalkPrepare & op)
	{
	    for (IOItemCount idx = 0; idx < numPages; idx++)
		gIOCopyMapper->iovmInsert(atop_64(remapAddr), idx, atop_64(address) + idx);
	}
	if (state->fDoubleBuffer)
	    state->fCopyNext += length;
	else
	{
	    state->fCopyNext += round_page(length);
	    remapAddr += (address & PAGE_MASK);
	}

	if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
	{
	    DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
		    (kWalkSyncIn & op) ? "->" : "<-",
		    address, length, op);
	    if (kWalkSyncIn & op)
	    {
		copypv(remapAddr, address, length,
			cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc);
	    }
	    else
	    {
		copypv(address, remapAddr, length,
			cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc);
	    }
	}
    }

    return kIOReturnSuccess;
}
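/*
 * Worked example (hypothetical numbers): for a device limited to 32 address
 * bits, maxPhys is 0xFFFFFFFF. A segment at 0x120000000 of length 0x3000
 * lies entirely above maxPhys, so nothing is trimmed; a preflight walk adds
 * atop_64(round_page_64(0x3000)) = 3 pages to fCopyPageCount, and a prepare
 * walk inserts those three bounce pages into the copy mapper. When only the
 * tail of a segment crosses maxPhys, the code above trims address/length so
 * that just the offending tail is double buffered.
 */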
IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn ret = kIOReturnSuccess;
    UInt32   numSegments;
    UInt64   offset;

    if (gIOEnableCopyMapper && (kWalkPreflight & op))
    {
	state->fCopyContig    = false;
	state->fMisaligned    = false;
	state->fDoubleBuffer  = false;
	state->fPrepared      = false;
	state->fCopyNext      = 0;
	state->fCopyPageAlloc = 0;
	state->fCopyPageCount = 0;
	state->fCopyMD        = 0;

	if (!(kWalkDoubleBuffer & op))
	{
	    offset = 0;
	    numSegments = 0 - 1;
	    ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
	}

	op &= ~kWalkPreflight;

	state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
	if (state->fDoubleBuffer)
	    state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));

	if (state->fCopyPageCount)
	{
	    IOMapper *mapper;
	    ppnum_t   mapBase = 0;

	    DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

	    mapper = gIOCopyMapper;
	    if (mapper)
		mapBase = mapper->iovmAlloc(state->fCopyPageCount);
	    if (mapBase)
	    {
		state->fCopyPageAlloc = mapBase;
		if (state->fCopyPageAlloc && state->fDoubleBuffer)
		{
		    DEBG("contig copy map\n");
		    state->fCopyContig = true;
		}

		state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
		offset = 0;
		numSegments = 0 - 1;
		ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
		state->fPrepared = true;
		op &= ~(kWalkSyncIn | kWalkSyncOut);
	    }
	    else
	    {
		DEBG("alloc IOBMD\n");
		state->fCopyMD = IOBufferMemoryDescriptor::withOptions(
				    fMDSummary.fDirection, state->fPreparedLength, page_size);

		if (state->fCopyMD)
		{
		    ret = kIOReturnSuccess;
		    state->fPrepared = true;
		}
		else
		{
		    DEBG("IODMACommand !iovmAlloc");
		    return (kIOReturnNoResources);
		}
	    }
	}
    }

    if (gIOEnableCopyMapper && state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
    {
	if (state->fCopyPageCount)
	{
	    DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

	    if (state->fCopyPageAlloc)
	    {
		state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
		offset = 0;
		numSegments = 0 - 1;
		ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
	    }
	    else if (state->fCopyMD)
	    {
		DEBG("sync IOBMD\n");

		if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
		{
		    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

		    IOByteCount bytes;

		    if (kWalkSyncIn & op)
			bytes = poMD->writeBytes(state->fPreparedOffset,
						 state->fCopyMD->getBytesNoCopy(),
						 state->fPreparedLength);
		    else
			bytes = poMD->readBytes(state->fPreparedOffset,
						state->fCopyMD->getBytesNoCopy(),
						state->fPreparedLength);
		    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
		    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
		}
		else
		    ret = kIOReturnSuccess;
	    }
	}
    }

    if (kWalkComplete & op)
    {
	if (state->fCopyPageAlloc)
	{
	    gIOCopyMapper->iovmFree(state->fCopyPageAlloc, state->fCopyPageCount);
	    state->fCopyPageAlloc = 0;
	    state->fCopyPageCount = 0;
	}
	if (state->fCopyMD)
	{
	    state->fCopyMD->release();
	    state->fCopyMD = 0;
	}

	state->fPrepared = false;
    }
    return (ret);
}
IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    if (!length)
	length = fMDSummary.fLength;

    if (length > fMaxTransferSize)
	return kIOReturnNoSpace;

    if (IS_NONCOHERENT(fMappingOptions) && flushCache) {
	IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

	poMD->performOperation(kIOMemoryIncoherentIOStore, 0, fMDSummary.fLength);
    }

    if (fActive++)
    {
	if ((state->fPreparedOffset != offset)
	 || (state->fPreparedLength != length))
	    ret = kIOReturnNotReady;
    }
    else
    {
	state->fPreparedOffset = offset;
	state->fPreparedLength = length;

	state->fCopyContig    = false;
	state->fMisaligned    = false;
	state->fDoubleBuffer  = false;
	state->fPrepared      = false;
	state->fCopyNext      = 0;
	state->fCopyPageAlloc = 0;
	state->fCopyPageCount = 0;
	state->fCopyMD        = 0;

	state->fCursor = state->fIterateOnly
		    || (!state->fCheckAddressing
			&& (!fAlignMask
			    || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & fAlignMask)))));
	if (!state->fCursor)
	{
	    IOOptionBits op = kWalkPrepare | kWalkPreflight;
	    if (synchronize)
		op |= kWalkSyncOut;
	    ret = walkAll(op);
	}
	if (kIOReturnSuccess == ret)
	    state->fPrepared = true;
    }
    return ret;
}
IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    if (fActive < 1)
	return kIOReturnNotReady;

    if (!--fActive)
    {
	if (!state->fCursor)
	{
	    IOOptionBits op = kWalkComplete;
	    if (synchronize)
		op |= kWalkSyncIn;
	    ret = walkAll(op);
	}
	state->fPrepared = false;

	if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
	{
	    // XXX gvdl: need invalidate before Chardonnay ships
	    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

	    poMD->performOperation(kIOMemoryIncoherentIOInvalidate, 0, fMDSummary.fLength);
	}
    }

    return ret;
}
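/*
 * Typical call sequence (an illustrative sketch; error handling omitted,
 * and "md" is a hypothetical, already-created IOMemoryDescriptor):
 *
 *	cmd->setMemoryDescriptor(md, false);
 *	cmd->prepare(0, 0, false, true);   // whole descriptor, sync out
 *	// ... genIOVMSegments() and the hardware I/O happen here ...
 *	cmd->complete(true, true);         // sync bounce data back in
 *	cmd->clearMemoryDescriptor();
 */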
IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOOptionBits           op    = 0;

    if (kIODirectionOutIn == (kIODirectionOutIn & options))
	return kIOReturnBadArgument;

    if (fActive < 1)
	return kIOReturnNotReady;

    if (kForceDoubleBuffer & options)
    {
	if (state->fDoubleBuffer)
	    return kIOReturnSuccess;
	if (state->fCursor)
	    state->fCursor = false;
	else
	    ret = walkAll(kWalkComplete);

	op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
    }
    else if (state->fCursor)
	return kIOReturnSuccess;

    if (kIODirectionIn & options)
	op |= kWalkSyncIn | kWalkSyncAlways;
    else if (kIODirectionOut & options)
	op |= kWalkSyncOut | kWalkSyncAlways;

    ret = walkAll(op);

    return ret;
}
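/*
 * For example (illustrative): a driver that lets the device write into a
 * double-buffered command while it is still prepared would call
 *
 *	cmd->synchronize(kIODirectionIn);
 *
 * to copy the bounce pages back before the client reads the memory.
 */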
IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
			      void   *segmentsP,
			      UInt32 *numSegmentsP)
{
    return (genIOVMSegments(clientOutputSegment, (void *) kWalkClient, offsetP, segmentsP, numSegmentsP));
}
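/*
 * Example walk (an illustrative sketch: assumes the command was created
 * with OutputHost64 so the output buffer holds Segment64 entries, and
 * programEntry() is a hypothetical hardware-specific helper):
 *
 *	UInt64 offset = 0;
 *	while (offset < transferLength)
 *	{
 *	    IODMACommand::Segment64 segs[8];
 *	    UInt32 numSegs = 8;
 *	    if (kIOReturnSuccess != cmd->genIOVMSegments(&offset, segs, &numSegs))
 *		break;
 *	    for (UInt32 i = 0; i < numSegs; i++)
 *		programEntry(segs[i].fIOVMAddr, segs[i].fLength);
 *	}
 */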
IOReturn
IODMACommand::genIOVMSegments(InternalSegmentFunction outSegFunc,
			      void   *reference,
			      UInt64 *offsetP,
			      void   *segmentsP,
			      UInt32 *numSegmentsP)
{
    IOOptionBits           op            = (IOOptionBits) reference;
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp          = kIOMDWalkSegments;
    IOReturn               ret           = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive)
	return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
	return kIOReturnBadArgument;

    IOMDDMAWalkSegmentArgs *state =
	(IOMDDMAWalkSegmentArgs *) fState;

    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
	return kIOReturnOverrun;

    if (!offset || offset != state->fOffset) {
	state->fOffset   = 0;
	state->fIOVMAddr = 0;
	state->fMapped   = (IS_MAPPED(fMappingOptions) && fMapper);
	mdOp             = kIOMDFirstSegment;
    }

    UInt64    bypassMask  = fBypassMask;
    UInt32    segIndex    = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg      = { 0, 0 };
    addr64_t  maxPhys;

    if (fNumAddressBits && (fNumAddressBits < 64))
	maxPhys = (1ULL << fNumAddressBits);
    else
	maxPhys = 0;
    maxPhys--;

    while ((state->fIOVMAddr) || state->fOffset < memLength)
    {
	if (!state->fIOVMAddr) {

	    IOReturn rtn;

	    state->fOffset = offset;
	    state->fLength = memLength - offset;

	    if (internalState->fCopyContig && (kWalkClient & op))
	    {
		state->fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc)
				    + offset - internalState->fPreparedOffset;
		rtn = kIOReturnSuccess;
	    }
	    else
	    {
		const IOMemoryDescriptor * memory =
		    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
		rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
		mdOp = kIOMDWalkSegments;
	    }

	    if (rtn == kIOReturnSuccess) {
		assert(state->fIOVMAddr);
		assert(state->fLength);
	    }
	    else if (rtn == kIOReturnOverrun)
		state->fIOVMAddr = state->fLength = 0;	// At end
	    else
		return rtn;
	}

	if (!curSeg.fIOVMAddr) {
	    UInt64 length = state->fLength;

	    offset          += length;
	    curSeg.fIOVMAddr = state->fIOVMAddr | bypassMask;
	    curSeg.fLength   = length;
	    state->fIOVMAddr = 0;
	}
	else if ((curSeg.fIOVMAddr + curSeg.fLength == state->fIOVMAddr)) {
	    UInt64 length = state->fLength;

	    offset          += length;
	    curSeg.fLength  += length;
	    state->fIOVMAddr = 0;
	}

	if (!state->fIOVMAddr)
	{
	    if (kWalkClient & op)
	    {
		if ((curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
		{
		    if (internalState->fCursor)
		    {
			curSeg.fIOVMAddr = 0;
			ret = kIOReturnMessageTooLarge;
			break;
		    }
		    else if (curSeg.fIOVMAddr <= maxPhys)
		    {
			UInt64 remain, newLength;

			newLength        = (maxPhys + 1 - curSeg.fIOVMAddr);
			DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
			remain           = curSeg.fLength - newLength;
			state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
			curSeg.fLength   = newLength;
			state->fLength   = remain;
			offset          -= remain;
		    }
		    else if (gIOCopyMapper)
		    {
			DEBG("sparse switch %qx, %qx ", curSeg.fIOVMAddr, curSeg.fLength);
			for (UInt checkRemapIndex = 0; checkRemapIndex < internalState->fCopyPageCount; checkRemapIndex++)
			{
			    if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
					    ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex)))
			    {
				curSeg.fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex) + (curSeg.fIOVMAddr & PAGE_MASK);
				break;
			    }
			}
			DEBG("-> %qx, %qx\n", curSeg.fIOVMAddr, curSeg.fLength);
		    }
		}
	    }

	    if (curSeg.fLength > fMaxSegmentSize)
	    {
		UInt64 remain = curSeg.fLength - fMaxSegmentSize;

		state->fIOVMAddr = fMaxSegmentSize + curSeg.fIOVMAddr;
		curSeg.fLength   = fMaxSegmentSize;

		state->fLength   = remain;
		offset          -= remain;
	    }

	    if (internalState->fCursor
		&& (0 != (fAlignMask & curSeg.fIOVMAddr)))
	    {
		curSeg.fIOVMAddr = 0;
		ret = kIOReturnNotAligned;
		break;
	    }

	    if (offset >= memLength)
	    {
		curSeg.fLength  -= (offset - memLength);
		offset           = memLength;
		state->fIOVMAddr = state->fLength = 0;	// At end
		break;
	    }
	}

	if (state->fIOVMAddr) {
	    if ((segIndex + 1 == numSegments))
		break;

	    ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
	    curSeg.fIOVMAddr = 0;
	    if (kIOReturnSuccess != ret)
		break;
	}
    }

    if (curSeg.fIOVMAddr) {
	ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
	state->fOffset = offset;
	*offsetP       = offset - internalState->fPreparedOffset;
	*numSegmentsP  = segIndex;
    }

    return ret;
}
IOReturn
IODMACommand::clientOutputSegment(
	void *reference, IODMACommand *target,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    IOReturn ret = kIOReturnSuccess;

    if ((target->fNumAddressBits < 64)
	&& ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits))
    {
	DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
	ret = kIOReturnMessageTooLarge;
    }

    if (!(*target->fOutSeg)(target, segment, vSegList, outSegIndex))
    {
	DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
	ret = kIOReturnMessageTooLarge;
    }

    return (ret);
}
bool
IODMACommand::OutputHost32(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment32 *base = (Segment32 *) vSegList;
    base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
    base[outSegIndex].fLength   = (UInt32) segment.fLength;
    return true;
}
bool
IODMACommand::OutputBig32(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteBigInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}
bool
IODMACommand::OutputLittle32(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteLittleInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}
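/*
 * The byte-swapping variants matter when the device fetches its
 * scatter/gather list straight from memory: for instance (an illustrative
 * pairing, not a rule stated in this file), a little-endian PCI controller
 * driven from a big-endian host would be created with OutputLittle32, while
 * the OutputHost* functions suit lists the CPU writes to device registers
 * itself.
 */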
bool
IODMACommand::OutputHost64(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment64 *base = (Segment64 *) vSegList;
    base[outSegIndex] = segment;
    return true;
}
bool
IODMACommand::OutputBig64(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteBigInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}
bool
IODMACommand::OutputLittle64(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteLittleInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}