/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/assert.h>

#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>
#include <libkern/OSDebug.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"

#define MAPTYPE(type)        ((UInt) (type) & kTypeMask)
#define IS_NONCOHERENT(type) (MAPTYPE(type) == kNonCoherent)

enum {
    kWalkSyncIn       = 0x01, // bounce -> md
    kWalkSyncOut      = 0x02, // bounce <- md
    kWalkSyncAlways   = 0x04,
    kWalkPreflight    = 0x08,
    kWalkDoubleBuffer = 0x10,
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80
};

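/*
 * The kWalk* flags select which phase of the bounce-buffer walk is being
 * performed: walkAll() combines kWalkPrepare | kWalkPreflight when a command
 * is prepared, kWalkSyncIn / kWalkSyncOut when the caller synchronizes, and
 * kWalkComplete when the command completes. kWalkClient marks segment
 * generation on behalf of an external caller.
 */
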
#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary

#if 1
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction)                                      \
    ((kIODirectionNone == (direction))                                      \
        || (kWalkSyncAlways & (op))                                         \
        || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)       \
        & (direction)))
#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif

#if 0
#define DEBG(fmt, args...) { IOLog(fmt, ## args); kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...) {}
#endif

/**************************** class IODMACommand ***************************/

#define super IOCommand
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);

OSMetaClassDefineReservedUsed(IODMACommand, 0);
OSMetaClassDefineReservedUsed(IODMACommand, 1);
OSMetaClassDefineReservedUsed(IODMACommand, 2);
OSMetaClassDefineReservedUsed(IODMACommand, 3);
OSMetaClassDefineReservedUsed(IODMACommand, 4);
OSMetaClassDefineReservedUsed(IODMACommand, 5);
OSMetaClassDefineReservedUsed(IODMACommand, 6);
OSMetaClassDefineReservedUnused(IODMACommand, 7);
OSMetaClassDefineReservedUnused(IODMACommand, 8);
OSMetaClassDefineReservedUnused(IODMACommand, 9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);

IODMACommand *
IODMACommand::withRefCon(void * refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithRefCon(refCon)) {
        me->release();
        return NULL;
    }

    return me;
}

IODMACommand *
IODMACommand::withSpecification(SegmentFunction outSegFunc,
    const SegmentOptions * segmentOptions,
    uint32_t               mappingOptions,
    IOMapper             * mapper,
    void                 * refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc, segmentOptions, mappingOptions,
        mapper, refCon)) {
        me->release();
        return NULL;
    }

    return me;
}

IODMACommand *
IODMACommand::withSpecification(SegmentFunction outSegFunc,
    UInt8           numAddressBits,
    UInt64          maxSegmentSize,
    MappingOptions  mappingOptions,
    UInt64          maxTransferSize,
    UInt64          alignment,
    IOMapper       *mapper,
    void           *refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc,
        numAddressBits, maxSegmentSize,
        mappingOptions, maxTransferSize,
        alignment, mapper, refCon)) {
        me->release();
        return NULL;
    }

    return me;
}

IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    SegmentOptions segmentOptions =
    {
        .fStructSize                = sizeof(segmentOptions),
        .fNumAddressBits            = (uint8_t)fNumAddressBits,
        .fMaxSegmentSize            = fMaxSegmentSize,
        .fMaxTransferSize           = fMaxTransferSize,
        .fAlignment                 = fAlignMask + 1,
        .fAlignmentLength           = fAlignMaskLength + 1,
        .fAlignmentInternalSegments = fAlignMaskInternalSegments + 1
    };

    return IODMACommand::withSpecification(fOutSeg, &segmentOptions,
               fMappingOptions, fMapper, refCon);
}

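/*
 * Illustrative usage sketch (not part of the original file; kept compiled
 * out). A typical driver builds a command against its hardware limits,
 * targets a memory descriptor, and asks for device-visible segments. The
 * function name and the segment-array size here are hypothetical.
 */
#if 0
static IOReturn
ExampleGenerateSegments(IOMemoryDescriptor * md)
{
    IODMACommand::SegmentOptions options =
    {
        .fStructSize                = sizeof(options),
        .fNumAddressBits            = 32, // device issues 32-bit addresses
        .fMaxSegmentSize            = 0,  // 0 -> no limit (becomes -1)
        .fMaxTransferSize           = 0,  // 0 -> no limit (becomes -1)
        .fAlignment                 = 1,
        .fAlignmentLength           = 1,
        .fAlignmentInternalSegments = 1
    };
    IODMACommand * cmd = IODMACommand::withSpecification(
        IODMACommand::OutputHost64, &options, IODMACommand::kMapped,
        /* mapper */ NULL, /* refCon */ NULL);
    if (!cmd) {
        return kIOReturnNoMemory;
    }

    // setMemoryDescriptor() defaults to autoPrepare=true, so it also
    // calls prepare() over the whole descriptor.
    IOReturn ret = cmd->setMemoryDescriptor(md);
    if (kIOReturnSuccess == ret) {
        UInt64                  offset = 0;
        IODMACommand::Segment64 segs[8];
        UInt32                  numSegs = 8;
        ret = cmd->genIOVMSegments(&offset, &segs[0], &numSegs);
        // program the hardware with segs[0 .. numSegs-1] here
        cmd->clearMemoryDescriptor(); // also completes the prepare
    }
    cmd->release();
    return ret;
}
#endif
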
#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)

bool
IODMACommand::initWithRefCon(void * refCon)
{
    if (!super::init()) {
        return false;
    }

    if (!reserved) {
        reserved = IONew(IODMACommandInternal, 1);
        if (!reserved) {
            return false;
        }
    }
    bzero(reserved, sizeof(IODMACommandInternal));
    fRefCon = refCon;

    return true;
}

bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
    const SegmentOptions * segmentOptions,
    uint32_t               mappingOptions,
    IOMapper             * mapper,
    void                 * refCon)
{
    if (!initWithRefCon(refCon)) {
        return false;
    }

    if (kIOReturnSuccess != setSpecification(outSegFunc, segmentOptions,
        mappingOptions, mapper)) {
        return false;
    }

    return true;
}

bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
    UInt8           numAddressBits,
    UInt64          maxSegmentSize,
    MappingOptions  mappingOptions,
    UInt64          maxTransferSize,
    UInt64          alignment,
    IOMapper       *mapper,
    void           *refCon)
{
    SegmentOptions segmentOptions =
    {
        .fStructSize                = sizeof(segmentOptions),
        .fNumAddressBits            = numAddressBits,
        .fMaxSegmentSize            = maxSegmentSize,
        .fMaxTransferSize           = maxTransferSize,
        .fAlignment                 = alignment,
        .fAlignmentLength           = 1,
        .fAlignmentInternalSegments = alignment
    };

    return initWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper, refCon);
}

IOReturn
IODMACommand::setSpecification(SegmentFunction outSegFunc,
    const SegmentOptions * segmentOptions,
    uint32_t               mappingOptions,
    IOMapper             * mapper)
{
    IOService * device = NULL;
    UInt8       numAddressBits;
    UInt64      maxSegmentSize;
    UInt64      maxTransferSize;
    UInt32      alignment;
    bool        is32Bit;

    if (!outSegFunc || !segmentOptions) {
        return kIOReturnBadArgument;
    }

    is32Bit = ((OutputHost32 == outSegFunc)
        || (OutputBig32 == outSegFunc)
        || (OutputLittle32 == outSegFunc));

    numAddressBits  = segmentOptions->fNumAddressBits;
    maxSegmentSize  = segmentOptions->fMaxSegmentSize;
    maxTransferSize = segmentOptions->fMaxTransferSize;
    alignment       = segmentOptions->fAlignment;
    if (is32Bit) {
        if (!numAddressBits) {
            numAddressBits = 32;
        } else if (numAddressBits > 32) {
            return kIOReturnBadArgument; // Wrong output function for bits
        }
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT)) {
        return kIOReturnBadArgument;
    }

    if (!maxSegmentSize) {
        maxSegmentSize--; // Set Max segment to -1
    }
    if (!maxTransferSize) {
        maxTransferSize--; // Set Max transfer to -1
    }
    if (mapper && !OSDynamicCast(IOMapper, mapper)) {
        device = mapper;
        mapper = NULL;
    }
    if (!mapper && (kUnmapped != MAPTYPE(mappingOptions))) {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment) {
        alignment = 1;
    }
    fAlignMask = alignment - 1;

    alignment = segmentOptions->fAlignmentLength;
    if (!alignment) {
        alignment = 1;
    }
    fAlignMaskLength = alignment - 1;

    alignment = segmentOptions->fAlignmentInternalSegments;
    if (!alignment) {
        alignment = (fAlignMask + 1);
    }
    fAlignMaskInternalSegments = alignment - 1;

    switch (MAPTYPE(mappingOptions)) {
    case kMapped:      break;
    case kUnmapped:    break;
    case kNonCoherent: break;

    case kBypassed:
        if (!mapper) {
            break;
        }
        return kIOReturnBadArgument;

    default:
        return kIOReturnBadArgument;
    }

    if (mapper != fMapper) {
        if (mapper) {
            mapper->retain();
        }
        if (fMapper) {
            fMapper->release();
        }
        fMapper = mapper;
    }

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
    fInternalState->fDevice = device;

    return kIOReturnSuccess;
}

void
IODMACommand::free()
{
    if (reserved) {
        IODelete(reserved, IODMACommandInternal, 1);
    }

    if (fMapper) {
        fMapper->release();
    }

    super::free();
}

IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    IOReturn err = kIOReturnSuccess;

    if (mem == fMemory) {
        if (!autoPrepare) {
            while (fActive) {
                complete();
            }
        }
        return kIOReturnSuccess;
    }

    if (fMemory) {
        // As we are almost certainly being called from a work loop thread
        // if fActive is true it is probably not a good time to potentially
        // block. Just test for it and return an error
        if (fActive) {
            return kIOReturnBusy;
        }
        clearMemoryDescriptor();
    }

    if (mem) {
        bzero(&fMDSummary, sizeof(fMDSummary));
        err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)),
            &fMDSummary, sizeof(fMDSummary));
        if (err) {
            return err;
        }

        ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

        if ((kMapped == MAPTYPE(fMappingOptions))
            && fMapper) {
            fInternalState->fCheckAddressing = false;
        } else {
            fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));
        }

        fInternalState->fNewMD = true;
        mem->retain();
        fMemory = mem;
        fInternalState->fSetActiveNoMapper = (!fMapper);
        if (fInternalState->fSetActiveNoMapper) {
            mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
        }
        if (autoPrepare) {
            err = prepare();
            if (err) {
                clearMemoryDescriptor();
            }
        }
    }

    return err;
}

IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
    if (fActive && !autoComplete) {
        return kIOReturnNotReady;
    }

    if (fMemory) {
        while (fActive) {
            complete();
        }
        if (fInternalState->fSetActiveNoMapper) {
            fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
        }
        fMemory->release();
        fMemory = NULL;
    }

    return kIOReturnSuccess;
}

const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
    return fMemory;
}

IOMemoryDescriptor *
IODMACommand::getIOMemoryDescriptor() const
{
    IOMemoryDescriptor * mem;

    mem = reserved->fCopyMD;
    if (!mem) {
        mem = __IODEQUALIFY(IOMemoryDescriptor *, fMemory);
    }

    return mem;
}

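/*
 * segmentOp() is the internal walk callback: during preflight it counts the
 * bounce pages a misaligned or out-of-range segment would need, during
 * prepare it assigns copy pages, and during the sync walks it moves data
 * between the original pages and the bounce pages with copypv().
 */
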
IOReturn
IODMACommand::segmentOp(
    void         *reference,
    IODMACommand *target,
    Segment64     segment,
    void         *segments,
    UInt32        segmentIndex)
{
    IOOptionBits op = (uintptr_t) reference;
    addr64_t     maxPhys, address;
    uint64_t     length;
    uint32_t     numPages;
    uint64_t     mask;

    IODMACommandInternal * state = target->reserved;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperAllocValid || !target->fMapper)) {
        maxPhys = (1ULL << target->fNumAddressBits);
    } else {
        maxPhys = 0;
    }
    maxPhys--;

    address = segment.fIOVMAddr;
    length  = segment.fLength;

    assert(length);

    if (!state->fMisaligned) {
        mask = (segmentIndex ? target->fAlignMaskInternalSegments : state->fSourceAlignMask);
        state->fMisaligned |= (0 != (mask & address));
        if (state->fMisaligned) {
            DEBG("misaligned address %qx:%qx, %x\n", address, length, mask);
        }
    }
    if (!state->fMisaligned) {
        mask = target->fAlignMaskLength;
        state->fMisaligned |= (0 != (mask & length));
        if (state->fMisaligned) {
            DEBG("misaligned length %qx:%qx, %x\n", address, length, mask);
        }
    }

    if (state->fMisaligned && (kWalkPreflight & op)) {
        return kIOReturnNotAligned;
    }

    if (!state->fDoubleBuffer) {
        if ((address + length - 1) <= maxPhys) {
            length = 0;
        } else if (address <= maxPhys) {
            DEBG("tail %qx, %qx", address, length);
            length  = (address + length - maxPhys - 1);
            address = maxPhys + 1;
            DEBG("-> %qx, %qx\n", address, length);
        }
    }

    if (!length) {
        return kIOReturnSuccess;
    }

    numPages = atop_64(round_page_64((address & PAGE_MASK) + length));

    if (kWalkPreflight & op) {
        state->fCopyPageCount += numPages;
    } else {
        vm_page_t lastPage;
        lastPage = NULL;
        if (kWalkPrepare & op) {
            lastPage = state->fCopyNext;
            for (IOItemCount idx = 0; idx < numPages; idx++) {
                vm_page_set_offset(lastPage, atop_64(address) + idx);
                lastPage = vm_page_get_next(lastPage);
            }
        }

        if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection)) {
            lastPage = state->fCopyNext;
            for (IOItemCount idx = 0; idx < numPages; idx++) {
                if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection)) {
                    addr64_t cpuAddr = address;
                    addr64_t remapAddr;
                    uint64_t chunk;

                    if ((kMapped == MAPTYPE(target->fMappingOptions))
                        && target->fMapper) {
                        cpuAddr = target->fMapper->mapToPhysicalAddress(address);
                    }

                    remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
                    if (!state->fDoubleBuffer) {
                        remapAddr += (address & PAGE_MASK);
                    }
                    chunk = PAGE_SIZE - (address & PAGE_MASK);
                    if (chunk > length) {
                        chunk = length;
                    }

                    DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
                        (kWalkSyncIn & op) ? "->" : "<-",
                        address, chunk, op);

                    if (kWalkSyncIn & op) { // cppvNoModSnk
                        copypv(remapAddr, cpuAddr, chunk,
                            cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc);
                    } else {
                        copypv(cpuAddr, remapAddr, chunk,
                            cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc);
                    }
                    address += chunk;
                    length  -= chunk;
                }
                lastPage = vm_page_get_next(lastPage);
            }
        }
        state->fCopyNext = lastPage;
    }

    return kIOReturnSuccess;
}

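/*
 * createCopyBuffer() allocates the contiguous double-buffer used when
 * individual bounce pages cannot be used; the 0xFFFFF000 physical mask
 * requests page-aligned memory addressable with 32 bits.
 */
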
IOBufferMemoryDescriptor *
IODMACommand::createCopyBuffer(IODirection direction, UInt64 length)
{
    mach_vm_address_t mask = 0xFFFFF000; //state->fSourceAlignMask
    return IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
               direction, length, mask);
}

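/*
 * walkAll() drives the bounce-buffer state machine: the preflight pass
 * counts copy pages, the prepare pass allocates them (falling back to a
 * single contiguous copy buffer), the sync passes move data in or out, and
 * the complete pass frees everything.
 */
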
IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn ret = kIOReturnSuccess;
    UInt32   numSegments;
    UInt64   offset;

    if (kWalkPreflight & op) {
        state->fMisaligned    = false;
        state->fDoubleBuffer  = false;
        state->fPrepared      = false;
        state->fCopyNext      = NULL;
        state->fCopyPageAlloc = NULL;
        state->fCopyPageCount = 0;
        state->fNextRemapPage = NULL;
        state->fCopyMD        = NULL;

        if (!(kWalkDoubleBuffer & op)) {
            offset = 0;
            numSegments = 0 - 1;
            ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
        }

        op &= ~kWalkPreflight;

        state->fDoubleBuffer = (state->fMisaligned || state->fForceDoubleBuffer);
        state->fForceDoubleBuffer = false;
        if (state->fDoubleBuffer) {
            state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));
        }

        if (state->fCopyPageCount) {
            vm_page_t mapBase = NULL;

            DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

            if (!fMapper && !state->fDoubleBuffer) {
                kern_return_t kr;

                if (fMapper) {
                    panic("fMapper copying");
                }

                kr = vm_page_alloc_list(state->fCopyPageCount,
                    KMA_LOMEM | KMA_NOPAGEWAIT, &mapBase);
                if (KERN_SUCCESS != kr) {
                    DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr);
                    mapBase = NULL;
                }
            }

            if (mapBase) {
                state->fCopyPageAlloc = mapBase;
                state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0 - 1;
                ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
                state->fPrepared = true;
                op &= ~(kWalkSyncIn | kWalkSyncOut);
            } else {
                DEBG("alloc IOBMD\n");
                state->fCopyMD = createCopyBuffer(fMDSummary.fDirection, state->fPreparedLength);

                if (state->fCopyMD) {
                    ret = kIOReturnSuccess;
                    state->fPrepared = true;
                } else {
                    DEBG("IODMACommand !alloc IOBMD");
                    return kIOReturnNoResources;
                }
            }
        }
    }

    if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op)) {
        if (state->fCopyPageCount) {
            DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

            if (state->fCopyPageAlloc) {
                state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0 - 1;
                ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
            } else if (state->fCopyMD) {
                DEBG("sync IOBMD\n");

                if (SHOULD_COPY_DIR(op, fMDSummary.fDirection)) {
                    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

                    IOByteCount bytes;

                    if (kWalkSyncIn & op) {
                        bytes = poMD->writeBytes(state->fPreparedOffset,
                            state->fCopyMD->getBytesNoCopy(),
                            state->fPreparedLength);
                    } else {
                        bytes = poMD->readBytes(state->fPreparedOffset,
                            state->fCopyMD->getBytesNoCopy(),
                            state->fPreparedLength);
                    }
                    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
                    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
                } else {
                    ret = kIOReturnSuccess;
                }
            }
        }
    }

    if (kWalkComplete & op) {
        if (state->fCopyPageAlloc) {
            vm_page_free_list(state->fCopyPageAlloc, FALSE);
            state->fCopyPageAlloc = NULL;
            state->fCopyPageCount = 0;
        }
        if (state->fCopyMD) {
            state->fCopyMD->release();
            state->fCopyMD = NULL;
        }

        state->fPrepared = false;
    }
    return ret;
}

UInt8
IODMACommand::getNumAddressBits(void)
{
    return (UInt8) fNumAddressBits;
}

UInt64
IODMACommand::getAlignment(void)
{
    return fAlignMask + 1;
}

uint32_t
IODMACommand::getAlignmentLength(void)
{
    return fAlignMaskLength + 1;
}

uint32_t
IODMACommand::getAlignmentInternalSegments(void)
{
    return fAlignMaskInternalSegments + 1;
}

IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
    const SegmentOptions * segmentOptions,
    uint32_t               mappingOptions,
    IOMapper             * mapper,
    UInt64                 offset,
    UInt64                 length,
    bool                   flushCache,
    bool                   synchronize)
{
    IOReturn ret;

    if (fActive) {
        return kIOReturnNotPermitted;
    }

    ret = setSpecification(outSegFunc, segmentOptions, mappingOptions, mapper);
    if (kIOReturnSuccess != ret) {
        return ret;
    }

    ret = prepare(offset, length, flushCache, synchronize);

    return ret;
}

IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
    UInt8           numAddressBits,
    UInt64          maxSegmentSize,
    MappingOptions  mappingOptions,
    UInt64          maxTransferSize,
    UInt64          alignment,
    IOMapper       *mapper,
    UInt64          offset,
    UInt64          length,
    bool            flushCache,
    bool            synchronize)
{
    SegmentOptions segmentOptions =
    {
        .fStructSize                = sizeof(segmentOptions),
        .fNumAddressBits            = numAddressBits,
        .fMaxSegmentSize            = maxSegmentSize,
        .fMaxTransferSize           = maxTransferSize,
        .fAlignment                 = alignment,
        .fAlignmentLength           = 1,
        .fAlignmentInternalSegments = alignment
    };

    return prepareWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper,
               offset, length, flushCache, synchronize);
}

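/*
 * prepare() fixes the transfer window [offset, offset + length) and, when
 * the memory cannot be DMAed in place (addressing or alignment limits), runs
 * the preflight/prepare walk and maps the result through the mapper. Nested
 * prepare() calls are counted by fActive and must use the same window.
 */
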
IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret = kIOReturnSuccess;
    uint32_t               mappingOptions = fMappingOptions;

    // check specification has been set
    if (!fOutSeg) {
        return kIOReturnNotReady;
    }

    if (!length) {
        length = fMDSummary.fLength;
    }

    if (length > fMaxTransferSize) {
        return kIOReturnNoSpace;
    }

    if (fActive++) {
        if ((state->fPreparedOffset != offset)
            || (state->fPreparedLength != length)) {
            ret = kIOReturnNotReady;
        }
    } else {
        if (fAlignMaskLength & length) {
            return kIOReturnNotAligned;
        }

        state->fPreparedOffset = offset;
        state->fPreparedLength = length;

        state->fMapContig      = false;
        state->fMisaligned     = false;
        state->fDoubleBuffer   = false;
        state->fPrepared       = false;
        state->fCopyNext       = NULL;
        state->fCopyPageAlloc  = NULL;
        state->fCopyPageCount  = 0;
        state->fNextRemapPage  = NULL;
        state->fCopyMD         = NULL;
        state->fLocalMapperAlloc       = 0;
        state->fLocalMapperAllocValid  = false;
        state->fLocalMapperAllocLength = 0;

        state->fSourceAlignMask = fAlignMask;
        if (fMapper) {
            state->fSourceAlignMask &= page_mask;
        }

        state->fCursor = state->fIterateOnly
            || (!state->fCheckAddressing
            && (!state->fSourceAlignMask
            || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));

        if (!state->fCursor) {
            IOOptionBits op = kWalkPrepare | kWalkPreflight;
            if (synchronize) {
                op |= kWalkSyncOut;
            }
            ret = walkAll(op);
        }

        if (IS_NONCOHERENT(mappingOptions) && flushCache) {
            if (state->fCopyMD) {
                state->fCopyMD->performOperation(kIOMemoryIncoherentIOStore, 0, length);
            } else {
                IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory);
                md->performOperation(kIOMemoryIncoherentIOStore, offset, length);
            }
        }

        if (fMapper) {
            IOMDDMAMapArgs mapArgs;
            bzero(&mapArgs, sizeof(mapArgs));
            mapArgs.fMapper  = fMapper;
            mapArgs.fCommand = this;
            mapArgs.fMapSpec.device         = state->fDevice;
            mapArgs.fMapSpec.alignment      = fAlignMask + 1;
            mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? fNumAddressBits : 64;
            mapArgs.fLength = state->fPreparedLength;
            const IOMemoryDescriptor * md = state->fCopyMD;
            if (md) {
                mapArgs.fOffset = 0;
            } else {
                md = fMemory;
                mapArgs.fOffset = state->fPreparedOffset;
            }

            ret = md->dmaCommandOperation(kIOMDDMAMap | state->fIterateOnly, &mapArgs, sizeof(mapArgs));
            //IOLog("dma %p 0x%x 0x%qx-0x%qx 0x%qx-0x%qx\n", this, ret, state->fPreparedOffset, state->fPreparedLength, mapArgs.fAlloc, mapArgs.fAllocLength);

            if (kIOReturnSuccess == ret) {
                state->fLocalMapperAlloc       = mapArgs.fAlloc;
                state->fLocalMapperAllocValid  = true;
                state->fLocalMapperAllocLength = mapArgs.fAllocLength;
                state->fMapContig = mapArgs.fMapContig;
            }
            if (NULL != IOMapper::gSystem) {
                ret = kIOReturnSuccess;
            }
        }
        if (kIOReturnSuccess == ret) {
            state->fPrepared = true;
        }
    }
    return ret;
}

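/*
 * complete() undoes the last prepare(): on the final nested completion it
 * optionally flushes non-coherent caches, syncs bounce data back through
 * walkAll(kWalkComplete ...), and releases any mapper allocation.
 */
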
IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret = kIOReturnSuccess;
    IOMemoryDescriptor   * copyMD;

    if (fActive < 1) {
        return kIOReturnNotReady;
    }

    if (!--fActive) {
        copyMD = state->fCopyMD;
        if (copyMD) {
            copyMD->retain();
        }

        if (IS_NONCOHERENT(fMappingOptions) && invalidateCache) {
            if (copyMD) {
                copyMD->performOperation(kIOMemoryIncoherentIOFlush, 0, state->fPreparedLength);
            } else {
                IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory);
                md->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
            }
        }

        if (!state->fCursor) {
            IOOptionBits op = kWalkComplete;
            if (synchronize) {
                op |= kWalkSyncIn;
            }
            ret = walkAll(op);
        }

        if (state->fLocalMapperAllocValid) {
            IOMDDMAMapArgs mapArgs;
            bzero(&mapArgs, sizeof(mapArgs));
            mapArgs.fMapper  = fMapper;
            mapArgs.fCommand = this;
            mapArgs.fAlloc = state->fLocalMapperAlloc;
            mapArgs.fAllocLength = state->fLocalMapperAllocLength;
            const IOMemoryDescriptor * md = copyMD;
            if (md) {
                mapArgs.fOffset = 0;
            } else {
                md = fMemory;
                mapArgs.fOffset = state->fPreparedOffset;
            }

            ret = md->dmaCommandOperation(kIOMDDMAUnmap, &mapArgs, sizeof(mapArgs));

            state->fLocalMapperAlloc       = 0;
            state->fLocalMapperAllocValid  = false;
            state->fLocalMapperAllocLength = 0;
        }
        if (copyMD) {
            copyMD->release();
        }
        state->fPrepared = false;
    }

    return ret;
}

IOReturn
IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
{
    IODMACommandInternal * state = fInternalState;
    if (fActive < 1) {
        return kIOReturnNotReady;
    }

    if (offset) {
        *offset = state->fPreparedOffset;
    }
    if (length) {
        *length = state->fPreparedLength;
    }

    return kIOReturnSuccess;
}

IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret = kIOReturnSuccess;
    IOOptionBits           op;

    if (kIODirectionOutIn == (kIODirectionOutIn & options)) {
        return kIOReturnBadArgument;
    }

    if (fActive < 1) {
        return kIOReturnNotReady;
    }

    op = 0;
    if (kForceDoubleBuffer & options) {
        if (state->fDoubleBuffer) {
            return kIOReturnSuccess;
        }
        ret = complete(false /* invalidateCache */, true /* synchronize */);
        state->fCursor = false;
        state->fForceDoubleBuffer = true;
        ret = prepare(state->fPreparedOffset, state->fPreparedLength, false /* flushCache */, true /* synchronize */);

        return ret;
    } else if (state->fCursor) {
        return kIOReturnSuccess;
    }

    if (kIODirectionIn & options) {
        op |= kWalkSyncIn | kWalkSyncAlways;
    } else if (kIODirectionOut & options) {
        op |= kWalkSyncOut | kWalkSyncAlways;
    }

    ret = walkAll(op);

    return ret;
}

struct IODMACommandTransferContext {
    void *   buffer;
    UInt64   bufferOffset;
    UInt64   remaining;
    UInt32   op;
};
enum {
    kIODMACommandTransferOpReadBytes  = 1,
    kIODMACommandTransferOpWriteBytes = 2
};

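/*
 * transferSegment() is the per-segment callback behind readBytes() and
 * writeBytes(): it maps each I/O address back to a physical address where
 * needed and moves at most one page per copypv() call.
 */
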
IOReturn
IODMACommand::transferSegment(void *reference,
    IODMACommand *target,
    Segment64     segment,
    void         *segments,
    UInt32        segmentIndex)
{
    IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
    UInt64   length  = min(segment.fLength, context->remaining);
    addr64_t ioAddr  = segment.fIOVMAddr;
    addr64_t cpuAddr = ioAddr;

    context->remaining -= length;

    while (length) {
        UInt64 copyLen = length;
        if ((kMapped == MAPTYPE(target->fMappingOptions))
            && target->fMapper) {
            cpuAddr = target->fMapper->mapToPhysicalAddress(ioAddr);
            copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
            ioAddr += copyLen;
        }

        switch (context->op) {
        case kIODMACommandTransferOpReadBytes:
            copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen,
                cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
            break;
        case kIODMACommandTransferOpWriteBytes:
            copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen,
                cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
            break;
        }
        length                -= copyLen;
        context->bufferOffset += copyLen;
    }

    return context->remaining ? kIOReturnSuccess : kIOReturnOverrun;
}

UInt64
IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
{
    IODMACommandInternal *      state = fInternalState;
    IODMACommandTransferContext context;
    Segment64                   segments[1];
    UInt32                      numSegments = 0 - 1;

    if (fActive < 1) {
        return 0;
    }

    if (offset >= state->fPreparedLength) {
        return 0;
    }
    length = min(length, state->fPreparedLength - offset);

    context.buffer       = buffer;
    context.bufferOffset = 0;
    context.remaining    = length;
    context.op           = transferOp;
    (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);

    return length - context.remaining;
}

UInt64
IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
{
    return transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length);
}

UInt64
IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
{
    return transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length);
}

IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
    void   *segmentsP,
    UInt32 *numSegmentsP)
{
    return genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
               offsetP, segmentsP, numSegmentsP);
}

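/*
 * The internal genIOVMSegments() walks the memory descriptor, coalescing
 * physically contiguous ranges and then splitting the candidate segment
 * against maxPhys, fMaxSegmentSize and the alignment masks before handing it
 * to outSegFunc. *offsetP is advanced so the caller can iterate.
 */
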
IOReturn
IODMACommand::genIOVMSegments(uint32_t op,
    InternalSegmentFunction outSegFunc,
    void   *reference,
    UInt64 *offsetP,
    void   *segmentsP,
    UInt32 *numSegmentsP)
{
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp = kIOMDWalkSegments;
    IOReturn               ret = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive) {
        return kIOReturnNotReady;
    }

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP) {
        return kIOReturnBadArgument;
    }

    IOMDDMAWalkSegmentArgs *state =
        (IOMDDMAWalkSegmentArgs *)(void *) fState;

    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength) {
        return kIOReturnOverrun;
    }

    if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
        state->fOffset                = 0;
        internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
        internalState->fNextRemapPage = NULL;
        internalState->fNewMD         = false;
        mdOp = kIOMDFirstSegment;
        if (fMapper) {
            if (internalState->fLocalMapperAllocValid) {
                state->fMapped = kIOMDDMAWalkMappedLocal;
                state->fMappedBase = internalState->fLocalMapperAlloc;
            } else {
                state->fMapped = true;
            }
        }
    }

    UInt32    segIndex = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg = { 0, 0 };
    bool      curSegValid = false;
    addr64_t  maxPhys;

    if (fNumAddressBits && (fNumAddressBits < 64)) {
        maxPhys = (1ULL << fNumAddressBits);
    } else {
        maxPhys = 0;
    }
    maxPhys--;

    while (internalState->fIOVMAddrValid || (state->fOffset < memLength)) {
        // state = next seg
        if (!internalState->fIOVMAddrValid) {
            IOReturn rtn;

            state->fOffset = offset;
            state->fLength = memLength - offset;

            if (internalState->fMapContig && internalState->fLocalMapperAllocValid) {
                state->fIOVMAddr = internalState->fLocalMapperAlloc + offset - internalState->fPreparedOffset;
                rtn = kIOReturnSuccess;
#if 0
                {
                    uint64_t checkOffset;
                    IOPhysicalLength segLen;
                    for (checkOffset = 0; checkOffset < state->fLength;) {
                        addr64_t phys = const_cast<IOMemoryDescriptor *>(fMemory)->getPhysicalSegment(checkOffset + offset, &segLen, kIOMemoryMapperNone);
                        if (fMapper->mapAddr(state->fIOVMAddr + checkOffset) != phys) {
                            panic("%llx != %llx:%llx, %llx phys: %llx %llx\n", offset,
                                state->fIOVMAddr + checkOffset, fMapper->mapAddr(state->fIOVMAddr + checkOffset), state->fLength,
                                phys, checkOffset);
                        }
                        checkOffset += page_size - (phys & page_mask);
                    }
                }
#endif
            } else {
                const IOMemoryDescriptor * memory =
                    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
                rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
                mdOp = kIOMDWalkSegments;
            }

            if (rtn == kIOReturnSuccess) {
                internalState->fIOVMAddrValid = true;
                assert(state->fLength);
                if (curSegValid && ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr)) {
                    UInt64 length = state->fLength;
                    offset         += length;
                    curSeg.fLength += length;
                    internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
                }
            } else if (rtn == kIOReturnOverrun) {
                internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0; // At end
            } else {
                return rtn;
            }
        }

        // seg = state, offset = end of seg
        if (!curSegValid) {
            UInt64 length = state->fLength;
            offset          += length;
            curSeg.fIOVMAddr = state->fIOVMAddr;
            curSeg.fLength   = length;
            curSegValid      = true;
            internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
        }

        if (!internalState->fIOVMAddrValid) {
            // maxPhys
            if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys) {
                if (internalState->fCursor) {
                    curSegValid = curSeg.fIOVMAddr = 0;
                    ret = kIOReturnMessageTooLarge;
                    break;
                } else if (curSeg.fIOVMAddr <= maxPhys) {
                    UInt64 remain, newLength;

                    newLength = (maxPhys + 1 - curSeg.fIOVMAddr);
                    DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
                    remain = curSeg.fLength - newLength;
                    state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
                    internalState->fIOVMAddrValid = true;
                    curSeg.fLength = newLength;
                    state->fLength = remain;
                    offset        -= remain;
                } else {
                    UInt64    addr = curSeg.fIOVMAddr;
                    ppnum_t   addrPage = atop_64(addr);
                    vm_page_t remap = NULL;
                    UInt64    remain, newLength;

                    DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength);

                    remap = internalState->fNextRemapPage;
                    if (remap && (addrPage == vm_page_get_offset(remap))) {
                    } else {
                        for (remap = internalState->fCopyPageAlloc;
                            remap && (addrPage != vm_page_get_offset(remap));
                            remap = vm_page_get_next(remap)) {
                        }
                    }

                    if (!remap) {
                        panic("no remap page found");
                    }

                    curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap))
                        + (addr & PAGE_MASK);
                    curSegValid = true;
                    internalState->fNextRemapPage = vm_page_get_next(remap);

                    newLength = PAGE_SIZE - (addr & PAGE_MASK);
                    if (newLength < curSeg.fLength) {
                        remain = curSeg.fLength - newLength;
                        state->fIOVMAddr = addr + newLength;
                        internalState->fIOVMAddrValid = true;
                        curSeg.fLength = newLength;
                        state->fLength = remain;
                        offset        -= remain;
                    }
                    DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset);
                }
            }

            // reduce size of output segment
            uint64_t reduce, leftover = 0;

            // fMaxSegmentSize
            if (curSeg.fLength > fMaxSegmentSize) {
                leftover      += curSeg.fLength - fMaxSegmentSize;
                curSeg.fLength = fMaxSegmentSize;
                state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
                internalState->fIOVMAddrValid = true;
            }

            // alignment current length
            reduce = (curSeg.fLength & fAlignMaskLength);
            if (reduce && (curSeg.fLength > reduce)) {
                leftover       += reduce;
                curSeg.fLength -= reduce;
                state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
                internalState->fIOVMAddrValid = true;
            }

            // alignment next address
            reduce = (state->fIOVMAddr & fAlignMaskInternalSegments);
            if (reduce && (curSeg.fLength > reduce)) {
                leftover       += reduce;
                curSeg.fLength -= reduce;
                state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
                internalState->fIOVMAddrValid = true;
            }

            if (leftover) {
                DEBG("reduce seg by 0x%llx @ 0x%llx [0x%llx, 0x%llx]\n",
                    leftover, offset,
                    curSeg.fIOVMAddr, curSeg.fLength);
                state->fLength = leftover;
                offset        -= leftover;
            }

            if (internalState->fCursor) {
                bool     misaligned;
                uint32_t mask;

                mask = (segIndex ? fAlignMaskInternalSegments : internalState->fSourceAlignMask);
                misaligned = (0 != (mask & curSeg.fIOVMAddr));
                if (!misaligned) {
                    mask = fAlignMaskLength;
                    misaligned |= (0 != (mask & curSeg.fLength));
                }
                if (misaligned) {
                    DEBG("cursor misaligned %qx:%qx\n", curSeg.fIOVMAddr, curSeg.fLength);
                    curSegValid = curSeg.fIOVMAddr = 0;
                    ret = kIOReturnNotAligned;
                    break;
                }
            }

            if (offset >= memLength) {
                curSeg.fLength -= (offset - memLength);
                offset = memLength;
                internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0; // At end
                break;
            }
        }

        if (internalState->fIOVMAddrValid) {
            if (segIndex + 1 == numSegments) {
                break;
            }

            ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
            curSegValid = curSeg.fIOVMAddr = 0;
            if (kIOReturnSuccess != ret) {
                break;
            }
        }
    }

    if (curSegValid) {
        ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret) {
        state->fOffset = offset;
        *offsetP       = offset - internalState->fPreparedOffset;
        *numSegmentsP  = segIndex;
    }

    return ret;
}

IOReturn
IODMACommand::clientOutputSegment(
    void *reference, IODMACommand *target,
    Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    SegmentFunction segmentFunction = (SegmentFunction) reference;
    IOReturn        ret = kIOReturnSuccess;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64)
        && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
        && (target->reserved->fLocalMapperAllocValid || !target->fMapper)) {
        DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    if (!(*segmentFunction)(target, segment, vSegList, outSegIndex)) {
        DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    return ret;
}

IOReturn
IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
    UInt64 *offsetP,
    void   *segmentsP,
    UInt32 *numSegmentsP)
{
    return genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
               offsetP, segmentsP, numSegmentsP);
}

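/*
 * The Output* helpers are the built-in SegmentFunctions: they store each
 * generated segment into the caller's array as 32- or 64-bit address/length
 * pairs in host, big, or little endian byte order.
 */
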
bool
IODMACommand::OutputHost32(IODMACommand *,
    Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment32 *base = (Segment32 *) vSegList;
    base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
    base[outSegIndex].fLength   = (UInt32) segment.fLength;
    return true;
}

bool
IODMACommand::OutputBig32(IODMACommand *,
    Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteBigInt32(vSegList, offLen, (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle32(IODMACommand *,
    Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteLittleInt32(vSegList, offLen, (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputHost64(IODMACommand *,
    Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment64 *base = (Segment64 *) vSegList;
    base[outSegIndex] = segment;
    return true;
}

bool
IODMACommand::OutputBig64(IODMACommand *,
    Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteBigInt64(vSegList, offLen, (UInt64) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle64(IODMACommand *,
    Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteLittleInt64(vSegList, offLen, (UInt64) segment.fLength);
    return true;
}