/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>

#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>
#include <libkern/OSDebug.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"
#define MAPTYPE(type)        ((UInt) (type) & kTypeMask)
#define IS_NONCOHERENT(type) (MAPTYPE(type) == kNonCoherent)
enum
{
    kWalkSyncIn       = 0x01,	// bounce -> md
    kWalkSyncOut      = 0x02,	// bounce <- md
    kWalkSyncAlways   = 0x04,
    kWalkPreflight    = 0x08,
    kWalkDoubleBuffer = 0x10,
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80
};
#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary
#if 1
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction)					    \
	((kIODirectionNone == (direction))				    \
	    || (kWalkSyncAlways & (op))					    \
	    || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)   \
						    & (direction)))
#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif
#if 0
#define DEBG(fmt, args...)  { IOLog(fmt, ## args); kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)  {}
#endif
/**************************** class IODMACommand ***************************/
#define super IOCommand
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);
OSMetaClassDefineReservedUsed(IODMACommand,  0);
OSMetaClassDefineReservedUsed(IODMACommand,  1);
OSMetaClassDefineReservedUsed(IODMACommand,  2);
OSMetaClassDefineReservedUsed(IODMACommand,  3);
OSMetaClassDefineReservedUsed(IODMACommand,  4);
OSMetaClassDefineReservedUsed(IODMACommand,  5);
OSMetaClassDefineReservedUsed(IODMACommand,  6);
OSMetaClassDefineReservedUnused(IODMACommand,  7);
OSMetaClassDefineReservedUnused(IODMACommand,  8);
OSMetaClassDefineReservedUnused(IODMACommand,  9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);
IODMACommand *
IODMACommand::withRefCon(void * refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithRefCon(refCon))
    {
	me->release();
	return 0;
    }

    return me;
}
IODMACommand *
IODMACommand::withSpecification(SegmentFunction        outSegFunc,
				const SegmentOptions * segmentOptions,
				uint32_t               mappingOptions,
				IOMapper             * mapper,
				void                 * refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc, segmentOptions, mappingOptions,
					 mapper, refCon))
    {
	me->release();
	return 0;
    }

    return me;
}
IODMACommand *
IODMACommand::withSpecification(SegmentFunction outSegFunc,
				UInt8           numAddressBits,
				UInt64          maxSegmentSize,
				MappingOptions  mappingOptions,
				UInt64          maxTransferSize,
				UInt32          alignment,
				IOMapper       *mapper,
				void           *refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc,
					 numAddressBits, maxSegmentSize,
					 mappingOptions, maxTransferSize,
					 alignment, mapper, refCon))
    {
	me->release();
	return 0;
    }

    return me;
}
IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    SegmentOptions segmentOptions =
    {
	.fStructSize                = sizeof(segmentOptions),
	.fNumAddressBits            = (uint8_t)fNumAddressBits,
	.fMaxSegmentSize            = fMaxSegmentSize,
	.fMaxTransferSize           = fMaxTransferSize,
	.fAlignment                 = fAlignMask + 1,
	.fAlignmentLength           = fAlignMaskLength + 1,
	.fAlignmentInternalSegments = fAlignMaskInternalSegments + 1
    };

    return (IODMACommand::withSpecification(fOutSeg, &segmentOptions,
					    fMappingOptions, fMapper, refCon));
}
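
/*
 * A minimal usage sketch for the factory methods above (not part of the
 * original file): kMyMaxSegmentBytes and the 32-bit constraint are
 * hypothetical driver choices, not values this class prescribes.
 *
 *   SegmentOptions options =
 *   {
 *	.fStructSize                = sizeof(options),
 *	.fNumAddressBits            = 32,		// device DMAs below 4GB
 *	.fMaxSegmentSize            = kMyMaxSegmentBytes,
 *	.fMaxTransferSize           = 0,		// 0 => unlimited
 *	.fAlignment                 = 1,
 *	.fAlignmentLength           = 1,
 *	.fAlignmentInternalSegments = 1
 *   };
 *   IODMACommand * cmd = IODMACommand::withSpecification(
 *	kIODMACommandOutputHost64, &options, kMapped, NULL, NULL);
 *
 * cloneCommand() then mints further commands with the same specification,
 * which is convenient for building a per-device pool of commands.
 */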
#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)
bool
IODMACommand::initWithRefCon(void * refCon)
{
    if (!super::init()) return (false);

    if (!reserved)
    {
	reserved = IONew(IODMACommandInternal, 1);
	if (!reserved) return false;
    }
    bzero(reserved, sizeof(IODMACommandInternal));
    fRefCon = refCon;

    return (true);
}
bool
IODMACommand::initWithSpecification(SegmentFunction        outSegFunc,
				    const SegmentOptions * segmentOptions,
				    uint32_t               mappingOptions,
				    IOMapper             * mapper,
				    void                 * refCon)
{
    if (!initWithRefCon(refCon)) return false;

    if (kIOReturnSuccess != setSpecification(outSegFunc, segmentOptions,
					     mappingOptions, mapper)) return false;

    return (true);
}
bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
				    UInt8           numAddressBits,
				    UInt64          maxSegmentSize,
				    MappingOptions  mappingOptions,
				    UInt64          maxTransferSize,
				    UInt32          alignment,
				    IOMapper       *mapper,
				    void           *refCon)
{
    SegmentOptions segmentOptions =
    {
	.fStructSize                = sizeof(segmentOptions),
	.fNumAddressBits            = numAddressBits,
	.fMaxSegmentSize            = maxSegmentSize,
	.fMaxTransferSize           = maxTransferSize,
	.fAlignment                 = alignment,
	.fAlignmentLength           = 1,
	.fAlignmentInternalSegments = alignment
    };

    return (initWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper, refCon));
}
IOReturn
IODMACommand::setSpecification(SegmentFunction        outSegFunc,
			       const SegmentOptions * segmentOptions,
			       uint32_t               mappingOptions,
			       IOMapper             * mapper)
{
    IOService * device = 0;
    UInt8       numAddressBits;
    UInt64      maxSegmentSize;
    UInt64      maxTransferSize;
    UInt32      alignment;
    bool        is32Bit;

    if (!outSegFunc || !segmentOptions) return (kIOReturnBadArgument);

    is32Bit = ((OutputHost32 == outSegFunc)
	    || (OutputBig32 == outSegFunc)
	    || (OutputLittle32 == outSegFunc));

    numAddressBits  = segmentOptions->fNumAddressBits;
    maxSegmentSize  = segmentOptions->fMaxSegmentSize;
    maxTransferSize = segmentOptions->fMaxTransferSize;
    alignment       = segmentOptions->fAlignment;
    if (is32Bit)
    {
	if (!numAddressBits)
	    numAddressBits = 32;
	else if (numAddressBits > 32)
	    return (kIOReturnBadArgument);	// Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT)) return (kIOReturnBadArgument);

    if (!maxSegmentSize)  maxSegmentSize--;	// Set Max segment to -1
    if (!maxTransferSize) maxTransferSize--;	// Set Max transfer to -1

    if (mapper && !OSDynamicCast(IOMapper, mapper))
    {
	device = mapper;
	mapper = 0;
    }
    if (!mapper && (kUnmapped != MAPTYPE(mappingOptions)))
    {
	IOMapper::checkForSystemMapper();
	mapper = IOMapper::gSystem;
    }

    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment) alignment = 1;
    fAlignMask       = alignment - 1;

    alignment = segmentOptions->fAlignmentLength;
    if (!alignment) alignment = 1;
    fAlignMaskLength = alignment - 1;

    alignment = segmentOptions->fAlignmentInternalSegments;
    if (!alignment) alignment = (fAlignMask + 1);
    fAlignMaskInternalSegments = alignment - 1;

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:      break;
    case kUnmapped:    break;
    case kNonCoherent: break;

    case kTypeMask & kBypassed:
	if (mapper)
	    return (kIOReturnBadArgument);
	break;
    default:
	return (kIOReturnBadArgument);
    }

    if (mapper != fMapper)
    {
	if (mapper)  mapper->retain();
	if (fMapper) fMapper->release();
	fMapper = mapper;
    }

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
    fInternalState->fDevice = device;

    return (kIOReturnSuccess);
}
void
IODMACommand::free()
{
    if (reserved) IODelete(reserved, IODMACommandInternal, 1);

    if (fMapper) fMapper->release();

    super::free();
}
IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    IOReturn err = kIOReturnSuccess;

    if (mem == fMemory)
    {
	if (!autoPrepare)
	{
	    while (fActive)
		complete();
	}
	return kIOReturnSuccess;
    }

    if (fMemory)
    {
	// As we are almost certainly being called from a work loop thread
	// if fActive is true it is probably not a good time to potentially
	// block.  Just test for it and return an error
	if (fActive)
	    return kIOReturnBusy;
	clearMemoryDescriptor();
    }

    if (mem)
    {
	bzero(&fMDSummary, sizeof(fMDSummary));
	err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)),
				       &fMDSummary, sizeof(fMDSummary));
	if (err)
	    return err;

	ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

	if ((kMapped == MAPTYPE(fMappingOptions))
	    && fMapper)
	    fInternalState->fCheckAddressing = false;
	else
	    fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));

	fInternalState->fNewMD = true;
	mem->retain();
	fMemory = mem;

	fInternalState->fSetActiveNoMapper = (!fMapper);
	if (fInternalState->fSetActiveNoMapper) mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
	if (autoPrepare)
	{
	    err = prepare();
	    if (err)
	    {
		clearMemoryDescriptor();
	    }
	}
    }

    return err;
}
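
/*
 * A minimal lifecycle sketch (not part of the original file) showing how a
 * driver typically pairs setMemoryDescriptor() with prepare()/complete().
 * "md" is a hypothetical, already-created IOMemoryDescriptor for the buffer.
 *
 *   cmd->setMemoryDescriptor(md, false);	// associate, don't auto-prepare
 *   cmd->prepare();				// wire + map for DMA
 *   ...generate segments, program the hardware, wait for the interrupt...
 *   cmd->complete();				// unwire, sync bounce buffers
 *   cmd->clearMemoryDescriptor();		// drop the association
 *
 * Note the test above: setting a new descriptor while fActive is nonzero
 * fails with kIOReturnBusy rather than blocking on a work loop thread.
 */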
IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
    if (fActive && !autoComplete) return (kIOReturnNotReady);

    if (fMemory)
    {
	while (fActive) complete();
	if (fInternalState->fSetActiveNoMapper) fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
	fMemory->release();
	fMemory = 0;
    }

    return (kIOReturnSuccess);
}
const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
    return fMemory;
}

IOMemoryDescriptor *
IODMACommand::getIOMemoryDescriptor() const
{
    IOMemoryDescriptor * mem;

    mem = reserved->fCopyMD;
    if (!mem) mem = __IODEQUALIFY(IOMemoryDescriptor *, fMemory);

    return (mem);
}
IOReturn
IODMACommand::segmentOp(
			void         *reference,
			IODMACommand *target,
			Segment64     segment,
			void         *segments,
			UInt32        segmentIndex)
{
    IOOptionBits op = (uintptr_t) reference;
    addr64_t     maxPhys, address;
    uint64_t     length;
    uint32_t     numPages;
    uint32_t     mask;

    IODMACommandInternal * state = target->reserved;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperAllocValid || !target->fMapper))
	maxPhys = (1ULL << target->fNumAddressBits);
    else
	maxPhys = 0;
    maxPhys--;

    address = segment.fIOVMAddr;
    length  = segment.fLength;

    assert(length);

    if (!state->fMisaligned)
    {
	mask = (segmentIndex ? target->fAlignMaskInternalSegments : state->fSourceAlignMask);
	state->fMisaligned |= (0 != (mask & address));
	if (state->fMisaligned) DEBG("misaligned address %qx:%qx, %x\n", address, length, mask);
    }
    if (!state->fMisaligned)
    {
	mask = target->fAlignMaskLength;
	state->fMisaligned |= (0 != (mask & length));
	if (state->fMisaligned) DEBG("misaligned length %qx:%qx, %x\n", address, length, mask);
    }

    if (state->fMisaligned && (kWalkPreflight & op))
	return (kIOReturnNotAligned);

    if (!state->fDoubleBuffer)
    {
	if ((address + length - 1) <= maxPhys)
	{
	    length = 0;
	}
	else if (address <= maxPhys)
	{
	    DEBG("tail %qx, %qx", address, length);
	    length  = (address + length - maxPhys - 1);
	    address = maxPhys + 1;
	    DEBG("-> %qx, %qx\n", address, length);
	}
    }

    if (!length)
	return (kIOReturnSuccess);

    numPages = atop_64(round_page_64((address & PAGE_MASK) + length));

    if (kWalkPreflight & op)
    {
	state->fCopyPageCount += numPages;
    }
    else
    {
	vm_page_t lastPage;
	lastPage = NULL;
	if (kWalkPrepare & op)
	{
	    lastPage = state->fCopyNext;
	    for (IOItemCount idx = 0; idx < numPages; idx++)
	    {
		vm_page_set_offset(lastPage, atop_64(address) + idx);
		lastPage = vm_page_get_next(lastPage);
	    }
	}

	if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
	{
	    lastPage = state->fCopyNext;
	    for (IOItemCount idx = 0; idx < numPages; idx++)
	    {
		if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
		{
		    addr64_t cpuAddr = address;
		    addr64_t remapAddr;
		    uint64_t chunk;

		    if ((kMapped == MAPTYPE(target->fMappingOptions))
			&& target->fMapper)
		    {
			cpuAddr = target->fMapper->mapToPhysicalAddress(address);
		    }

		    remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
		    if (!state->fDoubleBuffer)
		    {
			remapAddr += (address & PAGE_MASK);
		    }
		    chunk = PAGE_SIZE - (address & PAGE_MASK);
		    if (chunk > length)
			chunk = length;

		    DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
			 (kWalkSyncIn & op) ? "->" : "<-",
			 address, chunk, op);

		    if (kWalkSyncIn & op)
		    {	// cppvNoModSnk
			copypv(remapAddr, cpuAddr, chunk,
			       cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc);
		    }
		    else
		    {
			copypv(cpuAddr, remapAddr, chunk,
			       cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc);
		    }
		    address += chunk;
		    length  -= chunk;
		}
		lastPage = vm_page_get_next(lastPage);
	    }
	}
	state->fCopyNext = lastPage;
    }

    return kIOReturnSuccess;
}
IOBufferMemoryDescriptor *
IODMACommand::createCopyBuffer(IODirection direction, UInt64 length)
{
    mach_vm_address_t mask = 0xFFFFF000;	// state->fSourceAlignMask
    return (IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
							     direction, length, mask));
}
IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn ret = kIOReturnSuccess;
    UInt32   numSegments;
    UInt64   offset;

    if (kWalkPreflight & op)
    {
	state->fMisaligned    = false;
	state->fDoubleBuffer  = false;
	state->fPrepared      = false;
	state->fCopyNext      = NULL;
	state->fCopyPageAlloc = 0;
	state->fCopyPageCount = 0;
	state->fNextRemapPage = NULL;
	state->fCopyMD        = 0;

	if (!(kWalkDoubleBuffer & op))
	{
	    offset = 0;
	    numSegments = 0-1;
	    ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
	}

	op &= ~kWalkPreflight;

	state->fDoubleBuffer      = (state->fMisaligned || state->fForceDoubleBuffer);
	state->fForceDoubleBuffer = false;
	if (state->fDoubleBuffer)
	    state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));

	if (state->fCopyPageCount)
	{
	    vm_page_t mapBase = NULL;

	    DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

	    if (!fMapper && !state->fDoubleBuffer)
	    {
		kern_return_t kr;

		if (fMapper) panic("fMapper copying");

		kr = vm_page_alloc_list(state->fCopyPageCount,
					KMA_LOMEM | KMA_NOPAGEWAIT, &mapBase);
		if (KERN_SUCCESS != kr)
		{
		    DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr);
		    mapBase = NULL;
		}
	    }

	    if (mapBase)
	    {
		state->fCopyPageAlloc = mapBase;
		state->fCopyNext      = state->fCopyPageAlloc;
		offset = 0;
		numSegments = 0-1;
		ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
		state->fPrepared = true;
		op &= ~(kWalkSyncIn | kWalkSyncOut);
	    }
	    else
	    {
		DEBG("alloc IOBMD\n");
		state->fCopyMD = createCopyBuffer(fMDSummary.fDirection, state->fPreparedLength);

		if (state->fCopyMD)
		{
		    ret = kIOReturnSuccess;
		    state->fPrepared = true;
		}
		else
		{
		    DEBG("IODMACommand !alloc IOBMD");
		    return (kIOReturnNoResources);
		}
	    }
	}
    }

    if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
    {
	if (state->fCopyPageCount)
	{
	    DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

	    if (state->fCopyPageAlloc)
	    {
		state->fCopyNext = state->fCopyPageAlloc;
		offset = 0;
		numSegments = 0-1;
		ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
	    }
	    else if (state->fCopyMD)
	    {
		DEBG("sync IOBMD\n");

		if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
		{
		    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

		    IOByteCount bytes;

		    if (kWalkSyncIn & op)
			bytes = poMD->writeBytes(state->fPreparedOffset,
						 state->fCopyMD->getBytesNoCopy(),
						 state->fPreparedLength);
		    else
			bytes = poMD->readBytes(state->fPreparedOffset,
						state->fCopyMD->getBytesNoCopy(),
						state->fPreparedLength);
		    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
		    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
		}
		else
		    ret = kIOReturnSuccess;
	    }
	}
    }

    if (kWalkComplete & op)
    {
	if (state->fCopyPageAlloc)
	{
	    vm_page_free_list(state->fCopyPageAlloc, FALSE);
	    state->fCopyPageAlloc = 0;
	    state->fCopyPageCount = 0;
	}
	if (state->fCopyMD)
	{
	    state->fCopyMD->release();
	    state->fCopyMD = 0;
	}

	state->fPrepared = false;
    }
    return (ret);
}
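
/*
 * A summary added for orientation (not in the original file): the walk ops
 * compose as follows at walkAll()'s three call sites.
 *
 *   prepare()     -> kWalkPrepare | kWalkPreflight [| kWalkSyncOut]
 *	count bounce pages, allocate them (or an IOBMD copy buffer), and
 *	optionally copy md -> bounce before the transfer starts;
 *   synchronize() -> kWalkSyncIn or kWalkSyncOut, plus kWalkSyncAlways
 *	copy between the bounce pages/copy buffer and the md mid-transfer;
 *   complete()    -> kWalkComplete [| kWalkSyncIn]
 *	copy bounce -> md for reads, then free the bounce allocation.
 */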
UInt8
IODMACommand::getNumAddressBits(void)
{
    return (fNumAddressBits);
}

UInt64
IODMACommand::getAlignment(void)
{
    return (fAlignMask + 1);
}

uint32_t
IODMACommand::getAlignmentLength(void)
{
    return (fAlignMaskLength + 1);
}

uint32_t
IODMACommand::getAlignmentInternalSegments(void)
{
    return (fAlignMaskInternalSegments + 1);
}
IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction        outSegFunc,
				       const SegmentOptions * segmentOptions,
				       uint32_t               mappingOptions,
				       IOMapper             * mapper,
				       UInt64                 offset,
				       UInt64                 length,
				       bool                   flushCache,
				       bool                   synchronize)
{
    IOReturn ret;

    if (fActive) return kIOReturnNotPermitted;

    ret = setSpecification(outSegFunc, segmentOptions, mappingOptions, mapper);
    if (kIOReturnSuccess != ret) return (ret);

    ret = prepare(offset, length, flushCache, synchronize);

    return (ret);
}
IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
				       UInt8           numAddressBits,
				       UInt64          maxSegmentSize,
				       MappingOptions  mappingOptions,
				       UInt64          maxTransferSize,
				       UInt32          alignment,
				       IOMapper       *mapper,
				       UInt64          offset,
				       UInt64          length,
				       bool            flushCache,
				       bool            synchronize)
{
    SegmentOptions segmentOptions =
    {
	.fStructSize                = sizeof(segmentOptions),
	.fNumAddressBits            = numAddressBits,
	.fMaxSegmentSize            = maxSegmentSize,
	.fMaxTransferSize           = maxTransferSize,
	.fAlignment                 = alignment,
	.fAlignmentLength           = 1,
	.fAlignmentInternalSegments = alignment
    };

    return (prepareWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper,
				     offset, length, flushCache, synchronize));
}
IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    uint32_t               mappingOptions = fMappingOptions;

    // check specification has been set
    if (!fOutSeg) return (kIOReturnNotReady);

    if (!length) length = fMDSummary.fLength;

    if (length > fMaxTransferSize) return kIOReturnNoSpace;

    if (fActive++)
    {
	if ((state->fPreparedOffset != offset)
	    || (state->fPreparedLength != length))
	    ret = kIOReturnNotReady;
    }
    else
    {
	if (fAlignMaskLength & length) return (kIOReturnNotAligned);

	state->fPreparedOffset = offset;
	state->fPreparedLength = length;

	state->fMapContig     = false;
	state->fMisaligned    = false;
	state->fDoubleBuffer  = false;
	state->fPrepared      = false;
	state->fCopyNext      = NULL;
	state->fCopyPageAlloc = 0;
	state->fCopyPageCount = 0;
	state->fNextRemapPage = NULL;
	state->fCopyMD        = 0;
	state->fLocalMapperAlloc       = 0;
	state->fLocalMapperAllocValid  = false;
	state->fLocalMapperAllocLength = 0;

	state->fSourceAlignMask = fAlignMask;
	if (fMapper)
	    state->fSourceAlignMask &= page_mask;

	state->fCursor = state->fIterateOnly
		|| (!state->fCheckAddressing
		    && (!state->fSourceAlignMask
			|| ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));

	if (!state->fCursor)
	{
	    IOOptionBits op = kWalkPrepare | kWalkPreflight;
	    if (synchronize)
		op |= kWalkSyncOut;
	    ret = walkAll(op);
	}

	if (IS_NONCOHERENT(mappingOptions) && flushCache)
	{
	    if (state->fCopyMD)
	    {
		state->fCopyMD->performOperation(kIOMemoryIncoherentIOStore, 0, length);
	    }
	    else
	    {
		IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory);
		md->performOperation(kIOMemoryIncoherentIOStore, offset, length);
	    }
	}

	if (fMapper)
	{
	    IOMDDMAMapArgs mapArgs;
	    bzero(&mapArgs, sizeof(mapArgs));
	    mapArgs.fMapper  = fMapper;
	    mapArgs.fCommand = this;
	    mapArgs.fMapSpec.device         = state->fDevice;
	    mapArgs.fMapSpec.alignment      = fAlignMask + 1;
	    mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? fNumAddressBits : 64;
	    mapArgs.fLength = state->fPreparedLength;
	    const IOMemoryDescriptor * md = state->fCopyMD;
	    if (md) { mapArgs.fOffset = 0; }
	    else
	    {
		md = fMemory;
		mapArgs.fOffset = state->fPreparedOffset;
	    }
	    ret = md->dmaCommandOperation(kIOMDDMAMap | state->fIterateOnly, &mapArgs, sizeof(mapArgs));
	    //IOLog("dma %p 0x%x 0x%qx-0x%qx 0x%qx-0x%qx\n", this, ret, state->fPreparedOffset, state->fPreparedLength, mapArgs.fAlloc, mapArgs.fAllocLength);

	    if (kIOReturnSuccess == ret)
	    {
		state->fLocalMapperAlloc       = mapArgs.fAlloc;
		state->fLocalMapperAllocValid  = true;
		state->fLocalMapperAllocLength = mapArgs.fAllocLength;
		state->fMapContig              = mapArgs.fMapContig;
	    }
	    if (NULL != IOMapper::gSystem) ret = kIOReturnSuccess;
	}
	if (kIOReturnSuccess == ret) state->fPrepared = true;
    }
    return ret;
}
IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOMemoryDescriptor   * copyMD;

    if (fActive < 1)
	return kIOReturnNotReady;

    if (!--fActive)
    {
	copyMD = state->fCopyMD;
	if (copyMD) copyMD->retain();

	if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
	{
	    if (copyMD)
	    {
		copyMD->performOperation(kIOMemoryIncoherentIOFlush, 0, state->fPreparedLength);
	    }
	    else
	    {
		IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory);
		md->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
	    }
	}

	if (!state->fCursor)
	{
	    IOOptionBits op = kWalkComplete;
	    if (synchronize)
		op |= kWalkSyncIn;
	    ret = walkAll(op);
	}

	if (state->fLocalMapperAllocValid)
	{
	    IOMDDMAMapArgs mapArgs;
	    bzero(&mapArgs, sizeof(mapArgs));
	    mapArgs.fMapper  = fMapper;
	    mapArgs.fCommand = this;
	    mapArgs.fAlloc       = state->fLocalMapperAlloc;
	    mapArgs.fAllocLength = state->fLocalMapperAllocLength;
	    const IOMemoryDescriptor * md = copyMD;
	    if (md) { mapArgs.fOffset = 0; }
	    else
	    {
		md = fMemory;
		mapArgs.fOffset = state->fPreparedOffset;
	    }

	    ret = md->dmaCommandOperation(kIOMDDMAUnmap, &mapArgs, sizeof(mapArgs));

	    state->fLocalMapperAlloc       = 0;
	    state->fLocalMapperAllocValid  = false;
	    state->fLocalMapperAllocLength = 0;
	}
	if (copyMD) copyMD->release();
	state->fPrepared = false;
    }

    return ret;
}
IOReturn
IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
{
    IODMACommandInternal * state = fInternalState;
    if (fActive < 1)
	return (kIOReturnNotReady);

    if (offset)
	*offset = state->fPreparedOffset;
    if (length)
	*length = state->fPreparedLength;

    return (kIOReturnSuccess);
}
IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOOptionBits           op;

    if (kIODirectionOutIn == (kIODirectionOutIn & options))
	return kIOReturnBadArgument;

    if (fActive < 1)
	return kIOReturnNotReady;

    op = 0;
    if (kForceDoubleBuffer & options)
    {
	if (state->fDoubleBuffer) return kIOReturnSuccess;
	ret = complete(false /* invalidateCache */, true /* synchronize */);
	state->fCursor = false;
	state->fForceDoubleBuffer = true;
	ret = prepare(state->fPreparedOffset, state->fPreparedLength, false /* flushCache */, true /* synchronize */);

	return (ret);
    }
    else if (state->fCursor)
	return kIOReturnSuccess;

    if (kIODirectionIn & options)
	op |= kWalkSyncIn | kWalkSyncAlways;
    else if (kIODirectionOut & options)
	op |= kWalkSyncOut | kWalkSyncAlways;

    ret = walkAll(op);

    return ret;
}
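
/*
 * A minimal sketch of mid-transfer synchronization (added for illustration):
 * after the device writes into the DMA buffer but before the CPU reads it,
 * a driver whose command is bounce-buffered pulls the data back with:
 *
 *   cmd->synchronize(kIODirectionIn);	// bounce pages/copy buffer -> md
 *
 * Passing kForceDoubleBuffer | kIODirectionOut instead re-prepares the
 * command with fForceDoubleBuffer set, guaranteeing a bounce copy even for
 * aligned, addressable memory, which is useful when the original pages must
 * stay untouched by the device.
 */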
struct IODMACommandTransferContext
{
    void *   buffer;
    UInt64   bufferOffset;
    UInt64   remaining;
    UInt32   op;
};
enum
{
    kIODMACommandTransferOpReadBytes  = 1,
    kIODMACommandTransferOpWriteBytes = 2
};
IOReturn
IODMACommand::transferSegment(void         *reference,
			      IODMACommand *target,
			      Segment64     segment,
			      void         *segments,
			      UInt32        segmentIndex)
{
    IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
    UInt64   length  = min(segment.fLength, context->remaining);
    addr64_t ioAddr  = segment.fIOVMAddr;
    addr64_t cpuAddr = ioAddr;

    context->remaining -= length;

    while (length)
    {
	UInt64 copyLen = length;
	if ((kMapped == MAPTYPE(target->fMappingOptions))
	    && target->fMapper)
	{
	    cpuAddr = target->fMapper->mapToPhysicalAddress(ioAddr);
	    copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
	    ioAddr += copyLen;
	}

	switch (context->op)
	{
	case kIODMACommandTransferOpReadBytes:
	    copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen,
		   cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
	    break;
	case kIODMACommandTransferOpWriteBytes:
	    copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen,
		   cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
	    break;
	}
	length                -= copyLen;
	context->bufferOffset += copyLen;
    }

    return (context->remaining ? kIOReturnSuccess : kIOReturnOverrun);
}
UInt64
IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
{
    IODMACommandInternal *      state = fInternalState;
    IODMACommandTransferContext context;
    Segment64                   segments[1];
    UInt32                      numSegments = 0-1;

    if (fActive < 1)
	return (0);

    if (offset >= state->fPreparedLength)
	return (0);
    length = min(length, state->fPreparedLength - offset);

    context.buffer       = buffer;
    context.bufferOffset = 0;
    context.remaining    = length;
    context.op           = transferOp;
    (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);

    return (length - context.remaining);
}
UInt64
IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length));
}

UInt64
IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length));
}
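
/*
 * Illustrative only: copying a completion status out of the prepared range
 * without mapping it into the kernel.  "statusOffset" and "MyStatusBlock"
 * are hypothetical driver-side names.
 *
 *   MyStatusBlock status;
 *   UInt64 got = cmd->readBytes(statusOffset, &status, sizeof(status));
 *   if (got != sizeof(status))
 *	; // short transfer: offset was at or near the end of the range
 *
 * Both helpers return the byte count actually moved; transfer() clamps the
 * request to fPreparedLength - offset.
 */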
IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
			      void   *segmentsP,
			      UInt32 *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
			    offsetP, segmentsP, numSegmentsP));
}
IOReturn
IODMACommand::genIOVMSegments(uint32_t op,
			      InternalSegmentFunction outSegFunc,
			      void   *reference,
			      UInt64 *offsetP,
			      void   *segmentsP,
			      UInt32 *numSegmentsP)
{
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp = kIOMDWalkSegments;
    IOReturn               ret  = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive)
	return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
	return kIOReturnBadArgument;

    IOMDDMAWalkSegmentArgs *state =
	(IOMDDMAWalkSegmentArgs *)(void *) fState;

    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
	return kIOReturnOverrun;

    if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
	state->fOffset                = 0;
	internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
	internalState->fNextRemapPage = NULL;
	internalState->fNewMD         = false;
	mdOp                          = kIOMDFirstSegment;
	if (fMapper)
	{
	    if (internalState->fLocalMapperAllocValid)
	    {
		state->fMapped     = kIOMDDMAWalkMappedLocal;
		state->fMappedBase = internalState->fLocalMapperAlloc;
	    }
	    else state->fMapped = true;
	}
    }

    UInt32    segIndex    = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg      = { 0, 0 };
    bool      curSegValid = false;
    addr64_t  maxPhys;

    if (fNumAddressBits && (fNumAddressBits < 64))
	maxPhys = (1ULL << fNumAddressBits);
    else
	maxPhys = 0;
    maxPhys--;

    while (internalState->fIOVMAddrValid || (state->fOffset < memLength))
    {
	// state = next seg
	if (!internalState->fIOVMAddrValid) {

	    IOReturn rtn;

	    state->fOffset = offset;
	    state->fLength = memLength - offset;

	    if (internalState->fMapContig && internalState->fLocalMapperAllocValid)
	    {
		state->fIOVMAddr = internalState->fLocalMapperAlloc + offset - internalState->fPreparedOffset;
		rtn = kIOReturnSuccess;
#if 0
		{
		    uint64_t checkOffset;
		    IOPhysicalLength segLen;
		    for (checkOffset = 0; checkOffset < state->fLength; )
		    {
			addr64_t phys = const_cast<IOMemoryDescriptor *>(fMemory)->getPhysicalSegment(checkOffset + offset, &segLen, kIOMemoryMapperNone);
			if (fMapper->mapAddr(state->fIOVMAddr + checkOffset) != phys)
			{
			    panic("%llx != %llx:%llx, %llx phys: %llx %llx\n", offset,
				  state->fIOVMAddr + checkOffset, fMapper->mapAddr(state->fIOVMAddr + checkOffset), state->fLength,
				  phys, checkOffset);
			}
			checkOffset += page_size - (phys & page_mask);
		    }
		}
#endif
	    }
	    else
	    {
		const IOMemoryDescriptor * memory =
		    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
		rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
		mdOp = kIOMDWalkSegments;
	    }

	    if (rtn == kIOReturnSuccess)
	    {
		internalState->fIOVMAddrValid = true;
		assert(state->fLength);
		if (curSegValid && ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr)) {
		    UInt64 length   = state->fLength;
		    offset         += length;
		    curSeg.fLength += length;
		    internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
		}
	    }
	    else if (rtn == kIOReturnOverrun)
		internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0;	// At end
	    else
		return rtn;
	}

	// seg = state, offset = end of seg
	if (!curSegValid)
	{
	    UInt64 length    = state->fLength;
	    offset          += length;
	    curSeg.fIOVMAddr = state->fIOVMAddr;
	    curSeg.fLength   = length;
	    curSegValid      = true;
	    internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
	}

	if (!internalState->fIOVMAddrValid)
	{
	    // maxPhys
	    if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
	    {
		if (internalState->fCursor)
		{
		    curSegValid = curSeg.fIOVMAddr = 0;
		    ret = kIOReturnMessageTooLarge;
		    break;
		}
		else if (curSeg.fIOVMAddr <= maxPhys)
		{
		    UInt64 remain, newLength;

		    newLength        = (maxPhys + 1 - curSeg.fIOVMAddr);
		    DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
		    remain           = curSeg.fLength - newLength;
		    state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
		    internalState->fIOVMAddrValid = true;
		    curSeg.fLength   = newLength;
		    state->fLength   = remain;
		    offset          -= remain;
		}
		else
		{
		    UInt64    addr = curSeg.fIOVMAddr;
		    ppnum_t   addrPage = atop_64(addr);
		    vm_page_t remap = NULL;
		    UInt64    remain, newLength;

		    DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength);

		    remap = internalState->fNextRemapPage;
		    if (remap && (addrPage == vm_page_get_offset(remap)))
		    {
		    }
		    else for (remap = internalState->fCopyPageAlloc;
			      remap && (addrPage != vm_page_get_offset(remap));
			      remap = vm_page_get_next(remap))
		    {
		    }

		    if (!remap) panic("no remap page found");

		    curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap))
					+ (addr & PAGE_MASK);
		    curSegValid = true;
		    internalState->fNextRemapPage = vm_page_get_next(remap);

		    newLength = PAGE_SIZE - (addr & PAGE_MASK);
		    if (newLength < curSeg.fLength)
		    {
			remain           = curSeg.fLength - newLength;
			state->fIOVMAddr = addr + newLength;
			internalState->fIOVMAddrValid = true;
			curSeg.fLength   = newLength;
			state->fLength   = remain;
			offset          -= remain;
		    }
		    DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset);
		}
	    }

	    // reduce size of output segment
	    uint64_t reduce, leftover = 0;

	    // fMaxSegmentSize
	    if (curSeg.fLength > fMaxSegmentSize)
	    {
		leftover        += curSeg.fLength - fMaxSegmentSize;
		curSeg.fLength   = fMaxSegmentSize;
		state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
		internalState->fIOVMAddrValid = true;
	    }

	    // alignment: current length
	    reduce = (curSeg.fLength & fAlignMaskLength);
	    if (reduce && (curSeg.fLength > reduce))
	    {
		leftover        += reduce;
		curSeg.fLength  -= reduce;
		state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
		internalState->fIOVMAddrValid = true;
	    }

	    // alignment: next address
	    reduce = (state->fIOVMAddr & fAlignMaskInternalSegments);
	    if (reduce && (curSeg.fLength > reduce))
	    {
		leftover        += reduce;
		curSeg.fLength  -= reduce;
		state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
		internalState->fIOVMAddrValid = true;
	    }

	    if (leftover)
	    {
		DEBG("reduce seg by 0x%llx @ 0x%llx [0x%llx, 0x%llx]\n",
		     leftover, offset,
		     curSeg.fIOVMAddr, curSeg.fLength);
		state->fLength = leftover;
		offset        -= leftover;
	    }

	    if (internalState->fCursor)
	    {
		bool     misaligned;
		uint32_t mask;

		mask = (segIndex ? fAlignMaskInternalSegments : internalState->fSourceAlignMask);
		misaligned = (0 != (mask & curSeg.fIOVMAddr));
		if (!misaligned)
		{
		    mask = fAlignMaskLength;
		    misaligned |= (0 != (mask & curSeg.fLength));
		}
		if (misaligned)
		{
		    DEBG("cursor misaligned %qx:%qx\n", curSeg.fIOVMAddr, curSeg.fLength);
		    curSegValid = curSeg.fIOVMAddr = 0;
		    ret = kIOReturnNotAligned;
		    break;
		}
	    }

	    if (offset >= memLength)
	    {
		curSeg.fLength -= (offset - memLength);
		offset = memLength;
		internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0;	// At end
		break;
	    }
	}

	if (internalState->fIOVMAddrValid) {
	    if ((segIndex + 1 == numSegments))
		break;

	    ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
	    curSegValid = curSeg.fIOVMAddr = 0;
	    if (kIOReturnSuccess != ret)
		break;
	}
    }

    if (curSegValid)
    {
	ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
	state->fOffset = offset;
	*offsetP       = offset - internalState->fPreparedOffset;
	*numSegmentsP  = segIndex;
    }

    return ret;
}
IOReturn
IODMACommand::clientOutputSegment(
	void *reference, IODMACommand *target,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    SegmentFunction segmentFunction = (SegmentFunction) reference;
    IOReturn        ret             = kIOReturnSuccess;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64)
	&& ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
	&& (target->reserved->fLocalMapperAllocValid || !target->fMapper))
    {
	DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
	ret = kIOReturnMessageTooLarge;
    }

    if (!(*segmentFunction)(target, segment, vSegList, outSegIndex))
    {
	DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
	ret = kIOReturnMessageTooLarge;
    }

    return (ret);
}
IOReturn
IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
			      UInt64   *offsetP,
			      void     *segmentsP,
			      UInt32   *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
			    offsetP, segmentsP, numSegmentsP));
}
bool
IODMACommand::OutputHost32(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment32 *base = (Segment32 *) vSegList;
    base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
    base[outSegIndex].fLength   = (UInt32) segment.fLength;
    return true;
}

bool
IODMACommand::OutputBig32(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteBigInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle32(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteLittleInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputHost64(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment64 *base = (Segment64 *) vSegList;
    base[outSegIndex] = segment;
    return true;
}

bool
IODMACommand::OutputBig64(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteBigInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle64(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteLittleInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}
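
/*
 * A minimal sketch of consuming segments with the helpers above
 * (illustrative; "kMaxSegs" is a hypothetical per-driver limit):
 *
 *   Segment64 segs[kMaxSegs];
 *   UInt64    offset = 0;
 *   UInt64    preparedOffset, preparedLength;
 *   cmd->getPreparedOffsetAndLength(&preparedOffset, &preparedLength);
 *   while (offset < preparedLength)
 *   {
 *	UInt32 numSegs = kMaxSegs;
 *	if (kIOReturnSuccess != cmd->genIOVMSegments(&offset, segs, &numSegs))
 *	    break;
 *	// ...program numSegs scatter/gather entries into the hardware...
 *   }
 *
 * The parameterless variant of genIOVMSegments() uses fOutSeg, which was set
 * by setSpecification().  OutputHost64 fills Segment64 records in host byte
 * order, while the Big/Little variants serialize 32- or 64-bit records for
 * descriptor rings with a fixed endianness.
 */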