/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>

#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"
#define MAPTYPE(type)         ((UInt) (type) & kTypeMask)
#define IS_MAPPED(type)       (MAPTYPE(type) == kMapped)
#define IS_BYPASSED(type)     (MAPTYPE(type) == kBypassed)
#define IS_NONCOHERENT(type)  (MAPTYPE(type) == kNonCoherent)
enum
{
    kWalkSyncIn       = 0x01,   // bounce -> md
    kWalkSyncOut      = 0x02,   // bounce <- md
    kWalkSyncAlways   = 0x04,
    kWalkPreflight    = 0x08,
    kWalkDoubleBuffer = 0x10,
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80
};
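
// These walk ops are internal to this file. walkAll() and segmentOp() combine
// them as a bitmask: (kWalkPrepare | kWalkPreflight) sizes and allocates any
// bounce pages before I/O, then kWalkSyncIn / kWalkSyncOut copy data between
// the bounce buffer and the memory descriptor ("md") in the chosen direction.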
#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary
#if 1
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction)                                      \
        ((kIODirectionNone == (direction))                                  \
            || (kWalkSyncAlways & (op))                                     \
            || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)   \
                                                    & (direction)))
#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif

#if 0
#define DEBG(fmt, args...)      { IOLog(fmt, ## args); kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif
/**************************** class IODMACommand ***************************/

#define super IOCommand
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);

OSMetaClassDefineReservedUsed(IODMACommand,  0);
OSMetaClassDefineReservedUsed(IODMACommand,  1);
OSMetaClassDefineReservedUsed(IODMACommand,  2);
OSMetaClassDefineReservedUnused(IODMACommand,  3);
OSMetaClassDefineReservedUnused(IODMACommand,  4);
OSMetaClassDefineReservedUnused(IODMACommand,  5);
OSMetaClassDefineReservedUnused(IODMACommand,  6);
OSMetaClassDefineReservedUnused(IODMACommand,  7);
OSMetaClassDefineReservedUnused(IODMACommand,  8);
OSMetaClassDefineReservedUnused(IODMACommand,  9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);
IODMACommand *
IODMACommand::withSpecification(SegmentFunction outSegFunc,
                                UInt8           numAddressBits,
                                UInt64          maxSegmentSize,
                                MappingOptions  mappingOptions,
                                UInt64          maxTransferSize,
                                UInt32          alignment,
                                IOMapper       *mapper,
                                void           *refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc,
                                         numAddressBits, maxSegmentSize,
                                         mappingOptions, maxTransferSize,
                                         alignment,      mapper, refCon))
    {
        me->release();
        return 0;
    };

    return me;
}
IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    return withSpecification(fOutSeg, fNumAddressBits, fMaxSegmentSize,
            fMappingOptions, fMaxTransferSize, fAlignMask + 1, fMapper, refCon);
}
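
/*
 * A minimal usage sketch (not part of this file): a driver typically creates
 * one command per outstanding I/O. The 32-bit, little-endian choices below
 * are illustrative assumptions, not requirements of this API.
 *
 *    IODMACommand * cmd = IODMACommand::withSpecification(
 *        IODMACommand::OutputLittle32,  // emit a little-endian 32-bit S/G list
 *        32,                            // device generates 32-bit addresses
 *        65536,                         // max bytes per segment
 *        IODMACommand::kMapped,         // go through the system mapper, if any
 *        0,                             // no overall transfer limit
 *        1,                             // byte alignment
 *        0,                             // default mapper
 *        0);                            // no refCon
 *    // cmd is returned retained; release() it when the driver is done.
 */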
#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)
bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
                                    UInt8           numAddressBits,
                                    UInt64          maxSegmentSize,
                                    MappingOptions  mappingOptions,
                                    UInt64          maxTransferSize,
                                    UInt32          alignment,
                                    IOMapper       *mapper,
                                    void           *refCon)
{
    if (!super::init() || !outSegFunc || !numAddressBits)
        return false;

    bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
                 || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
        if (!numAddressBits)
            numAddressBits = 32;
        else if (numAddressBits > 32)
            return false;               // Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
        return false;

    if (!maxSegmentSize)
        maxSegmentSize--;       // Set Max segment to -1
    if (!maxTransferSize)
        maxTransferSize--;      // Set Max transfer to -1

    if (!mapper)
    {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    fNumSegments     = 0;
    fBypassMask      = 0;
    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
        alignment = 1;
    fAlignMask       = alignment - 1;
    fMapper          = mapper;
    fRefCon          = refCon;

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:                   break;
    case kNonCoherent: fMapper = 0; break;
    case kBypassed:
        if (mapper && !mapper->getBypassMask(&fBypassMask))
            return false;
        break;
    default:
        return false;
    };

    if (fMapper)
        fMapper->retain();

    reserved = IONew(IODMACommandInternal, 1);
    if (!reserved)
        return false;
    bzero(reserved, sizeof(IODMACommandInternal));

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));

    return true;
}
void
IODMACommand::free()
{
    if (reserved)
        IODelete(reserved, IODMACommandInternal, 1);

    if (fMapper)
        fMapper->release();

    super::free();
}
IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    if (mem == fMemory)
    {
        if (!autoPrepare)
        {
            while (fActive)
                complete();
        }
        return kIOReturnSuccess;
    }

    if (fMemory) {
        // As we are almost certainly being called from a work loop thread,
        // if fActive is true it is probably not a good time to potentially
        // block. Just test for it and return an error.
        if (fActive)
            return kIOReturnBusy;
        clearMemoryDescriptor();
    }

    if (mem) {
        bzero(&fMDSummary, sizeof(fMDSummary));
        IOReturn rtn = mem->dmaCommandOperation(
                kIOMDGetCharacteristics,
                &fMDSummary, sizeof(fMDSummary));
        if (rtn)
            return rtn;

        ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

        if ((kMapped == MAPTYPE(fMappingOptions))
            && fMapper
            && (!fNumAddressBits || (fNumAddressBits >= 31)))
            // assuming mapped space is 2G
            fInternalState->fCheckAddressing = false;
        else
            fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));

        fInternalState->fNewMD = true;
        mem->retain();
        fMemory = mem;

        mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
        if (autoPrepare)
            return prepare();
    };

    return kIOReturnSuccess;
}
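
/*
 * Typical driver flow, as a hedged sketch (buffer and the 32-entry table are
 * hypothetical, and Segment32 output assumes the command was created with a
 * 32-bit output function):
 *
 *    cmd->setMemoryDescriptor(buffer);      // also retains the descriptor
 *    cmd->prepare();                        // wire; set up bounce/mapping
 *    UInt64    offset  = 0;
 *    Segment32 segs[32];
 *    UInt32    numSegs = 32;
 *    cmd->genIOVMSegments(&offset, segs, &numSegs);  // fill the S/G list
 *    // ... program the hardware with segs[0..numSegs-1], run the I/O ...
 *    cmd->complete();                       // sync bounce data, unwire
 *    cmd->clearMemoryDescriptor();
 */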
IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
    if (fActive && !autoComplete)
        return (kIOReturnNotReady);

    if (fMemory) {
        while (fActive)
            complete();
        fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
        fMemory->release();
        fMemory = 0;
    }

    return (kIOReturnSuccess);
}
const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
    return fMemory;
}
IOReturn
IODMACommand::segmentOp(
                        void         *reference,
                        IODMACommand *target,
                        Segment64     segment,
                        void         *segments,
                        UInt32        segmentIndex)
{
    IOOptionBits op = (uintptr_t) reference;
    addr64_t     maxPhys, address;
    uint64_t     length;
    uint32_t     numPages;

    IODMACommandInternal * state = target->reserved;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64) && !state->fLocalMapper)
        maxPhys = (1ULL << target->fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    address = segment.fIOVMAddr;
    length  = segment.fLength;

    assert(address);
    assert(length);

    if (!state->fMisaligned)
    {
        state->fMisaligned |= (0 != (state->fSourceAlignMask & address));
        if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, state->fSourceAlignMask);
    }

    if (state->fMisaligned && (kWalkPreflight & op))
        return (kIOReturnNotAligned);

    if (!state->fDoubleBuffer)
    {
        if ((address + length - 1) <= maxPhys)
        {
            length = 0;
        }
        else if (address <= maxPhys)
        {
            DEBG("tail %qx, %qx", address, length);
            length  = (address + length - maxPhys - 1);
            address = maxPhys + 1;
            DEBG("-> %qx, %qx\n", address, length);
        }
    }

    if (!length)
        return (kIOReturnSuccess);

    numPages = atop_64(round_page_64((address & PAGE_MASK) + length));

    if (kWalkPreflight & op)
    {
        state->fCopyPageCount += numPages;
    }
    else
    {
        vm_page_t lastPage = NULL;

        if (kWalkPrepare & op)
        {
            lastPage = state->fCopyNext;
            for (IOItemCount idx = 0; idx < numPages; idx++)
            {
                vm_page_set_offset(lastPage, atop_64(address) + idx);
                lastPage = vm_page_get_next(lastPage);
            }
        }

        if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
        {
            lastPage = state->fCopyNext;
            for (IOItemCount idx = 0; idx < numPages; idx++)
            {
                if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
                {
                    addr64_t remapAddr;
                    uint64_t chunk;

                    remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
                    if (!state->fDoubleBuffer)
                    {
                        remapAddr += (address & PAGE_MASK);
                    }
                    chunk = PAGE_SIZE - (address & PAGE_MASK);
                    if (chunk > length)
                        chunk = length;

                    DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
                            (kWalkSyncIn & op) ? "->" : "<-",
                            address, chunk, op);

                    if (kWalkSyncIn & op)
                    {   // bounce buffer -> md
                        copypv(remapAddr, address, chunk,
                                cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc);
                    }
                    else
                    {   // md -> bounce buffer
                        copypv(address, remapAddr, chunk,
                                cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc);
                    }
                    address += chunk;
                    length  -= chunk;
                }
                lastPage = vm_page_get_next(lastPage);
            }
        }
        state->fCopyNext = lastPage;
    }

    return kIOReturnSuccess;
}
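
// walkAll() drives segmentOp() over the whole prepared range. With
// kWalkPreflight it counts the pages that fall outside the device's
// addressable range and allocates bounce pages (or a whole copy buffer if
// the transfer is misaligned); with kWalkSyncIn / kWalkSyncOut it copies
// data between the original memory and the bounce pages; with kWalkComplete
// it frees whatever the preflight allocated.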
IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn     ret = kIOReturnSuccess;
    UInt32       numSegments;
    UInt64       offset;

    if (kWalkPreflight & op)
    {
        state->fMapContig     = false;
        state->fMisaligned    = false;
        state->fDoubleBuffer  = false;
        state->fPrepared      = false;
        state->fCopyNext      = NULL;
        state->fCopyPageAlloc = 0;
        state->fLocalMapperPageAlloc = 0;
        state->fCopyPageCount = 0;
        state->fNextRemapPage = NULL;
        state->fCopyMD        = 0;

        if (!(kWalkDoubleBuffer & op))
        {
            offset = 0;
            numSegments = 0-1;
            ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
        }

        op &= ~kWalkPreflight;

        state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
        if (state->fDoubleBuffer)
            state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));

        if (state->fCopyPageCount)
        {
            vm_page_t mapBase = NULL;

            DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

            if (!state->fDoubleBuffer)
            {
                kern_return_t kr;
                kr = vm_page_alloc_list(state->fCopyPageCount,
                                        KMA_LOMEM | KMA_NOPAGEWAIT, &mapBase);
                if (KERN_SUCCESS != kr)
                {
                    DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr);
                    mapBase = NULL;
                }
            }

            if (mapBase)
            {
                state->fCopyPageAlloc = mapBase;
                state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
                state->fPrepared = true;
                op &= ~(kWalkSyncIn | kWalkSyncOut);
            }
            else
            {
                DEBG("alloc IOBMD\n");
                mach_vm_address_t mask = 0xFFFFF000; //state->fSourceAlignMask
                state->fCopyMD = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
                                    fMDSummary.fDirection, state->fPreparedLength, mask);

                if (state->fCopyMD)
                {
                    ret = kIOReturnSuccess;
                    state->fPrepared = true;
                }
                else
                {
                    DEBG("IODMACommand !iovmAlloc");
                    return (kIOReturnNoResources);
                }
            }
        }

        if (state->fLocalMapper)
        {
            state->fLocalMapperPageCount = atop_64(round_page(
                    state->fPreparedLength + ((state->fPreparedOffset + fMDSummary.fPageAlign) & page_mask)));
            state->fLocalMapperPageAlloc = fMapper->iovmAllocDMACommand(this, state->fLocalMapperPageCount);
            state->fMapContig = true;
        }
    }

    if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
    {
        if (state->fCopyPageCount)
        {
            DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

            if (state->fCopyPageAlloc)
            {
                state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
            }
            else if (state->fCopyMD)
            {
                DEBG("sync IOBMD\n");

                if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
                {
                    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

                    IOByteCount bytes;

                    if (kWalkSyncIn & op)
                        bytes = poMD->writeBytes(state->fPreparedOffset,
                                                 state->fCopyMD->getBytesNoCopy(),
                                                 state->fPreparedLength);
                    else
                        bytes = poMD->readBytes(state->fPreparedOffset,
                                                state->fCopyMD->getBytesNoCopy(),
                                                state->fPreparedLength);
                    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
                    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
                }
                else
                    ret = kIOReturnSuccess;
            }
        }
    }

    if (kWalkComplete & op)
    {
        if (state->fLocalMapperPageAlloc)
        {
            fMapper->iovmFreeDMACommand(this, state->fLocalMapperPageAlloc, state->fLocalMapperPageCount);
            state->fLocalMapperPageAlloc = 0;
            state->fLocalMapperPageCount = 0;
        }
        if (state->fCopyPageAlloc)
        {
            vm_page_free_list(state->fCopyPageAlloc, FALSE);
            state->fCopyPageAlloc = 0;
            state->fCopyPageCount = 0;
        }
        if (state->fCopyMD)
        {
            state->fCopyMD->release();
            state->fCopyMD = 0;
        }

        state->fPrepared = false;
    }
    return (ret);
}
UInt8
IODMACommand::getNumAddressBits(void)
{
    return (fNumAddressBits);
}

UInt32
IODMACommand::getAlignment(void)
{
    return (fAlignMask + 1);
}
IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
                                       UInt8           numAddressBits,
                                       UInt64          maxSegmentSize,
                                       MappingOptions  mappingOptions,
                                       UInt64          maxTransferSize,
                                       UInt32          alignment,
                                       IOMapper       *mapper,
                                       UInt64          offset,
                                       UInt64          length,
                                       bool            flushCache,
                                       bool            synchronize)
{
    if (fActive)
        return kIOReturnNotPermitted;

    if (!outSegFunc || !numAddressBits)
        return kIOReturnBadArgument;

    bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
                 || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
        if (!numAddressBits)
            numAddressBits = 32;
        else if (numAddressBits > 32)
            return kIOReturnBadArgument;        // Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
        return kIOReturnBadArgument;

    if (!maxSegmentSize)
        maxSegmentSize--;       // Set Max segment to -1
    if (!maxTransferSize)
        maxTransferSize--;      // Set Max transfer to -1

    if (!mapper)
    {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:                   break;
    case kNonCoherent: fMapper = 0; break;
    case kBypassed:
        if (mapper && !mapper->getBypassMask(&fBypassMask))
            return kIOReturnBadArgument;
        break;
    default:
        return kIOReturnBadArgument;
    };

    fNumSegments     = 0;
    fBypassMask      = 0;
    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
        alignment = 1;
    fAlignMask       = alignment - 1;
    if (mapper != fMapper)
    {
        mapper->retain();
        fMapper->release();
        fMapper = mapper;
    }

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));

    return prepare(offset, length, flushCache, synchronize);
}
IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    MappingOptions mappingOptions = fMappingOptions;

    if (!length)
        length = fMDSummary.fLength;

    if (length > fMaxTransferSize)
        return kIOReturnNoSpace;

    if (IS_NONCOHERENT(mappingOptions) && flushCache) {
        IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

        poMD->performOperation(kIOMemoryIncoherentIOStore, offset, length);
    }
    if (fActive++)
    {
        if ((state->fPreparedOffset != offset)
          || (state->fPreparedLength != length))
            ret = kIOReturnNotReady;
    }
    else
    {
        state->fPreparedOffset = offset;
        state->fPreparedLength = length;

        state->fMapContig      = false;
        state->fMisaligned     = false;
        state->fDoubleBuffer   = false;
        state->fPrepared       = false;
        state->fCopyNext       = NULL;
        state->fCopyPageAlloc  = 0;
        state->fCopyPageCount  = 0;
        state->fNextRemapPage  = NULL;
        state->fCopyMD         = 0;
        state->fLocalMapperPageAlloc = 0;
        state->fLocalMapperPageCount = 0;

        state->fLocalMapper    = (fMapper && (fMapper != IOMapper::gSystem));

        state->fSourceAlignMask = fAlignMask;
        if (state->fLocalMapper)
            state->fSourceAlignMask &= page_mask;

        state->fCursor = state->fIterateOnly
                        || (!state->fCheckAddressing
                            && !state->fLocalMapper
                            && (!state->fSourceAlignMask
                                || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));

        if (!state->fCursor)
        {
            IOOptionBits op = kWalkPrepare | kWalkPreflight;
            if (synchronize)
                op |= kWalkSyncOut;
            ret = walkAll(op);
        }
        if (kIOReturnSuccess == ret)
            state->fPrepared = true;
    }
    return ret;
}
IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    if (fActive < 1)
        return kIOReturnNotReady;

    if (!--fActive)
    {
        if (!state->fCursor)
        {
            IOOptionBits op = kWalkComplete;
            if (synchronize)
                op |= kWalkSyncIn;
            ret = walkAll(op);
        }
        state->fPrepared = false;

        if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
        {
            IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

            poMD->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
        }
    }

    return ret;
}
IOReturn
IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
{
    IODMACommandInternal * state = fInternalState;
    if (fActive < 1)
        return (kIOReturnNotReady);

    if (offset)
        *offset = state->fPreparedOffset;
    if (length)
        *length = state->fPreparedLength;

    return (kIOReturnSuccess);
}
IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOOptionBits           op;

    if (kIODirectionOutIn == (kIODirectionOutIn & options))
        return kIOReturnBadArgument;

    if (fActive < 1)
        return kIOReturnNotReady;

    op = 0;
    if (kForceDoubleBuffer & options)
    {
        if (state->fDoubleBuffer)
            return kIOReturnSuccess;
        if (state->fCursor)
            state->fCursor = false;
        else
            ret = walkAll(kWalkComplete);

        op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
    }
    else if (state->fCursor)
        return kIOReturnSuccess;

    if (kIODirectionIn & options)
        op |= kWalkSyncIn | kWalkSyncAlways;
    else if (kIODirectionOut & options)
        op |= kWalkSyncOut | kWalkSyncAlways;

    ret = walkAll(op);

    return ret;
}
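
// readBytes()/writeBytes() are implemented by walking the prepared segments
// with transferSegment(), carrying this context from one segment to the next.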
struct IODMACommandTransferContext
{
    void *   buffer;
    UInt64   bufferOffset;
    UInt64   remaining;
    UInt32   op;
};
enum
{
    kIODMACommandTransferOpReadBytes  = 1,
    kIODMACommandTransferOpWriteBytes = 2
};
IOReturn
IODMACommand::transferSegment(void         *reference,
                              IODMACommand *target,
                              Segment64     segment,
                              void         *segments,
                              UInt32        segmentIndex)
{
    IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
    UInt64   length  = min(segment.fLength, context->remaining);
    addr64_t ioAddr  = segment.fIOVMAddr;
    addr64_t cpuAddr = ioAddr;

    context->remaining -= length;

    while (length)
    {
        UInt64 copyLen = length;
        if ((kMapped == MAPTYPE(target->fMappingOptions))
            && target->fMapper)
        {
            cpuAddr = target->fMapper->mapAddr(ioAddr);
            copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
            ioAddr += copyLen;
        }

        switch (context->op)
        {
            case kIODMACommandTransferOpReadBytes:
                copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen,
                    cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
                break;
            case kIODMACommandTransferOpWriteBytes:
                copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen,
                        cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
                break;
        }
        length                -= copyLen;
        context->bufferOffset += copyLen;
    }

    return (context->remaining ? kIOReturnSuccess : kIOReturnOverrun);
}
UInt64
IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
{
    IODMACommandInternal * state = fInternalState;
    IODMACommandTransferContext context;
    Segment64     segments[1];
    UInt32        numSegments = 0-1;

    if (fActive < 1)
        return (0);

    if (offset >= state->fPreparedLength)
        return (0);
    length = min(length, state->fPreparedLength - offset);

    context.buffer       = buffer;
    context.bufferOffset = 0;
    context.remaining    = length;
    context.op           = transferOp;
    (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);

    return (length - context.remaining);
}
UInt64
IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length));
}

UInt64
IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length));
}
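
/*
 * Hedged example: peeking at prepared memory through the command, e.g. to
 * read back a completion status the device wrote (cmd and statusOffset are
 * illustrative names only):
 *
 *    UInt32 status;
 *    if (cmd->readBytes(statusOffset, &status, sizeof(status)) == sizeof(status))
 *        ; // use status
 *
 * Both calls return the byte count actually transferred, clipped to the
 * prepared range.
 */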
IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
                            offsetP, segmentsP, numSegmentsP));
}
IOReturn
IODMACommand::genIOVMSegments(uint32_t op,
                              InternalSegmentFunction outSegFunc,
                              void   *reference,
                              UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp = kIOMDWalkSegments;
    IOReturn               ret  = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive)
        return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
        return kIOReturnBadArgument;

    IOMDDMAWalkSegmentArgs *state =
        (IOMDDMAWalkSegmentArgs *) fState;

    UInt64    offset    = *offsetP + internalState->fPreparedOffset;
    UInt64    memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
        return kIOReturnOverrun;

    if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
        state->fOffset                = 0;
        state->fIOVMAddr              = 0;
        internalState->fNextRemapPage = NULL;
        internalState->fNewMD         = false;
        state->fMapped                = (IS_MAPPED(fMappingOptions) && fMapper);
        mdOp                          = kIOMDFirstSegment;
    };

    UInt64    bypassMask = fBypassMask;
    UInt32    segIndex = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg = { 0, 0 };
    addr64_t  maxPhys;

    if (fNumAddressBits && (fNumAddressBits < 64))
        maxPhys = (1ULL << fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    while (state->fIOVMAddr || (state->fOffset < memLength))
    {
        // state = next seg
        if (!state->fIOVMAddr) {

            IOReturn rtn;

            state->fOffset = offset;
            state->fLength = memLength - offset;

            if (internalState->fMapContig && (kWalkClient & op))
            {
                ppnum_t pageNum = internalState->fLocalMapperPageAlloc;
                state->fIOVMAddr = ptoa_64(pageNum)
                                        + offset - internalState->fPreparedOffset;
                rtn = kIOReturnSuccess;
            }
            else
            {
                const IOMemoryDescriptor * memory =
                    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
                rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
                mdOp = kIOMDWalkSegments;
            }

            if (rtn == kIOReturnSuccess)
            {
                assert(state->fIOVMAddr);
                assert(state->fLength);
                if ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr) {
                    UInt64 length = state->fLength;
                    offset          += length;
                    curSeg.fLength  += length;
                    state->fIOVMAddr = 0;
                }
            }
            else if (rtn == kIOReturnOverrun)
                state->fIOVMAddr = state->fLength = 0;  // At end
            else
                return rtn;
        }

        // seg = state, offset = end of seg
        if (!curSeg.fIOVMAddr)
        {
            UInt64 length = state->fLength;
            offset          += length;
            curSeg.fIOVMAddr = state->fIOVMAddr | bypassMask;
            curSeg.fLength   = length;
            state->fIOVMAddr = 0;
        }

        if (!state->fIOVMAddr)
        {
            if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
            {
                if (internalState->fCursor)
                {
                    curSeg.fIOVMAddr = 0;
                    ret = kIOReturnMessageTooLarge;
                    break;
                }
                else if (curSeg.fIOVMAddr <= maxPhys)
                {
                    UInt64 remain, newLength;

                    newLength        = (maxPhys + 1 - curSeg.fIOVMAddr);
                    DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
                    remain           = curSeg.fLength - newLength;
                    state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
                    curSeg.fLength   = newLength;
                    state->fLength   = remain;
                    offset          -= remain;
                }
                else
                {
                    UInt64    addr = curSeg.fIOVMAddr;
                    ppnum_t   addrPage = atop_64(addr);
                    vm_page_t remap = NULL;
                    UInt64    remain, newLength;

                    DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength);

                    remap = internalState->fNextRemapPage;
                    if (remap && (addrPage == vm_page_get_offset(remap)))
                    {
                    }
                    else for (remap = internalState->fCopyPageAlloc;
                                remap && (addrPage != vm_page_get_offset(remap));
                                remap = vm_page_get_next(remap))
                    {
                    }

                    if (!remap) panic("no remap page found");

                    curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap))
                                        + (addr & PAGE_MASK);
                    internalState->fNextRemapPage = vm_page_get_next(remap);

                    newLength = PAGE_SIZE - (addr & PAGE_MASK);
                    if (newLength < curSeg.fLength)
                    {
                        remain           = curSeg.fLength - newLength;
                        state->fIOVMAddr = addr + newLength;
                        curSeg.fLength   = newLength;
                        state->fLength   = remain;
                        offset          -= remain;
                    }
                    DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset);
                }
            }

            if (curSeg.fLength > fMaxSegmentSize)
            {
                UInt64 remain = curSeg.fLength - fMaxSegmentSize;

                state->fIOVMAddr = fMaxSegmentSize + curSeg.fIOVMAddr;
                curSeg.fLength   = fMaxSegmentSize;

                state->fLength   = remain;
                offset          -= remain;
            }

            if (internalState->fCursor
                && (0 != (internalState->fSourceAlignMask & curSeg.fIOVMAddr)))
            {
                curSeg.fIOVMAddr = 0;
                ret = kIOReturnNotAligned;
                break;
            }

            if (offset >= memLength)
            {
                curSeg.fLength  -= (offset - memLength);
                offset = memLength;
                state->fIOVMAddr = state->fLength = 0;  // At end
                break;
            }
        }

        if (state->fIOVMAddr) {
            if ((segIndex + 1 == numSegments))
                break;

            ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
            curSeg.fIOVMAddr = 0;
            if (kIOReturnSuccess != ret)
                break;
        }
    }

    if (curSeg.fIOVMAddr) {
        ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
        state->fOffset = offset;
        *offsetP       = offset - internalState->fPreparedOffset;
        *numSegmentsP  = segIndex;
    }
    return ret;
}
IOReturn
IODMACommand::clientOutputSegment(
        void *reference, IODMACommand *target,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    SegmentFunction segmentFunction = (SegmentFunction) reference;
    IOReturn ret = kIOReturnSuccess;

    if ((target->fNumAddressBits < 64)
        && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
        && (target->reserved->fLocalMapperPageAlloc || !target->reserved->fLocalMapper))
    {
        DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    if (!(*segmentFunction)(target, segment, vSegList, outSegIndex))
    {
        DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    return (ret);
}
IOReturn
IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
                              UInt64   *offsetP,
                              void     *segmentsP,
                              UInt32   *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
                            offsetP, segmentsP, numSegmentsP));
}
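
/*
 * The Output* functions below are the stock segment emitters: each stores a
 * Segment64 into the caller's array in a particular width and byte order.
 * A hedged sketch of pairing one with the overload above, assuming hardware
 * that consumes {addr, len} pairs as little-endian 32-bit words
 * (descriptorMemory is a hypothetical name):
 *
 *    Segment32 * sgl = (Segment32 *) descriptorMemory;
 *    cmd->genIOVMSegments(IODMACommand::OutputLittle32, &offset, sgl, &count);
 */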
bool
IODMACommand::OutputHost32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment32 * base = (Segment32 *) vSegList;
    base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
    base[outSegIndex].fLength   = (UInt32) segment.fLength;
    return true;
}

bool
IODMACommand::OutputBig32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteBigInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteLittleInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputHost64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment64 * base = (Segment64 *) vSegList;
    base[outSegIndex] = segment;
    return true;
}

bool
IODMACommand::OutputBig64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteBigInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteLittleInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}