/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>

#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"
#define MAPTYPE(type)           ((UInt) (type) & kTypeMask)
#define IS_MAPPED(type)         (MAPTYPE(type) == kMapped)
#define IS_BYPASSED(type)       (MAPTYPE(type) == kBypassed)
#define IS_NONCOHERENT(type)    (MAPTYPE(type) == kNonCoherent)
enum
{
    kWalkSyncIn       = 0x01,   // bounce -> md
    kWalkSyncOut      = 0x02,   // bounce <- md
    kWalkSyncAlways   = 0x04,
    kWalkPreflight    = 0x08,
    kWalkDoubleBuffer = 0x10,
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80
};
#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary
#if 1
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction)                                     \
    ((kIODirectionNone == (direction))                                     \
        || (kWalkSyncAlways & (op))                                        \
        || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)      \
                                                   & (direction)))
#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif
#if 0
#define DEBG(fmt, args...) { IOLog(fmt, ## args); kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...) {}
#endif
/**************************** class IODMACommand ***************************/

#define super IOCommand
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);

OSMetaClassDefineReservedUsed(IODMACommand,  0);
OSMetaClassDefineReservedUsed(IODMACommand,  1);
OSMetaClassDefineReservedUsed(IODMACommand,  2);
OSMetaClassDefineReservedUnused(IODMACommand,  3);
OSMetaClassDefineReservedUnused(IODMACommand,  4);
OSMetaClassDefineReservedUnused(IODMACommand,  5);
OSMetaClassDefineReservedUnused(IODMACommand,  6);
OSMetaClassDefineReservedUnused(IODMACommand,  7);
OSMetaClassDefineReservedUnused(IODMACommand,  8);
OSMetaClassDefineReservedUnused(IODMACommand,  9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);
IODMACommand *
IODMACommand::withSpecification(SegmentFunction outSegFunc,
                                UInt8           numAddressBits,
                                UInt64          maxSegmentSize,
                                MappingOptions  mappingOptions,
                                UInt64          maxTransferSize,
                                UInt64          alignment,
                                IOMapper       *mapper,
                                void           *refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc,
                                         numAddressBits, maxSegmentSize,
                                         mappingOptions, maxTransferSize,
                                         alignment,      mapper, refCon))
    {
        me->release();
        return 0;
    }

    return me;
}
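/*
 * Usage sketch (hypothetical driver code, not part of this file): create a
 * command for a device that addresses 32 bits and takes 64K segments. The
 * constants are illustrative; 0 for maxSegmentSize/maxTransferSize means
 * "no limit" (see initWithSpecification below), and a NULL mapper selects
 * the system mapper.
 *
 *     IODMACommand * cmd = IODMACommand::withSpecification(
 *         IODMACommand::OutputHost32,  // segment function: host-order Segment32
 *         32,                          // numAddressBits
 *         65536,                       // maxSegmentSize
 *         IODMACommand::kMapped,       // mappingOptions
 *         0,                           // maxTransferSize (0 = no limit)
 *         1,                           // alignment
 *         NULL,                        // mapper (NULL = system mapper)
 *         NULL);                       // refCon
 *     if (!cmd)
 *         return kIOReturnNoResources;
 */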
IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    return withSpecification(fOutSeg, fNumAddressBits, fMaxSegmentSize,
                             fMappingOptions, fMaxTransferSize, fAlignMask + 1, fMapper, refCon);
}
#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)
bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
                                    UInt8           numAddressBits,
                                    UInt64          maxSegmentSize,
                                    MappingOptions  mappingOptions,
                                    UInt64          maxTransferSize,
                                    UInt64          alignment,
                                    IOMapper       *mapper,
                                    void           *refCon)
{
    if (!super::init() || !outSegFunc)
        return false;

    bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
                 || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
        if (!numAddressBits)
            numAddressBits = 32;
        else if (numAddressBits > 32)
            return false;               // Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
        return false;

    if (!maxSegmentSize)
        maxSegmentSize--;               // Set Max segment to -1
    if (!maxTransferSize)
        maxTransferSize--;              // Set Max transfer to -1

    if (!mapper)
    {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
        alignment = 1;
    fAlignMask       = alignment - 1;
    fMapper          = mapper;
    fRefCon          = refCon;

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:                   break;
    case kNonCoherent: fMapper = 0; break;
    case kBypassed:
        if (mapper && !mapper->getBypassMask(&fBypassMask))
            return false;
        break;
    default:
        return false;
    };

    reserved = IONew(IODMACommandInternal, 1);
    if (!reserved)
        return false;
    bzero(reserved, sizeof(IODMACommandInternal));

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));

    return true;
}

void
IODMACommand::free()
{
    if (reserved)
        IODelete(reserved, IODMACommandInternal, 1);

    super::free();
}
IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    IOReturn err = kIOReturnSuccess;

    if (mem == fMemory)
    {
        if (!autoPrepare)
        {
            while (fActive)
                complete();
        }
        return kIOReturnSuccess;
    }

    if (fMemory) {
        // As we are almost certainly being called from a work loop thread
        // if fActive is true it is probably not a good time to potentially
        // block. Just test for it and return an error
        if (fActive)
            return kIOReturnBusy;
        clearMemoryDescriptor();
    }

    if (mem) {
        bzero(&fMDSummary, sizeof(fMDSummary));
        err = mem->dmaCommandOperation(
                kIOMDGetCharacteristics,
                &fMDSummary, sizeof(fMDSummary));
        if (err)
            return err;

        ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

        if ((kMapped == MAPTYPE(fMappingOptions))
            && fMapper
            && (!fNumAddressBits || (fNumAddressBits >= 31)))
            // assuming mapped space is 2G
            fInternalState->fCheckAddressing = false;
        else
            fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));

        fInternalState->fNewMD = true;
        mem->retain();
        fMemory = mem;

        mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
        if (autoPrepare) {
            err = prepare();
            if (err) {
                clearMemoryDescriptor();
            }
        }
    }

    return err;
}
IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
    if (fActive && !autoComplete)
        return (kIOReturnNotReady);

    if (fMemory) {
        while (fActive)
            complete();
        fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
        fMemory->release();
        fMemory = 0;
    }

    return (kIOReturnSuccess);
}

const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
    return fMemory;
}
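/*
 * Lifecycle sketch (hypothetical caller, illustrative only; assumes the
 * header's default autoPrepare/autoComplete arguments): bind a descriptor,
 * use the command while prepared, then tear down in reverse order.
 *
 *     if (kIOReturnSuccess == cmd->setMemoryDescriptor(md)) // retains md, prepares
 *     {
 *         // ... generate segments and program the hardware ...
 *         cmd->complete();
 *         cmd->clearMemoryDescriptor();                     // releases md
 *     }
 */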
IOReturn
IODMACommand::segmentOp(
            void         *reference,
            IODMACommand *target,
            Segment64     segment,
            void         *segments,
            UInt32        segmentIndex)
{
    IOOptionBits op = (uintptr_t) reference;
    addr64_t     maxPhys, address;
    uint64_t     length;
    uint32_t     numPages;

    IODMACommandInternal * state = target->reserved;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64) && !state->fLocalMapper)
        maxPhys = (1ULL << target->fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    address = segment.fIOVMAddr;
    length  = segment.fLength;

    assert(address);
    assert(length);

    if (!state->fMisaligned)
    {
        state->fMisaligned |= (0 != (state->fSourceAlignMask & address));
        if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, state->fSourceAlignMask);
    }

    if (state->fMisaligned && (kWalkPreflight & op))
        return (kIOReturnNotAligned);

    if (!state->fDoubleBuffer)
    {
        if ((address + length - 1) <= maxPhys)
        {
            length = 0;
        }
        else if (address <= maxPhys)
        {
            DEBG("tail %qx, %qx", address, length);
            length  = (address + length - maxPhys - 1);
            address = maxPhys + 1;
            DEBG("-> %qx, %qx\n", address, length);
        }
    }

    if (!length)
        return (kIOReturnSuccess);

    numPages = atop_64(round_page_64((address & PAGE_MASK) + length));

    if (kWalkPreflight & op)
    {
        state->fCopyPageCount += numPages;
    }
    else
    {
        vm_page_t lastPage = NULL;

        if (kWalkPrepare & op)
        {
            lastPage = state->fCopyNext;
            for (IOItemCount idx = 0; idx < numPages; idx++)
            {
                vm_page_set_offset(lastPage, atop_64(address) + idx);
                lastPage = vm_page_get_next(lastPage);
            }
        }

        if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
        {
            lastPage = state->fCopyNext;
            for (IOItemCount idx = 0; idx < numPages; idx++)
            {
                if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
                {
                    addr64_t remapAddr;
                    uint64_t chunk;

                    remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
                    if (!state->fDoubleBuffer)
                    {
                        remapAddr += (address & PAGE_MASK);
                    }
                    chunk = PAGE_SIZE - (address & PAGE_MASK);
                    if (chunk > length)
                        chunk = length;

                    DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
                            (kWalkSyncIn & op) ? "->" : "<-",
                            address, chunk, op);

                    if (kWalkSyncIn & op)
                    {
                        copypv(remapAddr, address, chunk,
                                cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc);
                    }
                    else
                    {
                        copypv(address, remapAddr, chunk,
                                cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc);
                    }
                    address += chunk;
                    length  -= chunk;
                }
                lastPage = vm_page_get_next(lastPage);
            }
        }
        state->fCopyNext = lastPage;
    }

    return kIOReturnSuccess;
}
IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn     ret = kIOReturnSuccess;
    UInt32       numSegments;
    UInt64       offset;

    if (kWalkPreflight & op)
    {
        state->fMapContig            = false;
        state->fMisaligned           = false;
        state->fDoubleBuffer         = false;
        state->fPrepared             = false;
        state->fCopyNext             = NULL;
        state->fCopyPageAlloc        = 0;
        state->fLocalMapperPageAlloc = 0;
        state->fCopyPageCount        = 0;
        state->fNextRemapPage        = NULL;
        state->fCopyMD               = 0;

        if (!(kWalkDoubleBuffer & op))
        {
            offset = 0;
            numSegments = 0-1;
            ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
        }

        op &= ~kWalkPreflight;

        state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
        if (state->fDoubleBuffer)
            state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));

        if (state->fCopyPageCount)
        {
            vm_page_t mapBase = NULL;

            DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

            if (!state->fDoubleBuffer)
            {
                kern_return_t kr;
                kr = vm_page_alloc_list(state->fCopyPageCount,
                                        KMA_LOMEM | KMA_NOPAGEWAIT, &mapBase);
                if (KERN_SUCCESS != kr)
                {
                    DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr);
                    mapBase = NULL;
                }
            }

            if (mapBase)
            {
                state->fCopyPageAlloc = mapBase;
                state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
                state->fPrepared = true;
                op &= ~(kWalkSyncIn | kWalkSyncOut);
            }
            else
            {
                DEBG("alloc IOBMD\n");
                mach_vm_address_t mask = 0xFFFFF000; //state->fSourceAlignMask
                state->fCopyMD = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
                                    fMDSummary.fDirection, state->fPreparedLength, mask);

                if (state->fCopyMD)
                {
                    ret = kIOReturnSuccess;
                    state->fPrepared = true;
                }
                else
                {
                    DEBG("IODMACommand !alloc IOBMD");
                    return (kIOReturnNoResources);
                }
            }
        }

        if (state->fLocalMapper)
        {
            state->fLocalMapperPageCount = atop_64(round_page(
                    state->fPreparedLength + ((state->fPreparedOffset + fMDSummary.fPageAlign) & page_mask)));
            state->fLocalMapperPageAlloc = fMapper->iovmAllocDMACommand(this, state->fLocalMapperPageCount);
            if (!state->fLocalMapperPageAlloc)
            {
                DEBG("IODMACommand !iovmAlloc");
                return (kIOReturnNoResources);
            }
            state->fMapContig = true;
        }
    }

    if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
    {
        if (state->fCopyPageCount)
        {
            DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

            if (state->fCopyPageAlloc)
            {
                state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
            }
            else if (state->fCopyMD)
            {
                DEBG("sync IOBMD\n");

                if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
                {
                    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

                    IOByteCount bytes;

                    if (kWalkSyncIn & op)
                        bytes = poMD->writeBytes(state->fPreparedOffset,
                                                 state->fCopyMD->getBytesNoCopy(),
                                                 state->fPreparedLength);
                    else
                        bytes = poMD->readBytes(state->fPreparedOffset,
                                                state->fCopyMD->getBytesNoCopy(),
                                                state->fPreparedLength);
                    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
                    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
                }
                else
                    ret = kIOReturnSuccess;
            }
        }
    }

    if (kWalkComplete & op)
    {
        if (state->fLocalMapperPageAlloc)
        {
            fMapper->iovmFreeDMACommand(this, state->fLocalMapperPageAlloc, state->fLocalMapperPageCount);
            state->fLocalMapperPageAlloc = 0;
            state->fLocalMapperPageCount = 0;
        }
        if (state->fCopyPageAlloc)
        {
            vm_page_free_list(state->fCopyPageAlloc, FALSE);
            state->fCopyPageAlloc = 0;
            state->fCopyPageCount = 0;
        }
        if (state->fCopyMD)
        {
            state->fCopyMD->release();
            state->fCopyMD = 0;
        }

        state->fPrepared = false;
    }
    return (ret);
}
UInt8
IODMACommand::getNumAddressBits(void)
{
    return (fNumAddressBits);
}

UInt64
IODMACommand::getAlignment(void)
{
    return (fAlignMask + 1);
}
IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
                                       UInt8           numAddressBits,
                                       UInt64          maxSegmentSize,
                                       MappingOptions  mappingOptions,
                                       UInt64          maxTransferSize,
                                       UInt64          alignment,
                                       IOMapper       *mapper,
                                       UInt64          offset,
                                       UInt64          length,
                                       bool            flushCache,
                                       bool            synchronize)
{
    if (fActive)
        return kIOReturnNotPermitted;

    if (!outSegFunc)
        return kIOReturnBadArgument;

    bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
                 || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
        if (!numAddressBits)
            numAddressBits = 32;
        else if (numAddressBits > 32)
            return kIOReturnBadArgument;        // Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
        return kIOReturnBadArgument;

    if (!maxSegmentSize)
        maxSegmentSize--;       // Set Max segment to -1
    if (!maxTransferSize)
        maxTransferSize--;      // Set Max transfer to -1

    if (!mapper)
    {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:                   break;
    case kNonCoherent: fMapper = 0; break;
    case kBypassed:
        if (mapper && !mapper->getBypassMask(&fBypassMask))
            return kIOReturnBadArgument;
        break;
    default:
        return kIOReturnBadArgument;
    };

    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
        alignment = 1;
    fAlignMask       = alignment - 1;
    if (mapper != fMapper)
    {
        mapper->retain();
        fMapper->release();
        fMapper = mapper;
    }

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));

    return prepare(offset, length, flushCache, synchronize);
}
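/*
 * Sketch (hypothetical values; assumes the trailing parameters follow the
 * same order as initWithSpecification plus offset/length): respecify an
 * inactive command for a 64-bit engine and prepare a 4K range in one call.
 *
 *     ret = cmd->prepareWithSpecification(IODMACommand::OutputHost64,
 *                                         64,     // numAddressBits
 *                                         0,      // maxSegmentSize (no limit)
 *                                         IODMACommand::kMapped,
 *                                         0,      // maxTransferSize (no limit)
 *                                         1,      // alignment
 *                                         NULL,   // mapper (system)
 *                                         0,      // offset
 *                                         4096);  // length
 */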
IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    MappingOptions mappingOptions = fMappingOptions;

    if (!length)
        length = fMDSummary.fLength;

    if (length > fMaxTransferSize)
        return kIOReturnNoSpace;

    if (IS_NONCOHERENT(mappingOptions) && flushCache) {
        IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

        poMD->performOperation(kIOMemoryIncoherentIOStore, offset, length);
    }

    if (fActive++)
    {
        if ((state->fPreparedOffset != offset)
          || (state->fPreparedLength != length))
            ret = kIOReturnNotReady;
    }
    else
    {
        state->fPreparedOffset = offset;
        state->fPreparedLength = length;

        state->fMapContig      = false;
        state->fMisaligned     = false;
        state->fDoubleBuffer   = false;
        state->fPrepared       = false;
        state->fCopyNext       = NULL;
        state->fCopyPageAlloc  = 0;
        state->fCopyPageCount  = 0;
        state->fNextRemapPage  = NULL;
        state->fCopyMD         = 0;
        state->fLocalMapperPageAlloc = 0;
        state->fLocalMapperPageCount = 0;

        state->fLocalMapper    = (fMapper && (fMapper != IOMapper::gSystem));

        state->fSourceAlignMask = fAlignMask;
        if (state->fLocalMapper)
            state->fSourceAlignMask &= page_mask;

        state->fCursor = state->fIterateOnly
                        || (!state->fCheckAddressing
                            && !state->fLocalMapper
                            && (!state->fSourceAlignMask
                                || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));

        if (!state->fCursor)
        {
            IOOptionBits op = kWalkPrepare | kWalkPreflight;
            if (synchronize)
                op |= kWalkSyncOut;
            ret = walkAll(op);
        }
        if (kIOReturnSuccess == ret)
            state->fPrepared = true;
    }
    return ret;
}
IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    if (fActive < 1)
        return kIOReturnNotReady;

    if (!--fActive)
    {
        if (!state->fCursor)
        {
            IOOptionBits op = kWalkComplete;
            if (synchronize)
                op |= kWalkSyncIn;
            ret = walkAll(op);
        }
        state->fPrepared = false;

        if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
        {
            IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

            poMD->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
        }
    }

    return ret;
}
IOReturn
IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
{
    IODMACommandInternal * state = fInternalState;
    if (fActive < 1)
        return (kIOReturnNotReady);

    if (offset)
        *offset = state->fPreparedOffset;
    if (length)
        *length = state->fPreparedLength;

    return (kIOReturnSuccess);
}
IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOOptionBits           op    = 0;

    if (kIODirectionOutIn == (kIODirectionOutIn & options))
        return kIOReturnBadArgument;

    if (fActive < 1)
        return kIOReturnNotReady;

    if (kForceDoubleBuffer & options)
    {
        if (state->fDoubleBuffer)
            return kIOReturnSuccess;
        if (state->fCursor)
            state->fCursor = false;
        else
            ret = walkAll(kWalkComplete);

        op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
    }
    else if (state->fCursor)
        return kIOReturnSuccess;

    if (kIODirectionIn & options)
        op |= kWalkSyncIn | kWalkSyncAlways;
    else if (kIODirectionOut & options)
        op |= kWalkSyncOut | kWalkSyncAlways;

    ret = walkAll(op);

    return ret;
}
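/*
 * Sketch (hypothetical): after a device-to-memory transfer, pull any
 * bounce-buffered data back into the client's memory before reading it:
 *
 *     cmd->synchronize(kIODirectionIn);
 *
 * Passing kForceDoubleBuffer | kIODirectionOut before the transfer forces
 * the double-buffered path, as the code above shows, e.g. while debugging
 * an addressing problem.
 */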
struct IODMACommandTransferContext
{
    void *   buffer;
    UInt64   bufferOffset;
    UInt64   remaining;
    UInt32   op;
};
enum
{
    kIODMACommandTransferOpReadBytes  = 1,
    kIODMACommandTransferOpWriteBytes = 2
};
IOReturn
IODMACommand::transferSegment(void         *reference,
                              IODMACommand *target,
                              Segment64     segment,
                              void         *segments,
                              UInt32        segmentIndex)
{
    IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
    UInt64   length  = min(segment.fLength, context->remaining);
    addr64_t ioAddr  = segment.fIOVMAddr;
    addr64_t cpuAddr = ioAddr;

    context->remaining -= length;

    while (length)
    {
        UInt64 copyLen = length;
        if ((kMapped == MAPTYPE(target->fMappingOptions))
            && target->fMapper)
        {
            cpuAddr = target->fMapper->mapAddr(ioAddr);
            copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
            ioAddr += copyLen;
        }

        switch (context->op)
        {
            case kIODMACommandTransferOpReadBytes:
                copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen,
                        cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
                break;
            case kIODMACommandTransferOpWriteBytes:
                copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen,
                        cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
                break;
        }
        length                -= copyLen;
        context->bufferOffset += copyLen;
    }

    return (context->remaining ? kIOReturnSuccess : kIOReturnOverrun);
}
UInt64
IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
{
    IODMACommandInternal *      state = fInternalState;
    IODMACommandTransferContext context;
    Segment64                   segments[1];
    UInt32                      numSegments = 0-1;

    if (fActive < 1)
        return (0);

    if (offset >= state->fPreparedLength)
        return (0);
    length = min(length, state->fPreparedLength - offset);

    context.buffer       = buffer;
    context.bufferOffset = 0;
    context.remaining    = length;
    context.op           = transferOp;
    (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);

    return (length - context.remaining);
}
UInt64
IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length));
}

UInt64
IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length));
}
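/*
 * Sketch (hypothetical): copy the first 512 bytes of the prepared transfer
 * out of wherever the DMA actually landed (bounce pages included), via the
 * command itself rather than the original descriptor.
 *
 *     UInt8  header[512];
 *     UInt64 done = cmd->readBytes(0, header, sizeof(header));
 *     if (done < sizeof(header))
 *         DEBG("short read: %qx\n", done);
 */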
IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
                            offsetP, segmentsP, numSegmentsP));
}
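/*
 * Typical segment-generation loop (hypothetical caller; assumes the command
 * was created with a 64-bit output function such as OutputHost64, so that
 * segmentsP points at Segment64s). genIOVMSegments() advances 'offset' and
 * rewrites 'numSeg' with the count actually produced; it returns
 * kIOReturnOverrun once the prepared range is exhausted.
 *
 *     UInt64 offset = 0;
 *     for (;;)
 *     {
 *         IODMACommand::Segment64 segs[8];
 *         UInt32 numSeg = 8;
 *         if ((kIOReturnSuccess != cmd->genIOVMSegments(&offset, segs, &numSeg))
 *             || !numSeg)
 *             break;
 *         for (UInt32 i = 0; i < numSeg; i++)
 *         {
 *             // program segs[i].fIOVMAddr / segs[i].fLength into the device
 *         }
 *     }
 */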
IOReturn
IODMACommand::genIOVMSegments(uint32_t op,
                              InternalSegmentFunction outSegFunc,
                              void   *reference,
                              UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp = kIOMDWalkSegments;
    IOReturn               ret  = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive)
        return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
        return kIOReturnBadArgument;

    IOMDDMAWalkSegmentArgs *state =
        (IOMDDMAWalkSegmentArgs *) fState;

    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
        return kIOReturnOverrun;

    if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
        state->fOffset                = 0;
        state->fIOVMAddr              = 0;
        internalState->fNextRemapPage = NULL;
        internalState->fNewMD         = false;
        state->fMapped                = (IS_MAPPED(fMappingOptions) && fMapper);
        mdOp                          = kIOMDFirstSegment;
    };

    UInt64    bypassMask = fBypassMask;
    UInt32    segIndex = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg = { 0, 0 };
    addr64_t  maxPhys;

    if (fNumAddressBits && (fNumAddressBits < 64))
        maxPhys = (1ULL << fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    while (state->fIOVMAddr || (state->fOffset < memLength))
    {
        // state = next seg
        if (!state->fIOVMAddr) {

            IOReturn rtn;

            state->fOffset = offset;
            state->fLength = memLength - offset;

            if (internalState->fMapContig && (kWalkClient & op))
            {
                ppnum_t pageNum = internalState->fLocalMapperPageAlloc;
                state->fIOVMAddr = ptoa_64(pageNum)
                                        + offset - internalState->fPreparedOffset;
                rtn = kIOReturnSuccess;
            }
            else
            {
                const IOMemoryDescriptor * memory =
                    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
                rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
                mdOp = kIOMDWalkSegments;
            }

            if (rtn == kIOReturnSuccess)
            {
                assert(state->fIOVMAddr);
                assert(state->fLength);
                if ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr) {
                    UInt64 length = state->fLength;
                    offset          += length;
                    curSeg.fLength  += length;
                    state->fIOVMAddr = 0;
                }
            }
            else if (rtn == kIOReturnOverrun)
                state->fIOVMAddr = state->fLength = 0; // At end
            else
                return rtn;
        }

        // seg = state, offset = end of seg
        if (!curSeg.fIOVMAddr)
        {
            UInt64 length    = state->fLength;
            offset          += length;
            curSeg.fIOVMAddr = state->fIOVMAddr | bypassMask;
            curSeg.fLength   = length;
            state->fIOVMAddr = 0;
        }

        if (!state->fIOVMAddr)
        {
            if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
            {
                if (internalState->fCursor)
                {
                    curSeg.fIOVMAddr = 0;
                    ret = kIOReturnMessageTooLarge;
                    break;
                }
                else if (curSeg.fIOVMAddr <= maxPhys)
                {
                    UInt64 remain, newLength;

                    newLength        = (maxPhys + 1 - curSeg.fIOVMAddr);
                    DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
                    remain           = curSeg.fLength - newLength;
                    state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
                    curSeg.fLength   = newLength;
                    state->fLength   = remain;
                    offset          -= remain;
                }
                else
                {
                    UInt64    addr = curSeg.fIOVMAddr;
                    ppnum_t   addrPage = atop_64(addr);
                    vm_page_t remap = NULL;
                    UInt64    remain, newLength;

                    DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength);

                    remap = internalState->fNextRemapPage;
                    if (remap && (addrPage == vm_page_get_offset(remap)))
                    {
                    }
                    else for (remap = internalState->fCopyPageAlloc;
                                remap && (addrPage != vm_page_get_offset(remap));
                                remap = vm_page_get_next(remap))
                    {
                    }

                    if (!remap) panic("no remap page found");

                    curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap))
                                            + (addr & PAGE_MASK);
                    internalState->fNextRemapPage = vm_page_get_next(remap);

                    newLength = PAGE_SIZE - (addr & PAGE_MASK);
                    if (newLength < curSeg.fLength)
                    {
                        remain           = curSeg.fLength - newLength;
                        state->fIOVMAddr = addr + newLength;
                        curSeg.fLength   = newLength;
                        state->fLength   = remain;
                        offset          -= remain;
                    }
                    DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset);
                }
            }

            if (curSeg.fLength > fMaxSegmentSize)
            {
                UInt64 remain = curSeg.fLength - fMaxSegmentSize;

                state->fIOVMAddr = fMaxSegmentSize + curSeg.fIOVMAddr;
                curSeg.fLength   = fMaxSegmentSize;

                state->fLength   = remain;
                offset          -= remain;
            }

            if (internalState->fCursor
                && (0 != (internalState->fSourceAlignMask & curSeg.fIOVMAddr)))
            {
                curSeg.fIOVMAddr = 0;
                ret = kIOReturnNotAligned;
                break;
            }

            if (offset >= memLength)
            {
                curSeg.fLength  -= (offset - memLength);
                offset = memLength;
                state->fIOVMAddr = state->fLength = 0; // At end
                break;
            }
        }

        if (state->fIOVMAddr) {
            if ((segIndex + 1 == numSegments))
                break;

            ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
            curSeg.fIOVMAddr = 0;
            if (kIOReturnSuccess != ret)
                break;
        }
    }

    if (curSeg.fIOVMAddr) {
        ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
        state->fOffset = offset;
        *offsetP       = offset - internalState->fPreparedOffset;
        *numSegmentsP  = segIndex;
    }
    return ret;
}
IOReturn
IODMACommand::clientOutputSegment(
        void *reference, IODMACommand *target,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    SegmentFunction segmentFunction = (SegmentFunction) reference;
    IOReturn        ret             = kIOReturnSuccess;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64)
        && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
        && (target->reserved->fLocalMapperPageAlloc || !target->reserved->fLocalMapper))
    {
        DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    if (!(*segmentFunction)(target, segment, vSegList, outSegIndex))
    {
        DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    return (ret);
}
IOReturn
IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
                              UInt64   *offsetP,
                              void     *segmentsP,
                              UInt32   *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
                            offsetP, segmentsP, numSegmentsP));
}
bool
IODMACommand::OutputHost32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment32 *base = (Segment32 *) vSegList;
    base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
    base[outSegIndex].fLength   = (UInt32) segment.fLength;
    return true;
}

bool
IODMACommand::OutputBig32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteBigInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteLittleInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputHost64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment64 *base = (Segment64 *) vSegList;
    base[outSegIndex] = segment;
    return true;
}

bool
IODMACommand::OutputBig64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteBigInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteLittleInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}