/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>

#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>
#include <libkern/OSDebug.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"
#define MAPTYPE(type)           ((UInt) (type) & kTypeMask)
#define IS_MAPPED(type)         (MAPTYPE(type) != kBypassed)
#define IS_BYPASSED(type)       (MAPTYPE(type) == kBypassed)
#define IS_NONCOHERENT(type)    (MAPTYPE(type) == kNonCoherent)
enum
{
    kWalkSyncIn       = 0x01,   // bounce -> md
    kWalkSyncOut      = 0x02,   // bounce <- md
    kWalkSyncAlways   = 0x04,
    kWalkPreflight    = 0x08,
    kWalkDoubleBuffer = 0x10,
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80
};
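/*
 * These walk-op bits drive walkAll()/segmentOp(): kWalkPreflight sizes and
 * allocates the bounce ("copy") pages, kWalkPrepare assigns them to the
 * transfer, kWalkSyncIn/kWalkSyncOut copy bounce pages to/from the memory
 * descriptor, and kWalkComplete releases them.  kWalkDoubleBuffer forces
 * every page to bounce, as used by synchronize(kForceDoubleBuffer).
 */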
#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary
#if 1
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction)      \
    ((kIODirectionNone == (direction))      \
        || (kWalkSyncAlways & (op))         \
        || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut) \
                                                & (direction)))
#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif

#if 0
#define DEBG(fmt, args...)      { IOLog(fmt, ## args); kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif
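/*
 * SHOULD_COPY_DIR() answers: for this sync op, does data actually move in
 * the direction of the transfer?  For example, a kWalkSyncIn walk over a
 * kIODirectionOut transfer copies nothing unless kWalkSyncAlways is set,
 * while a transfer with no recorded direction (kIODirectionNone) always
 * copies.
 */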
/**************************** class IODMACommand ***************************/

#define super IOCommand
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);
OSMetaClassDefineReservedUsed(IODMACommand,  0);
OSMetaClassDefineReservedUsed(IODMACommand,  1);
OSMetaClassDefineReservedUsed(IODMACommand,  2);
OSMetaClassDefineReservedUnused(IODMACommand,  3);
OSMetaClassDefineReservedUnused(IODMACommand,  4);
OSMetaClassDefineReservedUnused(IODMACommand,  5);
OSMetaClassDefineReservedUnused(IODMACommand,  6);
OSMetaClassDefineReservedUnused(IODMACommand,  7);
OSMetaClassDefineReservedUnused(IODMACommand,  8);
OSMetaClassDefineReservedUnused(IODMACommand,  9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);
IODMACommand *
IODMACommand::withSpecification(SegmentFunction outSegFunc,
                                UInt8           numAddressBits,
                                UInt64          maxSegmentSize,
                                MappingOptions  mappingOptions,
                                UInt64          maxTransferSize,
                                UInt32          alignment,
                                IOMapper       *mapper,
                                void           *refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc,
                                         numAddressBits, maxSegmentSize,
                                         mappingOptions, maxTransferSize,
                                         alignment, mapper, refCon))
    {
        me->release();
        return 0;
    };

    return me;
}
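/*
 * Illustrative usage sketch (not part of this file): a driver for a 32-bit
 * DMA engine with 64K maximum segments might create its command as
 *
 *      IODMACommand * cmd = IODMACommand::withSpecification(
 *                      kIODMACommandOutputHost32,      // segment function
 *                      32,                             // address bits
 *                      65536);                         // max segment size
 *
 * relying on the defaulted mapping options, transfer size, alignment,
 * mapper and refCon parameters.
 */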
IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    return withSpecification(fOutSeg, fNumAddressBits, fMaxSegmentSize,
            fMappingOptions, fMaxTransferSize, fAlignMask + 1, fMapper, refCon);
}
#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)
bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
                                    UInt8           numAddressBits,
                                    UInt64          maxSegmentSize,
                                    MappingOptions  mappingOptions,
                                    UInt64          maxTransferSize,
                                    UInt32          alignment,
                                    IOMapper       *mapper,
                                    void           *refCon)
{
    IOService * device = 0;

    if (!super::init() || !outSegFunc)
        return false;

    bool is32Bit = (OutputHost32   == outSegFunc || OutputBig32 == outSegFunc
                 || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
        if (!numAddressBits)
            numAddressBits = 32;
        else if (numAddressBits > 32)
            return false;               // Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
        return false;

    if (!maxSegmentSize)
        maxSegmentSize--;       // Set Max segment to -1
    if (!maxTransferSize)
        maxTransferSize--;      // Set Max transfer to -1

    if (mapper && !OSDynamicCast(IOMapper, mapper))
    {
        device = mapper;
        mapper = 0;
    }
    if (!mapper)
    {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    fNumSegments     = 0;
    fBypassMask      = 0;
    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
        alignment = 1;
    fAlignMask       = alignment - 1;
    fMapper          = mapper;
    fRefCon          = refCon;

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:                       break;
    case kNonCoherent: /*fMapper = 0;*/ break;
    case kBypassed:
        if (mapper && !mapper->getBypassMask(&fBypassMask))
            return false;
        break;
    default:
        return false;
    };

    if (fMapper)
        fMapper->retain();

    reserved = IONew(IODMACommandInternal, 1);
    if (!reserved)
        return false;
    bzero(reserved, sizeof(IODMACommandInternal));

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
    fInternalState->fDevice = device;

    return true;
}

void
IODMACommand::free()
{
    if (reserved)
        IODelete(reserved, IODMACommandInternal, 1);

    if (fMapper)
        fMapper->release();

    super::free();
}
IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    IOReturn err = kIOReturnSuccess;

    if (mem == fMemory)
    {
        if (!autoPrepare)
        {
            while (fActive)
                complete();
        }
        return kIOReturnSuccess;
    }

    if (fMemory) {
        // As we are almost certainly being called from a work loop thread
        // if fActive is true it is probably not a good time to potentially
        // block.  Just test for it and return an error
        if (fActive)
            return kIOReturnBusy;
        clearMemoryDescriptor();
    }

    if (mem) {
        bzero(&fMDSummary, sizeof(fMDSummary));
        err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)),
                                       &fMDSummary, sizeof(fMDSummary));
        if (err)
            return err;

        ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

        if ((kMapped == MAPTYPE(fMappingOptions))
            && fMapper)
            fInternalState->fCheckAddressing = false;
        else
            fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));

        fInternalState->fNewMD = true;
        mem->retain();
        fMemory = mem;

        mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
        if (autoPrepare) {
            err = prepare();
            if (err) {
                clearMemoryDescriptor();
            }
        }
    }

    return err;
}

IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
    if (fActive && !autoComplete)
        return (kIOReturnNotReady);

    if (fMemory) {
        while (fActive)
            complete();
        fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
        fMemory->release();
        fMemory = 0;
    }

    return (kIOReturnSuccess);
}
const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
    return fMemory;
}
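/*
 * segmentOp() is the internal segment callback used by walkAll(); the walk
 * op bits are smuggled through the opaque reference argument.  During
 * preflight it counts the pages that lie beyond maxPhys and must bounce;
 * during prepare/sync it assigns bounce pages and performs the copies.
 */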
IOReturn
IODMACommand::segmentOp(
                        void         *reference,
                        IODMACommand *target,
                        Segment64     segment,
                        void         *segments,
                        UInt32        segmentIndex)
{
    IOOptionBits op = (uintptr_t) reference;
    addr64_t     maxPhys, address;
    uint64_t     length;
    uint32_t     numPages;

    IODMACommandInternal * state = target->reserved;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperPageAlloc || !target->fMapper))
        maxPhys = (1ULL << target->fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    address = segment.fIOVMAddr;
    length  = segment.fLength;

    assert(address);
    assert(length);

    if (!state->fMisaligned)
    {
        state->fMisaligned |= (0 != (state->fSourceAlignMask & address));
        if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, state->fSourceAlignMask);
    }

    if (state->fMisaligned && (kWalkPreflight & op))
        return (kIOReturnNotAligned);

    if (!state->fDoubleBuffer)
    {
        if ((address + length - 1) <= maxPhys)
        {
            length = 0;
        }
        else if (address <= maxPhys)
        {
            DEBG("tail %qx, %qx", address, length);
            length  = (address + length - maxPhys - 1);
            address = maxPhys + 1;
            DEBG("-> %qx, %qx\n", address, length);
        }
    }

    if (!length)
        return (kIOReturnSuccess);

    numPages = atop_64(round_page_64((address & PAGE_MASK) + length));

    if (kWalkPreflight & op)
    {
        state->fCopyPageCount += numPages;
    }
    else
    {
        vm_page_t lastPage;
        lastPage = NULL;
        if (kWalkPrepare & op)
        {
            lastPage = state->fCopyNext;
            for (IOItemCount idx = 0; idx < numPages; idx++)
            {
                vm_page_set_offset(lastPage, atop_64(address) + idx);
                lastPage = vm_page_get_next(lastPage);
            }
        }

        if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
        {
            lastPage = state->fCopyNext;
            for (IOItemCount idx = 0; idx < numPages; idx++)
            {
                if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
                {
                    addr64_t cpuAddr = address;
                    addr64_t remapAddr;
                    uint64_t chunk;

                    if ((kMapped == MAPTYPE(target->fMappingOptions))
                        && target->fMapper)
                    {
                        cpuAddr = target->fMapper->mapAddr(address);
                    }

                    remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
                    if (!state->fDoubleBuffer)
                    {
                        remapAddr += (address & PAGE_MASK);
                    }
                    chunk = PAGE_SIZE - (address & PAGE_MASK);
                    if (chunk > length)
                        chunk = length;

                    DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
                            (kWalkSyncIn & op) ? "->" : "<-",
                            address, chunk, op);

                    if (kWalkSyncIn & op)
                    { // cppvNoModSnk
                        copypv(remapAddr, cpuAddr, chunk,
                                cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc);
                    }
                    else
                    {
                        copypv(cpuAddr, remapAddr, chunk,
                                cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc);
                    }
                    address += chunk;
                    length  -= chunk;
                }
                lastPage = vm_page_get_next(lastPage);
            }
        }
        state->fCopyNext = lastPage;
    }

    return kIOReturnSuccess;
}
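/*
 * walkAll() runs the bounce-buffer state machine over the prepared range:
 * preflight discovers whether any segment is misaligned or above the
 * addressing limit, then either allocates individual bounce pages
 * (vm_page_alloc_list) or falls back to one contiguous
 * IOBufferMemoryDescriptor; sync ops copy data through those pages, and
 * complete frees them.
 */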
IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn     ret = kIOReturnSuccess;
    UInt32       numSegments;
    UInt64       offset;

    if (kWalkPreflight & op)
    {
        state->fMisaligned     = false;
        state->fDoubleBuffer   = false;
        state->fPrepared       = false;
        state->fCopyNext       = NULL;
        state->fCopyPageAlloc  = 0;
        state->fCopyPageCount  = 0;
        state->fNextRemapPage  = NULL;
        state->fCopyMD         = 0;

        if (!(kWalkDoubleBuffer & op))
        {
            offset = 0;
            numSegments = 0-1;
            ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
        }

        op &= ~kWalkPreflight;

        state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
        if (state->fDoubleBuffer)
            state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));

        if (state->fCopyPageCount)
        {
            vm_page_t mapBase = NULL;

            DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

            if (!state->fDoubleBuffer)
            {
                kern_return_t kr;

                if (fMapper) panic("fMapper copying");

                kr = vm_page_alloc_list(state->fCopyPageCount,
                                        KMA_LOMEM | KMA_NOPAGEWAIT, &mapBase);
                if (KERN_SUCCESS != kr)
                {
                    DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr);
                    mapBase = NULL;
                }
            }

            if (mapBase)
            {
                state->fCopyPageAlloc = mapBase;
                state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
                state->fPrepared = true;
                op &= ~(kWalkSyncIn | kWalkSyncOut);
            }
            else
            {
                DEBG("alloc IOBMD\n");
                mach_vm_address_t mask = 0xFFFFF000; //state->fSourceAlignMask
                state->fCopyMD = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
                                    fMDSummary.fDirection, state->fPreparedLength, mask);

                if (state->fCopyMD)
                {
                    ret = kIOReturnSuccess;
                    state->fPrepared = true;
                }
                else
                {
                    DEBG("IODMACommand !alloc IOBMD");
                    return (kIOReturnNoResources);
                }
            }
        }
    }

    if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
    {
        if (state->fCopyPageCount)
        {
            DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

            if (state->fCopyPageAlloc)
            {
                state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
            }
            else if (state->fCopyMD)
            {
                DEBG("sync IOBMD\n");

                if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
                {
                    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

                    IOByteCount bytes;

                    if (kWalkSyncIn & op)
                        bytes = poMD->writeBytes(state->fPreparedOffset,
                                                    state->fCopyMD->getBytesNoCopy(),
                                                    state->fPreparedLength);
                    else
                        bytes = poMD->readBytes(state->fPreparedOffset,
                                                    state->fCopyMD->getBytesNoCopy(),
                                                    state->fPreparedLength);
                    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
                    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
                }
                else
                    ret = kIOReturnSuccess;
            }
        }
    }

    if (kWalkComplete & op)
    {
        if (state->fCopyPageAlloc)
        {
            vm_page_free_list(state->fCopyPageAlloc, FALSE);
            state->fCopyPageAlloc = 0;
            state->fCopyPageCount = 0;
        }
        if (state->fCopyMD)
        {
            state->fCopyMD->release();
            state->fCopyMD = 0;
        }

        state->fPrepared = false;
    }
    return (ret);
}
UInt8
IODMACommand::getNumAddressBits(void)
{
    return (fNumAddressBits);
}

UInt32
IODMACommand::getAlignment(void)
{
    return (fAlignMask + 1);
}
IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
                                       UInt8           numAddressBits,
                                       UInt64          maxSegmentSize,
                                       MappingOptions  mappingOptions,
                                       UInt64          maxTransferSize,
                                       UInt32          alignment,
                                       IOMapper       *mapper,
                                       UInt64          offset,
                                       UInt64          length,
                                       bool            flushCache,
                                       bool            synchronize)
{
    if (fActive)
        return kIOReturnNotPermitted;

    if (!outSegFunc)
        return kIOReturnBadArgument;

    bool is32Bit = (OutputHost32   == outSegFunc || OutputBig32 == outSegFunc
                 || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
        if (!numAddressBits)
            numAddressBits = 32;
        else if (numAddressBits > 32)
            return kIOReturnBadArgument;        // Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
        return kIOReturnBadArgument;

    if (!maxSegmentSize)
        maxSegmentSize--;       // Set Max segment to -1
    if (!maxTransferSize)
        maxTransferSize--;      // Set Max transfer to -1

    if (mapper && !OSDynamicCast(IOMapper, mapper))
    {
        fInternalState->fDevice = mapper;
        mapper = 0;
    }
    if (!mapper)
    {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:      break;
    case kNonCoherent: break;
    case kBypassed:
        if (mapper && !mapper->getBypassMask(&fBypassMask))
            return kIOReturnBadArgument;
        break;
    default:
        return kIOReturnBadArgument;
    };

    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
        alignment = 1;
    fAlignMask       = alignment - 1;
    if (mapper != fMapper)
    {
        mapper->retain();
        fMapper->release();
        fMapper = mapper;
    }

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));

    return prepare(offset, length, flushCache, synchronize);
}
IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    MappingOptions mappingOptions = fMappingOptions;

    if (!length)
        length = fMDSummary.fLength;

    if (length > fMaxTransferSize)
        return kIOReturnNoSpace;

    if (IS_NONCOHERENT(mappingOptions) && flushCache) {
        IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

        poMD->performOperation(kIOMemoryIncoherentIOStore, offset, length);
    }
    if (fActive++)
    {
        if ((state->fPreparedOffset != offset)
          || (state->fPreparedLength != length))
        ret = kIOReturnNotReady;
    }
    else
    {
        state->fPreparedOffset = offset;
        state->fPreparedLength = length;

        state->fMapContig      = false;
        state->fMisaligned     = false;
        state->fDoubleBuffer   = false;
        state->fPrepared       = false;
        state->fCopyNext       = NULL;
        state->fCopyPageAlloc  = 0;
        state->fCopyPageCount  = 0;
        state->fNextRemapPage  = NULL;
        state->fCopyMD         = 0;
        state->fLocalMapperPageAlloc = 0;
        state->fLocalMapperPageCount = 0;

        state->fLocalMapper    = (fMapper && (fMapper != IOMapper::gSystem));

        state->fSourceAlignMask = fAlignMask;
        if (fMapper)
            state->fSourceAlignMask &= page_mask;

        state->fCursor = state->fIterateOnly
                        || (!state->fCheckAddressing
                            && (!state->fSourceAlignMask
                                || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));

        if (!state->fCursor)
        {
            IOOptionBits op = kWalkPrepare | kWalkPreflight;
            if (synchronize)
                op |= kWalkSyncOut;
            ret = walkAll(op);
        }

        if (fMapper)
        {
            if (state->fLocalMapper)
            {
                state->fLocalMapperPageCount = atop_64(round_page(
                        state->fPreparedLength + ((state->fPreparedOffset + fMDSummary.fPageAlign) & page_mask)));
                state->fLocalMapperPageAlloc = ptoa_64(fMapper->iovmAllocDMACommand(this, state->fLocalMapperPageCount));
                if (!state->fLocalMapperPageAlloc)
                {
                    DEBG("IODMACommand !iovmAlloc");
                    return (kIOReturnNoResources);
                }
                state->fMapContig = true;
            }
            else
            {
                IOMDDMAMapArgs mapArgs;
                bzero(&mapArgs, sizeof(mapArgs));
                mapArgs.fMapper = fMapper;
                mapArgs.fMapSpec.device         = state->fDevice;
                mapArgs.fMapSpec.alignment      = fAlignMask + 1;
                mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? fNumAddressBits : 64;
                mapArgs.fOffset = state->fPreparedOffset;
                mapArgs.fLength = state->fPreparedLength;
                const IOMemoryDescriptor * md = state->fCopyMD;
                if (!md) md = fMemory;
                ret = md->dmaCommandOperation(kIOMDDMAMap | state->fIterateOnly, &mapArgs, sizeof(mapArgs));
                if (kIOReturnSuccess == ret)
                {
                    state->fLocalMapperPageAlloc = mapArgs.fAlloc;
                    state->fLocalMapperPageCount = mapArgs.fAllocCount;
                    state->fMapContig = mapArgs.fMapContig;
                }
                ret = kIOReturnSuccess;
            }
        }

        if (kIOReturnSuccess == ret)
            state->fPrepared = true;
    }
    return ret;
}
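/*
 * prepare()/complete() nest: fActive counts outstanding prepares, and only
 * the 0 -> 1 transition (re)builds the walk state.  Repeated prepares must
 * cover the same offset/length, otherwise kIOReturnNotReady is returned.
 */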
IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    if (fActive < 1)
        return kIOReturnNotReady;

    if (!--fActive)
    {
        if (!state->fCursor)
        {
            IOOptionBits op = kWalkComplete;
            if (synchronize)
                op |= kWalkSyncIn;
            ret = walkAll(op);
        }
        if (state->fLocalMapperPageAlloc)
        {
            if (state->fLocalMapper)
            {
                fMapper->iovmFreeDMACommand(this, atop_64(state->fLocalMapperPageAlloc), state->fLocalMapperPageCount);
            }
            else if (state->fLocalMapperPageCount)
            {
                fMapper->iovmFree(atop_64(state->fLocalMapperPageAlloc), state->fLocalMapperPageCount);
            }
            state->fLocalMapperPageAlloc = 0;
            state->fLocalMapperPageCount = 0;
        }

        state->fPrepared = false;

        if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
        {
            IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

            poMD->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
        }
    }

    return ret;
}
IOReturn
IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
{
    IODMACommandInternal * state = fInternalState;
    if (fActive < 1)
        return (kIOReturnNotReady);

    if (offset)
        *offset = state->fPreparedOffset;
    if (length)
        *length = state->fPreparedLength;

    return (kIOReturnSuccess);
}
IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOOptionBits           op;

    if (kIODirectionOutIn == (kIODirectionOutIn & options))
        return kIOReturnBadArgument;

    if (fActive < 1)
        return kIOReturnNotReady;

    op = 0;
    if (kForceDoubleBuffer & options)
    {
        if (state->fDoubleBuffer)
            return kIOReturnSuccess;
        if (state->fCursor)
            state->fCursor = false;
        else
            ret = walkAll(kWalkComplete);

        op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
    }
    else if (state->fCursor)
        return kIOReturnSuccess;

    if (kIODirectionIn & options)
        op |= kWalkSyncIn | kWalkSyncAlways;
    else if (kIODirectionOut & options)
        op |= kWalkSyncOut | kWalkSyncAlways;

    ret = walkAll(op);

    return ret;
}
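/*
 * Illustrative use of synchronize() (not part of this file): after the
 * hardware has written into a bounce-buffered transfer, a driver would pull
 * the data back to the client's memory with
 *
 *      cmd->synchronize(kIODirectionIn);
 *
 * kForceDoubleBuffer | kIODirectionOut switches an already prepared
 * transfer to fully double-buffered operation.
 */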
struct IODMACommandTransferContext
{
    void *   buffer;
    UInt64   bufferOffset;
    UInt64   remaining;
    UInt32   op;
};
enum
{
    kIODMACommandTransferOpReadBytes  = 1,
    kIODMACommandTransferOpWriteBytes = 2
};
IOReturn
IODMACommand::transferSegment(void   *reference,
                        IODMACommand *target,
                        Segment64     segment,
                        void         *segments,
                        UInt32        segmentIndex)
{
    IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
    UInt64   length  = min(segment.fLength, context->remaining);
    addr64_t ioAddr  = segment.fIOVMAddr;
    addr64_t cpuAddr = ioAddr;

    context->remaining -= length;

    while (length)
    {
        UInt64 copyLen = length;
        if ((kMapped == MAPTYPE(target->fMappingOptions))
            && target->fMapper)
        {
            cpuAddr = target->fMapper->mapAddr(ioAddr);
            copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
            ioAddr += copyLen;
        }

        switch (context->op)
        {
            case kIODMACommandTransferOpReadBytes:
                copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen,
                        cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
                break;
            case kIODMACommandTransferOpWriteBytes:
                copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen,
                        cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
                break;
        }
        length                -= copyLen;
        context->bufferOffset += copyLen;
    }

    return (context->remaining ? kIOReturnSuccess : kIOReturnOverrun);
}
UInt64
IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
{
    IODMACommandInternal *      state = fInternalState;
    IODMACommandTransferContext context;
    Segment64                   segments[1];
    UInt32                      numSegments = 0-1;

    if (fActive < 1)
        return (0);

    if (offset >= state->fPreparedLength)
        return (0);
    length = min(length, state->fPreparedLength - offset);

    context.buffer       = buffer;
    context.bufferOffset = 0;
    context.remaining    = length;
    context.op           = transferOp;
    (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);

    return (length - context.remaining);
}
UInt64
IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length));
}

UInt64
IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length));
}
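/*
 * readBytes()/writeBytes() move data between a caller buffer and the
 * prepared range via copypv(), so they work even while the memory is wired
 * for DMA; both return the number of bytes actually transferred.
 */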
IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
                            offsetP, segmentsP, numSegmentsP));
}
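/*
 * Illustrative driver-side loop (assumed names, not part of this file):
 * generate and program scatter/gather entries until the prepared range is
 * consumed.
 *
 *      Segment64 segs[8];
 *      UInt64    offset = 0;
 *      UInt64    length;
 *      cmd->getPreparedOffsetAndLength(NULL, &length);
 *      while (offset < length) {
 *          UInt32 numSegs = 8;
 *          if (kIOReturnSuccess != cmd->genIOVMSegments(&offset, segs, &numSegs) || !numSegs)
 *              break;
 *          for (UInt32 i = 0; i < numSegs; i++)
 *              hwWriteDescriptor(segs[i].fIOVMAddr, segs[i].fLength); // hypothetical helper
 *      }
 */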
IOReturn
IODMACommand::genIOVMSegments(uint32_t op,
                              InternalSegmentFunction outSegFunc,
                              void   *reference,
                              UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp = kIOMDWalkSegments;
    IOReturn               ret  = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive)
        return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
        return kIOReturnBadArgument;

    IOMDDMAWalkSegmentArgs *state =
        (IOMDDMAWalkSegmentArgs *)(void *) fState;

    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
        return kIOReturnOverrun;

    if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
        state->fOffset                 = 0;
        state->fIOVMAddr               = 0;
        internalState->fNextRemapPage  = NULL;
        internalState->fNewMD          = false;
        state->fMapped                 = (IS_MAPPED(fMappingOptions) && fMapper);
        mdOp                           = kIOMDFirstSegment;
    };

    UInt64    bypassMask  = fBypassMask;
    UInt32    segIndex    = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg      = { 0, 0 };
    addr64_t  maxPhys;

    if (fNumAddressBits && (fNumAddressBits < 64))
        maxPhys = (1ULL << fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    while (state->fIOVMAddr || (state->fOffset < memLength))
    {
        // state = next seg
        if (!state->fIOVMAddr) {

            IOReturn rtn;

            state->fOffset = offset;
            state->fLength = memLength - offset;

            if (internalState->fMapContig && internalState->fLocalMapperPageAlloc)
            {
                state->fIOVMAddr = internalState->fLocalMapperPageAlloc + offset;
                rtn = kIOReturnSuccess;
#if 0
                {
                    uint64_t checkOffset;
                    IOPhysicalLength segLen;
                    for (checkOffset = 0; checkOffset < state->fLength; )
                    {
                        addr64_t phys = const_cast<IOMemoryDescriptor *>(fMemory)->getPhysicalSegment(checkOffset + offset, &segLen, kIOMemoryMapperNone);
                        if (fMapper->mapAddr(state->fIOVMAddr + checkOffset) != phys)
                        {
                            panic("%llx != %llx:%llx, %llx phys: %llx %llx\n", offset,
                                    state->fIOVMAddr + checkOffset, fMapper->mapAddr(state->fIOVMAddr + checkOffset), state->fLength,
                                    phys, checkOffset);
                        }
                        checkOffset += page_size - (phys & page_mask);
                    }
                }
#endif
            }
            else
            {
                const IOMemoryDescriptor * memory =
                    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
                rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
                mdOp = kIOMDWalkSegments;
            }

            if (rtn == kIOReturnSuccess)
            {
                assert(state->fIOVMAddr);
                assert(state->fLength);
                if ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr) {
                    UInt64 length = state->fLength;
                    offset          += length;
                    curSeg.fLength  += length;
                    state->fIOVMAddr = 0;
                }
            }
            else if (rtn == kIOReturnOverrun)
                state->fIOVMAddr = state->fLength = 0;  // At end
            else
                return rtn;
        }

        // seg = state, offset = end of seg
        if (!curSeg.fIOVMAddr)
        {
            UInt64 length = state->fLength;
            offset          += length;
            curSeg.fIOVMAddr = state->fIOVMAddr | bypassMask;
            curSeg.fLength   = length;
            state->fIOVMAddr = 0;
        }

        if (!state->fIOVMAddr)
        {
            if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
            {
                if (internalState->fCursor)
                {
                    curSeg.fIOVMAddr = 0;
                    ret = kIOReturnMessageTooLarge;
                    break;
                }
                else if (curSeg.fIOVMAddr <= maxPhys)
                {
                    UInt64 remain, newLength;

                    newLength        = (maxPhys + 1 - curSeg.fIOVMAddr);
                    DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
                    remain           = curSeg.fLength - newLength;
                    state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
                    curSeg.fLength   = newLength;
                    state->fLength   = remain;
                    offset          -= remain;
                }
                else
                {
                    UInt64    addr = curSeg.fIOVMAddr;
                    ppnum_t   addrPage = atop_64(addr);
                    vm_page_t remap = NULL;
                    UInt64    remain, newLength;

                    DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength);

                    remap = internalState->fNextRemapPage;
                    if (remap && (addrPage == vm_page_get_offset(remap)))
                    {
                    }
                    else for (remap = internalState->fCopyPageAlloc;
                                remap && (addrPage != vm_page_get_offset(remap));
                                remap = vm_page_get_next(remap))
                    {
                    }

                    if (!remap) panic("no remap page found");

                    curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap))
                                        + (addr & PAGE_MASK);
                    internalState->fNextRemapPage = vm_page_get_next(remap);

                    newLength = PAGE_SIZE - (addr & PAGE_MASK);
                    if (newLength < curSeg.fLength)
                    {
                        remain           = curSeg.fLength - newLength;
                        state->fIOVMAddr = addr + newLength;
                        curSeg.fLength   = newLength;
                        state->fLength   = remain;
                        offset          -= remain;
                    }
                    DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset);
                }
            }

            if (curSeg.fLength > fMaxSegmentSize)
            {
                UInt64 remain = curSeg.fLength - fMaxSegmentSize;

                state->fIOVMAddr = fMaxSegmentSize + curSeg.fIOVMAddr;
                curSeg.fLength   = fMaxSegmentSize;

                state->fLength   = remain;
                offset          -= remain;
            }

            if (internalState->fCursor
                && (0 != (internalState->fSourceAlignMask & curSeg.fIOVMAddr)))
            {
                curSeg.fIOVMAddr = 0;
                ret = kIOReturnNotAligned;
                break;
            }

            if (offset >= memLength)
            {
                curSeg.fLength   -= (offset - memLength);
                offset = memLength;
                state->fIOVMAddr = state->fLength = 0;  // At end
                break;
            }
        }

        if (state->fIOVMAddr) {
            if ((segIndex + 1 == numSegments))
                break;

            ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
            curSeg.fIOVMAddr = 0;
            if (kIOReturnSuccess != ret)
                break;
        }
    }

    if (curSeg.fIOVMAddr) {
        ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
        state->fOffset = offset;
        *offsetP       = offset - internalState->fPreparedOffset;
        *numSegmentsP  = segIndex;
    }
    return ret;
}
IOReturn
IODMACommand::clientOutputSegment(
        void *reference, IODMACommand *target,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    SegmentFunction segmentFunction = (SegmentFunction) reference;
    IOReturn ret = kIOReturnSuccess;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64)
        && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
        && (target->reserved->fLocalMapperPageAlloc || !target->fMapper))
    {
        DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    if (!(*segmentFunction)(target, segment, vSegList, outSegIndex))
    {
        DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    return (ret);
}
IOReturn
IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
                              UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
                            offsetP, segmentsP, numSegmentsP));
}
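/*
 * The six canned SegmentFunctions below emit each Segment64 in the format a
 * client asked for: host-, big- or little-endian, in 32- or 64-bit width.
 * The 32-bit variants simply truncate fIOVMAddr/fLength, which matches the
 * numAddressBits <= 32 limit enforced for them in initWithSpecification()
 * and prepareWithSpecification() above.
 */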
bool
IODMACommand::OutputHost32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment32 *base = (Segment32 *) vSegList;
    base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
    base[outSegIndex].fLength   = (UInt32) segment.fLength;
    return true;
}

bool
IODMACommand::OutputBig32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteBigInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteLittleInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputHost64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment64 *base = (Segment64 *) vSegList;
    base[outSegIndex] = segment;
    return true;
}

bool
IODMACommand::OutputBig64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteBigInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteLittleInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}