/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28
29#include <IOKit/assert.h>
30
31#include <libkern/OSTypes.h>
32#include <libkern/OSByteOrder.h>
33
34#include <IOKit/IOReturn.h>
35#include <IOKit/IOLib.h>
36#include <IOKit/IODMACommand.h>
37#include <IOKit/IOMapper.h>
38#include <IOKit/IOMemoryDescriptor.h>
39#include <IOKit/IOBufferMemoryDescriptor.h>
40
41#include "IOKitKernelInternal.h"
42#include "IOCopyMapper.h"
43
// Extract the mapping-type bits from a MappingOptions word and classify it
// as one of the three DMA strategies this command supports.
#define MAPTYPE(type)		((UInt) (type) & kTypeMask)
#define IS_MAPPED(type)		(MAPTYPE(type) == kMapped)
#define IS_BYPASSED(type)	(MAPTYPE(type) == kBypassed)
#define IS_NONCOHERENT(type)	(MAPTYPE(type) == kNonCoherent)
48
// Operation flags passed to walkAll()/segmentOp() describing which phase of
// the bounce-buffer walk is being performed and which copies are required.
enum
{
    kWalkSyncIn       = 0x01,	// bounce -> md
    kWalkSyncOut      = 0x02,	// bounce <- md
    kWalkSyncAlways   = 0x04,
    kWalkPreflight    = 0x08,
    kWalkDoubleBuffer = 0x10,
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80
};
60
// Shorthand accessors for the expansion data hung off 'reserved'.
#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary


#if 1
// Decide whether a bounce copy is needed for this walk op and transfer
// direction.  No recorded direction => OutIn, i.e. always copy.
#define SHOULD_COPY_DIR(op, direction)					    \
	((kIODirectionNone == (direction))				    \
	    || (kWalkSyncAlways & (op))					    \
	    || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)   \
						    & (direction)))

#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif

// Debug tracing; compiled out by default.
#if 0
#define DEBG(fmt, args...)	{ kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)	{}
#endif
84
85
/**************************** class IODMACommand ***************************/

// NOTE(review): 'super' is defined as OSObject while the metaclass macro
// names IOCommand as the superclass; the macro argument is what establishes
// the runtime hierarchy — confirm the #define mismatch is intentional.
#undef super
#define super OSObject
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);

// Vtable padding: slots 0-2 are consumed by exported methods added after
// the initial release; the remainder stay reserved for binary-compatible
// future expansion.
OSMetaClassDefineReservedUsed(IODMACommand,  0);
OSMetaClassDefineReservedUsed(IODMACommand,  1);
OSMetaClassDefineReservedUsed(IODMACommand,  2);
OSMetaClassDefineReservedUnused(IODMACommand,  3);
OSMetaClassDefineReservedUnused(IODMACommand,  4);
OSMetaClassDefineReservedUnused(IODMACommand,  5);
OSMetaClassDefineReservedUnused(IODMACommand,  6);
OSMetaClassDefineReservedUnused(IODMACommand,  7);
OSMetaClassDefineReservedUnused(IODMACommand,  8);
OSMetaClassDefineReservedUnused(IODMACommand,  9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);
108
109IODMACommand *
110IODMACommand::withSpecification(SegmentFunction outSegFunc,
111 UInt8 numAddressBits,
112 UInt64 maxSegmentSize,
113 MappingOptions mappingOptions,
114 UInt64 maxTransferSize,
115 UInt32 alignment,
116 IOMapper *mapper,
117 void *refCon)
118{
119 IODMACommand * me = new IODMACommand;
120
121 if (me && !me->initWithSpecification(outSegFunc,
122 numAddressBits, maxSegmentSize,
123 mappingOptions, maxTransferSize,
124 alignment, mapper, refCon))
125 {
126 me->release();
127 return 0;
128 };
129
130 return me;
131}
132
/*
 * Create a new, independent command carrying this command's DMA
 * specification.  fAlignMask is stored as (alignment - 1), hence the
 * conversion back to an alignment value.
 */
IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    return withSpecification(fOutSeg, fNumAddressBits, fMaxSegmentSize,
	    fMappingOptions, fMaxTransferSize, fAlignMask + 1, fMapper, refCon);
}
139
// NOTE(review): self-referential cast — expands to a cast of itself and is
// never usable; appears to be a vestigial sentinel.  Confirm before removing.
#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)
141
/*
 * Record the DMA specification: segment output function, addressing limit,
 * segment/transfer maxima, mapping strategy, alignment and mapper.  Returns
 * false on an invalid combination or allocation failure.
 */
bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
				    UInt8	    numAddressBits,
				    UInt64	    maxSegmentSize,
				    MappingOptions  mappingOptions,
				    UInt64	    maxTransferSize,
				    UInt32	    alignment,
				    IOMapper       *mapper,
				    void	   *refCon)
{
    if (!super::init() || !outSegFunc || !numAddressBits)
	return false;

    // The 32-bit output functions cannot express addresses above 4GB.
    bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
		 || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
	// NOTE(review): numAddressBits == 0 is rejected by the guard above,
	// so this 32-bit default is currently unreachable — confirm whether
	// the guard or the default reflects the intended contract.
	if (!numAddressBits)
	    numAddressBits = 32;
	else if (numAddressBits > 32)
	    return false;		// Wrong output function for bits
    }

    // An addressing limit smaller than a page cannot describe any segment.
    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
	return false;

    if (!maxSegmentSize)
	maxSegmentSize--;	// Set Max segment to -1
    if (!maxTransferSize)
	maxTransferSize--;	// Set Max transfer to -1

    // Fall back to the system mapper when none is supplied.
    if (!mapper)
    {
	IOMapper::checkForSystemMapper();
	mapper = IOMapper::gSystem;
    }

    fNumSegments     = 0;
    fBypassMask      = 0;
    fOutSeg	     = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
	alignment = 1;
    fAlignMask	     = alignment - 1;
    fMapper          = mapper;
    fRefCon          = refCon;

    // kNonCoherent runs unmapped; kBypassed needs the mapper's bypass mask
    // (OR'd into every emitted address).
    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:                   break;
    case kNonCoherent: fMapper = 0; break;
    case kBypassed:
	if (mapper && !mapper->getBypassMask(&fBypassMask))
	    return false;
	break;
    default:
	return false;
    };

    if (fMapper)
	fMapper->retain();

    // Expansion data; released in free().
    reserved = IONew(IODMACommandInternal, 1);
    if (!reserved)
	return false;
    bzero(reserved, sizeof(IODMACommandInternal));

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));

    return true;
}
216
/*
 * Destructor: release the expansion data and the retained mapper.  The
 * memory descriptor is expected to have been cleared by the client first.
 */
void
IODMACommand::free()
{
    if (reserved)
	IODelete(reserved, IODMACommandInternal, 1);

    if (fMapper)
	fMapper->release();

    super::free();
}
228
/*
 * Attach (or re-attach) the memory descriptor this command targets.
 * Re-setting the current descriptor with autoPrepare == false unwinds any
 * outstanding prepare() nesting.  Replacing a different descriptor while
 * active returns kIOReturnBusy rather than blocking.
 */
IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    if (mem == fMemory)
    {
	if (!autoPrepare)
	{
	    while (fActive)
		complete();
	}
	return kIOReturnSuccess;
    }

    if (fMemory) {
	// As we are almost certainly being called from a work loop thread
	// if fActive is true it is probably not a good time to potentially
	// block. Just test for it and return an error
	if (fActive)
	    return kIOReturnBusy;
	clearMemoryDescriptor();
    };

    if (mem) {
	// Cache the descriptor's characteristics (length, direction,
	// highest physical page) for later walks.
	bzero(&fMDSummary, sizeof(fMDSummary));
	IOReturn rtn = mem->dmaCommandOperation(
		kIOMDGetCharacteristics,
		&fMDSummary, sizeof(fMDSummary));
	if (rtn)
	    return rtn;

	ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

	// Decide whether generated segments must be checked against the
	// addressing limit, or whether the mapper guarantees reachability.
	if ((kMapped == MAPTYPE(fMappingOptions))
	    && fMapper
	    && (!fNumAddressBits || (fNumAddressBits >= 31)))
	    // assuming mapped space is 2G
	    fInternalState->fCheckAddressing = false;
	else
	    fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));

	fInternalState->fNewMD = true;
	mem->retain();
	fMemory = mem;

	mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
	if (autoPrepare)
	    return prepare();
    };

    return kIOReturnSuccess;
}
280
/*
 * Detach the current memory descriptor.  With autoComplete, any outstanding
 * prepare() nesting is completed first; otherwise an active command fails
 * with kIOReturnNotReady.
 */
IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
    if (fActive && !autoComplete)
	return (kIOReturnNotReady);

    if (fMemory) {
	while (fActive)
	    complete();
	fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
	fMemory->release();
	fMemory = 0;
    }

    return (kIOReturnSuccess);
}
297
298const IOMemoryDescriptor *
299IODMACommand::getMemoryDescriptor() const
300{
301 return fMemory;
302}
303
304
/*
 * Internal per-segment callback used by walkAll().  'reference' carries the
 * walk-op flags.  In the preflight phase it counts the bounce pages needed
 * for the portion of each segment that is misaligned or above the device's
 * addressing limit; in the prepare/sync phases it inserts mappings into the
 * copy mapper and performs the physical bounce copies.
 */
IOReturn
IODMACommand::segmentOp(
			void         *reference,
			IODMACommand *target,
			Segment64     segment,
			void         *segments,
			UInt32        segmentIndex)
{
    IOOptionBits op = (uintptr_t) reference;
    addr64_t     maxPhys, address;
    addr64_t     remapAddr = 0;
    uint64_t     length;
    uint32_t     numPages;

    IODMACommandInternal * state = target->reserved;

    // Highest directly reachable physical address; all-ones == no limit.
    if (target->fNumAddressBits && (target->fNumAddressBits < 64) && !state->fLocalMapper)
	maxPhys = (1ULL << target->fNumAddressBits);
    else
	maxPhys = 0;
    maxPhys--;

    address = segment.fIOVMAddr;
    length  = segment.fLength;

    assert(address);
    assert(length);

    // Latch misalignment; once set it forces double buffering.
    if (!state->fMisaligned)
    {
	state->fMisaligned |= (0 != (state->fSourceAlignMask & address));
	if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, state->fSourceAlignMask);
    }

    if (state->fMisaligned && (kWalkPreflight & op))
	return (kIOReturnNotAligned);

    // Without double buffering, only the tail above maxPhys needs bouncing.
    if (!state->fDoubleBuffer)
    {
	if ((address + length - 1) <= maxPhys)
	{
	    length = 0;		// entirely reachable — nothing to bounce
	}
	else if (address <= maxPhys)
	{
	    DEBG("tail %qx, %qx", address, length);
	    length = (address + length - maxPhys - 1);
	    address = maxPhys + 1;
	    DEBG("-> %qx, %qx\n", address, length);
	}
    }

    if (!length)
	return (kIOReturnSuccess);

    numPages = atop_64(round_page_64(length));
    remapAddr = state->fCopyNext;

    if (kWalkPreflight & op)
    {
	// Phase 1: just total up the bounce pages required.
	state->fCopyPageCount += numPages;
    }
    else
    {
	if (kWalkPrepare & op)
	{
	    // Map each original page at the corresponding copy-mapper slot.
	    for (IOItemCount idx = 0; idx < numPages; idx++)
		gIOCopyMapper->iovmInsert(atop_64(remapAddr), idx, atop_64(address) + idx);
	}
	if (state->fDoubleBuffer)
	    state->fCopyNext += length;
	else
	{
	    state->fCopyNext += round_page(length);
	    remapAddr += (address & PAGE_MASK);	// preserve in-page offset
	}

	if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
	{
	    DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
		 (kWalkSyncIn & op) ? "->" : "<-",
		 address, length, op);
	    // Physical-to-physical copy between the bounce pages and the
	    // original memory, in the direction the op requests.
	    if (kWalkSyncIn & op)
	    { // cppvNoModSnk
		copypv(remapAddr, address, length,
		       cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
	    }
	    else
	    {
		copypv(address, remapAddr, length,
		       cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
	    }
	}
    }

    return kIOReturnSuccess;
}
402
/*
 * Drive a full walk over the prepared range.  Depending on the op flags this
 * (a) preflights the transfer, sizing and allocating bounce resources
 * (copy-mapper pages, or an IOBufferMemoryDescriptor when the mapper cannot
 * supply them), (b) performs the sync copies between bounce storage and the
 * client's memory, and/or (c) tears the bounce resources down on complete.
 */
IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn ret = kIOReturnSuccess;
    UInt32   numSegments;
    UInt64   offset;

    if (kWalkPreflight & op)
    {
	// Reset all per-prepare bounce bookkeeping.
	state->fMapContig    = false;
	state->fMisaligned   = false;
	state->fDoubleBuffer = false;
	state->fPrepared     = false;
	state->fCopyNext     = 0;
	state->fCopyMapperPageAlloc  = 0;
	state->fLocalMapperPageAlloc = 0;
	state->fCopyPageCount        = 0;
	state->fNextRemapIndex       = 0;
	state->fCopyMD               = 0;

	if (!(kWalkDoubleBuffer & op))
	{
	    // Count the pages that need bouncing (segmentOp, preflight).
	    offset = 0;
	    numSegments = 0-1;
	    ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
	}

	op &= ~kWalkPreflight;

	// Misalignment (or an explicit request) forces a full double buffer.
	state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
	if (state->fDoubleBuffer)
	    state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));

	if (state->fCopyPageCount)
	{
	    IOMapper * mapper;
	    ppnum_t    mapBase = 0;

	    DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

	    mapper = gIOCopyMapper;
	    if (mapper)
		mapBase = mapper->iovmAlloc(state->fCopyPageCount);
	    if (mapBase)
	    {
		// Copy-mapper pages obtained: populate the mappings and do
		// the initial sync in the same walk.
		state->fCopyMapperPageAlloc = mapBase;
		if (state->fCopyMapperPageAlloc && state->fDoubleBuffer)
		{
		    DEBG("contig copy map\n");
		    state->fMapContig = true;
		}

		state->fCopyNext = ptoa_64(state->fCopyMapperPageAlloc);
		offset = 0;
		numSegments = 0-1;
		ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
		state->fPrepared = true;
		op &= ~(kWalkSyncIn | kWalkSyncOut);
	    }
	    else
	    {
		// No copy mapper space: fall back to a kernel buffer that is
		// synced with readBytes/writeBytes below.
		DEBG("alloc IOBMD\n");
		state->fCopyMD = IOBufferMemoryDescriptor::withOptions(
				    fMDSummary.fDirection, state->fPreparedLength, state->fSourceAlignMask);

		if (state->fCopyMD)
		{
		    ret = kIOReturnSuccess;
		    state->fPrepared = true;
		}
		else
		{
		    DEBG("IODMACommand !iovmAlloc");
		    return (kIOReturnNoResources);
		}
	    }
	}

	if (state->fLocalMapper)
	{
	    // Device-local mapper: reserve a contiguous IOVM range covering
	    // the whole prepared length.
	    state->fLocalMapperPageCount = atop_64(round_page(state->fPreparedLength));
	    state->fLocalMapperPageAlloc = fMapper->iovmAllocDMACommand(this, state->fLocalMapperPageCount);
	    state->fMapContig = true;
	}
    }

    if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
    {
	if (state->fCopyPageCount)
	{
	    DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

	    if (state->fCopyMapperPageAlloc)
	    {
		// Sync via segmentOp's copypv path.
		state->fCopyNext = ptoa_64(state->fCopyMapperPageAlloc);
		offset = 0;
		numSegments = 0-1;
		ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
	    }
	    else if (state->fCopyMD)
	    {
		DEBG("sync IOBMD\n");

		if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
		{
		    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

		    IOByteCount bytes;

		    // SyncIn: bounce buffer -> client md; SyncOut: reverse.
		    if (kWalkSyncIn & op)
			bytes = poMD->writeBytes(state->fPreparedOffset,
						 state->fCopyMD->getBytesNoCopy(),
						 state->fPreparedLength);
		    else
			bytes = poMD->readBytes(state->fPreparedOffset,
						state->fCopyMD->getBytesNoCopy(),
						state->fPreparedLength);
		    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
		    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
		}
		else
		    ret = kIOReturnSuccess;
	    }
	}
    }

    if (kWalkComplete & op)
    {
	// Tear down whichever bounce resources were allocated.
	if (state->fLocalMapperPageAlloc)
	{
	    fMapper->iovmFreeDMACommand(this, state->fLocalMapperPageAlloc, state->fLocalMapperPageCount);
	    state->fLocalMapperPageAlloc = 0;
	    state->fLocalMapperPageCount = 0;
	}
	if (state->fCopyMapperPageAlloc)
	{
	    gIOCopyMapper->iovmFree(state->fCopyMapperPageAlloc, state->fCopyPageCount);
	    state->fCopyMapperPageAlloc = 0;
	    state->fCopyPageCount = 0;
	}
	if (state->fCopyMD)
	{
	    state->fCopyMD->release();
	    state->fCopyMD = 0;
	}

	state->fPrepared = false;
    }
    return (ret);
}
555
b0d623f7
A
556UInt8
557IODMACommand::getNumAddressBits(void)
558{
559 return (fNumAddressBits);
560}
561
562UInt32
563IODMACommand::getAlignment(void)
564{
565 return (fAlignMask + 1);
566}
567
2d21ac55
A
568IOReturn
569IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
570 UInt8 numAddressBits,
571 UInt64 maxSegmentSize,
572 MappingOptions mappingOptions,
573 UInt64 maxTransferSize,
574 UInt32 alignment,
575 IOMapper *mapper,
576 UInt64 offset,
577 UInt64 length,
578 bool flushCache,
579 bool synchronize)
580{
581 if (fActive)
582 return kIOReturnNotPermitted;
583
584 if (!outSegFunc || !numAddressBits)
585 return kIOReturnBadArgument;
586
587 bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
588 || OutputLittle32 == outSegFunc);
589 if (is32Bit)
590 {
591 if (!numAddressBits)
592 numAddressBits = 32;
593 else if (numAddressBits > 32)
594 return kIOReturnBadArgument; // Wrong output function for bits
595 }
596
597 if (numAddressBits && (numAddressBits < PAGE_SHIFT))
598 return kIOReturnBadArgument;
599
600 if (!maxSegmentSize)
601 maxSegmentSize--; // Set Max segment to -1
602 if (!maxTransferSize)
603 maxTransferSize--; // Set Max transfer to -1
604
605 if (!mapper)
606 {
607 IOMapper::checkForSystemMapper();
608 mapper = IOMapper::gSystem;
609 }
610
611 switch (MAPTYPE(mappingOptions))
612 {
613 case kMapped: break;
614 case kNonCoherent: fMapper = 0; break;
615 case kBypassed:
616 if (mapper && !mapper->getBypassMask(&fBypassMask))
617 return kIOReturnBadArgument;
618 break;
619 default:
620 return kIOReturnBadArgument;
621 };
622
623 fNumSegments = 0;
624 fBypassMask = 0;
625 fOutSeg = outSegFunc;
626 fNumAddressBits = numAddressBits;
627 fMaxSegmentSize = maxSegmentSize;
628 fMappingOptions = mappingOptions;
629 fMaxTransferSize = maxTransferSize;
630 if (!alignment)
631 alignment = 1;
632 fAlignMask = alignment - 1;
b0d623f7
A
633 if (mapper != fMapper)
634 {
635 mapper->retain();
636 fMapper->release();
637 fMapper = mapper;
638 }
2d21ac55
A
639
640 fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
641
642 return prepare(offset, length, flushCache, synchronize);
643}
644
645
0c530ab8
A
/*
 * Prepare (a sub-range of) the attached memory for DMA.  Nested calls are
 * counted in fActive and must use the same range.  On the first prepare the
 * internal walk state is reset and, unless a direct "cursor" walk is
 * possible, walkAll() sets up bounce buffering (optionally syncing out).
 */
IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    MappingOptions mappingOptions = fMappingOptions;

    // length == 0 means the descriptor's whole length.
    if (!length)
	length = fMDSummary.fLength;

    if (length > fMaxTransferSize)
	return kIOReturnNoSpace;

    // Non-coherent memory: push CPU caches to memory before the device reads.
    if (IS_NONCOHERENT(mappingOptions) && flushCache) {
	IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

	poMD->performOperation(kIOMemoryIncoherentIOStore, offset, length);
    }
    if (fActive++)
    {
	// Nested prepare: only legal for the identical range.
	if ((state->fPreparedOffset != offset)
	 || (state->fPreparedLength != length))
	    ret = kIOReturnNotReady;
    }
    else
    {
	state->fPreparedOffset = offset;
	state->fPreparedLength = length;

	state->fMapContig    = false;
	state->fMisaligned   = false;
	state->fDoubleBuffer = false;
	state->fPrepared     = false;
	state->fCopyNext     = 0;
	state->fCopyMapperPageAlloc = 0;
	state->fCopyPageCount  = 0;
	state->fNextRemapIndex = 0;
	state->fCopyMD         = 0;
	state->fLocalMapperPageAlloc = 0;
	state->fLocalMapperPageCount = 0;

	// A non-system mapper is treated as device-local.
	state->fLocalMapper = (fMapper && (fMapper != IOMapper::gSystem));

	state->fSourceAlignMask = fAlignMask;
	if (state->fLocalMapper)
	    state->fSourceAlignMask &= page_mask;

	// "Cursor" mode: segments can be generated straight from the
	// descriptor with no preflight — possible when only iterating, or
	// when no addressing/alignment constraint can be violated.
	state->fCursor = state->fIterateOnly
			|| (!state->fCheckAddressing
			    && !state->fLocalMapper
			    && (!state->fSourceAlignMask
				|| ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));

	if (!state->fCursor)
	{
	    IOOptionBits op = kWalkPrepare | kWalkPreflight;
	    if (synchronize)
		op |= kWalkSyncOut;
	    ret = walkAll(op);
	}
	if (kIOReturnSuccess == ret)
	    state->fPrepared = true;
    }
    return ret;
}
711
/*
 * Undo one level of prepare() nesting.  When the last nesting level drops,
 * bounce resources are torn down (optionally syncing device data back in)
 * and, for non-coherent memory, CPU caches are invalidated over the
 * prepared range.
 */
IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    if (fActive < 1)
	return kIOReturnNotReady;

    if (!--fActive)
    {
	if (!state->fCursor)
	{
	    IOOptionBits op = kWalkComplete;
	    if (synchronize)
		op |= kWalkSyncIn;
	    ret = walkAll(op);
	}
	state->fPrepared = false;

	if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
	{
	    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

	    poMD->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
	}
    }

    return ret;
}
742
b0d623f7
A
743IOReturn
744IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
745{
746 IODMACommandInternal * state = fInternalState;
747 if (fActive < 1)
748 return (kIOReturnNotReady);
749
750 if (offset)
751 *offset = state->fPreparedOffset;
752 if (length)
753 *length = state->fPreparedLength;
754
755 return (kIOReturnSuccess);
756}
757
0c530ab8
A
/*
 * Explicitly sync bounce storage with the client's memory in one direction
 * (kIODirectionIn or kIODirectionOut, not both).  kForceDoubleBuffer tears
 * down any existing walk state and rebuilds it fully double-buffered.
 * A pure cursor-mode command has nothing to sync and succeeds trivially.
 */
IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOOptionBits           op;

    // Exactly one direction must be requested.
    if (kIODirectionOutIn == (kIODirectionOutIn & options))
	return kIOReturnBadArgument;

    if (fActive < 1)
	return kIOReturnNotReady;

    op = 0;
    if (kForceDoubleBuffer & options)
    {
	if (state->fDoubleBuffer)
	    return kIOReturnSuccess;
	if (state->fCursor)
	    state->fCursor = false;
	else
	    ret = walkAll(kWalkComplete);

	op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
    }
    else if (state->fCursor)
	return kIOReturnSuccess;

    if (kIODirectionIn & options)
	op |= kWalkSyncIn | kWalkSyncAlways;
    else if (kIODirectionOut & options)
	op |= kWalkSyncOut | kWalkSyncAlways;

    ret = walkAll(op);

    return ret;
}
795
2d21ac55
A
// Context threaded through transferSegment() by transfer(): tracks the
// client buffer, how far into it we are, how many bytes remain, and whether
// this is a read or a write of the prepared memory.
struct IODMACommandTransferContext
{
    void *   buffer;
    UInt64   bufferOffset;
    UInt64   remaining;
    UInt32   op;
};
enum
{
    kIODMACommandTransferOpReadBytes  = 1,
    kIODMACommandTransferOpWriteBytes = 2
};
808
/*
 * Segment callback for transfer(): copy up to 'remaining' bytes between the
 * segment's physical pages and the client's kernel buffer.  For mapped
 * commands each page's CPU-visible address is resolved through the mapper,
 * so the copy proceeds page by page.
 */
IOReturn
IODMACommand::transferSegment(void *reference,
			IODMACommand *target,
			Segment64     segment,
			void         *segments,
			UInt32        segmentIndex)
{
    IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
    UInt64   length  = min(segment.fLength, context->remaining);
    addr64_t ioAddr  = segment.fIOVMAddr;
    addr64_t cpuAddr = ioAddr;

    context->remaining -= length;

    while (length)
    {
	UInt64 copyLen = length;
	if ((kMapped == MAPTYPE(target->fMappingOptions))
	    && target->fMapper)
	{
	    // Translate the IOVM address back to physical, one page at most.
	    cpuAddr = target->fMapper->mapAddr(ioAddr);
	    copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
	    ioAddr += copyLen;
	}

	switch (context->op)
	{
	    case kIODMACommandTransferOpReadBytes:
		copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen,
		    cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
		break;
	    case kIODMACommandTransferOpWriteBytes:
		copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen,
		    cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
		break;
	}
	length                -= copyLen;
	context->bufferOffset += copyLen;
    }

    // Returning kIOReturnOverrun once the request is satisfied makes
    // genIOVMSegments() stop walking early; transfer() itself only inspects
    // context->remaining, so this is not reported to the client as an error.
    return (context->remaining ? kIOReturnSuccess : kIOReturnOverrun);
}
851
/*
 * Common implementation of readBytes/writeBytes: walk the prepared range
 * starting at 'offset', copying through transferSegment().  Returns the
 * number of bytes actually transferred (0 if inactive or out of range).
 */
UInt64
IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
{
    IODMACommandInternal *      state = fInternalState;
    IODMACommandTransferContext context;
    Segment64                   segments[1];
    UInt32                      numSegments = 0-1;

    if (fActive < 1)
	return (0);

    if (offset >= state->fPreparedLength)
	return (0);
    length = min(length, state->fPreparedLength - offset);

    context.buffer       = buffer;
    context.bufferOffset = 0;
    context.remaining    = length;
    context.op           = transferOp;
    (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);

    return (length - context.remaining);
}
875
// Copy bytes out of the prepared memory into a kernel buffer; returns the
// byte count actually read.
UInt64
IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length));
}
881
// Copy bytes from a kernel buffer into the prepared memory; returns the
// byte count actually written.
UInt64
IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length));
}
887
0c530ab8
A
/*
 * Public segment generator: walk as a client operation, emitting segments
 * through the output function supplied at specification time (passed down
 * as the clientOutputSegment reference).
 */
IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
			      void   *segmentsP,
			      UInt32 *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
			    offsetP, segmentsP, numSegmentsP));
}
896
/*
 * Core segment state machine.  Starting at *offsetP (relative to the
 * prepared range), pulls physical runs from the memory descriptor (or the
 * bounce copy), coalesces adjacent runs, enforces the addressing limit,
 * fMaxSegmentSize and alignment, and feeds up to *numSegmentsP segments to
 * outSegFunc.  On success *offsetP and *numSegmentsP are updated to reflect
 * what was produced.  Walk position is kept in fState so successive calls
 * resume where the previous one stopped.
 */
IOReturn
IODMACommand::genIOVMSegments(uint32_t op,
			      InternalSegmentFunction outSegFunc,
			      void   *reference,
			      UInt64 *offsetP,
			      void   *segmentsP,
			      UInt32 *numSegmentsP)
{
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp = kIOMDWalkSegments;
    IOReturn               ret  = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive)
	return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
	return kIOReturnBadArgument;

    IOMDDMAWalkSegmentArgs *state =
	(IOMDDMAWalkSegmentArgs *) fState;

    // Convert to an absolute offset within the descriptor.
    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
	return kIOReturnOverrun;

    // Restart the walk when beginning at the front, when the caller jumped
    // to a different offset, or when a new descriptor was attached.
    if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
	state->fOffset = 0;
	state->fIOVMAddr = 0;
	internalState->fNextRemapIndex = 0;
	internalState->fNewMD = false;
	state->fMapped = (IS_MAPPED(fMappingOptions) && fMapper);
	mdOp = kIOMDFirstSegment;
    };

    UInt64    bypassMask = fBypassMask;
    UInt32    segIndex = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg = { 0, 0 };
    addr64_t  maxPhys;

    // Highest address the device can reach; all-ones == unrestricted.
    if (fNumAddressBits && (fNumAddressBits < 64))
	maxPhys = (1ULL << fNumAddressBits);
    else
	maxPhys = 0;
    maxPhys--;

    while ((state->fIOVMAddr) || state->fOffset < memLength)
    {
	// Fetch the next physical run if the previous one was consumed.
	if (!state->fIOVMAddr) {

	    IOReturn rtn;

	    state->fOffset = offset;
	    state->fLength = memLength - offset;

	    if (internalState->fMapContig && (kWalkClient & op))
	    {
		// Contiguously mapped (local mapper or double buffer): the
		// IOVM address is a simple base + offset computation.
		ppnum_t pageNum = internalState->fLocalMapperPageAlloc;
		if (!pageNum)
		    pageNum = internalState->fCopyMapperPageAlloc;
		state->fIOVMAddr = ptoa_64(pageNum)
					+ offset - internalState->fPreparedOffset;
		rtn = kIOReturnSuccess;
	    }
	    else
	    {
		// Ask the (possibly bounced) descriptor for the next run.
		const IOMemoryDescriptor * memory =
		    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
		rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
		mdOp = kIOMDWalkSegments;
	    }

	    if (rtn == kIOReturnSuccess) {
		assert(state->fIOVMAddr);
		assert(state->fLength);
	    }
	    else if (rtn == kIOReturnOverrun)
		state->fIOVMAddr = state->fLength = 0;	// At end
	    else
		return rtn;
	};

	// Start a new output segment, or coalesce when physically adjacent.
	if (!curSeg.fIOVMAddr) {
	    UInt64 length = state->fLength;

	    offset	    += length;
	    curSeg.fIOVMAddr = state->fIOVMAddr | bypassMask;
	    curSeg.fLength   = length;
	    state->fIOVMAddr = 0;
	}
	else if ((curSeg.fIOVMAddr + curSeg.fLength == state->fIOVMAddr)) {
	    UInt64 length = state->fLength;
	    offset	    += length;
	    curSeg.fLength  += length;
	    state->fIOVMAddr = 0;
	};


	if (!state->fIOVMAddr)
	{
	    if (kWalkClient & op)
	    {
		// Constrain the segment to the device's addressing limit.
		if ((curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
		{
		    if (internalState->fCursor)
		    {
			// Cursor mode has no bounce buffer to fall back on.
			curSeg.fIOVMAddr = 0;
			ret = kIOReturnMessageTooLarge;
			break;
		    }
		    else if (curSeg.fIOVMAddr <= maxPhys)
		    {
			// Split: emit the reachable head, requeue the tail.
			UInt64 remain, newLength;

			newLength = (maxPhys + 1 - curSeg.fIOVMAddr);
			DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
			remain = curSeg.fLength - newLength;
			state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
			curSeg.fLength = newLength;
			state->fLength = remain;
			offset -= remain;
		    }
		    else if (gIOCopyMapper)
		    {
			// Entirely unreachable: substitute the copy-mapper
			// alias set up during preflight.  fNextRemapIndex
			// caches the expected slot so the common sequential
			// case avoids the linear search below.
			DEBG("sparse switch %qx, %qx ", curSeg.fIOVMAddr, curSeg.fLength);
			if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
					ptoa_64(internalState->fCopyMapperPageAlloc + internalState->fNextRemapIndex)))
			{

			    curSeg.fIOVMAddr = ptoa_64(internalState->fCopyMapperPageAlloc + internalState->fNextRemapIndex)
						+ (curSeg.fIOVMAddr & PAGE_MASK);
			    internalState->fNextRemapIndex += atop_64(round_page(curSeg.fLength));
			}
			else for (UInt checkRemapIndex = 0; checkRemapIndex < internalState->fCopyPageCount; checkRemapIndex++)
			{
			    if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
					    ptoa_64(internalState->fCopyMapperPageAlloc + checkRemapIndex)))
			    {
				curSeg.fIOVMAddr = ptoa_64(internalState->fCopyMapperPageAlloc + checkRemapIndex)
						    + (curSeg.fIOVMAddr & PAGE_MASK);
				internalState->fNextRemapIndex = checkRemapIndex + atop_64(round_page(curSeg.fLength));
				break;
			    }
			}
			DEBG("-> %qx, %qx\n", curSeg.fIOVMAddr, curSeg.fLength);
		    }
		}
	    }

	    // Enforce the specification's maximum segment size by splitting.
	    if (curSeg.fLength > fMaxSegmentSize)
	    {
		UInt64 remain = curSeg.fLength - fMaxSegmentSize;

		state->fIOVMAddr = fMaxSegmentSize + curSeg.fIOVMAddr;
		curSeg.fLength   = fMaxSegmentSize;

		state->fLength   = remain;
		offset          -= remain;
	    }

	    // Cursor mode cannot fix misalignment — fail the walk.
	    if (internalState->fCursor
		&& (0 != (internalState->fSourceAlignMask & curSeg.fIOVMAddr)))
	    {
		curSeg.fIOVMAddr = 0;
		ret = kIOReturnNotAligned;
		break;
	    }

	    // Clip the final segment to the prepared range and stop.
	    if (offset >= memLength)
	    {
		curSeg.fLength   -= (offset - memLength);
		offset = memLength;
		state->fIOVMAddr = state->fLength = 0;	// At end
		break;
	    }
	}

	if (state->fIOVMAddr) {
	    // More data pending: emit the finished segment unless the
	    // caller's array is full.
	    if ((segIndex + 1 == numSegments))
		break;

	    ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
	    curSeg.fIOVMAddr = 0;
	    if (kIOReturnSuccess != ret)
		break;
	}
    }

    // Emit the trailing segment, if any.
    if (curSeg.fIOVMAddr) {
	ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
	state->fOffset = offset;
	*offsetP = offset - internalState->fPreparedOffset;
	*numSegmentsP = segIndex;
    }
    return ret;
}
1099
1100IOReturn
1101IODMACommand::clientOutputSegment(
1102 void *reference, IODMACommand *target,
1103 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1104{
b0d623f7 1105 SegmentFunction segmentFunction = (SegmentFunction) reference;
0c530ab8
A
1106 IOReturn ret = kIOReturnSuccess;
1107
1108 if ((target->fNumAddressBits < 64)
b0d623f7
A
1109 && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
1110 && (target->reserved->fLocalMapperPageAlloc || !target->reserved->fLocalMapper))
0c530ab8
A
1111 {
1112 DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
1113 ret = kIOReturnMessageTooLarge;
1114 }
1115
b0d623f7 1116 if (!(*segmentFunction)(target, segment, vSegList, outSegIndex))
0c530ab8
A
1117 {
1118 DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
1119 ret = kIOReturnMessageTooLarge;
1120 }
1121
1122 return (ret);
1123}
1124
b0d623f7
A
/*
 * Variant of the public segment generator taking an explicit output
 * function instead of the one recorded at specification time.
 */
IOReturn
IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
				UInt64   *offsetP,
				void     *segmentsP,
				UInt32   *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
			    offsetP, segmentsP, numSegmentsP));
}
1134
0c530ab8
A
1135bool
1136IODMACommand::OutputHost32(IODMACommand *,
1137 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1138{
1139 Segment32 *base = (Segment32 *) vSegList;
1140 base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
1141 base[outSegIndex].fLength = (UInt32) segment.fLength;
1142 return true;
1143}
1144
1145bool
1146IODMACommand::OutputBig32(IODMACommand *,
1147 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1148{
1149 const UInt offAddr = outSegIndex * sizeof(Segment32);
1150 const UInt offLen = offAddr + sizeof(UInt32);
1151 OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
1152 OSWriteBigInt32(vSegList, offLen, (UInt32) segment.fLength);
1153 return true;
1154}
1155
1156bool
1157IODMACommand::OutputLittle32(IODMACommand *,
1158 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1159{
1160 const UInt offAddr = outSegIndex * sizeof(Segment32);
1161 const UInt offLen = offAddr + sizeof(UInt32);
1162 OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
1163 OSWriteLittleInt32(vSegList, offLen, (UInt32) segment.fLength);
1164 return true;
1165}
1166
1167bool
1168IODMACommand::OutputHost64(IODMACommand *,
1169 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1170{
1171 Segment64 *base = (Segment64 *) vSegList;
1172 base[outSegIndex] = segment;
1173 return true;
1174}
1175
1176bool
1177IODMACommand::OutputBig64(IODMACommand *,
1178 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1179{
1180 const UInt offAddr = outSegIndex * sizeof(Segment64);
1181 const UInt offLen = offAddr + sizeof(UInt64);
1182 OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
1183 OSWriteBigInt64(vSegList, offLen, (UInt64) segment.fLength);
1184 return true;
1185}
1186
1187bool
1188IODMACommand::OutputLittle64(IODMACommand *,
1189 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1190{
1191 const UInt offAddr = outSegIndex * sizeof(Segment64);
1192 const UInt offLen = offAddr + sizeof(UInt64);
1193 OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
1194 OSWriteLittleInt64(vSegList, offLen, (UInt64) segment.fLength);
1195 return true;
1196}
1197
1198