]> git.saurik.com Git - apple/xnu.git/blame - iokit/Kernel/IODMACommand.cpp
xnu-1699.22.81.tar.gz
[apple/xnu.git] / iokit / Kernel / IODMACommand.cpp
CommitLineData
0c530ab8 1/*
2d21ac55 2 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
0c530ab8 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0c530ab8 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0c530ab8 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
0c530ab8
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0c530ab8 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
0c530ab8
A
27 */
28
29#include <IOKit/assert.h>
30
31#include <libkern/OSTypes.h>
32#include <libkern/OSByteOrder.h>
33
34#include <IOKit/IOReturn.h>
35#include <IOKit/IOLib.h>
36#include <IOKit/IODMACommand.h>
37#include <IOKit/IOMapper.h>
38#include <IOKit/IOMemoryDescriptor.h>
39#include <IOKit/IOBufferMemoryDescriptor.h>
40
41#include "IOKitKernelInternal.h"
0c530ab8
A
42
// Helpers to extract and classify the mapping type encoded in MappingOptions.
#define MAPTYPE(type)		((UInt) (type) & kTypeMask)
#define IS_MAPPED(type)		(MAPTYPE(type) == kMapped)
#define IS_BYPASSED(type)	(MAPTYPE(type) == kBypassed)
#define IS_NONCOHERENT(type)	(MAPTYPE(type) == kNonCoherent)
47
0c530ab8
A
// Operation flags for the internal walkAll()/segmentOp() state machine.
enum
{
    kWalkSyncIn       = 0x01,	// bounce -> md
    kWalkSyncOut      = 0x02,	// bounce <- md
    kWalkSyncAlways   = 0x04,	// copy regardless of the md's transfer direction
    kWalkPreflight    = 0x08,	// first pass: count pages needing bounce buffering
    kWalkDoubleBuffer = 0x10,	// force bouncing of the entire prepared range
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80	// walk driven by a client segment request
};
59
0c530ab8
A
60
// Convenience aliases into the expansion ("reserved") state block.
#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary


#if 1
// no direction => OutIn
// Decide whether a bounce copy is required for the given walk op and the
// memory descriptor's transfer direction.
#define SHOULD_COPY_DIR(op, direction)					    \
	((kIODirectionNone == (direction))				    \
	    || (kWalkSyncAlways & (op))					    \
	    || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)   \
						    & (direction)))

#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif

#if 0
// Debug logging; compiled out by default.
#define DEBG(fmt, args...)  	{ IOLog(fmt, ## args); kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)  	{}
#endif
83
0c530ab8
A
/**************************** class IODMACommand ***************************/

#undef super
#define super IOCommand
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);

// Reserved vtable slots 0-2 are in use; 3-15 remain as padding for future
// binary-compatible expansion.
OSMetaClassDefineReservedUsed(IODMACommand,  0);
OSMetaClassDefineReservedUsed(IODMACommand,  1);
OSMetaClassDefineReservedUsed(IODMACommand,  2);
OSMetaClassDefineReservedUnused(IODMACommand,  3);
OSMetaClassDefineReservedUnused(IODMACommand,  4);
OSMetaClassDefineReservedUnused(IODMACommand,  5);
OSMetaClassDefineReservedUnused(IODMACommand,  6);
OSMetaClassDefineReservedUnused(IODMACommand,  7);
OSMetaClassDefineReservedUnused(IODMACommand,  8);
OSMetaClassDefineReservedUnused(IODMACommand,  9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);
106
107IODMACommand *
108IODMACommand::withSpecification(SegmentFunction outSegFunc,
109 UInt8 numAddressBits,
110 UInt64 maxSegmentSize,
111 MappingOptions mappingOptions,
112 UInt64 maxTransferSize,
113 UInt32 alignment,
114 IOMapper *mapper,
115 void *refCon)
116{
117 IODMACommand * me = new IODMACommand;
118
119 if (me && !me->initWithSpecification(outSegFunc,
120 numAddressBits, maxSegmentSize,
121 mappingOptions, maxTransferSize,
122 alignment, mapper, refCon))
123 {
124 me->release();
125 return 0;
126 };
127
128 return me;
129}
130
/*
 * Create a new, unattached command carrying the same specification as this
 * one (output function, address width, segment/transfer limits, mapping
 * options, alignment and mapper).
 */
IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    return withSpecification(fOutSeg, fNumAddressBits, fMaxSegmentSize,
	    fMappingOptions, fMaxTransferSize, fAlignMask + 1, fMapper, refCon);
}

// Sentinel segment-function value: self-referential cast of the enum value.
#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)
140bool
141IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
142 UInt8 numAddressBits,
143 UInt64 maxSegmentSize,
144 MappingOptions mappingOptions,
145 UInt64 maxTransferSize,
146 UInt32 alignment,
147 IOMapper *mapper,
148 void *refCon)
149{
150 if (!super::init() || !outSegFunc || !numAddressBits)
151 return false;
152
153 bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
154 || OutputLittle32 == outSegFunc);
155 if (is32Bit)
156 {
157 if (!numAddressBits)
158 numAddressBits = 32;
159 else if (numAddressBits > 32)
160 return false; // Wrong output function for bits
161 }
162
163 if (numAddressBits && (numAddressBits < PAGE_SHIFT))
164 return false;
165
166 if (!maxSegmentSize)
167 maxSegmentSize--; // Set Max segment to -1
168 if (!maxTransferSize)
169 maxTransferSize--; // Set Max transfer to -1
170
171 if (!mapper)
172 {
173 IOMapper::checkForSystemMapper();
174 mapper = IOMapper::gSystem;
175 }
176
177 fNumSegments = 0;
178 fBypassMask = 0;
179 fOutSeg = outSegFunc;
180 fNumAddressBits = numAddressBits;
181 fMaxSegmentSize = maxSegmentSize;
182 fMappingOptions = mappingOptions;
183 fMaxTransferSize = maxTransferSize;
184 if (!alignment)
185 alignment = 1;
186 fAlignMask = alignment - 1;
187 fMapper = mapper;
188 fRefCon = refCon;
189
190 switch (MAPTYPE(mappingOptions))
191 {
192 case kMapped: break;
193 case kNonCoherent: fMapper = 0; break;
194 case kBypassed:
195 if (mapper && !mapper->getBypassMask(&fBypassMask))
196 return false;
197 break;
198 default:
199 return false;
200 };
201
b0d623f7
A
202 if (fMapper)
203 fMapper->retain();
204
2d21ac55 205 reserved = IONew(IODMACommandInternal, 1);
0c530ab8
A
206 if (!reserved)
207 return false;
2d21ac55 208 bzero(reserved, sizeof(IODMACommandInternal));
0c530ab8
A
209
210 fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
211
212 return true;
213}
214
215void
216IODMACommand::free()
217{
218 if (reserved)
2d21ac55 219 IODelete(reserved, IODMACommandInternal, 1);
0c530ab8 220
b0d623f7
A
221 if (fMapper)
222 fMapper->release();
223
0c530ab8
A
224 super::free();
225}
226
/*
 * Attach (or detach, with mem == NULL) a memory descriptor to the command
 * and cache its DMA characteristics.  Optionally prepares the whole
 * descriptor immediately.
 */
IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    IOReturn err = kIOReturnSuccess;

    if (mem == fMemory)
    {
	// Same descriptor: when autoPrepare is off, unwind any outstanding
	// prepare()s so the caller gets a quiescent command back.
	if (!autoPrepare)
	{
	    while (fActive)
		complete();
	}
	return kIOReturnSuccess;
    }

    if (fMemory) {
	// As we are almost certainly being called from a work loop thread
	// if fActive is true it is probably not a good time to potentially
	// block.  Just test for it and return an error
	if (fActive)
	    return kIOReturnBusy;
	clearMemoryDescriptor();
    }

    if (mem) {
	// Cache the descriptor's DMA characteristics (length, direction,
	// highest physical page) in fMDSummary.
	bzero(&fMDSummary, sizeof(fMDSummary));
	err = mem->dmaCommandOperation(
		kIOMDGetCharacteristics,
		&fMDSummary, sizeof(fMDSummary));
	if (err)
	    return err;

	ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

	if ((kMapped == MAPTYPE(fMappingOptions))
	    && fMapper
	    && (!fNumAddressBits || (fNumAddressBits >= 31)))
	    // assuming mapped space is 2G
	    fInternalState->fCheckAddressing = false;
	else
	    fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));

	fInternalState->fNewMD = true;
	mem->retain();			// released in clearMemoryDescriptor()
	fMemory = mem;

	// Mark the descriptor busy for DMA; balanced by kIOMDSetDMAInactive
	// in clearMemoryDescriptor().
	mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
	if (autoPrepare) {
	    err = prepare();
	    if (err) {
		clearMemoryDescriptor();
	    }
	}
    }

    return err;
}
284
285IOReturn
286IODMACommand::clearMemoryDescriptor(bool autoComplete)
287{
288 if (fActive && !autoComplete)
289 return (kIOReturnNotReady);
290
291 if (fMemory) {
292 while (fActive)
293 complete();
b0d623f7 294 fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
0c530ab8
A
295 fMemory->release();
296 fMemory = 0;
297 }
298
299 return (kIOReturnSuccess);
300}
301
302const IOMemoryDescriptor *
303IODMACommand::getMemoryDescriptor() const
304{
305 return fMemory;
306}
307
308
/*
 * Internal segment callback used by walkAll() via genIOVMSegments().
 * The walk op is smuggled in through 'reference'.  In preflight it counts
 * the pages that fall beyond the device's reachable range; in prepare it
 * tags bounce pages with the source page they shadow; in sync it copies
 * data between the bounce pages and the original memory with copypv().
 */
IOReturn
IODMACommand::segmentOp(
			void         *reference,
			IODMACommand *target,
			Segment64     segment,
			void         *segments,
			UInt32        segmentIndex)
{
    IOOptionBits op = (uintptr_t) reference;
    addr64_t     maxPhys, address;
    uint64_t     length;
    uint32_t     numPages;

    IODMACommandInternal * state = target->reserved;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64) && !state->fLocalMapper)
	maxPhys = (1ULL << target->fNumAddressBits);
    else
	maxPhys = 0;
    maxPhys--;		// all-ones when there is no addressing limit

    address = segment.fIOVMAddr;
    length = segment.fLength;

    assert(address);
    assert(length);

    if (!state->fMisaligned)
    {
	state->fMisaligned |= (0 != (state->fSourceAlignMask & address));
	if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, state->fSourceAlignMask);
    }

    if (state->fMisaligned && (kWalkPreflight & op))
	return (kIOReturnNotAligned);

    if (!state->fDoubleBuffer)
    {
	if ((address + length - 1) <= maxPhys)
	{
	    // Entire segment is reachable - nothing to bounce.
	    length = 0;
	}
	else if (address <= maxPhys)
	{
	    // Segment straddles the limit; only the tail needs bouncing.
	    DEBG("tail %qx, %qx", address, length);
	    length = (address + length - maxPhys - 1);
	    address = maxPhys + 1;
	    DEBG("-> %qx, %qx\n", address, length);
	}
    }

    if (!length)
	return (kIOReturnSuccess);

    numPages = atop_64(round_page_64((address & PAGE_MASK) + length));

    if (kWalkPreflight & op)
    {
	state->fCopyPageCount += numPages;
    }
    else
    {
	vm_page_t lastPage;
	lastPage = NULL;
	if (kWalkPrepare & op)
	{
	    // Tag each bounce page with the source page number it shadows,
	    // so the sparse-remap lookup in genIOVMSegments() can find it.
	    lastPage = state->fCopyNext;
	    for (IOItemCount idx = 0; idx < numPages; idx++)
	    {
		vm_page_set_offset(lastPage, atop_64(address) + idx);
		lastPage = vm_page_get_next(lastPage);
	    }
	}

	if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
	{
	    lastPage = state->fCopyNext;
	    for (IOItemCount idx = 0; idx < numPages; idx++)
	    {
		if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
		{
		    addr64_t remapAddr;
		    uint64_t chunk;

		    remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
		    if (!state->fDoubleBuffer)
		    {
			remapAddr += (address & PAGE_MASK);
		    }
		    // Copy at most up to the next page boundary per pass.
		    chunk = PAGE_SIZE - (address & PAGE_MASK);
		    if (chunk > length)
			chunk = length;

		    DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
			    (kWalkSyncIn & op) ? "->" : "<-",
			    address, chunk, op);

		    if (kWalkSyncIn & op)
		    { // cppvNoModSnk
			copypv(remapAddr, address, chunk,
				cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
		    }
		    else
		    {
			copypv(address, remapAddr, chunk,
				cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
		    }
		    address += chunk;
		    length -= chunk;
		}
		lastPage = vm_page_get_next(lastPage);
	    }
	}
	state->fCopyNext = lastPage;
    }

    return kIOReturnSuccess;
}
427
/*
 * Drive a preflight / prepare / sync / complete pass over the prepared
 * range.  'op' is a combination of the kWalk* flags; phases are executed
 * in order and flags are stripped as each phase finishes.
 */
IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn     ret = kIOReturnSuccess;
    UInt32       numSegments;
    UInt64       offset;

    if (kWalkPreflight & op)
    {
	// Reset bounce/copy bookkeeping, then scan the segments to find out
	// whether (and how many) pages need bounce buffering.
	state->fMapContig    = false;
	state->fMisaligned   = false;
	state->fDoubleBuffer = false;
	state->fPrepared     = false;
	state->fCopyNext     = NULL;
	state->fCopyPageAlloc = 0;
	state->fLocalMapperPageAlloc = 0;
	state->fCopyPageCount = 0;
	state->fNextRemapPage = NULL;
	state->fCopyMD        = 0;

	if (!(kWalkDoubleBuffer & op))
	{
	    offset = 0;
	    numSegments = 0-1;		// no segment count limit
	    ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
	}

	op &= ~kWalkPreflight;

	// A misaligned source forces double buffering of the whole range.
	state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
	if (state->fDoubleBuffer)
	    state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));

	if (state->fCopyPageCount)
	{
	    vm_page_t mapBase = NULL;

	    DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

	    if (!state->fDoubleBuffer)
	    {
		// Partial bounce: grab individual low-memory pages.
		kern_return_t kr;
		kr = vm_page_alloc_list(state->fCopyPageCount,
					KMA_LOMEM | KMA_NOPAGEWAIT, &mapBase);
		if (KERN_SUCCESS != kr)
		{
		    DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr);
		    mapBase = NULL;
		}
	    }

	    if (mapBase)
	    {
		// Walk again to associate each bounce page with its source
		// page (kWalkPrepare path in segmentOp).
		state->fCopyPageAlloc = mapBase;
		state->fCopyNext = state->fCopyPageAlloc;
		offset = 0;
		numSegments = 0-1;
		ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
		state->fPrepared = true;
		op &= ~(kWalkSyncIn | kWalkSyncOut);
	    }
	    else
	    {
		// Fall back to a single contiguous low-memory buffer
		// covering the whole prepared range.
		DEBG("alloc IOBMD\n");
		mach_vm_address_t mask = 0xFFFFF000; //state->fSourceAlignMask
		state->fCopyMD = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
				    fMDSummary.fDirection, state->fPreparedLength, mask);

		if (state->fCopyMD)
		{
		    ret = kIOReturnSuccess;
		    state->fPrepared = true;
		}
		else
		{
		    DEBG("IODMACommand !iovmAlloc");
		    return (kIOReturnNoResources);
		}
	    }
	}

	if (state->fLocalMapper)
	{
	    // Reserve IOVM space from the per-device mapper for the range.
	    state->fLocalMapperPageCount = atop_64(round_page(
		    state->fPreparedLength + ((state->fPreparedOffset + fMDSummary.fPageAlign) & page_mask)));
	    state->fLocalMapperPageAlloc = fMapper->iovmAllocDMACommand(this, state->fLocalMapperPageCount);
	    state->fMapContig = true;
	}
    }

    if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
    {
	if (state->fCopyPageCount)
	{
	    DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

	    if (state->fCopyPageAlloc)
	    {
		// Page-list bounce: per-segment copies via segmentOp().
		state->fCopyNext = state->fCopyPageAlloc;
		offset = 0;
		numSegments = 0-1;
		ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
	    }
	    else if (state->fCopyMD)
	    {
		DEBG("sync IOBMD\n");

		if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
		{
		    // Copy the whole window between the client descriptor
		    // and the double buffer in one shot.
		    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

		    IOByteCount bytes;

		    if (kWalkSyncIn & op)
			bytes = poMD->writeBytes(state->fPreparedOffset,
						    state->fCopyMD->getBytesNoCopy(),
						    state->fPreparedLength);
		    else
			bytes = poMD->readBytes(state->fPreparedOffset,
						    state->fCopyMD->getBytesNoCopy(),
						    state->fPreparedLength);
		    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
		    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
		}
		else
		    ret = kIOReturnSuccess;
	    }
	}
    }

    if (kWalkComplete & op)
    {
	// Release mapper reservations, bounce pages and the double buffer.
	if (state->fLocalMapperPageAlloc)
	{
	    fMapper->iovmFreeDMACommand(this, state->fLocalMapperPageAlloc, state->fLocalMapperPageCount);
	    state->fLocalMapperPageAlloc = 0;
	    state->fLocalMapperPageCount = 0;
	}
	if (state->fCopyPageAlloc)
	{
	    vm_page_free_list(state->fCopyPageAlloc, FALSE);
	    state->fCopyPageAlloc = 0;
	    state->fCopyPageCount = 0;
	}
	if (state->fCopyMD)
	{
	    state->fCopyMD->release();
	    state->fCopyMD = 0;
	}

	state->fPrepared = false;
    }
    return (ret);
}
584
b0d623f7
A
585UInt8
586IODMACommand::getNumAddressBits(void)
587{
588 return (fNumAddressBits);
589}
590
591UInt32
592IODMACommand::getAlignment(void)
593{
594 return (fAlignMask + 1);
595}
596
2d21ac55
A
597IOReturn
598IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
599 UInt8 numAddressBits,
600 UInt64 maxSegmentSize,
601 MappingOptions mappingOptions,
602 UInt64 maxTransferSize,
603 UInt32 alignment,
604 IOMapper *mapper,
605 UInt64 offset,
606 UInt64 length,
607 bool flushCache,
608 bool synchronize)
609{
610 if (fActive)
611 return kIOReturnNotPermitted;
612
613 if (!outSegFunc || !numAddressBits)
614 return kIOReturnBadArgument;
615
616 bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
617 || OutputLittle32 == outSegFunc);
618 if (is32Bit)
619 {
620 if (!numAddressBits)
621 numAddressBits = 32;
622 else if (numAddressBits > 32)
623 return kIOReturnBadArgument; // Wrong output function for bits
624 }
625
626 if (numAddressBits && (numAddressBits < PAGE_SHIFT))
627 return kIOReturnBadArgument;
628
629 if (!maxSegmentSize)
630 maxSegmentSize--; // Set Max segment to -1
631 if (!maxTransferSize)
632 maxTransferSize--; // Set Max transfer to -1
633
634 if (!mapper)
635 {
636 IOMapper::checkForSystemMapper();
637 mapper = IOMapper::gSystem;
638 }
639
640 switch (MAPTYPE(mappingOptions))
641 {
642 case kMapped: break;
643 case kNonCoherent: fMapper = 0; break;
644 case kBypassed:
645 if (mapper && !mapper->getBypassMask(&fBypassMask))
646 return kIOReturnBadArgument;
647 break;
648 default:
649 return kIOReturnBadArgument;
650 };
651
652 fNumSegments = 0;
653 fBypassMask = 0;
654 fOutSeg = outSegFunc;
655 fNumAddressBits = numAddressBits;
656 fMaxSegmentSize = maxSegmentSize;
657 fMappingOptions = mappingOptions;
658 fMaxTransferSize = maxTransferSize;
659 if (!alignment)
660 alignment = 1;
661 fAlignMask = alignment - 1;
b0d623f7
A
662 if (mapper != fMapper)
663 {
664 mapper->retain();
665 fMapper->release();
666 fMapper = mapper;
667 }
2d21ac55
A
668
669 fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
670
671 return prepare(offset, length, flushCache, synchronize);
672}
673
674
0c530ab8
A
/*
 * Prepare the sub-range [offset, offset+length) of the attached descriptor
 * for DMA.  Calls nest: fActive counts them, and nested calls must use the
 * same range.  length == 0 means the whole descriptor.
 */
IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    MappingOptions mappingOptions = fMappingOptions;

    if (!length)
	length = fMDSummary.fLength;

    if (length > fMaxTransferSize)
	return kIOReturnNoSpace;

    if (IS_NONCOHERENT(mappingOptions) && flushCache) {
	// Push dirty cache lines for the range out before the device sees it.
	IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

	poMD->performOperation(kIOMemoryIncoherentIOStore, offset, length);
    }
    if (fActive++)
    {
	// Nested prepare: the range must match the outstanding one.
	if ((state->fPreparedOffset != offset)
	    || (state->fPreparedLength != length))
	    ret = kIOReturnNotReady;
    }
    else
    {
	state->fPreparedOffset = offset;
	state->fPreparedLength = length;

	// Reset all per-prepare walk/bounce bookkeeping.
	state->fMapContig    = false;
	state->fMisaligned   = false;
	state->fDoubleBuffer = false;
	state->fPrepared     = false;
	state->fCopyNext     = NULL;
	state->fCopyPageAlloc = 0;
	state->fCopyPageCount = 0;
	state->fNextRemapPage = NULL;
	state->fCopyMD        = 0;
	state->fLocalMapperPageAlloc = 0;
	state->fLocalMapperPageCount = 0;

	state->fLocalMapper = (fMapper && (fMapper != IOMapper::gSystem));

	state->fSourceAlignMask = fAlignMask;
	if (state->fLocalMapper)
	    state->fSourceAlignMask &= page_mask;

	// fCursor: true when segments can be generated straight from the
	// descriptor with no preflight walk (iterate-only mode, or no
	// addressing/alignment constraint applies to this memory).
	state->fCursor = state->fIterateOnly
			|| (!state->fCheckAddressing
			    && !state->fLocalMapper
			    && (!state->fSourceAlignMask
				|| ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));

	if (!state->fCursor)
	{
	    IOOptionBits op = kWalkPrepare | kWalkPreflight;
	    if (synchronize)
		op |= kWalkSyncOut;
	    ret = walkAll(op);
	}
	if (kIOReturnSuccess == ret)
	    state->fPrepared = true;
    }
    return ret;
}
740
/*
 * Undo one nested prepare().  Only the final (outermost) complete() tears
 * the walk state down and performs the optional sync/cache invalidation.
 */
IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    if (fActive < 1)
	return kIOReturnNotReady;

    if (!--fActive)
    {
	if (!state->fCursor)
	{
	    // Optionally copy bounce buffers back, then free walk state.
	    IOOptionBits op = kWalkComplete;
	    if (synchronize)
		op |= kWalkSyncIn;
	    ret = walkAll(op);
	}
	state->fPrepared = false;

	if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
	{
	    // Discard stale cache lines so the CPU sees device-written data.
	    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

	    poMD->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
	}
    }

    return ret;
}
771
b0d623f7
A
772IOReturn
773IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
774{
775 IODMACommandInternal * state = fInternalState;
776 if (fActive < 1)
777 return (kIOReturnNotReady);
778
779 if (offset)
780 *offset = state->fPreparedOffset;
781 if (length)
782 *length = state->fPreparedLength;
783
784 return (kIOReturnSuccess);
785}
786
0c530ab8
A
/*
 * Synchronise bounce buffers with the client memory in one direction
 * (kIODirectionIn or kIODirectionOut - not both).  kForceDoubleBuffer
 * rebuilds the walk state with double buffering forced on first.
 */
IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOOptionBits           op;

    // Exactly one direction must be specified.
    if (kIODirectionOutIn == (kIODirectionOutIn & options))
	return kIOReturnBadArgument;

    if (fActive < 1)
	return kIOReturnNotReady;

    op = 0;
    if (kForceDoubleBuffer & options)
    {
	if (state->fDoubleBuffer)
	    return kIOReturnSuccess;	// already double buffered
	if (state->fCursor)
	    state->fCursor = false;
	else
	    ret = walkAll(kWalkComplete);

	op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
    }
    else if (state->fCursor)
	return kIOReturnSuccess;	// cursor mode: nothing to sync

    if (kIODirectionIn & options)
	op |= kWalkSyncIn | kWalkSyncAlways;
    else if (kIODirectionOut & options)
	op |= kWalkSyncOut | kWalkSyncAlways;

    ret = walkAll(op);

    return ret;
}
824
2d21ac55
A
// Context threaded through transferSegment() by transfer().
struct IODMACommandTransferContext
{
    void *   buffer;		// client buffer being read from / written to
    UInt64   bufferOffset;	// bytes already transferred into/out of it
    UInt64   remaining;		// bytes still to transfer
    UInt32   op;		// kIODMACommandTransferOp*
};
enum
{
    kIODMACommandTransferOpReadBytes  = 1,
    kIODMACommandTransferOpWriteBytes = 2
};
837
/*
 * Segment callback for transfer(): copies up to context->remaining bytes
 * between this segment and the client buffer via copypv().
 */
IOReturn
IODMACommand::transferSegment(void   *reference,
			IODMACommand *target,
			Segment64     segment,
			void         *segments,
			UInt32        segmentIndex)
{
    IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
    UInt64   length  = min(segment.fLength, context->remaining);
    addr64_t ioAddr  = segment.fIOVMAddr;
    addr64_t cpuAddr = ioAddr;

    context->remaining -= length;

    while (length)
    {
	UInt64 copyLen = length;
	if ((kMapped == MAPTYPE(target->fMappingOptions))
	    && target->fMapper)
	{
	    // Mapped DMA: translate the IOVM address back to a physical
	    // address, one page at a time.
	    cpuAddr = target->fMapper->mapAddr(ioAddr);
	    copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
	    ioAddr += copyLen;
	}

	switch (context->op)
	{
	    case kIODMACommandTransferOpReadBytes:
		copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen,
			      cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
		break;
	    case kIODMACommandTransferOpWriteBytes:
		copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen,
			    cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
		break;
	}
	length                -= copyLen;
	context->bufferOffset += copyLen;
    }

    // Keep walking while bytes remain; returning overrun stops the walk.
    return (context->remaining ? kIOReturnSuccess : kIOReturnOverrun);
}
880
881UInt64
882IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
883{
884 IODMACommandInternal * state = fInternalState;
885 IODMACommandTransferContext context;
b0d623f7 886 Segment64 segments[1];
2d21ac55
A
887 UInt32 numSegments = 0-1;
888
889 if (fActive < 1)
890 return (0);
891
892 if (offset >= state->fPreparedLength)
893 return (0);
894 length = min(length, state->fPreparedLength - offset);
895
896 context.buffer = buffer;
897 context.bufferOffset = 0;
898 context.remaining = length;
899 context.op = transferOp;
b0d623f7 900 (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);
2d21ac55
A
901
902 return (length - context.remaining);
903}
904
905UInt64
906IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
907{
908 return (transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length));
909}
910
911UInt64
912IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
913{
914 return (transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length));
915}
916
0c530ab8
A
917IOReturn
918IODMACommand::genIOVMSegments(UInt64 *offsetP,
919 void *segmentsP,
920 UInt32 *numSegmentsP)
921{
b0d623f7
A
922 return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
923 offsetP, segmentsP, numSegmentsP));
0c530ab8
A
924}
925
/*
 * Core segment generator.  Walks the (possibly bounced) memory descriptor
 * from *offsetP, coalescing physically adjacent runs, splitting segments
 * that exceed fMaxSegmentSize, substituting bounce pages for unreachable
 * physical addresses, and emitting each finished segment via outSegFunc.
 * On success, *offsetP and *numSegmentsP report how far the walk got.
 */
IOReturn
IODMACommand::genIOVMSegments(uint32_t op,
			      InternalSegmentFunction outSegFunc,
			      void   *reference,
			      UInt64 *offsetP,
			      void   *segmentsP,
			      UInt32 *numSegmentsP)
{
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp = kIOMDWalkSegments;
    IOReturn               ret  = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive)
	return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
	return kIOReturnBadArgument;

    IOMDDMAWalkSegmentArgs *state =
	(IOMDDMAWalkSegmentArgs *) fState;

    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
	return kIOReturnOverrun;

    if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
	// (Re)start the walk from the beginning of the descriptor.
	state->fOffset                = 0;
	state->fIOVMAddr              = 0;
	internalState->fNextRemapPage = NULL;
	internalState->fNewMD         = false;
	state->fMapped                = (IS_MAPPED(fMappingOptions) && fMapper);
	mdOp                          = kIOMDFirstSegment;
    };

    UInt64    bypassMask = fBypassMask;
    UInt32    segIndex = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg = { 0, 0 };
    addr64_t  maxPhys;

    if (fNumAddressBits && (fNumAddressBits < 64))
	maxPhys = (1ULL << fNumAddressBits);
    else
	maxPhys = 0;
    maxPhys--;		// all-ones when there is no addressing limit

    while (state->fIOVMAddr || (state->fOffset < memLength))
    {
	// state = next seg
	if (!state->fIOVMAddr) {

	    IOReturn rtn;

	    state->fOffset = offset;
	    state->fLength = memLength - offset;

	    if (internalState->fMapContig && (kWalkClient & op))
	    {
		// Contiguously mapped by the local mapper: the IOVM address
		// can be computed directly from the reservation base.
		ppnum_t pageNum = internalState->fLocalMapperPageAlloc;
		state->fIOVMAddr = ptoa_64(pageNum)
					+ offset - internalState->fPreparedOffset;
		rtn = kIOReturnSuccess;
	    }
	    else
	    {
		// Ask the (bounce or client) descriptor for its next run.
		const IOMemoryDescriptor * memory =
		    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
		rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
		mdOp = kIOMDWalkSegments;
	    }

	    if (rtn == kIOReturnSuccess)
	    {
		assert(state->fIOVMAddr);
		assert(state->fLength);
		// Coalesce with the segment under construction if adjacent.
		if ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr) {
		    UInt64 length = state->fLength;
		    offset          += length;
		    curSeg.fLength  += length;
		    state->fIOVMAddr = 0;
		}
	    }
	    else if (rtn == kIOReturnOverrun)
		state->fIOVMAddr = state->fLength = 0;	// At end
	    else
		return rtn;
	}

	// seg = state, offset = end of seg
	if (!curSeg.fIOVMAddr)
	{
	    UInt64 length = state->fLength;
	    offset          += length;
	    curSeg.fIOVMAddr = state->fIOVMAddr | bypassMask;
	    curSeg.fLength   = length;
	    state->fIOVMAddr = 0;
	}

	if (!state->fIOVMAddr)
	{
	    if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
	    {
		// Segment reaches beyond the device's addressable range.
		if (internalState->fCursor)
		{
		    // Cursor mode has no bounce buffers to fall back on.
		    curSeg.fIOVMAddr = 0;
		    ret = kIOReturnMessageTooLarge;
		    break;
		}
		else if (curSeg.fIOVMAddr <= maxPhys)
		{
		    // Keep the reachable head; push the tail back into the
		    // walker state for the next iteration.
		    UInt64 remain, newLength;

		    newLength = (maxPhys + 1 - curSeg.fIOVMAddr);
		    DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
		    remain = curSeg.fLength - newLength;
		    state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
		    curSeg.fLength   = newLength;
		    state->fLength   = remain;
		    offset          -= remain;
		}
		else
		{
		    // Entirely unreachable: substitute the bounce page that
		    // preflight tagged for this source page.
		    UInt64    addr = curSeg.fIOVMAddr;
		    ppnum_t   addrPage = atop_64(addr);
		    vm_page_t remap = NULL;
		    UInt64    remain, newLength;

		    DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength);

		    // Fast path: the next remap page usually matches.
		    remap = internalState->fNextRemapPage;
		    if (remap && (addrPage == vm_page_get_offset(remap)))
		    {
		    }
		    else for (remap = internalState->fCopyPageAlloc;
			 remap && (addrPage != vm_page_get_offset(remap));
			 remap = vm_page_get_next(remap))
		    {
		    }

		    if (!remap) panic("no remap page found");

		    curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap))
					+ (addr & PAGE_MASK);
		    internalState->fNextRemapPage = vm_page_get_next(remap);

		    // Bounce pages only cover up to a page boundary; carry
		    // any excess over to the next iteration.
		    newLength = PAGE_SIZE - (addr & PAGE_MASK);
		    if (newLength < curSeg.fLength)
		    {
			remain = curSeg.fLength - newLength;
			state->fIOVMAddr = addr + newLength;
			curSeg.fLength   = newLength;
			state->fLength   = remain;
			offset          -= remain;
		    }
		    DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset);
		}
	    }

	    if (curSeg.fLength > fMaxSegmentSize)
	    {
		// Split segments longer than the device's maximum.
		UInt64 remain = curSeg.fLength - fMaxSegmentSize;

		state->fIOVMAddr = fMaxSegmentSize + curSeg.fIOVMAddr;
		curSeg.fLength   = fMaxSegmentSize;

		state->fLength   = remain;
		offset          -= remain;
	    }

	    if (internalState->fCursor
		&& (0 != (internalState->fSourceAlignMask & curSeg.fIOVMAddr)))
	    {
		// Cursor mode cannot fix misalignment - fail the walk.
		curSeg.fIOVMAddr = 0;
		ret = kIOReturnNotAligned;
		break;
	    }

	    if (offset >= memLength)
	    {
		// Clip the final segment to the prepared window.
		curSeg.fLength   -= (offset - memLength);
		offset = memLength;
		state->fIOVMAddr = state->fLength = 0;	// At end
		break;
	    }
	}

	if (state->fIOVMAddr) {
	    // A new run is pending: flush the finished segment first.
	    if ((segIndex + 1 == numSegments))
		break;

	    ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
	    curSeg.fIOVMAddr = 0;
	    if (kIOReturnSuccess != ret)
		break;
	}
    }

    if (curSeg.fIOVMAddr) {
	// Emit the last, still-unflushed segment.
	ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
	state->fOffset = offset;
	*offsetP       = offset - internalState->fPreparedOffset;
	*numSegmentsP  = segIndex;
    }
    return ret;
}
1137
/*
 * Adapter from the internal segment callback signature to the public
 * SegmentFunction carried in 'reference'.  Also enforces the device's
 * address-width limit (skipped while a local mapper reservation is still
 * pending).
 */
IOReturn
IODMACommand::clientOutputSegment(
	void *reference, IODMACommand *target,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    SegmentFunction segmentFunction = (SegmentFunction) reference;
    IOReturn ret = kIOReturnSuccess;

    if ((target->fNumAddressBits < 64)
	&& ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
	&& (target->reserved->fLocalMapperPageAlloc || !target->reserved->fLocalMapper))
    {
	DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
	ret = kIOReturnMessageTooLarge;
    }

    // A false return from the client's output function also maps to
    // "too large" - the segment could not be represented.
    if (!(*segmentFunction)(target, segment, vSegList, outSegIndex))
    {
	DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
	ret = kIOReturnMessageTooLarge;
    }

    return (ret);
}
1162
b0d623f7
A
1163IOReturn
1164IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
1165 UInt64 *offsetP,
1166 void *segmentsP,
1167 UInt32 *numSegmentsP)
1168{
1169 return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
1170 offsetP, segmentsP, numSegmentsP));
1171}
1172
0c530ab8
A
1173bool
1174IODMACommand::OutputHost32(IODMACommand *,
1175 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1176{
1177 Segment32 *base = (Segment32 *) vSegList;
1178 base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
1179 base[outSegIndex].fLength = (UInt32) segment.fLength;
1180 return true;
1181}
1182
1183bool
1184IODMACommand::OutputBig32(IODMACommand *,
1185 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1186{
1187 const UInt offAddr = outSegIndex * sizeof(Segment32);
1188 const UInt offLen = offAddr + sizeof(UInt32);
1189 OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
1190 OSWriteBigInt32(vSegList, offLen, (UInt32) segment.fLength);
1191 return true;
1192}
1193
1194bool
1195IODMACommand::OutputLittle32(IODMACommand *,
1196 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1197{
1198 const UInt offAddr = outSegIndex * sizeof(Segment32);
1199 const UInt offLen = offAddr + sizeof(UInt32);
1200 OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
1201 OSWriteLittleInt32(vSegList, offLen, (UInt32) segment.fLength);
1202 return true;
1203}
1204
1205bool
1206IODMACommand::OutputHost64(IODMACommand *,
1207 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1208{
1209 Segment64 *base = (Segment64 *) vSegList;
1210 base[outSegIndex] = segment;
1211 return true;
1212}
1213
1214bool
1215IODMACommand::OutputBig64(IODMACommand *,
1216 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1217{
1218 const UInt offAddr = outSegIndex * sizeof(Segment64);
1219 const UInt offLen = offAddr + sizeof(UInt64);
1220 OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
1221 OSWriteBigInt64(vSegList, offLen, (UInt64) segment.fLength);
1222 return true;
1223}
1224
1225bool
1226IODMACommand::OutputLittle64(IODMACommand *,
1227 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1228{
1229 const UInt offAddr = outSegIndex * sizeof(Segment64);
1230 const UInt offLen = offAddr + sizeof(UInt64);
1231 OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
1232 OSWriteLittleInt64(vSegList, offLen, (UInt64) segment.fLength);
1233 return true;
1234}
1235
1236