/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define IOKIT_ENABLE_SHARED_PTR

#include <IOKit/assert.h>

#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>
#include <libkern/OSDebug.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"

#define MAPTYPE(type)           ((UInt) (type) & kTypeMask)
#define IS_NONCOHERENT(type)    (MAPTYPE(type) == kNonCoherent)

enum{
	kWalkSyncIn       = 0x01,       // bounce -> md
	kWalkSyncOut      = 0x02,       // bounce <- md
	kWalkSyncAlways   = 0x04,
	kWalkPreflight    = 0x08,
	kWalkDoubleBuffer = 0x10,
	kWalkPrepare      = 0x20,
	kWalkComplete     = 0x40,
	kWalkClient       = 0x80
};
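
// For reference, these op flags are composed by the callers later in this
// file (a summary of existing behavior, not new behavior): prepare() walks
// with (kWalkPrepare | kWalkPreflight), adding kWalkSyncOut when synchronize
// is true; complete() walks with kWalkComplete, adding kWalkSyncIn when
// synchronize is true; synchronize() uses kWalkSyncIn or kWalkSyncOut
// together with kWalkSyncAlways.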


#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary


#if 1
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction)                                      \
	((kIODirectionNone == (direction))                                  \
	|| (kWalkSyncAlways & (op))                                         \
	|| (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)       \
	    & (direction)))

#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif
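
// A worked example of the macro above (illustrative only): with
// op == kWalkSyncIn and a descriptor prepared kIODirectionIn,
// SHOULD_COPY_DIR(op, direction) evaluates true and the bounce pages are
// copied back into the memory descriptor; with kIODirectionOut it evaluates
// false and the copy is skipped. kIODirectionNone or kWalkSyncAlways always
// force the copy.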

#if 0
#define DEBG(fmt, args...)  { IOLog(fmt, ## args); kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)  {}
#endif

#if 0
#define LOGTAG          0x87654321
#endif

/**************************** class IODMACommand ***************************/

#undef super
#define super IOCommand
OSDefineMetaClassAndStructorsWithZone(IODMACommand, IOCommand, ZC_NONE);

OSMetaClassDefineReservedUsedX86(IODMACommand, 0);
OSMetaClassDefineReservedUsedX86(IODMACommand, 1);
OSMetaClassDefineReservedUsedX86(IODMACommand, 2);
OSMetaClassDefineReservedUsedX86(IODMACommand, 3);
OSMetaClassDefineReservedUsedX86(IODMACommand, 4);
OSMetaClassDefineReservedUsedX86(IODMACommand, 5);
OSMetaClassDefineReservedUsedX86(IODMACommand, 6);
OSMetaClassDefineReservedUnused(IODMACommand, 7);
OSMetaClassDefineReservedUnused(IODMACommand, 8);
OSMetaClassDefineReservedUnused(IODMACommand, 9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);

OSSharedPtr<IODMACommand>
IODMACommand::withRefCon(void * refCon)
{
	OSSharedPtr<IODMACommand> me = OSMakeShared<IODMACommand>();

	if (me && !me->initWithRefCon(refCon)) {
		return nullptr;
	}

	return me;
}

OSSharedPtr<IODMACommand>
IODMACommand::withSpecification(SegmentFunction outSegFunc,
    const SegmentOptions * segmentOptions,
    uint32_t               mappingOptions,
    IOMapper             * mapper,
    void                 * refCon)
{
	OSSharedPtr<IODMACommand> me = OSMakeShared<IODMACommand>();

	if (me && !me->initWithSpecification(outSegFunc, segmentOptions, mappingOptions,
	    mapper, refCon)) {
		return nullptr;
	}

	return me;
}

OSSharedPtr<IODMACommand>
IODMACommand::withSpecification(SegmentFunction outSegFunc,
    UInt8           numAddressBits,
    UInt64          maxSegmentSize,
    MappingOptions  mappingOptions,
    UInt64          maxTransferSize,
    UInt32          alignment,
    IOMapper       *mapper,
    void           *refCon)
{
	OSSharedPtr<IODMACommand> me = OSMakeShared<IODMACommand>();

	if (me && !me->initWithSpecification(outSegFunc,
	    numAddressBits, maxSegmentSize,
	    mappingOptions, maxTransferSize,
	    alignment, mapper, refCon)) {
		return nullptr;
	}

	return me;
}

OSSharedPtr<IODMACommand>
IODMACommand::cloneCommand(void *refCon)
{
	SegmentOptions segmentOptions =
	{
		.fStructSize = sizeof(segmentOptions),
		.fNumAddressBits = (uint8_t)fNumAddressBits,
		.fMaxSegmentSize = fMaxSegmentSize,
		.fMaxTransferSize = fMaxTransferSize,
		.fAlignment = fAlignMask + 1,
		.fAlignmentLength = fAlignMaskInternalSegments + 1,
		.fAlignmentInternalSegments = fAlignMaskLength + 1
	};

	return IODMACommand::withSpecification(fOutSeg, &segmentOptions,
	           fMappingOptions, fMapper.get(), refCon);
}

#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)

bool
IODMACommand::initWithRefCon(void * refCon)
{
	if (!super::init()) {
		return false;
	}

	if (!reserved) {
		reserved = IONew(IODMACommandInternal, 1);
		if (!reserved) {
			return false;
		}
	}
	bzero(reserved, sizeof(IODMACommandInternal));
	fRefCon = refCon;

	return true;
}

bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
    const SegmentOptions * segmentOptions,
    uint32_t               mappingOptions,
    IOMapper             * mapper,
    void                 * refCon)
{
	if (!initWithRefCon(refCon)) {
		return false;
	}

	if (kIOReturnSuccess != setSpecification(outSegFunc, segmentOptions,
	    mappingOptions, mapper)) {
		return false;
	}

	return true;
}

bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
    UInt8           numAddressBits,
    UInt64          maxSegmentSize,
    MappingOptions  mappingOptions,
    UInt64          maxTransferSize,
    UInt32          alignment,
    IOMapper       *mapper,
    void           *refCon)
{
	SegmentOptions segmentOptions =
	{
		.fStructSize = sizeof(segmentOptions),
		.fNumAddressBits = numAddressBits,
		.fMaxSegmentSize = maxSegmentSize,
		.fMaxTransferSize = maxTransferSize,
		.fAlignment = alignment,
		.fAlignmentLength = 1,
		.fAlignmentInternalSegments = alignment
	};

	return initWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper, refCon);
}

IOReturn
IODMACommand::setSpecification(SegmentFunction outSegFunc,
    const SegmentOptions * segmentOptions,
    uint32_t               mappingOptions,
    IOMapper             * mapper)
{
	IOService * device = NULL;
	UInt8       numAddressBits;
	UInt64      maxSegmentSize;
	UInt64      maxTransferSize;
	UInt32      alignment;

	bool        is32Bit;

	if (!outSegFunc || !segmentOptions) {
		return kIOReturnBadArgument;
	}

	is32Bit = ((OutputHost32 == outSegFunc)
	    || (OutputBig32 == outSegFunc)
	    || (OutputLittle32 == outSegFunc));

	numAddressBits = segmentOptions->fNumAddressBits;
	maxSegmentSize = segmentOptions->fMaxSegmentSize;
	maxTransferSize = segmentOptions->fMaxTransferSize;
	alignment = segmentOptions->fAlignment;
	if (is32Bit) {
		if (!numAddressBits) {
			numAddressBits = 32;
		} else if (numAddressBits > 32) {
			return kIOReturnBadArgument;    // Wrong output function for bits
		}
	}

	if (numAddressBits && (numAddressBits < PAGE_SHIFT)) {
		return kIOReturnBadArgument;
	}

	if (!maxSegmentSize) {
		maxSegmentSize--;       // Set Max segment to -1
	}
	if (!maxTransferSize) {
		maxTransferSize--;      // Set Max transfer to -1
	}
	if (mapper && !OSDynamicCast(IOMapper, mapper)) {
		device = mapper;
		mapper = NULL;
	}
	if (!mapper && (kUnmapped != MAPTYPE(mappingOptions))) {
		IOMapper::checkForSystemMapper();
		mapper = IOMapper::gSystem;
	}

	fNumSegments     = 0;
	fOutSeg          = outSegFunc;
	fNumAddressBits  = numAddressBits;
	fMaxSegmentSize  = maxSegmentSize;
	fMappingOptions  = mappingOptions;
	fMaxTransferSize = maxTransferSize;
	if (!alignment) {
		alignment = 1;
	}
	fAlignMask = alignment - 1;

	alignment = segmentOptions->fAlignmentLength;
	if (!alignment) {
		alignment = 1;
	}
	fAlignMaskLength = alignment - 1;

	alignment = segmentOptions->fAlignmentInternalSegments;
	if (!alignment) {
		alignment = (fAlignMask + 1);
	}
	fAlignMaskInternalSegments = alignment - 1;

	switch (MAPTYPE(mappingOptions)) {
	case kMapped:       break;
	case kUnmapped:     break;
	case kNonCoherent:  break;

	case kBypassed:
		if (!mapper) {
			break;
		}
		return kIOReturnBadArgument;

	default:
		return kIOReturnBadArgument;
	}

	if (mapper != fMapper) {
		fMapper.reset(mapper, OSRetain);
	}

	fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
	fInternalState->fDevice = device;

	return kIOReturnSuccess;
}

void
IODMACommand::free()
{
	if (reserved) {
		IODelete(reserved, IODMACommandInternal, 1);
	}

	fMapper.reset();

	// Correct use of this class, when setting an IOMemoryDescriptor
	// in fMemory via setMemoryDescriptor(desc), is for the caller to
	// make a matching call to clearMemoryDescriptor() before releasing
	// the object. The matching call also releases the reference taken
	// on the IOMemoryDescriptor in setMemoryDescriptor().
	//
	// A number of "misbehaving" drivers have been found during testing
	// in which the matching call to clearMemoryDescriptor() is missing:
	//
	// rdar://59947343
	// rdar://59946968
	//
	// Both approaches taken in said drivers are wrong, but went largely
	// unnoticed while fMemory was a regular pointer. With fMemory
	// becoming an OSSharedPtr, the IODMACommand destructor expects to find
	// either fMemory reset (through the call to clearMemoryDescriptor()) or
	// an extra reference held for the release.
	//
	// For this reason, this workaround of detaching fMemory is put in
	// place here, choosing the leak over the panic for misbehaving
	// drivers. Once all instances are fixed, this workaround will be
	// removed.
	//
	// Note: all well-behaved drivers that make matching calls to
	// setMemoryDescriptor() and clearMemoryDescriptor() are unaffected,
	// since fMemory will be null at this point.
	fMemory.detach();

	super::free();
}
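
// Illustrative pairing of the calls described in the comment above (a sketch
// of driver-side usage, not code from this file; 'dmaCommand' and 'md' are
// hypothetical driver-owned objects):
//
//     dmaCommand->setMemoryDescriptor(md);   // takes a reference on md and,
//                                            // by default, prepares the command
//     ... genIOVMSegments() / issue the I/O ...
//     dmaCommand->clearMemoryDescriptor();   // completes if still active and
//                                            // drops the reference taken above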

IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
	IOReturn err = kIOReturnSuccess;

	if (mem == fMemory) {
		if (!autoPrepare) {
			while (fActive) {
				complete();
			}
		}
		return kIOReturnSuccess;
	}

	if (fMemory) {
		// As we are almost certainly being called from a work loop thread
		// if fActive is true it is probably not a good time to potentially
		// block. Just test for it and return an error
		if (fActive) {
			return kIOReturnBusy;
		}
		clearMemoryDescriptor();
	}

	if (mem) {
		bzero(&fMDSummary, sizeof(fMDSummary));
		err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)),
		    &fMDSummary, sizeof(fMDSummary));
		if (err) {
			return err;
		}

		ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

		if ((kMapped == MAPTYPE(fMappingOptions))
		    && fMapper) {
			fInternalState->fCheckAddressing = false;
		} else {
			fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));
		}

		fInternalState->fNewMD = true;
		fMemory.reset(const_cast<IOMemoryDescriptor *>(mem), OSRetain);
		fInternalState->fSetActiveNoMapper = (!fMapper);
		if (fInternalState->fSetActiveNoMapper) {
			mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
		}
		if (autoPrepare) {
			err = prepare();
			if (err) {
				clearMemoryDescriptor();
			}
		}
	}

	return err;
}

IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
	if (fActive && !autoComplete) {
		return kIOReturnNotReady;
	}

	if (fMemory) {
		while (fActive) {
			complete();
		}
		if (fInternalState->fSetActiveNoMapper) {
			fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
		}
		fMemory.reset();
	}

	return kIOReturnSuccess;
}

const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
	return fMemory.get();
}

IOMemoryDescriptor *
IODMACommand::getIOMemoryDescriptor() const
{
	OSSharedPtr<IOMemoryDescriptor> mem;

	mem = reserved->fCopyMD;
	if (!mem) {
		mem = fMemory;
	}

	return mem.get();
}

IOReturn
IODMACommand::segmentOp(
	void         *reference,
	IODMACommand *target,
	Segment64     segment,
	void         *segments,
	UInt32        segmentIndex)
{
	IOOptionBits op = (IOOptionBits)(uintptr_t) reference;
	addr64_t     maxPhys, address;
	uint64_t     length;
	uint32_t     numPages;
	uint32_t     mask;

	IODMACommandInternal * state = target->reserved;

	if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperAllocValid || !target->fMapper)) {
		maxPhys = (1ULL << target->fNumAddressBits);
	} else {
		maxPhys = 0;
	}
	maxPhys--;

	address = segment.fIOVMAddr;
	length = segment.fLength;

	assert(length);

	if (!state->fMisaligned) {
		mask = (segmentIndex ? target->fAlignMaskInternalSegments : state->fSourceAlignMask);
		state->fMisaligned |= (0 != (mask & address));
		if (state->fMisaligned) {
			DEBG("misaligned address %qx:%qx, %x\n", address, length, mask);
		}
	}
	if (!state->fMisaligned) {
		mask = target->fAlignMaskLength;
		state->fMisaligned |= (0 != (mask & length));
		if (state->fMisaligned) {
			DEBG("misaligned length %qx:%qx, %x\n", address, length, mask);
		}
	}

	if (state->fMisaligned && (kWalkPreflight & op)) {
		return kIOReturnNotAligned;
	}

	if (!state->fDoubleBuffer) {
		if ((address + length - 1) <= maxPhys) {
			length = 0;
		} else if (address <= maxPhys) {
			DEBG("tail %qx, %qx", address, length);
			length = (address + length - maxPhys - 1);
			address = maxPhys + 1;
			DEBG("-> %qx, %qx\n", address, length);
		}
	}

	if (!length) {
		return kIOReturnSuccess;
	}

	uint64_t numPages64 = atop_64(round_page_64((address & PAGE_MASK) + length));
	if (numPages64 > UINT_MAX) {
		return kIOReturnVMError;
	}
	numPages = (typeof(numPages))numPages64;

	if (kWalkPreflight & op) {
		state->fCopyPageCount += numPages;
	} else {
		vm_page_t lastPage;
		lastPage = NULL;
		if (kWalkPrepare & op) {
			lastPage = state->fCopyNext;
			for (IOItemCount idx = 0; idx < numPages; idx++) {
				vm_page_set_offset(lastPage, atop_64(address) + idx);
				lastPage = vm_page_get_next(lastPage);
			}
		}

		if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection)) {
			lastPage = state->fCopyNext;
			for (IOItemCount idx = 0; idx < numPages; idx++) {
				if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection)) {
					addr64_t cpuAddr = address;
					addr64_t remapAddr;
					uint64_t chunk;

					if ((kMapped == MAPTYPE(target->fMappingOptions))
					    && target->fMapper) {
						cpuAddr = target->fMapper->mapToPhysicalAddress(address);
					}

					remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
					if (!state->fDoubleBuffer) {
						remapAddr += (address & PAGE_MASK);
					}
					chunk = PAGE_SIZE - (address & PAGE_MASK);
					if (chunk > length) {
						chunk = length;
					}
					if (chunk > (UINT_MAX - PAGE_SIZE + 1)) {
						chunk = (UINT_MAX - PAGE_SIZE + 1);
					}

					DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
					    (kWalkSyncIn & op) ? "->" : "<-",
					    address, chunk, op);

					if (kWalkSyncIn & op) {         // cppvNoModSnk
						copypv(remapAddr, cpuAddr, (unsigned int) chunk,
						    cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
					} else {
						copypv(cpuAddr, remapAddr, (unsigned int) chunk,
						    cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
					}
					address += chunk;
					length -= chunk;
				}
				lastPage = vm_page_get_next(lastPage);
			}
		}
		state->fCopyNext = lastPage;
	}

	return kIOReturnSuccess;
}

OSSharedPtr<IOBufferMemoryDescriptor>
IODMACommand::createCopyBuffer(IODirection direction, UInt64 length)
{
	mach_vm_address_t mask = 0xFFFFF000;    //state->fSourceAlignMask
	return IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
	           direction, length, mask);
}

IOReturn
IODMACommand::walkAll(uint32_t op)
{
	IODMACommandInternal * state = fInternalState;

	IOReturn     ret = kIOReturnSuccess;
	UInt32       numSegments;
	UInt64       offset;

	if (kWalkPreflight & op) {
		state->fMisaligned     = false;
		state->fDoubleBuffer   = false;
		state->fPrepared       = false;
		state->fCopyNext       = NULL;
		state->fCopyPageAlloc  = NULL;
		state->fCopyPageCount  = 0;
		state->fNextRemapPage  = NULL;
		state->fCopyMD         = NULL;

		if (!(kWalkDoubleBuffer & op)) {
			offset = 0;
			numSegments = 0 - 1;
			ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
		}

		op &= ~kWalkPreflight;

		state->fDoubleBuffer = (state->fMisaligned || state->fForceDoubleBuffer);
		state->fForceDoubleBuffer = false;
		if (state->fDoubleBuffer) {
			state->fCopyPageCount = (typeof(state->fCopyPageCount))(atop_64(round_page(state->fPreparedLength)));
		}

		if (state->fCopyPageCount) {
			vm_page_t mapBase = NULL;

			DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

			if (!fMapper && !state->fDoubleBuffer) {
				kern_return_t kr;

				if (fMapper) {
					panic("fMapper copying");
				}

				kr = vm_page_alloc_list(state->fCopyPageCount,
				    (kma_flags_t)(KMA_LOMEM | KMA_NOPAGEWAIT), &mapBase);
				if (KERN_SUCCESS != kr) {
					DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr);
					mapBase = NULL;
				}
			}

			if (mapBase) {
				state->fCopyPageAlloc = mapBase;
				state->fCopyNext = state->fCopyPageAlloc;
				offset = 0;
				numSegments = 0 - 1;
				ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
				state->fPrepared = true;
				op &= ~(kWalkSyncIn | kWalkSyncOut);
			} else {
				DEBG("alloc IOBMD\n");
				state->fCopyMD = createCopyBuffer(fMDSummary.fDirection, state->fPreparedLength);

				if (state->fCopyMD) {
					ret = kIOReturnSuccess;
					state->fPrepared = true;
				} else {
					DEBG("IODMACommand !alloc IOBMD");
					return kIOReturnNoResources;
				}
			}
		}
	}

	if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op)) {
		if (state->fCopyPageCount) {
			DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

			if (state->fCopyPageAlloc) {
				state->fCopyNext = state->fCopyPageAlloc;
				offset = 0;
				numSegments = 0 - 1;
				ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
			} else if (state->fCopyMD) {
				DEBG("sync IOBMD\n");

				if (SHOULD_COPY_DIR(op, fMDSummary.fDirection)) {
					OSSharedPtr<IOMemoryDescriptor> poMD = fMemory;

					IOByteCount bytes;

					if (kWalkSyncIn & op) {
						bytes = poMD->writeBytes(state->fPreparedOffset,
						    state->fCopyMD->getBytesNoCopy(),
						    state->fPreparedLength);
					} else {
						bytes = poMD->readBytes(state->fPreparedOffset,
						    state->fCopyMD->getBytesNoCopy(),
						    state->fPreparedLength);
					}
					DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
					ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
				} else {
					ret = kIOReturnSuccess;
				}
			}
		}
	}

	if (kWalkComplete & op) {
		if (state->fCopyPageAlloc) {
			vm_page_free_list(state->fCopyPageAlloc, FALSE);
			state->fCopyPageAlloc = NULL;
			state->fCopyPageCount = 0;
		}
		if (state->fCopyMD) {
			state->fCopyMD.reset();
		}

		state->fPrepared = false;
	}
	return ret;
}

UInt8
IODMACommand::getNumAddressBits(void)
{
	return (UInt8) fNumAddressBits;
}

UInt32
IODMACommand::getAlignment(void)
{
	return fAlignMask + 1;
}

uint32_t
IODMACommand::getAlignmentLength(void)
{
	return fAlignMaskLength + 1;
}

uint32_t
IODMACommand::getAlignmentInternalSegments(void)
{
	return fAlignMaskInternalSegments + 1;
}

IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
    const SegmentOptions * segmentOptions,
    uint32_t               mappingOptions,
    IOMapper             * mapper,
    UInt64                 offset,
    UInt64                 length,
    bool                   flushCache,
    bool                   synchronize)
{
	IOReturn ret;

	if (fActive) {
		return kIOReturnNotPermitted;
	}

	ret = setSpecification(outSegFunc, segmentOptions, mappingOptions, mapper);
	if (kIOReturnSuccess != ret) {
		return ret;
	}

	ret = prepare(offset, length, flushCache, synchronize);

	return ret;
}

IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
    UInt8           numAddressBits,
    UInt64          maxSegmentSize,
    MappingOptions  mappingOptions,
    UInt64          maxTransferSize,
    UInt32          alignment,
    IOMapper       *mapper,
    UInt64          offset,
    UInt64          length,
    bool            flushCache,
    bool            synchronize)
{
	SegmentOptions segmentOptions =
	{
		.fStructSize = sizeof(segmentOptions),
		.fNumAddressBits = numAddressBits,
		.fMaxSegmentSize = maxSegmentSize,
		.fMaxTransferSize = maxTransferSize,
		.fAlignment = alignment,
		.fAlignmentLength = 1,
		.fAlignmentInternalSegments = alignment
	};

	return prepareWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper,
	           offset, length, flushCache, synchronize);
}


IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
	IODMACommandInternal * state = fInternalState;
	IOReturn               ret   = kIOReturnSuccess;
	uint32_t               mappingOptions = fMappingOptions;

	// check specification has been set
	if (!fOutSeg) {
		return kIOReturnNotReady;
	}

	if (!length) {
		length = fMDSummary.fLength;
	}

	if (length > fMaxTransferSize) {
		return kIOReturnNoSpace;
	}

	if (fActive++) {
		if ((state->fPreparedOffset != offset)
		    || (state->fPreparedLength != length)) {
			ret = kIOReturnNotReady;
		}
	} else {
		if (fAlignMaskLength & length) {
			return kIOReturnNotAligned;
		}

		if (atop_64(state->fPreparedLength) > UINT_MAX) {
			return kIOReturnVMError;
		}
		state->fPreparedOffset = offset;
		state->fPreparedLength = length;

		state->fMisaligned     = false;
		state->fDoubleBuffer   = false;
		state->fPrepared       = false;
		state->fCopyNext       = NULL;
		state->fCopyPageAlloc  = NULL;
		state->fCopyPageCount  = 0;
		state->fNextRemapPage  = NULL;
		state->fCopyMD         = NULL;
		state->fLocalMapperAlloc       = 0;
		state->fLocalMapperAllocValid  = false;
		state->fLocalMapperAllocLength = 0;

		state->fSourceAlignMask = fAlignMask;
		if (fMapper) {
			state->fSourceAlignMask &= page_mask;
		}

		state->fCursor = state->fIterateOnly
		    || (!state->fCheckAddressing
		    && (!state->fSourceAlignMask
		    || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));

		if (!state->fCursor) {
			IOOptionBits op = kWalkPrepare | kWalkPreflight;
			if (synchronize) {
				op |= kWalkSyncOut;
			}
			ret = walkAll(op);
		}

		if (IS_NONCOHERENT(mappingOptions) && flushCache) {
			if (state->fCopyMD) {
				state->fCopyMD->performOperation(kIOMemoryIncoherentIOStore, 0, length);
			} else {
				fMemory->performOperation(kIOMemoryIncoherentIOStore, offset, length);
			}
		}

		if (fMapper) {
			IOMDDMAMapArgs mapArgs;
			bzero(&mapArgs, sizeof(mapArgs));
			mapArgs.fMapper = fMapper.get();
			mapArgs.fCommand = this;
			mapArgs.fMapSpec.device         = state->fDevice;
			mapArgs.fMapSpec.alignment      = fAlignMask + 1;
			mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? ((UInt8) fNumAddressBits) : 64;
			mapArgs.fLength = state->fPreparedLength;
			OSSharedPtr<IOMemoryDescriptor> md = state->fCopyMD;
			if (md) {
				mapArgs.fOffset = 0;
			} else {
				md = fMemory;
				mapArgs.fOffset = state->fPreparedOffset;
			}

			ret = md->dmaCommandOperation(kIOMDDMAMap, &mapArgs, sizeof(mapArgs));

			if ((kIOReturnSuccess == ret)
			    && mapArgs.fAllocLength
			    && (mapArgs.fAllocLength != mapArgs.fLength)) {
				do {
					// multisegment case
					IOMDDMAWalkSegmentState  walkState;
					IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState;
					IOOptionBits             mdOp;
					uint64_t                 index;
					IOPhysicalLength         segLen;
					uint32_t                 segCount;
					uint64_t                 phys, align;
					uint64_t                 mapperPageMask;
					uint64_t                 mapperPageShift;
					uint64_t                 insertOffset;
					uint32_t                 mapOptions;
					uint64_t                 length;

					assert(mapArgs.fAllocLength > mapArgs.fLength);

					mapperPageMask = fMapper->getPageSize();
					assert(mapperPageMask);
					mapperPageMask -= 1;
					mapperPageShift = (64 - __builtin_clzll(mapperPageMask));
					walkArgs->fMapped = false;
					length = state->fPreparedLength;
					mdOp = kIOMDFirstSegment;
					segCount = 0;
					for (index = 0; index < length; segCount++) {
						walkArgs->fOffset = state->fPreparedOffset + index;

						ret = md->dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
						mdOp = kIOMDWalkSegments;
						assert(kIOReturnSuccess == ret);
						if (ret != kIOReturnSuccess) {
							panic("dmaCommandOperation");
						}
						segLen = walkArgs->fLength;
						index += segLen;
					}
					if (ret != kIOReturnSuccess) {
						break;
					}

#if defined(LOGTAG)
					if (LOGTAG == fMemory->getTag()) {
						IOLog("DMA[%p] alloc 0x%qx, 0x%qx\n", this, mapArgs.fAlloc, mapArgs.fAllocLength);
					}
#endif /* defined(LOGTAG) */

					state->fMapSegments = IONewZero(IODMACommandMapSegment, segCount);
					if (!state->fMapSegments) {
						ret = kIOReturnNoMemory;
						break;
					}
					state->fMapSegmentsCount = segCount;

					switch (kIODirectionOutIn & fMDSummary.fDirection) {
					case kIODirectionOut:
						mapOptions = kIODMAMapReadAccess;
						break;
					case kIODirectionIn:
						mapOptions = kIODMAMapWriteAccess;
						break;
					default:
						mapOptions = kIODMAMapReadAccess | kIODMAMapWriteAccess;
						break;
					}

					mdOp = kIOMDFirstSegment;
					segCount = 0;
					for (insertOffset = 0, index = 0; index < length; segCount++) {
						walkArgs->fOffset = state->fPreparedOffset + index;
						ret = md->dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
						mdOp = kIOMDWalkSegments;
						if (ret != kIOReturnSuccess) {
							panic("dmaCommandOperation 0x%x", ret);
						}
						phys = walkArgs->fIOVMAddr;
						segLen = walkArgs->fLength;

#if defined(LOGTAG)
						if (LOGTAG == fMemory->getTag()) {
							IOLog("DMA[%p] phys[%d] 0x%qx, 0x%qx\n", this, segCount, (uint64_t) phys, (uint64_t) segLen);
						}
#endif /* defined(LOGTAG) */

						align = (phys & mapperPageMask);

#if defined(LOGTAG)
						if (LOGTAG == fMemory->getTag()) {
							IOLog("DMA[%p] runs[%d] dmaoff 0x%qx, mapoff 0x%qx, align 0x%qx\n", this, segCount, index, insertOffset, align);
						}
#endif /* defined(LOGTAG) */

						assert(segCount < state->fMapSegmentsCount);
						state->fMapSegments[segCount].fDMAOffset = state->fPreparedOffset + index;
						state->fMapSegments[segCount].fMapOffset = insertOffset;
						state->fMapSegments[segCount].fPageOffset = align;
						index += segLen;

						// segment page align
						segLen = ((phys + segLen + mapperPageMask) & ~mapperPageMask);
						phys -= align;
						segLen -= phys;
						insertOffset += segLen;
					}
					state->fLocalMapperAllocBase = (mapArgs.fAlloc & ~mapperPageMask);
#if defined(LOGTAG)
					if (LOGTAG == fMemory->getTag()) {
						IOLog("IODMACommand fMapSegmentsCount %d\n", state->fMapSegmentsCount);
					}
#endif /* defined(LOGTAG) */
				} while (false);
			}
			if (kIOReturnSuccess == ret) {
				state->fLocalMapperAlloc       = mapArgs.fAlloc;
				state->fLocalMapperAllocValid  = true;
				state->fLocalMapperAllocLength = mapArgs.fAllocLength;
			}
		}
		if (kIOReturnSuccess == ret) {
			state->fPrepared = true;
		}
	}
	return ret;
}

IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
	IODMACommandInternal * state = fInternalState;
	IOReturn               ret   = kIOReturnSuccess;
	OSSharedPtr<IOMemoryDescriptor> copyMD;

	if (fActive < 1) {
		return kIOReturnNotReady;
	}

	if (!--fActive) {
		copyMD = state->fCopyMD;

		if (IS_NONCOHERENT(fMappingOptions) && invalidateCache) {
			if (copyMD) {
				copyMD->performOperation(kIOMemoryIncoherentIOFlush, 0, state->fPreparedLength);
			} else {
				OSSharedPtr<IOMemoryDescriptor> md = fMemory;
				md->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
			}
		}

		if (!state->fCursor) {
			IOOptionBits op = kWalkComplete;
			if (synchronize) {
				op |= kWalkSyncIn;
			}
			ret = walkAll(op);
		}

		if (state->fLocalMapperAllocValid) {
			IOMDDMAMapArgs mapArgs;
			bzero(&mapArgs, sizeof(mapArgs));
			mapArgs.fMapper = fMapper.get();
			mapArgs.fCommand = this;
			mapArgs.fAlloc = state->fLocalMapperAlloc;
			mapArgs.fAllocLength = state->fLocalMapperAllocLength;
			OSSharedPtr<IOMemoryDescriptor> md = copyMD;
			if (md) {
				mapArgs.fOffset = 0;
			} else {
				md = fMemory;
				mapArgs.fOffset = state->fPreparedOffset;
			}

			ret = md->dmaCommandOperation(kIOMDDMAUnmap, &mapArgs, sizeof(mapArgs));

			state->fLocalMapperAlloc       = 0;
			state->fLocalMapperAllocValid  = false;
			state->fLocalMapperAllocLength = 0;
			if (state->fMapSegments) {
				IODelete(state->fMapSegments, IODMACommandMapSegment, state->fMapSegmentsCount);
				state->fMapSegments = NULL;
				state->fMapSegmentsCount = 0;
			}
		}

		state->fPrepared = false;
	}

	return ret;
}

IOReturn
IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
{
	IODMACommandInternal * state = fInternalState;
	if (fActive < 1) {
		return kIOReturnNotReady;
	}

	if (offset) {
		*offset = state->fPreparedOffset;
	}
	if (length) {
		*length = state->fPreparedLength;
	}

	return kIOReturnSuccess;
}

IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
	IODMACommandInternal * state = fInternalState;
	IOReturn               ret   = kIOReturnSuccess;
	IOOptionBits           op;

	if (kIODirectionOutIn == (kIODirectionOutIn & options)) {
		return kIOReturnBadArgument;
	}

	if (fActive < 1) {
		return kIOReturnNotReady;
	}

	op = 0;
	if (kForceDoubleBuffer & options) {
		if (state->fDoubleBuffer) {
			return kIOReturnSuccess;
		}
		ret = complete(false /* invalidateCache */, true /* synchronize */);
		state->fCursor = false;
		state->fForceDoubleBuffer = true;
		ret = prepare(state->fPreparedOffset, state->fPreparedLength, false /* flushCache */, true /* synchronize */);

		return ret;
	} else if (state->fCursor) {
		return kIOReturnSuccess;
	}

	if (kIODirectionIn & options) {
		op |= kWalkSyncIn | kWalkSyncAlways;
	} else if (kIODirectionOut & options) {
		op |= kWalkSyncOut | kWalkSyncAlways;
	}

	ret = walkAll(op);

	return ret;
}

struct IODMACommandTransferContext {
	void *   buffer;
	UInt64   bufferOffset;
	UInt64   remaining;
	UInt32   op;
};
enum{
	kIODMACommandTransferOpReadBytes  = 1,
	kIODMACommandTransferOpWriteBytes = 2
};

IOReturn
IODMACommand::transferSegment(void *reference,
    IODMACommand *target,
    Segment64     segment,
    void         *segments,
    UInt32        segmentIndex)
{
	IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
	UInt64   length  = min(segment.fLength, context->remaining);
	addr64_t ioAddr  = segment.fIOVMAddr;
	addr64_t cpuAddr = ioAddr;

	context->remaining -= length;

	while (length) {
		UInt64 copyLen = length;
		if ((kMapped == MAPTYPE(target->fMappingOptions))
		    && target->fMapper) {
			cpuAddr = target->fMapper->mapToPhysicalAddress(ioAddr);
			copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
			ioAddr += copyLen;
		}
		if (copyLen > (UINT_MAX - PAGE_SIZE + 1)) {
			copyLen = (UINT_MAX - PAGE_SIZE + 1);
		}

		switch (context->op) {
		case kIODMACommandTransferOpReadBytes:
			copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, (unsigned int) copyLen,
			    cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
			break;
		case kIODMACommandTransferOpWriteBytes:
			copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, (unsigned int) copyLen,
			    cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
			break;
		}
		length -= copyLen;
		context->bufferOffset += copyLen;
	}

	return context->remaining ? kIOReturnSuccess : kIOReturnOverrun;
}

UInt64
IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
{
	IODMACommandInternal *      state = fInternalState;
	IODMACommandTransferContext context;
	Segment64                   segments[1];
	UInt32                      numSegments = 0 - 1;

	if (fActive < 1) {
		return 0;
	}

	if (offset >= state->fPreparedLength) {
		return 0;
	}
	length = min(length, state->fPreparedLength - offset);

	context.buffer       = buffer;
	context.bufferOffset = 0;
	context.remaining    = length;
	context.op           = transferOp;
	(void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);

	return length - context.remaining;
}

UInt64
IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
{
	return transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length);
}

UInt64
IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
{
	return transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length);
}

1262 | IOReturn |
1263 | IODMACommand::genIOVMSegments(UInt64 *offsetP, | |
0a7de745 A |
1264 | void *segmentsP, |
1265 | UInt32 *numSegmentsP) | |
0c530ab8 | 1266 | { |
0a7de745 A |
1267 | return genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg, |
1268 | offsetP, segmentsP, numSegmentsP); | |
0c530ab8 A |
1269 | } |
1270 | ||
1271 | IOReturn | |
b0d623f7 | 1272 | IODMACommand::genIOVMSegments(uint32_t op, |
0a7de745 A |
1273 | InternalSegmentFunction outSegFunc, |
1274 | void *reference, | |
1275 | UInt64 *offsetP, | |
1276 | void *segmentsP, | |
1277 | UInt32 *numSegmentsP) | |
0c530ab8 | 1278 | { |
0a7de745 A |
1279 | IODMACommandInternal * internalState = fInternalState; |
1280 | IOOptionBits mdOp = kIOMDWalkSegments; | |
1281 | IOReturn ret = kIOReturnSuccess; | |
0c530ab8 | 1282 | |
0a7de745 A |
1283 | if (!(kWalkComplete & op) && !fActive) { |
1284 | return kIOReturnNotReady; | |
1285 | } | |
0c530ab8 | 1286 | |
0a7de745 A |
1287 | if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP) { |
1288 | return kIOReturnBadArgument; | |
1289 | } | |
0c530ab8 | 1290 | |
0a7de745 A |
1291 | IOMDDMAWalkSegmentArgs *state = |
1292 | (IOMDDMAWalkSegmentArgs *)(void *) fState; | |
0c530ab8 | 1293 | |
0a7de745 A |
1294 | UInt64 offset = *offsetP + internalState->fPreparedOffset; |
1295 | UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength; | |
0c530ab8 | 1296 | |
0a7de745 A |
1297 | if (offset >= memLength) { |
1298 | return kIOReturnOverrun; | |
1299 | } | |
0c530ab8 | 1300 | |
0a7de745 A |
1301 | if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) { |
1302 | state->fOffset = 0; | |
1303 | internalState->fIOVMAddrValid = state->fIOVMAddr = 0; | |
1304 | internalState->fNextRemapPage = NULL; | |
1305 | internalState->fNewMD = false; | |
1306 | mdOp = kIOMDFirstSegment; | |
1307 | if (fMapper) { | |
1308 | if (internalState->fLocalMapperAllocValid) { | |
f427ee49 | 1309 | state->fMapped = true; |
0a7de745 A |
1310 | state->fMappedBase = internalState->fLocalMapperAlloc; |
1311 | } else { | |
f427ee49 | 1312 | state->fMapped = false; |
99c3a104 | 1313 | } |
0b4c1975 | 1314 | } |
0b4c1975 | 1315 | } |
0a7de745 A |
1316 | |
1317 | UInt32 segIndex = 0; | |
1318 | UInt32 numSegments = *numSegmentsP; | |
1319 | Segment64 curSeg = { 0, 0 }; | |
1320 | bool curSegValid = false; | |
1321 | addr64_t maxPhys; | |
1322 | ||
1323 | if (fNumAddressBits && (fNumAddressBits < 64)) { | |
1324 | maxPhys = (1ULL << fNumAddressBits); | |
1325 | } else { | |
1326 | maxPhys = 0; | |
0b4c1975 | 1327 | } |
0a7de745 | 1328 | maxPhys--; |
0c530ab8 | 1329 | |
0a7de745 A |
1330 | while (internalState->fIOVMAddrValid || (state->fOffset < memLength)) { |
1331 | // state = next seg | |
1332 | if (!internalState->fIOVMAddrValid) { | |
1333 | IOReturn rtn; | |
1334 | ||
1335 | state->fOffset = offset; | |
1336 | state->fLength = memLength - offset; | |
1337 | ||
f427ee49 A |
1338 | bool done = false; |
1339 | bool check = false; | |
1340 | ||
1341 | if (internalState->fLocalMapperAllocValid) { | |
1342 | if (!internalState->fMapSegmentsCount) { | |
1343 | state->fIOVMAddr = internalState->fLocalMapperAlloc + offset - internalState->fPreparedOffset; | |
1344 | rtn = kIOReturnSuccess; | |
1345 | done = true; | |
1346 | check = true; | |
1347 | } else { | |
1348 | uint64_t address; | |
1349 | uint64_t length; | |
1350 | uint64_t runOffset; | |
1351 | uint64_t ind; | |
1352 | uint64_t off2Ind = internalState->fOffset2Index; | |
1353 | ||
1354 | // Validate the previous offset | |
1355 | if (offset | |
1356 | && (offset == internalState->fNextOffset || off2Ind <= offset)) { | |
1357 | ind = internalState->fIndex; | |
1358 | } else { | |
1359 | ind = off2Ind = 0; // Start from beginning | |
0a7de745 | 1360 | } |
f427ee49 A |
1361 | #if defined(LOGTAG) |
1362 | if (LOGTAG == fMemory->getTag()) { | |
1363 | IOLog("DMA[%p] offsets 0x%qx, 0x%qx, 0x%qx ind %qd\n", this, offset, internalState->fPreparedOffset, internalState->fNextOffset, ind); | |
1364 | } | |
1365 | #endif /* defined(LOGTAG) */ | |
1366 | ||
1367 | // Scan through iopl info blocks looking for block containing offset | |
1368 | while (ind < internalState->fMapSegmentsCount && offset >= internalState->fMapSegments[ind].fDMAOffset) { | |
1369 | ind++; | |
1370 | } | |
1371 | if (ind < internalState->fMapSegmentsCount) { | |
1372 | length = internalState->fMapSegments[ind].fDMAOffset; | |
1373 | } else { | |
1374 | length = memLength; | |
1375 | } | |
1376 | length -= offset; // Remainder within iopl | |
1377 | ||
1378 | // Go back to actual range as search goes past it | |
1379 | ind--; | |
1380 | off2Ind = internalState->fMapSegments[ind].fDMAOffset; | |
1381 | ||
1382 | // Subtract offset till this iopl in total list | |
1383 | runOffset = offset - off2Ind; | |
1384 | ||
1385 | // Compute an offset relative to the mapped base | |
1386 | ||
1387 | runOffset += internalState->fMapSegments[ind].fPageOffset; | |
1388 | address = internalState->fLocalMapperAllocBase + internalState->fMapSegments[ind].fMapOffset + runOffset; | |
1389 | #if defined(LOGTAG) | |
1390 | if (LOGTAG == fMemory->getTag()) { | |
1391 | IOLog("DMA[%p] addrlen 0x%qx, 0x%qx\n", this, address, length); | |
1392 | } | |
1393 | #endif /* defined(LOGTAG) */ | |
1394 | ||
1395 | state->fIOVMAddr = address; | |
1396 | state->fLength = length; | |
1397 | ||
1398 | internalState->fIndex = ind; | |
1399 | internalState->fOffset2Index = off2Ind; | |
1400 | internalState->fNextOffset = state->fOffset + length; | |
1401 | ||
1402 | rtn = kIOReturnSuccess; | |
1403 | done = true; | |
1404 | check = true; | |
0a7de745 | 1405 | } |
f427ee49 A |
1406 | } |
1407 | ||
1408 | if (!done) { | |
1409 | IOMemoryDescriptor * memory = | |
1410 | internalState->fCopyMD ? internalState->fCopyMD.get() : fMemory.get(); | |
0a7de745 A |
1411 | rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState)); |
1412 | mdOp = kIOMDWalkSegments; | |
1413 | } | |
f427ee49 A |
1414 | #if 0 |
1415 | if (check | |
1416 | && !ml_at_interrupt_context() | |
1417 | && (rtn == kIOReturnSuccess) | |
1418 | && fMapper | |
1419 | && strcmp("AppleNVMeMMU", fMapper->getName())) { | |
1420 | uint64_t checkOffset; | |
1421 | IOPhysicalLength segLen; | |
1422 | IOMemoryDescriptor * memory = | |
1423 | internalState->fCopyMD ? internalState->fCopyMD.get() : fMemory.get(); | |
1424 | for (checkOffset = 0; checkOffset < state->fLength;) { | |
1425 | addr64_t phys = memory->getPhysicalSegment(offset + checkOffset, &segLen, kIOMemoryMapperNone); | |
1426 | addr64_t mapperPhys; | |
1427 | ||
1428 | mapperPhys = fMapper->mapToPhysicalAddress(state->fIOVMAddr + checkOffset); | |
1429 | mapperPhys |= (phys & (fMapper->getPageSize() - 1)); | |
1430 | if (mapperPhys != phys) { | |
1431 | panic("DMA[%p] mismatch at offset %llx + %llx, dma %llx mapperPhys %llx != %llx, len %llx\n", | |
1432 | this, offset, checkOffset, | |
1433 | state->fIOVMAddr + checkOffset, mapperPhys, phys, state->fLength); | |
1434 | } | |
1435 | checkOffset += page_size - (phys & page_mask); | |
1436 | } | |
1437 | } | |
1438 | #endif | |
0a7de745 A |
1439 | if (rtn == kIOReturnSuccess) { |
1440 | internalState->fIOVMAddrValid = true; | |
1441 | assert(state->fLength); | |
1442 | if (curSegValid && ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr)) { | |
1443 | UInt64 length = state->fLength; | |
1444 | offset += length; | |
1445 | curSeg.fLength += length; | |
1446 | internalState->fIOVMAddrValid = state->fIOVMAddr = 0; | |
1447 | } | |
1448 | } else if (rtn == kIOReturnOverrun) { | |
1449 | internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0; // At end | |
1450 | } else { | |
1451 | return rtn; | |
1452 | } | |
0b4c1975 | 1453 | } |
0a7de745 A |
1454 | |
1455 | // seg = state, offset = end of seg | |
1456 | if (!curSegValid) { | |
1457 | UInt64 length = state->fLength; | |
1458 | offset += length; | |
1459 | curSeg.fIOVMAddr = state->fIOVMAddr; | |
1460 | curSeg.fLength = length; | |
1461 | curSegValid = true; | |
1462 | internalState->fIOVMAddrValid = state->fIOVMAddr = 0; | |
0c530ab8 | 1463 | } |
0a7de745 A |
1464 | |
1465 | if (!internalState->fIOVMAddrValid) { | |
1466 | // maxPhys | |
1467 | if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys) { | |
1468 | if (internalState->fCursor) { | |
1469 | curSegValid = curSeg.fIOVMAddr = 0; | |
1470 | ret = kIOReturnMessageTooLarge; | |
1471 | break; | |
1472 | } else if (curSeg.fIOVMAddr <= maxPhys) { | |
1473 | UInt64 remain, newLength; | |
1474 | ||
1475 | newLength = (maxPhys + 1 - curSeg.fIOVMAddr); | |
1476 | DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength); | |
1477 | remain = curSeg.fLength - newLength; | |
1478 | state->fIOVMAddr = newLength + curSeg.fIOVMAddr; | |
1479 | internalState->fIOVMAddrValid = true; | |
1480 | curSeg.fLength = newLength; | |
1481 | state->fLength = remain; | |
1482 | offset -= remain; | |
1483 | } else { | |
1484 | UInt64 addr = curSeg.fIOVMAddr; | |
f427ee49 | 1485 | ppnum_t addrPage = (ppnum_t) atop_64(addr); |
0a7de745 A |
1486 | vm_page_t remap = NULL; |
1487 | UInt64 remain, newLength; | |
1488 | ||
1489 | DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength); | |
1490 | ||
1491 | remap = internalState->fNextRemapPage; | |
1492 | if (remap && (addrPage == vm_page_get_offset(remap))) { | |
1493 | } else { | |
1494 | for (remap = internalState->fCopyPageAlloc; | |
1495 | remap && (addrPage != vm_page_get_offset(remap)); | |
1496 | remap = vm_page_get_next(remap)) { | |
1497 | } | |
1498 | } | |
1499 | ||
1500 | if (!remap) { | |
1501 | panic("no remap page found"); | |
1502 | } | |
1503 | ||
1504 | curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap)) | |
1505 | + (addr & PAGE_MASK); | |
1506 | curSegValid = true; | |
1507 | internalState->fNextRemapPage = vm_page_get_next(remap); | |
1508 | ||
1509 | newLength = PAGE_SIZE - (addr & PAGE_MASK); | |
1510 | if (newLength < curSeg.fLength) { | |
1511 | remain = curSeg.fLength - newLength; | |
1512 | state->fIOVMAddr = addr + newLength; | |
1513 | internalState->fIOVMAddrValid = true; | |
1514 | curSeg.fLength = newLength; | |
1515 | state->fLength = remain; | |
1516 | offset -= remain; | |
1517 | } | |
1518 | DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset); | |
1519 | } | |
1520 | } | |
1521 | ||
1522 | // reduce size of output segment | |
1523 | uint64_t reduce, leftover = 0; | |
1524 | ||
1525 | // fMaxSegmentSize | |
1526 | if (curSeg.fLength > fMaxSegmentSize) { | |
1527 | leftover += curSeg.fLength - fMaxSegmentSize; | |
1528 | curSeg.fLength = fMaxSegmentSize; | |
1529 | state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr; | |
1530 | internalState->fIOVMAddrValid = true; | |
1531 | } | |
1532 | ||
1533 | // alignment: trim the segment length to a multiple of the length-alignment granule (fAlignMaskLength) | |
1534 | ||
1535 | reduce = (curSeg.fLength & fAlignMaskLength); | |
1536 | if (reduce && (curSeg.fLength > reduce)) { | |
1537 | leftover += reduce; | |
1538 | curSeg.fLength -= reduce; | |
1539 | state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr; | |
1540 | internalState->fIOVMAddrValid = true; | |
1541 | } | |
1542 | ||
1543 | // alignment: trim so the next segment starts on an address aligned to fAlignMaskInternalSegments | |
1544 | ||
1545 | reduce = (state->fIOVMAddr & fAlignMaskInternalSegments); | |
1546 | if (reduce && (curSeg.fLength > reduce)) { | |
1547 | leftover += reduce; | |
1548 | curSeg.fLength -= reduce; | |
1549 | state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr; | |
1550 | internalState->fIOVMAddrValid = true; | |
1551 | } | |
1552 | ||
1553 | if (leftover) { | |
1554 | DEBG("reduce seg by 0x%llx @ 0x%llx [0x%llx, 0x%llx]\n", | |
1555 | leftover, offset, | |
1556 | curSeg.fIOVMAddr, curSeg.fLength); | |
1557 | state->fLength = leftover; | |
1558 | offset -= leftover; | |
1559 | } | |
1560 | ||
1561 | // | |
1562 | ||
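/*
 * Cursor (single-pass) clients have no preflight bounce buffering, so a
 * segment that still violates the address or length alignment masks cannot
 * be fixed up here; generation fails with kIOReturnNotAligned.
 */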
1563 | if (internalState->fCursor) { | |
1564 | bool misaligned; | |
1565 | uint32_t mask; | |
1566 | ||
1567 | mask = (segIndex ? fAlignMaskInternalSegments : internalState->fSourceAlignMask); | |
1568 | misaligned = (0 != (mask & curSeg.fIOVMAddr)); | |
1569 | if (!misaligned) { | |
1570 | mask = fAlignMaskLength; | |
1571 | misaligned |= (0 != (mask & curSeg.fLength)); | |
1572 | } | |
1573 | if (misaligned) { | |
1574 | if (misaligned) { | |
1575 | DEBG("cursor misaligned %qx:%qx\n", curSeg.fIOVMAddr, curSeg.fLength); | |
1576 | } | |
1577 | curSegValid = curSeg.fIOVMAddr = 0; | |
1578 | ret = kIOReturnNotAligned; | |
1579 | break; | |
1580 | } | |
1581 | } | |
1582 | ||
1583 | if (offset >= memLength) { | |
1584 | curSeg.fLength -= (offset - memLength); | |
1585 | offset = memLength; | |
1586 | internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0; // At end | |
1587 | break; | |
1588 | } | |
3e170ce0 | 1589 | } |
0a7de745 A |
1590 | |
1591 | if (internalState->fIOVMAddrValid) { | |
1592 | if ((segIndex + 1 == numSegments)) { | |
1593 | break; | |
1594 | } | |
f427ee49 A |
1595 | #if defined(LOGTAG) |
1596 | if ((LOGTAG == fMemory->getTag()) && (kWalkClient == op)) { | |
1597 | IOLog("DMA[%p] outseg 0x%qx, 0x%qx\n", this, curSeg.fIOVMAddr, curSeg.fLength); | |
1598 | } | |
1599 | #endif /* defined(LOGTAG) */ | |
0a7de745 A |
1600 | ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++); |
1601 | curSegValid = curSeg.fIOVMAddr = 0; | |
1602 | if (kIOReturnSuccess != ret) { | |
1603 | break; | |
1604 | } | |
3e170ce0 | 1605 | } |
0a7de745 A |
1606 | } |
1607 | ||
1608 | if (curSegValid) { | |
f427ee49 A |
1609 | #if defined(LOGTAG) |
1610 | if ((LOGTAG == fMemory->getTag()) && (kWalkClient == op)) { | |
1611 | IOLog("DMA[%p] outseg 0x%qx, 0x%qx\n", this, curSeg.fIOVMAddr, curSeg.fLength); | |
1612 | } | |
1613 | #endif /* defined(LOGTAG) */ | |
0a7de745 A |
1614 | ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++); |
1615 | } | |
1616 | ||
1617 | if (kIOReturnSuccess == ret) { | |
1618 | state->fOffset = offset; | |
1619 | *offsetP = offset - internalState->fPreparedOffset; | |
1620 | *numSegmentsP = segIndex; | |
1621 | } | |
1622 | return ret; | |
0c530ab8 A |
1623 | } |
1624 | ||
0a7de745 | 1625 | IOReturn |
0c530ab8 A |
1626 | IODMACommand::clientOutputSegment( |
1627 | void *reference, IODMACommand *target, | |
1628 | Segment64 segment, void *vSegList, UInt32 outSegIndex) | |
1629 | { | |
0a7de745 A |
1630 | SegmentFunction segmentFunction = (SegmentFunction) reference; |
1631 | IOReturn ret = kIOReturnSuccess; | |
1632 | ||
1633 | if (target->fNumAddressBits && (target->fNumAddressBits < 64) | |
1634 | && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits) | |
1635 | && (target->reserved->fLocalMapperAllocValid || !target->fMapper)) { | |
1636 | DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength); | |
1637 | ret = kIOReturnMessageTooLarge; | |
1638 | } | |
1639 | ||
1640 | if (!(*segmentFunction)(target, segment, vSegList, outSegIndex)) { | |
1641 | DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength); | |
1642 | ret = kIOReturnMessageTooLarge; | |
1643 | } | |
1644 | ||
1645 | return ret; | |
0c530ab8 A |
1646 | } |
1647 | ||
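/*
 * Illustrative sketch, not part of the original file: a custom
 * SegmentFunction in the style of the Output* helpers below, filling a
 * hypothetical hardware descriptor ring. Only the callback signature,
 * Segment64, and the "return false -> kIOReturnMessageTooLarge" convention
 * come from IODMACommand; MyHWDescriptor and its fields are assumptions.
 */
struct MyHWDescriptor {
	uint64_t addr;
	uint32_t len;
	uint32_t flags;
};

static bool
MyOutputSegment(IODMACommand * /* target */,
    IODMACommand::Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
	MyHWDescriptor *ring = (MyHWDescriptor *) vSegList;

	if (segment.fLength > UINT32_MAX) {
		// clientOutputSegment reports a false return as kIOReturnMessageTooLarge
		return false;
	}
	ring[outSegIndex].addr  = segment.fIOVMAddr;
	ring[outSegIndex].len   = (uint32_t) segment.fLength;
	ring[outSegIndex].flags = 0;
	return true;
}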
b0d623f7 A |
1648 | IOReturn |
1649 | IODMACommand::genIOVMSegments(SegmentFunction segmentFunction, | |
0a7de745 A |
1650 | UInt64 *offsetP, |
1651 | void *segmentsP, | |
1652 | UInt32 *numSegmentsP) | |
b0d623f7 | 1653 | { |
0a7de745 A |
1654 | return genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction, |
1655 | offsetP, segmentsP, numSegmentsP); | |
b0d623f7 A |
1656 | } |
1657 | ||
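/*
 * Illustrative sketch, not part of the original file: the typical client
 * pattern around the generation entry points above, using the
 * gen64IOVMSegments() convenience from IODMACommand.h (host-order Segment64
 * records). The function name, the 32-entry batch size and the
 * device-programming step are assumptions; error handling is abbreviated.
 */
static IOReturn
FillScatterGather(IODMACommand *cmd, IOMemoryDescriptor *md)
{
	// setMemoryDescriptor() also prepares the command by default
	IOReturn ret = cmd->setMemoryDescriptor(md);
	if (kIOReturnSuccess != ret) {
		return ret;
	}

	UInt64 offset = 0;
	while (offset < md->getLength()) {
		IODMACommand::Segment64 segs[32];
		UInt32 numSegs = 32;

		ret = cmd->gen64IOVMSegments(&offset, segs, &numSegs);
		if (kIOReturnSuccess != ret) {
			break;
		}
		for (UInt32 i = 0; i < numSegs; i++) {
			// program segs[i].fIOVMAddr / segs[i].fLength into the device
		}
	}

	cmd->complete();
	cmd->clearMemoryDescriptor();
	return ret;
}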
0a7de745 | 1658 | bool |
0c530ab8 | 1659 | IODMACommand::OutputHost32(IODMACommand *, |
0a7de745 | 1660 | Segment64 segment, void *vSegList, UInt32 outSegIndex) |
0c530ab8 | 1661 | { |
0a7de745 A |
1662 | Segment32 *base = (Segment32 *) vSegList; |
1663 | base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr; | |
1664 | base[outSegIndex].fLength = (UInt32) segment.fLength; | |
1665 | return true; | |
0c530ab8 A |
1666 | } |
1667 | ||
0a7de745 | 1668 | bool |
0c530ab8 | 1669 | IODMACommand::OutputBig32(IODMACommand *, |
0a7de745 | 1670 | Segment64 segment, void *vSegList, UInt32 outSegIndex) |
0c530ab8 | 1671 | { |
0a7de745 A |
1672 | const UInt offAddr = outSegIndex * sizeof(Segment32); |
1673 | const UInt offLen = offAddr + sizeof(UInt32); | |
1674 | OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr); | |
1675 | OSWriteBigInt32(vSegList, offLen, (UInt32) segment.fLength); | |
1676 | return true; | |
0c530ab8 A |
1677 | } |
1678 | ||
1679 | bool | |
1680 | IODMACommand::OutputLittle32(IODMACommand *, | |
0a7de745 | 1681 | Segment64 segment, void *vSegList, UInt32 outSegIndex) |
0c530ab8 | 1682 | { |
0a7de745 A |
1683 | const UInt offAddr = outSegIndex * sizeof(Segment32); |
1684 | const UInt offLen = offAddr + sizeof(UInt32); | |
1685 | OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr); | |
1686 | OSWriteLittleInt32(vSegList, offLen, (UInt32) segment.fLength); | |
1687 | return true; | |
0c530ab8 A |
1688 | } |
1689 | ||
1690 | bool | |
1691 | IODMACommand::OutputHost64(IODMACommand *, | |
0a7de745 | 1692 | Segment64 segment, void *vSegList, UInt32 outSegIndex) |
0c530ab8 | 1693 | { |
0a7de745 A |
1694 | Segment64 *base = (Segment64 *) vSegList; |
1695 | base[outSegIndex] = segment; | |
1696 | return true; | |
0c530ab8 A |
1697 | } |
1698 | ||
1699 | bool | |
1700 | IODMACommand::OutputBig64(IODMACommand *, | |
0a7de745 | 1701 | Segment64 segment, void *vSegList, UInt32 outSegIndex) |
0c530ab8 | 1702 | { |
0a7de745 A |
1703 | const UInt offAddr = outSegIndex * sizeof(Segment64); |
1704 | const UInt offLen = offAddr + sizeof(UInt64); | |
1705 | OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr); | |
1706 | OSWriteBigInt64(vSegList, offLen, (UInt64) segment.fLength); | |
1707 | return true; | |
0c530ab8 A |
1708 | } |
1709 | ||
1710 | bool | |
1711 | IODMACommand::OutputLittle64(IODMACommand *, | |
0a7de745 | 1712 | Segment64 segment, void *vSegList, UInt32 outSegIndex) |
0c530ab8 | 1713 | { |
0a7de745 A |
1714 | const UInt offAddr = outSegIndex * sizeof(Segment64); |
1715 | const UInt offLen = offAddr + sizeof(UInt64); | |
1716 | OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr); | |
1717 | OSWriteLittleInt64(vSegList, offLen, (UInt64) segment.fLength); | |
1718 | return true; | |
0c530ab8 | 1719 | } |
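/*
 * The six Output* helpers above are the stock segment formatters: host,
 * big-endian and little-endian flavours of the 32-bit and 64-bit Segment
 * layouts, normally reached through the kIODMACommandOutput* constants (the
 * gen32IOVMSegments()/gen64IOVMSegments() conveniences use the host-order
 * variants). The byte-order variants index by sizeof(Segment): for
 * OutputBig32 with outSegIndex == 2, offAddr is 16 and offLen is 20. Note
 * that the 32-bit variants silently truncate fIOVMAddr and fLength to 32
 * bits; the fNumAddressBits check in clientOutputSegment is what rejects
 * addresses that would not fit.
 */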