]> git.saurik.com Git - apple/xnu.git/blob - iokit/Tests/TestIOMemoryDescriptor.cpp
xnu-6153.11.26.tar.gz
[apple/xnu.git] / iokit / Tests / TestIOMemoryDescriptor.cpp
1 /*
2 * Copyright (c) 2014-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/cdefs.h>
30
31 #include <IOKit/assert.h>
32 #include <IOKit/system.h>
33 #include <IOKit/IOLib.h>
34 #include <IOKit/IOMemoryDescriptor.h>
35 #include <IOKit/IOMapper.h>
36 #include <IOKit/IODMACommand.h>
37 #include <IOKit/IOKitKeysPrivate.h>
38 #include "Tests.h"
39
40 #ifndef __LP64__
41 #include <IOKit/IOSubMemoryDescriptor.h>
42 #endif /* !__LP64__ */
43 #include <IOKit/IOSubMemoryDescriptor.h>
44 #include <IOKit/IOMultiMemoryDescriptor.h>
45 #include <IOKit/IOBufferMemoryDescriptor.h>
46
47 #include <IOKit/IOKitDebug.h>
48 #include <libkern/OSDebug.h>
49 #include <sys/uio.h>
50
51 __BEGIN_DECLS
52 #include <vm/pmap.h>
53 #include <vm/vm_pageout.h>
54 #include <mach/memory_object_types.h>
55 #include <device/device_port.h>
56
57 #include <mach/vm_prot.h>
58 #include <mach/mach_vm.h>
59 #include <vm/vm_fault.h>
60 #include <vm/vm_protos.h>
61 __END_DECLS
62
63
64 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
65
66 #if DEVELOPMENT || DEBUG
67
68 extern SInt32 gIOMemoryReferenceCount;
69
70 static int
71 IOMultMemoryDescriptorTest(int newValue)
72 {
73 IOMemoryDescriptor * mds[3];
74 IOMultiMemoryDescriptor * mmd;
75 IOMemoryMap * map;
76 void * addr;
77 uint8_t * data;
78 uint32_t i;
79 IOAddressRange ranges[2];
80
81 data = (typeof(data))IOMallocAligned(ptoa(8), page_size);
82 for (i = 0; i < ptoa(8); i++) {
83 data[i] = atop(i) | 0xD0;
84 }
85
86 ranges[0].address = (IOVirtualAddress)(data + ptoa(4));
87 ranges[0].length = ptoa(4);
88 ranges[1].address = (IOVirtualAddress)(data + ptoa(0));
89 ranges[1].length = ptoa(4);
90
91 mds[0] = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn, kernel_task);
92
93 mds[1] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(3), ptoa(2), kIODirectionOutIn);
94 mds[2] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(7), ptoa(1), kIODirectionOutIn);
95
96 mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds) / sizeof(mds[0]), kIODirectionOutIn, false);
97 mds[2]->release();
98 mds[1]->release();
99 mds[0]->release();
100 map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere, ptoa(7), mmd->getLength() - ptoa(7));
101 mmd->release();
102 assert(map);
103
104 addr = (void *) map->getVirtualAddress();
105 assert(ptoa(4) == map->getLength());
106 assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(0) / sizeof(uint32_t)]);
107 assert(0xd7d7d7d7 == ((uint32_t *)addr)[ptoa(1) / sizeof(uint32_t)]);
108 assert(0xd0d0d0d0 == ((uint32_t *)addr)[ptoa(2) / sizeof(uint32_t)]);
109 assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(3) / sizeof(uint32_t)]);
110 map->release();
111 IOFreeAligned(data, ptoa(8));
112
113 return 0;
114 }
115
116
117
// <rdar://problem/30102458>
/*
 * For each I/O direction, force IODMACommand to double-buffer (bounce) a
 * pageable buffer and verify the copy-in/copy-out behavior of
 * synchronize()/complete() in both directions.
 *
 * Loops dir over kIODirectionIn, kIODirectionOut, kIODirectionInOut
 * (kIODirectionInOut terminates the loop).
 */
static int
IODMACommandForceDoubleBufferTest(int newValue)
{
    IOReturn ret;
    IOBufferMemoryDescriptor * bmd;
    IODMACommand * dma;
    uint32_t dir, data;
    IODMACommand::SegmentOptions segOptions =
    {
        .fStructSize = sizeof(segOptions),
        .fNumAddressBits = 64,
        .fMaxSegmentSize = 0x2000,
        .fMaxTransferSize = 128 * 1024,
        .fAlignment = 1,
        .fAlignmentLength = 1,
        .fAlignmentInternalSegments = 1
    };
    IODMACommand::Segment64 segments[1];
    UInt32 numSegments;
    UInt64 dmaOffset;


    for (dir = kIODirectionIn;; dir++) {
        bmd = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task,
            dir | kIOMemoryPageable, ptoa(8));
        assert(bmd);

        // Tag word encodes the direction so each iteration's data is unique.
        ((uint32_t*) bmd->getBytesNoCopy())[0] = 0x53535300 | dir;

        ret = bmd->prepare((IODirection) dir);
        assert(kIOReturnSuccess == ret);

        dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
            kIODMAMapOptionMapped,
            NULL, NULL);
        assert(dma);
        ret = dma->setMemoryDescriptor(bmd, true);
        assert(kIOReturnSuccess == ret);

        // kForceDoubleBuffer makes the command use a bounce buffer even
        // though the memory is otherwise usable directly.
        ret = dma->synchronize(IODMACommand::kForceDoubleBuffer | kIODirectionOut);
        assert(kIOReturnSuccess == ret);

        dmaOffset = 0;
        numSegments = 1;
        ret = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments);
        assert(kIOReturnSuccess == ret);
        assert(1 == numSegments);

        if (kIODirectionOut & dir) {
            // Out direction: the original buffer must still hold the tag.
            data = ((uint32_t*) bmd->getBytesNoCopy())[0];
            assertf((0x53535300 | dir) == data, "mismatch 0x%x", data);
        }
        if (kIODirectionIn & dir) {
            // In direction: write through the segment (bounce buffer)
            // address; completion below must copy it back.
            IOMappedWrite32(segments[0].fIOVMAddr, 0x11223300 | dir);
        }

        ret = dma->clearMemoryDescriptor(true);
        assert(kIOReturnSuccess == ret);
        dma->release();

        bmd->complete((IODirection) dir);

        if (kIODirectionIn & dir) {
            // After complete(), the value written to the bounce buffer must
            // be visible in the original buffer.
            data = ((uint32_t*) bmd->getBytesNoCopy())[0];
            assertf((0x11223300 | dir) == data, "mismatch 0x%x", data);
        }

        bmd->release();

        if (dir == kIODirectionInOut) {
            break;
        }
    }

    return 0;
}
195
196 // <rdar://problem/34322778>
197 static int __unused
198 IODMACommandLocalMappedNonContig(int newValue)
199 {
200 IOReturn kr;
201 IOMemoryDescriptor * md;
202 IODMACommand * dma;
203 OSDictionary * matching;
204 IOService * device;
205 IOMapper * mapper;
206 IODMACommand::SegmentOptions segOptions =
207 {
208 .fStructSize = sizeof(segOptions),
209 .fNumAddressBits = 64,
210 .fMaxSegmentSize = 128 * 1024,
211 .fMaxTransferSize = 128 * 1024,
212 .fAlignment = 1,
213 .fAlignmentLength = 1,
214 .fAlignmentInternalSegments = 1
215 };
216 IODMACommand::Segment64 segments[1];
217 UInt32 numSegments;
218 UInt64 dmaOffset;
219 UInt64 segPhys;
220 vm_address_t buffer;
221 vm_size_t bufSize = ptoa(4);
222
223 if (!IOMapper::gSystem) {
224 return 0;
225 }
226
227 buffer = 0;
228 kr = vm_allocate_kernel(kernel_map, &buffer, bufSize, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
229 assert(KERN_SUCCESS == kr);
230
231 // fragment the vmentries
232 kr = vm_inherit(kernel_map, buffer + ptoa(1), ptoa(1), VM_INHERIT_NONE);
233 assert(KERN_SUCCESS == kr);
234
235 md = IOMemoryDescriptor::withAddressRange(
236 buffer + 0xa00, 0x2000, kIODirectionOutIn, kernel_task);
237 assert(md);
238 kr = md->prepare(kIODirectionOutIn);
239 assert(kIOReturnSuccess == kr);
240
241 segPhys = md->getPhysicalSegment(0, NULL, 0);
242
243 matching = IOService::nameMatching("XHC1");
244 assert(matching);
245 device = IOService::copyMatchingService(matching);
246 matching->release();
247 mapper = device ? IOMapper::copyMapperForDeviceWithIndex(device, 0) : NULL;
248
249 dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
250 kIODMAMapOptionMapped,
251 mapper, NULL);
252 assert(dma);
253 kr = dma->setMemoryDescriptor(md, true);
254 assert(kIOReturnSuccess == kr);
255
256 dmaOffset = 0;
257 numSegments = 1;
258 kr = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments);
259 assert(kIOReturnSuccess == kr);
260 assert(1 == numSegments);
261
262 if (mapper) {
263 assertf(segments[0].fIOVMAddr != segPhys, "phys !local 0x%qx, 0x%qx, %p", segments[0].fIOVMAddr, segPhys, dma);
264 }
265
266 kr = dma->clearMemoryDescriptor(true);
267 assert(kIOReturnSuccess == kr);
268 dma->release();
269
270 kr = md->complete(kIODirectionOutIn);
271 assert(kIOReturnSuccess == kr);
272 md->release();
273
274 kr = vm_deallocate(kernel_map, buffer, bufSize);
275 assert(KERN_SUCCESS == kr);
276 OSSafeReleaseNULL(mapper);
277
278 return 0;
279 }
280
// <rdar://problem/30102458>
/*
 * Test kIOMemoryRemote descriptors: ranges that describe addresses not
 * backed by local host memory (TASK_NULL task).  Verifies that
 * getPhysicalSegment() and an unmapped, iterate-only IODMACommand both walk
 * the ranges verbatim, returning exactly the addresses/lengths supplied.
 */
static int
IOMemoryRemoteTest(int newValue)
{
    IOReturn ret;
    IOMemoryDescriptor * md;
    IOByteCount offset, length;
    addr64_t addr;
    uint32_t idx;

    IODMACommand * dma;
    IODMACommand::SegmentOptions segOptions =
    {
        .fStructSize = sizeof(segOptions),
        .fNumAddressBits = 64,
        .fMaxSegmentSize = 0x2000,
        .fMaxTransferSize = 128 * 1024,
        .fAlignment = 1,
        .fAlignmentLength = 1,
        .fAlignmentInternalSegments = 1
    };
    IODMACommand::Segment64 segments[1];
    UInt32 numSegments;
    UInt64 dmaOffset;

    // Arbitrary opaque addresses — never dereferenced as host memory.
    IOAddressRange ranges[2] = {
        { 0x1234567890123456ULL, 0x1000 }, { 0x5432109876543210, 0x2000 },
    };

    md = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn | kIOMemoryRemote, TASK_NULL);
    assert(md);

    // Mapping / byte access is not expected to be valid for remote memory.
//  md->map();
//  md->readBytes(0, &idx, sizeof(idx));

    ret = md->prepare(kIODirectionOutIn);
    assert(kIOReturnSuccess == ret);

    printf("remote md flags 0x%qx, r %d\n",
        md->getFlags(), (0 != (kIOMemoryRemote & md->getFlags())));

    // Physical-segment walk must reproduce the input ranges verbatim.
    for (offset = 0, idx = 0; true; offset += length, idx++) {
        addr = md->getPhysicalSegment(offset, &length, 0);
        if (!length) {
            break;
        }
        assert(idx < 2);
        assert(addr == ranges[idx].address);
        assert(length == ranges[idx].length);
    }
    assert(offset == md->getLength());

    // Iterate-only, unmapped DMA command: segments must also match verbatim.
    dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
        kIODMAMapOptionUnmapped | kIODMAMapOptionIterateOnly,
        NULL, NULL);
    assert(dma);
    ret = dma->setMemoryDescriptor(md, true);
    assert(kIOReturnSuccess == ret);

    for (dmaOffset = 0, idx = 0; dmaOffset < md->getLength(); idx++) {
        numSegments = 1;
        ret = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments);
        assert(kIOReturnSuccess == ret);
        assert(1 == numSegments);
        assert(idx < 2);
        assert(segments[0].fIOVMAddr == ranges[idx].address);
        assert(segments[0].fLength == ranges[idx].length);
    }
    assert(dmaOffset == md->getLength());

    ret = dma->clearMemoryDescriptor(true);
    assert(kIOReturnSuccess == ret);
    dma->release();
    md->complete(kIODirectionOutIn);
    md->release();

    return 0;
}
359
360 static IOReturn
361 IOMemoryPrefaultTest(uint32_t options)
362 {
363 IOBufferMemoryDescriptor * bmd;
364 IOMemoryMap * map;
365 IOReturn kr;
366 uint32_t data;
367 uint32_t * p;
368 IOSimpleLock * lock;
369
370 lock = IOSimpleLockAlloc();
371 assert(lock);
372
373 bmd = IOBufferMemoryDescriptor::inTaskWithOptions(current_task(),
374 kIODirectionOutIn | kIOMemoryPageable, ptoa(8));
375 assert(bmd);
376 kr = bmd->prepare();
377 assert(KERN_SUCCESS == kr);
378
379 map = bmd->map(kIOMapPrefault);
380 assert(map);
381
382 p = (typeof(p))map->getVirtualAddress();
383 IOSimpleLockLock(lock);
384 data = p[0];
385 IOSimpleLockUnlock(lock);
386
387 IOLog("IOMemoryPrefaultTest %d\n", data);
388
389 map->release();
390 bmd->release();
391 IOSimpleLockFree(lock);
392
393 return kIOReturnSuccess;
394 }
395
396
397 // <rdar://problem/26375234>
398 static IOReturn
399 ZeroLengthTest(int newValue)
400 {
401 IOMemoryDescriptor * md;
402
403 md = IOMemoryDescriptor::withAddressRange(
404 0, 0, kIODirectionNone, current_task());
405 assert(md);
406 md->prepare();
407 md->complete();
408 md->release();
409 return 0;
410 }
411
412 // <rdar://problem/27002624>
413 static IOReturn
414 BadFixedAllocTest(int newValue)
415 {
416 IOBufferMemoryDescriptor * bmd;
417 IOMemoryMap * map;
418
419 bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL,
420 kIODirectionIn | kIOMemoryPageable, ptoa(1));
421 assert(bmd);
422 map = bmd->createMappingInTask(kernel_task, 0x2000, 0);
423 assert(!map);
424
425 bmd->release();
426 return 0;
427 }
428
429 // <rdar://problem/26466423>
430 static IOReturn
431 IODirectionPrepareNoZeroFillTest(int newValue)
432 {
433 IOBufferMemoryDescriptor * bmd;
434
435 bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL,
436 kIODirectionIn | kIOMemoryPageable, ptoa(24));
437 assert(bmd);
438 bmd->prepare((IODirection)(kIODirectionIn | kIODirectionPrepareNoZeroFill));
439 bmd->prepare(kIODirectionIn);
440 bmd->complete((IODirection)(kIODirectionIn | kIODirectionCompleteWithDataValid));
441 bmd->complete(kIODirectionIn);
442 bmd->release();
443 return 0;
444 }
445
// <rdar://problem/28190483>
/*
 * Map user memory into the kernel (optionally copy-on-write) and verify
 * visibility semantics: writes made by the user task after the map is
 * created must be visible through a shared map but NOT through a COW map.
 *
 * options: 0 for a normal shared map, or kIOMemoryMapCopyOnWrite.
 * Also logs how long the map() call took.
 */
static IOReturn
IOMemoryMapTest(uint32_t options)
{
    IOBufferMemoryDescriptor * bmd;
    IOMemoryDescriptor * md;
    IOMemoryMap * map;
    uint32_t data;
    user_addr_t p;
    uint8_t * p2;
    int r;
    uint64_t time, nano;

    // Pageable buffer created in the current (user) task; getBytesNoCopy()
    // is treated as an address in that task's map, hence the copyout()
    // calls below.  Size/offset chosen to exercise unaligned ends.
    bmd = IOBufferMemoryDescriptor::inTaskWithOptions(current_task(),
        kIODirectionOutIn | kIOMemoryPageable, 0x4018 + 0x800);
    assert(bmd);
    p = (typeof(p))bmd->getBytesNoCopy();
    p += 0x800; // start mid-page
    // Seed one marker word per page via copyout.
    data = 0x11111111;
    r = copyout(&data, p, sizeof(data));
    assert(r == 0);
    data = 0x22222222;
    r = copyout(&data, p + 0x1000, sizeof(data));
    assert(r == 0);
    data = 0x33333333;
    r = copyout(&data, p + 0x2000, sizeof(data));
    assert(r == 0);
    data = 0x44444444;
    r = copyout(&data, p + 0x3000, sizeof(data));
    assert(r == 0);

    md = IOMemoryDescriptor::withAddressRange(p, 0x4018,
        kIODirectionOut | options,
        current_task());
    assert(md);
    // Time the kernel map creation.
    time = mach_absolute_time();
    map = md->map(kIOMapReadOnly);
    time = mach_absolute_time() - time;
    assert(map);
    absolutetime_to_nanoseconds(time, &nano);

    // All four markers must be visible through the fresh map.
    p2 = (typeof(p2))map->getVirtualAddress();
    assert(0x11 == p2[0]);
    assert(0x22 == p2[0x1000]);
    assert(0x33 == p2[0x2000]);
    assert(0x44 == p2[0x3000]);

    // Mutate page 2 from the user side after the map exists.
    data = 0x99999999;
    r = copyout(&data, p + 0x2000, sizeof(data));
    assert(r == 0);

    assert(0x11 == p2[0]);
    assert(0x22 == p2[0x1000]);
    assert(0x44 == p2[0x3000]);
    if (kIOMemoryMapCopyOnWrite & options) {
        // COW map keeps the original snapshot.
        assert(0x33 == p2[0x2000]);
    } else {
        // Shared map sees the new value.
        assert(0x99 == p2[0x2000]);
    }

    IOLog("IOMemoryMapCopyOnWriteTest map(%s) %lld ns\n",
        kIOMemoryMapCopyOnWrite & options ? "kIOMemoryMapCopyOnWrite" : "",
        nano);

    map->release();
    md->release();
    bmd->release();

    return kIOReturnSuccess;
}
516
517 static int
518 IOMemoryMapCopyOnWriteTest(int newValue)
519 {
520 IOMemoryMapTest(0);
521 IOMemoryMapTest(kIOMemoryMapCopyOnWrite);
522 return 0;
523 }
524
/*
 * Exercise kern_allocation_name accounting: allocate and wire a buffer
 * while the calling thread carries a named allocation tag, so the memory is
 * attributed to "com.apple.iokit.test".
 *
 * NOTE(review): when newValue == 7 the buffer is deliberately NOT released
 * (and the prepare() is never completed) — presumably so the named, wired
 * allocation remains visible for inspection; confirm with the sysctl
 * caller before "fixing".
 */
static int
AllocationNameTest(int newValue)
{
    IOMemoryDescriptor * bmd;
    kern_allocation_name_t name, prior;

    name = kern_allocation_name_allocate("com.apple.iokit.test", 0);
    assert(name);

    // Attribute allocations made by this thread to the new name.
    prior = thread_set_allocation_name(name);

    bmd = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
        kIODirectionOutIn | kIOMemoryPageable | kIOMemoryKernelUserShared,
        ptoa(13));
    assert(bmd);
    bmd->prepare(); // wire the pages under the allocation name

    // Restore the previous name and drop our reference on the new one.
    thread_set_allocation_name(prior);
    kern_allocation_name_release(name);

    if (newValue != 7) {
        bmd->release();
    }

    return 0;
}
551
/*
 * Top-level sysctl-driven IOMemoryDescriptor test entry point.
 *
 * Runs each of the sub-tests defined above, then performs an exhaustive
 * sweep: over varying (srcoffset, srcsize) sub-ranges of a 16MB pattern-
 * filled buffer it builds general memory descriptors (sometimes split into
 * multiple, partially reordered ranges), maps varying (mapoffset, size)
 * sub-ranges of each, and verifies both the mapped contents and
 * readBytes() against the expected pattern.
 *
 * newValue is forwarded to every sub-test; returns 0 on success.
 */
int
IOMemoryDescriptorTest(int newValue)
{
    int result;

    IOLog("/IOMemoryDescriptorTest %d\n", (int) gIOMemoryReferenceCount);

    // Historical ad-hoc tests, kept disabled.
#if 0
    if (6 == newValue) {
        // Nested multi-memory descriptors: a multi inside a multi.
        IOMemoryDescriptor * sbmds[3];
        IOMultiMemoryDescriptor * smmd;
        IOMemoryDescriptor * mds[2];
        IOMultiMemoryDescriptor * mmd;
        IOMemoryMap * map;

        sbmds[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(1));
        sbmds[1] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(2));
        sbmds[2] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(3));
        smmd = IOMultiMemoryDescriptor::withDescriptors(&sbmds[0], sizeof(sbmds) / sizeof(sbmds[0]), kIODirectionOutIn, false);

        mds[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(1));
        mds[1] = smmd;
        mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds) / sizeof(mds[0]), kIODirectionOutIn, false);
        map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere);
        assert(map);
        map->release();
        mmd->release();
        mds[0]->release();
        mds[1]->release();
        sbmds[0]->release();
        sbmds[1]->release();
        sbmds[2]->release();

        return 0;
    } else if (5 == newValue) {
        // DMA segment generation over tricky range layouts (self-referencing
        // code addresses, overlapping and truncated pages).
        IOReturn ret;
        IOMemoryDescriptor * md;
        IODMACommand * dma;
        IODMACommand::SegmentOptions segOptions =
        {
            .fStructSize = sizeof(segOptions),
            .fNumAddressBits = 64,
            .fMaxSegmentSize = 4096,
            .fMaxTransferSize = 128 * 1024,
            .fAlignment = 4,
            .fAlignmentLength = 4,
            .fAlignmentInternalSegments = 0x1000
        };

        IOAddressRange ranges[3][2] =
        {
            {
                { (uintptr_t) &IOMemoryDescriptorTest, 0x2ffc },
                { 0, 0 },
            },
            {
                { ranges[0][0].address, 0x10 },
                { 0x3000 + ranges[0][0].address, 0xff0 },
            },
            {
                { ranges[0][0].address, 0x2ffc },
                { trunc_page(ranges[0][0].address), 0x800 },
            },
        };
        static const uint32_t rangesCount[3] = { 1, 2, 2 };
        uint32_t test;

        for (test = 0; test < 3; test++) {
            kprintf("---[%d] address 0x%qx-0x%qx, 0x%qx-0x%qx\n", test,
                ranges[test][0].address, ranges[test][0].length,
                ranges[test][1].address, ranges[test][1].length);

            md = IOMemoryDescriptor::withAddressRanges((IOAddressRange*)&ranges[test][0], rangesCount[test], kIODirectionOut, kernel_task);
            assert(md);
            ret = md->prepare();
            assert(kIOReturnSuccess == ret);
            dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
                IODMACommand::kMapped, NULL, NULL);
            assert(dma);
            ret = dma->setMemoryDescriptor(md, true);
            if (kIOReturnSuccess == ret) {
                IODMACommand::Segment64 segments[1];
                UInt32 numSegments;
                UInt64 offset;

                offset = 0;
                do{
                    numSegments = 1;
                    ret = dma->gen64IOVMSegments(&offset, &segments[0], &numSegments);
                    assert(kIOReturnSuccess == ret);
                    assert(1 == numSegments);
                    kprintf("seg 0x%qx, 0x%qx\n", segments[0].fIOVMAddr, segments[0].fLength);
                }while (offset < md->getLength());

                ret = dma->clearMemoryDescriptor(true);
                assert(kIOReturnSuccess == ret);
                dma->release();
            }
            md->release();
        }

        return kIOReturnSuccess;
    } else if (4 == newValue) {
        // Timing of dmaMap()/setMemoryDescriptor() on a very large buffer
        // through the "isp" device's mapper.
        IOService * isp;
        IOMapper * mapper;
        IOBufferMemoryDescriptor * md1;
        IODMACommand * dma;
        IOReturn ret;
        size_t bufSize = 8192 * 8192 * sizeof(uint32_t);
        uint64_t start, time, nano;

        isp = IOService::copyMatchingService(IOService::nameMatching("isp"));
        assert(isp);
        mapper = IOMapper::copyMapperForDeviceWithIndex(isp, 0);
        assert(mapper);

        md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
            kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
            bufSize, page_size);

        ret = md1->prepare();
        assert(kIOReturnSuccess == ret);

        IODMAMapSpecification mapSpec;
        bzero(&mapSpec, sizeof(mapSpec));
        uint64_t mapped;
        uint64_t mappedLength;

        start = mach_absolute_time();

        ret = md1->dmaMap(mapper, NULL, &mapSpec, 0, bufSize, &mapped, &mappedLength);
        assert(kIOReturnSuccess == ret);

        time = mach_absolute_time() - start;

        absolutetime_to_nanoseconds(time, &nano);
        kprintf("time %lld us\n", nano / 1000ULL);
        kprintf("seg0 0x%qx, 0x%qx\n", mapped, mappedLength);

        assert(md1);

        dma = IODMACommand::withSpecification(kIODMACommandOutputHost32,
            32, 0, IODMACommand::kMapped, 0, 1, mapper, NULL);

        assert(dma);

        start = mach_absolute_time();
        ret = dma->setMemoryDescriptor(md1, true);
        assert(kIOReturnSuccess == ret);
        time = mach_absolute_time() - start;

        absolutetime_to_nanoseconds(time, &nano);
        kprintf("time %lld us\n", nano / 1000ULL);


        IODMACommand::Segment32 segments[1];
        UInt32 numSegments = 1;
        UInt64 offset;

        offset = 0;
        ret = dma->gen32IOVMSegments(&offset, &segments[0], &numSegments);
        assert(kIOReturnSuccess == ret);
        assert(1 == numSegments);
        kprintf("seg0 0x%x, 0x%x\n", (int)segments[0].fIOVMAddr, (int)segments[0].fLength);

        ret = dma->clearMemoryDescriptor(true);
        assert(kIOReturnSuccess == ret);

        md1->release();

        return kIOReturnSuccess;
    }

    if (3 == newValue) {
        // IOMemoryMap::redirect() between two buffers: after redirect, each
        // map must read the other buffer's contents, then switch back.
        IOBufferMemoryDescriptor * md1;
        IOBufferMemoryDescriptor * md2;
        IOMemoryMap * map1;
        IOMemoryMap * map2;
        uint32_t * buf1;
        uint32_t * buf2;
        IOReturn err;

        md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
            kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
            64 * 1024, page_size);
        assert(md1);
        map1 = md1->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique);
        assert(map1);
        buf1 = (uint32_t *) map1->getVirtualAddress();

        md2 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
            kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
            64 * 1024, page_size);
        assert(md2);
        map2 = md2->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique);
        assert(map2);
        buf2 = (uint32_t *) map2->getVirtualAddress();

        memset(buf1, 0x11, 64 * 1024L);
        memset(buf2, 0x22, 64 * 1024L);

        kprintf("md1 %p, map1 %p, buf2 %p; md2 %p, map2 %p, buf2 %p\n", md1, map1, buf1, md2, map2, buf2);

        kprintf("no redir 0x%08x, 0x%08x\n", buf1[0], buf2[0]);
        assert(0x11111111 == buf1[0]);
        assert(0x22222222 == buf2[0]);
        err = map1->redirect(md2, 0, 0ULL);
        kprintf("redir md2(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]);
        assert(0x11111111 == buf2[0]);
        assert(0x22222222 == buf1[0]);
        err = map1->redirect(md1, 0, 0ULL);
        kprintf("redir md1(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]);
        assert(0x11111111 == buf1[0]);
        assert(0x22222222 == buf2[0]);
        map1->release();
        map2->release();
        md1->release();
        md2->release();
    }
#endif

    // Hardware dependent, so not run by default.
//  result = IODMACommandLocalMappedNonContig(newValue);
//  if (result) return (result);

    // Run each sub-test; any nonzero result aborts the run.
    result = IODMACommandForceDoubleBufferTest(newValue);
    if (result) {
        return result;
    }

    result = AllocationNameTest(newValue);
    if (result) {
        return result;
    }

    result = IOMemoryMapCopyOnWriteTest(newValue);
    if (result) {
        return result;
    }

    result = IOMultMemoryDescriptorTest(newValue);
    if (result) {
        return result;
    }

    result = ZeroLengthTest(newValue);
    if (result) {
        return result;
    }

    result = IODirectionPrepareNoZeroFillTest(newValue);
    if (result) {
        return result;
    }

    result = BadFixedAllocTest(newValue);
    if (result) {
        return result;
    }

    result = IOMemoryRemoteTest(newValue);
    if (result) {
        return result;
    }

    result = IOMemoryPrefaultTest(newValue);
    if (result) {
        return result;
    }

    // Exhaustive sub-range sweep over a 16MB buffer whose every uint32_t
    // holds its own word index, so any location's expected value is known.
    IOGeneralMemoryDescriptor * md;
    vm_offset_t data[2];
    vm_size_t bsize = 16 * 1024 * 1024;
    vm_size_t srcsize, srcoffset, mapoffset, size;
    kern_return_t kr;

    data[0] = data[1] = 0;
    kr = vm_allocate_kernel(kernel_map, &data[0], bsize, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
    assert(KERN_SUCCESS == kr);

    // Fragment the buffer's VM entries so descriptors cross entry edges.
    vm_inherit(kernel_map, data[0] + ptoa(1), ptoa(1), VM_INHERIT_NONE);
    vm_inherit(kernel_map, data[0] + ptoa(16), ptoa(4), VM_INHERIT_NONE);

    IOLog("data 0x%lx, 0x%lx\n", (long)data[0], (long)data[1]);

    uint32_t idx, offidx;
    for (idx = 0; idx < (bsize / sizeof(uint32_t)); idx++) {
        ((uint32_t*)data[0])[idx] = idx;
    }

    // Outer sweep: source offset and size grow geometrically with odd
    // strides so page boundaries land at varied positions.
    for (srcoffset = 0; srcoffset < bsize; srcoffset = ((srcoffset << 2) + 0x40c)) {
        for (srcsize = 4; srcsize < (bsize - srcoffset - 1); srcsize = ((srcsize << 2) + 0x3fc)) {
            IOAddressRange ranges[3];
            uint32_t rangeCount = 1;

            bzero(&ranges[0], sizeof(ranges));
            ranges[0].address = data[0] + srcoffset;
            ranges[0].length = srcsize;
            ranges[1].address = ranges[2].address = data[0];

            if (srcsize > ptoa(5)) {
                // Large source: split into three contiguous, oddly sized
                // ranges covering the same span.
                ranges[0].length = 7634;
                ranges[1].length = 9870;
                ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
                ranges[1].address = ranges[0].address + ranges[0].length;
                ranges[2].address = ranges[1].address + ranges[1].length;
                rangeCount = 3;
            } else if ((srcsize > ptoa(2)) && !(page_mask & srcoffset)) {
                // Page-aligned medium source: swap the first two pages so
                // the descriptor's order differs from memory order (the
                // verification loop below undoes this with offidx ^= ptoa(1)).
                ranges[0].length = ptoa(1);
                ranges[1].length = ptoa(1);
                ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
                ranges[0].address = data[0] + srcoffset + ptoa(1);
                ranges[1].address = data[0] + srcoffset;
                ranges[2].address = ranges[0].address + ranges[0].length;
                rangeCount = 3;
            }

            md = OSDynamicCast(IOGeneralMemoryDescriptor,
                IOMemoryDescriptor::withAddressRanges(&ranges[0], rangeCount, kIODirectionInOut, kernel_task));
            assert(md);

            IOLog("IOMemoryDescriptor::withAddressRanges [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n",
                (long) srcsize, (long) srcoffset,
                (long long) ranges[0].address - data[0], (long long) ranges[0].length,
                (long long) ranges[1].address - data[0], (long long) ranges[1].length,
                (long long) ranges[2].address - data[0], (long long) ranges[2].length);

            // kr still holds success from allocation / prior iterations.
            if (kIOReturnSuccess == kr) {
                // Inner sweep: map sub-ranges of the descriptor.
                for (mapoffset = 0; mapoffset < srcsize; mapoffset = ((mapoffset << 1) + 0xf00)) {
                    for (size = 4; size < (srcsize - mapoffset - 1); size = ((size << 2) + 0x200)) {
                        IOMemoryMap * map;
                        mach_vm_address_t addr = 0;
                        uint32_t data; // NOTE: shadows the outer data[2]

                        // IOLog("<mapRef [0x%lx @ 0x%lx]\n", (long) size, (long) mapoffset);

                        map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere, mapoffset, size);
                        if (map) {
                            addr = map->getAddress();
                        } else {
                            kr = kIOReturnError;
                        }

                        // IOLog(">mapRef 0x%x %llx\n", kr, addr);

                        if (kIOReturnSuccess != kr) {
                            break;
                        }
                        kr = md->prepare();
                        if (kIOReturnSuccess != kr) {
                            panic("prepare() fail 0x%x\n", kr);
                            break;
                        }
                        // Verify every word, both through the mapping and
                        // via readBytes(), against the index pattern.
                        for (idx = 0; idx < size; idx += sizeof(uint32_t)) {
                            offidx = (idx + mapoffset + srcoffset);
                            if ((srcsize <= ptoa(5)) && (srcsize > ptoa(2)) && !(page_mask & srcoffset)) {
                                // Undo the page swap from the 3-range case.
                                if (offidx < ptoa(2)) {
                                    offidx ^= ptoa(1);
                                }
                            }
                            offidx /= sizeof(uint32_t);

                            if (offidx != ((uint32_t*)addr)[idx / sizeof(uint32_t)]) {
                                panic("vm mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md, map, idx, (long) srcoffset, (long) mapoffset);
                                kr = kIOReturnBadMedia;
                            } else {
                                if (sizeof(data) != md->readBytes(mapoffset + idx, &data, sizeof(data))) {
                                    data = 0;
                                }
                                if (offidx != data) {
                                    panic("phys mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md, map, idx, (long) srcoffset, (long) mapoffset);
                                    kr = kIOReturnBadMedia;
                                }
                            }
                        }
                        md->complete();
                        map->release();
                        // IOLog("unmapRef %llx\n", addr);
                    }
                    if (kIOReturnSuccess != kr) {
                        break;
                    }
                }
            }
            md->release();
            if (kIOReturnSuccess != kr) {
                break;
            }
        }
        if (kIOReturnSuccess != kr) {
            break;
        }
    }

    if (kIOReturnSuccess != kr) {
        IOLog("FAIL: src 0x%lx @ 0x%lx, map 0x%lx @ 0x%lx\n",
            (long) srcsize, (long) srcoffset, (long) size, (long) mapoffset);
    }

    assert(kr == kIOReturnSuccess);

    vm_deallocate(kernel_map, data[0], bsize);
//  vm_deallocate(kernel_map, data[1], size);

    IOLog("IOMemoryDescriptorTest/ %d\n", (int) gIOMemoryReferenceCount);

    return 0;
}
959
960 #endif /* DEVELOPMENT || DEBUG */