]> git.saurik.com Git - apple/xnu.git/blob - iokit/Tests/TestIOMemoryDescriptor.cpp
xnu-7195.50.7.100.1.tar.gz
[apple/xnu.git] / iokit / Tests / TestIOMemoryDescriptor.cpp
1 /*
2 * Copyright (c) 2014-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/cdefs.h>
30
31 #include <IOKit/assert.h>
32 #include <IOKit/system.h>
33 #include <IOKit/IOLib.h>
34 #include <IOKit/IOMemoryDescriptor.h>
35 #include <IOKit/IOMapper.h>
36 #include <IOKit/IODMACommand.h>
37 #include <IOKit/IOKitKeysPrivate.h>
38 #include "Tests.h"
39
40 #ifndef __LP64__
41 #include <IOKit/IOSubMemoryDescriptor.h>
42 #endif /* !__LP64__ */
43 #include <IOKit/IOSubMemoryDescriptor.h>
44 #include <IOKit/IOMultiMemoryDescriptor.h>
45 #include <IOKit/IOBufferMemoryDescriptor.h>
46
47 #include <IOKit/IOKitDebug.h>
48 #include <libkern/OSDebug.h>
49 #include <sys/uio.h>
50
51 __BEGIN_DECLS
52 #include <vm/pmap.h>
53 #include <vm/vm_pageout.h>
54 #include <mach/memory_object_types.h>
55 #include <device/device_port.h>
56
57 #include <mach/vm_prot.h>
58 #include <mach/mach_vm.h>
59 #include <vm/vm_fault.h>
60 #include <vm/vm_protos.h>
61 __END_DECLS
62
63
64 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
65
66 #if DEVELOPMENT || DEBUG
67
68 extern SInt32 gIOMemoryReferenceCount;
69
70 static int
71 IOMultMemoryDescriptorTest(int newValue)
72 {
73 IOMemoryDescriptor * mds[3];
74 IOMultiMemoryDescriptor * mmd;
75 IOMemoryMap * map;
76 void * addr;
77 uint8_t * data;
78 uint32_t i;
79 IOAddressRange ranges[2];
80
81 data = (typeof(data))IOMallocAligned(ptoa(8), page_size);
82 for (i = 0; i < ptoa(8); i++) {
83 data[i] = ((uint8_t) atop(i)) | 0xD0;
84 }
85
86 ranges[0].address = (IOVirtualAddress)(data + ptoa(4));
87 ranges[0].length = ptoa(4);
88 ranges[1].address = (IOVirtualAddress)(data + ptoa(0));
89 ranges[1].length = ptoa(4);
90
91 mds[0] = IOMemoryDescriptor::withAddressRange((mach_vm_address_t) data, 2, kIODirectionOutIn, kernel_task);
92 assert(mds[0]);
93 {
94 uint64_t dmaLen, dmaOffset;
95 dmaLen = mds[0]->getDMAMapLength(&dmaOffset);
96 assert(0 == dmaOffset);
97 assert(ptoa(1) == dmaLen);
98 }
99 mds[0]->release();
100 mds[0] = IOMemoryDescriptor::withAddressRange((mach_vm_address_t) (data + page_size - 2), 4, kIODirectionOutIn, kernel_task);
101 assert(mds[0]);
102 {
103 uint64_t dmaLen, dmaOffset;
104 dmaLen = mds[0]->getDMAMapLength(&dmaOffset);
105 assert((page_size - 2) == dmaOffset);
106 assert(ptoa(2) == dmaLen);
107 }
108 mds[0]->release();
109
110 mds[0] = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn, kernel_task);
111 {
112 uint64_t dmaLen, dmaOffset;
113 dmaLen = mds[0]->getDMAMapLength(&dmaOffset);
114 assert(0 == dmaOffset);
115 assert(ptoa(8) == dmaLen);
116 }
117 mds[1] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(3), ptoa(2), kIODirectionOutIn);
118 {
119 uint64_t dmaLen, dmaOffset;
120 dmaLen = mds[1]->getDMAMapLength(&dmaOffset);
121 assert(0 == dmaOffset);
122 assert(ptoa(2) == dmaLen);
123 }
124 mds[2] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(7), ptoa(1), kIODirectionOutIn);
125
126 mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds) / sizeof(mds[0]), kIODirectionOutIn, false);
127 {
128 uint64_t dmaLen, dmaOffset;
129 dmaLen = mmd->getDMAMapLength(&dmaOffset);
130 assert(0 == dmaOffset);
131 assert(ptoa(11) == dmaLen);
132 }
133 mds[2]->release();
134 mds[1]->release();
135 mds[0]->release();
136 map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere, ptoa(7), mmd->getLength() - ptoa(7));
137 mmd->release();
138 assert(map);
139
140 addr = (void *) map->getVirtualAddress();
141 assert(ptoa(4) == map->getLength());
142 assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(0) / sizeof(uint32_t)]);
143 assert(0xd7d7d7d7 == ((uint32_t *)addr)[ptoa(1) / sizeof(uint32_t)]);
144 assert(0xd0d0d0d0 == ((uint32_t *)addr)[ptoa(2) / sizeof(uint32_t)]);
145 assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(3) / sizeof(uint32_t)]);
146 map->release();
147 IOFreeAligned(data, ptoa(8));
148
149 return 0;
150 }
151
152
153
// <rdar://problem/30102458>
/*
 * For each direction (in, out, in+out): allocate a pageable buffer, force
 * the IODMACommand to double (bounce) buffer it, then verify that for output
 * directions the source buffer still holds the seeded pattern after segment
 * generation, and that for input directions a value written through the DMA
 * address is copied back to the buffer by complete().
 */
static int
IODMACommandForceDoubleBufferTest(int newValue)
{
	IOReturn ret;
	IOBufferMemoryDescriptor * bmd;
	IODMACommand * dma;
	uint32_t dir, data;
	IODMACommand::SegmentOptions segOptions =
	{
		.fStructSize = sizeof(segOptions),
		.fNumAddressBits = 64,
		.fMaxSegmentSize = 0x2000,
		.fMaxTransferSize = 128 * 1024,
		.fAlignment = 1,
		.fAlignmentLength = 1,
		.fAlignmentInternalSegments = 1
	};
	IODMACommand::Segment64 segments[1];
	UInt32 numSegments;
	UInt64 dmaOffset;


	// Walks kIODirectionIn, kIODirectionOut, kIODirectionInOut; the loop
	// exits via the kIODirectionInOut check at the bottom.
	for (dir = kIODirectionIn;; dir++) {
		bmd = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task,
		    dir | kIOMemoryPageable, ptoa(8));
		assert(bmd);
		{
			// NB: this dmaOffset shadows the function-scope one.
			uint64_t dmaLen, dmaOffset;
			dmaLen = bmd->getDMAMapLength(&dmaOffset);
			assert(0 == dmaOffset);
			assert(ptoa(8) == dmaLen);
		}

		// Seed the buffer with a direction-tagged pattern.
		((uint32_t*) bmd->getBytesNoCopy())[0] = 0x53535300 | dir;

		ret = bmd->prepare((IODirection) dir);
		assert(kIOReturnSuccess == ret);

		dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
		    kIODMAMapOptionMapped,
		    NULL, NULL);
		assert(dma);
		ret = dma->setMemoryDescriptor(bmd, true);
		assert(kIOReturnSuccess == ret);

		// Force the command to allocate a separate bounce buffer.
		ret = dma->synchronize(IODMACommand::kForceDoubleBuffer | kIODirectionOut);
		assert(kIOReturnSuccess == ret);

		dmaOffset = 0;
		numSegments = 1;
		ret = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments);
		assert(kIOReturnSuccess == ret);
		assert(1 == numSegments);

		if (kIODirectionOut & dir) {
			// Source buffer must still contain the seeded pattern.
			data = ((uint32_t*) bmd->getBytesNoCopy())[0];
			assertf((0x53535300 | dir) == data, "mismatch 0x%x", data);
		}
		if (kIODirectionIn & dir) {
			// Write through the DMA (bounce) address; checked after complete().
			IOMappedWrite32(segments[0].fIOVMAddr, 0x11223300 | dir);
		}

		ret = dma->clearMemoryDescriptor(true);
		assert(kIOReturnSuccess == ret);
		dma->release();

		bmd->complete((IODirection) dir);

		if (kIODirectionIn & dir) {
			// The bounce-buffer write must have been copied back.
			data = ((uint32_t*) bmd->getBytesNoCopy())[0];
			assertf((0x11223300 | dir) == data, "mismatch 0x%x", data);
		}

		bmd->release();

		if (dir == kIODirectionInOut) {
			break;
		}
	}

	return 0;
}
237
238 // <rdar://problem/34322778>
239 static int __unused
240 IODMACommandLocalMappedNonContig(int newValue)
241 {
242 IOReturn kr;
243 IOMemoryDescriptor * md;
244 IODMACommand * dma;
245 OSDictionary * matching;
246 IOService * device;
247 IOMapper * mapper;
248 IODMACommand::SegmentOptions segOptions =
249 {
250 .fStructSize = sizeof(segOptions),
251 .fNumAddressBits = 64,
252 .fMaxSegmentSize = 128 * 1024,
253 .fMaxTransferSize = 128 * 1024,
254 .fAlignment = 1,
255 .fAlignmentLength = 1,
256 .fAlignmentInternalSegments = 1
257 };
258 IODMACommand::Segment64 segments[1];
259 UInt32 numSegments;
260 UInt64 dmaOffset;
261 UInt64 segPhys;
262 vm_address_t buffer;
263 vm_size_t bufSize = ptoa(4);
264
265 if (!IOMapper::gSystem) {
266 return 0;
267 }
268
269 buffer = 0;
270 kr = vm_allocate_kernel(kernel_map, &buffer, bufSize, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
271 assert(KERN_SUCCESS == kr);
272
273 // fragment the vmentries
274 kr = vm_inherit(kernel_map, buffer + ptoa(1), ptoa(1), VM_INHERIT_NONE);
275 assert(KERN_SUCCESS == kr);
276
277 md = IOMemoryDescriptor::withAddressRange(
278 buffer + 0xa00, 0x2000, kIODirectionOutIn, kernel_task);
279 assert(md);
280 kr = md->prepare(kIODirectionOutIn);
281 assert(kIOReturnSuccess == kr);
282
283 segPhys = md->getPhysicalSegment(0, NULL, 0);
284
285 matching = IOService::nameMatching("XHC1");
286 assert(matching);
287 device = IOService::copyMatchingService(matching);
288 matching->release();
289 mapper = device ? IOMapper::copyMapperForDeviceWithIndex(device, 0) : NULL;
290
291 dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
292 kIODMAMapOptionMapped,
293 mapper, NULL);
294 assert(dma);
295 kr = dma->setMemoryDescriptor(md, true);
296 assert(kIOReturnSuccess == kr);
297
298 dmaOffset = 0;
299 numSegments = 1;
300 kr = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments);
301 assert(kIOReturnSuccess == kr);
302 assert(1 == numSegments);
303
304 if (mapper) {
305 assertf(segments[0].fIOVMAddr != segPhys, "phys !local 0x%qx, 0x%qx, %p", segments[0].fIOVMAddr, segPhys, dma);
306 }
307
308 kr = dma->clearMemoryDescriptor(true);
309 assert(kIOReturnSuccess == kr);
310 dma->release();
311
312 kr = md->complete(kIODirectionOutIn);
313 assert(kIOReturnSuccess == kr);
314 md->release();
315
316 kr = vm_deallocate(kernel_map, buffer, bufSize);
317 assert(KERN_SUCCESS == kr);
318 OSSafeReleaseNULL(mapper);
319
320 return 0;
321 }
322
// <rdar://problem/30102458>
/*
 * Build a descriptor flagged kIOMemoryRemote over fabricated, non-local
 * addresses and verify that both getPhysicalSegment() and a pass-through
 * (unmapped, iterate-only) IODMACommand report exactly the ranges given,
 * i.e. remote memory addresses are passed through untranslated.
 */
static int
IOMemoryRemoteTest(int newValue)
{
	IOReturn ret;
	IOMemoryDescriptor * md;
	IOByteCount offset, length;
	addr64_t addr;
	uint32_t idx;

	IODMACommand * dma;
	IODMACommand::SegmentOptions segOptions =
	{
		.fStructSize = sizeof(segOptions),
		.fNumAddressBits = 64,
		.fMaxSegmentSize = 0x2000,
		.fMaxTransferSize = 128 * 1024,
		.fAlignment = 1,
		.fAlignmentLength = 1,
		.fAlignmentInternalSegments = 1
	};
	IODMACommand::Segment64 segments[1];
	UInt32 numSegments;
	UInt64 dmaOffset;

	// Arbitrary "remote" addresses: nothing local backs these ranges.
	IOAddressRange ranges[2] = {
		{ 0x1234567890123456ULL, 0x1000 }, { 0x5432109876543210, 0x2000 },
	};

	md = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn | kIOMemoryRemote, TASK_NULL);
	assert(md);

	// Left disabled: local map/read of a remote descriptor.
//	md->map();
//	md->readBytes(0, &idx, sizeof(idx));

	ret = md->prepare(kIODirectionOutIn);
	assert(kIOReturnSuccess == ret);

	printf("remote md flags 0x%qx, r %d\n",
	    md->getFlags(), (0 != (kIOMemoryRemote & md->getFlags())));

	// Physical segments must be exactly the ranges, untranslated.
	for (offset = 0, idx = 0; true; offset += length, idx++) {
		addr = md->getPhysicalSegment(offset, &length, 0);
		if (!length) {
			break;
		}
		assert(idx < 2);
		assert(addr == ranges[idx].address);
		assert(length == ranges[idx].length);
	}
	assert(offset == md->getLength());

	dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
	    kIODMAMapOptionUnmapped | kIODMAMapOptionIterateOnly,
	    NULL, NULL);
	assert(dma);
	ret = dma->setMemoryDescriptor(md, true);
	assert(kIOReturnSuccess == ret);

	// DMA segments must likewise pass the remote addresses through.
	for (dmaOffset = 0, idx = 0; dmaOffset < md->getLength(); idx++) {
		numSegments = 1;
		ret = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments);
		assert(kIOReturnSuccess == ret);
		assert(1 == numSegments);
		assert(idx < 2);
		assert(segments[0].fIOVMAddr == ranges[idx].address);
		assert(segments[0].fLength == ranges[idx].length);
	}
	assert(dmaOffset == md->getLength());

	ret = dma->clearMemoryDescriptor(true);
	assert(kIOReturnSuccess == ret);
	dma->release();
	md->complete(kIODirectionOutIn);
	md->release();

	return 0;
}
401
402 static IOReturn
403 IOMemoryPrefaultTest(uint32_t options)
404 {
405 IOBufferMemoryDescriptor * bmd;
406 IOMemoryMap * map;
407 IOReturn kr;
408 uint32_t data;
409 uint32_t * p;
410 IOSimpleLock * lock;
411
412 lock = IOSimpleLockAlloc();
413 assert(lock);
414
415 bmd = IOBufferMemoryDescriptor::inTaskWithOptions(current_task(),
416 kIODirectionOutIn | kIOMemoryPageable, ptoa(8));
417 assert(bmd);
418 kr = bmd->prepare();
419 assert(KERN_SUCCESS == kr);
420
421 map = bmd->map(kIOMapPrefault);
422 assert(map);
423
424 p = (typeof(p))map->getVirtualAddress();
425 IOSimpleLockLock(lock);
426 data = p[0];
427 IOSimpleLockUnlock(lock);
428
429 IOLog("IOMemoryPrefaultTest %d\n", data);
430
431 map->release();
432 bmd->release();
433 IOSimpleLockFree(lock);
434
435 return kIOReturnSuccess;
436 }
437
438 static IOReturn
439 IOBMDOverflowTest(uint32_t options)
440 {
441 IOBufferMemoryDescriptor * bmd;
442
443 bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, kIOMemoryKernelUserShared | kIODirectionOut,
444 0xffffffffffffffff, 0xfffffffffffff000);
445 assert(NULL == bmd);
446
447 return kIOReturnSuccess;
448 }
449
450 // <rdar://problem/26375234>
451 static IOReturn
452 ZeroLengthTest(int newValue)
453 {
454 IOMemoryDescriptor * md;
455
456 md = IOMemoryDescriptor::withAddressRange(
457 0, 0, kIODirectionNone, current_task());
458 assert(md);
459 md->prepare();
460 md->complete();
461 md->release();
462 return 0;
463 }
464
465 // <rdar://problem/27002624>
466 static IOReturn
467 BadFixedAllocTest(int newValue)
468 {
469 IOBufferMemoryDescriptor * bmd;
470 IOMemoryMap * map;
471
472 bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL,
473 kIODirectionIn | kIOMemoryPageable, ptoa(1));
474 assert(bmd);
475 map = bmd->createMappingInTask(kernel_task, 0x2000, 0);
476 assert(!map);
477
478 bmd->release();
479 return 0;
480 }
481
482 // <rdar://problem/26466423>
483 static IOReturn
484 IODirectionPrepareNoZeroFillTest(int newValue)
485 {
486 IOBufferMemoryDescriptor * bmd;
487
488 bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL,
489 kIODirectionIn | kIOMemoryPageable, ptoa(24));
490 assert(bmd);
491 bmd->prepare((IODirection)(kIODirectionIn | kIODirectionPrepareNoZeroFill));
492 bmd->prepare(kIODirectionIn);
493 bmd->complete((IODirection)(kIODirectionIn | kIODirectionCompleteWithDataValid));
494 bmd->complete(kIODirectionIn);
495 bmd->release();
496 return 0;
497 }
498
// <rdar://problem/28190483>
/*
 * Map a current_task() range read-only into the kernel, verify the mapped
 * contents, then modify the source via copyout() and check what the mapping
 * observes: with kIOMemoryMapCopyOnWrite it must keep the original data,
 * otherwise it must see the new data. Also logs how long map() took.
 */
static IOReturn
IOMemoryMapTest(uint32_t options)
{
	IOBufferMemoryDescriptor * bmd;
	IOMemoryDescriptor * md;
	IOMemoryMap * map;
	uint32_t data;
	user_addr_t p;
	uint8_t * p2;
	int r;
	uint64_t time, nano;

	// Backing store; the tested range starts 0x800 in, so it is
	// deliberately not page aligned.
	bmd = IOBufferMemoryDescriptor::inTaskWithOptions(current_task(),
	    kIODirectionOutIn | kIOMemoryPageable, 0x4018 + 0x800);
	assert(bmd);
	p = (typeof(p))bmd->getBytesNoCopy();
	p += 0x800;
	// Tag the first word of each page of the range through copyout().
	data = 0x11111111;
	r = copyout(&data, p, sizeof(data));
	assert(r == 0);
	data = 0x22222222;
	r = copyout(&data, p + 0x1000, sizeof(data));
	assert(r == 0);
	data = 0x33333333;
	r = copyout(&data, p + 0x2000, sizeof(data));
	assert(r == 0);
	data = 0x44444444;
	r = copyout(&data, p + 0x3000, sizeof(data));
	assert(r == 0);

	md = IOMemoryDescriptor::withAddressRange(p, 0x4018,
	    kIODirectionOut | options,
	    current_task());
	assert(md);
	// Time the map() call; reported in the IOLog below.
	time = mach_absolute_time();
	map = md->map(kIOMapReadOnly);
	time = mach_absolute_time() - time;
	assert(map);
	absolutetime_to_nanoseconds(time, &nano);

	// The mapping must show the tags written above.
	p2 = (typeof(p2))map->getVirtualAddress();
	assert(0x11 == p2[0]);
	assert(0x22 == p2[0x1000]);
	assert(0x33 == p2[0x2000]);
	assert(0x44 == p2[0x3000]);

	// Overwrite the third page's tag in the source buffer.
	data = 0x99999999;
	r = copyout(&data, p + 0x2000, sizeof(data));
	assert(r == 0);

	assert(0x11 == p2[0]);
	assert(0x22 == p2[0x1000]);
	assert(0x44 == p2[0x3000]);
	if (kIOMemoryMapCopyOnWrite & options) {
		// COW mapping keeps the original data.
		assert(0x33 == p2[0x2000]);
	} else {
		// Shared mapping sees the update.
		assert(0x99 == p2[0x2000]);
	}

	IOLog("IOMemoryMapCopyOnWriteTest map(%s) %lld ns\n",
	    kIOMemoryMapCopyOnWrite & options ? "kIOMemoryMapCopyOnWrite" : "",
	    nano);

	map->release();
	md->release();
	bmd->release();

	return kIOReturnSuccess;
}
569
570 static int
571 IOMemoryMapCopyOnWriteTest(int newValue)
572 {
573 IOMemoryMapTest(0);
574 IOMemoryMapTest(kIOMemoryMapCopyOnWrite);
575 return 0;
576 }
577
/*
 * Allocate memory while a kern_allocation_name tag is set on the current
 * thread, so the allocation is attributed to the named accounting bucket.
 */
static int
AllocationNameTest(int newValue)
{
	IOMemoryDescriptor * bmd;
	kern_allocation_name_t name, prior;

	name = kern_allocation_name_allocate("com.apple.iokit.test", 0);
	assert(name);

	// Tag this thread's subsequent allocations; restore the prior tag below.
	prior = thread_set_allocation_name(name);

	bmd = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
	    kIODirectionOutIn | kIOMemoryPageable | kIOMemoryKernelUserShared,
	    ptoa(13));
	assert(bmd);
	bmd->prepare();  // NOTE(review): no matching complete() — confirm intentional

	thread_set_allocation_name(prior);
	kern_allocation_name_release(name);

	// newValue == 7 deliberately leaks the descriptor — presumably so the
	// tagged allocation remains visible to accounting/leak tooling; confirm.
	if (newValue != 7) {
		bmd->release();
	}

	return 0;
}
604
/*
 * Test driver (sysctl entry point): runs every IOMemoryDescriptor subtest in
 * sequence, then performs an exhaustive map/readBytes consistency sweep over
 * a 16MB pattern-filled kernel buffer, varying source offset/size and map
 * offset/size, including multi-range and swapped-page layouts. newValue
 * selects historical subtests in the disabled (#if 0) section and the
 * intentional-leak path in AllocationNameTest (7).
 */
int
IOMemoryDescriptorTest(int newValue)
{
	int result;

	IOLog("/IOMemoryDescriptorTest %d\n", (int) gIOMemoryReferenceCount);

#if 0
	// Disabled historical experiments, kept for reference and selected by
	// newValue: 6 = nested multi descriptors, 5 = segment generation over
	// assorted range shapes, 4 = dmaMap timing via the "isp" mapper,
	// 3 = IOMemoryMap::redirect between two buffers.
	if (6 == newValue) {
		IOMemoryDescriptor * sbmds[3];
		IOMultiMemoryDescriptor * smmd;
		IOMemoryDescriptor * mds[2];
		IOMultiMemoryDescriptor * mmd;
		IOMemoryMap * map;

		sbmds[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(1));
		sbmds[1] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(2));
		sbmds[2] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(3));
		smmd = IOMultiMemoryDescriptor::withDescriptors(&sbmds[0], sizeof(sbmds) / sizeof(sbmds[0]), kIODirectionOutIn, false);

		mds[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(1));
		mds[1] = smmd;
		mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds) / sizeof(mds[0]), kIODirectionOutIn, false);
		map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere);
		assert(map);
		map->release();
		mmd->release();
		mds[0]->release();
		mds[1]->release();
		sbmds[0]->release();
		sbmds[1]->release();
		sbmds[2]->release();

		return 0;
	} else if (5 == newValue) {
		IOReturn ret;
		IOMemoryDescriptor * md;
		IODMACommand * dma;
		IODMACommand::SegmentOptions segOptions =
		{
			.fStructSize = sizeof(segOptions),
			.fNumAddressBits = 64,
			.fMaxSegmentSize = 4096,
			.fMaxTransferSize = 128 * 1024,
			.fAlignment = 4,
			.fAlignmentLength = 4,
			.fAlignmentInternalSegments = 0x1000
		};

		// Three range shapes built over this function's own code pages.
		IOAddressRange ranges[3][2] =
		{
			{
				{ (uintptr_t) &IOMemoryDescriptorTest, 0x2ffc },
				{ 0, 0 },
			},
			{
				{ ranges[0][0].address, 0x10 },
				{ 0x3000 + ranges[0][0].address, 0xff0 },
			},
			{
				{ ranges[0][0].address, 0x2ffc },
				{ trunc_page(ranges[0][0].address), 0x800 },
			},
		};
		static const uint32_t rangesCount[3] = { 1, 2, 2 };
		uint32_t test;

		for (test = 0; test < 3; test++) {
			kprintf("---[%d] address 0x%qx-0x%qx, 0x%qx-0x%qx\n", test,
			    ranges[test][0].address, ranges[test][0].length,
			    ranges[test][1].address, ranges[test][1].length);

			md = IOMemoryDescriptor::withAddressRanges((IOAddressRange*)&ranges[test][0], rangesCount[test], kIODirectionOut, kernel_task);
			assert(md);
			ret = md->prepare();
			assert(kIOReturnSuccess == ret);
			dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
			    IODMACommand::kMapped, NULL, NULL);
			assert(dma);
			ret = dma->setMemoryDescriptor(md, true);
			if (kIOReturnSuccess == ret) {
				IODMACommand::Segment64 segments[1];
				UInt32 numSegments;
				UInt64 offset;

				offset = 0;
				do{
					numSegments = 1;
					ret = dma->gen64IOVMSegments(&offset, &segments[0], &numSegments);
					assert(kIOReturnSuccess == ret);
					assert(1 == numSegments);
					kprintf("seg 0x%qx, 0x%qx\n", segments[0].fIOVMAddr, segments[0].fLength);
				}while (offset < md->getLength());

				ret = dma->clearMemoryDescriptor(true);
				assert(kIOReturnSuccess == ret);
				dma->release();
			}
			md->release();
		}

		return kIOReturnSuccess;
	} else if (4 == newValue) {
		IOService * isp;
		IOMapper * mapper;
		IOBufferMemoryDescriptor * md1;
		IODMACommand * dma;
		IOReturn ret;
		size_t bufSize = 8192 * 8192 * sizeof(uint32_t);
		uint64_t start, time, nano;

		isp = IOService::copyMatchingService(IOService::nameMatching("isp"));
		assert(isp);
		mapper = IOMapper::copyMapperForDeviceWithIndex(isp, 0);
		assert(mapper);

		md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
		    kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
		    bufSize, page_size);

		ret = md1->prepare();
		assert(kIOReturnSuccess == ret);

		IODMAMapSpecification mapSpec;
		bzero(&mapSpec, sizeof(mapSpec));
		uint64_t mapped;
		uint64_t mappedLength;

		start = mach_absolute_time();

		ret = md1->dmaMap(mapper, NULL, &mapSpec, 0, bufSize, &mapped, &mappedLength);
		assert(kIOReturnSuccess == ret);

		time = mach_absolute_time() - start;

		absolutetime_to_nanoseconds(time, &nano);
		kprintf("time %lld us\n", nano / 1000ULL);
		kprintf("seg0 0x%qx, 0x%qx\n", mapped, mappedLength);

		assert(md1);

		dma = IODMACommand::withSpecification(kIODMACommandOutputHost32,
		    32, 0, IODMACommand::kMapped, 0, 1, mapper, NULL);

		assert(dma);

		start = mach_absolute_time();
		ret = dma->setMemoryDescriptor(md1, true);
		assert(kIOReturnSuccess == ret);
		time = mach_absolute_time() - start;

		absolutetime_to_nanoseconds(time, &nano);
		kprintf("time %lld us\n", nano / 1000ULL);


		IODMACommand::Segment32 segments[1];
		UInt32 numSegments = 1;
		UInt64 offset;

		offset = 0;
		ret = dma->gen32IOVMSegments(&offset, &segments[0], &numSegments);
		assert(kIOReturnSuccess == ret);
		assert(1 == numSegments);
		kprintf("seg0 0x%x, 0x%x\n", (int)segments[0].fIOVMAddr, (int)segments[0].fLength);

		ret = dma->clearMemoryDescriptor(true);
		assert(kIOReturnSuccess == ret);

		md1->release();

		return kIOReturnSuccess;
	}

	if (3 == newValue) {
		IOBufferMemoryDescriptor * md1;
		IOBufferMemoryDescriptor * md2;
		IOMemoryMap * map1;
		IOMemoryMap * map2;
		uint32_t * buf1;
		uint32_t * buf2;
		IOReturn err;

		md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
		    kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
		    64 * 1024, page_size);
		assert(md1);
		map1 = md1->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique);
		assert(map1);
		buf1 = (uint32_t *) map1->getVirtualAddress();

		md2 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
		    kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
		    64 * 1024, page_size);
		assert(md2);
		map2 = md2->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique);
		assert(map2);
		buf2 = (uint32_t *) map2->getVirtualAddress();

		memset(buf1, 0x11, 64 * 1024L);
		memset(buf2, 0x22, 64 * 1024L);

		kprintf("md1 %p, map1 %p, buf2 %p; md2 %p, map2 %p, buf2 %p\n", md1, map1, buf1, md2, map2, buf2);

		kprintf("no redir 0x%08x, 0x%08x\n", buf1[0], buf2[0]);
		assert(0x11111111 == buf1[0]);
		assert(0x22222222 == buf2[0]);
		err = map1->redirect(md2, 0, 0ULL);
		kprintf("redir md2(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]);
		assert(0x11111111 == buf2[0]);
		assert(0x22222222 == buf1[0]);
		err = map1->redirect(md1, 0, 0ULL);
		kprintf("redir md1(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]);
		assert(0x11111111 == buf1[0]);
		assert(0x22222222 == buf2[0]);
		map1->release();
		map2->release();
		md1->release();
		md2->release();
	}
#endif

	// Disabled: requires the XHC1 device mapper.
//	result = IODMACommandLocalMappedNonContig(newValue);
//	if (result) return (result);

	// Run each active subtest; stop at the first failure.
	result = IODMACommandForceDoubleBufferTest(newValue);
	if (result) {
		return result;
	}

	result = AllocationNameTest(newValue);
	if (result) {
		return result;
	}

	result = IOMemoryMapCopyOnWriteTest(newValue);
	if (result) {
		return result;
	}

	result = IOMultMemoryDescriptorTest(newValue);
	if (result) {
		return result;
	}

	result = IOBMDOverflowTest(newValue);
	if (result) {
		return result;
	}

	result = ZeroLengthTest(newValue);
	if (result) {
		return result;
	}

	result = IODirectionPrepareNoZeroFillTest(newValue);
	if (result) {
		return result;
	}

	result = BadFixedAllocTest(newValue);
	if (result) {
		return result;
	}

	result = IOMemoryRemoteTest(newValue);
	if (result) {
		return result;
	}

	result = IOMemoryPrefaultTest(newValue);
	if (result) {
		return result;
	}

	// Exhaustive sweep: map/readBytes consistency over a 16MB buffer.
	IOGeneralMemoryDescriptor * md;
	vm_offset_t data[2];
	vm_size_t bsize = 16 * 1024 * 1024;
	vm_size_t srcsize, srcoffset, mapoffset, size;
	kern_return_t kr;

	data[0] = data[1] = 0;
	kr = vm_allocate_kernel(kernel_map, &data[0], bsize, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
	assert(KERN_SUCCESS == kr);

	// Fragment the VM map entries so descriptors span multiple entries.
	vm_inherit(kernel_map, data[0] + ptoa(1), ptoa(1), VM_INHERIT_NONE);
	vm_inherit(kernel_map, data[0] + ptoa(16), ptoa(4), VM_INHERIT_NONE);

	IOLog("data 0x%lx, 0x%lx\n", (long)data[0], (long)data[1]);

	uint32_t idx, offidx;
	// Fill each word with its own index so any offset's value is known.
	for (idx = 0; idx < (bsize / sizeof(uint32_t)); idx++) {
		((uint32_t*)data[0])[idx] = idx;
	}

	for (srcoffset = 0; srcoffset < bsize; srcoffset = ((srcoffset << 2) + 0x40c)) {
		for (srcsize = 4; srcsize < (bsize - srcoffset - 1); srcsize = ((srcsize << 2) + 0x3fc)) {
			IOAddressRange ranges[3];
			uint32_t rangeCount = 1;

			bzero(&ranges[0], sizeof(ranges));
			ranges[0].address = data[0] + srcoffset;
			ranges[0].length = srcsize;
			ranges[1].address = ranges[2].address = data[0];

			if (srcsize > ptoa(5)) {
				// Large source: split into three contiguous odd-sized ranges.
				ranges[0].length = 7634;
				ranges[1].length = 9870;
				ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
				ranges[1].address = ranges[0].address + ranges[0].length;
				ranges[2].address = ranges[1].address + ranges[1].length;
				rangeCount = 3;
			} else if ((srcsize > ptoa(2)) && !(page_mask & srcoffset)) {
				// Page-aligned medium source: present the first two pages
				// swapped (undone in the verification loop below).
				ranges[0].length = ptoa(1);
				ranges[1].length = ptoa(1);
				ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
				ranges[0].address = data[0] + srcoffset + ptoa(1);
				ranges[1].address = data[0] + srcoffset;
				ranges[2].address = ranges[0].address + ranges[0].length;
				rangeCount = 3;
			}

			md = OSDynamicCast(IOGeneralMemoryDescriptor,
			    IOMemoryDescriptor::withAddressRanges(&ranges[0], rangeCount, kIODirectionInOut, kernel_task));
			assert(md);

			IOLog("IOMemoryDescriptor::withAddressRanges [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n",
			    (long) srcsize, (long) srcoffset,
			    (long long) ranges[0].address - data[0], (long long) ranges[0].length,
			    (long long) ranges[1].address - data[0], (long long) ranges[1].length,
			    (long long) ranges[2].address - data[0], (long long) ranges[2].length);

			if (kIOReturnSuccess == kr) {
				for (mapoffset = 0; mapoffset < srcsize; mapoffset = ((mapoffset << 1) + 0xf00)) {
					for (size = 4; size < (srcsize - mapoffset - 1); size = ((size << 2) + 0x200)) {
						IOMemoryMap * map;
						mach_vm_address_t addr = 0;
						uint32_t data;

//						IOLog("<mapRef [0x%lx @ 0x%lx]\n", (long) size, (long) mapoffset);

						map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere, mapoffset, size);
						if (map) {
							addr = map->getAddress();
						} else {
							kr = kIOReturnError;
						}

//						IOLog(">mapRef 0x%x %llx\n", kr, addr);

						if (kIOReturnSuccess != kr) {
							break;
						}
						kr = md->prepare();
						if (kIOReturnSuccess != kr) {
							panic("prepare() fail 0x%x\n", kr);
							break;
						}
						// Compare the mapping and readBytes() against the
						// known index pattern at every word.
						for (idx = 0; idx < size; idx += sizeof(uint32_t)) {
							offidx = (typeof(offidx))(idx + mapoffset + srcoffset);
							if ((srcsize <= ptoa(5)) && (srcsize > ptoa(2)) && !(page_mask & srcoffset)) {
								// Undo the swapped-first-two-pages layout.
								if (offidx < ptoa(2)) {
									offidx ^= ptoa(1);
								}
							}
							offidx /= sizeof(uint32_t);

							if (offidx != ((uint32_t*)addr)[idx / sizeof(uint32_t)]) {
								panic("vm mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md, map, idx, (long) srcoffset, (long) mapoffset);
								kr = kIOReturnBadMedia;
							} else {
								if (sizeof(data) != md->readBytes(mapoffset + idx, &data, sizeof(data))) {
									data = 0;
								}
								if (offidx != data) {
									panic("phys mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md, map, idx, (long) srcoffset, (long) mapoffset);
									kr = kIOReturnBadMedia;
								}
							}
						}
						md->complete();
						map->release();
//						IOLog("unmapRef %llx\n", addr);
					}
					if (kIOReturnSuccess != kr) {
						break;
					}
				}
			}
			md->release();
			if (kIOReturnSuccess != kr) {
				break;
			}
		}
		if (kIOReturnSuccess != kr) {
			break;
		}
	}

	if (kIOReturnSuccess != kr) {
		IOLog("FAIL: src 0x%lx @ 0x%lx, map 0x%lx @ 0x%lx\n",
		    (long) srcsize, (long) srcoffset, (long) size, (long) mapoffset);
	}

	assert(kr == kIOReturnSuccess);

	vm_deallocate(kernel_map, data[0], bsize);
//	vm_deallocate(kernel_map, data[1], size);

	IOLog("IOMemoryDescriptorTest/ %d\n", (int) gIOMemoryReferenceCount);

	return 0;
}
1017
1018 #endif /* DEVELOPMENT || DEBUG */