/*
 * Copyright (c) 2014-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOKitKeysPrivate.h>
#ifndef __LP64__
#include <IOKit/IOSubMemoryDescriptor.h>
#endif /* !__LP64__ */
#include <IOKit/IOSubMemoryDescriptor.h>
#include <IOKit/IOMultiMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>

#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if DEVELOPMENT || DEBUG

extern SInt32 gIOMemoryReferenceCount;
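
/*
 * Exercises IOMemoryDescriptor::getDMAMapLength() across single-range,
 * multi-range, IOSubMemoryDescriptor and IOMultiMemoryDescriptor cases,
 * then maps the tail of the multi descriptor and spot-checks the page
 * fill pattern through the mapping.
 */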
static int
IOMultMemoryDescriptorTest(int newValue)
{
    IOMemoryDescriptor      * mds[3];
    IOMultiMemoryDescriptor * mmd;
    IOMemoryMap             * map;
    void                    * addr;
    uint8_t                 * data;
    uint32_t                  i;
    IOAddressRange            ranges[2];

    data = (typeof(data))IOMallocAligned(ptoa(8), page_size);
    for (i = 0; i < ptoa(8); i++) {
        data[i] = ((uint8_t) atop(i)) | 0xD0;
    }

    ranges[0].address = (IOVirtualAddress)(data + ptoa(4));
    ranges[0].length  = ptoa(4);
    ranges[1].address = (IOVirtualAddress)(data + ptoa(0));
    ranges[1].length  = ptoa(4);

    mds[0] = IOMemoryDescriptor::withAddressRange((mach_vm_address_t) data, 2, kIODirectionOutIn, kernel_task);
    {
        uint64_t dmaLen, dmaOffset;
        dmaLen = mds[0]->getDMAMapLength(&dmaOffset);
        assert(0 == dmaOffset);
        assert(ptoa(1) == dmaLen);
    }
    mds[0]->release();

    mds[0] = IOMemoryDescriptor::withAddressRange((mach_vm_address_t) (data + page_size - 2), 4, kIODirectionOutIn, kernel_task);
    {
        uint64_t dmaLen, dmaOffset;
        dmaLen = mds[0]->getDMAMapLength(&dmaOffset);
        assert((page_size - 2) == dmaOffset);
        assert(ptoa(2) == dmaLen);
    }
    mds[0]->release();

    mds[0] = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn, kernel_task);
    {
        uint64_t dmaLen, dmaOffset;
        dmaLen = mds[0]->getDMAMapLength(&dmaOffset);
        assert(0 == dmaOffset);
        assert(ptoa(8) == dmaLen);
    }

    mds[1] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(3), ptoa(2), kIODirectionOutIn);
    {
        uint64_t dmaLen, dmaOffset;
        dmaLen = mds[1]->getDMAMapLength(&dmaOffset);
        assert(0 == dmaOffset);
        assert(ptoa(2) == dmaLen);
    }

    mds[2] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(7), ptoa(1), kIODirectionOutIn);

    mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds) / sizeof(mds[0]), kIODirectionOutIn, false);
    {
        uint64_t dmaLen, dmaOffset;
        dmaLen = mmd->getDMAMapLength(&dmaOffset);
        assert(0 == dmaOffset);
        assert(ptoa(11) == dmaLen);
    }

    map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere, ptoa(7), mmd->getLength() - ptoa(7));

    addr = (void *) map->getVirtualAddress();
    assert(ptoa(4) == map->getLength());
    assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(0) / sizeof(uint32_t)]);
    assert(0xd7d7d7d7 == ((uint32_t *)addr)[ptoa(1) / sizeof(uint32_t)]);
    assert(0xd0d0d0d0 == ((uint32_t *)addr)[ptoa(2) / sizeof(uint32_t)]);
    assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(3) / sizeof(uint32_t)]);

    IOFreeAligned(data, ptoa(8));

    map->release();
    mmd->release();
    mds[2]->release();
    mds[1]->release();
    mds[0]->release();

    return 0;
}

// <rdar://problem/30102458>
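/*
 * Regression test for the rdar above: for each transfer direction, force an
 * IODMACommand onto a double-buffered (bounce) copy of a pageable buffer via
 * synchronize(kForceDoubleBuffer) and check that data still moves the right
 * way between the original buffer and the generated DMA segment.
 */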
static int
IODMACommandForceDoubleBufferTest(int newValue)
{
    IOReturn                   ret;
    IOBufferMemoryDescriptor * bmd;
    IODMACommand             * dma;
    uint32_t                   dir, data;
    IODMACommand::SegmentOptions segOptions =
    {
        .fStructSize                = sizeof(segOptions),
        .fNumAddressBits            = 64,
        .fMaxSegmentSize            = 0x2000,
        .fMaxTransferSize           = 128 * 1024,
        .fAlignmentLength           = 1,
        .fAlignmentInternalSegments = 1
    };
    IODMACommand::Segment64 segments[1];
    UInt32                  numSegments;
    uint64_t                dmaOffset;

    for (dir = kIODirectionIn;; dir++) {
        bmd = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task,
            dir | kIOMemoryPageable, ptoa(8));
        {
            uint64_t dmaLen, dmaOffset;
            dmaLen = bmd->getDMAMapLength(&dmaOffset);
            assert(0 == dmaOffset);
            assert(ptoa(8) == dmaLen);
        }

        ((uint32_t*) bmd->getBytesNoCopy())[0] = 0x53535300 | dir;

        ret = bmd->prepare((IODirection) dir);
        assert(kIOReturnSuccess == ret);

        dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
            kIODMAMapOptionMapped, NULL, NULL);
        ret = dma->setMemoryDescriptor(bmd, true);
        assert(kIOReturnSuccess == ret);

        ret = dma->synchronize(IODMACommand::kForceDoubleBuffer | kIODirectionOut);
        assert(kIOReturnSuccess == ret);

        dmaOffset   = 0;
        numSegments = 1;
        ret = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments);
        assert(kIOReturnSuccess == ret);
        assert(1 == numSegments);

        if (kIODirectionOut & dir) {
            data = ((uint32_t*) bmd->getBytesNoCopy())[0];
            assertf((0x53535300 | dir) == data, "mismatch 0x%x", data);
        }
        if (kIODirectionIn & dir) {
            IOMappedWrite32(segments[0].fIOVMAddr, 0x11223300 | dir);
        }

        ret = dma->clearMemoryDescriptor(true);
        assert(kIOReturnSuccess == ret);
        dma->release();

        bmd->complete((IODirection) dir);

        if (kIODirectionIn & dir) {
            data = ((uint32_t*) bmd->getBytesNoCopy())[0];
            assertf((0x11223300 | dir) == data, "mismatch 0x%x", data);
        }

        bmd->release();

        if (dir == kIODirectionInOut) {
            break;
        }
    }

    return 0;
}

// <rdar://problem/34322778>
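/*
 * Regression test for the rdar above: when a system IOMapper is present, a
 * DMA mapping built over a fragmented (non-contiguous) kernel VM range must
 * hand back an IOVM address, not the raw physical segment address.
 */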
static int
IODMACommandLocalMappedNonContig(int newValue)
{
    IOReturn             kr;
    IOMemoryDescriptor * md;
    IODMACommand       * dma;
    OSDictionary       * matching;
    IOService          * device;
    IOMapper           * mapper;
    IODMACommand::SegmentOptions segOptions =
    {
        .fStructSize                = sizeof(segOptions),
        .fNumAddressBits            = 64,
        .fMaxSegmentSize            = 128 * 1024,
        .fMaxTransferSize           = 128 * 1024,
        .fAlignmentLength           = 1,
        .fAlignmentInternalSegments = 1
    };
    IODMACommand::Segment64 segments[1];
    UInt32                  numSegments;
    uint64_t                dmaOffset;
    uint64_t                segPhys;
    vm_address_t            buffer;
    vm_size_t               bufSize = ptoa(4);

    if (!IOMapper::gSystem) {
        return 0;
    }

    buffer = 0;
    kr = vm_allocate_kernel(kernel_map, &buffer, bufSize, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
    assert(KERN_SUCCESS == kr);

    // fragment the vmentries
    kr = vm_inherit(kernel_map, buffer + ptoa(1), ptoa(1), VM_INHERIT_NONE);
    assert(KERN_SUCCESS == kr);

    md = IOMemoryDescriptor::withAddressRange(
        buffer + 0xa00, 0x2000, kIODirectionOutIn, kernel_task);
    kr = md->prepare(kIODirectionOutIn);
    assert(kIOReturnSuccess == kr);

    segPhys = md->getPhysicalSegment(0, NULL, 0);

    matching = IOService::nameMatching("XHC1");
    device = IOService::copyMatchingService(matching);
    matching->release();
    mapper = device ? IOMapper::copyMapperForDeviceWithIndex(device, 0) : NULL;

    dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
        kIODMAMapOptionMapped,
        mapper, NULL);
    kr = dma->setMemoryDescriptor(md, true);
    assert(kIOReturnSuccess == kr);

    dmaOffset   = 0;
    numSegments = 1;
    kr = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments);
    assert(kIOReturnSuccess == kr);
    assert(1 == numSegments);

    assertf(segments[0].fIOVMAddr != segPhys, "phys !local 0x%qx, 0x%qx, %p", segments[0].fIOVMAddr, segPhys, dma);

    kr = dma->clearMemoryDescriptor(true);
    assert(kIOReturnSuccess == kr);
    dma->release();

    kr = md->complete(kIODirectionOutIn);
    assert(kIOReturnSuccess == kr);
    md->release();

    kr = vm_deallocate(kernel_map, buffer, bufSize);
    assert(KERN_SUCCESS == kr);
    OSSafeReleaseNULL(mapper);
    OSSafeReleaseNULL(device);

    return 0;
}

// <rdar://problem/30102458>
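/*
 * Builds a kIOMemoryRemote descriptor over addresses with no local backing
 * memory and checks that both getPhysicalSegment() and DMA segment iteration
 * reproduce the caller-supplied ranges verbatim.
 */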
static int
IOMemoryRemoteTest(int newValue)
{
    IOReturn             ret;
    IOMemoryDescriptor * md;
    IOByteCount          offset, length;
    addr64_t             addr;
    uint32_t             idx;
    IODMACommand       * dma;
    IODMACommand::SegmentOptions segOptions =
    {
        .fStructSize                = sizeof(segOptions),
        .fNumAddressBits            = 64,
        .fMaxSegmentSize            = 0x2000,
        .fMaxTransferSize           = 128 * 1024,
        .fAlignmentLength           = 1,
        .fAlignmentInternalSegments = 1
    };
    IODMACommand::Segment64 segments[1];
    UInt32                  numSegments;
    uint64_t                dmaOffset;

    IOAddressRange ranges[2] = {
        { 0x1234567890123456ULL, 0x1000 }, { 0x5432109876543210, 0x2000 },
    };

    md = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn | kIOMemoryRemote, TASK_NULL);
    assert(md);

//  md->readBytes(0, &idx, sizeof(idx));

    ret = md->prepare(kIODirectionOutIn);
    assert(kIOReturnSuccess == ret);

    printf("remote md flags 0x%qx, r %d\n",
        md->getFlags(), (0 != (kIOMemoryRemote & md->getFlags())));

    for (offset = 0, idx = 0; true; offset += length, idx++) {
        addr = md->getPhysicalSegment(offset, &length, 0);
        if (!length) {
            break;
        }
        assert(addr == ranges[idx].address);
        assert(length == ranges[idx].length);
    }
    assert(offset == md->getLength());

    dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
        kIODMAMapOptionUnmapped | kIODMAMapOptionIterateOnly,
        NULL, NULL);
    ret = dma->setMemoryDescriptor(md, true);
    assert(kIOReturnSuccess == ret);

    for (dmaOffset = 0, idx = 0; dmaOffset < md->getLength(); idx++) {
        numSegments = 1;
        ret = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments);
        assert(kIOReturnSuccess == ret);
        assert(1 == numSegments);
        assert(segments[0].fIOVMAddr == ranges[idx].address);
        assert(segments[0].fLength == ranges[idx].length);
    }
    assert(dmaOffset == md->getLength());

    ret = dma->clearMemoryDescriptor(true);
    assert(kIOReturnSuccess == ret);
    dma->release();
    md->complete(kIODirectionOutIn);
    md->release();

    return 0;
}
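
/*
 * Maps a pageable buffer with kIOMapPrefault and then touches it while
 * holding an IOSimpleLock, where taking a page fault would not be legal;
 * the prefaulted mapping should make the access safe.
 */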
static IOReturn
IOMemoryPrefaultTest(uint32_t options)
{
    IOBufferMemoryDescriptor * bmd;
    IOMemoryMap              * map;
    IOReturn                   kr;
    uint32_t                   data;
    uint32_t                 * p;
    IOSimpleLock             * lock;

    lock = IOSimpleLockAlloc();
    assert(lock);

    bmd = IOBufferMemoryDescriptor::inTaskWithOptions(current_task(),
        kIODirectionOutIn | kIOMemoryPageable, ptoa(8));
    assert(bmd);
    kr = bmd->prepare();
    assert(KERN_SUCCESS == kr);

    map = bmd->map(kIOMapPrefault);
    assert(map);

    p = (typeof(p))map->getVirtualAddress();
    IOSimpleLockLock(lock);
    data = p[0];
    IOSimpleLockUnlock(lock);

    IOLog("IOMemoryPrefaultTest %d\n", data);

    map->release();
    bmd->release();
    IOSimpleLockFree(lock);

    return kIOReturnSuccess;
}
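
/*
 * Overflow check: requests an IOBufferMemoryDescriptor with a ~2^64 byte
 * capacity and expects the constructor to reject it rather than let the
 * internal size arithmetic wrap.
 */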
static IOReturn
IOBMDOverflowTest(uint32_t options)
{
    IOBufferMemoryDescriptor * bmd;

    bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, kIOMemoryKernelUserShared | kIODirectionOut,
        0xffffffffffffffff, 0xfffffffffffff000);
    assert(NULL == bmd);

    return kIOReturnSuccess;
}

// <rdar://problem/26375234>
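/*
 * Regression test for the rdar above: a zero-length descriptor should be
 * creatable, preparable and releasable without side effects.
 */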
static int
ZeroLengthTest(int newValue)
{
    IOMemoryDescriptor * md;

    md = IOMemoryDescriptor::withAddressRange(
        0, 0, kIODirectionNone, current_task());
    assert(md);
    md->prepare();
    md->complete();
    md->release();

    return 0;
}

// <rdar://problem/27002624>
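/*
 * Regression test for the rdar above: a fixed-address mapping request
 * (no kIOMapAnywhere, address 0x2000) into kernel_task must fail instead
 * of returning a bogus mapping.
 */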
static int
BadFixedAllocTest(int newValue)
{
    IOBufferMemoryDescriptor * bmd;
    IOMemoryMap              * map;

    bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL,
        kIODirectionIn | kIOMemoryPageable, ptoa(1));
    assert(bmd);
    map = bmd->createMappingInTask(kernel_task, 0x2000, 0);
    assert(!map);

    bmd->release();

    return 0;
}

// <rdar://problem/26466423>
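/*
 * Regression test for the rdar above: nested prepare()/complete() calls that
 * mix kIODirectionPrepareNoZeroFill and kIODirectionCompleteWithDataValid
 * with the plain variants must balance correctly.
 */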
static int
IODirectionPrepareNoZeroFillTest(int newValue)
{
    IOBufferMemoryDescriptor * bmd;

    bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL,
        kIODirectionIn | kIOMemoryPageable, ptoa(24));
    assert(bmd);
    bmd->prepare((IODirection)(kIODirectionIn | kIODirectionPrepareNoZeroFill));
    bmd->prepare(kIODirectionIn);
    bmd->complete((IODirection)(kIODirectionIn | kIODirectionCompleteWithDataValid));
    bmd->complete(kIODirectionIn);
    bmd->release();

    return 0;
}

// <rdar://problem/28190483>
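/*
 * Regression test for the rdar above: maps a user buffer read-only into the
 * kernel (optionally copy-on-write), rewrites part of the source buffer, and
 * checks whether the kernel mapping sees the new data (shared) or keeps the
 * original contents (copy-on-write).
 */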
static IOReturn
IOMemoryMapTest(uint32_t options)
{
    IOBufferMemoryDescriptor * bmd;
    IOMemoryDescriptor       * md;
    IOMemoryMap              * map;
    uint32_t                   data;
    user_addr_t                p;
    uint8_t                  * p2;
    int                        r;
    uint64_t                   time, nano;

    bmd = IOBufferMemoryDescriptor::inTaskWithOptions(current_task(),
        kIODirectionOutIn | kIOMemoryPageable, 0x4018 + 0x800);
    assert(bmd);
    p = (typeof(p))bmd->getBytesNoCopy();

    data = 0x11111111;
    r = copyout(&data, p, sizeof(data));
    assert(0 == r);
    data = 0x22222222;
    r = copyout(&data, p + 0x1000, sizeof(data));
    assert(0 == r);
    data = 0x33333333;
    r = copyout(&data, p + 0x2000, sizeof(data));
    assert(0 == r);
    data = 0x44444444;
    r = copyout(&data, p + 0x3000, sizeof(data));
    assert(0 == r);

    md = IOMemoryDescriptor::withAddressRange(p, 0x4018,
        kIODirectionOut | options,
        current_task());
    assert(md);
    time = mach_absolute_time();
    map = md->map(kIOMapReadOnly);
    time = mach_absolute_time() - time;
    assert(map);
    absolutetime_to_nanoseconds(time, &nano);

    p2 = (typeof(p2))map->getVirtualAddress();
    assert(0x11 == p2[0]);
    assert(0x22 == p2[0x1000]);
    assert(0x33 == p2[0x2000]);
    assert(0x44 == p2[0x3000]);

    data = 0x99999999;
    r = copyout(&data, p + 0x2000, sizeof(data));
    assert(0 == r);

    assert(0x11 == p2[0]);
    assert(0x22 == p2[0x1000]);
    assert(0x44 == p2[0x3000]);
    if (kIOMemoryMapCopyOnWrite & options) {
        assert(0x33 == p2[0x2000]);
    } else {
        assert(0x99 == p2[0x2000]);
    }

    IOLog("IOMemoryMapCopyOnWriteTest map(%s) %lld ns\n",
        kIOMemoryMapCopyOnWrite & options ? "kIOMemoryMapCopyOnWrite" : "",
        nano);

    map->release();
    md->release();
    bmd->release();

    return kIOReturnSuccess;
}

static IOReturn
IOMemoryMapCopyOnWriteTest(int newValue)
{
    IOMemoryMapTest(kIOMemoryMapCopyOnWrite);
    return kIOReturnSuccess;
}
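
/*
 * Allocates a buffer while a kern_allocation_name tag is set on the current
 * thread so the memory is attributed to that tag, then restores the prior
 * tag and releases the name.
 */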
static int
AllocationNameTest(int newValue)
{
    IOMemoryDescriptor * bmd;
    kern_allocation_name_t name, prior;

    name = kern_allocation_name_allocate("com.apple.iokit.test", 0);
    assert(name);

    prior = thread_set_allocation_name(name);

    bmd = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
        kIODirectionOutIn | kIOMemoryPageable | kIOMemoryKernelUserShared,
        ptoa(13));
    assert(bmd);

    thread_set_allocation_name(prior);
    kern_allocation_name_release(name);

    bmd->release();

    return 0;
}
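
/*
 * Test entry point. Specific newValue selectors run individual experiments
 * (multi/sub descriptor mapping, DMA segment generation, mapper timing, map
 * redirection); otherwise every regression test above is run, followed by a
 * brute-force sweep that builds descriptors at many source offsets/sizes,
 * maps sub-ranges, and cross-checks mapped contents against readBytes().
 */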
int
IOMemoryDescriptorTest(int newValue)
{
    int result;

    IOLog("/IOMemoryDescriptorTest %d\n", (int) gIOMemoryReferenceCount);

    if (6 == newValue) {
        IOMemoryDescriptor      * sbmds[3];
        IOMultiMemoryDescriptor * smmd;
        IOMemoryDescriptor      * mds[2];
        IOMultiMemoryDescriptor * mmd;
        IOMemoryMap             * map;

        sbmds[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(1));
        sbmds[1] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(2));
        sbmds[2] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(3));
        smmd = IOMultiMemoryDescriptor::withDescriptors(&sbmds[0], sizeof(sbmds) / sizeof(sbmds[0]), kIODirectionOutIn, false);

        mds[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(1));
        mds[1] = smmd;
        mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds) / sizeof(mds[0]), kIODirectionOutIn, false);
        map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere);
        assert(map);

        return kIOReturnSuccess;
    } else if (5 == newValue) {
        IOReturn             ret;
        IOMemoryDescriptor * md;
        IODMACommand       * dma;
        IODMACommand::SegmentOptions segOptions =
        {
            .fStructSize                = sizeof(segOptions),
            .fNumAddressBits            = 64,
            .fMaxSegmentSize            = 4096,
            .fMaxTransferSize           = 128 * 1024,
            .fAlignmentLength           = 4,
            .fAlignmentInternalSegments = 0x1000
        };

        IOAddressRange ranges[3][2] =
        {
            {
                { (uintptr_t) &IOMemoryDescriptorTest, 0x2ffc },
                { 0, 0 },
            },
            {
                { ranges[0][0].address, 0x10 },
                { 0x3000 + ranges[0][0].address, 0xff0 },
            },
            {
                { ranges[0][0].address, 0x2ffc },
                { trunc_page(ranges[0][0].address), 0x800 },
            },
        };
        static const uint32_t rangesCount[3] = { 1, 2, 2 };
        uint32_t test;

        for (test = 0; test < 3; test++) {
            kprintf("---[%d] address 0x%qx-0x%qx, 0x%qx-0x%qx\n", test,
                ranges[test][0].address, ranges[test][0].length,
                ranges[test][1].address, ranges[test][1].length);

            md = IOMemoryDescriptor::withAddressRanges((IOAddressRange *)&ranges[test][0], rangesCount[test], kIODirectionOut, kernel_task);
            assert(md);
            ret = md->prepare();
            assert(kIOReturnSuccess == ret);
            dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
                IODMACommand::kMapped, NULL, NULL);
            ret = dma->setMemoryDescriptor(md, true);
            if (kIOReturnSuccess == ret) {
                IODMACommand::Segment64 segments[1];
                UInt32                  numSegments;
                UInt64                  offset;

                offset = 0;
                do {
                    numSegments = 1;
                    ret = dma->gen64IOVMSegments(&offset, &segments[0], &numSegments);
                    assert(kIOReturnSuccess == ret);
                    assert(1 == numSegments);
                    kprintf("seg 0x%qx, 0x%qx\n", segments[0].fIOVMAddr, segments[0].fLength);
                } while (offset < md->getLength());

                ret = dma->clearMemoryDescriptor(true);
                assert(kIOReturnSuccess == ret);
                dma->release();
            }
            md->release();
        }
        return kIOReturnSuccess;
    } else if (4 == newValue) {
        IOService                * isp;
        IOMapper                 * mapper;
        IOBufferMemoryDescriptor * md1;
        IODMACommand             * dma;
        IOReturn                   ret;
        size_t bufSize = 8192 * 8192 * sizeof(uint32_t);
        uint64_t start, time, nano;

        isp = IOService::copyMatchingService(IOService::nameMatching("isp"));
        assert(isp);
        mapper = IOMapper::copyMapperForDeviceWithIndex(isp, 0);
        assert(mapper);

        md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
            kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
            bufSize, page_size);

        ret = md1->prepare();
        assert(kIOReturnSuccess == ret);

        IODMAMapSpecification mapSpec;
        bzero(&mapSpec, sizeof(mapSpec));
        uint64_t mapped;
        uint64_t mappedLength;

        start = mach_absolute_time();

        ret = md1->dmaMap(mapper, NULL, &mapSpec, 0, bufSize, &mapped, &mappedLength);
        assert(kIOReturnSuccess == ret);

        time = mach_absolute_time() - start;

        absolutetime_to_nanoseconds(time, &nano);
        kprintf("time %lld us\n", nano / 1000ULL);
        kprintf("seg0 0x%qx, 0x%qx\n", mapped, mappedLength);

        dma = IODMACommand::withSpecification(kIODMACommandOutputHost32,
            32, 0, IODMACommand::kMapped, 0, 1, mapper, NULL);
        assert(dma);

        start = mach_absolute_time();
        ret = dma->setMemoryDescriptor(md1, true);
        assert(kIOReturnSuccess == ret);
        time = mach_absolute_time() - start;

        absolutetime_to_nanoseconds(time, &nano);
        kprintf("time %lld us\n", nano / 1000ULL);

        IODMACommand::Segment32 segments[1];
        UInt32 numSegments = 1;
        UInt64 offset = 0;

        ret = dma->gen32IOVMSegments(&offset, &segments[0], &numSegments);
        assert(kIOReturnSuccess == ret);
        assert(1 == numSegments);
        kprintf("seg0 0x%x, 0x%x\n", (int)segments[0].fIOVMAddr, (int)segments[0].fLength);

        ret = dma->clearMemoryDescriptor(true);
        assert(kIOReturnSuccess == ret);

        return kIOReturnSuccess;
    } else if (3 == newValue) {
        IOBufferMemoryDescriptor * md1;
        IOBufferMemoryDescriptor * md2;
        IOMemoryMap              * map1;
        IOMemoryMap              * map2;
        uint32_t                 * buf1;
        uint32_t                 * buf2;
        IOReturn                   err;

        md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
            kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
            64 * 1024, page_size);
        map1 = md1->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique);
        buf1 = (uint32_t *) map1->getVirtualAddress();

        md2 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
            kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
            64 * 1024, page_size);
        map2 = md2->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique);
        buf2 = (uint32_t *) map2->getVirtualAddress();

        memset(buf1, 0x11, 64 * 1024L);
        memset(buf2, 0x22, 64 * 1024L);

        kprintf("md1 %p, map1 %p, buf1 %p; md2 %p, map2 %p, buf2 %p\n", md1, map1, buf1, md2, map2, buf2);

        kprintf("no redir 0x%08x, 0x%08x\n", buf1[0], buf2[0]);
        assert(0x11111111 == buf1[0]);
        assert(0x22222222 == buf2[0]);
        err = map1->redirect(md2, 0, 0ULL);
        kprintf("redir md2(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]);
        assert(0x11111111 == buf2[0]);
        assert(0x22222222 == buf1[0]);
        err = map1->redirect(md1, 0, 0ULL);
        kprintf("redir md1(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]);
        assert(0x11111111 == buf1[0]);
        assert(0x22222222 == buf2[0]);

        return kIOReturnSuccess;
    }

//  result = IODMACommandLocalMappedNonContig(newValue);
//  if (result) return (result);

    result = IODMACommandForceDoubleBufferTest(newValue);
    if (result) {
        return result;
    }

    result = AllocationNameTest(newValue);
    if (result) {
        return result;
    }

    result = IOMemoryMapCopyOnWriteTest(newValue);
    if (result) {
        return result;
    }

    result = IOMultMemoryDescriptorTest(newValue);
    if (result) {
        return result;
    }

    result = IOBMDOverflowTest(newValue);
    if (result) {
        return result;
    }

    result = ZeroLengthTest(newValue);
    if (result) {
        return result;
    }

    result = IODirectionPrepareNoZeroFillTest(newValue);
    if (result) {
        return result;
    }

    result = BadFixedAllocTest(newValue);
    if (result) {
        return result;
    }

    result = IOMemoryRemoteTest(newValue);
    if (result) {
        return result;
    }

    result = IOMemoryPrefaultTest(newValue);
    if (result) {
        return result;
    }

    IOGeneralMemoryDescriptor * md;
    vm_address_t data[2];
    vm_size_t bsize = 16 * 1024 * 1024;
    vm_size_t srcsize, srcoffset, mapoffset, size;
    kern_return_t kr;

    data[0] = data[1] = 0;
    kr = vm_allocate_kernel(kernel_map, &data[0], bsize, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
    assert(KERN_SUCCESS == kr);

    vm_inherit(kernel_map, data[0] + ptoa(1), ptoa(1), VM_INHERIT_NONE);
    vm_inherit(kernel_map, data[0] + ptoa(16), ptoa(4), VM_INHERIT_NONE);

    IOLog("data 0x%lx, 0x%lx\n", (long)data[0], (long)data[1]);

    uint32_t idx, offidx;
    for (idx = 0; idx < (bsize / sizeof(uint32_t)); idx++) {
        ((uint32_t*)data[0])[idx] = idx;
    }

    for (srcoffset = 0; srcoffset < bsize; srcoffset = ((srcoffset << 2) + 0x40c)) {
        for (srcsize = 4; srcsize < (bsize - srcoffset - 1); srcsize = ((srcsize << 2) + 0x3fc)) {
            IOAddressRange ranges[3];
            uint32_t rangeCount = 1;

            bzero(&ranges[0], sizeof(ranges));
            ranges[0].address = data[0] + srcoffset;
            ranges[0].length = srcsize;
            ranges[1].address = ranges[2].address = data[0];

            if (srcsize > ptoa(5)) {
                ranges[0].length = 7634;
                ranges[1].length = 9870;
                ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
                ranges[1].address = ranges[0].address + ranges[0].length;
                ranges[2].address = ranges[1].address + ranges[1].length;
                rangeCount = 3;
            } else if ((srcsize > ptoa(2)) && !(page_mask & srcoffset)) {
                ranges[0].length = ptoa(1);
                ranges[1].length = ptoa(1);
                ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
                ranges[0].address = data[0] + srcoffset + ptoa(1);
                ranges[1].address = data[0] + srcoffset;
                ranges[2].address = ranges[0].address + ranges[0].length;
                rangeCount = 3;
            }

            md = OSDynamicCast(IOGeneralMemoryDescriptor,
                IOMemoryDescriptor::withAddressRanges(&ranges[0], rangeCount, kIODirectionInOut, kernel_task));
            assert(md);

            IOLog("IOMemoryDescriptor::withAddressRanges [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n",
                (long) srcsize, (long) srcoffset,
                (long long) ranges[0].address - data[0], (long long) ranges[0].length,
                (long long) ranges[1].address - data[0], (long long) ranges[1].length,
                (long long) ranges[2].address - data[0], (long long) ranges[2].length);

            if (kIOReturnSuccess == kr) {
                for (mapoffset = 0; mapoffset < srcsize; mapoffset = ((mapoffset << 1) + 0xf00)) {
                    for (size = 4; size < (srcsize - mapoffset - 1); size = ((size << 2) + 0x200)) {
                        IOMemoryMap     * map;
                        mach_vm_address_t addr = 0;
                        uint32_t          data;

//                      IOLog("<mapRef [0x%lx @ 0x%lx]\n", (long) size, (long) mapoffset);

                        map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere, mapoffset, size);
                        if (map) {
                            addr = map->getAddress();
                        } else {
                            kr = kIOReturnError;
                        }

//                      IOLog(">mapRef 0x%x %llx\n", kr, addr);

                        if (kIOReturnSuccess != kr) {
                            break;
                        }
                        kr = md->prepare();
                        if (kIOReturnSuccess != kr) {
                            panic("prepare() fail 0x%x\n", kr);
                        }
                        for (idx = 0; idx < size; idx += sizeof(uint32_t)) {
                            offidx = (typeof(offidx))(idx + mapoffset + srcoffset);
                            if ((srcsize <= ptoa(5)) && (srcsize > ptoa(2)) && !(page_mask & srcoffset)) {
                                if (offidx < ptoa(2)) {
                                    offidx ^= ptoa(1);
                                }
                            }
                            offidx /= sizeof(uint32_t);

                            if (offidx != ((uint32_t*)addr)[idx / sizeof(uint32_t)]) {
                                panic("vm mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md, map, idx, (long) srcoffset, (long) mapoffset);
                                kr = kIOReturnBadMedia;
                            } else {
                                if (sizeof(data) != md->readBytes(mapoffset + idx, &data, sizeof(data))) {
                                    data = 0;
                                }
                                if (offidx != data) {
                                    panic("phys mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md, map, idx, (long) srcoffset, (long) mapoffset);
                                    kr = kIOReturnBadMedia;
                                }
                            }
                        }
                        md->complete();
                        map->release();
//                      IOLog("unmapRef %llx\n", addr);

                        if (kIOReturnSuccess != kr) {
                            break;
                        }
                    }
                    if (kIOReturnSuccess != kr) {
                        break;
                    }
                }
            }
            md->release();
            if (kIOReturnSuccess != kr) {
                break;
            }
        }
        if (kIOReturnSuccess != kr) {
            break;
        }
    }

    if (kIOReturnSuccess != kr) {
        IOLog("FAIL: src 0x%lx @ 0x%lx, map 0x%lx @ 0x%lx\n",
            (long) srcsize, (long) srcoffset, (long) size, (long) mapoffset);
    }
    assert(kr == kIOReturnSuccess);

    vm_deallocate(kernel_map, data[0], bsize);
//  vm_deallocate(kernel_map, data[1], size);

    IOLog("IOMemoryDescriptorTest/ %d\n", (int) gIOMemoryReferenceCount);

    return 0;
}

#endif /* DEVELOPMENT || DEBUG */