/*
 * Copyright (c) 2014-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOKitKeysPrivate.h>

/* NOTE(review): the original paste contained a stray `#endif` with no matching
 * conditional; the `#ifndef __LP64__` guard is restored here to rebalance it. */
#ifndef __LP64__
#include <IOKit/IOSubMemoryDescriptor.h>
#endif /* !__LP64__ */
#include <IOKit/IOSubMemoryDescriptor.h>
#include <IOKit/IOMultiMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>

#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>
64 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
66 #if DEVELOPMENT || DEBUG
68 static int IOMultMemoryDescriptorTest(int newValue
)
70 IOMemoryDescriptor
* mds
[3];
71 IOMultiMemoryDescriptor
* mmd
;
76 IOAddressRange ranges
[2];
78 data
= (typeof(data
)) IOMallocAligned(ptoa(8), page_size
);
79 for (i
= 0; i
< ptoa(8); i
++) data
[i
] = atop(i
) | 0xD0;
81 ranges
[0].address
= (IOVirtualAddress
)(data
+ ptoa(4));
82 ranges
[0].length
= ptoa(4);
83 ranges
[1].address
= (IOVirtualAddress
)(data
+ ptoa(0));
84 ranges
[1].length
= ptoa(4);
86 mds
[0] = IOMemoryDescriptor::withAddressRanges(&ranges
[0], 2, kIODirectionOutIn
, kernel_task
);
88 mds
[1] = IOSubMemoryDescriptor::withSubRange(mds
[0], ptoa(3), ptoa(2), kIODirectionOutIn
);
89 mds
[2] = IOSubMemoryDescriptor::withSubRange(mds
[0], ptoa(7), ptoa(1), kIODirectionOutIn
);
91 mmd
= IOMultiMemoryDescriptor::withDescriptors(&mds
[0], sizeof(mds
)/sizeof(mds
[0]), kIODirectionOutIn
, false);
95 map
= mmd
->createMappingInTask(kernel_task
, 0, kIOMapAnywhere
, ptoa(7), mmd
->getLength() - ptoa(7));
99 addr
= (void *) map
->getVirtualAddress();
100 assert(ptoa(4) == map
->getLength());
101 assert(0xd3d3d3d3 == ((uint32_t *)addr
)[ptoa(0) / sizeof(uint32_t)]);
102 assert(0xd7d7d7d7 == ((uint32_t *)addr
)[ptoa(1) / sizeof(uint32_t)]);
103 assert(0xd0d0d0d0 == ((uint32_t *)addr
)[ptoa(2) / sizeof(uint32_t)]);
104 assert(0xd3d3d3d3 == ((uint32_t *)addr
)[ptoa(3) / sizeof(uint32_t)]);
106 IOFreeAligned(data
, ptoa(8));
111 // <rdar://problem/26375234>
113 ZeroLengthTest(int newValue
)
115 IOMemoryDescriptor
* md
;
117 md
= IOMemoryDescriptor::withAddressRange(
118 0, 0, kIODirectionNone
, current_task());
126 // <rdar://problem/26466423>
128 IODirectionPrepareNoZeroFillTest(int newValue
)
130 IOBufferMemoryDescriptor
* bmd
;
132 bmd
= IOBufferMemoryDescriptor::inTaskWithOptions(NULL
,
133 kIODirectionIn
| kIOMemoryPageable
, ptoa(24));
135 bmd
->prepare((IODirection
)(kIODirectionIn
| kIODirectionPrepareNoZeroFill
));
136 bmd
->prepare(kIODirectionIn
);
137 bmd
->complete((IODirection
)(kIODirectionIn
| kIODirectionCompleteWithDataValid
));
138 bmd
->complete(kIODirectionIn
);
143 int IOMemoryDescriptorTest(int newValue
)
150 IOMemoryDescriptor
* sbmds
[3];
151 IOMultiMemoryDescriptor
* smmd
;
152 IOMemoryDescriptor
* mds
[2];
153 IOMultiMemoryDescriptor
* mmd
;
156 sbmds
[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task
, kIODirectionOutIn
| kIOMemoryKernelUserShared
, ptoa(1));
157 sbmds
[1] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task
, kIODirectionOutIn
| kIOMemoryKernelUserShared
, ptoa(2));
158 sbmds
[2] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task
, kIODirectionOutIn
| kIOMemoryKernelUserShared
, ptoa(3));
159 smmd
= IOMultiMemoryDescriptor::withDescriptors(&sbmds
[0], sizeof(sbmds
)/sizeof(sbmds
[0]), kIODirectionOutIn
, false);
161 mds
[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task
, kIODirectionOutIn
| kIOMemoryKernelUserShared
, ptoa(1));
163 mmd
= IOMultiMemoryDescriptor::withDescriptors(&mds
[0], sizeof(mds
)/sizeof(mds
[0]), kIODirectionOutIn
, false);
164 map
= mmd
->createMappingInTask(kernel_task
, 0, kIOMapAnywhere
);
176 else if (5 == newValue
)
179 IOMemoryDescriptor
* md
;
181 IODMACommand::SegmentOptions segOptions
=
183 .fStructSize
= sizeof(segOptions
),
184 .fNumAddressBits
= 64,
185 .fMaxSegmentSize
= 4096,
186 .fMaxTransferSize
= 128*1024,
188 .fAlignmentLength
= 4,
189 .fAlignmentInternalSegments
= 0x1000
192 IOAddressRange ranges
[3][2] =
195 { (uintptr_t) &IOMemoryDescriptorTest
, 0x2ffc },
199 { ranges
[0][0].address
, 0x10 },
200 { 0x3000 + ranges
[0][0].address
, 0xff0 },
203 { ranges
[0][0].address
, 0x2ffc },
204 { trunc_page(ranges
[0][0].address
), 0x800 },
207 static const uint32_t rangesCount
[3] = { 1, 2, 2 };
210 for (test
= 0; test
< 3; test
++)
212 kprintf("---[%d] address 0x%qx-0x%qx, 0x%qx-0x%qx\n", test
,
213 ranges
[test
][0].address
, ranges
[test
][0].length
,
214 ranges
[test
][1].address
, ranges
[test
][1].length
);
216 md
= IOMemoryDescriptor::withAddressRanges((IOAddressRange
*)&ranges
[test
][0], rangesCount
[test
], kIODirectionOut
, kernel_task
);
219 assert(kIOReturnSuccess
== ret
);
220 dma
= IODMACommand::withSpecification(kIODMACommandOutputHost64
, &segOptions
,
221 IODMACommand::kMapped
, NULL
, NULL
);
223 ret
= dma
->setMemoryDescriptor(md
, true);
224 if (kIOReturnSuccess
== ret
)
226 IODMACommand::Segment64 segments
[1];
234 ret
= dma
->gen64IOVMSegments(&offset
, &segments
[0], &numSegments
);
235 assert(kIOReturnSuccess
== ret
);
236 assert(1 == numSegments
);
237 kprintf("seg 0x%qx, 0x%qx\n", segments
[0].fIOVMAddr
, segments
[0].fLength
);
239 while (offset
< md
->getLength());
241 ret
= dma
->clearMemoryDescriptor(true);
242 assert(kIOReturnSuccess
== ret
);
248 return (kIOReturnSuccess
);
250 else if (4 == newValue
)
254 IOBufferMemoryDescriptor
* md1
;
257 size_t bufSize
= 8192 * 8192 * sizeof(uint32_t);
258 uint64_t start
, time
, nano
;
260 isp
= IOService::copyMatchingService(IOService::nameMatching("isp"));
262 mapper
= IOMapper::copyMapperForDeviceWithIndex(isp
, 0);
265 md1
= IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL
,
266 kIODirectionOutIn
| kIOMemoryPersistent
| kIOMemoryPageable
,
269 ret
= md1
->prepare();
270 assert(kIOReturnSuccess
== ret
);
272 IODMAMapSpecification mapSpec
;
273 bzero(&mapSpec
, sizeof(mapSpec
));
275 uint64_t mappedLength
;
277 start
= mach_absolute_time();
279 ret
= md1
->dmaMap(mapper
, NULL
, &mapSpec
, 0, bufSize
, &mapped
, &mappedLength
);
280 assert(kIOReturnSuccess
== ret
);
282 time
= mach_absolute_time() - start
;
284 absolutetime_to_nanoseconds(time
, &nano
);
285 kprintf("time %lld us\n", nano
/ 1000ULL);
286 kprintf("seg0 0x%qx, 0x%qx\n", mapped
, mappedLength
);
290 dma
= IODMACommand::withSpecification(kIODMACommandOutputHost32
,
291 32, 0, IODMACommand::kMapped
, 0, 1, mapper
, NULL
);
295 start
= mach_absolute_time();
296 ret
= dma
->setMemoryDescriptor(md1
, true);
297 assert(kIOReturnSuccess
== ret
);
298 time
= mach_absolute_time() - start
;
300 absolutetime_to_nanoseconds(time
, &nano
);
301 kprintf("time %lld us\n", nano
/ 1000ULL);
304 IODMACommand::Segment32 segments
[1];
305 UInt32 numSegments
= 1;
309 ret
= dma
->gen32IOVMSegments(&offset
, &segments
[0], &numSegments
);
310 assert(kIOReturnSuccess
== ret
);
311 assert(1 == numSegments
);
312 kprintf("seg0 0x%x, 0x%x\n", (int)segments
[0].fIOVMAddr
, (int)segments
[0].fLength
);
314 ret
= dma
->clearMemoryDescriptor(true);
315 assert(kIOReturnSuccess
== ret
);
319 return (kIOReturnSuccess
);
324 IOBufferMemoryDescriptor
* md1
;
325 IOBufferMemoryDescriptor
* md2
;
332 md1
= IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL
,
333 kIODirectionOutIn
| kIOMemoryPersistent
| kIOMemoryPageable
,
336 map1
= md1
->createMappingInTask(kernel_task
, 0, kIOMapAnywhere
| kIOMapUnique
);
338 buf1
= (uint32_t *) map1
->getVirtualAddress();
340 md2
= IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL
,
341 kIODirectionOutIn
| kIOMemoryPersistent
| kIOMemoryPageable
,
344 map2
= md2
->createMappingInTask(kernel_task
, 0, kIOMapAnywhere
| kIOMapUnique
);
346 buf2
= (uint32_t *) map2
->getVirtualAddress();
348 memset(buf1
, 0x11, 64*1024L);
349 memset(buf2
, 0x22, 64*1024L);
351 kprintf("md1 %p, map1 %p, buf2 %p; md2 %p, map2 %p, buf2 %p\n", md1
, map1
, buf1
, md2
, map2
, buf2
);
353 kprintf("no redir 0x%08x, 0x%08x\n", buf1
[0], buf2
[0]);
354 assert(0x11111111 == buf1
[0]);
355 assert(0x22222222 == buf2
[0]);
356 err
= map1
->redirect(md2
, 0, 0ULL);
357 kprintf("redir md2(0x%x) 0x%08x, 0x%08x\n", err
, buf1
[0], buf2
[0]);
358 assert(0x11111111 == buf2
[0]);
359 assert(0x22222222 == buf1
[0]);
360 err
= map1
->redirect(md1
, 0, 0ULL);
361 kprintf("redir md1(0x%x) 0x%08x, 0x%08x\n", err
, buf1
[0], buf2
[0]);
362 assert(0x11111111 == buf1
[0]);
363 assert(0x22222222 == buf2
[0]);
371 result
= IOMultMemoryDescriptorTest(newValue
);
372 if (result
) return (result
);
374 result
= ZeroLengthTest(newValue
);
375 if (result
) return (result
);
377 result
= IODirectionPrepareNoZeroFillTest(newValue
);
378 if (result
) return (result
);
380 IOGeneralMemoryDescriptor
* md
;
382 vm_size_t bsize
= 16*1024*1024;
383 vm_size_t srcsize
, srcoffset
, mapoffset
, size
;
386 kr
= vm_allocate(kernel_map
, &data
[0], bsize
, VM_FLAGS_ANYWHERE
);
387 vm_inherit(kernel_map
, data
[0] + ptoa(1), ptoa(1), VM_INHERIT_NONE
);
388 vm_inherit(kernel_map
, data
[0] + ptoa(16), ptoa(4), VM_INHERIT_NONE
);
390 IOLog("data 0x%lx, 0x%lx\n", (long)data
[0], (long)data
[1]);
392 uint32_t idx
, offidx
;
393 for (idx
= 0; idx
< (bsize
/ sizeof(uint32_t)); idx
++)
395 ((uint32_t*)data
[0])[idx
] = idx
;
398 for (srcoffset
= 0; srcoffset
< bsize
; srcoffset
= ((srcoffset
<< 2) + 0x40c))
400 for (srcsize
= 4; srcsize
< (bsize
- srcoffset
- 1); srcsize
= ((srcsize
<< 2) + 0x3fc))
402 IOAddressRange ranges
[3];
403 uint32_t rangeCount
= 1;
405 bzero(&ranges
[0], sizeof(ranges
));
406 ranges
[0].address
= data
[0] + srcoffset
;
407 ranges
[0].length
= srcsize
;
409 if (srcsize
> ptoa(5))
411 ranges
[0].length
= 7634;
412 ranges
[1].length
= 9870;
413 ranges
[2].length
= srcsize
- ranges
[0].length
- ranges
[1].length
;
414 ranges
[1].address
= ranges
[0].address
+ ranges
[0].length
;
415 ranges
[2].address
= ranges
[1].address
+ ranges
[1].length
;
418 else if ((srcsize
> ptoa(2)) && !(page_mask
& srcoffset
))
420 ranges
[0].length
= ptoa(1);
421 ranges
[1].length
= ptoa(1);
422 ranges
[2].length
= srcsize
- ranges
[0].length
- ranges
[1].length
;
423 ranges
[0].address
= data
[0] + srcoffset
+ ptoa(1);
424 ranges
[1].address
= data
[0] + srcoffset
;
425 ranges
[2].address
= ranges
[0].address
+ ranges
[0].length
;
429 md
= OSDynamicCast(IOGeneralMemoryDescriptor
,
430 IOMemoryDescriptor::withAddressRanges(&ranges
[0], rangeCount
, kIODirectionInOut
, kernel_task
));
433 IOLog("IOMemoryDescriptor::withAddressRanges [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n",
434 (long) srcsize
, (long) srcoffset
,
435 (long long) ranges
[0].address
- data
[0], (long long) ranges
[0].length
,
436 (long long) ranges
[1].address
- data
[0], (long long) ranges
[1].length
,
437 (long long) ranges
[2].address
- data
[0], (long long) ranges
[2].length
);
439 if (kIOReturnSuccess
== kr
)
441 for (mapoffset
= 0; mapoffset
< srcsize
; mapoffset
= ((mapoffset
<< 1) + 0xf00))
443 for (size
= 4; size
< (srcsize
- mapoffset
- 1); size
= ((size
<< 2) + 0x200))
446 mach_vm_address_t addr
= 0;
449 // IOLog("<mapRef [0x%lx @ 0x%lx]\n", (long) size, (long) mapoffset);
451 map
= md
->createMappingInTask(kernel_task
, 0, kIOMapAnywhere
, mapoffset
, size
);
452 if (map
) addr
= map
->getAddress();
453 else kr
= kIOReturnError
;
455 // IOLog(">mapRef 0x%x %llx\n", kr, addr);
457 if (kIOReturnSuccess
!= kr
) break;
459 if (kIOReturnSuccess
!= kr
)
461 panic("prepare() fail 0x%x\n", kr
);
464 for (idx
= 0; idx
< size
; idx
+= sizeof(uint32_t))
466 offidx
= (idx
+ mapoffset
+ srcoffset
);
467 if ((srcsize
<= ptoa(5)) && (srcsize
> ptoa(2)) && !(page_mask
& srcoffset
))
469 if (offidx
< ptoa(2)) offidx
^= ptoa(1);
471 offidx
/= sizeof(uint32_t);
473 if (offidx
!= ((uint32_t*)addr
)[idx
/sizeof(uint32_t)])
475 panic("vm mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md
, map
, idx
, (long) srcoffset
, (long) mapoffset
);
476 kr
= kIOReturnBadMedia
;
480 if (sizeof(data
) != md
->readBytes(mapoffset
+ idx
, &data
, sizeof(data
))) data
= 0;
483 panic("phys mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md
, map
, idx
, (long) srcoffset
, (long) mapoffset
);
484 kr
= kIOReturnBadMedia
;
490 // IOLog("unmapRef %llx\n", addr);
492 if (kIOReturnSuccess
!= kr
) break;
496 if (kIOReturnSuccess
!= kr
) break;
498 if (kIOReturnSuccess
!= kr
) break;
501 if (kIOReturnSuccess
!= kr
) IOLog("FAIL: src 0x%lx @ 0x%lx, map 0x%lx @ 0x%lx\n",
502 (long) srcsize
, (long) srcoffset
, (long) size
, (long) mapoffset
);
504 assert(kr
== kIOReturnSuccess
);
506 vm_deallocate(kernel_map
, data
[0], bsize
);
507 // vm_deallocate(kernel_map, data[1], size);
512 #endif /* DEVELOPMENT || DEBUG */