/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/vm_page.h>

#include <kern/ledger.h>
#include <kern/thread.h>
#if defined(__arm64__)
#include <pexpert/arm64/board_config.h>
#if XNU_MONITOR
#include <arm64/ppl/tests/shart.h>
#endif
#endif
extern ledger_template_t task_ledger_template;

extern boolean_t arm_force_fast_fault(ppnum_t, vm_prot_t, int, void*);
extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, bool, bool);
kern_return_t test_pmap_enter_disconnect(unsigned int num_loops);
kern_return_t test_pmap_iommu_disconnect(void);
kern_return_t test_pmap_extended(void);
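
/* Arbitrary page-aligned VA used as the base address for the test mappings. */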
#define PMAP_TEST_VA (0xDEAD << PAGE_SHIFT)

/* State shared between a test and its background disconnect thread. */
typedef struct {
	pmap_t pmap;
	volatile boolean_t stop;
	ppnum_t pn;
} pmap_test_thread_args;
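
/*
 * Create a pmap for testing, backed by a ledger instantiated from the
 * task ledger template.  Returns NULL if either allocation fails.
 */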
static pmap_t
pmap_create_wrapper(unsigned int flags)
{
	pmap_t new_pmap = NULL;
	ledger_t ledger;
	assert(task_ledger_template != NULL);
	if ((ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES)) == NULL) {
		return NULL;
	}
	new_pmap = pmap_create_options(ledger, 0, flags);
	ledger_dereference(ledger);
	return new_pmap;
}
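
/*
 * Body of the background thread: repeatedly disconnect the test page
 * until asked to stop, then wake the waiting test thread.
 */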
static void
pmap_disconnect_thread(void *arg, wait_result_t __unused wres)
{
	pmap_test_thread_args *args = arg;
	do {
		pmap_disconnect(args->pn);
	} while (!args->stop);
	thread_wakeup((event_t)args);
}
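
/*
 * Race pmap_enter() against a thread that concurrently calls
 * pmap_disconnect() on the same physical page; every enter must
 * still succeed.
 */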
kern_return_t
test_pmap_enter_disconnect(unsigned int num_loops)
{
	kern_return_t kr = KERN_SUCCESS;
	thread_t disconnect_thread;
	pmap_t new_pmap = pmap_create_wrapper(0);
	if (new_pmap == NULL) {
		return KERN_FAILURE;
	}
	vm_page_t m = vm_page_grab();
	if (m == VM_PAGE_NULL) {
		pmap_destroy(new_pmap);
		return KERN_FAILURE;
	}
	ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
	pmap_test_thread_args args = {new_pmap, FALSE, phys_page};
	kern_return_t res = kernel_thread_start(pmap_disconnect_thread, &args, &disconnect_thread);
	if (res) {
		pmap_destroy(new_pmap);
		vm_page_lock_queues();
		vm_page_free(m);
		vm_page_unlock_queues();
		return res;
	}

	thread_deallocate(disconnect_thread);

	while (num_loops-- != 0) {
		kr = pmap_enter(new_pmap, PMAP_TEST_VA, phys_page,
		    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* Ask the disconnect thread to stop, then wait for its wakeup. */
	assert_wait((event_t)&args, THREAD_UNINT);
	args.stop = TRUE;
	thread_block(THREAD_CONTINUE_NULL);

	pmap_remove(new_pmap, PMAP_TEST_VA, PMAP_TEST_VA + PAGE_SIZE);
	vm_page_lock_queues();
	vm_page_free(m);
	vm_page_unlock_queues();
	pmap_destroy(new_pmap);
	return KERN_SUCCESS;
}
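
/*
 * Exercise pmap_disconnect() against a page that is also mapped by a
 * test IOMMU instance ("shart", the PPL IOMMU test driver).
 */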
kern_return_t
test_pmap_iommu_disconnect(void)
{
#if XNU_MONITOR
	kern_return_t kr = KERN_SUCCESS;
	pmap_t new_pmap = pmap_create_wrapper(0);

	vm_page_t m = vm_page_grab();

	vm_page_lock_queues();
	if (m != VM_PAGE_NULL) {
		vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
	}
	vm_page_unlock_queues();

	shart_ppl *iommu = NULL;
	kr = pmap_iommu_init(shart_get_desc(), "sharttest0", NULL, 0, (ppl_iommu_state**)(&iommu));

	if (kr != KERN_SUCCESS) {
		goto cleanup;
	}

	if ((new_pmap == NULL) || (m == VM_PAGE_NULL) || (iommu == NULL)) {
		kr = KERN_FAILURE;
		goto cleanup;
	}

	ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);

	/* Four test segments, all backed by the same physical page. */
	const ppl_iommu_seg shart_segs[] = {
		{.iova = 0,
		 .paddr = ptoa(phys_page),
		 .nbytes = PAGE_SIZE,
		 .prot = VM_PROT_READ,
		 .refcon = 0},

		{.iova = 1,
		 .paddr = ptoa(phys_page),
		 .nbytes = PAGE_SIZE,
		 .prot = VM_PROT_READ | VM_PROT_WRITE,
		 .refcon = 0},

		{.iova = 2,
		 .paddr = ptoa(phys_page),
		 .nbytes = PAGE_SIZE,
		 .prot = VM_PROT_READ,
		 .refcon = 0},

		{.iova = 3,
		 .paddr = ptoa(phys_page),
		 .nbytes = PAGE_SIZE,
		 .prot = VM_PROT_READ,
		 .refcon = 0}
	};
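
	/*
	 * Phases 1-6 build up combinations of CPU and IOMMU mappings of the
	 * page and then call pmap_disconnect(): the disconnect must remove
	 * all CPU mappings, but the page must not read as free
	 * (pmap_verify_free()) until every IOMMU mapping is also unmapped.
	 */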

	/* Phase 1: one CPU mapping */
	kr = pmap_enter(new_pmap, PMAP_TEST_VA, phys_page, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	assert(kr == KERN_SUCCESS);
	assert(!pmap_verify_free(phys_page));
	pmap_disconnect(phys_page);
	assert(pmap_verify_free(phys_page));

	/* Phase 2: two CPU mappings */
	kr = pmap_enter(new_pmap, PMAP_TEST_VA, phys_page, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	assert(kr == KERN_SUCCESS);
	kr = pmap_enter(new_pmap, PMAP_TEST_VA + PAGE_SIZE, phys_page, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	assert(kr == KERN_SUCCESS);
	assert(!pmap_verify_free(phys_page));
	pmap_disconnect(phys_page);
	assert(pmap_verify_free(phys_page));

	/* Phase 3: one IOMMU mapping */
	kr = pmap_iommu_map(&iommu->super, shart_segs, 1, 0, NULL);
	assert(kr == KERN_SUCCESS);
	assert(!pmap_verify_free(phys_page));
	pmap_disconnect(phys_page);
	assert(!pmap_verify_free(phys_page));
	pmap_iommu_unmap(&iommu->super, shart_segs, 1, 0, NULL);
	assert(pmap_verify_free(phys_page));

	/* Phase 4: two IOMMU mappings */
	kr = pmap_iommu_map(&iommu->super, shart_segs, 2, 0, NULL);
	assert(kr == KERN_SUCCESS);
	assert(!pmap_verify_free(phys_page));
	pmap_disconnect(phys_page);
	assert(!pmap_verify_free(phys_page));
	pmap_iommu_unmap(&iommu->super, &shart_segs[1], 1, 0, NULL);
	assert(!pmap_verify_free(phys_page));
	pmap_disconnect(phys_page);
	assert(!pmap_verify_free(phys_page));
	pmap_iommu_unmap(&iommu->super, shart_segs, 1, 0, NULL);
	assert(pmap_verify_free(phys_page));

	/* Phase 5: combined CPU and IOMMU mappings */
	kr = pmap_iommu_map(&iommu->super, shart_segs, 1, 0, NULL);
	assert(kr == KERN_SUCCESS);
	kr = pmap_enter(new_pmap, PMAP_TEST_VA, phys_page, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	assert(kr == KERN_SUCCESS);
	kr = pmap_iommu_map(&iommu->super, &shart_segs[1], 2, 0, NULL);
	assert(kr == KERN_SUCCESS);
	kr = pmap_enter(new_pmap, PMAP_TEST_VA + PAGE_SIZE, phys_page, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	assert(kr == KERN_SUCCESS);
	kr = pmap_iommu_map(&iommu->super, &shart_segs[3], 1, 0, NULL);
	assert(kr == KERN_SUCCESS);
	assert(!pmap_verify_free(phys_page));
	pmap_disconnect(phys_page);
	assert(!pmap_verify_free(phys_page));
	pmap_iommu_unmap(&iommu->super, shart_segs, 4, 0, NULL);
	assert(pmap_verify_free(phys_page));

	/* Phase 6: differently combined CPU and IOMMU mappings */
	kr = pmap_enter(new_pmap, PMAP_TEST_VA, phys_page, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	assert(kr == KERN_SUCCESS);
	kr = pmap_iommu_map(&iommu->super, &shart_segs[1], 3, 0, NULL);
	assert(kr == KERN_SUCCESS);
	kr = pmap_enter(new_pmap, PMAP_TEST_VA + PAGE_SIZE, phys_page, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	assert(kr == KERN_SUCCESS);
	kr = pmap_iommu_map(&iommu->super, shart_segs, 1, 0, NULL);
	assert(kr == KERN_SUCCESS);
	kr = pmap_enter(new_pmap, PMAP_TEST_VA + (2 * PAGE_SIZE), phys_page, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	assert(kr == KERN_SUCCESS);
	assert(!pmap_verify_free(phys_page));
	pmap_iommu_unmap(&iommu->super, &shart_segs[2], 1, 0, NULL);
	assert(!pmap_verify_free(phys_page));
	pmap_disconnect(phys_page);
	assert(!pmap_verify_free(phys_page));
	pmap_iommu_unmap(&iommu->super, shart_segs, 4, 0, NULL);
	assert(pmap_verify_free(phys_page));
	pmap_disconnect(phys_page);
	assert(pmap_verify_free(phys_page));

	/* Phase 7: allocate contiguous memory and hand it to the shart */
	shart_more more_shart;
	more_shart.nbytes = (PAGE_SIZE * 5) + 42;
	more_shart.baseaddr = pmap_iommu_alloc_contiguous_pages(&iommu->super, more_shart.nbytes, 0, 0, VM_WIMG_DEFAULT);
	assert(more_shart.baseaddr != 0);

	kr = pmap_iommu_ioctl(&iommu->super, SHART_IOCTL_MORE, &more_shart, sizeof(more_shart), NULL, 0);
	assert(kr == KERN_SUCCESS);
	assert(iommu->extra_memory == more_shart.baseaddr);
	assert(iommu->extra_bytes == more_shart.nbytes);

	/* A second MORE ioctl must be rejected and must leave the recorded extra memory untouched. */
	more_shart.baseaddr += PAGE_SIZE;
	more_shart.nbytes -= PAGE_SIZE;
	kr = pmap_iommu_ioctl(&iommu->super, SHART_IOCTL_MORE, &more_shart, sizeof(more_shart), NULL, 0);
	assert(kr == KERN_NOT_SUPPORTED);
	kr = KERN_SUCCESS;
	assert(iommu->extra_memory == (more_shart.baseaddr - PAGE_SIZE));
	assert(iommu->extra_bytes == (more_shart.nbytes + PAGE_SIZE));
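
	/*
	 * Common exit path: tear down the test IOMMU instance, release the
	 * wired test page, and destroy the test pmap.
	 */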
cleanup:

	if (iommu != NULL) {
		pmap_iommu_ioctl(&iommu->super, SHART_IOCTL_TEARDOWN, NULL, 0, NULL, 0);
	}

	vm_page_lock_queues();
	if (m != VM_PAGE_NULL) {
		vm_page_unwire(m, TRUE);
		vm_page_free(m);
	}
	vm_page_unlock_queues();

	if (new_pmap != NULL) {
		pmap_destroy(new_pmap);
	}

	return kr;
#else
	return KERN_SUCCESS;
#endif
}

kern_return_t
test_pmap_extended(void)