/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <kern/ledger.h>
#include <kern/thread.h>
#if defined(__arm64__)
#include <pexpert/arm64/board_config.h>
#if XNU_MONITOR
#include <arm64/ppl/tests/shart.h>
#endif
#endif

extern ledger_template_t task_ledger_template;

extern boolean_t arm_force_fast_fault(ppnum_t, vm_prot_t, int, void*);
extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, bool, bool);

kern_return_t test_pmap_enter_disconnect(unsigned int num_loops);
kern_return_t test_pmap_iommu_disconnect(void);
kern_return_t test_pmap_extended(void);

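/* Fixed virtual address at which the tests below establish their CPU mappings. */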
#define PMAP_TEST_VA (0xDEAD << PAGE_SHIFT)

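/* Arguments handed to the disconnect helper thread. */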
typedef struct {
	pmap_t pmap;
	volatile boolean_t stop;
	ppnum_t pn;
} pmap_test_thread_args;

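/*
 * Create a throwaway pmap backed by a ledger freshly instantiated from the
 * task ledger template.  Returns NULL if either allocation fails.
 */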
static pmap_t
pmap_create_wrapper(unsigned int flags)
{
	pmap_t new_pmap = NULL;
	ledger_t ledger;
	assert(task_ledger_template != NULL);
	if ((ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES)) == NULL) {
		return NULL;
	}
	new_pmap = pmap_create_options(ledger, 0, flags);
	ledger_dereference(ledger);
	return new_pmap;
}

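/*
 * Helper thread body: repeatedly disconnect all mappings of the test page
 * until asked to stop, then wake the waiting test thread.
 */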
static void
pmap_disconnect_thread(void *arg, wait_result_t __unused wres)
{
	pmap_test_thread_args *args = arg;
	do {
		pmap_disconnect(args->pn);
	} while (!args->stop);
	thread_wakeup((event_t)args);
}

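/*
 * Race pmap_enter() on this thread against concurrent pmap_disconnect() of
 * the same physical page on a helper thread for num_loops iterations.
 */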
kern_return_t
test_pmap_enter_disconnect(unsigned int num_loops)
{
	kern_return_t kr = KERN_SUCCESS;
	thread_t disconnect_thread;
	pmap_t new_pmap = pmap_create_wrapper(0);
	if (new_pmap == NULL) {
		return KERN_FAILURE;
	}
	vm_page_t m = vm_page_grab();
	if (m == VM_PAGE_NULL) {
		pmap_destroy(new_pmap);
		return KERN_FAILURE;
	}
	ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
	pmap_test_thread_args args = {new_pmap, FALSE, phys_page};
	kern_return_t res = kernel_thread_start(pmap_disconnect_thread, &args, &disconnect_thread);
	if (res) {
		pmap_destroy(new_pmap);
		vm_page_lock_queues();
		vm_page_free(m);
		vm_page_unlock_queues();
		return res;
	}
	thread_deallocate(disconnect_thread);

	while (num_loops-- != 0) {
		kr = pmap_enter(new_pmap, PMAP_TEST_VA, phys_page,
		    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	assert_wait((event_t)&args, THREAD_UNINT);
	args.stop = TRUE;
	thread_block(THREAD_CONTINUE_NULL);

	pmap_remove(new_pmap, PMAP_TEST_VA, PMAP_TEST_VA + PAGE_SIZE);
	vm_page_lock_queues();
	vm_page_free(m);
	vm_page_unlock_queues();
	pmap_destroy(new_pmap);
	return KERN_SUCCESS;
}

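/*
 * Exercise pmap_disconnect() against combinations of CPU and (test "shart")
 * IOMMU mappings of the same physical page.  Only meaningful when XNU_MONITOR
 * (the PPL) is configured; otherwise the test is a no-op.
 */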
kern_return_t
test_pmap_iommu_disconnect(void)
{
#if XNU_MONITOR
	kern_return_t kr = KERN_SUCCESS;
	pmap_t new_pmap = pmap_create_wrapper(0);

	vm_page_t m = vm_page_grab();

	vm_page_lock_queues();
	if (m != VM_PAGE_NULL) {
		vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
	}
	vm_page_unlock_queues();

	shart_ppl *iommu = NULL;
	kr = pmap_iommu_init(shart_get_desc(), "sharttest0", NULL, 0, (ppl_iommu_state**)(&iommu));

	if (kr != KERN_SUCCESS) {
		goto cleanup;
	}

	if ((new_pmap == NULL) || (m == VM_PAGE_NULL) || (iommu == NULL)) {
		kr = KERN_FAILURE;
		goto cleanup;
	}

	ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);

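	/* Four IOMMU segments, all backed by the same physical test page. */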
	const ppl_iommu_seg shart_segs[] = {
		{.iova = 0,
		 .paddr = ptoa(phys_page),
		 .nbytes = PAGE_SIZE,
		 .prot = VM_PROT_READ,
		 .refcon = 0},

		{.iova = 1,
		 .paddr = ptoa(phys_page),
		 .nbytes = PAGE_SIZE,
		 .prot = VM_PROT_READ | VM_PROT_WRITE,
		 .refcon = 0},

		{.iova = 2,
		 .paddr = ptoa(phys_page),
		 .nbytes = PAGE_SIZE,
		 .prot = VM_PROT_READ,
		 .refcon = 0},

		{.iova = 3,
		 .paddr = ptoa(phys_page),
		 .nbytes = PAGE_SIZE,
		 .prot = VM_PROT_READ,
		 .refcon = 0}
	};

	/* Phase 1: one CPU mapping */
	kr = pmap_enter(new_pmap, PMAP_TEST_VA, phys_page, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	assert(kr == KERN_SUCCESS);
	assert(!pmap_verify_free(phys_page));
	pmap_disconnect(phys_page);
	assert(pmap_verify_free(phys_page));

	/* Phase 2: two CPU mappings */
	kr = pmap_enter(new_pmap, PMAP_TEST_VA, phys_page, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	assert(kr == KERN_SUCCESS);
	kr = pmap_enter(new_pmap, PMAP_TEST_VA + PAGE_SIZE, phys_page, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	assert(kr == KERN_SUCCESS);
	assert(!pmap_verify_free(phys_page));
	pmap_disconnect(phys_page);
	assert(pmap_verify_free(phys_page));

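	/*
	 * The remaining phases check that pmap_disconnect() removes CPU
	 * mappings but leaves IOMMU mappings in place: the page only reads
	 * as free again once every IOMMU segment has been unmapped.
	 */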
	/* Phase 3: one IOMMU mapping */
	kr = pmap_iommu_map(&iommu->super, shart_segs, 1, 0, NULL);
	assert(kr == KERN_SUCCESS);
	assert(!pmap_verify_free(phys_page));
	pmap_disconnect(phys_page);
	assert(!pmap_verify_free(phys_page));
	pmap_iommu_unmap(&iommu->super, shart_segs, 1, 0, NULL);
	assert(pmap_verify_free(phys_page));

	/* Phase 4: two IOMMU mappings */
	kr = pmap_iommu_map(&iommu->super, shart_segs, 2, 0, NULL);
	assert(kr == KERN_SUCCESS);
	assert(!pmap_verify_free(phys_page));
	pmap_disconnect(phys_page);
	assert(!pmap_verify_free(phys_page));
	pmap_iommu_unmap(&iommu->super, &shart_segs[1], 1, 0, NULL);
	assert(!pmap_verify_free(phys_page));
	pmap_disconnect(phys_page);
	assert(!pmap_verify_free(phys_page));
	pmap_iommu_unmap(&iommu->super, shart_segs, 1, 0, NULL);
	assert(pmap_verify_free(phys_page));

	/* Phase 5: combined CPU and IOMMU mappings */
	kr = pmap_iommu_map(&iommu->super, shart_segs, 1, 0, NULL);
	assert(kr == KERN_SUCCESS);
	kr = pmap_enter(new_pmap, PMAP_TEST_VA, phys_page, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	assert(kr == KERN_SUCCESS);
	kr = pmap_iommu_map(&iommu->super, &shart_segs[1], 2, 0, NULL);
	assert(kr == KERN_SUCCESS);
	kr = pmap_enter(new_pmap, PMAP_TEST_VA + PAGE_SIZE, phys_page, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	assert(kr == KERN_SUCCESS);
	kr = pmap_iommu_map(&iommu->super, &shart_segs[3], 1, 0, NULL);
	assert(kr == KERN_SUCCESS);
	assert(!pmap_verify_free(phys_page));
	pmap_disconnect(phys_page);
	assert(!pmap_verify_free(phys_page));
	pmap_iommu_unmap(&iommu->super, shart_segs, 4, 0, NULL);
	assert(pmap_verify_free(phys_page));

	/* Phase 6: differently combined CPU and IOMMU mappings */
	kr = pmap_enter(new_pmap, PMAP_TEST_VA, phys_page, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	assert(kr == KERN_SUCCESS);
	kr = pmap_iommu_map(&iommu->super, &shart_segs[1], 3, 0, NULL);
	assert(kr == KERN_SUCCESS);
	kr = pmap_enter(new_pmap, PMAP_TEST_VA + PAGE_SIZE, phys_page, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	assert(kr == KERN_SUCCESS);
	kr = pmap_iommu_map(&iommu->super, shart_segs, 1, 0, NULL);
	assert(kr == KERN_SUCCESS);
	kr = pmap_enter(new_pmap, PMAP_TEST_VA + (2 * PAGE_SIZE), phys_page, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	assert(kr == KERN_SUCCESS);
	assert(!pmap_verify_free(phys_page));
	pmap_iommu_unmap(&iommu->super, &shart_segs[2], 1, 0, NULL);
	assert(!pmap_verify_free(phys_page));
	pmap_disconnect(phys_page);
	assert(!pmap_verify_free(phys_page));
	pmap_iommu_unmap(&iommu->super, shart_segs, 4, 0, NULL);
	assert(pmap_verify_free(phys_page));
	pmap_disconnect(phys_page);
	assert(pmap_verify_free(phys_page));

	/* Phase 7: allocate contiguous memory and hand it to the shart */
	shart_more more_shart;
	more_shart.nbytes = (PAGE_SIZE * 5) + 42;
	more_shart.baseaddr = pmap_iommu_alloc_contiguous_pages(&iommu->super, more_shart.nbytes, 0, 0, VM_WIMG_DEFAULT);
	assert(more_shart.baseaddr != 0);

	kr = pmap_iommu_ioctl(&iommu->super, SHART_IOCTL_MORE, &more_shart, sizeof(more_shart), NULL, 0);
	assert(kr == KERN_SUCCESS);
	assert(iommu->extra_memory == more_shart.baseaddr);
	assert(iommu->extra_bytes == more_shart.nbytes);

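	/*
	 * A second SHART_IOCTL_MORE handoff is expected to be rejected and to
	 * leave the previously recorded region unchanged.
	 */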
	more_shart.baseaddr += PAGE_SIZE;
	more_shart.nbytes -= PAGE_SIZE;
	kr = pmap_iommu_ioctl(&iommu->super, SHART_IOCTL_MORE, &more_shart, sizeof(more_shart), NULL, 0);
	assert(kr == KERN_NOT_SUPPORTED);
	kr = KERN_SUCCESS;
	assert(iommu->extra_memory == (more_shart.baseaddr - PAGE_SIZE));
	assert(iommu->extra_bytes == (more_shart.nbytes + PAGE_SIZE));

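	/* Common exit path: tear down the shart, free the test page, and destroy the pmap. */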
cleanup:

	if (iommu != NULL) {
		pmap_iommu_ioctl(&iommu->super, SHART_IOCTL_TEARDOWN, NULL, 0, NULL, 0);
	}
	vm_page_lock_queues();
	if (m != VM_PAGE_NULL) {
		vm_page_free(m);
	}
	vm_page_unlock_queues();
	if (new_pmap != NULL) {
		pmap_destroy(new_pmap);
	}

	return kr;
#else
	return KERN_SUCCESS;
#endif
}

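/* Placeholder for extended pmap tests; currently a no-op that always succeeds. */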
kern_return_t
test_pmap_extended(void)
{
	return KERN_SUCCESS;
}