/* Mach vm map miscellaneous unit tests
 *
 * This test program serves as a regression test suite for legacy VM
 * issues. Ideally, each test is linked to a radar number and performs
 * a specific set of validations.
 *
 */
#include <darwintest.h>

#include <dlfcn.h>
#include <errno.h>
#include <ptrauth.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#include <sys/mman.h>

#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <mach/task.h>
#include <mach/task_info.h>
#include <mach/shared_region.h>
#include <machine/cpu_capabilities.h>

T_GLOBAL_META(T_META_NAMESPACE("xnu.vm"),
    T_META_RUN_CONCURRENTLY(true));

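/*
 * Helper for the memory_entry_tagging test (rdar://23334087).
 *
 * Allocates a region made of three chunks, each carrying a distinct VM
 * tag (100 + i), wraps the region in a memory entry (MAP_MEM_VM_COPY on
 * one pass, MAP_MEM_VM_SHARE on the other) and maps that entry back in.
 * It then verifies, via mach_vm_region_recurse(), that the new mapping
 * either inherits the per-chunk tags or, when "override_tag" is set,
 * uses the tag supplied at mach_vm_map() time (200) instead.
 */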
static void
test_memory_entry_tagging(int override_tag)
{
	int pass;
	int do_copy;
	kern_return_t kr;
	mach_vm_address_t vmaddr_orig, vmaddr_shared, vmaddr_copied;
	mach_vm_size_t vmsize_orig, vmsize_shared, vmsize_copied;
	mach_vm_address_t *vmaddr_ptr;
	mach_vm_size_t *vmsize_ptr;
	mach_vm_address_t vmaddr_chunk;
	mach_vm_size_t vmsize_chunk;
	mach_vm_offset_t vmoff;
	mach_port_t mem_entry_copied, mem_entry_shared;
	mach_port_t *mem_entry_ptr;
	int i;
	vm_region_submap_short_info_data_64_t ri;
	mach_msg_type_number_t ri_count;
	unsigned int depth;
	int vm_flags;
	int expected_tag;

	vmaddr_copied = 0;
	vmaddr_shared = 0;
	vmsize_copied = 0;
	vmsize_shared = 0;
	vmaddr_chunk = 0;
	vmsize_chunk = 16 * 1024;
	vmaddr_orig = 0;
	vmsize_orig = 3 * vmsize_chunk;
	mem_entry_copied = MACH_PORT_NULL;
	mem_entry_shared = MACH_PORT_NULL;
	pass = 0;

	vmaddr_orig = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr_orig,
	    vmsize_orig,
	    VM_FLAGS_ANYWHERE);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)",
	    override_tag, vmsize_orig);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	for (i = 0; i < vmsize_orig / vmsize_chunk; i++) {
		vmaddr_chunk = vmaddr_orig + (i * vmsize_chunk);
		kr = mach_vm_allocate(mach_task_self(),
		    &vmaddr_chunk,
		    vmsize_chunk,
		    (VM_FLAGS_FIXED |
		    VM_FLAGS_OVERWRITE |
		    VM_MAKE_TAG(100 + i)));
		T_QUIET;
		T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)",
		    override_tag, vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	for (vmoff = 0;
	    vmoff < vmsize_orig;
	    vmoff += PAGE_SIZE) {
		*((unsigned char *)(uintptr_t)(vmaddr_orig + vmoff)) = 'x';
	}

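	/*
	 * Run two passes: one with a MAP_MEM_VM_COPY memory entry and one
	 * with MAP_MEM_VM_SHARE.  Which variant goes first is picked
	 * pseudo-randomly from the low bit of time(); the second pass
	 * flips "do_copy" below.
	 */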
	do_copy = time(NULL) & 1;
again:
	*((unsigned char *)(uintptr_t)vmaddr_orig) = 'x';
	if (do_copy) {
		mem_entry_ptr = &mem_entry_copied;
		vmsize_copied = vmsize_orig;
		vmsize_ptr = &vmsize_copied;
		vmaddr_copied = 0;
		vmaddr_ptr = &vmaddr_copied;
		vm_flags = MAP_MEM_VM_COPY;
	} else {
		mem_entry_ptr = &mem_entry_shared;
		vmsize_shared = vmsize_orig;
		vmsize_ptr = &vmsize_shared;
		vmaddr_shared = 0;
		vmaddr_ptr = &vmaddr_shared;
		vm_flags = MAP_MEM_VM_SHARE;
	}
	kr = mach_make_memory_entry_64(mach_task_self(),
	    vmsize_ptr,
	    vmaddr_orig, /* offset */
	    (vm_flags |
	    VM_PROT_READ | VM_PROT_WRITE),
	    mem_entry_ptr,
	    MACH_PORT_NULL);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_make_memory_entry()",
	    override_tag, do_copy);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_EQ(*vmsize_ptr, vmsize_orig, "[override_tag:%d][do_copy:%d] vmsize (0x%llx) != vmsize_orig (0x%llx)",
	    override_tag, do_copy, (uint64_t) *vmsize_ptr, (uint64_t) vmsize_orig);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_NOTNULL(*mem_entry_ptr, "[override_tag:%d][do_copy:%d] mem_entry == 0x%x",
	    override_tag, do_copy, *mem_entry_ptr);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

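	/*
	 * Map the memory entry back into this task.  With "override_tag"
	 * set, pass an explicit VM_MAKE_TAG(200) so the new mapping's tag
	 * should replace the per-chunk tags of the backing memory.
	 */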
	*vmaddr_ptr = 0;
	if (override_tag) {
		vm_flags = VM_MAKE_TAG(200);
	} else {
		vm_flags = 0;
	}
	kr = mach_vm_map(mach_task_self(),
	    vmaddr_ptr,
	    vmsize_orig,
	    0, /* mask */
	    vm_flags | VM_FLAGS_ANYWHERE,
	    *mem_entry_ptr,
	    0, /* offset */
	    FALSE, /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_map()",
	    override_tag, do_copy);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	*((unsigned char *)(uintptr_t)vmaddr_orig) = 'X';
	if (*(unsigned char *)(uintptr_t)*vmaddr_ptr == 'X') {
		T_QUIET;
		T_EXPECT_EQ(do_copy, 0, "[override_tag:%d][do_copy:%d] memory shared instead of copied",
		    override_tag, do_copy);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	} else {
		T_QUIET;
		T_EXPECT_NE(do_copy, 0, "[override_tag:%d][do_copy:%d] memory copied instead of shared",
		    override_tag, do_copy);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

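	/*
	 * Walk the new mapping chunk by chunk and verify the user_tag
	 * reported by mach_vm_region_recurse(): 200 everywhere when the
	 * tag was overridden, otherwise the original 100 + i per chunk.
	 */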
	for (i = 0; i < vmsize_orig / vmsize_chunk; i++) {
		mach_vm_address_t vmaddr_info;
		mach_vm_size_t vmsize_info;

		vmaddr_info = *vmaddr_ptr + (i * vmsize_chunk);
		vmsize_info = 0;
		depth = 1;
		ri_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
		kr = mach_vm_region_recurse(mach_task_self(),
		    &vmaddr_info,
		    &vmsize_info,
		    &depth,
		    (vm_region_recurse_info_t) &ri,
		    &ri_count);
		T_QUIET;
		T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx)",
		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
		T_QUIET;
		T_EXPECT_EQ(vmaddr_info, *vmaddr_ptr + (i * vmsize_chunk), "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned addr 0x%llx",
		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk, vmaddr_info);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
		T_QUIET;
		T_EXPECT_EQ(vmsize_info, vmsize_chunk, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned size 0x%llx expected 0x%llx",
		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk, vmsize_info, vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
		if (override_tag) {
			expected_tag = 200;
		} else {
			expected_tag = 100 + i;
		}
		T_QUIET;
		T_EXPECT_EQ(ri.user_tag, expected_tag, "[override_tag:%d][do_copy:%d] i=%d tag=%d expected %d",
		    override_tag, do_copy, i, ri.user_tag, expected_tag);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	if (++pass < 2) {
		do_copy = !do_copy;
		goto again;
	}

done:
	if (vmaddr_orig != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_orig,
		    vmsize_orig);
		vmaddr_orig = 0;
		vmsize_orig = 0;
	}
	if (vmaddr_copied != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_copied,
		    vmsize_copied);
		vmaddr_copied = 0;
		vmsize_copied = 0;
	}
	if (vmaddr_shared != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_shared,
		    vmsize_shared);
		vmaddr_shared = 0;
		vmsize_shared = 0;
	}
	if (mem_entry_copied != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry_copied);
		mem_entry_copied = MACH_PORT_NULL;
	}
	if (mem_entry_shared != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry_shared);
		mem_entry_shared = MACH_PORT_NULL;
	}

	return;
}

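/*
 * Helper for the map_memory_entry test (rdar://22611816).
 *
 * Creates a MAP_MEM_VM_COPY memory entry for a 1-byte allocation, maps
 * it (with copy=TRUE) and verifies the two mappings are genuinely
 * distinct: writing '2' through the new mapping must not affect the
 * original byte.
 */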
static void
test_map_memory_entry(void)
{
	kern_return_t kr;
	mach_vm_address_t vmaddr1, vmaddr2;
	mach_vm_size_t vmsize1, vmsize2;
	mach_port_t mem_entry;
	unsigned char *cp1, *cp2;

	vmaddr1 = 0;
	vmsize1 = 0;
	vmaddr2 = 0;
	vmsize2 = 0;
	mem_entry = MACH_PORT_NULL;

	vmsize1 = 1;
	vmaddr1 = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate(%lld)", vmsize1);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	cp1 = (unsigned char *)(uintptr_t)vmaddr1;
	*cp1 = '1';

	vmsize2 = 1;
	mem_entry = MACH_PORT_NULL;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &vmsize2,
	    vmaddr1, /* offset */
	    (MAP_MEM_VM_COPY |
	    VM_PROT_READ | VM_PROT_WRITE),
	    &mem_entry,
	    MACH_PORT_NULL);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_GE(vmsize2, vmsize1, "vmsize2 (0x%llx) < vmsize1 (0x%llx)",
	    (uint64_t) vmsize2, (uint64_t) vmsize1);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_NOTNULL(mem_entry, "mem_entry == 0x%x", mem_entry);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	vmaddr2 = 0;
	kr = mach_vm_map(mach_task_self(),
	    &vmaddr2,
	    vmsize2,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0, /* offset */
	    TRUE, /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_map()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	cp2 = (unsigned char *)(uintptr_t)vmaddr2;
	T_QUIET;
	T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '1')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x",
	    *cp1, *cp2, '1', '1');
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	*cp2 = '2';
	T_QUIET;
	T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '2')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x",
	    *cp1, *cp2, '1', '2');
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	if (vmaddr1 != 0) {
		mach_vm_deallocate(mach_task_self(), vmaddr1, vmsize1);
		vmaddr1 = 0;
		vmsize1 = 0;
	}
	if (vmaddr2 != 0) {
		mach_vm_deallocate(mach_task_self(), vmaddr2, vmsize2);
		vmaddr2 = 0;
		vmsize2 = 0;
	}
	if (mem_entry != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry);
		mem_entry = MACH_PORT_NULL;
	}

	return;
}

T_DECL(memory_entry_tagging, "test mem entry tag for rdar://problem/23334087 \
	VM memory tags should be propagated through memory entries",
    T_META_ALL_VALID_ARCHS(true))
{
	test_memory_entry_tagging(0);
	test_memory_entry_tagging(1);
}

T_DECL(map_memory_entry, "test mapping mem entry for rdar://problem/22611816 \
	mach_make_memory_entry(MAP_MEM_VM_COPY) should never use a KERNEL_BUFFER \
	copy", T_META_ALL_VALID_ARCHS(true))
{
	test_map_memory_entry();
}

static char *vm_purgable_state[4] = { "NONVOLATILE", "VOLATILE", "EMPTY", "DENY" };

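/*
 * Returns the task's current physical footprint, as reported by
 * task_info(TASK_VM_INFO).  Used below to track how wiring and
 * purgeable state transitions affect the footprint.
 */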
static uint64_t
task_footprint(void)
{
	task_vm_info_data_t ti;
	kern_return_t kr;
	mach_msg_type_number_t count;

	count = TASK_VM_INFO_COUNT;
	kr = task_info(mach_task_self(),
	    TASK_VM_INFO,
	    (task_info_t) &ti,
	    &count);
	T_QUIET;
	T_ASSERT_MACH_SUCCESS(kr, "task_info()");
#if defined(__arm64__) || defined(__arm__)
	T_QUIET;
	T_ASSERT_EQ(count, TASK_VM_INFO_COUNT, "task_info() count = %d (expected %d)",
	    count, TASK_VM_INFO_COUNT);
#endif /* defined(__arm64__) || defined(__arm__) */
	return ti.phys_footprint;
}

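/*
 * Allocate 1MB of purgeable memory, touch every page, wire half of it,
 * then walk the region through VOLATILE -> EMPTY -> VOLATILE ->
 * NONVOLATILE while sampling the footprint at each step.  Mismatches
 * against the expected footprints are logged as warnings rather than
 * failures, since memory pressure can perturb the numbers.
 */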
T_DECL(purgeable_empty_to_volatile, "test task physical footprint when \
	emptying, volatilizing purgeable vm")
{
	kern_return_t kr;
	mach_vm_address_t vm_addr;
	mach_vm_size_t vm_size;
	char *cp;
	int ret;
	vm_purgable_t state;
	uint64_t footprint[8];

	vm_addr = 0;
	vm_size = 1 * 1024 * 1024;
	T_LOG("--> allocate %llu bytes", vm_size);
	kr = mach_vm_allocate(mach_task_self(),
	    &vm_addr,
	    vm_size,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	/* footprint0 */
	footprint[0] = task_footprint();
	T_LOG(" footprint[0] = %llu", footprint[0]);

	T_LOG("--> access %llu bytes", vm_size);
	for (cp = (char *) vm_addr;
	    cp < (char *) (vm_addr + vm_size);
	    cp += vm_kernel_page_size) {
		*cp = 'x';
	}
	/* footprint1 == footprint0 + vm_size */
	footprint[1] = task_footprint();
	T_LOG(" footprint[1] = %llu", footprint[1]);
	if (footprint[1] != footprint[0] + vm_size) {
		T_LOG("WARN: footprint[1] != footprint[0] + vm_size");
	}

	T_LOG("--> wire %llu bytes", vm_size / 2);
	ret = mlock((char *)vm_addr, (size_t) (vm_size / 2));
	T_ASSERT_POSIX_SUCCESS(ret, "mlock()");

	/* footprint2 == footprint1 */
	footprint[2] = task_footprint();
	T_LOG(" footprint[2] = %llu", footprint[2]);
	if (footprint[2] != footprint[1]) {
		T_LOG("WARN: footprint[2] != footprint[1]");
	}

	T_LOG("--> VOLATILE");
	state = VM_PURGABLE_VOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)");
	T_ASSERT_EQ(state, VM_PURGABLE_NONVOLATILE, "NONVOLATILE->VOLATILE: state was %s",
	    vm_purgable_state[state]);
	/* footprint3 == footprint2 - (vm_size / 2) */
	footprint[3] = task_footprint();
	T_LOG(" footprint[3] = %llu", footprint[3]);
	if (footprint[3] != footprint[2] - (vm_size / 2)) {
		T_LOG("WARN: footprint[3] != footprint[2] - (vm_size / 2)");
	}

	T_LOG("--> EMPTY");
	state = VM_PURGABLE_EMPTY;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(EMPTY)");
	if (state != VM_PURGABLE_VOLATILE &&
	    state != VM_PURGABLE_EMPTY) {
		T_ASSERT_FAIL("VOLATILE->EMPTY: state was %s",
		    vm_purgable_state[state]);
	}
	/* footprint4 == footprint3 */
	footprint[4] = task_footprint();
	T_LOG(" footprint[4] = %llu", footprint[4]);
	if (footprint[4] != footprint[3]) {
		T_LOG("WARN: footprint[4] != footprint[3]");
	}

	T_LOG("--> unwire %llu bytes", vm_size / 2);
	ret = munlock((char *)vm_addr, (size_t) (vm_size / 2));
	T_ASSERT_POSIX_SUCCESS(ret, "munlock()");

	/* footprint5 == footprint4 - (vm_size/2) (unless memory pressure) */
	/* footprint5 == footprint0 */
	footprint[5] = task_footprint();
	T_LOG(" footprint[5] = %llu", footprint[5]);
	if (footprint[5] != footprint[4] - (vm_size / 2)) {
		T_LOG("WARN: footprint[5] != footprint[4] - (vm_size/2)");
	}
	if (footprint[5] != footprint[0]) {
		T_LOG("WARN: footprint[5] != footprint[0]");
	}

	T_LOG("--> VOLATILE");
	state = VM_PURGABLE_VOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)");
	T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->VOLATILE: state == %s",
	    vm_purgable_state[state]);
	/* footprint6 == footprint5 */
	/* footprint6 == footprint0 */
	footprint[6] = task_footprint();
	T_LOG(" footprint[6] = %llu", footprint[6]);
	if (footprint[6] != footprint[5]) {
		T_LOG("WARN: footprint[6] != footprint[5]");
	}
	if (footprint[6] != footprint[0]) {
		T_LOG("WARN: footprint[6] != footprint[0]");
	}

	T_LOG("--> NONVOLATILE");
	state = VM_PURGABLE_NONVOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(NONVOLATILE)");
	T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->NONVOLATILE: state == %s",
	    vm_purgable_state[state]);
	/* footprint7 == footprint6 */
	/* footprint7 == footprint0 */
	footprint[7] = task_footprint();
	T_LOG(" footprint[7] = %llu", footprint[7]);
	if (footprint[7] != footprint[6]) {
		T_LOG("WARN: footprint[7] != footprint[6]");
	}
	if (footprint[7] != footprint[0]) {
		T_LOG("WARN: footprint[7] != footprint[0]");
	}
}

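/*
 * Share a 10MB region with vm_remap(copy=FALSE), verify both mappings
 * see the same bytes, then mark the original MADV_FREE_REUSABLE and
 * check that task_info() reports both mappings' worth of reusable
 * pages (2 * vmsize).
 */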
T_DECL(madvise_shared, "test madvise shared for rdar://problem/2295713 logging \
	rethink needs madvise(MADV_FREE_HARDER)",
    T_META_ALL_VALID_ARCHS(true))
{
	vm_address_t vmaddr = 0, vmaddr2 = 0;
	vm_size_t vmsize;
	kern_return_t kr;
	char *cp;
	vm_prot_t curprot, maxprot;
	int ret;
	task_vm_info_data_t ti;
	mach_msg_type_number_t ti_count;

	vmsize = 10 * 1024 * 1024; /* 10MB */
	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    VM_FLAGS_ANYWHERE);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	for (cp = (char *)(uintptr_t)vmaddr;
	    cp < (char *)(uintptr_t)(vmaddr + vmsize);
	    cp++) {
		*cp = 'x';
	}

	kr = vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr,
	    FALSE, /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_remap()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	for (cp = (char *)(uintptr_t)vmaddr2;
	    cp < (char *)(uintptr_t)(vmaddr2 + vmsize);
	    cp++) {
		T_QUIET;
		T_EXPECT_EQ(*cp, 'x', "vmaddr=%p vmaddr2=%p %p:0x%x",
		    (void *)(uintptr_t)vmaddr,
		    (void *)(uintptr_t)vmaddr2,
		    (void *)cp,
		    (unsigned char)*cp);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}
	cp = (char *)(uintptr_t)vmaddr;
	*cp = 'X';
	cp = (char *)(uintptr_t)vmaddr2;
	T_QUIET;
	T_EXPECT_EQ(*cp, 'X', "memory was not properly shared");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

#if defined(__x86_64__) || defined(__i386__)
	if (COMM_PAGE_READ(uint64_t, CPU_CAPABILITIES64) & kIsTranslated) {
		T_LOG("Skipping madvise reusable tests because we're running under translation.");
		goto done;
	}
#endif /* defined(__x86_64__) || defined(__i386__) */
	ret = madvise((char *)(uintptr_t)vmaddr,
	    vmsize,
	    MADV_FREE_REUSABLE);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	ti_count = TASK_VM_INFO_COUNT;
	kr = task_info(mach_task_self(),
	    TASK_VM_INFO,
	    (task_info_t) &ti,
	    &ti_count);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "task_info()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	T_QUIET;
	T_EXPECT_EQ(ti.reusable, 2ULL * vmsize, "ti.reusable=%lld expected %lld",
	    ti.reusable, (uint64_t)(2 * vmsize));
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	if (vmaddr != 0) {
		vm_deallocate(mach_task_self(), vmaddr, vmsize);
		vmaddr = 0;
	}
	if (vmaddr2 != 0) {
		vm_deallocate(mach_task_self(), vmaddr2, vmsize);
		vmaddr2 = 0;
	}
}

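/*
 * Purgeable memory is already reclaimable through the purgeable state
 * machine, so it must not also become eligible for the "reusable"
 * path: madvise(MADV_CAN_REUSE) on a purgeable allocation is expected
 * to fail with EINVAL.
 */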
T_DECL(madvise_purgeable_can_reuse, "test madvise purgeable can reuse for \
	rdar://problem/37476183 Preview Footprint memory regressions ~100MB \
	[ purgeable_malloc became eligible for reuse ]",
    T_META_ALL_VALID_ARCHS(true))
{
#if defined(__x86_64__) || defined(__i386__)
	if (COMM_PAGE_READ(uint64_t, CPU_CAPABILITIES64) & kIsTranslated) {
		T_SKIP("madvise reusable is not supported under Rosetta translation. Skipping.");
	}
#endif /* defined(__x86_64__) || defined(__i386__) */
	vm_address_t vmaddr = 0;
	vm_size_t vmsize;
	kern_return_t kr;
	char *cp;
	int ret;

	vmsize = 10 * 1024 * 1024; /* 10MB */
	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    (VM_FLAGS_ANYWHERE |
	    VM_FLAGS_PURGABLE |
	    VM_MAKE_TAG(VM_MEMORY_MALLOC)));
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	for (cp = (char *)(uintptr_t)vmaddr;
	    cp < (char *)(uintptr_t)(vmaddr + vmsize);
	    cp++) {
		*cp = 'x';
	}

	ret = madvise((char *)(uintptr_t)vmaddr,
	    vmsize,
	    MADV_CAN_REUSE);
	T_QUIET;
	T_EXPECT_TRUE(((ret == -1) && (errno == EINVAL)), "madvise(): purgeable vm can't be advised to reuse");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	if (vmaddr != 0) {
		vm_deallocate(mach_task_self(), vmaddr, vmsize);
		vmaddr = 0;
	}
}

#define DEST_PATTERN 0xFEDCBA98

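/*
 * mach_vm_read_overwrite() copies the interior of vmaddr1 (everything
 * but the first and last int) over the same window of vmaddr2.  The
 * checks below verify that only that window was overwritten and that
 * the surrounding DEST_PATTERN ints were left intact.
 */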
T_DECL(map_read_overwrite, "test overwriting vm map from other map - \
	rdar://31075370",
    T_META_ALL_VALID_ARCHS(true))
{
	kern_return_t kr;
	mach_vm_address_t vmaddr1, vmaddr2;
	mach_vm_size_t vmsize1, vmsize2;
	int *ip;
	int i;

	vmaddr1 = 0;
	vmsize1 = 4 * 4096;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	ip = (int *)(uintptr_t)vmaddr1;
	for (i = 0; i < vmsize1 / sizeof(*ip); i++) {
		ip[i] = i;
	}

	vmaddr2 = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr2,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	ip = (int *)(uintptr_t)vmaddr2;
	for (i = 0; i < vmsize1 / sizeof(*ip); i++) {
		ip[i] = DEST_PATTERN;
	}

	vmsize2 = vmsize1 - 2 * (sizeof(*ip));
	kr = mach_vm_read_overwrite(mach_task_self(),
	    vmaddr1 + sizeof(*ip),
	    vmsize2,
	    vmaddr2 + sizeof(*ip),
	    &vmsize2);
	T_ASSERT_MACH_SUCCESS(kr, "vm_read_overwrite()");

	ip = (int *)(uintptr_t)vmaddr2;
	for (i = 0; i < 1; i++) {
		T_QUIET;
		T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x",
		    i, ip[i], DEST_PATTERN);
	}
	for (; i < (vmsize1 - 2) / sizeof(*ip); i++) {
		T_QUIET;
		T_ASSERT_EQ(ip[i], i, "vmaddr2[%d] = 0x%x instead of 0x%x",
		    i, ip[i], i);
	}
	for (; i < vmsize1 / sizeof(*ip); i++) {
		T_QUIET;
		T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x",
		    i, ip[i], DEST_PATTERN);
	}
}

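/*
 * Regression test for rdar://35610377: a purgeable region (which the
 * radar title associates with COPY_NONE vm objects) is remapped
 * copy-on-write, and the result is remapped copy-on-write again.
 * Only the success of the two remap calls is checked here.
 */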
T_DECL(copy_none_use_pmap, "test copy-on-write remapping of COPY_NONE vm \
	objects - rdar://35610377",
    T_META_ALL_VALID_ARCHS(true))
{
	kern_return_t kr;
	mach_vm_address_t vmaddr1, vmaddr2, vmaddr3;
	mach_vm_size_t vmsize;
	vm_prot_t curprot, maxprot;

	vmsize = 32 * 1024 * 1024;

	vmaddr1 = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	memset((void *)(uintptr_t)vmaddr1, 'x', vmsize);

	vmaddr2 = 0;
	kr = mach_vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr1,
	    TRUE, /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #1");

	vmaddr3 = 0;
	kr = mach_vm_remap(mach_task_self(),
	    &vmaddr3,
	    vmsize,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr2,
	    TRUE, /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #2");
}

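/*
 * A purgeable allocation must not be convertible to non-purgeable:
 * setting its state to VM_PURGABLE_DENY is expected to fail with
 * KERN_INVALID_ARGUMENT.
 */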
T_DECL(purgable_deny, "test purgeable memory is not allowed to be converted to \
	non-purgeable - rdar://31990033",
    T_META_ALL_VALID_ARCHS(true))
{
	kern_return_t kr;
	vm_address_t vmaddr;
	vm_purgable_t state;

	vmaddr = 0;
	kr = vm_allocate(mach_task_self(), &vmaddr, 1,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	state = VM_PURGABLE_DENY;
	kr = vm_purgable_control(mach_task_self(), vmaddr,
	    VM_PURGABLE_SET_STATE, &state);
	T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
	    "vm_purgable_control(VM_PURGABLE_DENY) -> 0x%x (%s)",
	    kr, mach_error_string(kr));

	kr = vm_deallocate(mach_task_self(), vmaddr, 1);
	T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate()");
}

#define VMSIZE 0x10000

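/*
 * Degenerate sizes passed to mach_vm_remap() must be rejected: both a
 * zero size and an absurdly large size ((mach_vm_size_t)-2) should
 * return KERN_INVALID_ARGUMENT.
 */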
T_DECL(vm_remap_zero, "test vm map of zero size - rdar://33114981",
    T_META_ALL_VALID_ARCHS(true))
{
	kern_return_t kr;
	mach_vm_address_t vmaddr1, vmaddr2;
	mach_vm_size_t vmsize;
	vm_prot_t curprot, maxprot;

	vmaddr1 = 0;
	vmsize = VMSIZE;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize,
	    VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	vmaddr2 = 0;
	vmsize = 0;
	kr = mach_vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0,
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr1,
	    FALSE,
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)",
	    vmsize, kr, mach_error_string(kr));

	vmaddr2 = 0;
	vmsize = (mach_vm_size_t)-2;
	kr = mach_vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0,
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr1,
	    FALSE,
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)",
	    vmsize, kr, mach_error_string(kr));
}

extern int __shared_region_check_np(uint64_t *);

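/*
 * Regression test for rdar://41481703: create a MAP_MEM_VM_SHARE
 * memory entry for the first page of the shared region and map it.
 * Per the radar, this userspace path must not trigger nested-pmap
 * sharing; the calls themselves are simply expected to succeed.
 */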
T_DECL(nested_pmap_trigger, "nested pmap should only be triggered from kernel \
	- rdar://problem/41481703",
    T_META_ALL_VALID_ARCHS(true))
{
	int ret;
	kern_return_t kr;
	mach_vm_address_t sr_start;
	mach_vm_size_t vmsize;
	mach_vm_address_t vmaddr;
	mach_port_t mem_entry;

	ret = __shared_region_check_np(&sr_start);
	if (ret != 0) {
		int saved_errno;
		saved_errno = errno;

		T_ASSERT_EQ(saved_errno, ENOMEM, "__shared_region_check_np() %d (%s)",
		    saved_errno, strerror(saved_errno));
		T_END;
	}

	vmsize = PAGE_SIZE;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &vmsize,
	    sr_start,
	    MAP_MEM_VM_SHARE | VM_PROT_READ,
	    &mem_entry,
	    MACH_PORT_NULL);
	T_ASSERT_MACH_SUCCESS(kr, "make_memory_entry(0x%llx)", sr_start);

	vmaddr = 0;
	kr = mach_vm_map(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    0,
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0,
	    FALSE,
	    VM_PROT_READ,
	    VM_PROT_READ,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
}

static const char *prot_str[] = { "---", "r--", "-w-", "rw-", "--x", "r-x", "-wx", "rwx" };
static const char *share_mode_str[] = { "---", "COW", "PRIVATE", "EMPTY", "SHARED", "TRUESHARED", "PRIVATE_ALIASED", "SHARED_ALIASED", "LARGE_PAGE" };

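/*
 * Regression test for rdar://74469953: no combination of vm_remap(),
 * vm_remap_new() or mach_make_memory_entry_64() on a shared-region
 * mapping may hand out write access to the underlying shared memory.
 * Step 1 probes a read-only nested mapping, step 2 a writable one;
 * each remapping is written through and the original is re-read to
 * confirm copy-on-write behavior.
 */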
T_DECL(shared_region_share_writable, "sharing a writable mapping of the shared region should not give write access to shared region - rdar://problem/74469953",
    T_META_ALL_VALID_ARCHS(true))
{
	int ret;
	uint64_t sr_start;
	kern_return_t kr;
	mach_vm_address_t address, tmp_address, remap_address;
	mach_vm_size_t size, tmp_size, remap_size;
	uint32_t depth;
	mach_msg_type_number_t count;
	vm_region_submap_info_data_64_t info;
	vm_prot_t cur_prot, max_prot;
	uint32_t before, after, remap;
	mach_port_t mem_entry;

	ret = __shared_region_check_np(&sr_start);
	if (ret != 0) {
		int saved_errno;
		saved_errno = errno;

		T_ASSERT_EQ(saved_errno, ENOMEM, "__shared_region_check_np() %d (%s)",
		    saved_errno, strerror(saved_errno));
		T_END;
	}
	T_LOG("SHARED_REGION_BASE 0x%llx", SHARED_REGION_BASE);
	T_LOG("SHARED_REGION_SIZE 0x%llx", SHARED_REGION_SIZE);
	T_LOG("shared region starts at 0x%llx", sr_start);
	T_QUIET; T_ASSERT_GE(sr_start, SHARED_REGION_BASE,
	    "shared region starts below BASE");
	T_QUIET; T_ASSERT_LT(sr_start, SHARED_REGION_BASE + SHARED_REGION_SIZE,
	    "shared region starts above BASE+SIZE");

	/*
	 * Step 1 - check that one can not get write access to a read-only
	 * mapping in the shared region.
	 */
	size = 0;
	for (address = SHARED_REGION_BASE;
	    address < SHARED_REGION_BASE + SHARED_REGION_SIZE;
	    address += size) {
		size = 0;
		depth = 99;
		count = VM_REGION_SUBMAP_INFO_COUNT_64;
		kr = mach_vm_region_recurse(mach_task_self(),
		    &address,
		    &size,
		    &depth,
		    (vm_region_recurse_info_t)&info,
		    &count);
		T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_region_recurse()");
		if (kr == KERN_INVALID_ADDRESS) {
			T_SKIP("could not find read-only nested mapping");
			T_END;
		}
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
		T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
		    address, address + size, depth,
		    prot_str[info.protection],
		    prot_str[info.max_protection],
		    share_mode_str[info.share_mode],
		    info.object_id);
		if (depth > 0 &&
		    (info.protection == VM_PROT_READ) &&
		    (info.max_protection == VM_PROT_READ)) {
			/* nested and read-only: bingo! */
			break;
		}
	}
	if (address >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
		T_SKIP("could not find read-only nested mapping");
		T_END;
	}

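	/*
	 * The probe pattern used repeatedly below: snapshot the first word
	 * of the nested mapping ("before"), remap it, write "before + 1"
	 * through the new mapping, then re-read the original ("after").
	 * If before != after, the write leaked through to the shared
	 * region and copy-on-write was bypassed.
	 */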
	/* test vm_remap() of RO */
	before = *(uint32_t *)(uintptr_t)address;
	remap_address = 0;
	remap_size = size;
	kr = mach_vm_remap(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
	    mach_task_self(),
	    address,
	    FALSE,
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap()");
//	T_QUIET; T_ASSERT_EQ(cur_prot, VM_PROT_READ, "cur_prot is read-only");
//	T_QUIET; T_ASSERT_EQ(max_prot, VM_PROT_READ, "max_prot is read-only");
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
//	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
	/* check that new mapping is read-only */
	tmp_address = remap_address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, remap_address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "new cur_prot read-only");
//	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "new max_prot read-only");
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
	// this would crash if actually read-only:
//	*(uint32_t *)(uintptr_t)remap_address = before + 1;
	after = *(uint32_t *)(uintptr_t)address;
	T_LOG("vm_remap(): 0x%llx 0x%x -> 0x%x", address, before, after);
//	*(uint32_t *)(uintptr_t)remap_address = before;
	if (before != after) {
		T_FAIL("vm_remap() bypassed copy-on-write");
	} else {
		T_PASS("vm_remap() did not bypass copy-on-write");
	}
	/* cleanup */
	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
	T_PASS("vm_remap() read-only");

#if defined(VM_MEMORY_ROSETTA)
	if (dlsym(RTLD_DEFAULT, "mach_vm_remap_new") == NULL) {
		T_PASS("vm_remap_new() is not present");
		goto skip_vm_remap_new_ro;
	}
	/* test vm_remap_new() of RO */
	before = *(uint32_t *)(uintptr_t)address;
	remap_address = 0;
	remap_size = size;
	cur_prot = VM_PROT_READ | VM_PROT_WRITE;
	max_prot = VM_PROT_READ | VM_PROT_WRITE;
	kr = mach_vm_remap_new(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0,
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    address,
	    FALSE,
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_remap_new()");
	if (kr == KERN_PROTECTION_FAILURE) {
		/* wrong but not a security issue... */
		goto skip_vm_remap_new_ro;
	}
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap_new()");
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
	*(uint32_t *)(uintptr_t)remap_address = before + 1;
	after = *(uint32_t *)(uintptr_t)address;
	T_LOG("vm_remap_new(): 0x%llx 0x%x -> 0x%x", address, before, after);
	*(uint32_t *)(uintptr_t)remap_address = before;
	if (before != after) {
		T_FAIL("vm_remap_new() bypassed copy-on-write");
	} else {
		T_PASS("vm_remap_new() did not bypass copy-on-write");
	}
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
	T_PASS("vm_remap_new() read-only");
skip_vm_remap_new_ro:
#else /* defined(VM_MEMORY_ROSETTA) */
	/* pre-BigSur SDK: no vm_remap_new() */
	T_LOG("No vm_remap_new() to test");
#endif /* defined(VM_MEMORY_ROSETTA) */

	/* test mach_make_memory_entry_64(VM_SHARE) of RO */
	before = *(uint32_t *)(uintptr_t)address;
	remap_size = size;
	mem_entry = MACH_PORT_NULL;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &remap_size,
	    address,
	    MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE,
	    &mem_entry,
	    MACH_PORT_NULL);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
	if (kr == KERN_PROTECTION_FAILURE) {
		/* wrong but not a security issue... */
		goto skip_mem_entry_vm_share_ro;
	}
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
	remap_address = 0;
	kr = mach_vm_map(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0, /* offset */
	    FALSE, /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
	*(uint32_t *)(uintptr_t)remap_address = before + 1;
	after = *(uint32_t *)(uintptr_t)address;
	T_LOG("mem_entry(VM_SHARE): 0x%llx 0x%x -> 0x%x", address, before, after);
	*(uint32_t *)(uintptr_t)remap_address = before;
	if (before != after) {
		T_FAIL("mem_entry(VM_SHARE) bypassed copy-on-write");
	} else {
		T_PASS("mem_entry(VM_SHARE) did not bypass copy-on-write");
	}
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
	/* check that new mapping is a copy */
	tmp_address = remap_address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, remap_address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_EQ(depth, 0, "new mapping is unnested");
//	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "new cur_prot read-only");
//	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "new max_prot read-only");
	/* cleanup */
	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
	T_PASS("mem_entry(VM_SHARE) read-only");
skip_mem_entry_vm_share_ro:

	/* test mach_make_memory_entry_64() of RO */
	before = *(uint32_t *)(uintptr_t)address;
	remap_size = size;
	mem_entry = MACH_PORT_NULL;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &remap_size,
	    address,
	    VM_PROT_READ | VM_PROT_WRITE,
	    &mem_entry,
	    MACH_PORT_NULL);
	T_QUIET; T_ASSERT_EQ(kr, KERN_PROTECTION_FAILURE, "mach_make_memory_entry_64()");
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
//	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
	if (depth > 0) {
		T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
	}
	T_PASS("mem_entry() read-only");


	/*
	 * Step 2 - check that one can not share write access with a writable
	 * mapping in the shared region.
	 */
	size = 0;
	for (address = SHARED_REGION_BASE;
	    address < SHARED_REGION_BASE + SHARED_REGION_SIZE;
	    address += size) {
		size = 0;
		depth = 99;
		count = VM_REGION_SUBMAP_INFO_COUNT_64;
		kr = mach_vm_region_recurse(mach_task_self(),
		    &address,
		    &size,
		    &depth,
		    (vm_region_recurse_info_t)&info,
		    &count);
		T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_region_recurse()");
		if (kr == KERN_INVALID_ADDRESS) {
			T_SKIP("could not find writable nested mapping");
			T_END;
		}
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
		T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
		    address, address + size, depth,
		    prot_str[info.protection],
		    prot_str[info.max_protection],
		    share_mode_str[info.share_mode],
		    info.object_id);
		if (depth > 0 && (info.protection & VM_PROT_WRITE)) {
			/* nested and writable: bingo! */
			break;
		}
	}
	if (address >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
		T_SKIP("could not find writable nested mapping");
		T_END;
	}

	/* test vm_remap() of RW */
	before = *(uint32_t *)(uintptr_t)address;
	remap_address = 0;
	remap_size = size;
	kr = mach_vm_remap(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
	    mach_task_self(),
	    address,
	    FALSE,
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap()");
	if (!(cur_prot & VM_PROT_WRITE)) {
		T_LOG("vm_remap(): 0x%llx not writable %s/%s",
		    remap_address, prot_str[cur_prot], prot_str[max_prot]);
		T_ASSERT_FAIL("vm_remap() remapping not writable");
	}
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
	*(uint32_t *)(uintptr_t)remap_address = before + 1;
	after = *(uint32_t *)(uintptr_t)address;
	T_LOG("vm_remap(): 0x%llx 0x%x -> 0x%x", address, before, after);
	*(uint32_t *)(uintptr_t)remap_address = before;
	if (before != after) {
		T_FAIL("vm_remap() bypassed copy-on-write");
	} else {
		T_PASS("vm_remap() did not bypass copy-on-write");
	}
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
	T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
	/* cleanup */
	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");

#if defined(VM_MEMORY_ROSETTA)
	if (dlsym(RTLD_DEFAULT, "mach_vm_remap_new") == NULL) {
		T_PASS("vm_remap_new() is not present");
		goto skip_vm_remap_new_rw;
	}
	/* test vm_remap_new() of RW */
	before = *(uint32_t *)(uintptr_t)address;
	remap_address = 0;
	remap_size = size;
	cur_prot = VM_PROT_READ | VM_PROT_WRITE;
	max_prot = VM_PROT_READ | VM_PROT_WRITE;
	kr = mach_vm_remap_new(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0,
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    address,
	    FALSE,
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_remap_new()");
	if (kr == KERN_PROTECTION_FAILURE) {
		/* wrong but not a security issue... */
		goto skip_vm_remap_new_rw;
	}
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap_new()");
	if (!(cur_prot & VM_PROT_WRITE)) {
		T_LOG("vm_remap_new(): 0x%llx not writable %s/%s",
		    remap_address, prot_str[cur_prot], prot_str[max_prot]);
		T_ASSERT_FAIL("vm_remap_new() remapping not writable");
	}
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
	*(uint32_t *)(uintptr_t)remap_address = before + 1;
	after = *(uint32_t *)(uintptr_t)address;
	T_LOG("vm_remap_new(): 0x%llx 0x%x -> 0x%x", address, before, after);
	*(uint32_t *)(uintptr_t)remap_address = before;
	if (before != after) {
		T_FAIL("vm_remap_new() bypassed copy-on-write");
	} else {
		T_PASS("vm_remap_new() did not bypass copy-on-write");
	}
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
	T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
	/* cleanup */
	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
skip_vm_remap_new_rw:
#else /* defined(VM_MEMORY_ROSETTA) */
	/* pre-BigSur SDK: no vm_remap_new() */
	T_LOG("No vm_remap_new() to test");
#endif /* defined(VM_MEMORY_ROSETTA) */

1485 | /* test mach_make_memory_entry_64(VM_SHARE) of RW */ | |
1486 | before = *(uint32_t *)(uintptr_t)address; | |
1487 | remap_size = size; | |
1488 | mem_entry = MACH_PORT_NULL; | |
1489 | kr = mach_make_memory_entry_64(mach_task_self(), | |
1490 | &remap_size, | |
1491 | address, | |
1492 | MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE, | |
1493 | &mem_entry, | |
1494 | MACH_PORT_NULL); | |
1495 | T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)"); | |
1496 | if (kr == KERN_PROTECTION_FAILURE) { | |
1497 | /* wrong but not a security issue... */ | |
1498 | goto skip_mem_entry_vm_share_rw; | |
1499 | } | |
1500 | T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)"); | |
1501 | T_QUIET; T_ASSERT_EQ(remap_size, size, "mem_entry(VM_SHARE) should cover whole mapping"); | |
1502 | // T_LOG("AFTER MAKE_MEM_ENTRY(VM_SHARE) 0x%llx...", address); fflush(stdout); fflush(stderr); getchar(); | |
	remap_address = 0;
	kr = mach_vm_map(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0, /* offset */
	    FALSE, /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
//	T_LOG("AFTER VM_MAP 0x%llx...", remap_address); fflush(stdout); fflush(stderr); getchar();
	*(uint32_t *)(uintptr_t)remap_address = before + 1;
//	T_LOG("AFTER WRITE 0x%llx...", remap_address); fflush(stdout); fflush(stderr); getchar();
	after = *(uint32_t *)(uintptr_t)address;
	T_LOG("mem_entry(VM_SHARE): 0x%llx 0x%x -> 0x%x", address, before, after);
	*(uint32_t *)(uintptr_t)remap_address = before;
	if (before != after) {
		T_FAIL("mem_entry(VM_SHARE) bypassed copy-on-write");
	} else {
		T_PASS("mem_entry(VM_SHARE) did not bypass copy-on-write");
	}
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
	T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
	/* cleanup */
	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
	mach_port_deallocate(mach_task_self(), mem_entry);
skip_mem_entry_vm_share_rw:

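	/*
	 * A plain (non-VM_SHARE) read/write memory entry hands out the
	 * underlying object itself. For a range nested in the shared-region
	 * submap, creating it is expected to first unnest the range, i.e.
	 * trigger the submap's copy-on-write, so the entry only refers to
	 * this task's private copy. The assertions below check exactly that.
	 */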
	/* test mach_make_memory_entry_64() of RW */
	before = *(uint32_t *)(uintptr_t)address;
	remap_size = size;
	mem_entry = MACH_PORT_NULL;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &remap_size,
	    address,
	    VM_PROT_READ | VM_PROT_WRITE,
	    &mem_entry,
	    MACH_PORT_NULL);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64()");
	remap_address = 0;
	kr = mach_vm_map(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0, /* offset */
	    FALSE, /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
	*(uint32_t *)(uintptr_t)remap_address = before + 1;
	after = *(uint32_t *)(uintptr_t)address;
	T_LOG("mem_entry(): 0x%llx 0x%x -> 0x%x", address, before, after);
	*(uint32_t *)(uintptr_t)remap_address = before;
	/* check that region is no longer nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
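	/*
	 * The write through the new mapping should have reached the original
	 * address (both mappings share one object), but only after the range
	 * was unnested out of the submap (depth 0).
	 */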
	if (before != after) {
		if (depth == 0) {
			T_PASS("mem_entry() honored copy-on-write");
		} else {
			T_FAIL("mem_entry() did not trigger copy-on-write");
		}
	} else {
		T_FAIL("mem_entry() did not honor copy-on-write");
	}
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_EQ(depth, 0, "no longer nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
	T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
	/* cleanup */
	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
	mach_port_deallocate(mach_task_self(), mem_entry);
}

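/*
 * Walk every mapping in the shared region and attempt an in-place
 * mach_vm_copy() overwrite. For mappings still nested in the shared-region
 * submap, the copy must either fail with KERN_PROTECTION_FAILURE or leave
 * behind an unnested, non-executable private copy; it must never yield an
 * executable view of shared-cache text.
 */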
T_DECL(copyoverwrite_submap_protection,
    "test copy-overwrite vm region submap protection",
    T_META_ALL_VALID_ARCHS(true))
{
	kern_return_t kr;
	mach_vm_address_t vmaddr;
	mach_vm_size_t vmsize;
	natural_t depth;
	vm_region_submap_short_info_data_64_t region_info;
	mach_msg_type_number_t region_info_count;

	for (vmaddr = SHARED_REGION_BASE;
	    vmaddr < SHARED_REGION_BASE + SHARED_REGION_SIZE;
	    vmaddr += vmsize) {
		depth = 99;
		region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
		kr = mach_vm_region_recurse(mach_task_self(),
		    &vmaddr,
		    &vmsize,
		    &depth,
		    (vm_region_info_t)&region_info,
		    &region_info_count);
		if (kr == KERN_INVALID_ADDRESS) {
			break;
		}
		T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse(0x%llx)", vmaddr);
		T_ASSERT_EQ(region_info_count,
		    VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
		    "vm_region_recurse(0x%llx) count = %d expected %d",
		    vmaddr, region_info_count,
		    VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);

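		/*
		 * Only mappings nested in the shared-region submap
		 * (depth > 0) are of interest; try to overwrite each
		 * of those with a copy of itself.
		 */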
		T_LOG("--> region: vmaddr 0x%llx depth %d prot 0x%x/0x%x",
		    vmaddr, depth, region_info.protection,
		    region_info.max_protection);
		if (depth == 0) {
			/* not a submap mapping: next mapping */
			continue;
		}
		if (vmaddr >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
			break;
		}
		kr = mach_vm_copy(mach_task_self(),
		    vmaddr,
		    vmsize,
		    vmaddr);
		if (kr == KERN_PROTECTION_FAILURE) {
			T_PASS("vm_copy(0x%llx,0x%llx) expected prot error 0x%x (%s)",
			    vmaddr, vmsize, kr, mach_error_string(kr));
			continue;
		}
		T_ASSERT_MACH_SUCCESS(kr, "vm_copy(0x%llx,0x%llx) prot 0x%x",
		    vmaddr, vmsize, region_info.protection);
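		/*
		 * The copy succeeded: the overwrite should have pulled the
		 * mapping out of the submap. Verify it now sits at depth 0
		 * and is no longer executable.
		 */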
		depth = 0;
		region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
		kr = mach_vm_region_recurse(mach_task_self(),
		    &vmaddr,
		    &vmsize,
		    &depth,
		    (vm_region_info_t)&region_info,
		    &region_info_count);
		T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse(0x%llx)", vmaddr);
		T_ASSERT_EQ(region_info_count,
		    VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
		    "vm_region_recurse() count = %d expected %d",
		    region_info_count, VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);

		T_ASSERT_EQ(depth, 0, "vm_region_recurse(0x%llx): depth = %d expected 0",
		    vmaddr, depth);
		T_ASSERT_EQ((region_info.protection & VM_PROT_EXECUTE),
		    0, "vm_region_recurse(0x%llx): prot 0x%x",
		    vmaddr, region_info.protection);
	}
}

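/*
 * rdar://problem/16783546: wiring (mlock) code in the shared region used to
 * trigger code-signing violations. Wiring shared text must either fail
 * cleanly with EACCES or succeed without modifying the wired instructions.
 */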
T_DECL(wire_text,
    "test wired text for rdar://problem/16783546 Wiring code in "
    "the shared region triggers code-signing violations",
    T_META_ALL_VALID_ARCHS(true))
{
	uint32_t *addr, before, after;
	int retval;
	int saved_errno;
	kern_return_t kr;
	vm_address_t map_addr, remap_addr;
	vm_prot_t curprot, maxprot;

	addr = (uint32_t *)&printf;
#if __has_feature(ptrauth_calls)
	map_addr = (vm_address_t)(uintptr_t)ptrauth_strip(addr, ptrauth_key_function_pointer);
#else /* __has_feature(ptrauth_calls) */
	map_addr = (vm_address_t)(uintptr_t)addr;
#endif /* __has_feature(ptrauth_calls) */
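	/*
	 * Remap the page containing printf() to take an extra mapping of the
	 * shared text object before wiring it; presumably this exercises the
	 * wiring path on a shared (rather than private) mapping. On arm64e
	 * the function pointer is PAC-signed, hence the ptrauth_strip above.
	 */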
	remap_addr = 0;
	kr = vm_remap(mach_task_self(), &remap_addr, 4096,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(), map_addr,
	    FALSE, /* copy */
	    &curprot, &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_EQ(kr, KERN_SUCCESS, "vm_remap error 0x%x (%s)",
	    kr, mach_error_string(kr));
	before = *addr;
	retval = mlock(addr, 4096);
	after = *addr;
	if (retval != 0) {
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, EACCES, "wire shared text error %d (%s), expected: %d",
		    saved_errno, strerror(saved_errno), EACCES);
	} else if (after != before) {
		T_ASSERT_FAIL("shared text changed by wiring at %p 0x%x -> 0x%x", addr, before, after);
	} else {
		T_PASS("wire shared text");
	}

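	/* repeat the check with another shared-region text address */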
	addr = (uint32_t *)&fprintf;
	before = *addr;
	retval = mlock(addr, 4096);
	after = *addr;
	if (retval != 0) {
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, EACCES, "wire shared text error %d (%s), expected: %d",
		    saved_errno, strerror(saved_errno), EACCES);
	} else if (after != before) {
		T_ASSERT_FAIL("shared text changed by wiring at %p 0x%x -> 0x%x", addr, before, after);
	} else {
		T_PASS("wire shared text");
	}

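	/*
	 * Finally, wire text in the test binary itself, outside the shared
	 * region: mlock() may succeed here, but if it fails the only
	 * acceptable error is EACCES, and the wired instructions must be
	 * left unmodified either way.
	 */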
	addr = (uint32_t *)&testmain_wire_text;
	before = *addr;
	retval = mlock(addr, 4096);
	after = *addr;
	if (retval != 0) {
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, EACCES, "wire text error return error %d (%s)",
		    saved_errno, strerror(saved_errno));
	} else if (after != before) {
		T_ASSERT_FAIL("text changed by wiring at %p 0x%x -> 0x%x", addr, before, after);
	} else {
		T_PASS("wire text");
	}
}