/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 */

#include "options.h"
#include "vm.h"
#include "utils.h"
#include "region.h"
#include "sparse.h"

#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <errno.h>
#include <unistd.h>
#include <stdbool.h>
#include <assert.h>

#include <sys/queue.h>

/*
 * There should be better APIs to describe the shared region.
 * For now, some hackery.
 */

#include <mach/shared_region.h>

static __inline boolean_t
in_shared_region(mach_vm_address_t addr)
{
    const mach_vm_address_t base = SHARED_REGION_BASE;
    const mach_vm_address_t size = SHARED_REGION_SIZE;
    return addr >= base && addr < (base + size);
}

/*
 * On both x64 and arm, there's a globally-shared
 * read-only page at _COMM_PAGE_START_ADDRESS
 * which low-level library routines reference.
 *
 * On x64, somewhere randomly chosen between _COMM_PAGE_TEXT_ADDRESS
 * and the top of the user address space, there's the
 * preemption-free zone (PFZ) read-execute page.
 */

#include <System/machine/cpu_capabilities.h>

static __inline boolean_t
in_comm_region(const mach_vm_address_t addr, const vm_region_submap_info_data_64_t *info)
{
    return addr >= _COMM_PAGE_START_ADDRESS &&
        SM_TRUESHARED == info->share_mode &&
        VM_INHERIT_SHARE == info->inheritance &&
        !info->external_pager && (info->max_protection & VM_PROT_WRITE) == 0;
}

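/*
 * "ZFOD" is zero-fill-on-demand: anonymous memory that has never been
 * touched, so it has no content that needs to be written to the core.
 */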
static __inline boolean_t
in_zfod_region(const vm_region_submap_info_data_64_t *info)
{
    return info->share_mode == SM_EMPTY && !info->is_submap &&
        0 == info->object_id && !info->external_pager &&
        0 == info->pages_dirtied + info->pages_resident + info->pages_swapped_out;
}

static struct region *
new_region(mach_vm_offset_t vmaddr, mach_vm_size_t vmsize, const vm_region_submap_info_data_64_t *infop)
{
    struct region *r = calloc(1, sizeof (*r));
    assert(r != NULL);
    assert(vmaddr != 0 && vmsize != 0);
    R_SETADDR(r, vmaddr);
    R_SETSIZE(r, vmsize);
    r->r_info = *infop;
#ifdef CONFIG_PURGABLE
    r->r_purgable = VM_PURGABLE_DENY;
#endif
    r->r_insharedregion = in_shared_region(vmaddr);
    r->r_incommregion = in_comm_region(vmaddr, &r->r_info);
    r->r_inzfodregion = in_zfod_region(&r->r_info);

    if (r->r_inzfodregion)
        r->r_op = &zfod_ops;
    else
        r->r_op = &vanilla_ops;
    return r;
}

#ifdef CONFIG_REFSC
void
del_fileref_region(struct region *r)
{
    assert(&fileref_ops == r->r_op);
    /* r->r_fileref->fr_libent is a reference into the name table */
    poison(r->r_fileref, 0xdeadbee9, sizeof (*r->r_fileref));
    free(r->r_fileref);
    poison(r, 0xdeadbeeb, sizeof (*r));
    free(r);
}
#endif /* CONFIG_REFSC */

void
del_zfod_region(struct region *r)
{
    assert(&zfod_ops == r->r_op);
    assert(r->r_inzfodregion && 0 == r->r_nsubregions);
#ifdef CONFIG_REFSC
    assert(NULL == r->r_fileref);
#endif
    poison(r, 0xdeadbeed, sizeof (*r));
    free(r);
}

void
del_vanilla_region(struct region *r)
{
    assert(&vanilla_ops == r->r_op);
    assert(!r->r_inzfodregion && 0 == r->r_nsubregions);
#ifdef CONFIG_REFSC
    assert(NULL == r->r_fileref);
#endif
    poison(r, 0xdeadbeef, sizeof (*r));
    free(r);
}

/*
 * "does any part of this address range match the tag?"
 */
int
is_tagged(task_t task, mach_vm_offset_t addr, mach_vm_offset_t size, unsigned tag)
{
    mach_vm_offset_t vm_addr = addr;
    mach_vm_offset_t vm_size = 0;
    natural_t depth = 0;
    size_t pgsize = (1u << pageshift_host);

    do {
        mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
        vm_region_submap_info_data_64_t info;

        kern_return_t ret = mach_vm_region_recurse(task, &vm_addr, &vm_size, &depth, (vm_region_recurse_info_t)&info, &count);

        if (KERN_INVALID_ADDRESS == ret) {
            err_mach(ret, NULL, "invalid address at %llx", vm_addr);
            return -1;
        } else if (KERN_SUCCESS != ret) {
            err_mach(ret, NULL, "error inspecting task at %llx", vm_addr);
            return -1;
        }
        if (info.is_submap) {
            depth++;
            continue;
        }
        if (info.user_tag == tag)
            return 1;
        if (vm_addr + vm_size > addr + size)
            return 0;
        /* step forward one host page and rescan */
        vm_addr += pgsize;
    } while (1);
}
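
/*
 * Usage sketch (illustrative): probe a range for malloc-tagged
 * memory.  The task port is assumed to have been obtained already,
 * e.g. via task_for_pid(), which requires appropriate privileges.
 *
 *	switch (is_tagged(task, vmaddr, vmsize, VM_MEMORY_MALLOC)) {
 *	case 1:		// some page in [vmaddr, vmaddr+vmsize) has the tag
 *	case 0:		// no page matches
 *	case -1:	// the region lookup itself failed
 *		break;
 *	}
 */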

STAILQ_HEAD(regionhead, region);

/*
 * XXX Need something like mach_vm_shared_region_recurse()
 * to properly identify the shared region address ranges as
 * we go.
 */

static int
walk_regions(task_t task, struct regionhead *rhead)
{
    mach_vm_offset_t vm_addr = MACH_VM_MIN_ADDRESS;
    natural_t depth = 0;

    if (opt->debug > 3)
        print_memory_region_header();

    while (1) {
        vm_region_submap_info_data_64_t info;
        mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
        mach_vm_size_t vm_size;

        kern_return_t ret = mach_vm_region_recurse(task, &vm_addr, &vm_size, &depth, (vm_region_recurse_info_t)&info, &count);

        if (KERN_INVALID_ADDRESS == ret) {
            break; /* address space exhausted: loop termination */
        } else if (KERN_SUCCESS != ret) {
            err_mach(ret, NULL, "error inspecting task at %llx", vm_addr);
            goto bad;
        }

        if (opt->debug > 3) {
            struct region *d = new_region(vm_addr, vm_size, &info);
            ROP_PRINT(d);
            ROP_DELETE(d);
        }

        if (info.is_submap) {
#ifdef CONFIG_SUBMAP
            /* We also want to see submaps -- for debugging purposes. */
            struct region *r = new_region(vm_addr, vm_size, &info);
            r->r_depth = depth;
            STAILQ_INSERT_TAIL(rhead, r, r_linkage);
#endif
            depth++;
            continue;
        }

        if (VM_MEMORY_IOKIT == info.user_tag) {
            vm_addr += vm_size;
            continue; // ignore immediately: IO memory has side-effects
        }

        struct region *r = new_region(vm_addr, vm_size, &info);
#ifdef CONFIG_SUBMAP
        r->r_depth = depth;
#endif
        /* grab the page info of the first page in the mapping */

        mach_msg_type_number_t pageinfoCount = VM_PAGE_INFO_BASIC_COUNT;
        ret = mach_vm_page_info(task, R_ADDR(r), VM_PAGE_INFO_BASIC, (vm_page_info_t)&r->r_pageinfo, &pageinfoCount);
        if (KERN_SUCCESS != ret)
            err_mach(ret, r, "getting pageinfo at %llx", R_ADDR(r));

#ifdef CONFIG_PURGABLE
        /* record the purgability */

        ret = mach_vm_purgable_control(task, vm_addr, VM_PURGABLE_GET_STATE, &r->r_purgable);
        if (KERN_SUCCESS != ret)
            r->r_purgable = VM_PURGABLE_DENY;
#endif
        STAILQ_INSERT_TAIL(rhead, r, r_linkage);

        vm_addr += vm_size;
    }

    return 0;
bad:
    return EX_OSERR;
}

void
del_region_list(struct regionhead *rhead)
{
    struct region *r, *t;

    STAILQ_FOREACH_SAFE(r, rhead, r_linkage, t) {
        STAILQ_REMOVE(rhead, r, region, r_linkage);
        ROP_DELETE(r);
    }
    free(rhead);
}

struct regionhead *
build_region_list(task_t task)
{
    struct regionhead *rhead = malloc(sizeof (*rhead));
    assert(rhead != NULL);
    STAILQ_INIT(rhead);
    if (0 != walk_regions(task, rhead)) {
        del_region_list(rhead);
        return NULL;
    }
    return rhead;
}

int
walk_region_list(struct regionhead *rhead, walk_region_cbfn_t cbfn, void *arg)
{
    struct region *r, *t;

    STAILQ_FOREACH_SAFE(r, rhead, r_linkage, t) {
        switch (cbfn(r, arg)) {
        case WALK_CONTINUE:
            break;
        case WALK_DELETE_REGION:
            STAILQ_REMOVE(rhead, r, region, r_linkage);
            ROP_DELETE(r);
            break;
        case WALK_TERMINATE:
            goto done;
        case WALK_ERROR:
            return -1;
        }
    }
done:
    return 0;
}
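
/*
 * Typical flow (an illustrative sketch), using region_print_memory()
 * defined later in this file as the callback:
 *
 *	struct regionhead *rhead = build_region_list(task);
 *	if (rhead != NULL) {
 *		walk_region_list(rhead, region_print_memory, NULL);
 *		del_region_list(rhead);
 *	}
 */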

int pageshift_host;
int pageshift_app;

void
setpageshift(void)
{
    if (0 == pageshift_host) {
        vm_size_t hps = 0;
        kern_return_t ret = host_page_size(MACH_PORT_NULL, &hps);
        if (KERN_SUCCESS != ret || hps == 0)
            err_mach(ret, NULL, "host page size");
        int pshift = 0;
        while (((vm_offset_t)1 << pshift) != hps)
            pshift++;
        pageshift_host = pshift;
    }
    if (opt->debug)
        printf("host page size: %lu\n", 1ul << pageshift_host);

    if (0 == pageshift_app) {
        size_t psz = getpagesize();
        int pshift = 0;
        while ((1ul << pshift) != psz)
            pshift++;
        pageshift_app = pshift;
    }
    if (opt->debug && pageshift_app != pageshift_host)
        printf("app page size: %lu\n", 1ul << pageshift_app);
}
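
/*
 * For example, a 4 KiB page yields a shift of 12 and a 16 KiB page a
 * shift of 14.  The two shifts can differ -- e.g. a 4 KiB-page process
 * on a 16 KiB-page host -- which is why both are tracked.
 */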

static const char *
strshared(const int sm)
{
    switch (sm) {
    case SM_COW:
        return "cow";
    case SM_PRIVATE:
        return "priv";
    case SM_EMPTY:
        return "empty";
    case SM_SHARED:
        return "shr";
    case SM_TRUESHARED:
        return "true_shr";
    case SM_PRIVATE_ALIASED:
        return "priv_alias";
    case SM_SHARED_ALIASED:
        return "shr_alias";
    case SM_LARGE_PAGE:
        return "large_pg";
    default:
        return "share?";
    }
}

typedef char prot_str_t[9]; /* "rwxNCWT&" + NUL */

static const char *
str_prot(prot_str_t pstr, const vm_prot_t prot)
{
    snprintf(pstr, sizeof (prot_str_t), "%c%c%c",
        prot & VM_PROT_READ ? 'r' : '-',
        prot & VM_PROT_WRITE ? 'w' : '-',
        prot & VM_PROT_EXECUTE ? 'x' : '-');
    /* for completeness */
    if (prot & VM_PROT_NO_CHANGE)
        strlcat(pstr, "N", sizeof (prot_str_t));
    if (prot & VM_PROT_COPY)
        strlcat(pstr, "C", sizeof (prot_str_t));
    if (prot & VM_PROT_WANTS_COPY)
        strlcat(pstr, "W", sizeof (prot_str_t));
    if (prot & 0x20) /* 0x20 == VM_PROT_TRUSTED where the SDK defines it */
        strlcat(pstr, "T", sizeof (prot_str_t));
    if (prot & VM_PROT_IS_MASK)
        strlcat(pstr, "&", sizeof (prot_str_t));
    return pstr;
}
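
/*
 * e.g. str_prot(pstr, VM_PROT_READ | VM_PROT_EXECUTE) yields "r-x".
 */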

void
print_memory_region_header(void)
{
    printf("%-33s %c %-7s %-7s %8s %16s ",
        "Address Range", 'S', "Size", "Cur/Max", "Obj32", "FirstPgObjectID");
    printf("%9s %-3s %-11s %5s ",
        "Offset", "Tag", "Mode", "Refc");
#ifdef CONFIG_SUBMAP
    printf("%5s ", "Depth");
#endif
    printf("%5s %5s %5s %3s ",
        "Res", "SNP", "Dirty", "Pgr");
    printf("\n");
}

static __inline char
region_type(const struct region *r)
{
#ifdef CONFIG_REFSC
    if (r->r_fileref)
        return 'f';
#endif
    if (r->r_inzfodregion)
        return 'z';
    if (r->r_incommregion)
        return 'c';
    if (r->r_insharedregion)
        return 's';
    return ' ';
}

void
print_memory_region(const struct region *r)
{
    prot_str_t pstr, pstr_max;
    hsize_str_t hstr;

    printf("%016llx-%016llx %c %-7s %s/%s %8x %16llx ",
        R_ADDR(r), R_ENDADDR(r), region_type(r),
        str_hsize(hstr, R_SIZE(r)),
        str_prot(pstr, r->r_info.protection),
        str_prot(pstr_max, r->r_info.max_protection),
        r->r_info.object_id, r->r_pageinfo.object_id
    );

    printf("%9lld %3d %-11s %5u ",
        r->r_info.external_pager ?
            r->r_pageinfo.offset : r->r_info.offset,
        r->r_info.user_tag,
        strshared(r->r_info.share_mode),
        r->r_info.ref_count
    );
#ifdef CONFIG_SUBMAP
    printf("%5u ", r->r_depth);
#endif

    if (!r->r_info.is_submap) {
        printf("%5u %5u %5u %3s ",
            r->r_info.pages_resident,
            r->r_info.pages_shared_now_private,
            r->r_info.pages_dirtied,
            r->r_info.external_pager ? "ext" : "");
#ifdef CONFIG_REFSC
        if (r->r_fileref)
            printf("\n %s at %lld ",
                r->r_fileref->fr_libent->le_filename,
                r->r_fileref->fr_offset);
#endif
        printf("\n");
        if (r->r_nsubregions) {
            printf(" %-33s %7s %12s\t%s\n",
                "Address Range", "Size", "Type(s)", "Filename(s)");
            for (unsigned i = 0; i < r->r_nsubregions; i++) {
                struct subregion *s = r->r_subregions[i];
                printf(" %016llx-%016llx %7s %12s\t%s\n",
                    S_ADDR(s), S_ENDADDR(s),
                    str_hsize(hstr, S_SIZE(s)),
                    S_MACHO_TYPE(s),
                    S_FILENAME(s));
            }
        }
    } else {
        switch (r->r_info.user_tag) {
        case VM_MEMORY_SHARED_PMAP:
            printf("// VM_MEMORY_SHARED_PMAP");
            break;
        case VM_MEMORY_UNSHARED_PMAP:
            printf("// VM_MEMORY_UNSHARED_PMAP");
            break;
        default:
            printf("// is a submap");
            break;
        }
        printf("\n");
    }
}

walk_return_t
region_print_memory(struct region *r, __unused void *arg)
{
    ROP_PRINT(r);
    return WALK_CONTINUE;
}

#ifdef RDAR_23744374
/*
 * The reported size of a mapping to a file object gleaned from
 * mach_vm_region_recurse() can exceed the underlying size of the file.
 * If we attempt to write out the full reported size, we get an error
 * (EFAULT), or if we compress it, we die with SIGBUS.
 *
 * See rdar://23744374
 *
 * Figure out what the "non-faulting" size of the object is to
 * *host* page size resolution.
 */
boolean_t
is_actual_size(const task_t task, const struct region *r, mach_vm_size_t *hostvmsize)
{
    if (!r->r_info.external_pager ||
        (r->r_info.max_protection & VM_PROT_READ) == VM_PROT_NONE)
        return true;

    const size_t pagesize_host = 1ul << pageshift_host;
    const unsigned filepages = r->r_info.pages_resident +
        r->r_info.pages_swapped_out;

    if (pagesize_host * filepages == R_SIZE(r))
        return true;

    /*
     * Verify that the last couple of host-pagesize pages
     * of a file backed mapping are actually pageable in the
     * underlying object by walking backwards from the end
     * of the application-pagesize mapping.
     */
    *hostvmsize = R_SIZE(r);

    const long npagemax = 1ul << (pageshift_app - pageshift_host);
    for (long npage = 0; npage < npagemax; npage++) {

        const mach_vm_address_t taddress =
            R_ENDADDR(r) - pagesize_host * (npage + 1);
        if (taddress < R_ADDR(r) || taddress >= R_ENDADDR(r))
            break;

        mach_msg_type_number_t pCount = VM_PAGE_INFO_BASIC_COUNT;
        vm_page_info_basic_data_t pInfo;

        kern_return_t ret = mach_vm_page_info(task, taddress, VM_PAGE_INFO_BASIC, (vm_page_info_t)&pInfo, &pCount);
        if (KERN_SUCCESS != ret) {
            err_mach(ret, NULL, "getting pageinfo at %llx", taddress);
            break; /* bail */
        }

        /*
         * If this page has been in memory before, assume it can
         * be brought back again
         */
        if (pInfo.disposition & (VM_PAGE_QUERY_PAGE_PRESENT | VM_PAGE_QUERY_PAGE_REF | VM_PAGE_QUERY_PAGE_DIRTY | VM_PAGE_QUERY_PAGE_PAGED_OUT))
            continue;

        /*
         * Force the page to be fetched to see if it faults
         */
        mach_vm_size_t tsize = 1ul << pageshift_host;
        void *tmp = valloc((size_t)tsize);
        if (NULL == tmp)
            break; /* allocation failed: give up on probing */
        const mach_vm_address_t vtmp = (mach_vm_address_t)tmp;

        switch (ret = mach_vm_read_overwrite(task,
            taddress, tsize, vtmp, &tsize)) {
        case KERN_INVALID_ADDRESS:
            *hostvmsize = taddress - R_ADDR(r);
            break;
        case KERN_SUCCESS:
            break;
        default:
            err_mach(ret, NULL, "mach_vm_read_overwrite()");
            break;
        }
        free(tmp);
    }
    return R_SIZE(r) == *hostvmsize;
}
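
/*
 * A caller might use this as follows (a sketch of the contract, not
 * code from this file): start from the reported size and clamp to the
 * non-faulting size when the full range isn't pageable.
 *
 *	mach_vm_size_t hostsize = R_SIZE(r);
 *	if (!is_actual_size(task, r, &hostsize))
 *		// only the first hostsize bytes can be read safely
 *		...
 */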
#endif /* RDAR_23744374 */