/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 */

#include "options.h"
#include "vm.h"
#include "utils.h"
#include "region.h"
#include "sparse.h"

#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <errno.h>
#include <unistd.h>
#include <stdbool.h>
#include <assert.h>
#include <sys/queue.h>

/*
 * There should be better APIs to describe the shared region.
 * For now, some hackery.
 */

#include <mach/shared_region.h>

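/*
 * SHARED_REGION_BASE and SHARED_REGION_SIZE are per-architecture
 * constants from <mach/shared_region.h> delimiting the address range
 * where the dyld shared cache is mapped.
 */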
static __inline boolean_t
in_shared_region(mach_vm_address_t addr)
{
    const mach_vm_address_t base = SHARED_REGION_BASE;
    const mach_vm_address_t size = SHARED_REGION_SIZE;
    return addr >= base && addr < (base + size);
}

/*
 * On both x64 and arm, there's a globally-shared
 * read-only page at _COMM_PAGE_START_ADDRESS
 * which low-level library routines reference.
 *
 * On x64, somewhere randomly chosen between _COMM_PAGE_TEXT_ADDRESS
 * and the top of the user address space, there's the
 * pre-emption-free-zone read-execute page.
 */

#include <System/machine/cpu_capabilities.h>

static __inline boolean_t
in_comm_region(const mach_vm_address_t addr, const vm_region_submap_info_data_64_t *info)
{
    return addr >= _COMM_PAGE_START_ADDRESS &&
        SM_TRUESHARED == info->share_mode &&
        VM_INHERIT_SHARE == info->inheritance &&
        !info->external_pager && (info->max_protection & VM_PROT_WRITE) == 0;
}

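/*
 * Zero-fill-on-demand (ZFOD): an empty, anonymous mapping with no
 * resident, dirty, or swapped-out pages -- i.e. nothing that needs
 * bytes written to the core file.
 */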
static __inline boolean_t
in_zfod_region(const vm_region_submap_info_data_64_t *info)
{
    return info->share_mode == SM_EMPTY && !info->is_submap &&
        0 == info->object_id && !info->external_pager &&
        0 == info->pages_dirtied + info->pages_resident + info->pages_swapped_out;
}

static struct region *
new_region(mach_vm_offset_t vmaddr, mach_vm_size_t vmsize, const vm_region_submap_info_data_64_t *infop)
{
    struct region *r = calloc(1, sizeof (*r));
    assert(vmaddr != 0 && vmsize != 0);
    R_SETADDR(r, vmaddr);
    R_SETSIZE(r, vmsize);
    r->r_info = *infop;
    r->r_purgable = VM_PURGABLE_DENY;
    r->r_insharedregion = in_shared_region(vmaddr);
    r->r_incommregion = in_comm_region(vmaddr, &r->r_info);
    r->r_inzfodregion = in_zfod_region(&r->r_info);

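    /*
     * ZFOD regions are all zero, so they get the zfod (sparse)
     * operations vector; everything else gets the vanilla one.
     */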
    if (r->r_inzfodregion)
        r->r_op = &zfod_ops;
    else
        r->r_op = &vanilla_ops;
    return r;
}

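/*
 * Each deletion path below poisons the structure it frees with a
 * distinct 0xdeadbeeX value, so a stale pointer can be traced back
 * to the site that freed it.
 */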
void
del_fileref_region(struct region *r)
{
    assert(&fileref_ops == r->r_op);
    /* r->r_fileref->fr_libent is a reference into the name table */
    poison(r->r_fileref, 0xdeadbee9, sizeof (*r->r_fileref));
    free(r->r_fileref);
    poison(r, 0xdeadbeeb, sizeof (*r));
    free(r);
}

void
del_zfod_region(struct region *r)
{
    assert(&zfod_ops == r->r_op);
    assert(r->r_inzfodregion && 0 == r->r_nsubregions);
    assert(NULL == r->r_fileref);
    poison(r, 0xdeadbeed, sizeof (*r));
    free(r);
}

void
del_vanilla_region(struct region *r)
{
    assert(&vanilla_ops == r->r_op);
    assert(!r->r_inzfodregion && 0 == r->r_nsubregions);
    assert(NULL == r->r_fileref);
    poison(r, 0xdeadbeef, sizeof (*r));
    free(r);
}

/*
 * "does any part of this address range match the tag?"
 */
int
is_tagged(task_t task, mach_vm_offset_t addr, mach_vm_offset_t size, unsigned tag)
{
    mach_vm_offset_t vm_addr = addr;
    mach_vm_offset_t vm_size = 0;
    natural_t depth = 0;
    size_t pgsize = (1u << pageshift_host);

    do {
        mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
        vm_region_submap_info_data_64_t info;

        kern_return_t ret = mach_vm_region_recurse(task, &vm_addr, &vm_size, &depth, (vm_region_recurse_info_t)&info, &count);

        if (KERN_INVALID_ADDRESS == ret) {
            err_mach(ret, NULL, "invalid address at %llx", vm_addr);
            return -1;
        } else if (KERN_SUCCESS != ret) {
            /* KERN_FAILURE and anything else unexpected */
            err_mach(ret, NULL, "error inspecting task at %llx", vm_addr);
            return -1;
        }
        if (info.is_submap) {
            depth++;
            continue;
        }
        if (info.user_tag == tag)
            return 1;
        if (vm_addr + vm_size > addr + size)
            return 0;
        /* step forward one host page at a time */
        vm_addr += pgsize;
    } while (1);
}

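/*
 * Example (sketch): test whether any mapping in the shared region
 * carries a particular VM tag:
 *
 *    if (1 == is_tagged(task, SHARED_REGION_BASE, SHARED_REGION_SIZE, tag))
 *        ... the tag is present somewhere in that range ...
 */
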
STAILQ_HEAD(regionhead, region);

/*
 * XXX Need something like mach_vm_shared_region_recurse()
 * to properly identify the shared region address ranges as
 * we go.
 */

static int
walk_regions(task_t task, struct regionhead *rhead)
{
    mach_vm_offset_t vm_addr = MACH_VM_MIN_ADDRESS;
    natural_t depth = 0;

    if (OPTIONS_DEBUG(opt, 3)) {
        printf("Building raw region list\n");
        print_memory_region_header();
    }
    while (1) {
        vm_region_submap_info_data_64_t info;
        mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
        mach_vm_size_t vm_size;

        kern_return_t ret = mach_vm_region_recurse(task, &vm_addr, &vm_size, &depth, (vm_region_recurse_info_t)&info, &count);

        if (KERN_INVALID_ADDRESS == ret) {
            break;    /* loop termination: walked off the end of the address space */
        } else if (KERN_SUCCESS != ret) {
            /* KERN_FAILURE and anything else unexpected */
            err_mach(ret, NULL, "error inspecting task at %llx", vm_addr);
            goto bad;
        }

        if (OPTIONS_DEBUG(opt, 3)) {
            struct region *d = new_region(vm_addr, vm_size, &info);
            ROP_PRINT(d);
            ROP_DELETE(d);
        }

        if (info.is_submap) {
#ifdef CONFIG_SUBMAP
            /* We also want to see submaps -- for debugging purposes. */
            struct region *r = new_region(vm_addr, vm_size, &info);
            r->r_depth = depth;
            STAILQ_INSERT_TAIL(rhead, r, r_linkage);
#endif
            depth++;
            continue;
        }

        if (VM_MEMORY_IOKIT == info.user_tag) {
            vm_addr += vm_size;
            continue;    // ignore immediately: IO memory has side-effects
        }

        struct region *r = new_region(vm_addr, vm_size, &info);
#ifdef CONFIG_SUBMAP
        r->r_depth = depth;
#endif
        /* grab the page info of the first page in the mapping */

        mach_msg_type_number_t pageinfoCount = VM_PAGE_INFO_BASIC_COUNT;
        ret = mach_vm_page_info(task, R_ADDR(r), VM_PAGE_INFO_BASIC, (vm_page_info_t)&r->r_pageinfo, &pageinfoCount);
        if (KERN_SUCCESS != ret)
            err_mach(ret, r, "getting pageinfo at %llx", R_ADDR(r));

        /* record the purgeability */

        ret = mach_vm_purgable_control(task, vm_addr, VM_PURGABLE_GET_STATE, &r->r_purgable);
        if (KERN_SUCCESS != ret)
            r->r_purgable = VM_PURGABLE_DENY;

        STAILQ_INSERT_TAIL(rhead, r, r_linkage);

        vm_addr += vm_size;
    }

    return 0;
bad:
    return EX_OSERR;
}


void
del_region_list(struct regionhead *rhead)
{
    struct region *r, *t;

    STAILQ_FOREACH_SAFE(r, rhead, r_linkage, t) {
        STAILQ_REMOVE(rhead, r, region, r_linkage);
        ROP_DELETE(r);
    }
    free(rhead);
}

struct regionhead *
build_region_list(task_t task)
{
    struct regionhead *rhead = malloc(sizeof (*rhead));
    STAILQ_INIT(rhead);
    if (0 != walk_regions(task, rhead)) {
        del_region_list(rhead);
        return NULL;
    }
    return rhead;
}

int
walk_region_list(struct regionhead *rhead, walk_region_cbfn_t cbfn, void *arg)
{
    struct region *r, *t;

    STAILQ_FOREACH_SAFE(r, rhead, r_linkage, t) {
        switch (cbfn(r, arg)) {
        case WALK_CONTINUE:
            break;
        case WALK_DELETE_REGION:
            STAILQ_REMOVE(rhead, r, region, r_linkage);
            ROP_DELETE(r);
            break;
        case WALK_TERMINATE:
            goto done;
        case WALK_ERROR:
            return -1;
        }
    }
done:
    return 0;
}

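/*
 * Typical usage (sketch; the task port is obtained elsewhere, e.g.
 * via task_for_pid()):
 *
 *    struct regionhead *rhead = build_region_list(task);
 *    if (rhead != NULL) {
 *        walk_region_list(rhead, region_print_memory, NULL);
 *        del_region_list(rhead);
 *    }
 */
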
int pageshift_host;
int pageshift_app;

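/*
 * The kernel ("host") page size and this process's ("app") page size
 * can differ (e.g. a 4K user process on a 16K kernel), so the two
 * shifts are tracked separately.
 */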
void
setpageshift(void)
{
    if (0 == pageshift_host) {
        vm_size_t hps = 0;
        kern_return_t ret = host_page_size(MACH_PORT_NULL, &hps);
        if (KERN_SUCCESS != ret || hps == 0)
            err_mach(ret, NULL, "host page size");
        int pshift = 0;
        while (((vm_offset_t)1 << pshift) != hps)
            pshift++;
        pageshift_host = pshift;
    }
    if (OPTIONS_DEBUG(opt, 3))
        printf("host page size: %lu\n", 1ul << pageshift_host);

    if (0 == pageshift_app) {
        size_t psz = getpagesize();
        int pshift = 0;
        while ((1ul << pshift) != psz)
            pshift++;
        pageshift_app = pshift;
    }
    if (OPTIONS_DEBUG(opt, 3) && pageshift_app != pageshift_host)
        printf("app page size: %lu\n", 1ul << pageshift_app);
}

void
print_memory_region_header(void)
{
    printf("%-33s %c %-7s %-7s %8s %16s ",
        "Address Range", 'S', "Size", "Cur/Max", "Obj32", "FirstPgObjectID");
    printf("%9s %-3s %-11s %5s ",
        "Offset", "Tag", "Mode", "Refc");
#ifdef CONFIG_SUBMAP
    printf("%5s ", "Depth");
#endif
    printf("%5s %5s %5s %3s ",
        "Res", "SNP", "Dirty", "Pgr");
    printf("\n");
}

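/* One-character region classification, printed in the header's 'S' column */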
static __inline char
region_type(const struct region *r)
{
    if (r->r_fileref)
        return 'f';
    if (r->r_inzfodregion)
        return 'z';
    if (r->r_incommregion)
        return 'c';
    if (r->r_insharedregion)
        return 's';
    return ' ';
}

void
print_memory_region(const struct region *r)
{
    hsize_str_t hstr;
    tag_str_t tstr;

    printf("%016llx-%016llx %c %-7s %s/%s %8x %16llx ",
        R_ADDR(r), R_ENDADDR(r), region_type(r),
        str_hsize(hstr, R_SIZE(r)),
        str_prot(r->r_info.protection),
        str_prot(r->r_info.max_protection),
        r->r_info.object_id, r->r_pageinfo.object_id
    );

    printf("%9lld %3d %-11s %5u ",
        r->r_info.external_pager ?
            r->r_pageinfo.offset : r->r_info.offset,
        r->r_info.user_tag,
        str_shared(r->r_info.share_mode),
        r->r_info.ref_count
    );
#ifdef CONFIG_SUBMAP
    printf("%5u ", r->r_depth);
#endif

    if (!r->r_info.is_submap) {
        printf("%5u %5u %5u %3s ",
            r->r_info.pages_resident,
            r->r_info.pages_shared_now_private,
            r->r_info.pages_dirtied,
            r->r_info.external_pager ? "ext" : "");
        if (r->r_fileref)
            printf("\n %s at %lld ",
                r->r_fileref->fr_pathname,
                r->r_fileref->fr_offset);
        else
            printf("%s", str_tagr(tstr, r));
        printf("\n");
        if (r->r_nsubregions) {
            printf(" %-33s %7s %12s\t%s\n",
                "Address Range", "Size", "Type(s)", "Filename(s)");
            for (unsigned i = 0; i < r->r_nsubregions; i++) {
                struct subregion *s = r->r_subregions[i];
                printf(" %016llx-%016llx %7s %12s\t%s\n",
                    S_ADDR(s), S_ENDADDR(s),
                    str_hsize(hstr, S_SIZE(s)),
                    S_MACHO_TYPE(s),
                    S_FILENAME(s));
            }
        }
    } else {
        printf("%5s %5s %5s %3s %s\n", "", "", "", "", str_tagr(tstr, r));
    }
}

walk_return_t
region_print_memory(struct region *r, __unused void *arg)
{
    ROP_PRINT(r);
    return WALK_CONTINUE;
}

void
print_one_memory_region(const struct region *r)
{
    print_memory_region_header();
    ROP_PRINT(r);
}

#ifdef RDAR_23744374
/*
 * The reported size of a mapping to a file object gleaned from
 * mach_vm_region_recurse() can exceed the underlying size of the file.
 * If we attempt to write out the full reported size, the write fails
 * (EFAULT); if we compress it instead, we die with SIGBUS.
 *
 * See rdar://23744374
 *
 * Figure out what the "non-faulting" size of the object is to
 * *host* page size resolution.
 */
bool
is_actual_size(const task_t task, const struct region *r, mach_vm_size_t *hostvmsize)
{
    if (!r->r_info.external_pager ||
        (r->r_info.max_protection & VM_PROT_READ) == VM_PROT_NONE)
        return true;

    const size_t pagesize_host = 1ul << pageshift_host;
    const unsigned filepages = r->r_info.pages_resident +
        r->r_info.pages_swapped_out;

    if (pagesize_host * filepages == R_SIZE(r))
        return true;

    /*
     * Verify that the last couple of host-pagesize pages
     * of a file-backed mapping are actually pageable in the
     * underlying object by walking backwards from the end
     * of the application-pagesize mapping.
     */
    *hostvmsize = R_SIZE(r);

    const long npagemax = 1ul << (pageshift_app - pageshift_host);
    for (long npage = 0; npage < npagemax; npage++) {

        const mach_vm_address_t taddress =
            R_ENDADDR(r) - pagesize_host * (npage + 1);
        if (taddress < R_ADDR(r) || taddress >= R_ENDADDR(r))
            break;

        mach_msg_type_number_t pCount = VM_PAGE_INFO_BASIC_COUNT;
        vm_page_info_basic_data_t pInfo;

        kern_return_t ret = mach_vm_page_info(task, taddress, VM_PAGE_INFO_BASIC, (vm_page_info_t)&pInfo, &pCount);
        if (KERN_SUCCESS != ret) {
            err_mach(ret, NULL, "getting pageinfo at %llx", taddress);
            break;    /* bail */
        }

        /*
         * If this page has been in memory before, assume it can
         * be brought back again.
         */
        if (pInfo.disposition & (VM_PAGE_QUERY_PAGE_PRESENT | VM_PAGE_QUERY_PAGE_REF | VM_PAGE_QUERY_PAGE_DIRTY | VM_PAGE_QUERY_PAGE_PAGED_OUT))
            continue;

        /*
         * Force the page to be fetched to see if it faults;
         * valloc() yields a page-aligned buffer for the probe read.
         */
        mach_vm_size_t tsize = 1ul << pageshift_host;
        void *tmp = valloc((size_t)tsize);
        const mach_vm_address_t vtmp = (mach_vm_address_t)tmp;

        switch (ret = mach_vm_read_overwrite(task,
            taddress, tsize, vtmp, &tsize)) {
        case KERN_INVALID_ADDRESS:
            *hostvmsize = taddress - R_ADDR(r);
            break;
        case KERN_SUCCESS:
            break;
        default:
            err_mach(ret, NULL, "mach_vm_read_overwrite()");
            break;
        }
        free(tmp);
    }
    return R_SIZE(r) == *hostvmsize;
}
#endif