/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 */
/* project-local headers (names assumed) */
#include "options.h"
#include "vm.h"
#include "region.h"
#include "utils.h"
#include "sparse.h"

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>

#include <mach/mach.h>
#include <mach/mach_vm.h>

#include <sys/queue.h>
/*
 * There should be better APIs to describe the shared region.
 * For now, some hackery.
 */
#include <mach/shared_region.h>
static __inline boolean_t
in_shared_region(mach_vm_address_t addr)
{
	const mach_vm_address_t base = SHARED_REGION_BASE;
	const mach_vm_address_t size = SHARED_REGION_SIZE;
	return addr >= base && addr < (base + size);
}
/*
 * On both x64 and arm, there's a globally-shared
 * read-only page at _COMM_PAGE_START_ADDRESS
 * which low-level library routines reference.
 *
 * On x64, somewhere randomly chosen between _COMM_PAGE_TEXT_ADDRESS
 * and the top of the user address space, there's the
 * pre-emption-free-zone read-execute page.
 */

#include <System/machine/cpu_capabilities.h>
static __inline boolean_t
in_comm_region(const mach_vm_address_t addr, const vm_region_submap_info_data_64_t *info)
{
	return addr >= _COMM_PAGE_START_ADDRESS &&
	    SM_TRUESHARED == info->share_mode &&
	    VM_INHERIT_SHARE == info->inheritance &&
	    !info->external_pager && (info->max_protection & VM_PROT_WRITE) == 0;
}
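
/*
 * A ZFOD (zero-fill-on-demand) region is anonymous memory that has
 * never been touched: empty share mode, no backing object or pager,
 * and no resident, dirty, or swapped-out pages.
 */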
static __inline boolean_t
in_zfod_region(const vm_region_submap_info_data_64_t *info)
{
	return info->share_mode == SM_EMPTY && !info->is_submap &&
	    0 == info->object_id && !info->external_pager &&
	    0 == info->pages_dirtied + info->pages_resident + info->pages_swapped_out;
}
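
/*
 * Allocate a region descriptor, capture the submap info, classify the
 * address range, and bind the appropriate operations vtable.
 */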
static struct region *
new_region(mach_vm_offset_t vmaddr, mach_vm_size_t vmsize, const vm_region_submap_info_data_64_t *infop)
{
	struct region *r = calloc(1, sizeof (*r));
	assert(vmaddr != 0 && vmsize != 0);
	R_SETADDR(r, vmaddr);
	R_SETSIZE(r, vmsize);
	r->r_info = *infop;
#ifdef CONFIG_PURGABLE
	r->r_purgable = VM_PURGABLE_DENY;
#endif
	r->r_insharedregion = in_shared_region(vmaddr);
	r->r_incommregion = in_comm_region(vmaddr, &r->r_info);
	r->r_inzfodregion = in_zfod_region(&r->r_info);
	if (r->r_inzfodregion)
		r->r_op = &zfod_ops;
	else
		r->r_op = &vanilla_ops;
	return r;
}
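
/*
 * The deletion helpers scribble a distinct 0xdeadbeeX pattern over
 * each structure before freeing it, so stale pointers are easy to
 * spot in a debugger.
 */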
#ifdef CONFIG_REFSC
void
del_fileref_region(struct region *r)
{
	assert(&fileref_ops == r->r_op);
	/* r->r_fileref->fr_libent is a reference into the name table */
	poison(r->r_fileref, 0xdeadbee9, sizeof (*r->r_fileref));
	free(r->r_fileref);
	poison(r, 0xdeadbeeb, sizeof (*r));
	free(r);
}
#endif /* CONFIG_REFSC */
void
del_zfod_region(struct region *r)
{
	assert(&zfod_ops == r->r_op);
	assert(r->r_inzfodregion && 0 == r->r_nsubregions);
	assert(NULL == r->r_fileref);
	poison(r, 0xdeadbeed, sizeof (*r));
	free(r);
}
void
del_vanilla_region(struct region *r)
{
	assert(&vanilla_ops == r->r_op);
	assert(!r->r_inzfodregion && 0 == r->r_nsubregions);
	assert(NULL == r->r_fileref);
	poison(r, 0xdeadbeef, sizeof (*r));
	free(r);
}
125 * "does any part of this address range match the tag?"
128 is_tagged(task_t task
, mach_vm_offset_t addr
, mach_vm_offset_t size
, unsigned tag
)
130 mach_vm_offset_t vm_addr
= addr
;
131 mach_vm_offset_t vm_size
= 0;
133 size_t pgsize
= (1u << pageshift_host
);
136 mach_msg_type_number_t count
= VM_REGION_SUBMAP_INFO_COUNT_64
;
137 vm_region_submap_info_data_64_t info
;
139 kern_return_t ret
= mach_vm_region_recurse(task
, &vm_addr
, &vm_size
, &depth
, (vm_region_recurse_info_t
)&info
, &count
);
141 if (KERN_FAILURE
== ret
) {
142 err_mach(ret
, NULL
, "error inspecting task at %llx", vm_addr
);
144 } else if (KERN_INVALID_ADDRESS
== ret
) {
145 err_mach(ret
, NULL
, "invalid address at %llx", vm_addr
);
147 } else if (KERN_SUCCESS
!= ret
) {
148 err_mach(ret
, NULL
, "error inspecting task at %llx", vm_addr
);
151 if (info
.is_submap
) {
155 if (info
.user_tag
== tag
)
157 if (vm_addr
+ vm_size
> addr
+ size
)
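
/*
 * Example (hypothetical caller): ask whether any page in a range was
 * allocated by malloc:
 *
 *	if (is_tagged(task, addr, size, VM_MEMORY_MALLOC) > 0)
 *		...
 */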
STAILQ_HEAD(regionhead, region);
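
/*
 * The macro above expands (per <sys/queue.h>) to:
 *
 *	struct regionhead {
 *		struct region *stqh_first;
 *		struct region **stqh_last;
 *	};
 */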
/*
 * XXX Need something like mach_vm_shared_region_recurse()
 * to properly identify the shared region address ranges as
 * we go.
 */
static int
walk_regions(task_t task, struct regionhead *rhead)
{
	mach_vm_offset_t vm_addr = MACH_VM_MIN_ADDRESS;
	natural_t depth = 0;

	if (opt->debug > 3)
		print_memory_region_header();

	while (1) {
		vm_region_submap_info_data_64_t info;
		mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
		mach_vm_size_t vm_size;

		kern_return_t ret = mach_vm_region_recurse(task, &vm_addr, &vm_size, &depth, (vm_region_recurse_info_t)&info, &count);

		if (KERN_FAILURE == ret) {
			err_mach(ret, NULL, "error inspecting task at %llx", vm_addr);
			goto bad;
		} else if (KERN_INVALID_ADDRESS == ret) {
			break;	/* loop termination */
		} else if (KERN_SUCCESS != ret) {
			err_mach(ret, NULL, "error inspecting task at %llx", vm_addr);
			goto bad;
		}

		if (opt->debug > 3) {
			struct region *d = new_region(vm_addr, vm_size, &info);
			ROP_PRINT(d);
			ROP_DELETE(d);
		}

		if (info.is_submap) {
			/* We also want to see submaps -- for debugging purposes. */
			struct region *r = new_region(vm_addr, vm_size, &info);
			r->r_depth = depth;
			STAILQ_INSERT_TAIL(rhead, r, r_linkage);
			depth++;
			continue;
		}

		if (VM_MEMORY_IOKIT == info.user_tag) {
			vm_addr += vm_size;
			continue;	// ignore immediately: IO memory has side-effects
		}

		struct region *r = new_region(vm_addr, vm_size, &info);

		/* grab the page info of the first page in the mapping */

		mach_msg_type_number_t pageinfoCount = VM_PAGE_INFO_BASIC_COUNT;
		ret = mach_vm_page_info(task, R_ADDR(r), VM_PAGE_INFO_BASIC, (vm_page_info_t)&r->r_pageinfo, &pageinfoCount);
		if (KERN_SUCCESS != ret)
			err_mach(ret, r, "getting pageinfo at %llx", R_ADDR(r));

#ifdef CONFIG_PURGABLE
		/* record the purgability */

		ret = mach_vm_purgable_control(task, vm_addr, VM_PURGABLE_GET_STATE, &r->r_purgable);
		if (KERN_SUCCESS != ret)
			r->r_purgable = VM_PURGABLE_DENY;
#endif /* CONFIG_PURGABLE */

		STAILQ_INSERT_TAIL(rhead, r, r_linkage);
		vm_addr += vm_size;
	}

	return 0;

bad:
	return -1;
}
void
del_region_list(struct regionhead *rhead)
{
	struct region *r, *t;

	STAILQ_FOREACH_SAFE(r, rhead, r_linkage, t) {
		STAILQ_REMOVE(rhead, r, region, r_linkage);
		ROP_DELETE(r);
	}
	free(rhead);
}
struct regionhead *
build_region_list(task_t task)
{
	struct regionhead *rhead = malloc(sizeof (*rhead));
	STAILQ_INIT(rhead);
	if (0 != walk_regions(task, rhead)) {
		del_region_list(rhead);
		return NULL;
	}
	return rhead;
}
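
/*
 * Typical lifecycle (sketch): build the list, walk it with a
 * callback, then tear it down:
 *
 *	struct regionhead *rh = build_region_list(task);
 *	if (rh != NULL) {
 *		walk_region_list(rh, region_print_memory, NULL);
 *		del_region_list(rh);
 *	}
 */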
void
walk_region_list(struct regionhead *rhead, walk_region_cbfn_t cbfn, void *arg)
{
	struct region *r, *t;

	STAILQ_FOREACH_SAFE(r, rhead, r_linkage, t) {
		switch (cbfn(r, arg)) {
		case WALK_CONTINUE:
			break;
		case WALK_DELETE_REGION:
			STAILQ_REMOVE(rhead, r, region, r_linkage);
			ROP_DELETE(r);
			break;
		default:
			return;
		}
	}
}
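
/*
 * Example callback (hypothetical): prune all zero-fill regions from
 * the list.
 *
 *	static walk_return_t
 *	drop_zfod(struct region *r, __unused void *arg)
 *	{
 *		return r->r_inzfodregion ? WALK_DELETE_REGION : WALK_CONTINUE;
 *	}
 *
 *	walk_region_list(rhead, drop_zfod, NULL);
 */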
void
setpageshift(void)
{
	if (0 == pageshift_host) {
		vm_size_t hps = 0;
		kern_return_t ret = host_page_size(MACH_PORT_NULL, &hps);
		if (KERN_SUCCESS != ret || hps == 0)
			err_mach(ret, NULL, "host page size");
		int pshift = 0;
		while (((vm_offset_t)1 << pshift) != hps)
			pshift++;
		pageshift_host = pshift;
	}
	if (opt->debug)
		printf("host page size: %lu\n", 1ul << pageshift_host);

	if (0 == pageshift_app) {
		size_t psz = getpagesize();
		int pshift = 0;
		while ((1ul << pshift) != psz)
			pshift++;
		pageshift_app = pshift;
	}
	if (opt->debug && pageshift_app != pageshift_host)
		printf("app page size: %lu\n", 1ul << pageshift_app);
}
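
/*
 * E.g. on an arm64 host with 16K pages, host_page_size() reports
 * 0x4000 and the loop above leaves pageshift_host == 14; a 4K-page
 * x86_64 host yields 12.
 */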
static const char *
strshared(const int sm)
{
	switch (sm) {
	case SM_PRIVATE_ALIASED:
		return "priv_alias";
	case SM_SHARED_ALIASED:
		return "shr_alias";
	default:
		return "share?";
	}
}
typedef char prot_str_t[9];	/* rwxNCWT& */
static char *
str_prot(prot_str_t pstr, const vm_prot_t prot)
{
	snprintf(pstr, sizeof (prot_str_t), "%c%c%c",
	    prot & VM_PROT_READ ? 'r' : '-',
	    prot & VM_PROT_WRITE ? 'w' : '-',
	    prot & VM_PROT_EXECUTE ? 'x' : '-');
	/* for completeness */
	if (prot & VM_PROT_NO_CHANGE)
		strlcat(pstr, "N", sizeof (prot_str_t));
	if (prot & VM_PROT_COPY)
		strlcat(pstr, "C", sizeof (prot_str_t));
	if (prot & VM_PROT_WANTS_COPY)
		strlcat(pstr, "W", sizeof (prot_str_t));
	if (prot & VM_PROT_TRUSTED)
		strlcat(pstr, "T", sizeof (prot_str_t));
	if (prot & VM_PROT_IS_MASK)
		strlcat(pstr, "&", sizeof (prot_str_t));
	return pstr;
}
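
/*
 * E.g. str_prot(pstr, VM_PROT_READ | VM_PROT_EXECUTE) yields "r-x".
 */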
static void
print_memory_region_header(void)
{
	printf("%-33s %c %-7s %-7s %8s %16s ",
	    "Address Range", 'S', "Size", "Cur/Max", "Obj32", "FirstPgObjectID");
	printf("%9s %-3s %-11s %5s ",
	    "Offset", "Tag", "Mode", "Refc");
	printf("%5s ", "Depth");
	printf("%5s %5s %5s %3s ",
	    "Res", "SNP", "Dirty", "Pgr");
	printf("\n");
}
static char
region_type(const struct region *r)
{
	if (r->r_fileref)
		return 'f';
	if (r->r_inzfodregion)
		return 'z';
	if (r->r_incommregion)
		return 'c';
	if (r->r_insharedregion)
		return 's';
	return ' ';
}
static void
print_memory_region(const struct region *r)
{
	hsize_str_t hstr;
	prot_str_t pstr, pstr_max;

	printf("%016llx-%016llx %c %-7s %s/%s %8x %16llx ",
	    R_ADDR(r), R_ENDADDR(r), region_type(r),
	    str_hsize(hstr, R_SIZE(r)),
	    str_prot(pstr, r->r_info.protection),
	    str_prot(pstr_max, r->r_info.max_protection),
	    r->r_info.object_id, r->r_pageinfo.object_id);

	printf("%9lld %3d %-11s %5u ",
	    r->r_info.external_pager ?
	        r->r_pageinfo.offset : r->r_info.offset,
	    r->r_info.user_tag,
	    strshared(r->r_info.share_mode),
	    r->r_info.ref_count);

	printf("%5u ", r->r_depth);
	if (!r->r_info.is_submap) {
		printf("%5u %5u %5u %3s ",
		    r->r_info.pages_resident,
		    r->r_info.pages_shared_now_private,
		    r->r_info.pages_dirtied,
		    r->r_info.external_pager ? "ext" : "");
		if (r->r_fileref)
			printf("\n %s at %lld ",
			    r->r_fileref->fr_libent->le_filename,
			    r->r_fileref->fr_offset);
		printf("\n");

		if (r->r_nsubregions) {
			printf(" %-33s %7s %12s\t%s\n",
			    "Address Range", "Size", "Type(s)", "Filename(s)");
			for (unsigned i = 0; i < r->r_nsubregions; i++) {
				struct subregion *s = r->r_subregions[i];
				printf(" %016llx-%016llx %7s %12s\t%s\n",
				    S_ADDR(s), S_ENDADDR(s),
				    str_hsize(hstr, S_SIZE(s)),
				    S_MACHO_TYPE(s),
				    S_FILENAME(s));
			}
		}
	} else {
		printf("\n");
		switch (r->r_info.user_tag) {
		case VM_MEMORY_SHARED_PMAP:
			printf("// VM_MEMORY_SHARED_PMAP");
			break;
		case VM_MEMORY_UNSHARED_PMAP:
			printf("// VM_MEMORY_UNSHARED_PMAP");
			break;
		default:
			printf("// is a submap");
			break;
		}
		printf("\n");
	}
}
walk_return_t
region_print_memory(struct region *r, __unused void *arg)
{
	ROP_PRINT(r);
	return WALK_CONTINUE;
}
/*
 * The reported size of a mapping to a file object gleaned from
 * mach_vm_region_recurse() can exceed the underlying size of the file.
 * If we attempt to write out the full reported size, the write fails
 * with EFAULT; if we compress it instead, we die with SIGBUS.
 *
 * See rdar://23744374
 *
 * Figure out what the "non-faulting" size of the object is to
 * *host* page size resolution.
 */
bool
is_actual_size(const task_t task, const struct region *r, mach_vm_size_t *hostvmsize)
{
	if (!r->r_info.external_pager ||
	    (r->r_info.max_protection & VM_PROT_READ) == VM_PROT_NONE)
		return true;

	const size_t pagesize_host = 1ul << pageshift_host;
	const unsigned filepages = r->r_info.pages_resident +
	    r->r_info.pages_swapped_out;

	if (pagesize_host * filepages == R_SIZE(r))
		return true;

	/*
	 * Verify that the last couple of host-pagesize pages
	 * of a file backed mapping are actually pageable in the
	 * underlying object by walking backwards from the end
	 * of the application-pagesize mapping.
	 */
	*hostvmsize = R_SIZE(r);

	const long npagemax = 1ul << (pageshift_app - pageshift_host);
	for (long npage = 0; npage < npagemax; npage++) {

		const mach_vm_address_t taddress =
		    R_ENDADDR(r) - pagesize_host * (npage + 1);
		if (taddress < R_ADDR(r) || taddress >= R_ENDADDR(r))
			break;

		mach_msg_type_number_t pCount = VM_PAGE_INFO_BASIC_COUNT;
		vm_page_info_basic_data_t pInfo;

		kern_return_t ret = mach_vm_page_info(task, taddress, VM_PAGE_INFO_BASIC, (vm_page_info_t)&pInfo, &pCount);
		if (KERN_SUCCESS != ret) {
			err_mach(ret, NULL, "getting pageinfo at %llx", taddress);
			break;
		}

		/*
		 * If this page has been in memory before, assume it can
		 * be brought back again.
		 */
		if (pInfo.disposition & (VM_PAGE_QUERY_PAGE_PRESENT | VM_PAGE_QUERY_PAGE_REF | VM_PAGE_QUERY_PAGE_DIRTY | VM_PAGE_QUERY_PAGE_PAGED_OUT))
			continue;

		/*
		 * Force the page to be fetched to see if it faults.
		 */
		mach_vm_size_t tsize = 1ul << pageshift_host;
		void *tmp = valloc((size_t)tsize);
		const mach_vm_address_t vtmp = (mach_vm_address_t)tmp;

		switch (ret = mach_vm_read_overwrite(task,
		    taddress, tsize, vtmp, &tsize)) {
		case KERN_INVALID_ADDRESS:
			*hostvmsize = taddress - R_ADDR(r);
			break;
		case KERN_SUCCESS:
			break;
		default:
			err_mach(ret, NULL, "mach_vm_read_overwrite()");
			break;
		}
		free(tmp);
	}
	return R_SIZE(r) == *hostvmsize;
}
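
/*
 * Example (sketch, hypothetical caller): clamp how much of a
 * file-backed region gets written into a core file:
 *
 *	mach_vm_size_t safesize;
 *	if (!is_actual_size(task, r, &safesize))
 *		size_to_write = safesize;	// trailing pages would fault
 */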