/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <sys/queue.h>

/*
 * There should be better APIs to describe the shared region.
 * For now, some hackery.
 */

#include <mach/shared_region.h>
static __inline boolean_t
in_shared_region(mach_vm_address_t addr)
{
    const mach_vm_address_t base = SHARED_REGION_BASE;
    const mach_vm_address_t size = SHARED_REGION_SIZE;
    return addr >= base && addr < (base + size);
}
/*
 * On both x64 and arm, there's a globally-shared
 * read-only page at _COMM_PAGE_START_ADDRESS
 * which low-level library routines reference.
 *
 * On x64, somewhere randomly chosen between _COMM_PAGE_TEXT_ADDRESS
 * and the top of the user address space, there's the
 * pre-emption-free-zone read-execute page.
 */

#include <System/machine/cpu_capabilities.h>
static __inline boolean_t
in_comm_region(const mach_vm_address_t addr, const vm_region_submap_info_data_64_t *info)
{
    return addr >= _COMM_PAGE_START_ADDRESS &&
        SM_TRUESHARED == info->share_mode &&
        VM_INHERIT_SHARE == info->inheritance &&
        !info->external_pager && (info->max_protection & VM_PROT_WRITE) == 0;
}
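/*
 * A ZFOD (zero-fill-on-demand) region is anonymous memory that has never
 * been touched: no backing object, no pager, and no resident, dirty or
 * swapped-out pages. Its contents are all zeroes, so nothing needs to be
 * captured for it.
 */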
static __inline boolean_t
in_zfod_region(const vm_region_submap_info_data_64_t *info)
{
    return info->share_mode == SM_EMPTY && !info->is_submap &&
        0 == info->object_id && !info->external_pager &&
        0 == info->pages_dirtied + info->pages_resident + info->pages_swapped_out;
}
static struct region *
new_region(mach_vm_offset_t vmaddr, mach_vm_size_t vmsize, const vm_region_submap_info_data_64_t *infop)
{
    struct region *r = calloc(1, sizeof (*r));
    assert(vmaddr != 0 && vmsize != 0);
    /* the region's address range [vmaddr, vmaddr + vmsize) is recorded in *r */
    r->r_info = *infop;
    r->r_purgable = VM_PURGABLE_DENY;
    r->r_insharedregion = in_shared_region(vmaddr);
    r->r_incommregion = in_comm_region(vmaddr, &r->r_info);
    r->r_inzfodregion = in_zfod_region(&r->r_info);

    if (r->r_inzfodregion)
        r->r_op = &zfod_ops;
    else
        r->r_op = &vanilla_ops;
    return r;
}
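/*
 * Region deleters, one per r_op flavor. Each asserts it is being applied
 * to the right kind of region, then fills the structure with a distinct
 * 0xdeadbeeX pattern so that stale references are easy to spot in a
 * debugger.
 */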
static void
del_fileref_region(struct region *r)
{
    assert(&fileref_ops == r->r_op);
    /* r->r_fileref->fr_libent is a reference into the name table */
    poison(r->r_fileref, 0xdeadbee9, sizeof (*r->r_fileref));
    poison(r, 0xdeadbeeb, sizeof (*r));
}
static void
del_zfod_region(struct region *r)
{
    assert(&zfod_ops == r->r_op);
    assert(r->r_inzfodregion && 0 == r->r_nsubregions);
    assert(NULL == r->r_fileref);
    poison(r, 0xdeadbeed, sizeof (*r));
}
static void
del_vanilla_region(struct region *r)
{
    assert(&vanilla_ops == r->r_op);
    assert(!r->r_inzfodregion && 0 == r->r_nsubregions);
    assert(NULL == r->r_fileref);
    poison(r, 0xdeadbeef, sizeof (*r));
}
116 * "does any part of this address range match the tag?"
boolean_t
is_tagged(task_t task, mach_vm_offset_t addr, mach_vm_offset_t size, unsigned tag)
{
    mach_vm_offset_t vm_addr = addr;
    mach_vm_offset_t vm_size = 0;
    natural_t depth = 0;
    size_t pgsize = (1u << pageshift_host);

    do {
        mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
        vm_region_submap_info_data_64_t info;

        kern_return_t ret = mach_vm_region_recurse(task, &vm_addr, &vm_size, &depth, (vm_region_recurse_info_t)&info, &count);

        if (KERN_FAILURE == ret) {
            err_mach(ret, NULL, "error inspecting task at %llx", vm_addr);
            return FALSE;
        } else if (KERN_INVALID_ADDRESS == ret) {
            err_mach(ret, NULL, "invalid address at %llx", vm_addr);
            return FALSE;
        } else if (KERN_SUCCESS != ret) {
            err_mach(ret, NULL, "error inspecting task at %llx", vm_addr);
            return FALSE;
        }
        if (info.is_submap) {
            depth++;    /* recurse into the submap at the same address */
            continue;
        }
        if (info.user_tag == tag)
            return TRUE;
        if (vm_addr + vm_size > addr + size)
            return FALSE;
        vm_addr += vm_size;
    } while (1);
}
STAILQ_HEAD(regionhead, region);
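/*
 * The region list is kept as a singly-linked tail queue (<sys/queue.h>),
 * built in ascending address order by walk_regions() below.
 */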
/*
 * XXX Need something like mach_vm_shared_region_recurse()
 * to properly identify the shared region address ranges as
 * such.
 */
static int
walk_regions(task_t task, struct regionhead *rhead)
{
    mach_vm_offset_t vm_addr = MACH_VM_MIN_ADDRESS;
    natural_t depth = 0;

    if (OPTIONS_DEBUG(opt, 3)) {
        printf("Building raw region list\n");
        print_memory_region_header();
    }
    while (1) {
        vm_region_submap_info_data_64_t info;
        mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
        mach_vm_size_t vm_size;

        kern_return_t ret = mach_vm_region_recurse(task, &vm_addr, &vm_size, &depth, (vm_region_recurse_info_t)&info, &count);

        if (KERN_FAILURE == ret) {
            err_mach(ret, NULL, "error inspecting task at %llx", vm_addr);
            return -1;
        } else if (KERN_INVALID_ADDRESS == ret) {
            break;  /* loop termination */
        } else if (KERN_SUCCESS != ret) {
            err_mach(ret, NULL, "error inspecting task at %llx", vm_addr);
            return -1;
        }

        if (OPTIONS_DEBUG(opt, 3)) {
            struct region *d = new_region(vm_addr, vm_size, &info);
            print_memory_region(d);
        }

        if (info.is_submap) {
            /* We also want to see submaps -- for debugging purposes. */
            struct region *r = new_region(vm_addr, vm_size, &info);
            r->r_depth = depth;
            STAILQ_INSERT_TAIL(rhead, r, r_linkage);
            depth++;
            continue;
        }

        if (VM_MEMORY_IOKIT == info.user_tag) {
            vm_addr += vm_size;
            continue; // ignore immediately: IO memory has side-effects
        }

        struct region *r = new_region(vm_addr, vm_size, &info);

        /* grab the page info of the first page in the mapping */

        mach_msg_type_number_t pageinfoCount = VM_PAGE_INFO_BASIC_COUNT;
        ret = mach_vm_page_info(task, R_ADDR(r), VM_PAGE_INFO_BASIC, (vm_page_info_t)&r->r_pageinfo, &pageinfoCount);
        if (KERN_SUCCESS != ret)
            err_mach(ret, r, "getting pageinfo at %llx", R_ADDR(r));

        /* record the purgability */

        ret = mach_vm_purgable_control(task, vm_addr, VM_PURGABLE_GET_STATE, &r->r_purgable);
        if (KERN_SUCCESS != ret)
            r->r_purgable = VM_PURGABLE_DENY;

        STAILQ_INSERT_TAIL(rhead, r, r_linkage);
        vm_addr += vm_size;
    }
    return 0;
}
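/*
 * Unlink every region from the list and release the list head itself.
 */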
void
del_region_list(struct regionhead *rhead)
{
    struct region *r, *t;

    STAILQ_FOREACH_SAFE(r, rhead, r_linkage, t) {
        STAILQ_REMOVE(rhead, r, region, r_linkage);
    }
    free(rhead);
}
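/*
 * Build the raw region list for a task, or return NULL if the walk fails.
 * A minimal usage sketch (assuming a valid task port):
 *
 *     struct regionhead *rhead = build_region_list(task);
 *     if (rhead != NULL) {
 *         walk_region_list(rhead, region_print_memory, NULL);
 *         del_region_list(rhead);
 *     }
 */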
struct regionhead *
build_region_list(task_t task)
{
    struct regionhead *rhead = malloc(sizeof (*rhead));
    STAILQ_INIT(rhead);

    if (0 != walk_regions(task, rhead)) {
        del_region_list(rhead);
        return NULL;
    }
    return rhead;
}
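/*
 * Apply cbfn to every region on the list. A callback that returns
 * WALK_DELETE_REGION causes the region to be unlinked from the list;
 * other return values (e.g. WALK_CONTINUE) leave it in place.
 */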
int
walk_region_list(struct regionhead *rhead, walk_region_cbfn_t cbfn, void *arg)
{
    struct region *r, *t;

    STAILQ_FOREACH_SAFE(r, rhead, r_linkage, t) {
        switch (cbfn(r, arg)) {
        case WALK_DELETE_REGION:
            STAILQ_REMOVE(rhead, r, region, r_linkage);
            break;
        default:
            break;
        }
    }
    return 0;
}
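/*
 * Compute the host and application page shifts (log2 of the respective
 * page sizes) once; pageshift_host and pageshift_app are relied upon by
 * the walkers and size probes in this file.
 */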
    if (0 == pageshift_host) {
        vm_size_t hps = 0;
        kern_return_t ret = host_page_size(MACH_PORT_NULL, &hps);
        if (KERN_SUCCESS != ret || hps == 0)
            err_mach(ret, NULL, "host page size");
        int pshift = 0;
        while (((vm_offset_t)1 << pshift) != hps)
            pshift++;
        pageshift_host = pshift;
    }
    if (OPTIONS_DEBUG(opt, 3))
        printf("host page size: %lu\n", 1ul << pageshift_host);

    if (0 == pageshift_app) {
        size_t psz = getpagesize();
        int pshift = 0;
        while ((1ul << pshift) != psz)
            pshift++;
        pageshift_app = pshift;
    }
    if (OPTIONS_DEBUG(opt, 3) && pageshift_app != pageshift_host)
        printf("app page size: %lu\n", 1ul << pageshift_app);
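/*
 * Column headers for the human-readable region dump produced by
 * print_memory_region() below.
 */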
void
print_memory_region_header(void)
{
    printf("%-33s %c %-7s %-7s %8s %16s ",
        "Address Range", 'S', "Size", "Cur/Max", "Obj32", "FirstPgObjectID");
    printf("%9s %-3s %-11s %5s ",
        "Offset", "Tag", "Mode", "Refc");
    printf("%5s ", "Depth");
    printf("%5s %5s %5s %3s ",
        "Res", "SNP", "Dirty", "Pgr");
    printf("\n");
}
static char
region_type(const struct region *r)
{
    if (r->r_inzfodregion)
        return 'z';     /* zero-fill-on-demand */
    if (r->r_incommregion)
        return 'c';     /* comm page */
    if (r->r_insharedregion)
        return 's';     /* shared region */
    return ' ';
}
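/*
 * Print one region in the columnar format established by
 * print_memory_region_header(), including any mapped-file and
 * subregion detail.
 */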
void
print_memory_region(const struct region *r)
{
    hsize_str_t hstr;
    tag_str_t tstr;

    printf("%016llx-%016llx %c %-7s %s/%s %8x %16llx ",
        R_ADDR(r), R_ENDADDR(r), region_type(r),
        str_hsize(hstr, R_SIZE(r)),
        str_prot(r->r_info.protection),
        str_prot(r->r_info.max_protection),
        r->r_info.object_id, r->r_pageinfo.object_id);

    printf("%9lld %3d %-11s %5u ",
        r->r_info.external_pager ?
        r->r_pageinfo.offset : r->r_info.offset,
        r->r_info.user_tag,
        str_shared(r->r_info.share_mode),
        r->r_info.ref_count);

    printf("%5u ", r->r_depth);
    if (!r->r_info.is_submap) {
        printf("%5u %5u %5u %3s ",
            r->r_info.pages_resident,
            r->r_info.pages_shared_now_private,
            r->r_info.pages_dirtied,
            r->r_info.external_pager ? "ext" : "");
        if (r->r_fileref)
            printf("\n %s at %lld ",
                r->r_fileref->fr_pathname,
                r->r_fileref->fr_offset);
        printf("%s", str_tagr(tstr, r));
        printf("\n");
        if (r->r_nsubregions) {
            printf(" %-33s %7s %12s\t%s\n",
                "Address Range", "Size", "Type(s)", "Filename(s)");
            for (unsigned i = 0; i < r->r_nsubregions; i++) {
                struct subregion *s = r->r_subregions[i];
                printf(" %016llx-%016llx %7s %12s\t%s\n",
                    S_ADDR(s), S_ENDADDR(s),
                    str_hsize(hstr, S_SIZE(s)),
                    S_MACHO_TYPE(s),    /* assumed accessor for the Type(s) column */
                    S_FILENAME(s));     /* assumed accessor for the Filename(s) column */
            }
        }
    } else {
        printf("%5s %5s %5s %3s %s\n", "", "", "", "", str_tagr(tstr, r));
    }
}
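/*
 * walk_region_list() callback that prints each region and keeps walking.
 */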
static walk_return_t
region_print_memory(struct region *r, __unused void *arg)
{
    print_memory_region(r);
    return WALK_CONTINUE;
}
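/*
 * Print the column header followed by a single region.
 */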
void
print_one_memory_region(const struct region *r)
{
    print_memory_region_header();
    print_memory_region(r);
}
/*
 * The reported size of a mapping to a file object gleaned from
 * mach_vm_region_recurse() can exceed the underlying size of the file.
 * If we attempt to write out the full reported size, we find that we
 * error (EFAULT) or, if we compress it, we die with SIGBUS.
 *
 * See rdar://23744374
 *
 * Figure out what the "non-faulting" size of the object is to
 * *host* page size resolution.
 */
static boolean_t
is_actual_size(const task_t task, const struct region *r, mach_vm_size_t *hostvmsize)
{
    if (!r->r_info.external_pager ||
        (r->r_info.max_protection & VM_PROT_READ) == VM_PROT_NONE)
        return TRUE;

    const size_t pagesize_host = 1ul << pageshift_host;
    const unsigned filepages = r->r_info.pages_resident +
        r->r_info.pages_swapped_out;

    if (pagesize_host * filepages == R_SIZE(r))
        return TRUE;
    /*
     * Verify that the last couple of host-pagesize pages
     * of a file backed mapping are actually pageable in the
     * underlying object by walking backwards from the end
     * of the application-pagesize mapping.
     */
    *hostvmsize = R_SIZE(r);
    const long npagemax = 1ul << (pageshift_app - pageshift_host);
    for (long npage = 0; npage < npagemax; npage++) {

        const mach_vm_address_t taddress =
            R_ENDADDR(r) - pagesize_host * (npage + 1);
        if (taddress < R_ADDR(r) || taddress >= R_ENDADDR(r))
            break;
        mach_msg_type_number_t pCount = VM_PAGE_INFO_BASIC_COUNT;
        vm_page_info_basic_data_t pInfo;

        kern_return_t ret = mach_vm_page_info(task, taddress, VM_PAGE_INFO_BASIC, (vm_page_info_t)&pInfo, &pCount);
        if (KERN_SUCCESS != ret) {
            err_mach(ret, NULL, "getting pageinfo at %llx", taddress);
            break;
        }
        /*
         * If this page has been in memory before, assume it can
         * be brought back again
         */
        if (pInfo.disposition & (VM_PAGE_QUERY_PAGE_PRESENT | VM_PAGE_QUERY_PAGE_REF | VM_PAGE_QUERY_PAGE_DIRTY | VM_PAGE_QUERY_PAGE_PAGED_OUT))
            continue;

        /*
         * Force the page to be fetched to see if it faults
         */
        mach_vm_size_t tsize = 1ul << pageshift_host;
        void *tmp = valloc((size_t)tsize);
        const mach_vm_address_t vtmp = (mach_vm_address_t)tmp;

        switch (ret = mach_vm_read_overwrite(task,
            taddress, tsize, vtmp, &tsize)) {
        case KERN_INVALID_ADDRESS:
            *hostvmsize = taddress - R_ADDR(r);
            break;
        case KERN_SUCCESS:
            break;
        default:
            err_mach(ret, NULL, "mach_vm_overwrite()");
            break;
        }
        free(tmp);
    }
    return R_SIZE(r) == *hostvmsize;
}
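/*
 * When this returns FALSE, *hostvmsize holds the non-faulting prefix of
 * the mapping (in host-page units); callers writing out file-backed
 * regions can use it in place of R_SIZE(r) to avoid the EFAULT/SIGBUS
 * described above.
 */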