]>
Commit | Line | Data |
---|---|---|
cf37c299 A |
1 | /* |
2 | * Copyright (c) 2016 Apple Inc. All rights reserved. | |
3 | */ | |
4 | ||
5 | #include "options.h" | |
6 | #include "vm.h" | |
7 | #include "region.h" | |
8 | #include "utils.h" | |
9 | #include "dyld.h" | |
10 | #include "threads.h" | |
11 | #include "sparse.h" | |
12 | #include "vanilla.h" | |
13 | #include "corefile.h" | |
14 | ||
15 | #include <sys/types.h> | |
16 | #include <sys/sysctl.h> | |
17 | #include <sys/stat.h> | |
18 | #include <sys/mman.h> | |
19 | #include <libproc.h> | |
20 | ||
21 | #include <stdio.h> | |
22 | #include <string.h> | |
23 | #include <strings.h> | |
24 | #include <stdlib.h> | |
25 | #include <stdarg.h> | |
26 | #include <signal.h> | |
27 | #include <unistd.h> | |
28 | #include <errno.h> | |
29 | #include <ctype.h> | |
30 | #include <fcntl.h> | |
31 | #include <assert.h> | |
32 | ||
33 | #include <mach/mach.h> | |
34 | ||
35 | static struct subregion * | |
36 | new_subregion( | |
37 | const mach_vm_offset_t vmaddr, | |
38 | const mach_vm_offset_t vmsize, | |
39 | const native_segment_command_t *sc, | |
40 | const struct libent *le) | |
41 | { | |
42 | struct subregion *s = malloc(sizeof (*s)); | |
43 | ||
44 | assert(vmaddr != 0 && vmsize != 0); | |
45 | assert(vmaddr < vmaddr + vmsize); | |
46 | s->s_segcmd = *sc; | |
47 | ||
48 | S_SETADDR(s, vmaddr); | |
49 | S_SETSIZE(s, vmsize); | |
50 | ||
51 | s->s_libent = le; | |
887d5eed | 52 | s->s_isuuidref = false; |
cf37c299 A |
53 | return s; |
54 | } | |
55 | ||
56 | static void | |
57 | del_subregion(struct subregion *s) | |
58 | { | |
59 | poison(s, 0xfacefac1, sizeof (*s)); | |
60 | free(s); | |
61 | } | |
62 | ||
63 | static walk_return_t | |
64 | clean_subregions(struct region *r) | |
65 | { | |
887d5eed A |
66 | if (r->r_nsubregions) { |
67 | assert(r->r_subregions); | |
68 | for (unsigned i = 0; i < r->r_nsubregions; i++) | |
69 | del_subregion(r->r_subregions[i]); | |
70 | poison(r->r_subregions, 0xfac1fac1, sizeof (r->r_subregions[0]) * r->r_nsubregions); | |
71 | free(r->r_subregions); | |
72 | r->r_nsubregions = 0; | |
73 | r->r_subregions = NULL; | |
74 | } else { | |
75 | assert(NULL == r->r_subregions); | |
76 | } | |
cf37c299 A |
77 | return WALK_CONTINUE; |
78 | } | |
79 | ||
80 | void | |
81 | del_sparse_region(struct region *r) | |
82 | { | |
83 | clean_subregions(r); | |
84 | poison(r, 0xcafecaff, sizeof (*r)); | |
85 | free(r); | |
86 | } | |
87 | ||
88 | #define NULLsc ((native_segment_command_t *)0) | |
89 | ||
90 | static bool | |
91 | issamesubregiontype(const struct subregion *s0, const struct subregion *s1) { | |
92 | return 0 == strncmp(S_MACHO_TYPE(s0), S_MACHO_TYPE(s1), sizeof (NULLsc->segname)); | |
93 | } | |
94 | ||
95 | bool | |
96 | issubregiontype(const struct subregion *s, const char *sctype) { | |
97 | return 0 == strncmp(S_MACHO_TYPE(s), sctype, sizeof (NULLsc->segname)); | |
98 | } | |
99 | ||
100 | static void | |
101 | elide_subregion(struct region *r, unsigned ind) | |
102 | { | |
103 | del_subregion(r->r_subregions[ind]); | |
104 | for (unsigned j = ind; j < r->r_nsubregions - 1; j++) | |
105 | r->r_subregions[j] = r->r_subregions[j+1]; | |
106 | assert(r->r_nsubregions != 0); | |
107 | r->r_subregions[--r->r_nsubregions] = NULL; | |
108 | } | |
109 | ||
/*
 * Transient singly-linked list node used to accumulate subregions
 * while walking dyld's image list; the list is later flattened into
 * a sorted array on the region and the nodes freed.
 */
struct subregionlist {
    STAILQ_ENTRY(subregionlist) srl_linkage;
    struct subregion *srl_s;    /* the subregion this node carries */
};
typedef STAILQ_HEAD(, subregionlist) subregionlisthead_t;
115 | ||
116 | static walk_return_t | |
117 | add_subregions_for_libent( | |
118 | subregionlisthead_t *srlh, | |
119 | const struct region *r, | |
120 | const native_mach_header_t *mh, | |
887d5eed | 121 | const mach_vm_offset_t __unused mh_taddr, // address in target |
cf37c299 A |
122 | const struct libent *le) |
123 | { | |
124 | const struct load_command *lc = (const void *)(mh + 1); | |
887d5eed | 125 | mach_vm_offset_t objoff = le->le_objoff; |
cf37c299 A |
126 | for (unsigned n = 0; n < mh->ncmds; n++) { |
127 | ||
128 | const native_segment_command_t *sc; | |
129 | ||
130 | switch (lc->cmd) { | |
131 | case NATIVE_LC_SEGMENT: | |
132 | sc = (const void *)lc; | |
133 | ||
887d5eed | 134 | if (0 == sc->vmaddr && strcmp(sc->segname, SEG_PAGEZERO) == 0) |
cf37c299 | 135 | break; |
887d5eed A |
136 | mach_vm_offset_t lo = sc->vmaddr + objoff; |
137 | mach_vm_offset_t hi = lo + sc->vmsize; | |
cf37c299 A |
138 | |
139 | /* Eliminate non-overlapping sections first */ | |
140 | ||
887d5eed | 141 | if (R_ENDADDR(r) - 1 < lo) |
cf37c299 | 142 | break; |
887d5eed | 143 | if (hi - 1 < R_ADDR(r)) |
cf37c299 | 144 | break; |
887d5eed | 145 | |
cf37c299 A |
146 | /* |
147 | * Some part of this segment is in the region. | |
148 | * Trim the edges in the case where we span regions. | |
149 | */ | |
887d5eed A |
150 | if (lo < R_ADDR(r)) |
151 | lo = R_ADDR(r); | |
152 | if (hi > R_ENDADDR(r)) | |
153 | hi = R_ENDADDR(r); | |
cf37c299 A |
154 | |
155 | struct subregionlist *srl = calloc(1, sizeof (*srl)); | |
887d5eed | 156 | struct subregion *s = new_subregion(lo, hi - lo, sc, le); |
cf37c299 A |
157 | assert(sc->fileoff >= 0); |
158 | srl->srl_s = s; | |
159 | STAILQ_INSERT_HEAD(srlh, srl, srl_linkage); | |
160 | ||
887d5eed | 161 | if (OPTIONS_DEBUG(opt, 2)) { |
cf37c299 | 162 | hsize_str_t hstr; |
530d02b6 | 163 | printr(r, "subregion %llx-%llx %7s %12s\t%s [%s off %lu for %lu nsects %u flags %x]\n", |
cf37c299 A |
164 | S_ADDR(s), S_ENDADDR(s), |
165 | str_hsize(hstr, S_SIZE(s)), | |
887d5eed | 166 | sc->segname, |
cf37c299 | 167 | S_FILENAME(s), |
887d5eed | 168 | str_prot(sc->initprot), |
530d02b6 A |
169 | (unsigned long)sc->fileoff, |
170 | (unsigned long)sc->filesize, | |
cf37c299 A |
171 | sc->nsects, sc->flags); |
172 | } | |
173 | break; | |
174 | default: | |
175 | break; | |
176 | } | |
177 | if (lc->cmdsize) | |
178 | lc = (const void *)((caddr_t)lc + lc->cmdsize); | |
179 | else | |
180 | break; | |
181 | } | |
182 | return WALK_CONTINUE; | |
183 | } | |
184 | ||
185 | /* | |
186 | * Because we aggregate information from multiple sources, there may | |
187 | * be duplicate subregions. Eliminate them here. | |
188 | * | |
189 | * Note that the each library in the shared cache points | |
190 | * separately at a single, unified (large!) __LINKEDIT section; these | |
191 | * get removed here too. | |
192 | * | |
193 | * Assumes the subregion array is sorted by address! | |
194 | */ | |
195 | static void | |
196 | eliminate_duplicate_subregions(struct region *r) | |
197 | { | |
198 | unsigned i = 1; | |
199 | while (i < r->r_nsubregions) { | |
200 | struct subregion *s0 = r->r_subregions[i-1]; | |
201 | struct subregion *s1 = r->r_subregions[i]; | |
202 | ||
203 | if (S_ADDR(s0) != S_ADDR(s1) || S_SIZE(s0) != S_SIZE(s1)) { | |
204 | i++; | |
205 | continue; | |
206 | } | |
207 | if (memcmp(&s0->s_segcmd, &s1->s_segcmd, sizeof (s0->s_segcmd)) != 0) { | |
208 | i++; | |
209 | continue; | |
210 | } | |
887d5eed | 211 | if (OPTIONS_DEBUG(opt, 3)) |
cf37c299 A |
212 | printr(r, "eliding duplicate %s subregion (%llx-%llx) file %s\n", |
213 | S_MACHO_TYPE(s1), S_ADDR(s1), S_ENDADDR(s1), S_FILENAME(s1)); | |
214 | /* If the duplicate subregions aren't mapping the same file (?), forget the name */ | |
215 | if (s0->s_libent != s1->s_libent) | |
216 | s0->s_libent = s1->s_libent = NULL; | |
217 | elide_subregion(r, i); | |
218 | } | |
219 | } | |
220 | ||
221 | /* | |
222 | * See if any of the dyld information we have can better describe this | |
223 | * region of the target address space. | |
224 | */ | |
225 | walk_return_t | |
226 | decorate_memory_region(struct region *r, void *arg) | |
227 | { | |
887d5eed A |
228 | if (r->r_inzfodregion || r->r_incommregion) |
229 | return WALK_CONTINUE; | |
230 | ||
cf37c299 A |
231 | const dyld_process_info dpi = arg; |
232 | ||
233 | __block walk_return_t retval = WALK_CONTINUE; | |
234 | __block subregionlisthead_t srlhead = STAILQ_HEAD_INITIALIZER(srlhead); | |
235 | ||
887d5eed | 236 | _dyld_process_info_for_each_image(dpi, ^(uint64_t __unused mhaddr, const uuid_t uuid, __unused const char *path) { |
cf37c299 A |
237 | if (WALK_CONTINUE == retval) { |
238 | const struct libent *le = libent_lookup_byuuid(uuid); | |
239 | assert(le->le_mhaddr == mhaddr); | |
887d5eed A |
240 | bool shouldskip = false; |
241 | if (V_SIZE(&le->le_vr)) | |
242 | shouldskip = (R_ENDADDR(r) < V_ADDR(&le->le_vr) || | |
243 | R_ADDR(r) > V_ENDADDR(&le->le_vr)); | |
244 | if (!shouldskip) | |
245 | retval = add_subregions_for_libent(&srlhead, r, le->le_mh, le->le_mhaddr, le); | |
cf37c299 A |
246 | } |
247 | }); | |
248 | if (WALK_CONTINUE != retval) | |
249 | goto done; | |
250 | ||
251 | /* | |
252 | * Take the unsorted list of subregions, if any, | |
253 | * and hang a sorted array of ranges on the region structure. | |
254 | */ | |
255 | if (!STAILQ_EMPTY(&srlhead)) { | |
256 | struct subregionlist *srl; | |
257 | STAILQ_FOREACH(srl, &srlhead, srl_linkage) { | |
258 | r->r_nsubregions++; | |
259 | } | |
260 | assert(r->r_nsubregions); | |
261 | ||
262 | r->r_subregions = calloc(r->r_nsubregions, sizeof (void *)); | |
263 | unsigned i = 0; | |
264 | STAILQ_FOREACH(srl, &srlhead, srl_linkage) { | |
265 | r->r_subregions[i++] = srl->srl_s; | |
266 | } | |
267 | qsort_b(r->r_subregions, r->r_nsubregions, sizeof (void *), | |
268 | ^(const void *a, const void *b) { | |
269 | const struct subregion *lhs = *(struct subregion **)a; | |
270 | const struct subregion *rhs = *(struct subregion **)b; | |
271 | if (S_ADDR(lhs) > S_ADDR(rhs)) | |
272 | return 1; | |
273 | if (S_ADDR(lhs) < S_ADDR(rhs)) | |
274 | return -1; | |
275 | return 0; | |
276 | }); | |
277 | ||
278 | eliminate_duplicate_subregions(r); | |
279 | ||
887d5eed A |
280 | if (r->r_info.external_pager) { |
281 | /* | |
282 | * Only very specific segment types get to be filerefs | |
283 | */ | |
284 | for (i = 0; i < r->r_nsubregions; i++) { | |
285 | struct subregion *s = r->r_subregions[i]; | |
286 | /* | |
287 | * Anything marked writable is trivially disqualified; we're | |
288 | * going to copy it anyway. | |
289 | */ | |
290 | if (s->s_segcmd.initprot & VM_PROT_WRITE) | |
291 | continue; | |
292 | ||
293 | /* __TEXT and __LINKEDIT are our real targets */ | |
294 | if (!issubregiontype(s, SEG_TEXT) && !issubregiontype(s, SEG_LINKEDIT) && !issubregiontype(s, "__UNICODE")) { | |
295 | if (OPTIONS_DEBUG(opt, 3)) { | |
296 | hsize_str_t hstr; | |
297 | printvr(S_RANGE(s), "skipping read-only %s segment %s\n", S_MACHO_TYPE(s), str_hsize(hstr, S_SIZE(s))); | |
298 | } | |
299 | continue; | |
300 | } | |
301 | if (r->r_insharedregion) { | |
302 | /* | |
303 | * Part of the shared region: things get more complicated. | |
304 | */ | |
305 | if (r->r_fileref) { | |
306 | /* | |
307 | * There's a file reference here for the whole region. | |
308 | * For __TEXT subregions, we could, in principle (though | |
309 | * see below) generate references to the individual | |
310 | * dylibs that dyld reports in the region. If the | |
311 | * debugger could then use the __LINKEDIT info in the | |
312 | * file, then we'd be done. But as long as the dump | |
313 | * includes __LINKEDIT sections, we're going to | |
314 | * end up generating a file reference to the combined | |
315 | * __LINKEDIT section in the shared cache anyway, so | |
316 | * we might as well do that for the __TEXT regions as | |
317 | * well. | |
318 | */ | |
319 | s->s_libent = r->r_fileref->fr_libent; | |
320 | s->s_isuuidref = true; | |
321 | } else { | |
322 | /* | |
323 | * If we get here, it's likely that the shared cache | |
324 | * name can't be found e.g. update_dyld_shared_cache(1). | |
325 | * For __TEXT subregions, we could generate refs to | |
326 | * the individual dylibs, but note that the mach header | |
327 | * and segment commands in memory are still pointing | |
328 | * into the shared cache so any act of reconstruction | |
329 | * is fiendishly complex. So copy it. | |
330 | */ | |
331 | assert(!s->s_isuuidref); | |
332 | } | |
333 | } else { | |
334 | /* Just a regular dylib? */ | |
335 | if (s->s_libent) | |
336 | s->s_isuuidref = true; | |
337 | } | |
338 | } | |
339 | } | |
340 | } | |
cf37c299 A |
341 | assert(WALK_CONTINUE == retval); |
342 | ||
343 | done: | |
344 | if (!STAILQ_EMPTY(&srlhead)) { | |
345 | struct subregionlist *srl, *trl; | |
346 | STAILQ_FOREACH_SAFE(srl, &srlhead, srl_linkage, trl) { | |
347 | free(srl); | |
348 | } | |
349 | } | |
350 | return retval; | |
351 | } | |
352 | ||
353 | /* | |
354 | * Strip region of all decoration | |
355 | * | |
356 | * Invoked (on every region!) after an error during the initial | |
887d5eed | 357 | * 'decoration' phase to discard potentially incomplete information. |
cf37c299 A |
358 | */ |
359 | walk_return_t | |
360 | undecorate_memory_region(struct region *r, __unused void *arg) | |
361 | { | |
362 | assert(&sparse_ops != r->r_op); | |
363 | return r->r_nsubregions ? clean_subregions(r) : WALK_CONTINUE; | |
364 | } | |
365 | ||
366 | /* | |
367 | * This optimization occurs -after- the vanilla_region_optimizations(), | |
368 | * and -after- we've tagged zfod and first-pass fileref's. | |
369 | */ | |
370 | walk_return_t | |
371 | sparse_region_optimization(struct region *r, __unused void *arg) | |
372 | { | |
373 | assert(&sparse_ops != r->r_op); | |
374 | ||
375 | if (r->r_inzfodregion) { | |
376 | /* | |
377 | * Pure zfod region: almost certainly a more compact | |
378 | * representation - keep it that way. | |
379 | */ | |
887d5eed A |
380 | if (OPTIONS_DEBUG(opt, 3)) |
381 | printr(r, "retaining zfod region\n"); | |
cf37c299 A |
382 | assert(&zfod_ops == r->r_op); |
383 | return clean_subregions(r); | |
384 | } | |
385 | ||
887d5eed A |
386 | if (r->r_insharedregion && 0 == r->r_nsubregions) { |
387 | /* | |
388 | * A segment in the shared region needs to be | |
389 | * identified with an LC_SEGMENT that dyld claims, | |
390 | * otherwise (we assert) it's not useful to the dump. | |
391 | */ | |
392 | if (OPTIONS_DEBUG(opt, 2)) { | |
393 | hsize_str_t hstr; | |
394 | printr(r, "not referenced in dyld info => " | |
395 | "eliding %s range in shared region\n", | |
396 | str_hsize(hstr, R_SIZE(r))); | |
397 | } | |
398 | if (0 == r->r_info.pages_dirtied && 0 == r->r_info.pages_swapped_out) | |
399 | return WALK_DELETE_REGION; | |
400 | if (OPTIONS_DEBUG(opt, 2)) { | |
401 | hsize_str_t hstr; | |
402 | printr(r, "dirty pages, but not referenced in dyld info => " | |
403 | "NOT eliding %s range in shared region\n", | |
404 | str_hsize(hstr, R_SIZE(r))); | |
405 | } | |
406 | } | |
407 | ||
408 | if (r->r_fileref) { | |
409 | /* | |
410 | * Already have a fileref for the whole region: already | |
411 | * a more compact representation - keep it that way. | |
412 | */ | |
413 | if (OPTIONS_DEBUG(opt, 3)) | |
414 | printr(r, "retaining fileref region\n"); | |
415 | assert(&fileref_ops == r->r_op); | |
416 | return clean_subregions(r); | |
417 | } | |
cf37c299 A |
418 | |
419 | if (r->r_nsubregions > 1) { | |
420 | /* | |
421 | * Merge adjacent or identical subregions that have no file reference | |
422 | * (Reducing the number of subregions reduces header overhead and | |
423 | * improves compressability) | |
424 | */ | |
425 | unsigned i = 1; | |
426 | while (i < r->r_nsubregions) { | |
427 | struct subregion *s0 = r->r_subregions[i-1]; | |
428 | struct subregion *s1 = r->r_subregions[i]; | |
429 | ||
887d5eed | 430 | if (s0->s_isuuidref) { |
cf37c299 A |
431 | i++; |
432 | continue; /* => destined to be a fileref */ | |
433 | } | |
434 | if (!issamesubregiontype(s0, s1)) { | |
435 | i++; | |
436 | continue; /* merge-able subregions must have same "type" */ | |
437 | } | |
438 | ||
439 | if (S_ENDADDR(s0) == S_ADDR(s1)) { | |
440 | /* directly adjacent subregions */ | |
887d5eed | 441 | if (OPTIONS_DEBUG(opt, 2)) |
cf37c299 A |
442 | printr(r, "merging subregions (%llx-%llx + %llx-%llx) -- adjacent\n", |
443 | S_ADDR(s0), S_ENDADDR(s0), S_ADDR(s1), S_ENDADDR(s1)); | |
cf37c299 A |
444 | S_SETSIZE(s0, S_ENDADDR(s1) - S_ADDR(s0)); |
445 | elide_subregion(r, i); | |
446 | continue; | |
447 | } | |
448 | ||
449 | const mach_vm_size_t pfn[2] = { | |
450 | S_ADDR(s0) >> pageshift_host, | |
451 | S_ADDR(s1) >> pageshift_host | |
452 | }; | |
453 | const mach_vm_size_t endpfn[2] = { | |
454 | (S_ENDADDR(s0) - 1) >> pageshift_host, | |
455 | (S_ENDADDR(s1) - 1) >> pageshift_host | |
456 | }; | |
457 | ||
458 | if (pfn[0] == pfn[1] && pfn[0] == endpfn[0] && pfn[0] == endpfn[1]) { | |
459 | /* two small subregions share a host page */ | |
887d5eed | 460 | if (OPTIONS_DEBUG(opt, 2)) |
cf37c299 A |
461 | printr(r, "merging subregions (%llx-%llx + %llx-%llx) -- same page\n", |
462 | S_ADDR(s0), S_ENDADDR(s0), S_ADDR(s1), S_ENDADDR(s1)); | |
cf37c299 A |
463 | S_SETSIZE(s0, S_ENDADDR(s1) - S_ADDR(s0)); |
464 | elide_subregion(r, i); | |
465 | continue; | |
466 | } | |
467 | ||
468 | if (pfn[1] == 1 + endpfn[0]) { | |
469 | /* subregions are pagewise-adjacent: bigger chunks to compress */ | |
887d5eed | 470 | if (OPTIONS_DEBUG(opt, 2)) |
cf37c299 A |
471 | printr(r, "merging subregions (%llx-%llx + %llx-%llx) -- adjacent pages\n", |
472 | S_ADDR(s0), S_ENDADDR(s0), S_ADDR(s1), S_ENDADDR(s1)); | |
cf37c299 A |
473 | S_SETSIZE(s0, S_ENDADDR(s1) - S_ADDR(s0)); |
474 | elide_subregion(r, i); | |
475 | continue; | |
476 | } | |
477 | ||
478 | i++; /* this isn't the subregion we're looking for */ | |
479 | } | |
480 | } | |
481 | ||
887d5eed A |
482 | if (1 == r->r_nsubregions) { |
483 | struct subregion *s = r->r_subregions[0]; | |
484 | if (!s->s_isuuidref && | |
485 | R_ADDR(r) == S_ADDR(s) && R_ENDADDR(r) == S_ENDADDR(s)) { | |
486 | if (OPTIONS_DEBUG(opt, 3)) | |
487 | printr(r, "subregion (%llx-%llx) reverts to region\n", | |
488 | S_ADDR(s), S_ENDADDR(s)); | |
489 | return clean_subregions(r); | |
490 | } | |
491 | } | |
492 | ||
cf37c299 A |
493 | if (r->r_nsubregions) |
494 | r->r_op = &sparse_ops; | |
495 | ||
496 | return WALK_CONTINUE; | |
497 | } |