/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 */

#include "options.h"
#include "vm.h"
#include "region.h"
#include "utils.h"
#include "dyld.h"
#include "threads.h"
#include "sparse.h"
#include "vanilla.h"
#include "corefile.h"

#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <libproc.h>

#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <stdlib.h>
#include <stdarg.h>
#include <signal.h>
#include <unistd.h>
#include <errno.h>
#include <ctype.h>
#include <fcntl.h>
#include <assert.h>

#include <mach/mach.h>

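/*
 * Allocate a subregion spanning [vmaddr, vmaddr + vmsize), carrying a
 * copy of the Mach-O segment command that describes it and a reference
 * to the library it belongs to.
 */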
static struct subregion *
new_subregion(
    const mach_vm_offset_t vmaddr,
    const mach_vm_offset_t vmsize,
    const native_segment_command_t *sc,
    const struct libent *le)
{
    struct subregion *s = malloc(sizeof (*s));

    assert(vmaddr != 0 && vmsize != 0);
    assert(vmaddr < vmaddr + vmsize);
    s->s_segcmd = *sc;

    S_SETADDR(s, vmaddr);
    S_SETSIZE(s, vmsize);

    s->s_libent = le;
    s->s_isuuidref = false;
    return s;
}

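/*
 * Scribble a poison pattern over the subregion before freeing it to
 * make use-after-free bugs easier to spot.
 */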
static void
del_subregion(struct subregion *s)
{
    poison(s, 0xfacefac1, sizeof (*s));
    free(s);
}

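/*
 * Free all of a region's subregions and reset its subregion
 * bookkeeping to empty.
 */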
static walk_return_t
clean_subregions(struct region *r)
{
    if (r->r_nsubregions) {
        assert(r->r_subregions);
        for (unsigned i = 0; i < r->r_nsubregions; i++)
            del_subregion(r->r_subregions[i]);
        poison(r->r_subregions, 0xfac1fac1, sizeof (r->r_subregions[0]) * r->r_nsubregions);
        free(r->r_subregions);
        r->r_nsubregions = 0;
        r->r_subregions = NULL;
    } else {
        assert(NULL == r->r_subregions);
    }
    return WALK_CONTINUE;
}

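/*
 * Destroy a sparse region: release its subregions, then the region itself.
 */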
void
del_sparse_region(struct region *r)
{
    clean_subregions(r);
    poison(r, 0xcafecaff, sizeof (*r));
    free(r);
}

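/*
 * NULLsc is never dereferenced; it exists only so that
 * sizeof (NULLsc->segname) can name the width of a segment command's
 * name field.
 */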
#define NULLsc ((native_segment_command_t *)0)

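/*
 * Subregions are typed by the name in their Mach-O segment command
 * (e.g. __TEXT, __DATA, __LINKEDIT); compare the fixed-width
 * segname field directly.
 */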
static bool
issamesubregiontype(const struct subregion *s0, const struct subregion *s1) {
    return 0 == strncmp(S_MACHO_TYPE(s0), S_MACHO_TYPE(s1), sizeof (NULLsc->segname));
}

bool
issubregiontype(const struct subregion *s, const char *sctype) {
    return 0 == strncmp(S_MACHO_TYPE(s), sctype, sizeof (NULLsc->segname));
}

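/*
 * Remove the subregion at index 'ind', shifting the remaining entries
 * down one slot so the array stays dense and sorted.
 */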
static void
elide_subregion(struct region *r, unsigned ind)
{
    del_subregion(r->r_subregions[ind]);
    for (unsigned j = ind; j < r->r_nsubregions - 1; j++)
        r->r_subregions[j] = r->r_subregions[j+1];
    assert(r->r_nsubregions != 0);
    r->r_subregions[--r->r_nsubregions] = NULL;
}

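/*
 * Transient singly-linked list node used to collect subregions before
 * they are counted, copied into the region's array, and sorted.
 */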
struct subregionlist {
    STAILQ_ENTRY(subregionlist) srl_linkage;
    struct subregion *srl_s;
};
typedef STAILQ_HEAD(, subregionlist) subregionlisthead_t;

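/*
 * Walk the load commands of the given Mach-O header.  For each
 * LC_SEGMENT that overlaps region r (skipping __PAGEZERO), create a
 * subregion clipped to the region's bounds and prepend it to the
 * caller's list.
 */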
static walk_return_t
add_subregions_for_libent(
    subregionlisthead_t *srlh,
    const struct region *r,
    const native_mach_header_t *mh,
    const mach_vm_offset_t __unused mh_taddr,   // address in target
    const struct libent *le)
{
    const struct load_command *lc = (const void *)(mh + 1);
    mach_vm_offset_t objoff = le->le_objoff;
    for (unsigned n = 0; n < mh->ncmds; n++) {

        const native_segment_command_t *sc;

        switch (lc->cmd) {
        case NATIVE_LC_SEGMENT:
            sc = (const void *)lc;

            if (0 == sc->vmaddr && strcmp(sc->segname, SEG_PAGEZERO) == 0)
                break;
            mach_vm_offset_t lo = sc->vmaddr + objoff;
            mach_vm_offset_t hi = lo + sc->vmsize;

            /* Eliminate non-overlapping segments first */

            if (R_ENDADDR(r) - 1 < lo)
                break;
            if (hi - 1 < R_ADDR(r))
                break;

            /*
             * Some part of this segment is in the region.
             * Trim the edges in the case where we span regions.
             */
            if (lo < R_ADDR(r))
                lo = R_ADDR(r);
            if (hi > R_ENDADDR(r))
                hi = R_ENDADDR(r);

            struct subregionlist *srl = calloc(1, sizeof (*srl));
            struct subregion *s = new_subregion(lo, hi - lo, sc, le);
            assert(sc->fileoff >= 0);
            srl->srl_s = s;
            STAILQ_INSERT_HEAD(srlh, srl, srl_linkage);

            if (OPTIONS_DEBUG(opt, 2)) {
                hsize_str_t hstr;
                printr(r, "subregion %llx-%llx %7s %12s\t%s [%s off %zd for %zd nsects %u flags %x]\n",
                       S_ADDR(s), S_ENDADDR(s),
                       str_hsize(hstr, S_SIZE(s)),
                       sc->segname,
                       S_FILENAME(s),
                       str_prot(sc->initprot),
                       sc->fileoff, sc->filesize,
                       sc->nsects, sc->flags);
            }
            break;
        default:
            break;
        }
        if (lc->cmdsize)
            lc = (const void *)((caddr_t)lc + lc->cmdsize);
        else
            break;
    }
    return WALK_CONTINUE;
}

/*
 * Because we aggregate information from multiple sources, there may
 * be duplicate subregions.  Eliminate them here.
 *
 * Note that each library in the shared cache points
 * separately at a single, unified (large!) __LINKEDIT segment; these
 * duplicates get removed here too.
 *
 * Assumes the subregion array is sorted by address!
 */
static void
eliminate_duplicate_subregions(struct region *r)
{
    unsigned i = 1;
    while (i < r->r_nsubregions) {
        struct subregion *s0 = r->r_subregions[i-1];
        struct subregion *s1 = r->r_subregions[i];

        if (S_ADDR(s0) != S_ADDR(s1) || S_SIZE(s0) != S_SIZE(s1)) {
            i++;
            continue;
        }
        if (memcmp(&s0->s_segcmd, &s1->s_segcmd, sizeof (s0->s_segcmd)) != 0) {
            i++;
            continue;
        }
        if (OPTIONS_DEBUG(opt, 3))
            printr(r, "eliding duplicate %s subregion (%llx-%llx) file %s\n",
                   S_MACHO_TYPE(s1), S_ADDR(s1), S_ENDADDR(s1), S_FILENAME(s1));
        /* If the duplicate subregions aren't mapping the same file (?), forget the name */
        if (s0->s_libent != s1->s_libent)
            s0->s_libent = s1->s_libent = NULL;
        elide_subregion(r, i);
    }
}

/*
 * See if any of the dyld information we have can better describe this
 * region of the target address space.
 */
walk_return_t
decorate_memory_region(struct region *r, void *arg)
{
    if (r->r_inzfodregion || r->r_incommregion)
        return WALK_CONTINUE;

    const dyld_process_info dpi = arg;

    __block walk_return_t retval = WALK_CONTINUE;
    __block subregionlisthead_t srlhead = STAILQ_HEAD_INITIALIZER(srlhead);

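    /*
     * Ask dyld about every image it knows of in the target; for each
     * one whose address range overlaps this region, collect subregions
     * for its overlapping segments.
     */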
    _dyld_process_info_for_each_image(dpi, ^(uint64_t __unused mhaddr, const uuid_t uuid, __unused const char *path) {
        if (WALK_CONTINUE == retval) {
            const struct libent *le = libent_lookup_byuuid(uuid);
            assert(le->le_mhaddr == mhaddr);
            bool shouldskip = false;
            if (V_SIZE(&le->le_vr))
                shouldskip = (R_ENDADDR(r) < V_ADDR(&le->le_vr) ||
                              R_ADDR(r) > V_ENDADDR(&le->le_vr));
            if (!shouldskip)
                retval = add_subregions_for_libent(&srlhead, r, le->le_mh, le->le_mhaddr, le);
        }
    });
    if (WALK_CONTINUE != retval)
        goto done;

    /*
     * Take the unsorted list of subregions, if any,
     * and hang a sorted array of ranges on the region structure.
     */
    if (!STAILQ_EMPTY(&srlhead)) {
        struct subregionlist *srl;
        STAILQ_FOREACH(srl, &srlhead, srl_linkage) {
            r->r_nsubregions++;
        }
        assert(r->r_nsubregions);

        r->r_subregions = calloc(r->r_nsubregions, sizeof (void *));
        unsigned i = 0;
        STAILQ_FOREACH(srl, &srlhead, srl_linkage) {
            r->r_subregions[i++] = srl->srl_s;
        }
        qsort_b(r->r_subregions, r->r_nsubregions, sizeof (void *),
            ^(const void *a, const void *b) {
                const struct subregion *lhs = *(struct subregion **)a;
                const struct subregion *rhs = *(struct subregion **)b;
                if (S_ADDR(lhs) > S_ADDR(rhs))
                    return 1;
                if (S_ADDR(lhs) < S_ADDR(rhs))
                    return -1;
                return 0;
            });

        eliminate_duplicate_subregions(r);

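        /*
         * Only regions backed by an external (file) pager are
         * candidates for turning subregions into file references.
         */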
        if (r->r_info.external_pager) {
            /*
             * Only very specific segment types get to be filerefs
             */
            for (i = 0; i < r->r_nsubregions; i++) {
                struct subregion *s = r->r_subregions[i];
                /*
                 * Anything marked writable is trivially disqualified; we're
                 * going to copy it anyway.
                 */
                if (s->s_segcmd.initprot & VM_PROT_WRITE)
                    continue;

                /* __TEXT and __LINKEDIT are our real targets */
                if (!issubregiontype(s, SEG_TEXT) && !issubregiontype(s, SEG_LINKEDIT) && !issubregiontype(s, "__UNICODE")) {
                    if (OPTIONS_DEBUG(opt, 3)) {
                        hsize_str_t hstr;
                        printvr(S_RANGE(s), "skipping read-only %s segment %s\n", S_MACHO_TYPE(s), str_hsize(hstr, S_SIZE(s)));
                    }
                    continue;
                }
                if (r->r_insharedregion) {
                    /*
                     * Part of the shared region: things get more complicated.
                     */
                    if (r->r_fileref) {
                        /*
                         * There's a file reference here for the whole region.
                         * For __TEXT subregions, we could, in principle (though
                         * see below) generate references to the individual
                         * dylibs that dyld reports in the region.  If the
                         * debugger could then use the __LINKEDIT info in the
                         * file, then we'd be done.  But as long as the dump
                         * includes __LINKEDIT sections, we're going to
                         * end up generating a file reference to the combined
                         * __LINKEDIT section in the shared cache anyway, so
                         * we might as well do that for the __TEXT regions as
                         * well.
                         */
                        s->s_libent = r->r_fileref->fr_libent;
                        s->s_isuuidref = true;
                    } else {
                        /*
                         * If we get here, it's likely that the shared cache
                         * name can't be found, e.g. after
                         * update_dyld_shared_cache(1) has run.  For __TEXT
                         * subregions, we could generate refs to the
                         * individual dylibs, but note that the mach header
                         * and segment commands in memory are still pointing
                         * into the shared cache, so any act of reconstruction
                         * is fiendishly complex.  So copy it.
                         */
                        assert(!s->s_isuuidref);
                    }
                } else {
                    /* Just a regular dylib? */
                    if (s->s_libent)
                        s->s_isuuidref = true;
                }
            }
        }
    }
    assert(WALK_CONTINUE == retval);

done:
    if (!STAILQ_EMPTY(&srlhead)) {
        struct subregionlist *srl, *trl;
        STAILQ_FOREACH_SAFE(srl, &srlhead, srl_linkage, trl) {
            free(srl);
        }
    }
    return retval;
}

/*
 * Strip region of all decoration
 *
 * Invoked (on every region!) after an error during the initial
 * 'decoration' phase to discard potentially incomplete information.
 */
walk_return_t
undecorate_memory_region(struct region *r, __unused void *arg)
{
    assert(&sparse_ops != r->r_op);
    return r->r_nsubregions ? clean_subregions(r) : WALK_CONTINUE;
}

/*
 * This optimization occurs -after- the vanilla_region_optimizations(),
 * and -after- we've tagged zfod and first-pass filerefs.
 */
walk_return_t
sparse_region_optimization(struct region *r, __unused void *arg)
{
    assert(&sparse_ops != r->r_op);

    if (r->r_inzfodregion) {
        /*
         * Pure zfod region: almost certainly a more compact
         * representation - keep it that way.
         */
        if (OPTIONS_DEBUG(opt, 3))
            printr(r, "retaining zfod region\n");
        assert(&zfod_ops == r->r_op);
        return clean_subregions(r);
    }

    if (r->r_insharedregion && 0 == r->r_nsubregions) {
        /*
         * A segment in the shared region needs to be
         * identified with an LC_SEGMENT that dyld claims,
         * otherwise (we assert) it's not useful to the dump.
         */
        if (OPTIONS_DEBUG(opt, 2)) {
            hsize_str_t hstr;
            printr(r, "not referenced in dyld info => "
                   "eliding %s range in shared region\n",
                   str_hsize(hstr, R_SIZE(r)));
        }
        if (0 == r->r_info.pages_dirtied && 0 == r->r_info.pages_swapped_out)
            return WALK_DELETE_REGION;
        if (OPTIONS_DEBUG(opt, 2)) {
            hsize_str_t hstr;
            printr(r, "dirty pages, but not referenced in dyld info => "
                   "NOT eliding %s range in shared region\n",
                   str_hsize(hstr, R_SIZE(r)));
        }
    }

    if (r->r_fileref) {
        /*
         * Already have a fileref for the whole region: already
         * a more compact representation - keep it that way.
         */
        if (OPTIONS_DEBUG(opt, 3))
            printr(r, "retaining fileref region\n");
        assert(&fileref_ops == r->r_op);
        return clean_subregions(r);
    }

    if (r->r_nsubregions > 1) {
        /*
         * Merge adjacent or identical subregions that have no file reference.
         * (Reducing the number of subregions reduces header overhead and
         * improves compressibility.)
         */
        unsigned i = 1;
        while (i < r->r_nsubregions) {
            struct subregion *s0 = r->r_subregions[i-1];
            struct subregion *s1 = r->r_subregions[i];

            if (s0->s_isuuidref) {
                i++;
                continue; /* => destined to be a fileref */
            }
            if (!issamesubregiontype(s0, s1)) {
                i++;
                continue; /* merge-able subregions must have same "type" */
            }

            if (S_ENDADDR(s0) == S_ADDR(s1)) {
                /* directly adjacent subregions */
                if (OPTIONS_DEBUG(opt, 2))
                    printr(r, "merging subregions (%llx-%llx + %llx-%llx) -- adjacent\n",
                           S_ADDR(s0), S_ENDADDR(s0), S_ADDR(s1), S_ENDADDR(s1));
                S_SETSIZE(s0, S_ENDADDR(s1) - S_ADDR(s0));
                elide_subregion(r, i);
                continue;
            }

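            /*
             * Compute the first and last host page frame numbers touched
             * by each subregion so we can detect page sharing and pagewise
             * adjacency.  For example, with 4KiB pages, subregions at
             * 0x1000-0x1800 and 0x1800-0x2000 both fall entirely within
             * pfn 1, so the "same page" test below merges them.
             */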
            const mach_vm_size_t pfn[2] = {
                S_ADDR(s0) >> pageshift_host,
                S_ADDR(s1) >> pageshift_host
            };
            const mach_vm_size_t endpfn[2] = {
                (S_ENDADDR(s0) - 1) >> pageshift_host,
                (S_ENDADDR(s1) - 1) >> pageshift_host
            };

            if (pfn[0] == pfn[1] && pfn[0] == endpfn[0] && pfn[0] == endpfn[1]) {
                /* two small subregions share a host page */
                if (OPTIONS_DEBUG(opt, 2))
                    printr(r, "merging subregions (%llx-%llx + %llx-%llx) -- same page\n",
                           S_ADDR(s0), S_ENDADDR(s0), S_ADDR(s1), S_ENDADDR(s1));
                S_SETSIZE(s0, S_ENDADDR(s1) - S_ADDR(s0));
                elide_subregion(r, i);
                continue;
            }

            if (pfn[1] == 1 + endpfn[0]) {
                /* subregions are pagewise-adjacent: bigger chunks to compress */
                if (OPTIONS_DEBUG(opt, 2))
                    printr(r, "merging subregions (%llx-%llx + %llx-%llx) -- adjacent pages\n",
                           S_ADDR(s0), S_ENDADDR(s0), S_ADDR(s1), S_ENDADDR(s1));
                S_SETSIZE(s0, S_ENDADDR(s1) - S_ADDR(s0));
                elide_subregion(r, i);
                continue;
            }

            i++; /* this isn't the subregion we're looking for */
        }
    }

    if (1 == r->r_nsubregions) {
        struct subregion *s = r->r_subregions[0];
        if (!s->s_isuuidref &&
            R_ADDR(r) == S_ADDR(s) && R_ENDADDR(r) == S_ENDADDR(s)) {
            if (OPTIONS_DEBUG(opt, 3))
                printr(r, "subregion (%llx-%llx) reverts to region\n",
                       S_ADDR(s), S_ENDADDR(s));
            return clean_subregions(r);
        }
    }

    if (r->r_nsubregions)
        r->r_op = &sparse_ops;

    return WALK_CONTINUE;
}