/* gcore.tproj/sparse.c — apple/system_cmds (system_cmds-735.20.1) */
1/*
2 * Copyright (c) 2016 Apple Inc. All rights reserved.
3 */
4
5#include "options.h"
6#include "vm.h"
7#include "region.h"
8#include "utils.h"
9#include "dyld.h"
10#include "threads.h"
11#include "sparse.h"
12#include "vanilla.h"
13#include "corefile.h"
14
15#include <sys/types.h>
16#include <sys/sysctl.h>
17#include <sys/stat.h>
18#include <sys/mman.h>
19#include <libproc.h>
20
21#include <stdio.h>
22#include <string.h>
23#include <strings.h>
24#include <stdlib.h>
25#include <stdarg.h>
26#include <signal.h>
27#include <unistd.h>
28#include <errno.h>
29#include <ctype.h>
30#include <fcntl.h>
31#include <assert.h>
32
33#include <mach/mach.h>
34
35static struct subregion *
36new_subregion(
37 const mach_vm_offset_t vmaddr,
38 const mach_vm_offset_t vmsize,
39 const native_segment_command_t *sc,
40 const struct libent *le)
41{
42 struct subregion *s = malloc(sizeof (*s));
43
44 assert(vmaddr != 0 && vmsize != 0);
45 assert(vmaddr < vmaddr + vmsize);
46 s->s_segcmd = *sc;
47
48 S_SETADDR(s, vmaddr);
49 S_SETSIZE(s, vmsize);
50
51 s->s_libent = le;
52 s->s_isfileref = false;
53 return s;
54}
55
56static void
57del_subregion(struct subregion *s)
58{
59 poison(s, 0xfacefac1, sizeof (*s));
60 free(s);
61}
62
63static walk_return_t
64clean_subregions(struct region *r)
65{
66 for (unsigned i = 0; i < r->r_nsubregions; i++)
67 del_subregion(r->r_subregions[i]);
68 poison(r->r_subregions, 0xfac1fac1, sizeof (r->r_subregions[0]) * r->r_nsubregions);
69 free(r->r_subregions);
70 r->r_nsubregions = 0;
71 return WALK_CONTINUE;
72}
73
74void
75del_sparse_region(struct region *r)
76{
77 clean_subregions(r);
78 poison(r, 0xcafecaff, sizeof (*r));
79 free(r);
80}
81
82#define NULLsc ((native_segment_command_t *)0)
83
84static bool
85issamesubregiontype(const struct subregion *s0, const struct subregion *s1) {
86 return 0 == strncmp(S_MACHO_TYPE(s0), S_MACHO_TYPE(s1), sizeof (NULLsc->segname));
87}
88
89bool
90issubregiontype(const struct subregion *s, const char *sctype) {
91 return 0 == strncmp(S_MACHO_TYPE(s), sctype, sizeof (NULLsc->segname));
92}
93
94static void
95elide_subregion(struct region *r, unsigned ind)
96{
97 del_subregion(r->r_subregions[ind]);
98 for (unsigned j = ind; j < r->r_nsubregions - 1; j++)
99 r->r_subregions[j] = r->r_subregions[j+1];
100 assert(r->r_nsubregions != 0);
101 r->r_subregions[--r->r_nsubregions] = NULL;
102}
103
/*
 * Transient singly-linked list node used to accumulate subregions while
 * walking dyld images; the list is later flattened into the region's
 * sorted r_subregions array (see decorate_memory_region()).
 */
struct subregionlist {
	STAILQ_ENTRY(subregionlist) srl_linkage;
	struct subregion *srl_s;	/* subregion carried by this node */
};
typedef STAILQ_HEAD(, subregionlist) subregionlisthead_t;
109
/*
 * Walk the load commands of the Mach-O image 'mh' (mapped in the target at
 * 'mh_taddr') and, for every LC_SEGMENT whose slid address range overlaps
 * region 'r', prepend a new subregion (trimmed to the region's bounds) to
 * the list 'srlh'.
 *
 * Returns WALK_ERROR if the first non-pagezero segment is not __TEXT
 * (the slide cannot be computed), otherwise WALK_CONTINUE.
 */
static walk_return_t
add_subregions_for_libent(
	subregionlisthead_t *srlh,
	const struct region *r,
	const native_mach_header_t *mh,
	const mach_vm_offset_t mh_taddr,
	const struct libent *le)
{
	/* Load commands immediately follow the mach header. */
	const struct load_command *lc = (const void *)(mh + 1);
	/* Sentinel: the image's slide is unknown until __TEXT is seen. */
	mach_vm_offset_t scoffset = MACH_VM_MAX_ADDRESS;

	for (unsigned n = 0; n < mh->ncmds; n++) {

		const native_segment_command_t *sc;

		switch (lc->cmd) {
		case NATIVE_LC_SEGMENT:
			sc = (const void *)lc;

			/* NUL-terminated copy: segname need not be terminated. */
			char scsegname[17];
			strlcpy(scsegname, sc->segname, sizeof (scsegname));

			/* __PAGEZERO is never mapped; skip it. */
			if (0 == sc->vmaddr &&
			    strcmp(scsegname, SEG_PAGEZERO) == 0)
				break;

			/* -Depends- on finding a __TEXT segment first! */

			if (MACH_VM_MAX_ADDRESS == scoffset) {
				if (strcmp(scsegname, SEG_TEXT) == 0)
					scoffset = mh_taddr - sc->vmaddr;	/* the slide */
				else {
					/*
					 * Treat as error - don't want a partial description
					 * to cause something to be omitted from the dump.
					 */
					printr(r, "expected %s segment, found %s segment\n", SEG_TEXT, scsegname);
					return WALK_ERROR;
				}
			}

			/* Eliminate non-overlapping sections first */

			if (R_ENDADDR(r) - 1 < sc->vmaddr + scoffset)
				break;	/* segment entirely above the region */
			if (sc->vmaddr + scoffset + sc->vmsize - 1 < R_ADDR(r))
				break;	/* segment entirely below the region */
			/*
			 * Some part of this segment is in the region.
			 * Trim the edges in the case where we span regions.
			 */
			mach_vm_offset_t loaddr = sc->vmaddr + scoffset;
			mach_vm_offset_t hiaddr = loaddr + sc->vmsize;
			if (loaddr < R_ADDR(r))
				loaddr = R_ADDR(r);
			if (hiaddr > R_ENDADDR(r))
				hiaddr = R_ENDADDR(r);

			/* NOTE(review): calloc result is used unchecked — crashes on OOM. */
			struct subregionlist *srl = calloc(1, sizeof (*srl));
			struct subregion *s = new_subregion(loaddr, hiaddr - loaddr, sc, le);
			assert(sc->fileoff >= 0);
			srl->srl_s = s;
			STAILQ_INSERT_HEAD(srlh, srl, srl_linkage);

			if (opt->debug > 3) {
				hsize_str_t hstr;
				printr(r, "subregion %llx-%llx %7s %12s\t%s [%x/%x off %zd for %zd nsects %u flags %x]\n",
				       S_ADDR(s), S_ENDADDR(s),
				       str_hsize(hstr, S_SIZE(s)),
				       scsegname,
				       S_FILENAME(s),
				       sc->initprot, sc->maxprot,
				       sc->fileoff, sc->filesize,
				       sc->nsects, sc->flags);
			}
			break;
		default:
			break;
		}
		/* Advance to the next load command; a zero cmdsize would loop forever. */
		if (lc->cmdsize)
			lc = (const void *)((caddr_t)lc + lc->cmdsize);
		else
			break;
	}
	return WALK_CONTINUE;
}
196
/*
 * Because we aggregate information from multiple sources, there may
 * be duplicate subregions. Eliminate them here.
 *
 * Note that each library in the shared cache points
 * separately at a single, unified (large!) __LINKEDIT section; these
 * get removed here too.
 *
 * Assumes the subregion array is sorted by address!
 */
207static void
208eliminate_duplicate_subregions(struct region *r)
209{
210 unsigned i = 1;
211 while (i < r->r_nsubregions) {
212 struct subregion *s0 = r->r_subregions[i-1];
213 struct subregion *s1 = r->r_subregions[i];
214
215 if (S_ADDR(s0) != S_ADDR(s1) || S_SIZE(s0) != S_SIZE(s1)) {
216 i++;
217 continue;
218 }
219 if (memcmp(&s0->s_segcmd, &s1->s_segcmd, sizeof (s0->s_segcmd)) != 0) {
220 i++;
221 continue;
222 }
223 if (opt->debug)
224 printr(r, "eliding duplicate %s subregion (%llx-%llx) file %s\n",
225 S_MACHO_TYPE(s1), S_ADDR(s1), S_ENDADDR(s1), S_FILENAME(s1));
226 /* If the duplicate subregions aren't mapping the same file (?), forget the name */
227 if (s0->s_libent != s1->s_libent)
228 s0->s_libent = s1->s_libent = NULL;
229 elide_subregion(r, i);
230 }
231}
232
233/*
234 * See if any of the dyld information we have can better describe this
235 * region of the target address space.
236 */
237walk_return_t
238decorate_memory_region(struct region *r, void *arg)
239{
240 const dyld_process_info dpi = arg;
241
242 __block walk_return_t retval = WALK_CONTINUE;
243 __block subregionlisthead_t srlhead = STAILQ_HEAD_INITIALIZER(srlhead);
244
245 _dyld_process_info_for_each_image(dpi, ^(uint64_t mhaddr, const uuid_t uuid, __unused const char *path) {
246 if (WALK_CONTINUE == retval) {
247 const struct libent *le = libent_lookup_byuuid(uuid);
248 assert(le->le_mhaddr == mhaddr);
249 /*
250 * Core dumps conventionally contain the whole executable, but we're trying
251 * to elide everything that can't be found in a file elsewhere.
252 */
253#if 0
254 if (MH_EXECUTE == le->le_mh->filetype)
255 return; // cause the whole a.out to be emitted
256#endif
257 retval = add_subregions_for_libent(&srlhead, r, le->le_mh, le->le_mhaddr, le);
258 }
259 });
260 if (WALK_CONTINUE != retval)
261 goto done;
262
263 /*
264 * Take the unsorted list of subregions, if any,
265 * and hang a sorted array of ranges on the region structure.
266 */
267 if (!STAILQ_EMPTY(&srlhead)) {
268 struct subregionlist *srl;
269 STAILQ_FOREACH(srl, &srlhead, srl_linkage) {
270 r->r_nsubregions++;
271 }
272 assert(r->r_nsubregions);
273
274 r->r_subregions = calloc(r->r_nsubregions, sizeof (void *));
275 unsigned i = 0;
276 STAILQ_FOREACH(srl, &srlhead, srl_linkage) {
277 r->r_subregions[i++] = srl->srl_s;
278 }
279 qsort_b(r->r_subregions, r->r_nsubregions, sizeof (void *),
280 ^(const void *a, const void *b) {
281 const struct subregion *lhs = *(struct subregion **)a;
282 const struct subregion *rhs = *(struct subregion **)b;
283 if (S_ADDR(lhs) > S_ADDR(rhs))
284 return 1;
285 if (S_ADDR(lhs) < S_ADDR(rhs))
286 return -1;
287 return 0;
288 });
289
290 eliminate_duplicate_subregions(r);
291
292 const struct libent *lesc = NULL; /* libent ref for shared cache */
293 if (r->r_insharedregion) {
294 uuid_t uusc;
295 if (get_sc_uuid(dpi, uusc)) {
296 lesc = libent_lookup_byuuid(uusc);
297 assert(NULL == lesc->le_mh && 0 == lesc->le_mhaddr);
298 }
299 }
300
301 /*
302 * Only very specific segment types get to be filerefs
303 */
304 for (i = 0; i < r->r_nsubregions; i++) {
305 struct subregion *s = r->r_subregions[i];
306 /*
307 * Anything writable is trivially disqualified
308 */
309 if (s->s_segcmd.initprot & VM_PROT_WRITE)
310 continue;
311 /*
312 * As long as there's a filename, __TEXT and __LINKEDIT
313 * end up as a file reference.
314 *
315 * __LINKEDIT is more complicated: the segment commands point
316 * at a unified segment in the shared cache mapping.
317 * Ditto for __UNICODE(?)
318 */
319 if (issubregiontype(s, SEG_TEXT)) {
320 /* fall through */;
321 } else if (issubregiontype(s, SEG_LINKEDIT)) {
322 if (r->r_insharedregion)
323 s->s_libent = lesc;
324 } else if (issubregiontype(s, "__UNICODE")) {
325 if (r->r_insharedregion)
326 s->s_libent = lesc;
327 } else
328 continue;
329
330 if (s->s_libent)
331 s->s_isfileref = true;
332 }
333 }
334 assert(WALK_CONTINUE == retval);
335
336done:
337 if (!STAILQ_EMPTY(&srlhead)) {
338 struct subregionlist *srl, *trl;
339 STAILQ_FOREACH_SAFE(srl, &srlhead, srl_linkage, trl) {
340 free(srl);
341 }
342 }
343 return retval;
344}
345
/*
 * Strip region of all decoration
 *
 * Invoked (on every region!) after an error during the initial
 * 'decoration' phase to discard potentially incomplete
 * information.
 */
353walk_return_t
354undecorate_memory_region(struct region *r, __unused void *arg)
355{
356 assert(&sparse_ops != r->r_op);
357 return r->r_nsubregions ? clean_subregions(r) : WALK_CONTINUE;
358}
359
/*
 * This optimization occurs -after- the vanilla_region_optimizations(),
 * and -after- we've tagged zfod and first-pass fileref's.
 *
 * Decides whether a region's dyld-derived subregions are worth keeping:
 * drops decoration on regions with a better representation, deletes
 * unidentified shared-region ranges, and merges adjacent/overlapping
 * subregions to reduce per-segment overhead in the core file.
 */
walk_return_t
sparse_region_optimization(struct region *r, __unused void *arg)
{
	assert(&sparse_ops != r->r_op);

	if (r->r_inzfodregion) {
		/*
		 * Pure zfod region: almost certainly a more compact
		 * representation - keep it that way.
		 */
		assert(&zfod_ops == r->r_op);
		return clean_subregions(r);
	}

#ifdef CONFIG_REFSC
	if (r->r_fileref) {
		/*
		 * Already have a fileref for the whole region: almost
		 * certainly a more compact representation - keep
		 * it that way.
		 */
		assert(&fileref_ops == r->r_op);
		return clean_subregions(r);
	}
#endif

	if (r->r_insharedregion && 0 == r->r_nsubregions) {
		/*
		 * A segment in the shared region needs to be
		 * identified with an LC_SEGMENT that dyld claims,
		 * otherwise (we assert) it's not useful to the dump.
		 */
		if (opt->debug) {
			hsize_str_t hstr;
			printr(r, "not referenced in dyld info => "
			       "eliding %s range in shared region\n",
			       str_hsize(hstr, R_SIZE(r)));
		}
		return WALK_DELETE_REGION;
	}

	if (r->r_nsubregions > 1) {
		/*
		 * Merge adjacent or identical subregions that have no file reference
		 * (Reducing the number of subregions reduces header overhead and
		 * improves compressability)
		 */
		unsigned i = 1;
		while (i < r->r_nsubregions) {
			struct subregion *s0 = r->r_subregions[i-1];
			struct subregion *s1 = r->r_subregions[i];

			if (s0->s_isfileref) {
				i++;
				continue; /* => destined to be a fileref */
			}
			if (!issamesubregiontype(s0, s1)) {
				i++;
				continue; /* merge-able subregions must have same "type" */
			}

			/* Case 1: s1 begins exactly where s0 ends. */
			if (S_ENDADDR(s0) == S_ADDR(s1)) {
				/* directly adjacent subregions */
#if 1
				if (opt->debug)
					printr(r, "merging subregions (%llx-%llx + %llx-%llx) -- adjacent\n",
					       S_ADDR(s0), S_ENDADDR(s0), S_ADDR(s1), S_ENDADDR(s1));
#endif
				/* Grow s0 to cover both, then drop s1; re-test index i. */
				S_SETSIZE(s0, S_ENDADDR(s1) - S_ADDR(s0));
				elide_subregion(r, i);
				continue;
			}

			/* First/last host-page-frame numbers of each subregion. */
			const mach_vm_size_t pfn[2] = {
				S_ADDR(s0) >> pageshift_host,
				S_ADDR(s1) >> pageshift_host
			};
			const mach_vm_size_t endpfn[2] = {
				(S_ENDADDR(s0) - 1) >> pageshift_host,
				(S_ENDADDR(s1) - 1) >> pageshift_host
			};

			/* Case 2: both subregions fit inside one host page. */
			if (pfn[0] == pfn[1] && pfn[0] == endpfn[0] && pfn[0] == endpfn[1]) {
				/* two small subregions share a host page */
#if 1
				if (opt->debug)
					printr(r, "merging subregions (%llx-%llx + %llx-%llx) -- same page\n",
					       S_ADDR(s0), S_ENDADDR(s0), S_ADDR(s1), S_ENDADDR(s1));
#endif
				S_SETSIZE(s0, S_ENDADDR(s1) - S_ADDR(s0));
				elide_subregion(r, i);
				continue;
			}

			/* Case 3: s1 starts on the page right after s0's last page. */
			if (pfn[1] == 1 + endpfn[0]) {
				/* subregions are pagewise-adjacent: bigger chunks to compress */
#if 1
				if (opt->debug)
					printr(r, "merging subregions (%llx-%llx + %llx-%llx) -- adjacent pages\n",
					       S_ADDR(s0), S_ENDADDR(s0), S_ADDR(s1), S_ENDADDR(s1));
#endif
				S_SETSIZE(s0, S_ENDADDR(s1) - S_ADDR(s0));
				elide_subregion(r, i);
				continue;
			}

			i++; /* this isn't the subregion we're looking for */
		}
	}

	/* Any surviving subregions promote this region to the sparse dumper. */
	if (r->r_nsubregions)
		r->r_op = &sparse_ops;

	return WALK_CONTINUE;
}