1 /*
2 * Copyright (c) 2016-2018 Apple Inc. All rights reserved.
3 */
4
5 #include "options.h"
6 #include "corefile.h"
7 #include "sparse.h"
8 #include "utils.h"
9 #include "vm.h"
10
11 #include <stdio.h>
12 #include <stdlib.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <unistd.h>
16 #include <errno.h>
17 #include <assert.h>
18 #include <compression.h>
19 #include <sys/param.h>
20 #include <libgen.h>
21 #include <sys/stat.h>
22
23 native_mach_header_t *
24 make_corefile_mach_header(void *data)
25 {
26 native_mach_header_t *mh = data;
27 mh->magic = NATIVE_MH_MAGIC;
28 mh->filetype = MH_CORE;
29 #if defined(__LP64__)
30 const int is64 = 1;
31 #else
32 const int is64 = 0;
33 #endif
34 #if defined(__i386__) || defined(__x86_64__)
35 mh->cputype = is64 ? CPU_TYPE_X86_64 : CPU_TYPE_I386;
36 mh->cpusubtype = is64 ? CPU_SUBTYPE_X86_64_ALL : CPU_SUBTYPE_I386_ALL;
37 #elif defined(__arm__) || defined(__arm64__)
38 mh->cputype = is64 ? CPU_TYPE_ARM64 : CPU_TYPE_ARM;
39 mh->cpusubtype = is64 ? CPU_SUBTYPE_ARM64_ALL : CPU_SUBTYPE_ARM_ALL;
40 #else
41 #error undefined
42 #endif
43 return mh;
44 }
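/*
 * Illustrative sketch of the core file this header begins (layout inferred
 * from the commands built in this file, not a verbatim spec):
 *
 *   +------------------------------+  offset 0
 *   | native_mach_header_t         |  MH_CORE, cputype/cpusubtype as above
 *   +------------------------------+
 *   | load commands                |  proto_LC_COREINFO, then one
 *   |                              |  LC_SEGMENT / proto_LC_COREDATA /
 *   |                              |  proto_LC_FILEREF per region
 *   +------------------------------+
 *   | segment payloads             |  located via each command's
 *   |                              |  fileoff / filesize
 *   +------------------------------+
 *
 * ncmds and sizeofcmds start out zero and are bumped via
 * mach_header_inc_ncmds() / mach_header_inc_sizeofcmds() as each command
 * is appended (see commit_load_command() below).
 */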
45
46 struct proto_coreinfo_command *
47 make_coreinfo_command(native_mach_header_t *mh, void *data, const uuid_t aoutid, uint64_t address, uint64_t dyninfo)
48 {
49 struct proto_coreinfo_command *cc = data;
50 cc->cmd = proto_LC_COREINFO;
51 cc->cmdsize = sizeof (*cc);
52 cc->version = 1;
53 cc->type = proto_CORETYPE_USER;
54 cc->pageshift = (uint16_t)pageshift_host;
55 cc->address = address;
56 uuid_copy(cc->uuid, aoutid);
57 cc->dyninfo = dyninfo;
58 mach_header_inc_ncmds(mh, 1);
59 mach_header_inc_sizeofcmds(mh, cc->cmdsize);
60 return cc;
61 }
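/*
 * Illustrative consumer-side sketch (not from this source tree): a reader
 * of the core file finds this command with the usual Mach-O walk.  Judging
 * by the parameter names, address/aoutid identify the main executable and
 * dyninfo locates dyld's image info.
 *
 *   const struct load_command *lc = (const void *)(mh + 1);
 *   for (uint32_t i = 0; i < mh->ncmds; i++) {
 *       if (proto_LC_COREINFO == lc->cmd) {
 *           const struct proto_coreinfo_command *ci = (const void *)lc;
 *           // ci->address, ci->uuid and ci->dyninfo are now available
 *           break;
 *       }
 *       lc = (const void *)((const char *)lc + lc->cmdsize);
 *   }
 */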
62
63 native_segment_command_t *
64 make_native_segment_command(void *data, const struct vm_range *vr, const struct file_range *fr, vm_prot_t maxprot, vm_prot_t initprot)
65 {
66 native_segment_command_t *sc = data;
67 sc->cmd = NATIVE_LC_SEGMENT;
68 sc->cmdsize = sizeof (*sc);
69 assert(V_SIZE(vr));
70 sc->vmaddr = (unsigned long)V_ADDR(vr);
71 sc->vmsize = (unsigned long)V_SIZE(vr);
72 sc->fileoff = (unsigned long)F_OFF(fr);
73 sc->filesize = (unsigned long)F_SIZE(fr);
74 sc->maxprot = maxprot;
75 sc->initprot = initprot;
76 sc->nsects = 0;
77 sc->flags = 0;
78 return sc;
79 }
80
81 static struct proto_coredata_command *
82 make_coredata_command(void *data, const struct vm_range *vr, const struct file_range *fr, const vm_region_submap_info_data_64_t *info, unsigned comptype, unsigned purgable)
83 {
84 struct proto_coredata_command *cc = data;
85 cc->cmd = proto_LC_COREDATA;
86 cc->cmdsize = sizeof (*cc);
87 assert(V_SIZE(vr));
88 cc->vmaddr = V_ADDR(vr);
89 cc->vmsize = V_SIZE(vr);
90 cc->fileoff = F_OFF(fr);
91 cc->filesize = F_SIZE(fr);
92 cc->maxprot = info->max_protection;
93 cc->prot = info->protection;
94 cc->flags = COMP_MAKE_FLAGS(comptype);
95 cc->share_mode = info->share_mode;
96 assert(purgable <= UINT8_MAX);
97 cc->purgable = (uint8_t)purgable;
98 assert(info->user_tag <= UINT8_MAX);
99 cc->tag = (uint8_t)info->user_tag;
100 cc->extp = info->external_pager;
101 return cc;
102 }
103
104 static size_t
105 sizeof_segment_command(void) {
106 return opt->extended ?
107 sizeof (struct proto_coredata_command) : sizeof (native_segment_command_t);
108 }
109
110 static struct load_command *
111 make_segment_command(void *data, const struct vm_range *vr, const struct file_range *fr, const vm_region_submap_info_data_64_t *info, unsigned comptype, int purgable)
112 {
113 if (opt->extended)
114 make_coredata_command(data, vr, fr, info, comptype, purgable);
115 else
116 make_native_segment_command(data, vr, fr, info->max_protection, info->protection);
117 return data;
118 }
119
120 /*
121 * Increment the mach-o header data when we succeed
122 */
123 static void
124 commit_load_command(struct write_segment_data *wsd, const struct load_command *lc)
125 {
126 wsd->wsd_lc = (caddr_t)lc + lc->cmdsize;
127 native_mach_header_t *mh = wsd->wsd_mh;
128 mach_header_inc_ncmds(mh, 1);
129 mach_header_inc_sizeofcmds(mh, lc->cmdsize);
130 }
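/*
 * Usage pattern, illustrated (compare write_zfod_region() and
 * write_memory_range() below): a command is staged at wsd->wsd_lc by one
 * of the make_*_command() helpers and only committed once any associated
 * payload has been written.
 *
 *   struct load_command *lc = (void *)wsd->wsd_lc;
 *   make_segment_command(lc, &vr, &fr, &r->r_info, 0, r->r_purgable);
 *   if (WALK_ERROR != pwrite_memory(wsd, addr, size, &vr))
 *       commit_load_command(wsd, lc);   // advance wsd_lc, bump ncmds
 *
 * A command that is never committed is not counted in ncmds/sizeofcmds and
 * so never becomes part of the image.
 */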
131
132 #pragma mark -- Regions written as "file references" --
133
134 static size_t
135 cmdsize_fileref_command(const char *nm)
136 {
137 size_t cmdsize = sizeof (struct proto_fileref_command);
138 size_t len;
139 if (0 != (len = strlen(nm))) {
140 len++; // NUL-terminated for mmap sanity
141 cmdsize += roundup(len, sizeof (long));
142 }
143 return cmdsize;
144 }
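/*
 * Worked example (illustrative): for a pathname such as
 * "/usr/lib/libz.1.dylib", strlen() is 21, the NUL brings it to 22, and
 * roundup(22, sizeof (long)) is 24 on LP64, so the command occupies
 * sizeof (struct proto_fileref_command) + 24 bytes.  The rounding keeps
 * the following load command naturally aligned.
 */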
145
146 static void
147 size_fileref_subregion(const struct subregion *s, struct size_core *sc)
148 {
149 assert(S_LIBENT(s));
150
151 size_t cmdsize = cmdsize_fileref_command(S_PATHNAME(s));
152 sc->headersize += cmdsize;
153 sc->count++;
154 sc->memsize += S_SIZE(s);
155 }
156
157 static void
158 size_fileref_region(const struct region *r, struct size_core *sc)
159 {
160 assert(0 == r->r_nsubregions);
161 assert(!r->r_inzfodregion);
162
163 size_t cmdsize = cmdsize_fileref_command(r->r_fileref->fr_pathname);
164 sc->headersize += cmdsize;
165 sc->count++;
166 sc->memsize += R_SIZE(r);
167 }
168
169 static struct proto_fileref_command *
170 make_fileref_command(void *data, const char *pathname, const uuid_t uuid,
171 const struct vm_range *vr, const struct file_range *fr,
172 const vm_region_submap_info_data_64_t *info, unsigned purgable)
173 {
174 struct proto_fileref_command *fc = data;
175 size_t len;
176
177 fc->cmd = proto_LC_FILEREF;
178 fc->cmdsize = sizeof (*fc);
179 if (0 != (len = strlen(pathname))) {
180 /*
181 * Strings live immediately after the
182 * command, and are included in the cmdsize
183 */
184 fc->filename.offset = sizeof (*fc);
185 void *s = fc + 1;
186 strlcpy(s, pathname, ++len); // NUL-terminated for mmap sanity
187 fc->cmdsize += roundup(len, sizeof (long));
188 assert(cmdsize_fileref_command(pathname) == fc->cmdsize);
189 }
190
191 /*
192 * A file reference allows different kinds of identifiers for
193 * the reference to be reconstructed.
194 */
195 assert(info->external_pager);
196
197 if (!uuid_is_null(uuid)) {
198 uuid_copy(fc->id, uuid);
199 fc->flags = FREF_MAKE_FLAGS(kFREF_ID_UUID);
200 } else {
201 struct stat st;
202 if (-1 != stat(pathname, &st) && 0 != st.st_mtimespec.tv_sec) {
203 /* "little-endian format timespec structure" */
204 struct timespec ts = st.st_mtimespec;
205 ts.tv_nsec = 0; // allow touch(1) to fix things
206 memset(fc->id, 0, sizeof(fc->id));
207 memcpy(fc->id, &ts, sizeof(ts));
208 fc->flags = FREF_MAKE_FLAGS(kFREF_ID_MTIMESPEC_LE);
209 } else
210 fc->flags = FREF_MAKE_FLAGS(kFREF_ID_NONE);
211 }
212
213 fc->vmaddr = V_ADDR(vr);
214 assert(V_SIZE(vr));
215 fc->vmsize = V_SIZE(vr);
216
217 assert(F_OFF(fr) >= 0);
218 fc->fileoff = F_OFF(fr);
219 fc->filesize = F_SIZE(fr);
220
221 assert(info->max_protection & VM_PROT_READ);
222 fc->maxprot = info->max_protection;
223 fc->prot = info->protection;
224
225 fc->share_mode = info->share_mode;
226 assert(purgable <= UINT8_MAX);
227 fc->purgable = (uint8_t)purgable;
228 assert(info->user_tag <= UINT8_MAX);
229 fc->tag = (uint8_t)info->user_tag;
230 fc->extp = info->external_pager;
231 return fc;
232 }
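/*
 * For reference, the identifier kinds emitted above are:
 *
 *   kFREF_ID_UUID          id[] holds the UUID of the mapped Mach-O
 *   kFREF_ID_MTIMESPEC_LE  id[] holds the file's st_mtimespec (tv_nsec
 *                          zeroed, per the comment above, so touch(1) can
 *                          fix things)
 *   kFREF_ID_NONE          no identity recorded; only the pathname is left
 *
 * The pathname always follows the command, so (per the comment preceding
 * write_fileref_subregion() below) a reader can re-map
 * fileoff..fileoff+filesize of that file at vmaddr instead of storing the
 * bytes in the core file itself.
 */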
233
234 /*
235 * It's almost always more efficient to write out a reference to the
236 * data than write out the data itself.
237 */
238 static walk_return_t
239 write_fileref_subregion(const struct region *r, const struct subregion *s, struct write_segment_data *wsd)
240 {
241 assert(S_LIBENT(s));
242 if (OPTIONS_DEBUG(opt, 1) && !issubregiontype(s, SEG_TEXT) && !issubregiontype(s, SEG_LINKEDIT))
243 printf("%s: unusual segment type %s from %s\n", __func__, S_MACHO_TYPE(s), S_FILENAME(s));
244 assert((r->r_info.max_protection & VM_PROT_READ) == VM_PROT_READ);
245 assert((r->r_info.protection & VM_PROT_WRITE) == 0);
246
247 const struct libent *le = S_LIBENT(s);
248 const struct file_range fr = {
249 .off = S_MACHO_FILEOFF(s),
250 .size = S_SIZE(s),
251 };
252 const struct proto_fileref_command *fc = make_fileref_command(wsd->wsd_lc, le->le_pathname, le->le_uuid, S_RANGE(s), &fr, &r->r_info, r->r_purgable);
253
254 commit_load_command(wsd, (const void *)fc);
255 if (OPTIONS_DEBUG(opt, 3)) {
256 hsize_str_t hstr;
257 printr(r, "ref '%s' %s (vm %llx-%llx, file offset %lld for %s)\n", S_FILENAME(s), S_MACHO_TYPE(s), (uint64_t)fc->vmaddr, (uint64_t)fc->vmaddr + fc->vmsize, (int64_t)fc->fileoff, str_hsize(hstr, fc->filesize));
258 }
259 return WALK_CONTINUE;
260 }
261
262 /*
263 * Note that we may be asked to write reference segments whose protections
264 * are rw- -- this -should- be ok as we don't convert the region to a file
265 * reference unless we know it hasn't been modified.
266 */
267 static walk_return_t
268 write_fileref_region(const struct region *r, struct write_segment_data *wsd)
269 {
270 assert(0 == r->r_nsubregions);
271 assert(r->r_info.user_tag != VM_MEMORY_IOKIT);
272 assert((r->r_info.max_protection & VM_PROT_READ) == VM_PROT_READ);
273 assert(!r->r_inzfodregion);
274
275 const struct libent *le = r->r_fileref->fr_libent;
276 const char *pathname = r->r_fileref->fr_pathname;
277 const struct file_range fr = {
278 .off = r->r_fileref->fr_offset,
279 .size = R_SIZE(r),
280 };
281 const struct proto_fileref_command *fc = make_fileref_command(wsd->wsd_lc, pathname, le ? le->le_uuid : UUID_NULL, R_RANGE(r), &fr, &r->r_info, r->r_purgable);
282
283 commit_load_command(wsd, (const void *)fc);
284 if (OPTIONS_DEBUG(opt, 3)) {
285 hsize_str_t hstr;
286 printr(r, "ref '%s' %s (vm %llx-%llx, file offset %lld for %s)\n", pathname, "(type?)", (uint64_t)fc->vmaddr, (uint64_t)fc->vmaddr + fc->vmsize, (int64_t)fc->fileoff, str_hsize(hstr, fc->filesize));
287 }
288 return WALK_CONTINUE;
289 }
290
291 const struct regionop fileref_ops = {
292 print_memory_region,
293 write_fileref_region,
294 del_fileref_region,
295 };
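/*
 * Every region carries one of these operation tables (see also zfod_ops,
 * sparse_ops and vanilla_ops below); region_write_memory() dispatches
 * through it via ROP_WRITE(), while region_size_memory() compares r_op
 * pointers to pick the matching size_*_region() helper.  A minimal sketch
 * of the presumed shape -- the real definitions live in the companion
 * headers, so the member names here are illustrative:
 *
 *   struct regionop {
 *       void (*rop_print)(const struct region *);
 *       walk_return_t (*rop_write)(const struct region *,
 *                                  struct write_segment_data *);
 *       void (*rop_delete)(struct region *);
 *   };
 *   #define ROP_WRITE(r, w)  (((r)->r_op->rop_write)((r), (w)))
 */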
296
297
298 #pragma mark -- ZFOD segments written only to the header --
299
300 static void
301 size_zfod_region(const struct region *r, struct size_core *sc)
302 {
303 assert(0 == r->r_nsubregions);
304 assert(r->r_inzfodregion);
305 sc->headersize += sizeof_segment_command();
306 sc->count++;
307 sc->memsize += R_SIZE(r);
308 }
309
310 static walk_return_t
311 write_zfod_region(const struct region *r, struct write_segment_data *wsd)
312 {
313 assert(r->r_info.user_tag != VM_MEMORY_IOKIT);
314 assert((r->r_info.max_protection & VM_PROT_READ) == VM_PROT_READ);
315
316 const struct file_range fr = {
317 .off = wsd->wsd_foffset,
318 .size = 0,
319 };
320 make_segment_command(wsd->wsd_lc, R_RANGE(r), &fr, &r->r_info, 0, VM_PURGABLE_EMPTY);
321 commit_load_command(wsd, wsd->wsd_lc);
322 return WALK_CONTINUE;
323 }
324
325 const struct regionop zfod_ops = {
326 print_memory_region,
327 write_zfod_region,
328 del_zfod_region,
329 };
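/*
 * Note: a ZFOD range costs only a load command in the core file.
 * make_segment_command() above is handed a file range of size 0, so the
 * command's vmsize describes the zero-filled extent while filesize stays 0
 * and no payload is written; a reader can recreate the range simply by
 * mapping zero-filled pages at vmaddr.
 */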
330
331 #pragma mark -- Regions containing data --
332
333 static walk_return_t
334 pwrite_memory(struct write_segment_data *wsd, const void *addr, size_t size, const struct vm_range *vr)
335 {
336 assert(size);
337
338 ssize_t nwritten;
339 const int error = bounded_pwrite(wsd->wsd_fd, addr, size, wsd->wsd_foffset, &wsd->wsd_nocache, &nwritten);
340
341 if (error || OPTIONS_DEBUG(opt, 3)) {
342 hsize_str_t hsz;
343 printvr(vr, "writing %ld bytes at offset %lld -> ", size, wsd->wsd_foffset);
344 if (error)
345 printf("err #%d - %s ", error, strerror(error));
346 else {
347 printf("%s ", str_hsize(hsz, nwritten));
348 if (size != (size_t)nwritten)
349 printf("[%zd - incomplete write!] ", nwritten);
350 else if (size != V_SIZE(vr))
351 printf("(%s in memory) ",
352 str_hsize(hsz, V_SIZE(vr)));
353 }
354 printf("\n");
355 }
356
357 walk_return_t step = WALK_CONTINUE;
358 switch (error) {
359 case 0:
360 if (size != (size_t)nwritten)
361 step = WALK_ERROR;
362 else {
363 wsd->wsd_foffset += nwritten;
364 wsd->wsd_nwritten += nwritten;
365 }
366 break;
367 case EFAULT: // transient mapping failure?
368 break;
369                 default:                // EROFS, ENOSPC, EFBIG etc.
370 step = WALK_ERROR;
371 break;
372 }
373 return step;
374 }
375
376
377 /*
378 * Write a contiguous range of memory into the core file.
379 * Apply compression, and chunk if necessary.
380 */
381 static int
382 segment_compflags(compression_algorithm ca, unsigned *algnum)
383 {
384 switch (ca) {
385 case COMPRESSION_LZ4:
386 *algnum = kCOMP_LZ4;
387 break;
388 case COMPRESSION_ZLIB:
389 *algnum = kCOMP_ZLIB;
390 break;
391 case COMPRESSION_LZMA:
392 *algnum = kCOMP_LZMA;
393 break;
394 case COMPRESSION_LZFSE:
395 *algnum = kCOMP_LZFSE;
396 break;
397 default:
398 err(EX_SOFTWARE, "unsupported compression algorithm %x", ca);
399 }
400 return 0;
401 }
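/*
 * Illustrative use (mirrors write_memory_range() below): the
 * libcompression algorithm selected in the options is translated into the
 * core file's own flag space before being recorded in the segment command.
 *
 *   unsigned algnum = 0;
 *   if (0 == segment_compflags(opt->calgorithm, &algnum))
 *       flags = COMP_MAKE_FLAGS(algnum);  // e.g. COMPRESSION_LZFSE -> kCOMP_LZFSE
 *
 * Since err() does not return, the error path above terminates the dump
 * rather than producing a core with an unknown compression type.
 */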
402
403 static bool
404 is_file_mapped_shared(const struct region *r)
405 {
406 if (r->r_info.external_pager)
407 switch (r->r_info.share_mode) {
408 case SM_TRUESHARED: // sm=shm
409 case SM_SHARED: // sm=ali
410 case SM_SHARED_ALIASED: // sm=s/a
411 return true;
412 default:
413 break;
414 }
415 return false;
416 }
417
418 static walk_return_t
419 map_memory_range(struct write_segment_data *wsd, const struct region *r, const struct vm_range *vr, struct vm_range *dp)
420 {
421 if (r->r_incommregion) {
422 /*
423 * Special case: for commpage access, copy from our own address space.
424 */
425 V_SETADDR(dp, 0);
426 V_SETSIZE(dp, V_SIZE(vr));
427
428 kern_return_t kr = mach_vm_allocate(mach_task_self(), &dp->addr, dp->size, VM_FLAGS_ANYWHERE);
429 if (KERN_SUCCESS != kr || 0 == dp->addr) {
430 err_mach(kr, r, "mach_vm_allocate c %llx-%llx", V_ADDR(vr), V_ENDADDR(vr));
431 print_one_memory_region(r);
432 return WALK_ERROR;
433 }
434 if (OPTIONS_DEBUG(opt, 3))
435 printr(r, "copying from self %llx-%llx\n", V_ADDR(vr), V_ENDADDR(vr));
436 memcpy((void *)dp->addr, (const void *)V_ADDR(vr), V_SIZE(vr));
437 return WALK_CONTINUE;
438 }
439
440 if (!r->r_insharedregion && 0 == (r->r_info.protection & VM_PROT_READ)) {
441 assert(0 != (r->r_info.max_protection & VM_PROT_READ)); // simple_region_optimization()
442
443 /*
444 * Special case: region that doesn't currently have read permission.
445 * (e.g. --x/r-x permissions with tag 64 - JS JIT generated code
446 * from com.apple.WebKit.WebContent)
447 */
448 const mach_vm_offset_t pagesize_host = 1u << pageshift_host;
449 if (OPTIONS_DEBUG(opt, 3))
450 printr(r, "unreadable (%s/%s), remap with read permission\n",
451 str_prot(r->r_info.protection), str_prot(r->r_info.max_protection));
452 V_SETADDR(dp, 0);
453 V_SETSIZE(dp, V_SIZE(vr));
454 vm_prot_t cprot, mprot;
455 kern_return_t kr = mach_vm_remap(mach_task_self(), &dp->addr, V_SIZE(dp), pagesize_host - 1, true, wsd->wsd_task, V_ADDR(vr), true, &cprot, &mprot, VM_INHERIT_NONE);
456 if (KERN_SUCCESS != kr) {
457 err_mach(kr, r, "mach_vm_remap() %llx-%llx", V_ADDR(vr), V_ENDADDR(vr));
458 return WALK_ERROR;
459 }
460 assert(r->r_info.protection == cprot && r->r_info.max_protection == mprot);
461 kr = mach_vm_protect(mach_task_self(), V_ADDR(dp), V_SIZE(dp), false, VM_PROT_READ);
462 if (KERN_SUCCESS != kr) {
463 err_mach(kr, r, "mach_vm_protect() %llx-%llx", V_ADDR(vr), V_ENDADDR(vr));
464 mach_vm_deallocate(mach_task_self(), V_ADDR(dp), V_SIZE(dp));
465 return WALK_ERROR;
466 }
467 return WALK_CONTINUE;
468 }
469
470 /*
471 * Most segments with data are read here
472 */
473 vm_offset_t data32 = 0;
474 mach_msg_type_number_t data32_count;
475 kern_return_t kr = mach_vm_read(wsd->wsd_task, V_ADDR(vr), V_SIZE(vr), &data32, &data32_count);
476 switch (kr) {
477 case KERN_SUCCESS:
478 V_SETADDR(dp, data32);
479 V_SETSIZE(dp, data32_count);
480 break;
481 case KERN_INVALID_ADDRESS:
482 if (!r->r_insharedregion &&
483 (VM_MEMORY_SKYWALK == r->r_info.user_tag || is_file_mapped_shared(r))) {
484 if (OPTIONS_DEBUG(opt, 1)) {
485 /* not necessarily an error: mitigation below */
486 tag_str_t tstr;
487 printr(r, "mach_vm_read() failed (%s) -- substituting zeroed region\n", str_tagr(tstr, r));
488 if (OPTIONS_DEBUG(opt, 2))
489 print_one_memory_region(r);
490 }
491 V_SETSIZE(dp, V_SIZE(vr));
492 kr = mach_vm_allocate(mach_task_self(), &dp->addr, V_SIZE(dp), VM_FLAGS_ANYWHERE);
493 if (KERN_SUCCESS != kr || 0 == V_ADDR(dp))
494 err_mach(kr, r, "mach_vm_allocate() z %llx-%llx", V_ADDR(vr), V_ENDADDR(vr));
495 break;
496 }
497 /*FALLTHROUGH*/
498 default:
499 err_mach(kr, r, "mach_vm_read() %llx-%llx", V_ADDR(vr), V_SIZE(vr));
500 if (OPTIONS_DEBUG(opt, 1))
501 print_one_memory_region(r);
502 break;
503 }
504 if (kr != KERN_SUCCESS) {
505 V_SETADDR(dp, 0);
506 return WALK_ERROR;
507 }
508
509 /*
510 * Sometimes (e.g. searchd) we may not be able to fetch all the pages
511 * from the underlying mapped file, in which case replace those pages
512 * with zfod pages (at least they compress efficiently) rather than
513 * taking a SIGBUS when compressing them.
514 *
515 * XXX Perhaps we should just catch the SIGBUS, and if the faulting address
516 * is in the right range, substitute zfod pages and rerun region compression?
517 * Complex though, because the compression code may be multithreaded.
518 */
519 if (!r->r_insharedregion && is_file_mapped_shared(r)) {
520 const mach_vm_offset_t pagesize_host = 1u << pageshift_host;
521
522 if (r->r_info.pages_resident * pagesize_host == V_SIZE(dp))
523 return WALK_CONTINUE; // all pages resident, so skip ..
524
525 if (OPTIONS_DEBUG(opt, 2))
526 printr(r, "probing %llu pages in mapped-shared file\n", V_SIZE(dp) / pagesize_host);
527
528 kr = KERN_SUCCESS;
529 for (mach_vm_offset_t a = V_ADDR(dp); a < V_ENDADDR(dp); a += pagesize_host) {
530
531 mach_msg_type_number_t pCount = VM_PAGE_INFO_BASIC_COUNT;
532 vm_page_info_basic_data_t pInfo;
533
534 kr = mach_vm_page_info(mach_task_self(), a, VM_PAGE_INFO_BASIC, (vm_page_info_t)&pInfo, &pCount);
535 if (KERN_SUCCESS != kr) {
536 err_mach(kr, NULL, "mach_vm_page_info() at %llx", a);
537 break;
538 }
539 /* If the VM has the page somewhere, assume we can bring it back */
540 if (pInfo.disposition & (VM_PAGE_QUERY_PAGE_PRESENT | VM_PAGE_QUERY_PAGE_REF | VM_PAGE_QUERY_PAGE_DIRTY))
541 continue;
542
543 /* Force the page to be fetched to see if it faults */
544 mach_vm_size_t tsize = pagesize_host;
545 void *tmp = valloc((size_t)tsize);
546 const mach_vm_address_t vtmp = (mach_vm_address_t)tmp;
547
548 switch (kr = mach_vm_read_overwrite(mach_task_self(), a, tsize, vtmp, &tsize)) {
549 case KERN_SUCCESS:
550 break;
551 case KERN_INVALID_ADDRESS: {
552 /* Content can't be found: replace it and the rest of the region with zero-fill pages */
553 if (OPTIONS_DEBUG(opt, 2)) {
554 printr(r, "mach_vm_read_overwrite() failed after %llu pages -- substituting zfod\n", (a - V_ADDR(dp)) / pagesize_host);
555 print_one_memory_region(r);
556 }
557 mach_vm_address_t va = a;
558 kr = mach_vm_allocate(mach_task_self(), &va, V_ENDADDR(dp) - va, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE);
559 if (KERN_SUCCESS != kr) {
560 err_mach(kr, r, "mach_vm_allocate() %llx", a);
561 } else {
562 assert(a == va);
563 a = V_ENDADDR(dp); // no need to look any further
564 }
565 break;
566 }
567 default:
568                             err_mach(kr, r, "mach_vm_read_overwrite() %llx", a);
569 break;
570 }
571 free(tmp);
572 if (KERN_SUCCESS != kr)
573 break;
574 }
575 if (KERN_SUCCESS != kr) {
576 kr = mach_vm_deallocate(mach_task_self(), V_ADDR(dp), V_SIZE(dp));
577 if (KERN_SUCCESS != kr && OPTIONS_DEBUG(opt, 1))
578 err_mach(kr, r, "mach_vm_deallocate() pre %llx-%llx", V_ADDR(dp), V_ENDADDR(dp));
579 V_SETADDR(dp, 0);
580 return WALK_ERROR;
581 }
582 }
583
584 return WALK_CONTINUE;
585 }
586
587 static walk_return_t
588 write_memory_range(struct write_segment_data *wsd, const struct region *r, mach_vm_offset_t vmaddr, mach_vm_offset_t vmsize)
589 {
590 assert(R_ADDR(r) <= vmaddr && R_ENDADDR(r) >= vmaddr + vmsize);
591
592 mach_vm_offset_t resid = vmsize;
593 walk_return_t step = WALK_CONTINUE;
594
595 do {
596 vmsize = resid;
597
598 /*
599 * Since some regions can be inconveniently large,
600 * chop them into multiple chunks as we compress them.
601 * (mach_vm_read has 32-bit limitations too).
602 */
603 vmsize = vmsize > INT32_MAX ? INT32_MAX : vmsize;
604 if (opt->chunksize > 0 && vmsize > opt->chunksize)
605 vmsize = opt->chunksize;
606 assert(vmsize <= INT32_MAX);
607
608 const struct vm_range vr = {
609 .addr = vmaddr,
610 .size = vmsize,
611 };
612 struct vm_range d, *dp = &d;
613
614 step = map_memory_range(wsd, r, &vr, dp);
615 if (WALK_CONTINUE != step)
616 break;
617 assert(0 != V_ADDR(dp) && 0 != V_SIZE(dp));
618 const void *srcaddr = (const void *)V_ADDR(dp);
619
620 mach_vm_behavior_set(mach_task_self(), V_ADDR(dp), V_SIZE(dp), VM_BEHAVIOR_SEQUENTIAL);
621
622 void *dstbuf = NULL;
623 unsigned algorithm = 0;
624 size_t filesize;
625
626 if (opt->extended) {
627 dstbuf = malloc(V_SIZEOF(dp));
628 if (dstbuf) {
629 filesize = compression_encode_buffer(dstbuf, V_SIZEOF(dp), srcaddr, V_SIZEOF(dp), NULL, opt->calgorithm);
630 if (filesize > 0 && filesize < V_SIZEOF(dp)) {
631 srcaddr = dstbuf; /* the data source is now heap, compressed */
632 mach_vm_deallocate(mach_task_self(), V_ADDR(dp), V_SIZE(dp));
633 V_SETADDR(dp, 0);
634 if (segment_compflags(opt->calgorithm, &algorithm) != 0) {
635 free(dstbuf);
636 mach_vm_deallocate(mach_task_self(), V_ADDR(dp), V_SIZE(dp));
637 V_SETADDR(dp, 0);
638 step = WALK_ERROR;
639 break;
640 }
641 } else {
642 free(dstbuf);
643 dstbuf = NULL;
644 filesize = V_SIZEOF(dp);
645 }
646 } else
647 filesize = V_SIZEOF(dp);
648 assert(filesize <= V_SIZEOF(dp));
649 } else
650 filesize = V_SIZEOF(dp);
651
652 assert(filesize);
653
654 const struct file_range fr = {
655 .off = wsd->wsd_foffset,
656 .size = filesize,
657 };
658 make_segment_command(wsd->wsd_lc, &vr, &fr, &r->r_info, algorithm, r->r_purgable);
659 step = pwrite_memory(wsd, srcaddr, filesize, &vr);
660 if (dstbuf)
661 free(dstbuf);
662 if (V_ADDR(dp)) {
663 kern_return_t kr = mach_vm_deallocate(mach_task_self(), V_ADDR(dp), V_SIZE(dp));
664 if (KERN_SUCCESS != kr && OPTIONS_DEBUG(opt, 1))
665 err_mach(kr, r, "mach_vm_deallocate() post %llx-%llx", V_ADDR(dp), V_SIZE(dp));
666 }
667
668 if (WALK_ERROR == step)
669 break;
670 commit_load_command(wsd, wsd->wsd_lc);
671 resid -= vmsize;
672 vmaddr += vmsize;
673 } while (resid);
674
675 return step;
676 }
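/*
 * Worked example (illustrative): a 5 GiB region is written as three chunks
 * when opt->chunksize is 0 -- two of INT32_MAX bytes (mach_vm_read() and
 * the compressor both want sub-2 GiB pieces) plus a ~1 GiB remainder --
 * and as twenty 256 MiB chunks if opt->chunksize is 256 MiB.  Each chunk
 * gets its own segment/coredata command, which is why count_memory_range()
 * below performs the matching arithmetic when the header is sized.
 */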
677
678 #ifdef RDAR_23744374
679 /*
680 * Sigh. This is a workaround.
681 * Find the vmsize as if the VM system manages ranges in host pagesize units
682 * rather than application pagesize units.
683 */
684 static mach_vm_size_t
685 getvmsize_host(const task_t task, const struct region *r)
686 {
687 mach_vm_size_t vmsize_host = R_SIZE(r);
688
689 if (pageshift_host != pageshift_app) {
690 is_actual_size(task, r, &vmsize_host);
691 if (OPTIONS_DEBUG(opt, 1) && R_SIZE(r) != vmsize_host)
692 printr(r, "(region size tweak: was %llx, is %llx)\n", R_SIZE(r), vmsize_host);
693 }
694 return vmsize_host;
695 }
696 #else
697 static __inline mach_vm_size_t
698 getvmsize_host(__unused const task_t task, const struct region *r)
699 {
700 return R_SIZE(r);
701 }
702 #endif
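/*
 * Background (illustrative): pageshift_app and pageshift_host differ when
 * the process uses a different page granule than the kernel (e.g. 16 KiB
 * vs 4 KiB on some arm64 configurations).  A region size reported in
 * application-page units can then overstate what is actually mapped in
 * host-page units, and writing R_SIZE(r) bytes would run off the end of
 * the mapping; is_actual_size() re-measures the region in host pages so
 * the writers below stay inside it.
 */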
703
704 static walk_return_t
705 write_sparse_region(const struct region *r, struct write_segment_data *wsd)
706 {
707 assert(r->r_nsubregions);
708 assert(!r->r_inzfodregion);
709 assert(NULL == r->r_fileref);
710
711 const mach_vm_size_t vmsize_host = getvmsize_host(wsd->wsd_task, r);
712 walk_return_t step = WALK_CONTINUE;
713
714 for (unsigned i = 0; i < r->r_nsubregions; i++) {
715 const struct subregion *s = r->r_subregions[i];
716
717 if (s->s_isuuidref)
718 step = write_fileref_subregion(r, s, wsd);
719 else {
720 /* Write this one out as real data */
721 mach_vm_size_t vmsize = S_SIZE(s);
722 if (R_SIZE(r) != vmsize_host) {
723 if (S_ADDR(s) + vmsize > R_ADDR(r) + vmsize_host) {
724 vmsize = R_ADDR(r) + vmsize_host - S_ADDR(s);
725 if (OPTIONS_DEBUG(opt, 3))
726 printr(r, "(subregion size tweak: was %llx, is %llx)\n",
727 S_SIZE(s), vmsize);
728 }
729 }
730 step = write_memory_range(wsd, r, S_ADDR(s), vmsize);
731 }
732 if (WALK_ERROR == step)
733 break;
734 }
735 return step;
736 }
737
738 static walk_return_t
739 write_vanilla_region(const struct region *r, struct write_segment_data *wsd)
740 {
741 assert(0 == r->r_nsubregions);
742 assert(!r->r_inzfodregion);
743 assert(NULL == r->r_fileref);
744
745 const mach_vm_size_t vmsize_host = getvmsize_host(wsd->wsd_task, r);
746 return write_memory_range(wsd, r, R_ADDR(r), vmsize_host);
747 }
748
749 walk_return_t
750 region_write_memory(struct region *r, void *arg)
751 {
752 assert(r->r_info.user_tag != VM_MEMORY_IOKIT); // elided in walk_regions()
753 assert((r->r_info.max_protection & VM_PROT_READ) == VM_PROT_READ);
754 return ROP_WRITE(r, arg);
755 }
756
757 /*
758 * Handles the cases where segments are broken into chunks i.e. when
759 * writing compressed segments.
760 */
761 static unsigned long
762 count_memory_range(mach_vm_offset_t vmsize)
763 {
764 unsigned long count;
765 if (opt->chunksize) {
766 count = (size_t)vmsize / opt->chunksize;
767 if (vmsize != (mach_vm_offset_t)count * opt->chunksize)
768 count++;
769 } else
770 count = 1;
771 return count;
772 }
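/*
 * Worked example (illustrative): with opt->chunksize of 4 MiB, a 10 MiB
 * region gives count = 10/4 = 2; since 2 * 4 MiB != 10 MiB the count is
 * bumped to 3 -- a ceiling division matching the three chunks that
 * write_memory_range() will actually emit for it.
 */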
773
774 /*
775 * A sparse region is likely a writable data segment described by
776 * native_segment_command_t somewhere in the address space.
777 */
778 static void
779 size_sparse_subregion(const struct subregion *s, struct size_core *sc)
780 {
781 const unsigned long count = count_memory_range(S_SIZE(s));
782 sc->headersize += sizeof_segment_command() * count;
783 sc->count += count;
784 sc->memsize += S_SIZE(s);
785 }
786
787 static void
788 size_sparse_region(const struct region *r, struct size_core *sc_sparse, struct size_core *sc_fileref)
789 {
790 assert(0 != r->r_nsubregions);
791
792 unsigned long entry_total = sc_sparse->count + sc_fileref->count;
793 for (unsigned i = 0; i < r->r_nsubregions; i++) {
794 const struct subregion *s = r->r_subregions[i];
795 if (s->s_isuuidref)
796 size_fileref_subregion(s, sc_fileref);
797 else
798 size_sparse_subregion(s, sc_sparse);
799 }
800 if (OPTIONS_DEBUG(opt, 3)) {
801 /* caused by compression breaking a large region into chunks */
802 entry_total = (sc_fileref->count + sc_sparse->count) - entry_total;
803 if (entry_total > r->r_nsubregions)
804 printr(r, "range contains %u subregions requires %lu segment commands\n",
805 r->r_nsubregions, entry_total);
806 }
807 }
808
809 const struct regionop sparse_ops = {
810 print_memory_region,
811 write_sparse_region,
812 del_sparse_region,
813 };
814
815 static void
816 size_vanilla_region(const struct region *r, struct size_core *sc)
817 {
818 assert(0 == r->r_nsubregions);
819
820 const unsigned long count = count_memory_range(R_SIZE(r));
821 sc->headersize += sizeof_segment_command() * count;
822 sc->count += count;
823 sc->memsize += R_SIZE(r);
824
825 if (OPTIONS_DEBUG(opt, 3) && count != 1)
826 printr(r, "range with 1 region, but requires %lu segment commands\n", count);
827 }
828
829 const struct regionop vanilla_ops = {
830 print_memory_region,
831 write_vanilla_region,
832 del_vanilla_region,
833 };
834
835 walk_return_t
836 region_size_memory(struct region *r, void *arg)
837 {
838 struct size_segment_data *ssd = arg;
839
840 if (&zfod_ops == r->r_op)
841 size_zfod_region(r, &ssd->ssd_zfod);
842 else if (&fileref_ops == r->r_op)
843 size_fileref_region(r, &ssd->ssd_fileref);
844 else if (&sparse_ops == r->r_op)
845 size_sparse_region(r, &ssd->ssd_sparse, &ssd->ssd_fileref);
846 else if (&vanilla_ops == r->r_op)
847 size_vanilla_region(r, &ssd->ssd_vanilla);
848 else
849 errx(EX_SOFTWARE, "%s: bad op", __func__);
850
851 return WALK_CONTINUE;
852 }
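/*
 * Overall flow, summarized for orientation (the driving walk lives outside
 * this file): the caller first visits every region with
 * region_size_memory() so the size_core totals can size the Mach-O header
 * and reserve room for the load commands, then visits them again with
 * region_write_memory(), whose ROP_WRITE dispatch lands in one of
 * write_vanilla_region(), write_sparse_region(), write_zfod_region() or
 * write_fileref_region() above to emit the commands and payload.
 */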