/* Copyright (c) 2016-2018 Apple Inc. All rights reserved. */
#include <compression.h>
#include <sys/param.h>
native_mach_header_t *
make_corefile_mach_header(void *data)
{
    native_mach_header_t *mh = data;
    mh->magic = NATIVE_MH_MAGIC;
    mh->filetype = MH_CORE;
#ifdef __LP64__
    const int is64 = 1;
#else
    const int is64 = 0;
#endif
#if defined(__i386__) || defined(__x86_64__)
    mh->cputype = is64 ? CPU_TYPE_X86_64 : CPU_TYPE_I386;
    mh->cpusubtype = is64 ? CPU_SUBTYPE_X86_64_ALL : CPU_SUBTYPE_I386_ALL;
#elif defined(__arm__) || defined(__arm64__)
    mh->cputype = is64 ? CPU_TYPE_ARM64 : CPU_TYPE_ARM;
    mh->cpusubtype = is64 ? CPU_SUBTYPE_ARM64_ALL : CPU_SUBTYPE_ARM_ALL;
#endif
    return mh;
}

struct proto_coreinfo_command *
make_coreinfo_command(native_mach_header_t *mh, void *data, const uuid_t aoutid, uint64_t address, uint64_t dyninfo)
{
    struct proto_coreinfo_command *cc = data;
    cc->cmd = proto_LC_COREINFO;
    cc->cmdsize = sizeof (*cc);
    cc->type = proto_CORETYPE_USER;
    cc->pageshift = (uint16_t)pageshift_host;
    cc->address = address;
    uuid_copy(cc->uuid, aoutid);
    cc->dyninfo = dyninfo;
    mach_header_inc_ncmds(mh, 1);
    mach_header_inc_sizeofcmds(mh, cc->cmdsize);
    return cc;
}

native_segment_command_t *
make_native_segment_command(void *data, const struct vm_range *vr, const struct file_range *fr, vm_prot_t maxprot, vm_prot_t initprot)
{
    native_segment_command_t *sc = data;
    sc->cmd = NATIVE_LC_SEGMENT;
    sc->cmdsize = sizeof (*sc);
    sc->vmaddr = (unsigned long)V_ADDR(vr);
    sc->vmsize = (unsigned long)V_SIZE(vr);
    sc->fileoff = (unsigned long)F_OFF(fr);
    sc->filesize = (unsigned long)F_SIZE(fr);
    sc->maxprot = maxprot;
    sc->initprot = initprot;
    return sc;
}

static struct proto_coredata_command *
make_coredata_command(void *data, const struct vm_range *vr, const struct file_range *fr, const vm_region_submap_info_data_64_t *info, unsigned comptype, unsigned purgable)
{
    struct proto_coredata_command *cc = data;
    cc->cmd = proto_LC_COREDATA;
    cc->cmdsize = sizeof (*cc);
    cc->vmaddr = V_ADDR(vr);
    cc->vmsize = V_SIZE(vr);
    cc->fileoff = F_OFF(fr);
    cc->filesize = F_SIZE(fr);
    cc->maxprot = info->max_protection;
    cc->prot = info->protection;
    cc->flags = COMP_MAKE_FLAGS(comptype);
    cc->share_mode = info->share_mode;
    assert(purgable <= UINT8_MAX);
    cc->purgable = (uint8_t)purgable;
    assert(info->user_tag <= UINT8_MAX);
    cc->tag = (uint8_t)info->user_tag;
    cc->extp = info->external_pager;
    return cc;
}

static size_t
sizeof_segment_command(void) {
    return opt->extended ?
        sizeof (struct proto_coredata_command) : sizeof (native_segment_command_t);
}

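/*
 * Per-region bookkeeping below sizes each segment against whichever of the
 * two command layouts is in use: the extended proto_coredata_command (which
 * also records compression and VM metadata) or the plain native segment
 * command.
 */
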
static struct load_command *
make_segment_command(void *data, const struct vm_range *vr, const struct file_range *fr, const vm_region_submap_info_data_64_t *info, unsigned comptype, int purgable)
{
    if (opt->extended)
        make_coredata_command(data, vr, fr, info, comptype, purgable);
    else
        make_native_segment_command(data, vr, fr, info->max_protection, info->protection);
    return data;
}

/*
 * Increment the mach-o header data when we succeed
 */
static void
commit_load_command(struct write_segment_data *wsd, const struct load_command *lc)
{
    wsd->wsd_lc = (caddr_t)lc + lc->cmdsize;

    native_mach_header_t *mh = wsd->wsd_mh;
    mach_header_inc_ncmds(mh, 1);
    mach_header_inc_sizeofcmds(mh, lc->cmdsize);
}

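/*
 * Pattern used throughout: a load command is constructed in place at
 * wsd->wsd_lc, and commit_load_command() then advances that cursor and bumps
 * ncmds/sizeofcmds, so nothing is accounted for in the header until the
 * command is complete.
 */
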
#pragma mark -- Regions written as "file references" --
static size_t
cmdsize_fileref_command(const char *nm)
{
    size_t cmdsize = sizeof (struct proto_fileref_command);
    size_t len;
    if (0 != (len = strlen(nm))) {
        len++; // NUL-terminated for mmap sanity
        cmdsize += roundup(len, sizeof (long));
    }
    return cmdsize;
}

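/*
 * Resulting layout, as computed above:
 *   [ struct proto_fileref_command | pathname + NUL, padded to sizeof (long) ]
 * cmdsize covers both pieces, so the string travels with the command.
 */
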
static void
size_fileref_subregion(const struct subregion *s, struct size_core *sc)
{
    size_t cmdsize = cmdsize_fileref_command(S_PATHNAME(s));
    sc->headersize += cmdsize;
    sc->count++;
    sc->memsize += S_SIZE(s);
}

static void
size_fileref_region(const struct region *r, struct size_core *sc)
{
    assert(0 == r->r_nsubregions);
    assert(!r->r_inzfodregion);

    size_t cmdsize = cmdsize_fileref_command(r->r_fileref->fr_pathname);
    sc->headersize += cmdsize;
    sc->count++;
    sc->memsize += R_SIZE(r);
}

static struct proto_fileref_command *
make_fileref_command(void *data, const char *pathname, const uuid_t uuid,
    const struct vm_range *vr, const struct file_range *fr,
    const vm_region_submap_info_data_64_t *info, unsigned purgable)
{
    struct proto_fileref_command *fc = data;
    size_t len;

    fc->cmd = proto_LC_FILEREF;
    fc->cmdsize = sizeof (*fc);
    if (0 != (len = strlen(pathname))) {
        /*
         * Strings live immediately after the
         * command, and are included in the cmdsize
         */
        fc->filename.offset = sizeof (*fc);
        void *s = fc + 1;
        strlcpy(s, pathname, ++len); // NUL-terminated for mmap sanity
        fc->cmdsize += roundup(len, sizeof (long));
        assert(cmdsize_fileref_command(pathname) == fc->cmdsize);
    }

    /*
     * A file reference allows different kinds of identifiers for
     * the reference to be reconstructed.
     */
    assert(info->external_pager);

    if (!uuid_is_null(uuid)) {
        uuid_copy(fc->id, uuid);
        fc->flags = FREF_MAKE_FLAGS(kFREF_ID_UUID);
    } else {
        struct stat st;
        if (-1 != stat(pathname, &st) && 0 != st.st_mtimespec.tv_sec) {
            /* "little-endian format timespec structure" */
            struct timespec ts = st.st_mtimespec;
            ts.tv_nsec = 0; // allow touch(1) to fix things
            memset(fc->id, 0, sizeof(fc->id));
            memcpy(fc->id, &ts, sizeof(ts));
            fc->flags = FREF_MAKE_FLAGS(kFREF_ID_MTIMESPEC_LE);
        } else
            fc->flags = FREF_MAKE_FLAGS(kFREF_ID_NONE);
    }

    fc->vmaddr = V_ADDR(vr);
    fc->vmsize = V_SIZE(vr);

    assert(F_OFF(fr) >= 0);
    fc->fileoff = F_OFF(fr);
    fc->filesize = F_SIZE(fr);

    assert(info->max_protection & VM_PROT_READ);
    fc->maxprot = info->max_protection;
    fc->prot = info->protection;

    fc->share_mode = info->share_mode;
    assert(purgable <= UINT8_MAX);
    fc->purgable = (uint8_t)purgable;
    assert(info->user_tag <= UINT8_MAX);
    fc->tag = (uint8_t)info->user_tag;
    fc->extp = info->external_pager;
    return fc;
}

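/*
 * Identifier preference, as implemented above: a non-null UUID wins
 * (kFREF_ID_UUID); otherwise the file's mtimespec with tv_nsec zeroed is
 * stored little-endian (kFREF_ID_MTIMESPEC_LE); failing that, kFREF_ID_NONE.
 */
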
/*
 * It's almost always more efficient to write out a reference to the
 * data than write out the data itself.
 */
static walk_return_t
write_fileref_subregion(const struct region *r, const struct subregion *s, struct write_segment_data *wsd)
{
    if (OPTIONS_DEBUG(opt, 1) && !issubregiontype(s, SEG_TEXT) && !issubregiontype(s, SEG_LINKEDIT))
        printf("%s: unusual segment type %s from %s\n", __func__, S_MACHO_TYPE(s), S_FILENAME(s));
    assert((r->r_info.max_protection & VM_PROT_READ) == VM_PROT_READ);
    assert((r->r_info.protection & VM_PROT_WRITE) == 0);

    const struct libent *le = S_LIBENT(s);
    const struct file_range fr = {
        .off = S_MACHO_FILEOFF(s),
        .size = S_SIZE(s),
    };
    const struct proto_fileref_command *fc = make_fileref_command(wsd->wsd_lc, le->le_pathname, le->le_uuid, S_RANGE(s), &fr, &r->r_info, r->r_purgable);

    commit_load_command(wsd, (const void *)fc);
    if (OPTIONS_DEBUG(opt, 3)) {
        hsize_str_t hstr;
        printr(r, "ref '%s' %s (vm %llx-%llx, file offset %lld for %s)\n", S_FILENAME(s), S_MACHO_TYPE(s), (uint64_t)fc->vmaddr, (uint64_t)fc->vmaddr + fc->vmsize, (int64_t)fc->fileoff, str_hsize(hstr, fc->filesize));
    }
    return WALK_CONTINUE;
}

/*
 * Note that we may be asked to write reference segments whose protections
 * are rw- -- this -should- be ok as we don't convert the region to a file
 * reference unless we know it hasn't been modified.
 */
static walk_return_t
write_fileref_region(const struct region *r, struct write_segment_data *wsd)
{
    assert(0 == r->r_nsubregions);
    assert(r->r_info.user_tag != VM_MEMORY_IOKIT);
    assert((r->r_info.max_protection & VM_PROT_READ) == VM_PROT_READ);
    assert(!r->r_inzfodregion);

    const struct libent *le = r->r_fileref->fr_libent;
    const char *pathname = r->r_fileref->fr_pathname;
    const struct file_range fr = {
        .off = r->r_fileref->fr_offset,
        .size = R_SIZE(r),
    };
    const struct proto_fileref_command *fc = make_fileref_command(wsd->wsd_lc, pathname, le ? le->le_uuid : UUID_NULL, R_RANGE(r), &fr, &r->r_info, r->r_purgable);

    commit_load_command(wsd, (const void *)fc);
    if (OPTIONS_DEBUG(opt, 3)) {
        hsize_str_t hstr;
        printr(r, "ref '%s' %s (vm %llx-%llx, file offset %lld for %s)\n", pathname, "(type?)", (uint64_t)fc->vmaddr, (uint64_t)fc->vmaddr + fc->vmsize, (int64_t)fc->fileoff, str_hsize(hstr, fc->filesize));
    }
    return WALK_CONTINUE;
}

const struct regionop fileref_ops = {
    /* ... */
    write_fileref_region,
    /* ... */
};

#pragma mark -- ZFOD segments written only to the header --
static void
size_zfod_region(const struct region *r, struct size_core *sc)
{
    assert(0 == r->r_nsubregions);
    assert(r->r_inzfodregion);

    sc->headersize += sizeof_segment_command();
    sc->count++;
    sc->memsize += R_SIZE(r);
}

static walk_return_t
write_zfod_region(const struct region *r, struct write_segment_data *wsd)
{
    assert(r->r_info.user_tag != VM_MEMORY_IOKIT);
    assert((r->r_info.max_protection & VM_PROT_READ) == VM_PROT_READ);

    const struct file_range fr = {
        .off = wsd->wsd_foffset,
        .size = 0,
    };
    make_segment_command(wsd->wsd_lc, R_RANGE(r), &fr, &r->r_info, 0, VM_PURGABLE_EMPTY);
    commit_load_command(wsd, wsd->wsd_lc);
    return WALK_CONTINUE;
}

const struct regionop zfod_ops = {
    /* ... */
    write_zfod_region,
    /* ... */
};

#pragma mark -- Regions containing data --
static walk_return_t
pwrite_memory(struct write_segment_data *wsd, const void *addr, size_t size, const struct vm_range *vr)
{
    ssize_t nwritten = 0;

    const int error = bounded_pwrite(wsd->wsd_fd, addr, size, wsd->wsd_foffset, &wsd->wsd_nocache, &nwritten);

    if (error || OPTIONS_DEBUG(opt, 3)) {
        hsize_str_t hsz;
        printvr(vr, "writing %ld bytes at offset %lld -> ", size, wsd->wsd_foffset);
        if (error)
            printf("err #%d - %s ", error, strerror(error));
        else {
            printf("%s ", str_hsize(hsz, nwritten));
            if (size != (size_t)nwritten)
                printf("[%zd - incomplete write!] ", nwritten);
            else if (size != V_SIZE(vr))
                printf("(%s in memory) ",
                    str_hsize(hsz, V_SIZE(vr)));
        }
        printf("\n");
    }

    walk_return_t step = WALK_CONTINUE;
    switch (error) {
    case 0:
        if (size != (size_t)nwritten)
            step = WALK_ERROR;
        else {
            wsd->wsd_foffset += nwritten;
            wsd->wsd_nwritten += nwritten;
        }
        break;
    case EFAULT:    // transient mapping failure?
        break;
    default:        // EROFS, ENOSPC, EFBIG etc.
        step = WALK_ERROR;
        break;
    }
    return step;
}

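/*
 * Error policy: a short or failed write turns into WALK_ERROR and aborts the
 * walk; EFAULT is commented as a possibly-transient mapping failure and is
 * not treated as fatal here.
 */
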
/*
 * Write a contiguous range of memory into the core file.
 * Apply compression, and chunk if necessary.
 */
static int
segment_compflags(compression_algorithm ca, unsigned *algnum)
{
    switch (ca) {
    case COMPRESSION_LZ4:
        *algnum = kCOMP_LZ4;
        break;
    case COMPRESSION_ZLIB:
        *algnum = kCOMP_ZLIB;
        break;
    case COMPRESSION_LZMA:
        *algnum = kCOMP_LZMA;
        break;
    case COMPRESSION_LZFSE:
        *algnum = kCOMP_LZFSE;
        break;
    default:
        err(EX_SOFTWARE, "unsupported compression algorithm %x", ca);
    }
    return 0;
}

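/*
 * Maps the libcompression algorithm selected via opt->calgorithm to the
 * kCOMP_* value that ends up in the segment's COMP_MAKE_FLAGS() flags,
 * which is presumably what a consumer uses to pick a decompressor.
 */
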
static bool
is_file_mapped_shared(const struct region *r)
{
    if (r->r_info.external_pager)
        switch (r->r_info.share_mode) {
        case SM_TRUESHARED:         // sm=shm
        case SM_SHARED:             // sm=ali
        case SM_SHARED_ALIASED:     // sm=s/a
            return true;
        default:
            break;
        }
    return false;
}

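/*
 * Used below both to decide when a failed mach_vm_read() can safely be
 * replaced with a zero-filled allocation and to trigger the page-probing
 * pass over mapped-shared files.
 */
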
static walk_return_t
map_memory_range(struct write_segment_data *wsd, const struct region *r, const struct vm_range *vr, struct vm_range *dp)
{
    if (r->r_incommregion) {
        /*
         * Special case: for commpage access, copy from our own address space.
         */
        V_SETADDR(dp, 0);
        V_SETSIZE(dp, V_SIZE(vr));

        kern_return_t kr = mach_vm_allocate(mach_task_self(), &dp->addr, dp->size, VM_FLAGS_ANYWHERE);
        if (KERN_SUCCESS != kr || 0 == dp->addr) {
            err_mach(kr, r, "mach_vm_allocate c %llx-%llx", V_ADDR(vr), V_ENDADDR(vr));
            print_one_memory_region(r);
            return WALK_ERROR;
        }
        if (OPTIONS_DEBUG(opt, 3))
            printr(r, "copying from self %llx-%llx\n", V_ADDR(vr), V_ENDADDR(vr));
        memcpy((void *)dp->addr, (const void *)V_ADDR(vr), V_SIZE(vr));
        return WALK_CONTINUE;
    }

    if (!r->r_insharedregion && 0 == (r->r_info.protection & VM_PROT_READ)) {
        assert(0 != (r->r_info.max_protection & VM_PROT_READ)); // simple_region_optimization()

        /*
         * Special case: region that doesn't currently have read permission.
         * (e.g. --x/r-x permissions with tag 64 - JS JIT generated code
         * from com.apple.WebKit.WebContent)
         */
        const mach_vm_offset_t pagesize_host = 1u << pageshift_host;
        if (OPTIONS_DEBUG(opt, 3))
            printr(r, "unreadable (%s/%s), remap with read permission\n",
                str_prot(r->r_info.protection), str_prot(r->r_info.max_protection));

        V_SETADDR(dp, 0);
        V_SETSIZE(dp, V_SIZE(vr));
        vm_prot_t cprot, mprot;
        kern_return_t kr = mach_vm_remap(mach_task_self(), &dp->addr, V_SIZE(dp), pagesize_host - 1, true, wsd->wsd_task, V_ADDR(vr), true, &cprot, &mprot, VM_INHERIT_NONE);
        if (KERN_SUCCESS != kr) {
            err_mach(kr, r, "mach_vm_remap() %llx-%llx", V_ADDR(vr), V_ENDADDR(vr));
            return WALK_ERROR;
        }
        assert(r->r_info.protection == cprot && r->r_info.max_protection == mprot);
        kr = mach_vm_protect(mach_task_self(), V_ADDR(dp), V_SIZE(dp), false, VM_PROT_READ);
        if (KERN_SUCCESS != kr) {
            err_mach(kr, r, "mach_vm_protect() %llx-%llx", V_ADDR(vr), V_ENDADDR(vr));
            mach_vm_deallocate(mach_task_self(), V_ADDR(dp), V_SIZE(dp));
            return WALK_ERROR;
        }
        return WALK_CONTINUE;
    }

    /*
     * Most segments with data are read here
     */
    vm_offset_t data32 = 0;
    mach_msg_type_number_t data32_count;
    kern_return_t kr = mach_vm_read(wsd->wsd_task, V_ADDR(vr), V_SIZE(vr), &data32, &data32_count);
    switch (kr) {
    case KERN_SUCCESS:
        V_SETADDR(dp, data32);
        V_SETSIZE(dp, data32_count);
        break;
    case KERN_INVALID_ADDRESS:
        if (!r->r_insharedregion &&
            (VM_MEMORY_SKYWALK == r->r_info.user_tag || is_file_mapped_shared(r))) {
            if (OPTIONS_DEBUG(opt, 1)) {
                /* not necessarily an error: mitigation below */
                tag_str_t tstr;
                printr(r, "mach_vm_read() failed (%s) -- substituting zeroed region\n", str_tagr(tstr, r));
                if (OPTIONS_DEBUG(opt, 2))
                    print_one_memory_region(r);
            }
            V_SETSIZE(dp, V_SIZE(vr));
            kr = mach_vm_allocate(mach_task_self(), &dp->addr, V_SIZE(dp), VM_FLAGS_ANYWHERE);
            if (KERN_SUCCESS != kr || 0 == V_ADDR(dp))
                err_mach(kr, r, "mach_vm_allocate() z %llx-%llx", V_ADDR(vr), V_ENDADDR(vr));
            break;
        }
        /* FALLTHROUGH */
    default:
        err_mach(kr, r, "mach_vm_read() %llx-%llx", V_ADDR(vr), V_SIZE(vr));
        if (OPTIONS_DEBUG(opt, 1))
            print_one_memory_region(r);
        break;
    }
    if (kr != KERN_SUCCESS) {
        V_SETADDR(dp, 0);
        return WALK_ERROR;
    }

    /*
     * Sometimes (e.g. searchd) we may not be able to fetch all the pages
     * from the underlying mapped file, in which case replace those pages
     * with zfod pages (at least they compress efficiently) rather than
     * taking a SIGBUS when compressing them.
     *
     * XXX Perhaps we should just catch the SIGBUS, and if the faulting address
     * is in the right range, substitute zfod pages and rerun region compression?
     * Complex though, because the compression code may be multithreaded.
     */
    if (!r->r_insharedregion && is_file_mapped_shared(r)) {
        const mach_vm_offset_t pagesize_host = 1u << pageshift_host;

        if (r->r_info.pages_resident * pagesize_host == V_SIZE(dp))
            return WALK_CONTINUE; // all pages resident, so skip ..

        if (OPTIONS_DEBUG(opt, 2))
            printr(r, "probing %llu pages in mapped-shared file\n", V_SIZE(dp) / pagesize_host);

        for (mach_vm_offset_t a = V_ADDR(dp); a < V_ENDADDR(dp); a += pagesize_host) {

            mach_msg_type_number_t pCount = VM_PAGE_INFO_BASIC_COUNT;
            vm_page_info_basic_data_t pInfo;

            kr = mach_vm_page_info(mach_task_self(), a, VM_PAGE_INFO_BASIC, (vm_page_info_t)&pInfo, &pCount);
            if (KERN_SUCCESS != kr) {
                err_mach(kr, NULL, "mach_vm_page_info() at %llx", a);
                break;
            }
            /* If the VM has the page somewhere, assume we can bring it back */
            if (pInfo.disposition & (VM_PAGE_QUERY_PAGE_PRESENT | VM_PAGE_QUERY_PAGE_REF | VM_PAGE_QUERY_PAGE_DIRTY))
                continue;

            /* Force the page to be fetched to see if it faults */
            mach_vm_size_t tsize = pagesize_host;
            void *tmp = valloc((size_t)tsize);
            const mach_vm_address_t vtmp = (mach_vm_address_t)tmp;

            switch (kr = mach_vm_read_overwrite(mach_task_self(), a, tsize, vtmp, &tsize)) {
            case KERN_SUCCESS:
                break;
            case KERN_INVALID_ADDRESS: {
                /* Content can't be found: replace it and the rest of the region with zero-fill pages */
                if (OPTIONS_DEBUG(opt, 2)) {
                    printr(r, "mach_vm_read_overwrite() failed after %llu pages -- substituting zfod\n", (a - V_ADDR(dp)) / pagesize_host);
                    print_one_memory_region(r);
                }
                mach_vm_address_t va = a;
                kr = mach_vm_allocate(mach_task_self(), &va, V_ENDADDR(dp) - va, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE);
                if (KERN_SUCCESS != kr) {
                    err_mach(kr, r, "mach_vm_allocate() %llx", a);
                    break;
                }
                a = V_ENDADDR(dp); // no need to look any further
                break;
            }
            default:
                err_mach(kr, r, "mach_vm_overwrite() %llx", a);
                break;
            }
            free(tmp);
            if (KERN_SUCCESS != kr)
                break;
        }
        if (KERN_SUCCESS != kr) {
            kr = mach_vm_deallocate(mach_task_self(), V_ADDR(dp), V_SIZE(dp));
            if (KERN_SUCCESS != kr && OPTIONS_DEBUG(opt, 1))
                err_mach(kr, r, "mach_vm_deallocate() pre %llx-%llx", V_ADDR(dp), V_ENDADDR(dp));
            return WALK_ERROR;
        }
    }

    return WALK_CONTINUE;
}

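/*
 * Summary of the acquisition strategies above: commpage ranges are copied
 * from our own address space, regions readable only at max-protection are
 * remapped locally and granted VM_PROT_READ, and everything else goes
 * through mach_vm_read(); the final probe pass substitutes zero-fill pages
 * for file-backed pages that can no longer be paged in.
 */
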
static walk_return_t
write_memory_range(struct write_segment_data *wsd, const struct region *r, mach_vm_offset_t vmaddr, mach_vm_offset_t vmsize)
{
    assert(R_ADDR(r) <= vmaddr && R_ENDADDR(r) >= vmaddr + vmsize);

    mach_vm_offset_t resid = vmsize;
    walk_return_t step = WALK_CONTINUE;

    do {
        /*
         * Since some regions can be inconveniently large,
         * chop them into multiple chunks as we compress them.
         * (mach_vm_read has 32-bit limitations too).
         */
        vmsize = vmsize > INT32_MAX ? INT32_MAX : vmsize;
        if (opt->chunksize > 0 && vmsize > opt->chunksize)
            vmsize = opt->chunksize;
        assert(vmsize <= INT32_MAX);

        const struct vm_range vr = {
            .addr = vmaddr,
            .size = vmsize,
        };
        struct vm_range d, *dp = &d;

        step = map_memory_range(wsd, r, &vr, dp);
        if (WALK_CONTINUE != step)
            break;
        assert(0 != V_ADDR(dp) && 0 != V_SIZE(dp));
        const void *srcaddr = (const void *)V_ADDR(dp);

        mach_vm_behavior_set(mach_task_self(), V_ADDR(dp), V_SIZE(dp), VM_BEHAVIOR_SEQUENTIAL);

        void *dstbuf = NULL;
        unsigned algorithm = 0;
        size_t filesize;

        if (opt->extended) {
            dstbuf = malloc(V_SIZEOF(dp));
            if (dstbuf) {
                filesize = compression_encode_buffer(dstbuf, V_SIZEOF(dp), srcaddr, V_SIZEOF(dp), NULL, opt->calgorithm);
                if (filesize > 0 && filesize < V_SIZEOF(dp)) {
                    srcaddr = dstbuf; /* the data source is now heap, compressed */
                    mach_vm_deallocate(mach_task_self(), V_ADDR(dp), V_SIZE(dp));
                    V_SETADDR(dp, 0);
                    if (segment_compflags(opt->calgorithm, &algorithm) != 0) {
                        free(dstbuf);
                        mach_vm_deallocate(mach_task_self(), V_ADDR(dp), V_SIZE(dp));
                        step = WALK_ERROR;
                        break;
                    }
                } else {
                    free(dstbuf);
                    dstbuf = NULL;
                    filesize = V_SIZEOF(dp);
                }
            } else
                filesize = V_SIZEOF(dp);
            assert(filesize <= V_SIZEOF(dp));
        } else
            filesize = V_SIZEOF(dp);

        const struct file_range fr = {
            .off = wsd->wsd_foffset,
            .size = filesize,
        };
        make_segment_command(wsd->wsd_lc, &vr, &fr, &r->r_info, algorithm, r->r_purgable);
        step = pwrite_memory(wsd, srcaddr, filesize, &vr);
        if (dstbuf)
            free(dstbuf);
        if (V_ADDR(dp)) {
            kern_return_t kr = mach_vm_deallocate(mach_task_self(), V_ADDR(dp), V_SIZE(dp));
            if (KERN_SUCCESS != kr && OPTIONS_DEBUG(opt, 1))
                err_mach(kr, r, "mach_vm_deallocate() post %llx-%llx", V_ADDR(dp), V_SIZE(dp));
        }

        if (WALK_ERROR == step)
            break;
        commit_load_command(wsd, wsd->wsd_lc);

        vmaddr += vmsize;
        resid -= vmsize;
        vmsize = resid;
    } while (resid > 0);

    return step;
}

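/*
 * Each pass of the loop above handles at most INT32_MAX bytes (and at most
 * opt->chunksize when that is set), emitting one segment command per chunk;
 * the compressed path substitutes the heap buffer for the mapped source
 * before the pwrite.
 */
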
/*
 * Sigh. This is a workaround.
 * Find the vmsize as if the VM system manages ranges in host pagesize units
 * rather than application pagesize units.
 */
static mach_vm_size_t
getvmsize_host(const task_t task, const struct region *r)
{
    mach_vm_size_t vmsize_host = R_SIZE(r);

    if (pageshift_host != pageshift_app) {
        is_actual_size(task, r, &vmsize_host);
        if (OPTIONS_DEBUG(opt, 1) && R_SIZE(r) != vmsize_host)
            printr(r, "(region size tweak: was %llx, is %llx)\n", R_SIZE(r), vmsize_host);
    }
    return vmsize_host;
}

/* ... */

static __inline mach_vm_size_t
getvmsize_host(__unused const task_t task, const struct region *r)
{
    return R_SIZE(r);
}

static walk_return_t
write_sparse_region(const struct region *r, struct write_segment_data *wsd)
{
    assert(r->r_nsubregions);
    assert(!r->r_inzfodregion);
    assert(NULL == r->r_fileref);

    const mach_vm_size_t vmsize_host = getvmsize_host(wsd->wsd_task, r);
    walk_return_t step = WALK_CONTINUE;

    for (unsigned i = 0; i < r->r_nsubregions; i++) {
        const struct subregion *s = r->r_subregions[i];

        if (s->s_isuuidref)
            step = write_fileref_subregion(r, s, wsd);
        else {
            /* Write this one out as real data */
            mach_vm_size_t vmsize = S_SIZE(s);
            if (R_SIZE(r) != vmsize_host) {
                if (S_ADDR(s) + vmsize > R_ADDR(r) + vmsize_host) {
                    vmsize = R_ADDR(r) + vmsize_host - S_ADDR(s);
                    if (OPTIONS_DEBUG(opt, 3))
                        printr(r, "(subregion size tweak: was %llx, is %llx)\n",
                            S_SIZE(s), vmsize);
                }
            }
            step = write_memory_range(wsd, r, S_ADDR(s), vmsize);
        }
        if (WALK_ERROR == step)
            break;
    }
    return step;
}

static walk_return_t
write_vanilla_region(const struct region *r, struct write_segment_data *wsd)
{
    assert(0 == r->r_nsubregions);
    assert(!r->r_inzfodregion);
    assert(NULL == r->r_fileref);

    const mach_vm_size_t vmsize_host = getvmsize_host(wsd->wsd_task, r);
    return write_memory_range(wsd, r, R_ADDR(r), vmsize_host);
}

walk_return_t
region_write_memory(struct region *r, void *arg)
{
    assert(r->r_info.user_tag != VM_MEMORY_IOKIT); // elided in walk_regions()
    assert((r->r_info.max_protection & VM_PROT_READ) == VM_PROT_READ);
    return ROP_WRITE(r, arg);
}

/*
 * Handles the cases where segments are broken into chunks i.e. when
 * writing compressed segments.
 */
static unsigned long
count_memory_range(mach_vm_offset_t vmsize)
{
    unsigned long count;
    if (opt->chunksize) {
        count = (size_t)vmsize / opt->chunksize;
        if (vmsize != (mach_vm_offset_t)count * opt->chunksize)
            count++;
    } else
        count = 1;
    return count;
}

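/*
 * e.g. with a 16MiB chunksize, a 40MiB range needs 40/16 = 2 full chunks
 * plus a remainder, so three segment commands are reserved for it.
 */
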
/*
 * A sparse region is likely a writable data segment described by
 * native_segment_command_t somewhere in the address space.
 */
static void
size_sparse_subregion(const struct subregion *s, struct size_core *sc)
{
    const unsigned long count = count_memory_range(S_SIZE(s));
    sc->headersize += sizeof_segment_command() * count;
    sc->count += count;
    sc->memsize += S_SIZE(s);
}

static void
size_sparse_region(const struct region *r, struct size_core *sc_sparse, struct size_core *sc_fileref)
{
    assert(0 != r->r_nsubregions);

    unsigned long entry_total = sc_sparse->count + sc_fileref->count;
    for (unsigned i = 0; i < r->r_nsubregions; i++) {
        const struct subregion *s = r->r_subregions[i];
        if (s->s_isuuidref)
            size_fileref_subregion(s, sc_fileref);
        else
            size_sparse_subregion(s, sc_sparse);
    }
    if (OPTIONS_DEBUG(opt, 3)) {
        /* caused by compression breaking a large region into chunks */
        entry_total = (sc_fileref->count + sc_sparse->count) - entry_total;
        if (entry_total > r->r_nsubregions)
            printr(r, "range contains %u subregions requires %lu segment commands\n",
                r->r_nsubregions, entry_total);
    }
}

const struct regionop sparse_ops = {
    /* ... */
    write_sparse_region,
    /* ... */
};

static void
size_vanilla_region(const struct region *r, struct size_core *sc)
{
    assert(0 == r->r_nsubregions);

    const unsigned long count = count_memory_range(R_SIZE(r));
    sc->headersize += sizeof_segment_command() * count;
    sc->count += count;
    sc->memsize += R_SIZE(r);

    if (OPTIONS_DEBUG(opt, 3) && count != 1)
        printr(r, "range with 1 region, but requires %lu segment commands\n", count);
}

const struct regionop vanilla_ops = {
    /* ... */
    write_vanilla_region,
    /* ... */
};

walk_return_t
region_size_memory(struct region *r, void *arg)
{
    struct size_segment_data *ssd = arg;

    if (&zfod_ops == r->r_op)
        size_zfod_region(r, &ssd->ssd_zfod);
    else if (&fileref_ops == r->r_op)
        size_fileref_region(r, &ssd->ssd_fileref);
    else if (&sparse_ops == r->r_op)
        size_sparse_region(r, &ssd->ssd_sparse, &ssd->ssd_fileref);
    else if (&vanilla_ops == r->r_op)
        size_vanilla_region(r, &ssd->ssd_vanilla);
    else
        errx(EX_SOFTWARE, "%s: bad op", __func__);

    return WALK_CONTINUE;
}

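/*
 * region_size_memory() and region_write_memory() are the two walker
 * callbacks: sizing dispatches explicitly on r_op here, while writing goes
 * through ROP_WRITE() on the same regionop table (zfod, fileref, sparse or
 * vanilla).
 */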