diff --git a/gcore.tproj/corefile.c b/gcore.tproj/corefile.c
index addcda4a3228caee066f4ae56450abe7d879f81e..8242b6e2ab038d389925125dc921a51bfdd9ca47 100644
@@ -18,6 +18,7 @@
 #include <compression.h>
 #include <sys/param.h>
 #include <libgen.h>
+#include <sys/stat.h>
 
 native_mach_header_t *
 make_corefile_mach_header(void *data)
@@ -50,6 +51,7 @@ make_coreinfo_command(native_mach_header_t *mh, void *data, const uuid_t aoutid,
     cc->cmdsize = sizeof (*cc);
     cc->version = 1;
     cc->type = proto_CORETYPE_USER;
+    cc->pageshift = (uint16_t)pageshift_host;
     cc->address = address;
     uuid_copy(cc->uuid, aoutid);
     cc->dyninfo = dyninfo;
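
The new pageshift field records the log2 page size the dump was taken with (e.g. 12 for 4 KiB pages on x86_64, 14 for 16 KiB pages on arm64), so a consumer no longer has to guess the target's page size. A minimal reader-side sketch (helper name hypothetical):

#include <stdint.h>

/* sketch: recover the dump's page size from proto_coreinfo_command.pageshift */
static uint64_t
corefile_pagesize(uint16_t pageshift)
{
    return 1ULL << pageshift;   /* e.g. 14 -> 16384-byte pages */
}
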
@@ -58,30 +60,63 @@ make_coreinfo_command(native_mach_header_t *mh, void *data, const uuid_t aoutid,
     return cc;
 }
 
-static native_segment_command_t *
-make_native_segment_command(void *data, mach_vm_offset_t vmaddr, mach_vm_size_t vmsize, off_t fileoff, size_t filesize, unsigned maxprot, unsigned initprot, unsigned comptype)
+native_segment_command_t *
+make_native_segment_command(void *data, const struct vm_range *vr, const struct file_range *fr, vm_prot_t maxprot, vm_prot_t initprot)
 {
     native_segment_command_t *sc = data;
     sc->cmd = NATIVE_LC_SEGMENT;
     sc->cmdsize = sizeof (*sc);
-    assert(vmsize);
-#if defined(__LP64__)
-    sc->vmaddr = vmaddr;
-    sc->vmsize = vmsize;
-    sc->fileoff = fileoff;
-#else
-    sc->vmaddr = (uintptr_t)vmaddr;
-    sc->vmsize = (size_t)vmsize;
-    sc->fileoff = (long)fileoff;
-#endif
-    sc->filesize = filesize;
+    assert(V_SIZE(vr));
+    sc->vmaddr = (unsigned long)V_ADDR(vr);
+    sc->vmsize = (unsigned long)V_SIZE(vr);
+    sc->fileoff = (unsigned long)F_OFF(fr);
+    sc->filesize = (unsigned long)F_SIZE(fr);
     sc->maxprot = maxprot;
     sc->initprot = initprot;
     sc->nsects = 0;
-    sc->flags = proto_SG_COMP_MAKE_FLAGS(comptype);
+    sc->flags = 0;
     return sc;
 }
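
The (unsigned long) casts above are lossless on LP64 but can silently truncate 64-bit addresses and sizes on an ILP32 build, which is what the deleted #if defined(__LP64__) block used to paper over. A defensive variant (hypothetical, not part of this change) would assert before narrowing:

#include <assert.h>
#include <limits.h>

/* hypothetical guard for the narrowing casts in make_native_segment_command() */
static unsigned long
narrow_ulong(unsigned long long v)
{
    assert(v <= ULONG_MAX);     /* only a real constraint where long is 32 bits */
    return (unsigned long)v;
}
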
 
+static struct proto_coredata_command *
+make_coredata_command(void *data, const struct vm_range *vr, const struct file_range *fr, const vm_region_submap_info_data_64_t *info, unsigned comptype, unsigned purgable)
+{
+    struct proto_coredata_command *cc = data;
+    cc->cmd = proto_LC_COREDATA;
+    cc->cmdsize = sizeof (*cc);
+    assert(V_SIZE(vr));
+    cc->vmaddr = V_ADDR(vr);
+    cc->vmsize = V_SIZE(vr);
+    cc->fileoff = F_OFF(fr);
+    cc->filesize = F_SIZE(fr);
+    cc->maxprot = info->max_protection;
+    cc->prot = info->protection;
+    cc->flags = COMP_MAKE_FLAGS(comptype);
+    cc->share_mode = info->share_mode;
+    assert(purgable <= UINT8_MAX);
+    cc->purgable = (uint8_t)purgable;
+    assert(info->user_tag <= UINT8_MAX);
+    cc->tag = (uint8_t)info->user_tag;
+    cc->extp = info->external_pager;
+    return cc;
+}
+
+static size_t
+sizeof_segment_command(void) {
+    return opt->extended ?
+        sizeof (struct proto_coredata_command) : sizeof (native_segment_command_t);
+}
+
+static struct load_command *
+make_segment_command(void *data, const struct vm_range *vr, const struct file_range *fr, const vm_region_submap_info_data_64_t *info, unsigned comptype, unsigned purgable)
+{
+    if (opt->extended)
+        make_coredata_command(data, vr, fr, info, comptype, purgable);
+    else
+        make_native_segment_command(data, vr, fr, info->max_protection, info->protection);
+    return data;
+}
+
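
sizeof_segment_command() and make_segment_command() keep the sizing pass and the writing pass in agreement: with opt->extended set, the larger proto_LC_COREDATA command is both counted and emitted; otherwise the classic native segment command is. Since each maker sets cmdsize to its own fixed sizeof, a caller can walk the header buffer by cmdsize alone; a sketch (caller shape hypothetical):

/* sketch: lay consecutive segment commands into the preallocated header */
static void *
emit_segment_command(void *lc, const struct vm_range *vr, const struct file_range *fr,
    const vm_region_submap_info_data_64_t *info)
{
    const struct load_command *cmd = make_segment_command(lc, vr, fr, info, 0, VM_PURGABLE_EMPTY);
    return (char *)lc + cmd->cmdsize;   /* cmdsize == sizeof_segment_command() here */
}
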
 /*
  * Increment the mach-o header data when we succeed
  */
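
For reference, commit_load_command() (not shown in this diff) is what "increments the mach-o header data": it advances the load-command cursor and bumps ncmds/sizeofcmds, roughly like this paraphrase:

#include <stdint.h>

/* paraphrased sketch of commit_load_command(), not the verbatim source */
static void
commit_load_command_sketch(struct write_segment_data *wsd, const void *lc)
{
    const uint32_t cmdsize = ((const struct load_command *)lc)->cmdsize;
    wsd->wsd_lc = (caddr_t)lc + cmdsize;    /* next command starts after this one */
    wsd->wsd_mh->ncmds++;
    wsd->wsd_mh->sizeofcmds += cmdsize;
}
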
@@ -119,53 +154,81 @@ size_fileref_subregion(const struct subregion *s, struct size_core *sc)
     sc->memsize += S_SIZE(s);
 }
 
-#ifdef CONFIG_REFSC
 static void
 size_fileref_region(const struct region *r, struct size_core *sc)
 {
     assert(0 == r->r_nsubregions);
     assert(!r->r_inzfodregion);
 
-    size_t cmdsize = cmdsize_fileref_command(r->r_fileref->fr_libent->le_pathname);
+    size_t cmdsize = cmdsize_fileref_command(r->r_fileref->fr_pathname);
     sc->headersize += cmdsize;
     sc->count++;
     sc->memsize += R_SIZE(r);
 }
-#endif
 
 static struct proto_fileref_command *
-make_fileref_command(void *data, const struct libent *le, mach_vm_offset_t vmaddr, mach_vm_size_t vmsize, off_t fileoff, off_t filesize, unsigned maxprot, unsigned initprot)
+make_fileref_command(void *data, const char *pathname, const uuid_t uuid,
+    const struct vm_range *vr, const struct file_range *fr,
+       const vm_region_submap_info_data_64_t *info, unsigned purgable)
 {
-    struct proto_fileref_command *fr = data;
+    struct proto_fileref_command *fc = data;
     size_t len;
 
-    fr->cmd = proto_LC_FILEREF;
-    fr->cmdsize = sizeof (*fr);
-    if (0 != (len = strlen(le->le_pathname))) {
+    fc->cmd = proto_LC_FILEREF;
+    fc->cmdsize = sizeof (*fc);
+    if (0 != (len = strlen(pathname))) {
         /*
          * Strings live immediately after the
          * command, and are included in the cmdsize
          */
-        fr->filename.offset = sizeof (*fr);
-        void *s = fr + 1;
-        strlcpy(s, le->le_pathname, ++len); // NUL-terminated for mmap sanity
-        fr->cmdsize += roundup(len, sizeof (long));
-        assert(cmdsize_fileref_command(le->le_pathname) == fr->cmdsize);
+        fc->filename.offset = sizeof (*fc);
+        void *s = fc + 1;
+        strlcpy(s, pathname, ++len); // NUL-terminated for mmap sanity
+        fc->cmdsize += roundup(len, sizeof (long));
+        assert(cmdsize_fileref_command(pathname) == fc->cmdsize);
     }
-    uuid_copy(fr->uuid, le->le_uuid);
-
-    fr->vmaddr = vmaddr;
 
-    assert(vmsize);
-    fr->vmsize = vmsize;
-    assert(fileoff >= 0);
-    fr->fileoff = fileoff;
-    fr->filesize = filesize;
-
-    assert(maxprot & VM_PROT_READ);
-    fr->maxprot = maxprot;
-    fr->initprot = initprot;
-    return fr;
+    /*
+     * A file reference allows different kinds of identifiers for
+     * the reference to be reconstructed.
+     */
+    assert(info->external_pager);
+
+    if (!uuid_is_null(uuid)) {
+        uuid_copy(fc->id, uuid);
+        fc->flags = FREF_MAKE_FLAGS(kFREF_ID_UUID);
+    } else {
+        struct stat st;
+        if (-1 != stat(pathname, &st) && 0 != st.st_mtimespec.tv_sec) {
+            /* "little-endian format timespec structure" */
+            struct timespec ts = st.st_mtimespec;
+            ts.tv_nsec = 0; // allow touch(1) to fix things
+            memset(fc->id, 0, sizeof(fc->id));
+            memcpy(fc->id, &ts, sizeof(ts));
+            fc->flags = FREF_MAKE_FLAGS(kFREF_ID_MTIMESPEC_LE);
+        } else
+            fc->flags = FREF_MAKE_FLAGS(kFREF_ID_NONE);
+    }
+
+    fc->vmaddr = V_ADDR(vr);
+    assert(V_SIZE(vr));
+    fc->vmsize = V_SIZE(vr);
+
+    assert(F_OFF(fr) >= 0);
+    fc->fileoff = F_OFF(fr);
+    fc->filesize = F_SIZE(fr);
+
+    assert(info->max_protection & VM_PROT_READ);
+    fc->maxprot = info->max_protection;
+    fc->prot = info->protection;
+
+    fc->share_mode = info->share_mode;
+    assert(purgable <= UINT8_MAX);
+    fc->purgable = (uint8_t)purgable;
+    assert(info->user_tag <= UINT8_MAX);
+    fc->tag = (uint8_t)info->user_tag;
+    fc->extp = info->external_pager;
+    return fc;
 }
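
The identifier logic above gives a reader three ways to decide whether an on-disk file still matches the reference: a UUID (kFREF_ID_UUID), the file's whole-second modification time (kFREF_ID_MTIMESPEC_LE, with tv_nsec zeroed precisely so touch(1) can revalidate a file whose content is known to be right), or nothing (kFREF_ID_NONE). A reader-side sketch of the mtimespec check, mirroring the writer's layout (helper name hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <sys/stat.h>

/* sketch: validate a kFREF_ID_MTIMESPEC_LE reference against the file on disk */
static bool
fileref_mtime_matches(const char *pathname, const uint8_t id[16])
{
    struct stat st;
    if (-1 == stat(pathname, &st))
        return false;
    struct timespec ts = st.st_mtimespec;
    ts.tv_nsec = 0;                 /* the writer zeroed the sub-second part */
    uint8_t want[16] = { 0 };
    memcpy(want, &ts, sizeof(ts));  /* same in-memory layout the writer used */
    return 0 == memcmp(want, id, sizeof(want));
}
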
 
 /*
@@ -176,23 +239,26 @@ static walk_return_t
 write_fileref_subregion(const struct region *r, const struct subregion *s, struct write_segment_data *wsd)
 {
     assert(S_LIBENT(s));
-    if (opt->debug && !issubregiontype(s, SEG_TEXT) && !issubregiontype(s, SEG_LINKEDIT))
+    if (OPTIONS_DEBUG(opt, 1) && !issubregiontype(s, SEG_TEXT) && !issubregiontype(s, SEG_LINKEDIT))
         printf("%s: unusual segment type %s from %s\n", __func__, S_MACHO_TYPE(s), S_FILENAME(s));
     assert((r->r_info.max_protection & VM_PROT_READ) == VM_PROT_READ);
     assert((r->r_info.protection & VM_PROT_WRITE) == 0);
 
     const struct libent *le = S_LIBENT(s);
-    const struct proto_fileref_command *fc = make_fileref_command(wsd->wsd_lc, le, S_ADDR(s), S_SIZE(s), S_MACHO_FILEOFF(s), S_SIZE(s), r->r_info.max_protection, r->r_info.protection);
+    const struct file_range fr = {
+        .off = S_MACHO_FILEOFF(s),
+        .size = S_SIZE(s),
+    };
+    const struct proto_fileref_command *fc = make_fileref_command(wsd->wsd_lc, le->le_pathname, le->le_uuid, S_RANGE(s), &fr, &r->r_info, r->r_purgable);
+
     commit_load_command(wsd, (const void *)fc);
-    if (opt->debug > 1) {
+    if (OPTIONS_DEBUG(opt, 3)) {
         hsize_str_t hstr;
         printr(r, "ref '%s' %s (vm %llx-%llx, file offset %lld for %s)\n", S_FILENAME(s), S_MACHO_TYPE(s), (uint64_t)fc->vmaddr, (uint64_t)fc->vmaddr + fc->vmsize, (int64_t)fc->fileoff, str_hsize(hstr, fc->filesize));
     }
     return WALK_CONTINUE;
 }
 
-#ifdef CONFIG_REFSC
-
 /*
  * Note that we may be asked to write reference segments whose protections
  * are rw- -- this -should- be ok as we don't convert the region to a file
@@ -206,12 +272,18 @@ write_fileref_region(const struct region *r, struct write_segment_data *wsd)
     assert((r->r_info.max_protection & VM_PROT_READ) == VM_PROT_READ);
     assert(!r->r_inzfodregion);
 
-    const struct libent *le = r->r_fileref->fr_libent;
-    const struct proto_fileref_command *fc = make_fileref_command(wsd->wsd_lc, le, R_ADDR(r), R_SIZE(r), r->r_fileref->fr_offset, (size_t)R_SIZE(r), r->r_info.max_protection, r->r_info.protection);
+    const struct libent *le = r->r_fileref->fr_libent;
+    const char *pathname = r->r_fileref->fr_pathname;
+    const struct file_range fr = {
+        .off = r->r_fileref->fr_offset,
+        .size = R_SIZE(r),
+    };
+    const struct proto_fileref_command *fc = make_fileref_command(wsd->wsd_lc, pathname, le ? le->le_uuid : UUID_NULL, R_RANGE(r), &fr, &r->r_info, r->r_purgable);
+
     commit_load_command(wsd, (const void *)fc);
-    if (opt->debug > 1) {
+    if (OPTIONS_DEBUG(opt, 3)) {
         hsize_str_t hstr;
-        printr(r, "ref '%s' %s (vm %llx-%llx, file offset %lld for %s)\n", le->le_filename, "(type?)", (uint64_t)fc->vmaddr, (uint64_t)fc->vmaddr + fc->vmsize, (int64_t)fc->fileoff, str_hsize(hstr, fc->filesize));
+        printr(r, "ref '%s' %s (vm %llx-%llx, file offset %lld for %s)\n", pathname, "(type?)", (uint64_t)fc->vmaddr, (uint64_t)fc->vmaddr + fc->vmsize, (int64_t)fc->fileoff, str_hsize(hstr, fc->filesize));
     }
     return WALK_CONTINUE;
 }
@@ -222,7 +294,6 @@ const struct regionop fileref_ops = {
     del_fileref_region,
 };
 
-#endif /* CONFIG_REFSC */
 
 #pragma mark -- ZFOD segments written only to the header --
 
@@ -231,7 +302,7 @@ size_zfod_region(const struct region *r, struct size_core *sc)
 {
     assert(0 == r->r_nsubregions);
     assert(r->r_inzfodregion);
-    sc->headersize += sizeof (native_segment_command_t);
+    sc->headersize += sizeof_segment_command();
     sc->count++;
     sc->memsize += R_SIZE(r);
 }
@@ -242,8 +313,12 @@ write_zfod_region(const struct region *r, struct write_segment_data *wsd)
     assert(r->r_info.user_tag != VM_MEMORY_IOKIT);
     assert((r->r_info.max_protection & VM_PROT_READ) == VM_PROT_READ);
 
-    const void *sc = make_native_segment_command(wsd->wsd_lc, R_ADDR(r), R_SIZE(r), wsd->wsd_foffset, 0, r->r_info.max_protection, r->r_info.protection, 0);
-    commit_load_command(wsd, sc);
+    const struct file_range fr = {
+        .off = wsd->wsd_foffset,
+        .size = 0,
+    };
+    make_segment_command(wsd->wsd_lc, R_RANGE(r), &fr, &r->r_info, 0, VM_PURGABLE_EMPTY);
+    commit_load_command(wsd, wsd->wsd_lc);
     return WALK_CONTINUE;
 }
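
A ZFOD (zero-fill-on-demand) segment consumes a load command but no file bytes: filesize is 0 and fileoff is simply the current write offset. A reader reconstructing the address space can satisfy such a segment with fresh anonymous pages instead of reading anything; a minimal sketch (assuming vmaddr/vmsize from the segment command):

#include <stdbool.h>
#include <stdint.h>
#include <sys/mman.h>

/* sketch: materialize a zfod segment (filesize == 0) as anonymous zero pages */
static bool
map_zfod_segment(uint64_t vmaddr, uint64_t vmsize)
{
    void *p = mmap((void *)(uintptr_t)vmaddr, (size_t)vmsize, PROT_READ | PROT_WRITE,
                   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
    return p != MAP_FAILED;         /* MAP_ANON pages are zero-filled by the kernel */
}
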
 
@@ -256,35 +331,25 @@ const struct regionop zfod_ops = {
 #pragma mark -- Regions containing data --
 
 static walk_return_t
-pwrite_memory(struct write_segment_data *wsd, const void *addr, size_t size, mach_vm_offset_t memaddr, size_t memsize)
+pwrite_memory(struct write_segment_data *wsd, const void *addr, size_t size, const struct vm_range *vr)
 {
     assert(size);
 
-    int error = 0;
-    ssize_t nwritten = 0;
-
-    if (opt->sizebound > 0 &&
-        wsd->wsd_foffset + (off_t)size > opt->sizebound) {
-        error = EFBIG;
-    } else {
-        nwritten = pwrite(wsd->wsd_fd, addr, size, wsd->wsd_foffset);
-        if (nwritten < 0)
-            error = errno;
-    }
+    ssize_t nwritten;
+    const int error = bounded_pwrite(wsd->wsd_fd, addr, size, wsd->wsd_foffset, &wsd->wsd_nocache, &nwritten);
 
-    if (error || opt->debug > 1) {
+    if (error || OPTIONS_DEBUG(opt, 3)) {
         hsize_str_t hsz;
-        printf("%llx-%llx writing %ld bytes at offset %lld -> ",
-               memaddr, memaddr+memsize, size, wsd->wsd_foffset);
+        printvr(vr, "writing %ld bytes at offset %lld -> ", size, wsd->wsd_foffset);
         if (error)
             printf("err #%d - %s ", error, strerror(error));
         else {
             printf("%s ", str_hsize(hsz, nwritten));
             if (size != (size_t)nwritten)
                 printf("[%zd - incomplete write!] ", nwritten);
-            else if (size != memsize)
+            else if (size != V_SIZE(vr))
                 printf("(%s in memory) ",
-                       str_hsize(hsz, memsize));
+                       str_hsize(hsz, V_SIZE(vr)));
         }
         printf("\n");
     }
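
The open-coded sizebound check and pwrite(2) move into the bounded_pwrite() helper, which also reports the byte count and can flip F_NOCACHE on the descriptor for large writes (hence &wsd->wsd_nocache). Its shape is approximately the following paraphrase; the caching policy shown is an assumption, not the verbatim helper:

#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <unistd.h>

/* paraphrased sketch of bounded_pwrite(): honor opt->sizebound, report bytes written */
static int
bounded_pwrite_sketch(int fd, const void *addr, size_t size, off_t off,
    bool *nocache, ssize_t *nwritten)
{
    if (opt->sizebound > 0 && off + (off_t)size > opt->sizebound)
        return EFBIG;               /* refuse to grow the core past the bound */
    if (*nocache)
        *nocache = 0 == fcntl(fd, F_NOCACHE, 1);    /* hypothetical: best-effort */
    *nwritten = pwrite(fd, addr, size, off);
    return *nwritten < 0 ? errno : 0;
}
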
@@ -318,16 +383,16 @@ segment_compflags(compression_algorithm ca, unsigned *algnum)
 {
     switch (ca) {
         case COMPRESSION_LZ4:
-            *algnum = proto_SG_COMP_LZ4;
+            *algnum = kCOMP_LZ4;
             break;
         case COMPRESSION_ZLIB:
-            *algnum = proto_SG_COMP_ZLIB;
+            *algnum = kCOMP_ZLIB;
             break;
         case COMPRESSION_LZMA:
-            *algnum = proto_SG_COMP_LZMA;
+            *algnum = kCOMP_LZMA;
             break;
         case COMPRESSION_LZFSE:
-            *algnum = proto_SG_COMP_LZFSE;
+            *algnum = kCOMP_LZFSE;
             break;
         default:
             err(EX_SOFTWARE, "unsupported compression algorithm %x", ca);
@@ -335,6 +400,190 @@ segment_compflags(compression_algorithm ca, unsigned *algnum)
     return 0;
 }
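
segment_compflags() pins the on-disk compression identifiers to the stable kCOMP_* constants rather than the raw compression.h enum values. A reader inverts the mapping before calling the decoder; a sketch:

#include <compression.h>
#include <err.h>
#include <sysexits.h>

/* sketch: reader-side inverse of segment_compflags() */
static compression_algorithm
kcomp_to_algorithm(unsigned algnum)
{
    switch (algnum) {
        case kCOMP_LZ4:   return COMPRESSION_LZ4;
        case kCOMP_ZLIB:  return COMPRESSION_ZLIB;
        case kCOMP_LZMA:  return COMPRESSION_LZMA;
        case kCOMP_LZFSE: return COMPRESSION_LZFSE;
        default:
            errx(EX_DATAERR, "unknown compression algorithm %x", algnum);
    }
}
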
 
+static bool
+is_file_mapped_shared(const struct region *r)
+{
+    if (r->r_info.external_pager)
+        switch (r->r_info.share_mode) {
+            case SM_TRUESHARED:     // sm=shm
+            case SM_SHARED:         // sm=ali
+            case SM_SHARED_ALIASED: // sm=s/a
+                return true;
+            default:
+                break;
+        }
+    return false;
+}
+
+static walk_return_t
+map_memory_range(struct write_segment_data *wsd, const struct region *r, const struct vm_range *vr, struct vm_range *dp)
+{
+    if (r->r_incommregion) {
+        /*
+         * Special case: for commpage access, copy from our own address space.
+         */
+        V_SETADDR(dp, 0);
+        V_SETSIZE(dp, V_SIZE(vr));
+
+        kern_return_t kr = mach_vm_allocate(mach_task_self(), &dp->addr, dp->size, VM_FLAGS_ANYWHERE);
+        if (KERN_SUCCESS != kr || 0 == dp->addr) {
+            err_mach(kr, r, "mach_vm_allocate c %llx-%llx", V_ADDR(vr), V_ENDADDR(vr));
+            print_one_memory_region(r);
+            return WALK_ERROR;
+        }
+        if (OPTIONS_DEBUG(opt, 3))
+            printr(r, "copying from self %llx-%llx\n", V_ADDR(vr), V_ENDADDR(vr));
+        memcpy((void *)dp->addr, (const void *)V_ADDR(vr), V_SIZE(vr));
+        return WALK_CONTINUE;
+    }
+
+    if (!r->r_insharedregion && 0 == (r->r_info.protection & VM_PROT_READ)) {
+        assert(0 != (r->r_info.max_protection & VM_PROT_READ)); // simple_region_optimization()
+
+        /*
+         * Special case: region that doesn't currently have read permission.
+         * (e.g. --x/r-x permissions with tag 64 - JS JIT generated code
+         * from com.apple.WebKit.WebContent)
+         */
+        const mach_vm_offset_t pagesize_host = 1u << pageshift_host;
+        if (OPTIONS_DEBUG(opt, 3))
+            printr(r, "unreadable (%s/%s), remap with read permission\n",
+                str_prot(r->r_info.protection), str_prot(r->r_info.max_protection));
+        V_SETADDR(dp, 0);
+        V_SETSIZE(dp, V_SIZE(vr));
+        vm_prot_t cprot, mprot;
+        kern_return_t kr = mach_vm_remap(mach_task_self(), &dp->addr, V_SIZE(dp), pagesize_host - 1, true, wsd->wsd_task, V_ADDR(vr), true, &cprot, &mprot, VM_INHERIT_NONE);
+        if (KERN_SUCCESS != kr) {
+            err_mach(kr, r, "mach_vm_remap() %llx-%llx", V_ADDR(vr), V_ENDADDR(vr));
+            return WALK_ERROR;
+        }
+        assert(r->r_info.protection == cprot && r->r_info.max_protection == mprot);
+        kr = mach_vm_protect(mach_task_self(), V_ADDR(dp), V_SIZE(dp), false, VM_PROT_READ);
+        if (KERN_SUCCESS != kr) {
+            err_mach(kr, r, "mach_vm_protect() %llx-%llx", V_ADDR(vr), V_ENDADDR(vr));
+            mach_vm_deallocate(mach_task_self(), V_ADDR(dp), V_SIZE(dp));
+            return WALK_ERROR;
+        }
+        return WALK_CONTINUE;
+    }
+
+    /*
+     * Most segments with data are read here
+     */
+    vm_offset_t data32 = 0;
+    mach_msg_type_number_t data32_count;
+    kern_return_t kr = mach_vm_read(wsd->wsd_task, V_ADDR(vr), V_SIZE(vr), &data32, &data32_count);
+    switch (kr) {
+        case KERN_SUCCESS:
+            V_SETADDR(dp, data32);
+            V_SETSIZE(dp, data32_count);
+            break;
+        case KERN_INVALID_ADDRESS:
+            if (!r->r_insharedregion &&
+                (VM_MEMORY_SKYWALK == r->r_info.user_tag || is_file_mapped_shared(r))) {
+                if (OPTIONS_DEBUG(opt, 1)) {
+                    /* not necessarily an error: mitigation below */
+                    tag_str_t tstr;
+                    printr(r, "mach_vm_read() failed (%s) -- substituting zeroed region\n", str_tagr(tstr, r));
+                    if (OPTIONS_DEBUG(opt, 2))
+                        print_one_memory_region(r);
+                }
+                V_SETSIZE(dp, V_SIZE(vr));
+                kr = mach_vm_allocate(mach_task_self(), &dp->addr, V_SIZE(dp), VM_FLAGS_ANYWHERE);
+                if (KERN_SUCCESS != kr || 0 == V_ADDR(dp))
+                    err_mach(kr, r, "mach_vm_allocate() z %llx-%llx", V_ADDR(vr), V_ENDADDR(vr));
+                break;
+            }
+            /*FALLTHROUGH*/
+        default:
+            err_mach(kr, r, "mach_vm_read() %llx-%llx", V_ADDR(vr), V_SIZE(vr));
+            if (OPTIONS_DEBUG(opt, 1))
+                print_one_memory_region(r);
+            break;
+    }
+    if (kr != KERN_SUCCESS) {
+        V_SETADDR(dp, 0);
+        return WALK_ERROR;
+    }
+
+    /*
+     * Sometimes (e.g. searchd) we may not be able to fetch all the pages
+     * from the underlying mapped file, in which case replace those pages
+     * with zfod pages (at least they compress efficiently) rather than
+     * taking a SIGBUS when compressing them.
+     *
+     * XXX Perhaps we should just catch the SIGBUS, and if the faulting address
+     * is in the right range, substitute zfod pages and rerun region compression?
+     * Complex though, because the compression code may be multithreaded.
+     */
+    if (!r->r_insharedregion && is_file_mapped_shared(r)) {
+        const mach_vm_offset_t pagesize_host = 1u << pageshift_host;
+
+        if (r->r_info.pages_resident * pagesize_host == V_SIZE(dp))
+            return WALK_CONTINUE;   // all pages resident, so skip ..
+
+        if (OPTIONS_DEBUG(opt, 2))
+            printr(r, "probing %llu pages in mapped-shared file\n", V_SIZE(dp) / pagesize_host);
+
+        kr = KERN_SUCCESS;
+        for (mach_vm_offset_t a = V_ADDR(dp); a < V_ENDADDR(dp); a += pagesize_host) {
+
+            mach_msg_type_number_t pCount = VM_PAGE_INFO_BASIC_COUNT;
+            vm_page_info_basic_data_t pInfo;
+
+            kr = mach_vm_page_info(mach_task_self(), a, VM_PAGE_INFO_BASIC, (vm_page_info_t)&pInfo, &pCount);
+            if (KERN_SUCCESS != kr) {
+                err_mach(kr, NULL, "mach_vm_page_info() at %llx", a);
+                break;
+            }
+            /* If the VM has the page somewhere, assume we can bring it back */
+            if (pInfo.disposition & (VM_PAGE_QUERY_PAGE_PRESENT | VM_PAGE_QUERY_PAGE_REF | VM_PAGE_QUERY_PAGE_DIRTY))
+                continue;
+
+            /* Force the page to be fetched to see if it faults */
+            mach_vm_size_t tsize = pagesize_host;
+            void *tmp = valloc((size_t)tsize);
+            const mach_vm_address_t vtmp = (mach_vm_address_t)tmp;
+
+            switch (kr = mach_vm_read_overwrite(mach_task_self(), a, tsize, vtmp, &tsize)) {
+                case KERN_SUCCESS:
+                    break;
+                case KERN_INVALID_ADDRESS: {
+                    /* Content can't be found: replace it and the rest of the region with zero-fill pages */
+                    if (OPTIONS_DEBUG(opt, 2)) {
+                        printr(r, "mach_vm_read_overwrite() failed after %llu pages -- substituting zfod\n", (a - V_ADDR(dp)) / pagesize_host);
+                        print_one_memory_region(r);
+                    }
+                    mach_vm_address_t va = a;
+                    kr = mach_vm_allocate(mach_task_self(), &va, V_ENDADDR(dp) - va, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE);
+                    if (KERN_SUCCESS != kr) {
+                        err_mach(kr, r, "mach_vm_allocate() %llx", a);
+                    } else {
+                        assert(a == va);
+                        a = V_ENDADDR(dp);  // no need to look any further
+                    }
+                    break;
+                }
+                default:
+                    err_mach(kr, r, "mach_vm_read_overwrite() %llx", a);
+                    break;
+            }
+            free(tmp);
+            if (KERN_SUCCESS != kr)
+                break;
+        }
+        if (KERN_SUCCESS != kr) {
+            kr = mach_vm_deallocate(mach_task_self(), V_ADDR(dp), V_SIZE(dp));
+            if (KERN_SUCCESS != kr && OPTIONS_DEBUG(opt, 1))
+                err_mach(kr, r, "mach_vm_deallocate() pre %llx-%llx", V_ADDR(dp), V_ENDADDR(dp));
+            V_SETADDR(dp, 0);
+            return WALK_ERROR;
+        }
+    }
+
+    return WALK_CONTINUE;
+}
+
 static walk_return_t
 write_memory_range(struct write_segment_data *wsd, const struct region *r, mach_vm_offset_t vmaddr, mach_vm_offset_t vmsize)
 {
@@ -344,10 +593,6 @@ write_memory_range(struct write_segment_data *wsd, const struct region *r, mach_
     walk_return_t step = WALK_CONTINUE;
 
     do {
-        unsigned algorithm = 0;
-        void *dstbuf = NULL;
-        size_t filesize;
-
         vmsize = resid;
 
         /*
@@ -360,93 +605,72 @@ write_memory_range(struct write_segment_data *wsd, const struct region *r, mach_
             vmsize = opt->chunksize;
         assert(vmsize <= INT32_MAX);
 
-        mach_vm_offset_t data;
-        mach_vm_offset_t data_count;
-
-        kern_return_t kr;
-        const void *srcaddr;
-
-        if (r->r_incommregion) {
-            /*
-             * For commpage access, we just copy from our own address space.
-             */
-            data = 0;
-            data_count = vmsize;
-            kr = mach_vm_allocate(mach_task_self(), &data, data_count, VM_FLAGS_ANYWHERE);
-            if (KERN_SUCCESS != kr || data == 0) {
-                err_mach(kr, r, "subregion %llx-%llx, mach_vm_allocate()", vmaddr, vmaddr + vmsize);
-                if (opt->debug) {
-                    print_memory_region_header();
-                    ROP_PRINT(r);
-                }
-                break;
-            }
-            if (opt->debug)
-                printr(r, "subregion %llx-%llx, copying from self\n", vmaddr, vmaddr+vmsize);
-            srcaddr = (const void *)memcpy((void *)data, (void *)vmaddr, vmsize);
-        } else {
-            /*
-             * Most segments with data are mapped here
-             */
-            vm_offset_t data32 = 0;
-            mach_msg_type_number_t data32_count;
-            kr = mach_vm_read(wsd->wsd_task, vmaddr, vmsize, &data32, &data32_count);
-            if (KERN_SUCCESS != kr || data32 == 0 || data32_count < vmsize) {
-                err_mach(kr, r, "subregion %llx-%llx, mach_vm_read()", vmaddr, vmaddr + vmsize);
-                if (opt->debug) {
-                    print_memory_region_header();
-                    ROP_PRINT(r);
-                }
-                break;
-            }
-            data = data32;
-            data_count = data32_count;
-            mach_vm_behavior_set(mach_task_self(), data, data_count, VM_BEHAVIOR_SEQUENTIAL);
-            srcaddr = (const void *)data;
-        }
+        const struct vm_range vr = {
+            .addr = vmaddr,
+            .size = vmsize,
+        };
+        struct vm_range d, *dp = &d;
 
-        assert(vmsize);
+        step = map_memory_range(wsd, r, &vr, dp);
+        if (WALK_CONTINUE != step)
+            break;
+        assert(0 != V_ADDR(dp) && 0 != V_SIZE(dp));
+        const void *srcaddr = (const void *)V_ADDR(dp);
 
-        if (opt->compress) {
-            dstbuf = malloc((size_t)vmsize);
-            if (dstbuf) {
+        mach_vm_behavior_set(mach_task_self(), V_ADDR(dp), V_SIZE(dp), VM_BEHAVIOR_SEQUENTIAL);
 
-                filesize = compression_encode_buffer(dstbuf, (size_t)vmsize, srcaddr, (size_t)vmsize, NULL, opt->calgorithm);
+        void *dstbuf = NULL;
+        unsigned algorithm = 0;
+        size_t filesize;
 
-                if (filesize > 0 && filesize < vmsize) {
-                    srcaddr = dstbuf;
-                    if (segment_compflags(opt->calgorithm, &algorithm) != 0) {
-                        free(dstbuf);
-                        mach_vm_deallocate(mach_task_self(), data, data_count);
-                        return WALK_ERROR;
-                    }
-                } else {
-                    free(dstbuf);
-                    dstbuf = NULL;
-                    filesize = (size_t)vmsize;
-                }
-            } else
-                filesize = (size_t)vmsize;
-        } else
-            filesize = (size_t)vmsize;
+        if (opt->extended) {
+            dstbuf = malloc(V_SIZEOF(dp));
+            if (dstbuf) {
+                filesize = compression_encode_buffer(dstbuf, V_SIZEOF(dp), srcaddr, V_SIZEOF(dp), NULL, opt->calgorithm);
+                if (filesize > 0 && filesize < V_SIZEOF(dp)) {
+                    srcaddr = dstbuf;   /* the data source is now heap, compressed */
+                    mach_vm_deallocate(mach_task_self(), V_ADDR(dp), V_SIZE(dp));
+                    V_SETADDR(dp, 0);
+                    if (segment_compflags(opt->calgorithm, &algorithm) != 0) {
+                        /* dp was already deallocated and zeroed above,
+                         * so only the compression buffer needs cleanup */
+                        free(dstbuf);
+                        step = WALK_ERROR;
+                        break;
+                    }
+                } else {
+                    free(dstbuf);
+                    dstbuf = NULL;
+                    filesize = V_SIZEOF(dp);
+                }
+            } else
+                filesize = V_SIZEOF(dp);
+            assert(filesize <= V_SIZEOF(dp));
+        } else
+            filesize = V_SIZEOF(dp);
 
         assert(filesize);
 
-        native_segment_command_t *sc = make_native_segment_command(wsd->wsd_lc, vmaddr, vmsize, wsd->wsd_foffset, filesize, r->r_info.max_protection, r->r_info.protection, algorithm);
-
-        assert((sc->flags == 0) ^ (sc->filesize < sc->vmsize));
-
-        step = pwrite_memory(wsd, srcaddr, sc->filesize, vmaddr, sc->vmsize);
-        if (dstbuf)
-            free(dstbuf);
-        mach_vm_deallocate(mach_task_self(), data, data_count);
+        const struct file_range fr = {
+            .off = wsd->wsd_foffset,
+            .size = filesize,
+        };
+        make_segment_command(wsd->wsd_lc, &vr, &fr, &r->r_info, algorithm, r->r_purgable);
+        step = pwrite_memory(wsd, srcaddr, filesize, &vr);
+        if (dstbuf)
+            free(dstbuf);
+        if (V_ADDR(dp)) {
+            kern_return_t kr = mach_vm_deallocate(mach_task_self(), V_ADDR(dp), V_SIZE(dp));
+            if (KERN_SUCCESS != kr && OPTIONS_DEBUG(opt, 1))
+                err_mach(kr, r, "mach_vm_deallocate() post %llx-%llx", V_ADDR(dp), V_SIZE(dp));
+        }
 
-        if (WALK_ERROR == step)
-            break;
-        commit_load_command(wsd, (const void *)sc);
-        resid -= vmsize;
-        vmaddr += vmsize;
-    } while (resid);
+        if (WALK_ERROR == step)
+            break;
+        commit_load_command(wsd, wsd->wsd_lc);
+        resid -= vmsize;
+        vmaddr += vmsize;
+    } while (resid);
 
     return step;
 }
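
In the extended format a segment's file bytes are either raw (filesize == vmsize, no compression flags) or a compressed buffer that must inflate back to exactly vmsize bytes. A reader-side sketch, building on kcomp_to_algorithm() above (names hypothetical):

#include <compression.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* sketch: inflate one extended segment's file bytes back into memory */
static bool
decode_segment(uint8_t *dst, size_t vmsize, const uint8_t *src, size_t filesize,
    compression_algorithm ca)
{
    if (filesize == vmsize) {       /* stored raw */
        memcpy(dst, src, vmsize);
        return true;
    }
    size_t n = compression_decode_buffer(dst, vmsize, src, filesize, NULL, ca);
    return n == vmsize;             /* must reproduce the region exactly */
}
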
@@ -464,7 +688,7 @@ getvmsize_host(const task_t task, const struct region *r)
 
     if (pageshift_host != pageshift_app) {
         is_actual_size(task, r, &vmsize_host);
-        if (opt->debug && R_SIZE(r) != vmsize_host)
+        if (OPTIONS_DEBUG(opt, 1) && R_SIZE(r) != vmsize_host)
             printr(r, "(region size tweak: was %llx, is %llx)\n", R_SIZE(r), vmsize_host);
     }
     return vmsize_host;
@@ -482,9 +706,7 @@ write_sparse_region(const struct region *r, struct write_segment_data *wsd)
 {
     assert(r->r_nsubregions);
     assert(!r->r_inzfodregion);
-#ifdef CONFIG_REFSC
     assert(NULL == r->r_fileref);
-#endif
 
     const mach_vm_size_t vmsize_host = getvmsize_host(wsd->wsd_task, r);
     walk_return_t step = WALK_CONTINUE;
@@ -492,7 +714,7 @@ write_sparse_region(const struct region *r, struct write_segment_data *wsd)
     for (unsigned i = 0; i < r->r_nsubregions; i++) {
         const struct subregion *s = r->r_subregions[i];
 
-        if (s->s_isfileref)
+        if (s->s_isuuidref)
             step = write_fileref_subregion(r, s, wsd);
         else {
             /* Write this one out as real data */
@@ -500,7 +722,7 @@ write_sparse_region(const struct region *r, struct write_segment_data *wsd)
             if (R_SIZE(r) != vmsize_host) {
                 if (S_ADDR(s) + vmsize > R_ADDR(r) + vmsize_host) {
                     vmsize = R_ADDR(r) + vmsize_host - S_ADDR(s);
-                    if (opt->debug)
+                    if (OPTIONS_DEBUG(opt, 3))
                         printr(r, "(subregion size tweak: was %llx, is %llx)\n",
                                S_SIZE(s), vmsize);
                 }
@@ -518,9 +740,7 @@ write_vanilla_region(const struct region *r, struct write_segment_data *wsd)
 {
     assert(0 == r->r_nsubregions);
     assert(!r->r_inzfodregion);
-#ifdef CONFIG_REFSC
     assert(NULL == r->r_fileref);
-#endif
 
     const mach_vm_size_t vmsize_host = getvmsize_host(wsd->wsd_task, r);
     return write_memory_range(wsd, r, R_ADDR(r), vmsize_host);
@@ -542,7 +762,7 @@ static unsigned long
 count_memory_range(mach_vm_offset_t vmsize)
 {
     unsigned long count;
-    if (opt->compress && opt->chunksize > 0) {
+    if (opt->chunksize) {
         count = (size_t)vmsize / opt->chunksize;
         if (vmsize != (mach_vm_offset_t)count * opt->chunksize)
             count++;
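
count_memory_range() is a ceiling division over opt->chunksize: with a 2 MiB chunk size, a 5 MiB region costs three segment commands (2+2+1). The equivalent closed form, for clarity:

/* sketch: count_memory_range() computes ceil(vmsize / chunksize) when chunking */
static unsigned long
chunk_count(mach_vm_offset_t vmsize, size_t chunksize)
{
    return chunksize ? (unsigned long)((vmsize + chunksize - 1) / chunksize) : 1;
}
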
@@ -559,7 +779,7 @@ static void
 size_sparse_subregion(const struct subregion *s, struct size_core *sc)
 {
     const unsigned long count = count_memory_range(S_SIZE(s));
-    sc->headersize += sizeof (native_segment_command_t) * count;
+    sc->headersize += sizeof_segment_command() * count;
     sc->count += count;
     sc->memsize += S_SIZE(s);
 }
@@ -572,12 +792,12 @@ size_sparse_region(const struct region *r, struct size_core *sc_sparse, struct s
     unsigned long entry_total = sc_sparse->count + sc_fileref->count;
     for (unsigned i = 0; i < r->r_nsubregions; i++) {
         const struct subregion *s = r->r_subregions[i];
-        if (s->s_isfileref)
+        if (s->s_isuuidref)
             size_fileref_subregion(s, sc_fileref);
         else
             size_sparse_subregion(s, sc_sparse);
     }
-    if (opt->debug) {
+    if (OPTIONS_DEBUG(opt, 3)) {
         /* caused by compression breaking a large region into chunks */
         entry_total = (sc_fileref->count + sc_sparse->count) - entry_total;
         if (entry_total > r->r_nsubregions)
@@ -598,11 +818,11 @@ size_vanilla_region(const struct region *r, struct size_core *sc)
     assert(0 == r->r_nsubregions);
 
     const unsigned long count = count_memory_range(R_SIZE(r));
-    sc->headersize += sizeof (native_segment_command_t) * count;
+    sc->headersize += sizeof_segment_command() * count;
     sc->count += count;
     sc->memsize += R_SIZE(r);
 
-    if (opt->debug && count > 1)
+    if (OPTIONS_DEBUG(opt, 3) && count != 1)
         printr(r, "range with 1 region, but requires %lu segment commands\n", count);
 }
 
@@ -619,10 +839,8 @@ region_size_memory(struct region *r, void *arg)
 
     if (&zfod_ops == r->r_op)
         size_zfod_region(r, &ssd->ssd_zfod);
-#ifdef CONFIG_REFSC
     else if (&fileref_ops == r->r_op)
         size_fileref_region(r, &ssd->ssd_fileref);
-#endif
     else if (&sparse_ops == r->r_op)
         size_sparse_region(r, &ssd->ssd_sparse, &ssd->ssd_fileref);
     else if (&vanilla_ops == r->r_op)