+ *volatile_compressed_pmap_size_p = volatile_compressed_pmap_count * PAGE_SIZE;
+
+ return KERN_SUCCESS;
+}
+
+void
+vm_map_sizes(vm_map_t map,
+ vm_map_size_t * psize,
+ vm_map_size_t * pfree,
+ vm_map_size_t * plargest_free)
+{
+ vm_map_entry_t entry;
+ vm_map_offset_t prev;
+ vm_map_size_t free, total_free, largest_free;
+ boolean_t end;
+
+ if (!map)
+ {
+ if (psize) *psize = 0;
+ if (pfree) *pfree = 0;
+ if (plargest_free) *plargest_free = 0;
+ return;
+ }
+ total_free = largest_free = 0;
+
+ vm_map_lock_read(map);
+ if (psize) *psize = map->max_offset - map->min_offset;
+
+ prev = map->min_offset;
+ for (entry = vm_map_first_entry(map);; entry = entry->vme_next)
+ {
+ end = (entry == vm_map_to_entry(map));
+
+ if (end) free = entry->vme_end - prev;
+ else free = entry->vme_start - prev;
+
+ total_free += free;
+ if (free > largest_free) largest_free = free;
+
+ if (end) break;
+ prev = entry->vme_end;
+ }
+ vm_map_unlock_read(map);
+ if (pfree) *pfree = total_free;
+ if (plargest_free) *plargest_free = largest_free;
+}
+
+#if VM_SCAN_FOR_SHADOW_CHAIN
+int vm_map_shadow_max(vm_map_t map);
+int vm_map_shadow_max(
+ vm_map_t map)
+{
+ int shadows, shadows_max;
+ vm_map_entry_t entry;
+ vm_object_t object, next_object;
+
+ if (map == NULL)
+ return 0;
+
+ shadows_max = 0;
+
+ vm_map_lock_read(map);
+
+ for (entry = vm_map_first_entry(map);
+ entry != vm_map_to_entry(map);
+ entry = entry->vme_next) {
+ if (entry->is_sub_map) {
+ continue;
+ }
+ object = VME_OBJECT(entry);
+ if (object == NULL) {
+ continue;
+ }
+ vm_object_lock_shared(object);
+ for (shadows = 0;
+ object->shadow != NULL;
+ shadows++, object = next_object) {
+ next_object = object->shadow;
+ vm_object_lock_shared(next_object);
+ vm_object_unlock(object);
+ }
+ vm_object_unlock(object);
+ if (shadows > shadows_max) {
+ shadows_max = shadows;
+ }
+ }
+
+ vm_map_unlock_read(map);
+
+ return shadows_max;
+}
+#endif /* VM_SCAN_FOR_SHADOW_CHAIN */
+
+void
+vm_commit_pagezero_status(
+ vm_map_t lmap)
+{
+ pmap_advise_pagezero_range(lmap->pmap, lmap->min_offset);
+}
+
+#if __x86_64__
+void
+vm_map_set_high_start(
+ vm_map_t map,
+ vm_map_offset_t high_start)
+{
+ map->vmmap_high_start = high_start;
+}
+#endif /* __x86_64__ */
+
+#if PMAP_CS
+kern_return_t
+vm_map_entry_cs_associate(
+ vm_map_t map,
+ vm_map_entry_t entry,
+ vm_map_kernel_flags_t vmk_flags)
+{
+ vm_object_t cs_object, cs_shadow;
+ vm_object_offset_t cs_offset;
+ void *cs_blobs;
+ struct vnode *cs_vnode;
+ kern_return_t cs_ret;
+
+ if (map->pmap == NULL ||
+ entry->is_sub_map || /* XXX FBDP: recurse on sub-range? */
+ VME_OBJECT(entry) == VM_OBJECT_NULL ||
+ ! (entry->protection & VM_PROT_EXECUTE)) {
+ return KERN_SUCCESS;
+ }
+
+ vm_map_lock_assert_exclusive(map);
+
+ if (entry->used_for_jit) {
+ cs_ret = pmap_cs_associate(map->pmap,
+ PMAP_CS_ASSOCIATE_JIT,
+ entry->vme_start,
+ entry->vme_end - entry->vme_start);
+ goto done;
+ }
+
+ if (vmk_flags.vmkf_remap_prot_copy) {
+ cs_ret = pmap_cs_associate(map->pmap,
+ PMAP_CS_ASSOCIATE_COW,
+ entry->vme_start,
+ entry->vme_end - entry->vme_start);
+ goto done;
+ }
+
+ vm_object_lock_shared(VME_OBJECT(entry));
+ cs_offset = VME_OFFSET(entry);
+ for (cs_object = VME_OBJECT(entry);
+ (cs_object != VM_OBJECT_NULL &&
+ !cs_object->code_signed);
+ cs_object = cs_shadow) {
+ cs_shadow = cs_object->shadow;
+ if (cs_shadow != VM_OBJECT_NULL) {
+ cs_offset += cs_object->vo_shadow_offset;
+ vm_object_lock_shared(cs_shadow);
+ }
+ vm_object_unlock(cs_object);
+ }
+ if (cs_object == VM_OBJECT_NULL) {
+ return KERN_SUCCESS;
+ }
+
+ cs_offset += cs_object->paging_offset;
+ cs_vnode = vnode_pager_lookup_vnode(cs_object->pager);
+ cs_ret = vnode_pager_get_cs_blobs(cs_vnode,
+ &cs_blobs);
+ assert(cs_ret == KERN_SUCCESS);
+ cs_ret = cs_associate_blob_with_mapping(map->pmap,
+ entry->vme_start,
+ (entry->vme_end -
+ entry->vme_start),
+ cs_offset,
+ cs_blobs);
+ vm_object_unlock(cs_object);
+ cs_object = VM_OBJECT_NULL;
+
+ done:
+ if (cs_ret == KERN_SUCCESS) {
+ DTRACE_VM2(vm_map_entry_cs_associate_success,
+ vm_map_offset_t, entry->vme_start,
+ vm_map_offset_t, entry->vme_end);
+ if (vm_map_executable_immutable) {
+ /*
+ * Prevent this executable
+ * mapping from being unmapped
+ * or modified.
+ */
+ entry->permanent = TRUE;
+ }
+ /*
+ * pmap says it will validate the
+ * code-signing validity of pages
+ * faulted in via this mapping, so
+ * this map entry should be marked so
+ * that vm_fault() bypasses code-signing
+ * validation for faults coming through
+ * this mapping.
+ */
+ entry->pmap_cs_associated = TRUE;
+ } else if (cs_ret == KERN_NOT_SUPPORTED) {
+ /*
+ * pmap won't check the code-signing
+ * validity of pages faulted in via
+ * this mapping, so VM should keep
+ * doing it.
+ */
+ DTRACE_VM3(vm_map_entry_cs_associate_off,
+ vm_map_offset_t, entry->vme_start,
+ vm_map_offset_t, entry->vme_end,
+ int, cs_ret);
+ } else {
+ /*
+ * A real error: do not allow
+ * execution in this mapping.
+ */
+ DTRACE_VM3(vm_map_entry_cs_associate_failure,
+ vm_map_offset_t, entry->vme_start,
+ vm_map_offset_t, entry->vme_end,
+ int, cs_ret);
+ entry->protection &= ~VM_PROT_EXECUTE;
+ entry->max_protection &= ~VM_PROT_EXECUTE;
+ }
+
+ return cs_ret;
+}
+#endif /* PMAP_CS */
+
+/*
+ * FORKED CORPSE FOOTPRINT
+ *
+ * A forked corpse gets a copy of the original VM map but its pmap is mostly
+ * empty since it never ran and never got to fault in any pages.
+ * Collecting footprint info (via "sysctl vm.self_region_footprint") for
+ * a forked corpse would therefore return very little information.
+ *
+ * When forking a corpse, we can pass the VM_MAP_FORK_CORPSE_FOOTPRINT option
+ * to vm_map_fork() to collect footprint information from the original VM map
+ * and its pmap, and store it in the forked corpse's VM map. That information
+ * is stored in place of the VM map's "hole list" since we'll never need to
+ * lookup for holes in the corpse's map.
+ *
+ * The corpse's footprint info looks like this:
+ *
+ * vm_map->vmmap_corpse_footprint points to pageable kernel memory laid out
+ * as follows:
+ * +---------------------------------------+
+ * header-> | cf_size |
+ * +-------------------+-------------------+
+ * | cf_last_region | cf_last_zeroes |
+ * +-------------------+-------------------+
+ * region1-> | cfr_vaddr |
+ * +-------------------+-------------------+
+ * | cfr_num_pages | d0 | d1 | d2 | d3 |
+ * +---------------------------------------+
+ * | d4 | d5 | ... |
+ * +---------------------------------------+
+ * | ... |
+ * +-------------------+-------------------+
+ * | dy | dz | na | na | cfr_vaddr... | <-region2
+ * +-------------------+-------------------+
+ * | cfr_vaddr (ctd) | cfr_num_pages |
+ * +---------------------------------------+
+ * | d0 | d1 ... |
+ * +---------------------------------------+
+ * ...
+ * +---------------------------------------+
+ * last region-> | cfr_vaddr |
+ * +---------------------------------------+
+ * | cfr_num_pages | d0 | d1 | d2 | d3 |
+ * +---------------------------------------+
+ * ...
+ * +---------------------------------------+
+ * | dx | dy | dz | na | na | na | na | na |
+ * +---------------------------------------+
+ *
+ * where:
+ * cf_size: total size of the buffer (rounded to page size)
+ * cf_last_region: offset in the buffer of the last "region" sub-header
+ * cf_last_zeroes: number of trailing "zero" dispositions at the end
+ * of the last region
+ * cfr_vaddr: virtual address of the start of the covered "region"
+ * cfr_num_pages: number of pages in the covered "region"
+ * d*: disposition of the page at that virtual address
+ * Regions in the buffer are word-aligned.
+ *
+ * We estimate the size of the buffer based on the number of memory regions
+ * and the virtual size of the address space. While copying each memory region
+ * during vm_map_fork(), we also collect the footprint info for that region
+ * and store it in the buffer, packing it as much as possible (coalescing
+ * contiguous memory regions to avoid having too many region headers and
+ * avoiding long streaks of "zero" page dispositions by splitting footprint
+ * "regions", so the number of regions in the footprint buffer might not match
+ * the number of memory regions in the address space.
+ *
+ * We also have to copy the original task's "nonvolatile" ledgers since that's
+ * part of the footprint and will need to be reported to any tool asking for
+ * the footprint information of the forked corpse.
+ */
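+
+/*
+ * Illustrative sketch (not part of the change above): given the buffer
+ * layout described in the diagram, this is roughly how a consumer could
+ * walk the footprint regions to find the disposition byte recorded for a
+ * page at virtual address "va".  The struct and field names are the ones
+ * used by the code below; "hdr", "rgn", "off", "va" and "disp" are
+ * hypothetical locals, and the real lookup code may differ in details.
+ *
+ *	struct vm_map_corpse_footprint_header *hdr;
+ *	struct vm_map_corpse_footprint_region *rgn;
+ *	uint32_t off;
+ *	unsigned char disp = 0;	// 0 == no disposition recorded
+ *
+ *	hdr = (struct vm_map_corpse_footprint_header *)
+ *		map->vmmap_corpse_footprint;
+ *	off = sizeof (*hdr);	// first region follows the header
+ *	while (off <= hdr->cf_last_region) {
+ *		rgn = (struct vm_map_corpse_footprint_region *)
+ *			((char *)hdr + off);
+ *		if (va >= rgn->cfr_vaddr &&
+ *		    va < (rgn->cfr_vaddr +
+ *			  ((vm_map_offset_t)rgn->cfr_num_pages * PAGE_SIZE))) {
+ *			// dispositions: 1 byte per page, right after "rgn"
+ *			disp = ((unsigned char *)rgn + sizeof (*rgn))
+ *				[(va - rgn->cfr_vaddr) / PAGE_SIZE];
+ *			break;
+ *		}
+ *		// next region: header + dispositions, word-aligned
+ *		off = roundup(off + (uint32_t) sizeof (*rgn) +
+ *			      rgn->cfr_num_pages, sizeof (int));
+ *	}
+ */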
+
+uint64_t vm_map_corpse_footprint_count = 0;
+uint64_t vm_map_corpse_footprint_size_avg = 0;
+uint64_t vm_map_corpse_footprint_size_max = 0;
+uint64_t vm_map_corpse_footprint_full = 0;
+uint64_t vm_map_corpse_footprint_no_buf = 0;
+
+/*
+ * vm_map_corpse_footprint_new_region:
+ * closes the current footprint "region" and creates a new one
+ *
+ * Returns NULL if there's not enough space in the buffer for a new region.
+ */
+static struct vm_map_corpse_footprint_region *
+vm_map_corpse_footprint_new_region(
+ struct vm_map_corpse_footprint_header *footprint_header)
+{
+ uintptr_t footprint_edge;
+ uint32_t new_region_offset;
+ struct vm_map_corpse_footprint_region *footprint_region;
+ struct vm_map_corpse_footprint_region *new_footprint_region;
+
+ footprint_edge = ((uintptr_t)footprint_header +
+ footprint_header->cf_size);
+ footprint_region = ((struct vm_map_corpse_footprint_region *)
+ ((char *)footprint_header +
+ footprint_header->cf_last_region));
+ assert((uintptr_t)footprint_region + sizeof (*footprint_region) <=
+ footprint_edge);
+
+ /* get rid of trailing zeroes in the last region */
+ assert(footprint_region->cfr_num_pages >=
+ footprint_header->cf_last_zeroes);
+ footprint_region->cfr_num_pages -=
+ footprint_header->cf_last_zeroes;
+ footprint_header->cf_last_zeroes = 0;
+
+ /* reuse this region if it's now empty */
+ if (footprint_region->cfr_num_pages == 0) {
+ return footprint_region;
+ }
+
+ /* compute offset of new region */
+ new_region_offset = footprint_header->cf_last_region;
+ new_region_offset += sizeof (*footprint_region);
+ new_region_offset += footprint_region->cfr_num_pages;
+ new_region_offset = roundup(new_region_offset, sizeof (int));
+
+ /* check if we're going over the edge */
+ if (((uintptr_t)footprint_header +
+ new_region_offset +
+ sizeof (*footprint_region)) >=
+ footprint_edge) {
+ /* over the edge: no new region */
+ return NULL;
+ }
+
+ /* adjust offset of last region in header */
+ footprint_header->cf_last_region = new_region_offset;
+
+ new_footprint_region = (struct vm_map_corpse_footprint_region *)
+ ((char *)footprint_header +
+ footprint_header->cf_last_region);
+ new_footprint_region->cfr_vaddr = 0;
+ new_footprint_region->cfr_num_pages = 0;
+ /* caller needs to initialize new region */
+
+ return new_footprint_region;
+}
+
+/*
+ * vm_map_corpse_footprint_collect:
+ * collects footprint information for "old_entry" in "old_map" and
+ * stores it in "new_map"'s vmmap_corpse_footprint.
+ */
+kern_return_t
+vm_map_corpse_footprint_collect(
+ vm_map_t old_map,
+ vm_map_entry_t old_entry,
+ vm_map_t new_map)
+{
+ vm_map_offset_t va;
+ int disp;
+ kern_return_t kr;
+ struct vm_map_corpse_footprint_header *footprint_header;
+ struct vm_map_corpse_footprint_region *footprint_region;
+ struct vm_map_corpse_footprint_region *new_footprint_region;
+ unsigned char *next_disp_p;
+ uintptr_t footprint_edge;
+ uint32_t num_pages_tmp;
+
+ va = old_entry->vme_start;
+
+ vm_map_lock_assert_exclusive(old_map);
+ vm_map_lock_assert_exclusive(new_map);
+
+ assert(new_map->has_corpse_footprint);
+ assert(!old_map->has_corpse_footprint);
+ if (!new_map->has_corpse_footprint ||
+ old_map->has_corpse_footprint) {
+ /*
+ * Footprint info can only be transferred from a map with a
+ * live pmap to a map with a corpse footprint.
+ */
+ return KERN_NOT_SUPPORTED;
+ }
+
+ if (new_map->vmmap_corpse_footprint == NULL) {
+ vm_offset_t buf;
+ vm_size_t buf_size;
+
+ buf = 0;
+ buf_size = (sizeof (*footprint_header) +
+ (old_map->hdr.nentries
+ *
+ (sizeof (*footprint_region) +
+ 3)) /* potential alignment for each region */
+ +
+ ((old_map->size / PAGE_SIZE)
+ *
+ sizeof (char))); /* disposition for each page */
+// printf("FBDP corpse map %p guestimate footprint size 0x%llx\n", new_map, (uint64_t) buf_size);
+ buf_size = round_page(buf_size);
+
+ /* limit buffer to 1 page to validate overflow detection */
+// buf_size = PAGE_SIZE;
+
+ /* limit size to a somewhat sane amount */
+#if CONFIG_EMBEDDED
+#define VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE (256*1024) /* 256KB */
+#else /* CONFIG_EMBEDDED */
+#define VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE (8*1024*1024) /* 8MB */
+#endif /* CONFIG_EMBEDDED */
+ if (buf_size > VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE) {
+ buf_size = VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE;
+ }
+
+ /*
+ * Allocate the pageable buffer (with a trailing guard page).
+ * It will be zero-filled on demand.
+ */
+ kr = kernel_memory_allocate(kernel_map,
+ &buf,
+ (buf_size
+ + PAGE_SIZE), /* trailing guard page */
+ 0, /* mask */
+ KMA_PAGEABLE | KMA_GUARD_LAST,
+ VM_KERN_MEMORY_DIAG);
+ if (kr != KERN_SUCCESS) {
+ vm_map_corpse_footprint_no_buf++;
+ return kr;
+ }
+
+ /* initialize header and 1st region */
+ footprint_header = (struct vm_map_corpse_footprint_header *)buf;
+ new_map->vmmap_corpse_footprint = footprint_header;
+
+ footprint_header->cf_size = buf_size;
+ footprint_header->cf_last_region =
+ sizeof (*footprint_header);
+ footprint_header->cf_last_zeroes = 0;
+
+ footprint_region = (struct vm_map_corpse_footprint_region *)
+ ((char *)footprint_header +
+ footprint_header->cf_last_region);
+ footprint_region->cfr_vaddr = 0;
+ footprint_region->cfr_num_pages = 0;
+ } else {
+ /* retrieve header and last region */
+ footprint_header = (struct vm_map_corpse_footprint_header *)
+ new_map->vmmap_corpse_footprint;
+ footprint_region = (struct vm_map_corpse_footprint_region *)
+ ((char *)footprint_header +
+ footprint_header->cf_last_region);
+ }
+ footprint_edge = ((uintptr_t)footprint_header +
+ footprint_header->cf_size);
+
+ if ((footprint_region->cfr_vaddr +
+ (((vm_map_offset_t)footprint_region->cfr_num_pages) *
+ PAGE_SIZE))
+ != old_entry->vme_start) {
+ uint64_t num_pages_delta;
+ uint32_t region_offset_delta;
+
+ /*
+ * Not the next contiguous virtual address:
+ * start a new region or store "zero" dispositions for
+ * the missing pages?
+ */
+ /* size of gap in actual page dispositions */
+ num_pages_delta = (((old_entry->vme_start -
+ footprint_region->cfr_vaddr) / PAGE_SIZE)
+ - footprint_region->cfr_num_pages);
+ /* size of gap as a new footprint region header */
+ region_offset_delta =
+ (sizeof (*footprint_region) +
+ roundup((footprint_region->cfr_num_pages -
+ footprint_header->cf_last_zeroes),
+ sizeof (int)) -
+ (footprint_region->cfr_num_pages -
+ footprint_header->cf_last_zeroes));
+// printf("FBDP %s:%d region 0x%x 0x%llx 0x%x vme_start 0x%llx pages_delta 0x%llx region_delta 0x%x\n", __FUNCTION__, __LINE__, footprint_header->cf_last_region, footprint_region->cfr_vaddr, footprint_region->cfr_num_pages, old_entry->vme_start, num_pages_delta, region_offset_delta);
+ if (region_offset_delta < num_pages_delta ||
+ os_add3_overflow(footprint_region->cfr_num_pages,
+ (uint32_t) num_pages_delta,
+ 1,
+ &num_pages_tmp)) {
+ /*
+ * Storing data for this gap would take more space
+ * than inserting a new footprint region header:
+ * let's start a new region and save space. If it's a
+ * tie, let's avoid using a new region, since that
+ * would require more region hops to find the right
+ * range during lookups.
+ *
+ * If the current region's cfr_num_pages would overflow
+ * if we added "zero" page dispositions for the gap,
+ * no choice but to start a new region.
+ */
+// printf("FBDP %s:%d new region\n", __FUNCTION__, __LINE__);
+ new_footprint_region =
+ vm_map_corpse_footprint_new_region(footprint_header);
+ /* check that we're not going over the edge */
+ if (new_footprint_region == NULL) {
+ goto over_the_edge;
+ }
+ footprint_region = new_footprint_region;
+ /* initialize new region as empty */
+ footprint_region->cfr_vaddr = old_entry->vme_start;
+ footprint_region->cfr_num_pages = 0;
+ } else {
+ /*
+ * Store "zero" page dispositions for the missing
+ * pages.
+ */
+// printf("FBDP %s:%d zero gap\n", __FUNCTION__, __LINE__);
+ for (; num_pages_delta > 0; num_pages_delta--) {
+ next_disp_p =
+ ((unsigned char *) footprint_region +
+ sizeof (*footprint_region) +
+ footprint_region->cfr_num_pages);
+ /* check that we're not going over the edge */
+ if ((uintptr_t)next_disp_p >= footprint_edge) {
+ goto over_the_edge;
+ }
+ /* store "zero" disposition for this gap page */
+ footprint_region->cfr_num_pages++;
+ *next_disp_p = (unsigned char) 0;
+ footprint_header->cf_last_zeroes++;
+ }
+ }
+ }
+
+ for (va = old_entry->vme_start;
+ va < old_entry->vme_end;
+ va += PAGE_SIZE) {
+ vm_object_t object;
+
+ object = VME_OBJECT(old_entry);
+ if (!old_entry->is_sub_map &&
+ old_entry->iokit_acct &&
+ object != VM_OBJECT_NULL &&
+ object->internal &&
+ object->purgable == VM_PURGABLE_DENY) {
+ /*
+ * Non-purgeable IOKit memory: phys_footprint
+ * includes the entire virtual mapping.
+ * Since the forked corpse's VM map entry will not
+ * have "iokit_acct", pretend that this page's
+ * disposition is "present & internal", so that it
+ * shows up in the forked corpse's footprint.
+ */
+ disp = (PMAP_QUERY_PAGE_PRESENT |
+ PMAP_QUERY_PAGE_INTERNAL);
+ } else {
+ disp = 0;
+ pmap_query_page_info(old_map->pmap,
+ va,
+ &disp);
+ }
+
+// if (va < SHARED_REGION_BASE_ARM64) printf("FBDP collect map %p va 0x%llx disp 0x%x\n", new_map, va, disp);
+
+ if (disp == 0 && footprint_region->cfr_num_pages == 0) {
+ /*
+ * Ignore "zero" dispositions at start of
+ * region: just move start of region.
+ */
+ footprint_region->cfr_vaddr += PAGE_SIZE;
+ continue;
+ }
+
+ /* would region's cfr_num_pages overflow? */
+ if (os_add_overflow(footprint_region->cfr_num_pages, 1,
+ &num_pages_tmp)) {
+ /* overflow: create a new region */
+ new_footprint_region =
+ vm_map_corpse_footprint_new_region(
+ footprint_header);
+ if (new_footprint_region == NULL) {
+ goto over_the_edge;
+ }
+ footprint_region = new_footprint_region;
+ footprint_region->cfr_vaddr = va;
+ footprint_region->cfr_num_pages = 0;
+ }
+
+ next_disp_p = ((unsigned char *)footprint_region +
+ sizeof (*footprint_region) +
+ footprint_region->cfr_num_pages);
+ /* check that we're not going over the edge */
+ if ((uintptr_t)next_disp_p >= footprint_edge) {
+ goto over_the_edge;
+ }
+ /* store this disposition */
+ *next_disp_p = (unsigned char) disp;
+ footprint_region->cfr_num_pages++;
+
+ if (disp != 0) {
+ /* non-zero disp: break the current zero streak */
+ footprint_header->cf_last_zeroes = 0;
+ /* done */
+ continue;
+ }
+
+ /* zero disp: add to the current streak of zeroes */
+ footprint_header->cf_last_zeroes++;
+ if ((footprint_header->cf_last_zeroes +
+ roundup((footprint_region->cfr_num_pages -
+ footprint_header->cf_last_zeroes) &
+ (sizeof (int) - 1),
+ sizeof (int))) <
+ (sizeof (*footprint_header))) {
+ /*
+ * There are not enough trailing "zero" dispositions
+ * (+ the extra padding we would need for the previous
+ * region); creating a new region would not save space
+ * at this point, so let's keep this "zero" disposition
+ * in this region and reconsider later.
+ */
+ continue;
+ }
+ /*
+ * Create a new region to avoid having too many consecutive
+ * "zero" dispositions.
+ */
+ new_footprint_region =
+ vm_map_corpse_footprint_new_region(footprint_header);
+ if (new_footprint_region == NULL) {
+ goto over_the_edge;
+ }
+ footprint_region = new_footprint_region;
+ /* initialize the new region as empty ... */
+ footprint_region->cfr_num_pages = 0;
+ /* ... and skip this "zero" disp */
+ footprint_region->cfr_vaddr = va + PAGE_SIZE;
+ }