+#endif
+
+#if PMAP_CS
+kern_return_t
+vm_map_entry_cs_associate(
+ vm_map_t map,
+ vm_map_entry_t entry,
+ vm_map_kernel_flags_t vmk_flags)
+{
+ vm_object_t cs_object, cs_shadow;
+ vm_object_offset_t cs_offset;
+ void *cs_blobs;
+ struct vnode *cs_vnode;
+ kern_return_t cs_ret;
+
+ if (map->pmap == NULL ||
+ entry->is_sub_map || /* XXX FBDP: recurse on sub-range? */
+ VME_OBJECT(entry) == VM_OBJECT_NULL ||
+ !(entry->protection & VM_PROT_EXECUTE)) {
+ return KERN_SUCCESS;
+ }
+
+ vm_map_lock_assert_exclusive(map);
+
+ if (entry->used_for_jit) {
+ cs_ret = pmap_cs_associate(map->pmap,
+ PMAP_CS_ASSOCIATE_JIT,
+ entry->vme_start,
+ entry->vme_end - entry->vme_start);
+ goto done;
+ }
+
+ if (vmk_flags.vmkf_remap_prot_copy) {
+ cs_ret = pmap_cs_associate(map->pmap,
+ PMAP_CS_ASSOCIATE_COW,
+ entry->vme_start,
+ entry->vme_end - entry->vme_start);
+ goto done;
+ }
+
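+ /*
+ * Walk down the shadow chain (shared locks, with lock coupling) to find
+ * the code-signed backing object, accumulating this mapping's offset
+ * within that object along the way.
+ */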
+ vm_object_lock_shared(VME_OBJECT(entry));
+ cs_offset = VME_OFFSET(entry);
+ for (cs_object = VME_OBJECT(entry);
+ (cs_object != VM_OBJECT_NULL &&
+ !cs_object->code_signed);
+ cs_object = cs_shadow) {
+ cs_shadow = cs_object->shadow;
+ if (cs_shadow != VM_OBJECT_NULL) {
+ cs_offset += cs_object->vo_shadow_offset;
+ vm_object_lock_shared(cs_shadow);
+ }
+ vm_object_unlock(cs_object);
+ }
+ if (cs_object == VM_OBJECT_NULL) {
+ return KERN_SUCCESS;
+ }
+
+ cs_offset += cs_object->paging_offset;
+ cs_vnode = vnode_pager_lookup_vnode(cs_object->pager);
+ cs_ret = vnode_pager_get_cs_blobs(cs_vnode,
+ &cs_blobs);
+ assert(cs_ret == KERN_SUCCESS);
+ cs_ret = cs_associate_blob_with_mapping(map->pmap,
+ entry->vme_start,
+ (entry->vme_end -
+ entry->vme_start),
+ cs_offset,
+ cs_blobs);
+ vm_object_unlock(cs_object);
+ cs_object = VM_OBJECT_NULL;
+
+done:
+ if (cs_ret == KERN_SUCCESS) {
+ DTRACE_VM2(vm_map_entry_cs_associate_success,
+ vm_map_offset_t, entry->vme_start,
+ vm_map_offset_t, entry->vme_end);
+ if (vm_map_executable_immutable) {
+ /*
+ * Prevent this executable
+ * mapping from being unmapped
+ * or modified.
+ */
+ entry->permanent = TRUE;
+ }
+ /*
+ * pmap says it will check the
+ * code-signing validity of pages
+ * faulted in via this mapping, so
+ * this map entry should be marked so
+ * that vm_fault() bypasses code-signing
+ * validation for faults coming through
+ * this mapping.
+ */
+ entry->pmap_cs_associated = TRUE;
+ } else if (cs_ret == KERN_NOT_SUPPORTED) {
+ /*
+ * pmap won't check the code-signing
+ * validity of pages faulted in via
+ * this mapping, so VM should keep
+ * doing it.
+ */
+ DTRACE_VM3(vm_map_entry_cs_associate_off,
+ vm_map_offset_t, entry->vme_start,
+ vm_map_offset_t, entry->vme_end,
+ int, cs_ret);
+ } else {
+ /*
+ * A real error: do not allow
+ * execution in this mapping.
+ */
+ DTRACE_VM3(vm_map_entry_cs_associate_failure,
+ vm_map_offset_t, entry->vme_start,
+ vm_map_offset_t, entry->vme_end,
+ int, cs_ret);
+ entry->protection &= ~VM_PROT_EXECUTE;
+ entry->max_protection &= ~VM_PROT_EXECUTE;
+ }
+
+ return cs_ret;
+}
+#endif /* PMAP_CS */
+
+/*
+ * FORKED CORPSE FOOTPRINT
+ *
+ * A forked corpse gets a copy of the original VM map but its pmap is mostly
+ * empty since it never ran and never got to fault in any pages.
+ * Collecting footprint info (via "sysctl vm.self_region_footprint") for
+ * a forked corpse would therefore return very little information.
+ *
+ * When forking a corpse, we can pass the VM_MAP_FORK_CORPSE_FOOTPRINT option
+ * to vm_map_fork() to collect footprint information from the original VM map
+ * and its pmap, and store it in the forked corpse's VM map. That information
+ * is stored in place of the VM map's "hole list" since we'll never need to
+ * look up holes in the corpse's map.
+ *
+ * The corpse's footprint info looks like this:
+ *
+ * vm_map->vmmap_corpse_footprint points to pageable kernel memory laid out
+ * as follows:
+ * +---------------------------------------+
+ * header-> | cf_size |
+ * +-------------------+-------------------+
+ * | cf_last_region | cf_last_zeroes |
+ * +-------------------+-------------------+
+ * region1-> | cfr_vaddr |
+ * +-------------------+-------------------+
+ * | cfr_num_pages | d0 | d1 | d2 | d3 |
+ * +---------------------------------------+
+ * | d4 | d5 | ... |
+ * +---------------------------------------+
+ * | ... |
+ * +-------------------+-------------------+
+ * | dy | dz | na | na | cfr_vaddr... | <-region2
+ * +-------------------+-------------------+
+ * | cfr_vaddr (ctd) | cfr_num_pages |
+ * +---------------------------------------+
+ * | d0 | d1 ... |
+ * +---------------------------------------+
+ * ...
+ * +---------------------------------------+
+ * last region-> | cfr_vaddr |
+ * +---------------------------------------+
+ * + cfr_num_pages | d0 | d1 | d2 | d3 |
+ * +---------------------------------------+
+ * ...
+ * +---------------------------------------+
+ * | dx | dy | dz | na | na | na | na | na |
+ * +---------------------------------------+
+ *
+ * where:
+ * cf_size: total size of the buffer (rounded to page size)
+ * cf_last_region: offset in the buffer of the last "region" sub-header
+ * cf_last_zeroes: number of trailing "zero" dispositions at the end
+ * of last region
+ * cfr_vaddr: virtual address of the start of the covered "region"
+ * cfr_num_pages: number of pages in the covered "region"
+ * d*: disposition of the page at that virtual address
+ * Regions in the buffer are word-aligned.
+ *
+ * We estimate the size of the buffer based on the number of memory regions
+ * and the virtual size of the address space. While copying each memory region
+ * during vm_map_fork(), we also collect the footprint info for that region
+ * and store it in the buffer, packing it as much as possible (coalescing
+ * contiguous memory regions to avoid having too many region headers and
+ * avoiding long streaks of "zero" page dispositions by splitting footprint
+ * "regions", so the number of regions in the footprint buffer might not match
+ * the number of memory regions in the address space.
+ *
+ * We also have to copy the original task's "nonvolatile" ledgers since that's
+ * part of the footprint and will need to be reported to any tool asking for
+ * the footprint information of the forked corpse.
+ */
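+
+/*
+ * For illustration only: a minimal sketch of the header and region layouts
+ * implied by the diagram above and by the field accesses below. The
+ * authoritative definitions live in the VM headers, so the exact types,
+ * packing and the cf_last_zeroes/cf_hint_region overlay may differ:
+ *
+ * struct vm_map_corpse_footprint_header {
+ * vm_size_t cf_size; // total buffer size
+ * uint32_t cf_last_region; // offset of last region
+ * uint32_t cf_last_zeroes; // trailing "zero" dispositions
+ * // (reused as cf_hint_region for
+ * // lookups once collection is done)
+ * };
+ * struct vm_map_corpse_footprint_region {
+ * vm_map_offset_t cfr_vaddr; // start of covered range
+ * uint32_t cfr_num_pages; // pages covered
+ * unsigned char cfr_disposition[0]; // one byte per page
+ * };
+ */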
+
+uint64_t vm_map_corpse_footprint_count = 0;
+uint64_t vm_map_corpse_footprint_size_avg = 0;
+uint64_t vm_map_corpse_footprint_size_max = 0;
+uint64_t vm_map_corpse_footprint_full = 0;
+uint64_t vm_map_corpse_footprint_no_buf = 0;
+
+/*
+ * vm_map_corpse_footprint_new_region:
+ * closes the current footprint "region" and creates a new one
+ *
+ * Returns NULL if there's not enough space in the buffer for a new region.
+ */
+static struct vm_map_corpse_footprint_region *
+vm_map_corpse_footprint_new_region(
+ struct vm_map_corpse_footprint_header *footprint_header)
+{
+ uintptr_t footprint_edge;
+ uint32_t new_region_offset;
+ struct vm_map_corpse_footprint_region *footprint_region;
+ struct vm_map_corpse_footprint_region *new_footprint_region;
+
+ footprint_edge = ((uintptr_t)footprint_header +
+ footprint_header->cf_size);
+ footprint_region = ((struct vm_map_corpse_footprint_region *)
+ ((char *)footprint_header +
+ footprint_header->cf_last_region));
+ assert((uintptr_t)footprint_region + sizeof(*footprint_region) <=
+ footprint_edge);
+
+ /* get rid of trailing zeroes in the last region */
+ assert(footprint_region->cfr_num_pages >=
+ footprint_header->cf_last_zeroes);
+ footprint_region->cfr_num_pages -=
+ footprint_header->cf_last_zeroes;
+ footprint_header->cf_last_zeroes = 0;
+
+ /* reuse this region if it's now empty */
+ if (footprint_region->cfr_num_pages == 0) {
+ return footprint_region;
+ }
+
+ /* compute offset of new region */
+ new_region_offset = footprint_header->cf_last_region;
+ new_region_offset += sizeof(*footprint_region);
+ new_region_offset += footprint_region->cfr_num_pages;
+ new_region_offset = roundup(new_region_offset, sizeof(int));
+
+ /* check if we're going over the edge */
+ if (((uintptr_t)footprint_header +
+ new_region_offset +
+ sizeof(*footprint_region)) >=
+ footprint_edge) {
+ /* over the edge: no new region */
+ return NULL;
+ }
+
+ /* adjust offset of last region in header */
+ footprint_header->cf_last_region = new_region_offset;
+
+ new_footprint_region = (struct vm_map_corpse_footprint_region *)
+ ((char *)footprint_header +
+ footprint_header->cf_last_region);
+ new_footprint_region->cfr_vaddr = 0;
+ new_footprint_region->cfr_num_pages = 0;
+ /* caller needs to initialize new region */
+
+ return new_footprint_region;
+}
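+
+/*
+ * Worked example (illustrative numbers only, not taken from this change):
+ * if the last region starts at offset 16, keeps 10 page dispositions after
+ * dropping its trailing zeroes, and a region header takes 12 bytes, then
+ * new_region_offset = roundup(16 + 12 + 10, sizeof(int)) = 40. The new
+ * region is only handed out if that offset plus another region header still
+ * lands strictly below footprint_edge; otherwise NULL is returned.
+ */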
+
+/*
+ * vm_map_corpse_footprint_collect:
+ * collects footprint information for "old_entry" in "old_map" and
+ * stores it in "new_map"'s vmmap_corpse_footprint buffer.
+ */
+kern_return_t
+vm_map_corpse_footprint_collect(
+ vm_map_t old_map,
+ vm_map_entry_t old_entry,
+ vm_map_t new_map)
+{
+ vm_map_offset_t va;
+ int disp;
+ kern_return_t kr;
+ struct vm_map_corpse_footprint_header *footprint_header;
+ struct vm_map_corpse_footprint_region *footprint_region;
+ struct vm_map_corpse_footprint_region *new_footprint_region;
+ unsigned char *next_disp_p;
+ uintptr_t footprint_edge;
+ uint32_t num_pages_tmp;
+
+ va = old_entry->vme_start;
+
+ vm_map_lock_assert_exclusive(old_map);
+ vm_map_lock_assert_exclusive(new_map);
+
+ assert(new_map->has_corpse_footprint);
+ assert(!old_map->has_corpse_footprint);
+ if (!new_map->has_corpse_footprint ||
+ old_map->has_corpse_footprint) {
+ /*
+ * Footprint info can only be transferred from a map
+ * with a live pmap to a map with a corpse footprint.
+ */
+ return KERN_NOT_SUPPORTED;
+ }
+
+ if (new_map->vmmap_corpse_footprint == NULL) {
+ vm_offset_t buf;
+ vm_size_t buf_size;
+
+ buf = 0;
+ buf_size = (sizeof(*footprint_header) +
+ (old_map->hdr.nentries *
+ (sizeof(*footprint_region) + 3)) + /* potential alignment for each region */
+ ((old_map->size / PAGE_SIZE) *
+ sizeof(char))); /* disposition for each page */
+// printf("FBDP corpse map %p guestimate footprint size 0x%llx\n", new_map, (uint64_t) buf_size);
+ buf_size = round_page(buf_size);
+
+ /* limit buffer to 1 page to validate overflow detection */
+// buf_size = PAGE_SIZE;
+
+ /* limit size to a somewhat sane amount */
+#if CONFIG_EMBEDDED
+#define VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE (256*1024) /* 256KB */
+#else /* CONFIG_EMBEDDED */
+#define VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE (8*1024*1024) /* 8MB */
+#endif /* CONFIG_EMBEDDED */
+ if (buf_size > VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE) {
+ buf_size = VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE;
+ }
+
+ /*
+ * Allocate the pageable buffer (with a trailing guard page).
+ * It will be zero-filled on demand.
+ */
+ kr = kernel_memory_allocate(kernel_map,
+ &buf,
+ (buf_size
+ + PAGE_SIZE), /* trailing guard page */
+ 0, /* mask */
+ KMA_PAGEABLE | KMA_GUARD_LAST,
+ VM_KERN_MEMORY_DIAG);
+ if (kr != KERN_SUCCESS) {
+ vm_map_corpse_footprint_no_buf++;
+ return kr;
+ }
+
+ /* initialize header and 1st region */
+ footprint_header = (struct vm_map_corpse_footprint_header *)buf;
+ new_map->vmmap_corpse_footprint = footprint_header;
+
+ footprint_header->cf_size = buf_size;
+ footprint_header->cf_last_region =
+ sizeof(*footprint_header);
+ footprint_header->cf_last_zeroes = 0;
+
+ footprint_region = (struct vm_map_corpse_footprint_region *)
+ ((char *)footprint_header +
+ footprint_header->cf_last_region);
+ footprint_region->cfr_vaddr = 0;
+ footprint_region->cfr_num_pages = 0;
+ } else {
+ /* retrieve header and last region */
+ footprint_header = (struct vm_map_corpse_footprint_header *)
+ new_map->vmmap_corpse_footprint;
+ footprint_region = (struct vm_map_corpse_footprint_region *)
+ ((char *)footprint_header +
+ footprint_header->cf_last_region);
+ }
+ footprint_edge = ((uintptr_t)footprint_header +
+ footprint_header->cf_size);
+
+ if ((footprint_region->cfr_vaddr +
+ (((vm_map_offset_t)footprint_region->cfr_num_pages) *
+ PAGE_SIZE))
+ != old_entry->vme_start) {
+ uint64_t num_pages_delta;
+ uint32_t region_offset_delta;
+
+ /*
+ * Not the next contiguous virtual address:
+ * start a new region or store "zero" dispositions for
+ * the missing pages?
+ */
+ /* size of gap in actual page dispositions */
+ num_pages_delta = (((old_entry->vme_start -
+ footprint_region->cfr_vaddr) / PAGE_SIZE)
+ - footprint_region->cfr_num_pages);
+ /* size of gap as a new footprint region header */
+ region_offset_delta =
+ (sizeof(*footprint_region) +
+ roundup((footprint_region->cfr_num_pages -
+ footprint_header->cf_last_zeroes),
+ sizeof(int)) -
+ (footprint_region->cfr_num_pages -
+ footprint_header->cf_last_zeroes));
+// printf("FBDP %s:%d region 0x%x 0x%llx 0x%x vme_start 0x%llx pages_delta 0x%llx region_delta 0x%x\n", __FUNCTION__, __LINE__, footprint_header->cf_last_region, footprint_region->cfr_vaddr, footprint_region->cfr_num_pages, old_entry->vme_start, num_pages_delta, region_offset_delta);
+ if (region_offset_delta < num_pages_delta ||
+ os_add3_overflow(footprint_region->cfr_num_pages,
+ (uint32_t) num_pages_delta,
+ 1,
+ &num_pages_tmp)) {
+ /*
+ * Storing data for this gap would take more space
+ * than inserting a new footprint region header:
+ * let's start a new region and save space. If it's a
+ * tie, let's avoid using a new region, since that
+ * would require more region hops to find the right
+ * range during lookups.
+ *
+ * If the current region's cfr_num_pages would overflow
+ * if we added "zero" page dispositions for the gap,
+ * no choice but to start a new region.
+ */
+// printf("FBDP %s:%d new region\n", __FUNCTION__, __LINE__);
+ new_footprint_region =
+ vm_map_corpse_footprint_new_region(footprint_header);
+ /* check that we're not going over the edge */
+ if (new_footprint_region == NULL) {
+ goto over_the_edge;
+ }
+ footprint_region = new_footprint_region;
+ /* initialize new region as empty */
+ footprint_region->cfr_vaddr = old_entry->vme_start;
+ footprint_region->cfr_num_pages = 0;
+ } else {
+ /*
+ * Store "zero" page dispositions for the missing
+ * pages.
+ */
+// printf("FBDP %s:%d zero gap\n", __FUNCTION__, __LINE__);
+ for (; num_pages_delta > 0; num_pages_delta--) {
+ next_disp_p =
+ ((unsigned char *) footprint_region +
+ sizeof(*footprint_region) +
+ footprint_region->cfr_num_pages);
+ /* check that we're not going over the edge */
+ if ((uintptr_t)next_disp_p >= footprint_edge) {
+ goto over_the_edge;
+ }
+ /* store "zero" disposition for this gap page */
+ footprint_region->cfr_num_pages++;
+ *next_disp_p = (unsigned char) 0;
+ footprint_header->cf_last_zeroes++;
+ }
+ }
+ }
+
+ for (va = old_entry->vme_start;
+ va < old_entry->vme_end;
+ va += PAGE_SIZE) {
+ vm_object_t object;
+
+ object = VME_OBJECT(old_entry);
+ if (!old_entry->is_sub_map &&
+ old_entry->iokit_acct &&
+ object != VM_OBJECT_NULL &&
+ object->internal &&
+ object->purgable == VM_PURGABLE_DENY) {
+ /*
+ * Non-purgeable IOKit memory: phys_footprint
+ * includes the entire virtual mapping.
+ * Since the forked corpse's VM map entry will not
+ * have "iokit_acct", pretend that this page's
+ * disposition is "present & internal", so that it
+ * shows up in the forked corpse's footprint.
+ */
+ disp = (PMAP_QUERY_PAGE_PRESENT |
+ PMAP_QUERY_PAGE_INTERNAL);
+ } else {
+ disp = 0;
+ pmap_query_page_info(old_map->pmap,
+ va,
+ &disp);
+ }
+
+// if (va < SHARED_REGION_BASE_ARM64) printf("FBDP collect map %p va 0x%llx disp 0x%x\n", new_map, va, disp);
+
+ if (disp == 0 && footprint_region->cfr_num_pages == 0) {
+ /*
+ * Ignore "zero" dispositions at start of
+ * region: just move start of region.
+ */
+ footprint_region->cfr_vaddr += PAGE_SIZE;
+ continue;
+ }
+
+ /* would region's cfr_num_pages overflow? */
+ if (os_add_overflow(footprint_region->cfr_num_pages, 1,
+ &num_pages_tmp)) {
+ /* overflow: create a new region */
+ new_footprint_region =
+ vm_map_corpse_footprint_new_region(
+ footprint_header);
+ if (new_footprint_region == NULL) {
+ goto over_the_edge;
+ }
+ footprint_region = new_footprint_region;
+ footprint_region->cfr_vaddr = va;
+ footprint_region->cfr_num_pages = 0;
+ }
+
+ next_disp_p = ((unsigned char *)footprint_region +
+ sizeof(*footprint_region) +
+ footprint_region->cfr_num_pages);
+ /* check that we're not going over the edge */
+ if ((uintptr_t)next_disp_p >= footprint_edge) {
+ goto over_the_edge;
+ }
+ /* store this disposition */
+ *next_disp_p = (unsigned char) disp;
+ footprint_region->cfr_num_pages++;
+
+ if (disp != 0) {
+ /* non-zero disp: break the current zero streak */
+ footprint_header->cf_last_zeroes = 0;
+ /* done */
+ continue;
+ }
+
+ /* zero disp: add to the current streak of zeroes */
+ footprint_header->cf_last_zeroes++;
+ if ((footprint_header->cf_last_zeroes +
+ roundup((footprint_region->cfr_num_pages -
+ footprint_header->cf_last_zeroes) &
+ (sizeof(int) - 1),
+ sizeof(int))) <
+ (sizeof(*footprint_header))) {
+ /*
+ * There are not enough trailing "zero" dispositions
+ * (+ the extra padding we would need for the previous
+ * region); creating a new region would not save space
+ * at this point, so let's keep this "zero" disposition
+ * in this region and reconsider later.
+ */
+ continue;
+ }
+ /*
+ * Create a new region to avoid having too many consecutive
+ * "zero" dispositions.
+ */
+ new_footprint_region =
+ vm_map_corpse_footprint_new_region(footprint_header);
+ if (new_footprint_region == NULL) {
+ goto over_the_edge;
+ }
+ footprint_region = new_footprint_region;
+ /* initialize the new region as empty ... */
+ footprint_region->cfr_num_pages = 0;
+ /* ... and skip this "zero" disp */
+ footprint_region->cfr_vaddr = va + PAGE_SIZE;
+ }
+
+ return KERN_SUCCESS;
+
+over_the_edge:
+// printf("FBDP map %p footprint was full for va 0x%llx\n", new_map, va);
+ vm_map_corpse_footprint_full++;
+ return KERN_RESOURCE_SHORTAGE;
+}
+
+/*
+ * vm_map_corpse_footprint_collect_done:
+ * completes the footprint collection by getting rid of any remaining
+ * trailing "zero" dispositions and trimming the unused part of the
+ * kernel buffer
+ */
+void
+vm_map_corpse_footprint_collect_done(
+ vm_map_t new_map)
+{
+ struct vm_map_corpse_footprint_header *footprint_header;
+ struct vm_map_corpse_footprint_region *footprint_region;
+ vm_size_t buf_size, actual_size;
+ kern_return_t kr;
+
+ assert(new_map->has_corpse_footprint);
+ if (!new_map->has_corpse_footprint ||
+ new_map->vmmap_corpse_footprint == NULL) {
+ return;
+ }
+
+ footprint_header = (struct vm_map_corpse_footprint_header *)
+ new_map->vmmap_corpse_footprint;
+ buf_size = footprint_header->cf_size;
+
+ footprint_region = (struct vm_map_corpse_footprint_region *)
+ ((char *)footprint_header +
+ footprint_header->cf_last_region);
+
+ /* get rid of trailing zeroes in last region */
+ assert(footprint_region->cfr_num_pages >= footprint_header->cf_last_zeroes);
+ footprint_region->cfr_num_pages -= footprint_header->cf_last_zeroes;
+ footprint_header->cf_last_zeroes = 0;
+
+ actual_size = (vm_size_t)(footprint_header->cf_last_region +
+ sizeof(*footprint_region) +
+ footprint_region->cfr_num_pages);
+
+// printf("FBDP map %p buf_size 0x%llx actual_size 0x%llx\n", new_map, (uint64_t) buf_size, (uint64_t) actual_size);
+ vm_map_corpse_footprint_size_avg =
+ (((vm_map_corpse_footprint_size_avg *
+ vm_map_corpse_footprint_count) +
+ actual_size) /
+ (vm_map_corpse_footprint_count + 1));
+ vm_map_corpse_footprint_count++;
+ if (actual_size > vm_map_corpse_footprint_size_max) {
+ vm_map_corpse_footprint_size_max = actual_size;
+ }
+
+ actual_size = round_page(actual_size);
+ if (buf_size > actual_size) {
+ kr = vm_deallocate(kernel_map,
+ ((vm_address_t)footprint_header +
+ actual_size +
+ PAGE_SIZE), /* trailing guard page */
+ (buf_size - actual_size));
+ assertf(kr == KERN_SUCCESS,
+ "trim: footprint_header %p buf_size 0x%llx actual_size 0x%llx kr=0x%x\n",
+ footprint_header,
+ (uint64_t) buf_size,
+ (uint64_t) actual_size,
+ kr);
+ kr = vm_protect(kernel_map,
+ ((vm_address_t)footprint_header +
+ actual_size),
+ PAGE_SIZE,
+ FALSE, /* set_maximum */
+ VM_PROT_NONE);
+ assertf(kr == KERN_SUCCESS,
+ "guard: footprint_header %p buf_size 0x%llx actual_size 0x%llx kr=0x%x\n",
+ footprint_header,
+ (uint64_t) buf_size,
+ (uint64_t) actual_size,
+ kr);
+ }
+
+ footprint_header->cf_size = actual_size;
+}
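+
+/*
+ * Usage sketch (an assumption about the caller, not part of this change):
+ * the expected sequence when vm_map_fork() is called with the
+ * VM_MAP_FORK_CORPSE_FOOTPRINT option, with locking and the actual entry
+ * copying elided:
+ *
+ * for (old_entry = vm_map_first_entry(old_map);
+ * old_entry != vm_map_to_entry(old_map);
+ * old_entry = old_entry->vme_next) {
+ * // ... copy "old_entry" into "new_map" ...
+ * kr = vm_map_corpse_footprint_collect(old_map, old_entry, new_map);
+ * // KERN_RESOURCE_SHORTAGE: footprint buffer is full
+ * // ("vm_map_corpse_footprint_full" is bumped); later entries
+ * // simply won't be covered by the footprint.
+ * }
+ * vm_map_corpse_footprint_collect_done(new_map);
+ */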
+
+/*
+ * vm_map_corpse_footprint_query_page_info:
+ * retrieves the disposition of the page at virtual address "va"
+ * in the forked corpse's VM map
+ *
+ * This is the equivalent of pmap_query_page_info() for a forked corpse.
+ */
+kern_return_t
+vm_map_corpse_footprint_query_page_info(
+ vm_map_t map,
+ vm_map_offset_t va,
+ int *disp)
+{
+ struct vm_map_corpse_footprint_header *footprint_header;
+ struct vm_map_corpse_footprint_region *footprint_region;
+ uint32_t footprint_region_offset;
+ vm_map_offset_t region_start, region_end;
+ int disp_idx;
+ kern_return_t kr;
+
+ if (!map->has_corpse_footprint) {
+ *disp = 0;
+ kr = KERN_INVALID_ARGUMENT;
+ goto done;
+ }
+
+ footprint_header = map->vmmap_corpse_footprint;
+ if (footprint_header == NULL) {
+ *disp = 0;
+// if (va < SHARED_REGION_BASE_ARM64) printf("FBDP %d query map %p va 0x%llx disp 0x%x\n", __LINE__, map, va, *disp);
+ kr = KERN_INVALID_ARGUMENT;
+ goto done;
+ }
+
+ /* start looking at the hint ("cf_hint_region") */
+ footprint_region_offset = footprint_header->cf_hint_region;
+
+lookup_again:
+ if (footprint_region_offset < sizeof(*footprint_header)) {
+ /* hint too low: start from 1st region */
+ footprint_region_offset = sizeof(*footprint_header);
+ }
+ if (footprint_region_offset >= footprint_header->cf_last_region) {
+ /* hint too high: re-start from 1st region */
+ footprint_region_offset = sizeof(*footprint_header);
+ }
+ footprint_region = (struct vm_map_corpse_footprint_region *)
+ ((char *)footprint_header + footprint_region_offset);
+ region_start = footprint_region->cfr_vaddr;
+ region_end = (region_start +
+ ((vm_map_offset_t)(footprint_region->cfr_num_pages) *
+ PAGE_SIZE));
+ if (va < region_start &&
+ footprint_region_offset != sizeof(*footprint_header)) {
+ /* "va" falls before the hint region */
+
+ /* reset the hint (in a racy way...) */
+ footprint_header->cf_hint_region = sizeof(*footprint_header);
+ /* lookup "va" again from 1st region */
+ footprint_region_offset = sizeof(*footprint_header);
+ goto lookup_again;
+ }
+
+ while (va >= region_end) {
+ if (footprint_region_offset >= footprint_header->cf_last_region) {
+ break;
+ }
+ /* skip the region's header */
+ footprint_region_offset += sizeof(*footprint_region);
+ /* skip the region's page dispositions */
+ footprint_region_offset += footprint_region->cfr_num_pages;
+ /* align to next word boundary */
+ footprint_region_offset =
+ roundup(footprint_region_offset,
+ sizeof(int));
+ footprint_region = (struct vm_map_corpse_footprint_region *)
+ ((char *)footprint_header + footprint_region_offset);
+ region_start = footprint_region->cfr_vaddr;
+ region_end = (region_start +
+ ((vm_map_offset_t)(footprint_region->cfr_num_pages) *
+ PAGE_SIZE));
+ }
+ if (va < region_start || va >= region_end) {
+ /* page not found */
+ *disp = 0;
+// if (va < SHARED_REGION_BASE_ARM64) printf("FBDP %d query map %p va 0x%llx disp 0x%x\n", __LINE__, map, va, *disp);
+ kr = KERN_SUCCESS;
+ goto done;
+ }
+
+ /* "va" found: set the lookup hint for next lookup (in a racy way...) */
+ footprint_header->cf_hint_region = footprint_region_offset;
+
+ /* get page disposition for "va" in this region */
+ disp_idx = (int) ((va - footprint_region->cfr_vaddr) / PAGE_SIZE);
+ *disp = (int) (footprint_region->cfr_disposition[disp_idx]);
+
+ kr = KERN_SUCCESS;
+done:
+// if (va < SHARED_REGION_BASE_ARM64) printf("FBDP %d query map %p va 0x%llx disp 0x%x\n", __LINE__, map, va, *disp);
+ /* dtrace -n 'vminfo:::footprint_query_page_info { printf("map 0x%p va 0x%llx disp 0x%x kr 0x%x", arg0, arg1, arg2, arg3); }' */
+ DTRACE_VM4(footprint_query_page_info,
+ vm_map_t, map,
+ vm_map_offset_t, va,
+ int, *disp,
+ kern_return_t, kr);
+
+ return kr;
+}
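+
+/*
+ * Usage sketch (an assumption, not part of this change): a footprint
+ * reporter would dispatch on "has_corpse_footprint" and fall back to the
+ * live pmap otherwise:
+ *
+ * int disp = 0;
+ * if (map->has_corpse_footprint) {
+ * vm_map_corpse_footprint_query_page_info(map, va, &disp);
+ * } else {
+ * pmap_query_page_info(map->pmap, va, &disp);
+ * }
+ * if (disp & PMAP_QUERY_PAGE_PRESENT) {
+ * // the page at "va" counts toward the reported footprint
+ * }
+ */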
+
+
+static void
+vm_map_corpse_footprint_destroy(
+ vm_map_t map)
+{
+ if (map->has_corpse_footprint &&
+ map->vmmap_corpse_footprint != 0) {
+ struct vm_map_corpse_footprint_header *footprint_header;
+ vm_size_t buf_size;
+ kern_return_t kr;
+
+ footprint_header = map->vmmap_corpse_footprint;
+ buf_size = footprint_header->cf_size;
+ kr = vm_deallocate(kernel_map,
+ (vm_offset_t) map->vmmap_corpse_footprint,
+ ((vm_size_t) buf_size
+ + PAGE_SIZE)); /* trailing guard page */
+ assertf(kr == KERN_SUCCESS, "kr=0x%x\n", kr);
+ map->vmmap_corpse_footprint = 0;
+ map->has_corpse_footprint = FALSE;
+ }
+}
+
+/*
+ * vm_map_copy_footprint_ledgers:
+ * copies any ledger that's relevant to the memory footprint of "old_task"
+ * into the forked corpse's task ("new_task")
+ */
+void
+vm_map_copy_footprint_ledgers(
+ task_t old_task,
+ task_t new_task)
+{
+ vm_map_copy_ledger(old_task, new_task, task_ledgers.phys_footprint);
+ vm_map_copy_ledger(old_task, new_task, task_ledgers.purgeable_nonvolatile);
+ vm_map_copy_ledger(old_task, new_task, task_ledgers.purgeable_nonvolatile_compressed);
+ vm_map_copy_ledger(old_task, new_task, task_ledgers.internal);
+ vm_map_copy_ledger(old_task, new_task, task_ledgers.internal_compressed);
+ vm_map_copy_ledger(old_task, new_task, task_ledgers.iokit_mapped);
+ vm_map_copy_ledger(old_task, new_task, task_ledgers.alternate_accounting);
+ vm_map_copy_ledger(old_task, new_task, task_ledgers.alternate_accounting_compressed);
+ vm_map_copy_ledger(old_task, new_task, task_ledgers.page_table);
+ vm_map_copy_ledger(old_task, new_task, task_ledgers.tagged_footprint);
+ vm_map_copy_ledger(old_task, new_task, task_ledgers.tagged_footprint_compressed);
+ vm_map_copy_ledger(old_task, new_task, task_ledgers.network_nonvolatile);
+ vm_map_copy_ledger(old_task, new_task, task_ledgers.network_nonvolatile_compressed);
+ vm_map_copy_ledger(old_task, new_task, task_ledgers.media_footprint);
+ vm_map_copy_ledger(old_task, new_task, task_ledgers.media_footprint_compressed);
+ vm_map_copy_ledger(old_task, new_task, task_ledgers.graphics_footprint);
+ vm_map_copy_ledger(old_task, new_task, task_ledgers.graphics_footprint_compressed);
+ vm_map_copy_ledger(old_task, new_task, task_ledgers.neural_footprint);
+ vm_map_copy_ledger(old_task, new_task, task_ledgers.neural_footprint_compressed);
+ vm_map_copy_ledger(old_task, new_task, task_ledgers.wired_mem);
+}
+
+/*
+ * vm_map_copy_ledger:
+ * copies a single ledger entry from "old_task" to "new_task"
+ */
+void
+vm_map_copy_ledger(
+ task_t old_task,
+ task_t new_task,
+ int ledger_entry)
+{
+ ledger_amount_t old_balance, new_balance, delta;
+
+ assert(new_task->map->has_corpse_footprint);
+ if (!new_task->map->has_corpse_footprint) {
+ return;
+ }
+
+ /* turn off sanity checks for the ledger we're about to mess with */
+ ledger_disable_panic_on_negative(new_task->ledger,
+ ledger_entry);
+
+ /* adjust "new_task" to match "old_task" */
+ ledger_get_balance(old_task->ledger,
+ ledger_entry,
+ &old_balance);
+ ledger_get_balance(new_task->ledger,
+ ledger_entry,
+ &new_balance);
+ if (new_balance == old_balance) {
+ /* new == old: done */
+ } else if (new_balance > old_balance) {
+ /* new > old ==> new -= new - old */
+ delta = new_balance - old_balance;
+ ledger_debit(new_task->ledger,
+ ledger_entry,
+ delta);
+ } else {
+ /* new < old ==> new += old - new */
+ delta = old_balance - new_balance;
+ ledger_credit(new_task->ledger,
+ ledger_entry,
+ delta);
+ }
+}
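+
+/*
+ * Worked example (illustrative numbers only): if "old_task"'s ledger entry
+ * reads 4096 bytes and the corpse's reads 0, the code above credits the
+ * corpse's ledger by delta = 4096 - 0 = 4096 so the balances match; had the
+ * corpse's balance been the larger one, the same delta would be debited
+ * instead.
+ */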
+
+#if MACH_ASSERT
+
+extern int pmap_ledgers_panic;
+extern int pmap_ledgers_panic_leeway;
+
+#define LEDGER_DRIFT(__LEDGER) \
+ int __LEDGER##_over; \
+ ledger_amount_t __LEDGER##_over_total; \
+ ledger_amount_t __LEDGER##_over_max; \
+ int __LEDGER##_under; \
+ ledger_amount_t __LEDGER##_under_total; \
+ ledger_amount_t __LEDGER##_under_max
+
+struct {
+ uint64_t num_pmaps_checked;
+
+ LEDGER_DRIFT(phys_footprint);
+ LEDGER_DRIFT(internal);
+ LEDGER_DRIFT(internal_compressed);
+ LEDGER_DRIFT(iokit_mapped);
+ LEDGER_DRIFT(alternate_accounting);
+ LEDGER_DRIFT(alternate_accounting_compressed);
+ LEDGER_DRIFT(page_table);
+ LEDGER_DRIFT(purgeable_volatile);
+ LEDGER_DRIFT(purgeable_nonvolatile);
+ LEDGER_DRIFT(purgeable_volatile_compressed);
+ LEDGER_DRIFT(purgeable_nonvolatile_compressed);
+ LEDGER_DRIFT(tagged_nofootprint);
+ LEDGER_DRIFT(tagged_footprint);
+ LEDGER_DRIFT(tagged_nofootprint_compressed);
+ LEDGER_DRIFT(tagged_footprint_compressed);
+ LEDGER_DRIFT(network_volatile);
+ LEDGER_DRIFT(network_nonvolatile);
+ LEDGER_DRIFT(network_volatile_compressed);
+ LEDGER_DRIFT(network_nonvolatile_compressed);
+ LEDGER_DRIFT(media_nofootprint);
+ LEDGER_DRIFT(media_footprint);
+ LEDGER_DRIFT(media_nofootprint_compressed);
+ LEDGER_DRIFT(media_footprint_compressed);
+ LEDGER_DRIFT(graphics_nofootprint);
+ LEDGER_DRIFT(graphics_footprint);
+ LEDGER_DRIFT(graphics_nofootprint_compressed);
+ LEDGER_DRIFT(graphics_footprint_compressed);
+ LEDGER_DRIFT(neural_nofootprint);
+ LEDGER_DRIFT(neural_footprint);
+ LEDGER_DRIFT(neural_nofootprint_compressed);
+ LEDGER_DRIFT(neural_footprint_compressed);
+} pmap_ledgers_drift;
+
+void
+vm_map_pmap_check_ledgers(
+ pmap_t pmap,
+ ledger_t ledger,
+ int pid,
+ char *procname)
+{
+ ledger_amount_t bal;
+ boolean_t do_panic;
+
+ do_panic = FALSE;
+
+ pmap_ledgers_drift.num_pmaps_checked++;
+
+#define LEDGER_CHECK_BALANCE(__LEDGER) \
+MACRO_BEGIN \
+ int panic_on_negative = TRUE; \
+ ledger_get_balance(ledger, \
+ task_ledgers.__LEDGER, \
+ &bal); \
+ ledger_get_panic_on_negative(ledger, \
+ task_ledgers.__LEDGER, \
+ &panic_on_negative); \
+ if (bal != 0) { \
+ if (panic_on_negative || \
+ (pmap_ledgers_panic && \
+ pmap_ledgers_panic_leeway > 0 && \
+ (bal > (pmap_ledgers_panic_leeway * PAGE_SIZE) || \
+ bal < (-pmap_ledgers_panic_leeway * PAGE_SIZE)))) { \
+ do_panic = TRUE; \
+ } \
+ printf("LEDGER BALANCE proc %d (%s) " \
+ "\"%s\" = %lld\n", \
+ pid, procname, #__LEDGER, bal); \
+ if (bal > 0) { \
+ pmap_ledgers_drift.__LEDGER##_over++; \
+ pmap_ledgers_drift.__LEDGER##_over_total += bal; \
+ if (bal > pmap_ledgers_drift.__LEDGER##_over_max) { \
+ pmap_ledgers_drift.__LEDGER##_over_max = bal; \
+ } \
+ } else if (bal < 0) { \
+ pmap_ledgers_drift.__LEDGER##_under++; \
+ pmap_ledgers_drift.__LEDGER##_under_total += bal; \
+ if (bal < pmap_ledgers_drift.__LEDGER##_under_max) { \
+ pmap_ledgers_drift.__LEDGER##_under_max = bal; \
+ } \
+ } \
+ } \
+MACRO_END
+
+ LEDGER_CHECK_BALANCE(phys_footprint);
+ LEDGER_CHECK_BALANCE(internal);
+ LEDGER_CHECK_BALANCE(internal_compressed);
+ LEDGER_CHECK_BALANCE(iokit_mapped);
+ LEDGER_CHECK_BALANCE(alternate_accounting);
+ LEDGER_CHECK_BALANCE(alternate_accounting_compressed);
+ LEDGER_CHECK_BALANCE(page_table);
+ LEDGER_CHECK_BALANCE(purgeable_volatile);
+ LEDGER_CHECK_BALANCE(purgeable_nonvolatile);
+ LEDGER_CHECK_BALANCE(purgeable_volatile_compressed);
+ LEDGER_CHECK_BALANCE(purgeable_nonvolatile_compressed);
+ LEDGER_CHECK_BALANCE(tagged_nofootprint);
+ LEDGER_CHECK_BALANCE(tagged_footprint);
+ LEDGER_CHECK_BALANCE(tagged_nofootprint_compressed);
+ LEDGER_CHECK_BALANCE(tagged_footprint_compressed);
+ LEDGER_CHECK_BALANCE(network_volatile);
+ LEDGER_CHECK_BALANCE(network_nonvolatile);
+ LEDGER_CHECK_BALANCE(network_volatile_compressed);
+ LEDGER_CHECK_BALANCE(network_nonvolatile_compressed);
+ LEDGER_CHECK_BALANCE(media_nofootprint);
+ LEDGER_CHECK_BALANCE(media_footprint);
+ LEDGER_CHECK_BALANCE(media_nofootprint_compressed);
+ LEDGER_CHECK_BALANCE(media_footprint_compressed);
+ LEDGER_CHECK_BALANCE(graphics_nofootprint);
+ LEDGER_CHECK_BALANCE(graphics_footprint);
+ LEDGER_CHECK_BALANCE(graphics_nofootprint_compressed);
+ LEDGER_CHECK_BALANCE(graphics_footprint_compressed);
+ LEDGER_CHECK_BALANCE(neural_nofootprint);
+ LEDGER_CHECK_BALANCE(neural_footprint);
+ LEDGER_CHECK_BALANCE(neural_nofootprint_compressed);
+ LEDGER_CHECK_BALANCE(neural_footprint_compressed);
+
+ if (do_panic) {
+ if (pmap_ledgers_panic) {
+ panic("pmap_destroy(%p) %d[%s] has imbalanced ledgers\n",
+ pmap, pid, procname);
+ } else {
+ printf("pmap_destroy(%p) %d[%s] has imbalanced ledgers\n",
+ pmap, pid, procname);
+ }
+ }
+}
+#endif /* MACH_ASSERT */