+ return object;
+}
+
+/*
+ * Export routines to other components for the things we access locally through
+ * macros.
+ */
+#undef current_map
+vm_map_t
+current_map(void)
+{
+ return (current_map_fast());
+}
+
+/*
+ * vm_map_reference:
+ *
+ * Most code internal to osfmk goes through a macro
+ * that defines this. This out-of-line version is always
+ * available for use by other kernel components.
+ */
+#undef vm_map_reference
+void
+vm_map_reference(
+ register vm_map_t map)
+{
+ if (map == VM_MAP_NULL)
+ return;
+
+ lck_mtx_lock(&map->s_lock);
+#if TASK_SWAPPER
+ assert(map->res_count > 0);
+ assert(map->ref_count >= map->res_count);
+ map->res_count++;
+#endif
+ map->ref_count++;
+ lck_mtx_unlock(&map->s_lock);
+}
+
+/*
+ * vm_map_deallocate:
+ *
+ * Removes a reference from the specified map,
+ * destroying it if no references remain.
+ * The map should not be locked.
+ */
+void
+vm_map_deallocate(
+ register vm_map_t map)
+{
+ unsigned int ref;
+
+ if (map == VM_MAP_NULL)
+ return;
+
+ lck_mtx_lock(&map->s_lock);
+ ref = --map->ref_count;
+ if (ref > 0) {
+ vm_map_res_deallocate(map);
+ lck_mtx_unlock(&map->s_lock);
+ return;
+ }
+ assert(map->ref_count == 0);
+ lck_mtx_unlock(&map->s_lock);
+
+#if TASK_SWAPPER
+ /*
+ * The map residence count isn't decremented here because
+ * the vm_map_delete below will traverse the entire map,
+ * deleting entries, and the residence counts on objects
+ * and sharing maps will go away then.
+ */
+#endif
+
+ vm_map_destroy(map, VM_MAP_NO_FLAGS);
+}
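+
+/*
+ * Usage sketch (illustrative only, not part of this change): an external
+ * kernel component that needs to keep a map alive across an operation
+ * pairs the two routines above, e.g.:
+ *
+ *	vm_map_reference(map);		// ref_count++ (and res_count++ with TASK_SWAPPER)
+ *	... use the map without holding its locks ...
+ *	vm_map_deallocate(map);		// drop the ref; the map is destroyed when it hits zero
+ */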
+
+
+void
+vm_map_disable_NX(vm_map_t map)
+{
+ if (map == NULL)
+ return;
+ if (map->pmap == NULL)
+ return;
+
+ pmap_disable_NX(map->pmap);
+}
+
+void
+vm_map_disallow_data_exec(vm_map_t map)
+{
+ if (map == NULL)
+ return;
+
+ map->map_disallow_data_exec = TRUE;
+}
+
+/* XXX Consider making these constants (VM_MAX_ADDRESS and MACH_VM_MAX_ADDRESS)
+ * more descriptive.
+ */
+void
+vm_map_set_32bit(vm_map_t map)
+{
+ map->max_offset = (vm_map_offset_t)VM_MAX_ADDRESS;
+}
+
+
+void
+vm_map_set_64bit(vm_map_t map)
+{
+ map->max_offset = (vm_map_offset_t)MACH_VM_MAX_ADDRESS;
+}
+
+vm_map_offset_t
+vm_compute_max_offset(unsigned is64)
+{
+ return (is64 ? (vm_map_offset_t)MACH_VM_MAX_ADDRESS : (vm_map_offset_t)VM_MAX_ADDRESS);
+}
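+
+/*
+ * Usage sketch (illustrative only): task-creation code picks the
+ * address-space ceiling from the new task's bitness; "is64" is a
+ * hypothetical local flag:
+ *
+ *	boolean_t is64 = ...;	// whether the task gets a 64-bit address space
+ *	map->max_offset = vm_compute_max_offset(is64);
+ *	// equivalent to vm_map_set_64bit(map) / vm_map_set_32bit(map)
+ */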
+
+uint64_t
+vm_map_get_max_aslr_slide_pages(vm_map_t map)
+{
+ return (1 << (vm_map_is_64bit(map) ? 16 : 8));
+}
+
+boolean_t
+vm_map_is_64bit(
+ vm_map_t map)
+{
+ return map->max_offset > ((vm_map_offset_t)VM_MAX_ADDRESS);
+}
+
+boolean_t
+vm_map_has_hard_pagezero(
+ vm_map_t map,
+ vm_map_offset_t pagezero_size)
+{
+ /*
+ * XXX FBDP
+ * We should lock the VM map (for read) here but we can get away
+ * with it for now because there can't really be any race condition:
+ * the VM map's min_offset is changed only when the VM map is created
+ * and when the zero page is established (when the binary gets loaded),
+ * and this routine gets called only when the task terminates and the
+ * VM map is being torn down, and when a new map is created via
+ * load_machfile()/execve().
+ */
+ return (map->min_offset >= pagezero_size);
+}
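+
+/*
+ * Usage sketch (illustrative only): a caller can ask whether the map still
+ * reserves at least "pagezero_size" bytes at the bottom of the address
+ * space, e.g.:
+ *
+ *	if (vm_map_has_hard_pagezero(map, (vm_map_offset_t)PAGE_SIZE)) {
+ *		// nothing can be mapped in the first page
+ *	}
+ */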
+
+void
+vm_map_set_4GB_pagezero(vm_map_t map)
+{
+#pragma unused(map)
+
+}
+
+void
+vm_map_clear_4GB_pagezero(vm_map_t map)
+{
+#pragma unused(map)
+}
+
+/*
+ * Raise a VM map's maximum offset.
+ */
+kern_return_t
+vm_map_raise_max_offset(
+ vm_map_t map,
+ vm_map_offset_t new_max_offset)
+{
+ kern_return_t ret;
+
+ vm_map_lock(map);
+ ret = KERN_INVALID_ADDRESS;
+
+ if (new_max_offset >= map->max_offset) {
+ if (!vm_map_is_64bit(map)) {
+ if (new_max_offset <= (vm_map_offset_t)VM_MAX_ADDRESS) {
+ map->max_offset = new_max_offset;
+ ret = KERN_SUCCESS;
+ }
+ } else {
+ if (new_max_offset <= (vm_map_offset_t)MACH_VM_MAX_ADDRESS) {
+ map->max_offset = new_max_offset;
+ ret = KERN_SUCCESS;
+ }
+ }
+ }
+
+ vm_map_unlock(map);
+ return ret;
+}
+
+
+/*
+ * Raise a VM map's minimum offset,
+ * to strictly enforce the "page zero" reservation.
+ */
+kern_return_t
+vm_map_raise_min_offset(
+ vm_map_t map,
+ vm_map_offset_t new_min_offset)
+{
+ vm_map_entry_t first_entry;
+
+ new_min_offset = vm_map_round_page(new_min_offset,
+ VM_MAP_PAGE_MASK(map));
+
+ vm_map_lock(map);
+
+ if (new_min_offset < map->min_offset) {
+ /*
+ * Can't move min_offset backwards, as that would expose
+ * a part of the address space that was previously, and for
+ * possibly good reasons, inaccessible.
+ */
+ vm_map_unlock(map);
+ return KERN_INVALID_ADDRESS;
+ }
+
+ first_entry = vm_map_first_entry(map);
+ if (first_entry != vm_map_to_entry(map) &&
+ first_entry->vme_start < new_min_offset) {
+ /*
+ * Some memory was already allocated below the new
+ * minimum offset. It's too late to change it now...
+ */
+ vm_map_unlock(map);
+ return KERN_NO_SPACE;
+ }
+
+ map->min_offset = new_min_offset;
+
+ vm_map_unlock(map);
+
+ return KERN_SUCCESS;
+}
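+
+/*
+ * Usage sketch (illustrative only): a binary loader that wants to reserve a
+ * hard page zero would raise the minimum offset before anything is mapped
+ * below it, e.g.:
+ *
+ *	kr = vm_map_raise_min_offset(map, (vm_map_offset_t)PAGE_SIZE);
+ *	// KERN_NO_SPACE: memory already exists below the new minimum
+ *	// KERN_INVALID_ADDRESS: the new minimum would move backwards
+ */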
+
+/*
+ * Set the limit on the maximum amount of user wired memory allowed for this map.
+ * This is basically a copy of the MEMLOCK rlimit value maintained by the BSD side of
+ * the kernel. The limit is checked on the Mach VM side, so we keep a copy here to
+ * avoid reaching over to the BSD data structures.
+ */
+
+void
+vm_map_set_user_wire_limit(vm_map_t map,
+ vm_size_t limit)
+{
+ map->user_wire_limit = limit;
+}
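+
+/*
+ * Usage sketch (illustrative only): when the BSD side updates RLIMIT_MEMLOCK
+ * it mirrors the new value into the map; "memlock_limit" is a hypothetical
+ * local holding that rlimit's current value:
+ *
+ *	vm_map_set_user_wire_limit(current_map(), (vm_size_t)memlock_limit);
+ */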
+
+
+void vm_map_switch_protect(vm_map_t map,
+ boolean_t val)
+{
+ vm_map_lock(map);
+ map->switch_protect = val;
+ vm_map_unlock(map);
+}
+
+/*
+ * IOKit has mapped a region into this map; adjust the pmap's ledgers appropriately.
+ * phys_footprint is a composite limit consisting of iokit + physmem, so we need to
+ * bump both counters.
+ */
+void
+vm_map_iokit_mapped_region(vm_map_t map, vm_size_t bytes)
+{
+ pmap_t pmap = vm_map_pmap(map);
+
+ ledger_credit(pmap->ledger, task_ledgers.iokit_mem, bytes);
+ ledger_credit(pmap->ledger, task_ledgers.phys_footprint, bytes);
+}
+
+void
+vm_map_iokit_unmapped_region(vm_map_t map, vm_size_t bytes)
+{
+ pmap_t pmap = vm_map_pmap(map);
+
+ ledger_debit(pmap->ledger, task_ledgers.iokit_mem, bytes);
+ ledger_debit(pmap->ledger, task_ledgers.phys_footprint, bytes);
+}
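+
+/*
+ * Usage sketch (illustrative only): IOKit brackets a wired mapping of "size"
+ * bytes into "map" with these calls so both ledger entries stay balanced:
+ *
+ *	vm_map_iokit_mapped_region(map, size);
+ *	... the region is mapped and in use ...
+ *	vm_map_iokit_unmapped_region(map, size);
+ */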
+
+/* Add (generate) code signature for memory range */
+#if CONFIG_DYNAMIC_CODE_SIGNING
+kern_return_t vm_map_sign(vm_map_t map,
+ vm_map_offset_t start,
+ vm_map_offset_t end)
+{
+ vm_map_entry_t entry;
+ vm_page_t m;
+ vm_object_t object;
+
+ /*
+ * Vet all the input parameters and current type and state of the
+ * underlying object. Return with an error if anything is amiss.
+ */
+ if (map == VM_MAP_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ vm_map_lock_read(map);
+
+ if (!vm_map_lookup_entry(map, start, &entry) || entry->is_sub_map) {
+ /*
+ * Must pass a valid non-submap address.
+ */
+ vm_map_unlock_read(map);
+ return(KERN_INVALID_ADDRESS);
+ }
+
+ if ((entry->vme_start > start) || (entry->vme_end < end)) {
+ /*
+ * Map entry doesn't cover the requested range. Not handling
+ * this situation currently.
+ */
+ vm_map_unlock_read(map);
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ object = entry->object.vm_object;
+ if (object == VM_OBJECT_NULL) {
+ /*
+ * Object must already be present or we can't sign.
+ */
+ vm_map_unlock_read(map);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ vm_object_lock(object);
+ vm_map_unlock_read(map);
+
+ while (start < end) {
+ uint32_t refmod;
+
+ m = vm_page_lookup(object, start - entry->vme_start + entry->offset);
+ if (m == VM_PAGE_NULL) {
+ /* Should we try to fault a page in here? We can probably
+ * demand that it exists and is locked for this request. */
+ vm_object_unlock(object);
+ return KERN_FAILURE;
+ }
+ /* deal with special page status */
+ if (m->busy ||
+ (m->unusual && (m->error || m->restart || m->private || m->absent))) {
+ vm_object_unlock(object);
+ return KERN_FAILURE;
+ }
+
+ /* Page is OK... now "validate" it */
+ /* This is the place where we'll call out to create a code
+ * directory, later */
+ m->cs_validated = TRUE;
+
+ /* The page is now "clean" for codesigning purposes. That means
+ * we don't consider it as modified (wpmapped) anymore. But
+ * we'll disconnect the page so we note any future modification
+ * attempts. */
+ m->wpmapped = FALSE;
+ refmod = pmap_disconnect(m->phys_page);
+
+ /* Pull the dirty status from the pmap, since we cleared the
+ * wpmapped bit */
+ if ((refmod & VM_MEM_MODIFIED) && !m->dirty) {
+ SET_PAGE_DIRTY(m, FALSE);
+ }
+
+ /* On to the next page */
+ start += PAGE_SIZE;
+ }
+ vm_object_unlock(object);
+
+ return KERN_SUCCESS;
+}
+#endif
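+
+/*
+ * Usage sketch (illustrative only): with CONFIG_DYNAMIC_CODE_SIGNING, a
+ * client that has populated [start, end) within a single map entry can mark
+ * the resident pages as validated before executing from them:
+ *
+ *	kern_return_t kr = vm_map_sign(map, start, end);
+ *	if (kr != KERN_SUCCESS) {
+ *		// range not covered by one entry, no backing object,
+ *		// or a page was absent/busy
+ *	}
+ */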
+
+#if CONFIG_FREEZE
+
+kern_return_t vm_map_freeze_walk(
+ vm_map_t map,
+ unsigned int *purgeable_count,
+ unsigned int *wired_count,
+ unsigned int *clean_count,
+ unsigned int *dirty_count,
+ unsigned int dirty_budget,
+ boolean_t *has_shared)
+{
+ vm_map_entry_t entry;
+
+ vm_map_lock_read(map);
+
+ *purgeable_count = *wired_count = *clean_count = *dirty_count = 0;
+ *has_shared = FALSE;
+
+ for (entry = vm_map_first_entry(map);
+ entry != vm_map_to_entry(map);
+ entry = entry->vme_next) {
+ unsigned int purgeable, clean, dirty, wired;
+ boolean_t shared;
+
+ if ((entry->object.vm_object == VM_OBJECT_NULL) ||
+ (entry->is_sub_map) ||
+ (entry->object.vm_object->phys_contiguous)) {
+ continue;
+ }
+
+ default_freezer_pack(&purgeable, &wired, &clean, &dirty, dirty_budget, &shared, entry->object.vm_object, NULL);
+
+ *purgeable_count += purgeable;
+ *wired_count += wired;
+ *clean_count += clean;
+ *dirty_count += dirty;
+
+ if (shared) {
+ *has_shared = TRUE;
+ }
+
+ /* Adjust the pageout budget and finish up once it is exhausted */
+ if (dirty_budget) {
+ dirty_budget -= dirty;
+ if (dirty_budget == 0) {
+ break;
+ }
+ }
+ }
+
+ vm_map_unlock_read(map);
+
+ return KERN_SUCCESS;
+}
+
+kern_return_t vm_map_freeze(
+ vm_map_t map,
+ unsigned int *purgeable_count,
+ unsigned int *wired_count,
+ unsigned int *clean_count,
+ unsigned int *dirty_count,
+ unsigned int dirty_budget,
+ boolean_t *has_shared)
+{
+ vm_map_entry_t entry2 = VM_MAP_ENTRY_NULL;
+ kern_return_t kr = KERN_SUCCESS;
+ boolean_t default_freezer_active = TRUE;
+
+ *purgeable_count = *wired_count = *clean_count = *dirty_count = 0;
+ *has_shared = FALSE;
+
+ /*
+ * We need the exclusive lock here so that we can
+ * block any page faults or lookups while we are
+ * in the middle of freezing this vm map.
+ */
+ vm_map_lock(map);
+
+ if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
+ default_freezer_active = FALSE;
+ }
+
+ if (default_freezer_active) {
+ if (map->default_freezer_handle == NULL) {
+ map->default_freezer_handle = default_freezer_handle_allocate();
+ }
+
+ if ((kr = default_freezer_handle_init(map->default_freezer_handle)) != KERN_SUCCESS) {
+ /*
+ * This can happen if the default_freezer_handle passed in is NULL,
+ * or if a table has already been allocated and associated
+ * with this handle, i.e. the map is already frozen.
+ */
+ goto done;
+ }
+ }
+
+ for (entry2 = vm_map_first_entry(map);
+ entry2 != vm_map_to_entry(map);
+ entry2 = entry2->vme_next) {
+
+ vm_object_t src_object = entry2->object.vm_object;