/*
- * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <task_swapper.h>
#include <mach_assert.h>
+
+#include <vm/vm_options.h>
+
#include <libkern/OSAtomic.h>
#include <mach/kern_return.h>
#include <kern/zalloc.h>
#include <vm/cpm.h>
+#include <vm/vm_compressor_pager.h>
#include <vm/vm_init.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <ipc/ipc_port.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
-#include <machine/db_machdep.h>
#include <kern/xpr.h>
#include <mach/vm_map_server.h>
#include <vm/vm_shared_region.h>
#include <vm/vm_map_store.h>
+extern u_int32_t random(void); /* from <libkern/libkern.h> */
/* Internal prototypes
*/
vm_map_entry_t *entry);
static vm_map_entry_t _vm_map_entry_create(
- struct vm_map_header *map_header);
+ struct vm_map_header *map_header, boolean_t map_locked);
static void _vm_map_entry_dispose(
struct vm_map_header *map_header,
vm_map_t dst_map,
vm_map_entry_t entry,
vm_map_copy_t copy,
- vm_map_address_t start);
+ vm_map_address_t start,
+ boolean_t discard_on_success);
static kern_return_t vm_map_copy_overwrite_aligned(
vm_map_t dst_map,
vm_map_t map,
vm_map_address_t *addr, /* IN/OUT */
vm_map_copy_t copy,
- boolean_t overwrite);
+ boolean_t overwrite,
+ boolean_t consume_on_success);
static void vm_map_fork_share(
vm_map_t old_map,
vm_object_offset_t offset,
vm_object_size_t range,
vm_region_extended_info_t extended,
- boolean_t look_for_pages);
+ boolean_t look_for_pages,
+ mach_msg_type_number_t count);
static kern_return_t vm_map_wire_nested(
vm_map_t map,
vm_prot_t access_type,
boolean_t user_wire,
pmap_t map_pmap,
- vm_map_offset_t pmap_addr);
+ vm_map_offset_t pmap_addr,
+ ppnum_t *physpage_p);
static kern_return_t vm_map_unwire_nested(
vm_map_t map,
vm_object_offset_t offset,
int max_refcnt,
int depth,
- vm_region_extended_info_t extended);
+ vm_region_extended_info_t extended,
+ mach_msg_type_number_t count);
static int vm_map_region_count_obj_refs(
vm_map_entry_t entry,
vm_map_offset_t start,
vm_map_offset_t end);
-#if CONFIG_FREEZE
-struct default_freezer_table;
-__private_extern__ void* default_freezer_mapping_create(vm_object_t, vm_offset_t);
-__private_extern__ void default_freezer_mapping_free(void**, boolean_t all);
-#endif
/*
 * Macros to copy a vm_map_entry. We must be careful to correctly
 * manage the wired page count. vm_map_entry_copy() creates a new
 * map entry to the same memory - the wired count in the new entry
 * must be set to zero. vm_map_entry_copy_full() creates a new entry
 * that is identical to the old entry; this preserves the wire count,
 * which is used for map splitting and zone changing in
 * vm_map_copyout.
*/
-#define vm_map_entry_copy(NEW,OLD) \
-MACRO_BEGIN \
+
+#define vm_map_entry_copy(NEW,OLD) \
+MACRO_BEGIN \
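+/* preserve NEW's "from_reserved_zone" across the copy so it is freed to the zone it was allocated from */ \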
+boolean_t _vmec_reserved = (NEW)->from_reserved_zone; \
*(NEW) = *(OLD); \
(NEW)->is_shared = FALSE; \
(NEW)->needs_wakeup = FALSE; \
(NEW)->wired_count = 0; \
(NEW)->user_wired_count = 0; \
(NEW)->permanent = FALSE; \
+ (NEW)->used_for_jit = FALSE; \
+ (NEW)->from_reserved_zone = _vmec_reserved; \
+ (NEW)->iokit_acct = FALSE; \
MACRO_END
-#define vm_map_entry_copy_full(NEW,OLD) (*(NEW) = *(OLD))
+#define vm_map_entry_copy_full(NEW,OLD) \
+MACRO_BEGIN \
+boolean_t _vmecf_reserved = (NEW)->from_reserved_zone; \
+(*(NEW) = *(OLD)); \
+(NEW)->from_reserved_zone = _vmecf_reserved; \
+MACRO_END
/*
* Decide if we want to allow processes to execute from their data or stack areas.
static zone_t vm_map_zone; /* zone for vm_map structures */
static zone_t vm_map_entry_zone; /* zone for vm_map_entry structures */
-static zone_t vm_map_kentry_zone; /* zone for kernel entry structures */
+static zone_t vm_map_entry_reserved_zone; /* zone with reserve for non-blocking
+ * allocations */
static zone_t vm_map_copy_zone; /* zone for vm_map_copy structures */
static vm_size_t map_data_size;
static void *kentry_data;
static vm_size_t kentry_data_size;
-static int kentry_count = 2048; /* to init kentry_data_size */
-#if CONFIG_EMBEDDED
-#define NO_COALESCE_LIMIT 0
-#else
#define NO_COALESCE_LIMIT ((1024 * 128) - 1)
-#endif
/* Skip acquiring locks if we're in the midst of a kernel core dump */
unsigned int not_in_kdp = 1;
start,
&map_entry) ||
map_entry->vme_end < end ||
- map_entry->is_sub_map) {
+ map_entry->is_sub_map ||
+ !(map_entry->protection & VM_PROT_EXECUTE)) {
/* that memory is not properly mapped */
kr = KERN_INVALID_ARGUMENT;
goto done;
lck_grp_t vm_map_lck_grp;
lck_grp_attr_t vm_map_lck_grp_attr;
lck_attr_t vm_map_lck_attr;
+lck_attr_t vm_map_lck_rw_attr;
/*
*
* vm_map_zone: used to allocate maps.
* vm_map_entry_zone: used to allocate map entries.
- * vm_map_kentry_zone: used to allocate map entries for the kernel.
+ * vm_map_entry_reserved_zone: fallback zone for kernel map entries
*
* The kernel allocates map entries from a special zone that is initially
* "crammed" with memory. It would be difficult (perhaps impossible) for
vm_map_init(
void)
{
+ vm_size_t entry_zone_alloc_size;
+ const char *mez_name = "VM map entries";
+
vm_map_zone = zinit((vm_map_size_t) sizeof(struct _vm_map), 40*1024,
PAGE_SIZE, "maps");
zone_change(vm_map_zone, Z_NOENCRYPT, TRUE);
-
+#if defined(__LP64__)
+ entry_zone_alloc_size = PAGE_SIZE * 5;
+#else
+ entry_zone_alloc_size = PAGE_SIZE * 6;
+#endif
vm_map_entry_zone = zinit((vm_map_size_t) sizeof(struct vm_map_entry),
- 1024*1024, PAGE_SIZE*5,
- "non-kernel map entries");
+ 1024*1024, entry_zone_alloc_size,
+ mez_name);
zone_change(vm_map_entry_zone, Z_NOENCRYPT, TRUE);
+ zone_change(vm_map_entry_zone, Z_NOCALLOUT, TRUE);
+ zone_change(vm_map_entry_zone, Z_GZALLOC_EXEMPT, TRUE);
- vm_map_kentry_zone = zinit((vm_map_size_t) sizeof(struct vm_map_entry),
- kentry_data_size, kentry_data_size,
- "kernel map entries");
- zone_change(vm_map_kentry_zone, Z_NOENCRYPT, TRUE);
+ vm_map_entry_reserved_zone = zinit((vm_map_size_t) sizeof(struct vm_map_entry),
+ kentry_data_size * 64, kentry_data_size,
+ "Reserved VM map entries");
+ zone_change(vm_map_entry_reserved_zone, Z_NOENCRYPT, TRUE);
vm_map_copy_zone = zinit((vm_map_size_t) sizeof(struct vm_map_copy),
- 16*1024, PAGE_SIZE, "map copies");
+ 16*1024, PAGE_SIZE, "VM map copies");
zone_change(vm_map_copy_zone, Z_NOENCRYPT, TRUE);
/*
* Cram the map and kentry zones with initial data.
- * Set kentry_zone non-collectible to aid zone_gc().
+ * Set reserved_zone non-collectible to aid zone_gc().
*/
zone_change(vm_map_zone, Z_COLLECT, FALSE);
- zone_change(vm_map_kentry_zone, Z_COLLECT, FALSE);
- zone_change(vm_map_kentry_zone, Z_EXPAND, FALSE);
- zone_change(vm_map_kentry_zone, Z_FOREIGN, TRUE);
- zone_change(vm_map_kentry_zone, Z_CALLERACCT, FALSE); /* don't charge caller */
+
+ zone_change(vm_map_entry_reserved_zone, Z_COLLECT, FALSE);
+ zone_change(vm_map_entry_reserved_zone, Z_EXPAND, FALSE);
+ zone_change(vm_map_entry_reserved_zone, Z_FOREIGN, TRUE);
+ zone_change(vm_map_entry_reserved_zone, Z_NOCALLOUT, TRUE);
+ zone_change(vm_map_entry_reserved_zone, Z_CALLERACCT, FALSE); /* don't charge caller */
zone_change(vm_map_copy_zone, Z_CALLERACCT, FALSE); /* don't charge caller */
+ zone_change(vm_map_entry_reserved_zone, Z_GZALLOC_EXEMPT, TRUE);
- zcram(vm_map_zone, map_data, map_data_size);
- zcram(vm_map_kentry_zone, kentry_data, kentry_data_size);
+ zcram(vm_map_zone, (vm_offset_t)map_data, map_data_size);
+ zcram(vm_map_entry_reserved_zone, (vm_offset_t)kentry_data, kentry_data_size);
lck_grp_attr_setdefault(&vm_map_lck_grp_attr);
lck_grp_init(&vm_map_lck_grp, "vm_map", &vm_map_lck_grp_attr);
lck_attr_setdefault(&vm_map_lck_attr);
+
+ lck_attr_setdefault(&vm_map_lck_rw_attr);
+ lck_attr_cleardebug(&vm_map_lck_rw_attr);
+
+#if CONFIG_FREEZE
+ default_freezer_init();
+#endif /* CONFIG_FREEZE */
}
void
vm_map_steal_memory(
void)
{
+ uint32_t kentry_initial_pages;
+
map_data_size = round_page(10 * sizeof(struct _vm_map));
map_data = pmap_steal_memory(map_data_size);
-#if 0
/*
- * Limiting worst case: vm_map_kentry_zone needs to map each "available"
- * physical page (i.e. that beyond the kernel image and page tables)
- * individually; we guess at most one entry per eight pages in the
- * real world. This works out to roughly .1 of 1% of physical memory,
- * or roughly 1900 entries (64K) for a 64M machine with 4K pages.
+ * kentry_initial_pages corresponds to the number of kernel map entries
+ * required during bootstrap until the asynchronous replenishment
+ * scheme is activated and/or entries are available from the general
+ * map entry pool.
*/
+#if defined(__LP64__)
+ kentry_initial_pages = 10;
+#else
+ kentry_initial_pages = 6;
#endif
- kentry_count = pmap_free_pages() / 8;
+#if CONFIG_GZALLOC
+ /* If using the guard allocator, reserve more memory for the kernel
+ * reserved map entry pool.
+ */
+ if (gzalloc_enabled())
+ kentry_initial_pages *= 1024;
+#endif
- kentry_data_size =
- round_page(kentry_count * sizeof(struct vm_map_entry));
+ kentry_data_size = kentry_initial_pages * PAGE_SIZE;
kentry_data = pmap_steal_memory(kentry_data_size);
}
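+
+/*
+ * Configure the reserved map-entry zone for asynchronous (priority) refill,
+ * so it is replenished in the background as reserved entries are consumed.
+ */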
+void vm_kernel_reserved_entry_init(void) {
+ zone_prio_refill_configure(vm_map_entry_reserved_zone, (6*PAGE_SIZE)/sizeof(struct vm_map_entry));
+}
+
/*
* vm_map_create:
*
vm_map_store_init( &(result->hdr) );
+ result->hdr.page_shift = PAGE_SHIFT;
+
result->size = 0;
result->user_wire_limit = MACH_VM_MAX_ADDRESS; /* default limit is unlimited */
result->user_wire_size = 0;
result->max_offset = max;
result->wiring_required = FALSE;
result->no_zero_fill = FALSE;
- result->mapped = FALSE;
+ result->mapped_in_other_pmaps = FALSE;
result->wait_for_space = FALSE;
result->switch_protect = FALSE;
result->disable_vmentry_reuse = FALSE;
result->color_rr = (color_seed++) & vm_color_mask;
result->jit_entry_exists = FALSE;
#if CONFIG_FREEZE
- result->default_freezer_toc = NULL;
+ result->default_freezer_handle = NULL;
#endif
vm_map_lock_init(result);
lck_mtx_init_ext(&result->s_lock, &result->s_lock_ext, &vm_map_lck_grp, &vm_map_lck_attr);
* Allocates a VM map entry for insertion in the
* given map (or map copy). No fields are filled.
*/
-#define vm_map_entry_create(map) \
- _vm_map_entry_create(&(map)->hdr)
+#define vm_map_entry_create(map, map_locked) _vm_map_entry_create(&(map)->hdr, map_locked)
-#define vm_map_copy_entry_create(copy) \
- _vm_map_entry_create(&(copy)->cpy_hdr)
+#define vm_map_copy_entry_create(copy, map_locked) \
+ _vm_map_entry_create(&(copy)->cpy_hdr, map_locked)
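+
+/* counts of kernel map entries served from the reserved vs. regular zone */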
+unsigned reserved_zalloc_count, nonreserved_zalloc_count;
static vm_map_entry_t
_vm_map_entry_create(
- register struct vm_map_header *map_header)
+ struct vm_map_header *map_header, boolean_t __unused map_locked)
{
- register zone_t zone;
- register vm_map_entry_t entry;
+ zone_t zone;
+ vm_map_entry_t entry;
- if (map_header->entries_pageable)
- zone = vm_map_entry_zone;
- else
- zone = vm_map_kentry_zone;
+ zone = vm_map_entry_zone;
+
+ assert(map_header->entries_pageable ? !map_locked : TRUE);
+
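+	/*
+	 * Pageable map headers use a normal, blockable allocation.
+	 * Kernel (non-pageable) headers first try a non-blocking allocation
+	 * from the regular zone and fall back to the pre-filled reserved
+	 * zone, so map operations can proceed without waiting for the zone
+	 * to be replenished.
+	 */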
+ if (map_header->entries_pageable) {
+ entry = (vm_map_entry_t) zalloc(zone);
+ }
+ else {
+ entry = (vm_map_entry_t) zalloc_canblock(zone, FALSE);
+
+ if (entry == VM_MAP_ENTRY_NULL) {
+ zone = vm_map_entry_reserved_zone;
+ entry = (vm_map_entry_t) zalloc(zone);
+ OSAddAtomic(1, &reserved_zalloc_count);
+ } else
+ OSAddAtomic(1, &nonreserved_zalloc_count);
+ }
- entry = (vm_map_entry_t) zalloc(zone);
if (entry == VM_MAP_ENTRY_NULL)
panic("vm_map_entry_create");
- vm_map_store_update( (vm_map_t) NULL, entry, VM_MAP_ENTRY_CREATE);
+ entry->from_reserved_zone = (zone == vm_map_entry_reserved_zone);
+ vm_map_store_update( (vm_map_t) NULL, entry, VM_MAP_ENTRY_CREATE);
+#if MAP_ENTRY_CREATION_DEBUG
+ entry->vme_creation_maphdr = map_header;
+ fastbacktrace(&entry->vme_creation_bt[0],
+ (sizeof(entry->vme_creation_bt)/sizeof(uintptr_t)));
+#endif
return(entry);
}
* of the stores
*/
#define vm_map_entry_dispose(map, entry) \
- vm_map_store_update( map, entry, VM_MAP_ENTRY_DELETE); \
_vm_map_entry_dispose(&(map)->hdr, (entry))
#define vm_map_copy_entry_dispose(map, entry) \
{
register zone_t zone;
- if (map_header->entries_pageable)
+ if (map_header->entries_pageable || !(entry->from_reserved_zone))
zone = vm_map_entry_zone;
else
- zone = vm_map_kentry_zone;
+ zone = vm_map_entry_reserved_zone;
+
+ if (!map_header->entries_pageable) {
+ if (zone == vm_map_entry_zone)
+ OSAddAtomic(-1, &nonreserved_zalloc_count);
+ else
+ OSAddAtomic(-1, &reserved_zalloc_count);
+ }
zfree(zone, entry);
}
flags, VM_MAP_NULL);
#if CONFIG_FREEZE
- if (map->default_freezer_toc){
- default_freezer_mapping_free( &(map->default_freezer_toc), TRUE);
+ if (map->default_freezer_handle) {
+ default_freezer_handle_deallocate(map->default_freezer_handle);
+ map->default_freezer_handle = NULL;
}
#endif
vm_map_unlock(map);
if (flags & VM_FLAGS_GUARD_AFTER) {
/* account for the back guard page in the size */
- size += PAGE_SIZE_64;
+ size += VM_MAP_PAGE_SIZE(map);
}
- new_entry = vm_map_entry_create(map);
+ new_entry = vm_map_entry_create(map, FALSE);
/*
* Look for the first possible address; if there's already
if (flags & VM_FLAGS_GUARD_BEFORE) {
/* reserve space for the front guard page */
- start += PAGE_SIZE_64;
+ start += VM_MAP_PAGE_SIZE(map);
}
end = ((start + mask) & ~mask);
if (flags & VM_FLAGS_GUARD_BEFORE) {
/* go back for the front guard page */
- start -= PAGE_SIZE_64;
+ start -= VM_MAP_PAGE_SIZE(map);
}
*address = start;
+ assert(start < end);
new_entry->vme_start = start;
new_entry->vme_end = end;
assert(page_aligned(new_entry->vme_start));
assert(page_aligned(new_entry->vme_end));
+ assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_start,
+ VM_MAP_PAGE_MASK(map)));
+ assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_end,
+ VM_MAP_PAGE_MASK(map)));
new_entry->is_shared = FALSE;
new_entry->is_sub_map = FALSE;
- new_entry->use_pmap = FALSE;
+ new_entry->use_pmap = TRUE;
new_entry->object.vm_object = VM_OBJECT_NULL;
new_entry->offset = (vm_object_offset_t) 0;
new_entry->needs_wakeup = FALSE;
new_entry->no_cache = FALSE;
new_entry->permanent = FALSE;
- new_entry->superpage_size = 0;
+ new_entry->superpage_size = FALSE;
+ if (VM_MAP_PAGE_SHIFT(map) != PAGE_SHIFT) {
+ new_entry->map_aligned = TRUE;
+ } else {
+ new_entry->map_aligned = FALSE;
+ }
+
+ new_entry->used_for_jit = 0;
new_entry->alias = 0;
new_entry->zero_wired_pages = FALSE;
+ new_entry->iokit_acct = FALSE;
VM_GET_FLAGS_ALIAS(flags, new_entry->alias);
* In/out conditions:
* The source map should not be locked on entry.
*/
-static void
+__unused static void
vm_map_pmap_enter(
vm_map_t map,
register vm_map_offset_t addr,
while (addr < end_addr) {
register vm_page_t m;
+
+ /*
+ * TODO:
+ * From vm_map_enter(), we come into this function without the map
+ * lock held or the object lock held.
+ * We haven't taken a reference on the object either.
+ * We should do a proper lookup on the map to make sure
+ * that things are sane before we go locking objects that
+ * could have been deallocated from under us.
+ */
+
vm_object_lock(object);
m = vm_page_lookup(object, offset);
type_of_fault = DBG_CACHE_HIT_FAULT;
kr = vm_fault_enter(m, map->pmap, addr, protection, protection,
VM_PAGE_WIRED(m), FALSE, FALSE, FALSE,
+ 0, /* XXX need user tag / alias? */
+ 0, /* alternate accounting? */
+ NULL,
&type_of_fault);
vm_object_unlock(object);
#endif /* MACHINE_PMAP_IS_EMPTY */
}
+#define MAX_TRIES_TO_GET_RANDOM_ADDRESS 1000
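+/*
+ * vm_map_random_address_for_size:
+ *
+ * Pick a page-aligned random address within "map" that is followed by at
+ * least "size" bytes of unmapped space.  Used to randomize the placement
+ * of JIT mappings; returns KERN_NO_SPACE after
+ * MAX_TRIES_TO_GET_RANDOM_ADDRESS unsuccessful attempts.
+ */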
+kern_return_t
+vm_map_random_address_for_size(
+ vm_map_t map,
+ vm_map_offset_t *address,
+ vm_map_size_t size)
+{
+ kern_return_t kr = KERN_SUCCESS;
+ int tries = 0;
+ vm_map_offset_t random_addr = 0;
+ vm_map_offset_t hole_end;
+
+ vm_map_entry_t next_entry = VM_MAP_ENTRY_NULL;
+ vm_map_entry_t prev_entry = VM_MAP_ENTRY_NULL;
+ vm_map_size_t vm_hole_size = 0;
+ vm_map_size_t addr_space_size;
+
+ addr_space_size = vm_map_max(map) - vm_map_min(map);
+
+ assert(page_aligned(size));
+
+ while (tries < MAX_TRIES_TO_GET_RANDOM_ADDRESS) {
+ random_addr = ((vm_map_offset_t)random()) << PAGE_SHIFT;
+ random_addr = vm_map_trunc_page(
+ vm_map_min(map) +(random_addr % addr_space_size),
+ VM_MAP_PAGE_MASK(map));
+
+ if (vm_map_lookup_entry(map, random_addr, &prev_entry) == FALSE) {
+ if (prev_entry == vm_map_to_entry(map)) {
+ next_entry = vm_map_first_entry(map);
+ } else {
+ next_entry = prev_entry->vme_next;
+ }
+ if (next_entry == vm_map_to_entry(map)) {
+ hole_end = vm_map_max(map);
+ } else {
+ hole_end = next_entry->vme_start;
+ }
+ vm_hole_size = hole_end - random_addr;
+ if (vm_hole_size >= size) {
+ *address = random_addr;
+ break;
+ }
+ }
+ tries++;
+ }
+
+ if (tries == MAX_TRIES_TO_GET_RANDOM_ADDRESS) {
+ kr = KERN_NO_SPACE;
+ }
+ return kr;
+}
+
/*
* Routine: vm_map_enter
*
boolean_t map_locked = FALSE;
boolean_t pmap_empty = TRUE;
boolean_t new_mapping_established = FALSE;
+ boolean_t keep_map_locked = ((flags & VM_FLAGS_KEEP_MAP_LOCKED) != 0);
boolean_t anywhere = ((flags & VM_FLAGS_ANYWHERE) != 0);
boolean_t purgable = ((flags & VM_FLAGS_PURGABLE) != 0);
boolean_t overwrite = ((flags & VM_FLAGS_OVERWRITE) != 0);
boolean_t no_cache = ((flags & VM_FLAGS_NO_CACHE) != 0);
boolean_t is_submap = ((flags & VM_FLAGS_SUBMAP) != 0);
boolean_t permanent = ((flags & VM_FLAGS_PERMANENT) != 0);
+ boolean_t entry_for_jit = ((flags & VM_FLAGS_MAP_JIT) != 0);
+ boolean_t iokit_acct = ((flags & VM_FLAGS_IOKIT_ACCT) != 0);
unsigned int superpage_size = ((flags & VM_FLAGS_SUPERPAGE_MASK) >> VM_FLAGS_SUPERPAGE_SHIFT);
char alias;
vm_map_offset_t effective_min_offset, effective_max_offset;
kern_return_t kr;
+ boolean_t clear_map_aligned = FALSE;
if (superpage_size) {
switch (superpage_size) {
}
-#if CONFIG_EMBEDDED
- if (cur_protection & VM_PROT_WRITE){
- if ((cur_protection & VM_PROT_EXECUTE) && !(flags & VM_FLAGS_MAP_JIT)){
- printf("EMBEDDED: %s curprot cannot be write+execute. turning off execute\n", __PRETTY_FUNCTION__);
- cur_protection &= ~VM_PROT_EXECUTE;
- }
- }
-#endif /* CONFIG_EMBEDDED */
if (is_submap) {
if (purgable) {
assert(page_aligned(*address));
assert(page_aligned(size));
+ if (!VM_MAP_PAGE_ALIGNED(size, VM_MAP_PAGE_MASK(map))) {
+ /*
+ * In most cases, the caller rounds the size up to the
+ * map's page size.
+ * If we get a size that is explicitly not map-aligned here,
+ * we'll have to respect the caller's wish and mark the
+ * mapping as "not map-aligned" to avoid tripping the
+ * map alignment checks later.
+ */
+ clear_map_aligned = TRUE;
+ }
+ if (!anywhere &&
+ !VM_MAP_PAGE_ALIGNED(*address, VM_MAP_PAGE_MASK(map))) {
+ /*
+ * We've been asked to map at a fixed address and that
+ * address is not aligned to the map's specific alignment.
+ * The caller should know what it's doing (i.e. most likely
+ * mapping some fragmented copy map, transferring memory from
+ * a VM map with a different alignment), so clear map_aligned
+ * for this new VM map entry and proceed.
+ */
+ clear_map_aligned = TRUE;
+ }
+
/*
* Only zero-fill objects are allowed to be purgable.
* LP64todo - limit purgable objects to 32-bits for now
*address,
*address + size,
map->hdr.entries_pageable);
+ vm_map_set_page_shift(zap_old_map, VM_MAP_PAGE_SHIFT(map));
}
StartAgain: ;
vm_map_lock(map);
map_locked = TRUE;
- if ((flags & VM_FLAGS_MAP_JIT) && (map->jit_entry_exists)){
- result = KERN_INVALID_ARGUMENT;
- goto BailOut;
+ if (entry_for_jit) {
+ if (map->jit_entry_exists) {
+ result = KERN_INVALID_ARGUMENT;
+ goto BailOut;
+ }
+ /*
+ * Get a random start address.
+ */
+ result = vm_map_random_address_for_size(map, address, size);
+ if (result != KERN_SUCCESS) {
+ goto BailOut;
+ }
+ start = *address;
}
+
/*
* Calculate the first possible address.
*/
} else {
if (start < (entry->vme_next)->vme_start ) {
start = entry->vme_end;
+ start = vm_map_round_page(start,
+ VM_MAP_PAGE_MASK(map));
} else {
/*
* Need to do a lookup.
if (entry == NULL) {
vm_map_entry_t tmp_entry;
- if (vm_map_lookup_entry(map, start, &tmp_entry))
+ if (vm_map_lookup_entry(map, start, &tmp_entry)) {
+ assert(!entry_for_jit);
start = tmp_entry->vme_end;
+ start = vm_map_round_page(start,
+ VM_MAP_PAGE_MASK(map));
+ }
entry = tmp_entry;
}
}
*/
end = ((start + mask) & ~mask);
+ end = vm_map_round_page(end,
+ VM_MAP_PAGE_MASK(map));
if (end < start)
RETURN(KERN_NO_SPACE);
start = end;
+ assert(VM_MAP_PAGE_ALIGNED(start,
+ VM_MAP_PAGE_MASK(map)));
end += size;
if ((end > effective_max_offset) || (end < start)) {
if (map->wait_for_space) {
+ assert(!keep_map_locked);
if (size <= (effective_max_offset -
effective_min_offset)) {
assert_wait((event_t)map,
entry = next;
start = entry->vme_end;
+ start = vm_map_round_page(start,
+ VM_MAP_PAGE_MASK(map));
}
*address = start;
+ assert(VM_MAP_PAGE_ALIGNED(*address,
+ VM_MAP_PAGE_MASK(map)));
} else {
/*
* Verify that:
* address range, saving them in our "zap_old_map".
*/
(void) vm_map_delete(map, start, end,
- VM_MAP_REMOVE_SAVE_ENTRIES,
+ (VM_MAP_REMOVE_SAVE_ENTRIES |
+ VM_MAP_REMOVE_NO_MAP_ALIGN),
zap_old_map);
}
entry->protection != cur_protection ||
entry->max_protection != max_protection ||
entry->inheritance != inheritance ||
+ entry->iokit_acct != iokit_acct ||
entry->alias != alias) {
/* not the same mapping ! */
RETURN(KERN_NO_SPACE);
* semantics.
*/
- if (purgable) {
+ if (purgable || entry_for_jit) {
if (object == VM_OBJECT_NULL) {
object = vm_object_allocate(size);
object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
- object->purgable = VM_PURGABLE_NONVOLATILE;
+ object->true_share = TRUE;
+ if (purgable) {
+ task_t owner;
+ object->purgable = VM_PURGABLE_NONVOLATILE;
+ if (map->pmap == kernel_pmap) {
+ /*
+ * Purgeable mappings made in a kernel
+ * map are "owned" by the kernel itself
+ * rather than the current user task
+ * because they're likely to be used by
+ * more than this user task (see
+ * execargs_purgeable_allocate(), for
+ * example).
+ */
+ owner = kernel_task;
+ } else {
+ owner = current_task();
+ }
+ assert(object->vo_purgeable_owner == NULL);
+ assert(object->resident_page_count == 0);
+ assert(object->wired_page_count == 0);
+ vm_object_lock(object);
+ vm_purgeable_nonvolatile_enqueue(object, owner);
+ vm_object_unlock(object);
+ }
offset = (vm_object_offset_t)0;
}
} else if ((is_submap == FALSE) &&
(entry->vme_end == start) &&
(!entry->is_shared) &&
(!entry->is_sub_map) &&
- ((alias == VM_MEMORY_REALLOC) || (entry->alias == alias)) &&
- (entry->inheritance == inheritance) &&
+ (!entry->in_transition) &&
+ (!entry->needs_wakeup) &&
+ (entry->behavior == VM_BEHAVIOR_DEFAULT) &&
(entry->protection == cur_protection) &&
(entry->max_protection == max_protection) &&
- (entry->behavior == VM_BEHAVIOR_DEFAULT) &&
- (entry->in_transition == 0) &&
+ (entry->inheritance == inheritance) &&
+ ((alias == VM_MEMORY_REALLOC) || (entry->alias == alias)) &&
(entry->no_cache == no_cache) &&
+ (entry->permanent == permanent) &&
+ (!entry->superpage_size && !superpage_size) &&
+ /*
+ * No coalescing if not map-aligned, to avoid propagating
+ * that condition any further than needed:
+ */
+ (!entry->map_aligned || !clear_map_aligned) &&
+ (!entry->zero_wired_pages) &&
+ (!entry->used_for_jit && !entry_for_jit) &&
+ (entry->iokit_acct == iokit_acct) &&
+
((entry->vme_end - entry->vme_start) + size <=
(alias == VM_MEMORY_REALLOC ?
ANON_CHUNK_SIZE :
NO_COALESCE_LIMIT)) &&
+
(entry->wired_count == 0)) { /* implies user_wired_count == 0 */
if (vm_object_coalesce(entry->object.vm_object,
VM_OBJECT_NULL,
* new range.
*/
map->size += (end - entry->vme_end);
+ assert(entry->vme_start < end);
+ assert(VM_MAP_PAGE_ALIGNED(end,
+ VM_MAP_PAGE_MASK(map)));
entry->vme_end = end;
vm_map_store_update_first_free(map, map->first_free);
+ new_mapping_established = TRUE;
RETURN(KERN_SUCCESS);
}
}
FALSE, FALSE,
cur_protection, max_protection,
VM_BEHAVIOR_DEFAULT,
- (flags & VM_FLAGS_MAP_JIT)? VM_INHERIT_NONE: inheritance,
+ (entry_for_jit)? VM_INHERIT_NONE: inheritance,
0, no_cache,
- permanent, superpage_size);
+ permanent,
+ superpage_size,
+ clear_map_aligned,
+ is_submap);
new_entry->alias = alias;
- if (flags & VM_FLAGS_MAP_JIT){
+ if (entry_for_jit){
if (!(map->jit_entry_exists)){
new_entry->used_for_jit = TRUE;
map->jit_entry_exists = TRUE;
}
}
+ assert(!new_entry->iokit_acct);
+ if (!is_submap &&
+ object != VM_OBJECT_NULL &&
+ object->purgable != VM_PURGABLE_DENY) {
+ assert(new_entry->use_pmap);
+ assert(!new_entry->iokit_acct);
+ /*
+ * Turn off pmap accounting since
+ * purgeable objects have their
+ * own ledgers.
+ */
+ new_entry->use_pmap = FALSE;
+ } else if (!is_submap &&
+ iokit_acct) {
+ /* alternate accounting */
+ assert(!new_entry->iokit_acct);
+ assert(new_entry->use_pmap);
+ new_entry->iokit_acct = TRUE;
+ new_entry->use_pmap = FALSE;
+ vm_map_iokit_mapped_region(
+ map,
+ (new_entry->vme_end -
+ new_entry->vme_start));
+ } else if (!is_submap) {
+ assert(!new_entry->iokit_acct);
+ assert(new_entry->use_pmap);
+ }
+
if (is_submap) {
vm_map_t submap;
boolean_t submap_is_64bit;
boolean_t use_pmap;
- new_entry->is_sub_map = TRUE;
+ assert(new_entry->is_sub_map);
+ assert(!new_entry->use_pmap);
+ assert(!new_entry->iokit_acct);
submap = (vm_map_t) object;
submap_is_64bit = vm_map_is_64bit(submap);
use_pmap = (alias == VM_MEMORY_SHARED_PMAP);
- #ifndef NO_NESTED_PMAP
+#ifndef NO_NESTED_PMAP
if (use_pmap && submap->pmap == NULL) {
+ ledger_t ledger = map->pmap->ledger;
/* we need a sub pmap to nest... */
- submap->pmap = pmap_create(0, submap_is_64bit);
+ submap->pmap = pmap_create(ledger, 0,
+ submap_is_64bit);
if (submap->pmap == NULL) {
/* let's proceed without nesting... */
}
pmap_empty = FALSE;
}
}
- #endif /* NO_NESTED_PMAP */
+#endif /* NO_NESTED_PMAP */
}
entry = new_entry;
sp_object->phys_contiguous = TRUE;
sp_object->vo_shadow_offset = (vm_object_offset_t)pages->phys_page*PAGE_SIZE;
entry->object.vm_object = sp_object;
+ assert(entry->use_pmap);
/* enter the base pages into the object */
vm_object_lock(sp_object);
tmp_end + (vm_map_size_t)ANON_CHUNK_SIZE : tmp2_end));
}
- vm_map_unlock(map);
- map_locked = FALSE;
-
new_mapping_established = TRUE;
- /* Wire down the new entry if the user
- * requested all new map entries be wired.
- */
- if ((map->wiring_required)||(superpage_size)) {
- pmap_empty = FALSE; /* pmap won't be empty */
- result = vm_map_wire(map, start, end,
- new_entry->protection, TRUE);
- RETURN(result);
- }
-
- if ((object != VM_OBJECT_NULL) &&
- (vm_map_pmap_enter_enable) &&
- (!anywhere) &&
- (!needs_copy) &&
- (size < (128*1024))) {
- pmap_empty = FALSE; /* pmap won't be empty */
-
- if (override_nx(map, alias) && cur_protection)
- cur_protection |= VM_PROT_EXECUTE;
-
- vm_map_pmap_enter(map, start, end,
- object, offset, cur_protection);
- }
+BailOut:
+ assert(map_locked == TRUE);
-BailOut: ;
if (result == KERN_SUCCESS) {
vm_prot_t pager_prot;
memory_object_t pager;
+#if DEBUG
if (pmap_empty &&
!(flags & VM_FLAGS_NO_PMAP_CHECK)) {
assert(vm_map_pmap_is_empty(map,
*address,
*address+size));
}
+#endif /* DEBUG */
/*
* For "named" VM objects, let the pager know that the
}
vm_object_unlock(object);
}
- } else {
+ }
+
+ assert(map_locked == TRUE);
+
+ if (!keep_map_locked) {
+ vm_map_unlock(map);
+ map_locked = FALSE;
+ }
+
+ /*
+ * We can't hold the map lock if we enter this block.
+ */
+
+ if (result == KERN_SUCCESS) {
+
+ /* Wire down the new entry if the user
+ * requested all new map entries be wired.
+ */
+ if ((map->wiring_required)||(superpage_size)) {
+ assert(!keep_map_locked);
+ pmap_empty = FALSE; /* pmap won't be empty */
+ kr = vm_map_wire(map, start, end,
+ new_entry->protection, TRUE);
+ result = kr;
+ }
+
+ }
+
+ if (result != KERN_SUCCESS) {
if (new_mapping_established) {
/*
* We have to get rid of the new mappings since we
*address,
*address + size,
map->hdr.entries_pageable);
+ vm_map_set_page_shift(zap_new_map,
+ VM_MAP_PAGE_SHIFT(map));
if (!map_locked) {
vm_map_lock(map);
map_locked = TRUE;
}
(void) vm_map_delete(map, *address, *address+size,
- VM_MAP_REMOVE_SAVE_ENTRIES,
+ (VM_MAP_REMOVE_SAVE_ENTRIES |
+ VM_MAP_REMOVE_NO_MAP_ALIGN),
zap_new_map);
}
if (zap_old_map != VM_MAP_NULL &&
}
}
- if (map_locked) {
+ /*
+ * The caller is responsible for releasing the lock if it requested to
+ * keep the map locked.
+ */
+ if (map_locked && !keep_map_locked) {
vm_map_unlock(map);
}
#undef RETURN
}
-kern_return_t
-vm_map_enter_mem_object(
+/*
+ * Counters for the prefault optimization.
+ */
+int64_t vm_prefault_nb_pages = 0;
+int64_t vm_prefault_nb_bailout = 0;
+
+static kern_return_t
+vm_map_enter_mem_object_helper(
vm_map_t target_map,
vm_map_offset_t *address,
vm_map_size_t initial_size,
boolean_t copy,
vm_prot_t cur_protection,
vm_prot_t max_protection,
- vm_inherit_t inheritance)
+ vm_inherit_t inheritance,
+ upl_page_list_ptr_t page_list,
+ unsigned int page_list_count)
{
vm_map_address_t map_addr;
vm_map_size_t map_size;
vm_object_size_t size;
kern_return_t result;
boolean_t mask_cur_protection, mask_max_protection;
+ boolean_t try_prefault = (page_list_count != 0);
+ vm_map_offset_t offset_in_mapping;
mask_cur_protection = cur_protection & VM_PROT_IS_MASK;
mask_max_protection = max_protection & VM_PROT_IS_MASK;
(cur_protection & ~VM_PROT_ALL) ||
(max_protection & ~VM_PROT_ALL) ||
(inheritance > VM_INHERIT_LAST_VALID) ||
+ (try_prefault && (copy || !page_list)) ||
initial_size == 0)
return KERN_INVALID_ARGUMENT;
- map_addr = vm_map_trunc_page(*address);
- map_size = vm_map_round_page(initial_size);
- size = vm_object_round_page(initial_size);
+ map_addr = vm_map_trunc_page(*address,
+ VM_MAP_PAGE_MASK(target_map));
+ map_size = vm_map_round_page(initial_size,
+ VM_MAP_PAGE_MASK(target_map));
+ size = vm_object_round_page(initial_size);
/*
* Find the vm object (if any) corresponding to this port.
vm_named_entry_t named_entry;
named_entry = (vm_named_entry_t) port->ip_kobject;
+
+ if ((flags & VM_FLAGS_RETURN_DATA_ADDR) != 0) {
+ offset += named_entry->data_offset;
+ }
+
/* a few checks to make sure user is obeying rules */
if (size == 0) {
if (offset >= named_entry->size)
if ((named_entry->protection & cur_protection) !=
cur_protection)
return KERN_INVALID_RIGHT;
+ if (offset + size < offset) {
+ /* overflow */
+ return KERN_INVALID_ARGUMENT;
+ }
if (named_entry->size < (offset + size))
return KERN_INVALID_ARGUMENT;
+ if (named_entry->is_copy) {
+ /* for a vm_map_copy, we can only map it whole */
+ if ((size != named_entry->size) &&
+ (vm_map_round_page(size,
+ VM_MAP_PAGE_MASK(target_map)) ==
+ named_entry->size)) {
+ /* XXX FBDP use the rounded size... */
+ size = vm_map_round_page(
+ size,
+ VM_MAP_PAGE_MASK(target_map));
+ }
+
+ if (!(flags & VM_FLAGS_ANYWHERE) &&
+ (offset != 0 ||
+ size != named_entry->size)) {
+ /*
+ * XXX for a mapping at a "fixed" address,
+ * we can't trim after mapping the whole
+ * memory entry, so reject a request for a
+ * partial mapping.
+ */
+ return KERN_INVALID_ARGUMENT;
+ }
+ }
+
/* the callers parameter offset is defined to be the */
/* offset from beginning of named entry offset in object */
offset = offset + named_entry->offset;
+ if (! VM_MAP_PAGE_ALIGNED(size,
+ VM_MAP_PAGE_MASK(target_map))) {
+ /*
+ * Let's not map more than requested;
+ * vm_map_enter() will handle this "not map-aligned"
+ * case.
+ */
+ map_size = size;
+ }
+
named_entry_lock(named_entry);
if (named_entry->is_sub_map) {
vm_map_t submap;
+ if ((flags & VM_FLAGS_RETURN_DATA_ADDR) != 0) {
+ panic("VM_FLAGS_RETURN_DATA_ADDR not expected for submap.");
+ }
+
submap = named_entry->backing.map;
vm_map_lock(submap);
vm_map_reference(submap);
* once it's been set and if we race, we'll
* just end up setting it twice, which is OK.
*/
- if (submap->mapped == FALSE) {
+ if (submap->mapped_in_other_pmaps == FALSE &&
+ vm_map_pmap(submap) != PMAP_NULL &&
+ vm_map_pmap(submap) !=
+ vm_map_pmap(target_map)) {
/*
- * This submap has never been mapped.
- * Set its "mapped" flag now that it
- * has been mapped.
- * This happens only for the first ever
- * mapping of a "submap".
+ * This submap is being mapped in a map
+ * that uses a different pmap.
+ * Set its "mapped_in_other_pmaps" flag
+ * to indicate that we now need to
+ * remove mappings from all pmaps rather
+ * than just the submap's pmap.
*/
vm_map_lock(submap);
- submap->mapped = TRUE;
+ submap->mapped_in_other_pmaps = TRUE;
vm_map_unlock(submap);
}
*address = map_addr;
protections = named_entry->protection & VM_PROT_ALL;
access = GET_MAP_MEM(named_entry->protection);
+ if ((flags & VM_FLAGS_RETURN_DATA_ADDR) != 0) {
+ panic("VM_FLAGS_RETURN_DATA_ADDR not expected for submap.");
+ }
+
object = vm_object_enter(named_entry->backing.pager,
named_entry->size,
named_entry->internal,
wimg_mode = VM_WIMG_IO;
} else if (access == MAP_MEM_COPYBACK) {
wimg_mode = VM_WIMG_USE_DEFAULT;
+ } else if (access == MAP_MEM_INNERWBACK) {
+ wimg_mode = VM_WIMG_INNERWBACK;
} else if (access == MAP_MEM_WTHRU) {
wimg_mode = VM_WIMG_WTHRU;
} else if (access == MAP_MEM_WCOMB) {
if (object->wimg_bits != wimg_mode)
vm_object_change_wimg_mode(object, wimg_mode);
+#if VM_OBJECT_TRACKING_OP_TRUESHARE
+ if (!object->true_share &&
+ vm_object_tracking_inited) {
+ void *bt[VM_OBJECT_TRACKING_BTDEPTH];
+ int num = 0;
+
+ num = OSBacktrace(bt,
+ VM_OBJECT_TRACKING_BTDEPTH);
+ btlog_add_entry(vm_object_tracking_btlog,
+ object,
+ VM_OBJECT_TRACKING_OP_TRUESHARE,
+ bt,
+ num);
+ }
+#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
+
object->true_share = TRUE;
if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
vm_object_unlock(object);
+
+ } else if (named_entry->is_copy) {
+ kern_return_t kr;
+ vm_map_copy_t copy_map;
+ vm_map_entry_t copy_entry;
+ vm_map_offset_t copy_addr;
+
+ if (flags & ~(VM_FLAGS_FIXED |
+ VM_FLAGS_ANYWHERE |
+ VM_FLAGS_OVERWRITE |
+ VM_FLAGS_RETURN_DATA_ADDR)) {
+ named_entry_unlock(named_entry);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if ((flags & VM_FLAGS_RETURN_DATA_ADDR) != 0) {
+ offset_in_mapping = offset - vm_object_trunc_page(offset);
+ offset = vm_object_trunc_page(offset);
+ map_size = vm_object_round_page(offset + offset_in_mapping + initial_size) - offset;
+ }
+
+ copy_map = named_entry->backing.copy;
+ assert(copy_map->type == VM_MAP_COPY_ENTRY_LIST);
+ if (copy_map->type != VM_MAP_COPY_ENTRY_LIST) {
+ /* unsupported type; should not happen */
+ printf("vm_map_enter_mem_object: "
+ "memory_entry->backing.copy "
+ "unsupported type 0x%x\n",
+ copy_map->type);
+ named_entry_unlock(named_entry);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /* reserve a contiguous range */
+ kr = vm_map_enter(target_map,
+ &map_addr,
+ /* map whole mem entry, trim later: */
+ named_entry->size,
+ mask,
+ flags & (VM_FLAGS_ANYWHERE |
+ VM_FLAGS_OVERWRITE |
+ VM_FLAGS_RETURN_DATA_ADDR),
+ VM_OBJECT_NULL,
+ 0,
+ FALSE, /* copy */
+ cur_protection,
+ max_protection,
+ inheritance);
+ if (kr != KERN_SUCCESS) {
+ named_entry_unlock(named_entry);
+ return kr;
+ }
+
+ copy_addr = map_addr;
+
+ for (copy_entry = vm_map_copy_first_entry(copy_map);
+ copy_entry != vm_map_copy_to_entry(copy_map);
+ copy_entry = copy_entry->vme_next) {
+ int remap_flags = 0;
+ vm_map_t copy_submap;
+ vm_object_t copy_object;
+ vm_map_size_t copy_size;
+ vm_object_offset_t copy_offset;
+
+ copy_offset = copy_entry->offset;
+ copy_size = (copy_entry->vme_end -
+ copy_entry->vme_start);
+
+ /* sanity check */
+ if ((copy_addr + copy_size) >
+ (map_addr +
+ named_entry->size /* XXX full size */ )) {
+ /* over-mapping too much !? */
+ kr = KERN_INVALID_ARGUMENT;
+ /* abort */
+ break;
+ }
+
+ /* take a reference on the object */
+ if (copy_entry->is_sub_map) {
+ remap_flags |= VM_FLAGS_SUBMAP;
+ copy_submap =
+ copy_entry->object.sub_map;
+ vm_map_lock(copy_submap);
+ vm_map_reference(copy_submap);
+ vm_map_unlock(copy_submap);
+ copy_object = (vm_object_t) copy_submap;
+ } else {
+ copy_object =
+ copy_entry->object.vm_object;
+ vm_object_reference(copy_object);
+ }
+
+ /* over-map the object into destination */
+ remap_flags |= flags;
+ remap_flags |= VM_FLAGS_FIXED;
+ remap_flags |= VM_FLAGS_OVERWRITE;
+ remap_flags &= ~VM_FLAGS_ANYWHERE;
+ kr = vm_map_enter(target_map,
+				   &copy_addr,
+ copy_size,
+ (vm_map_offset_t) 0,
+ remap_flags,
+ copy_object,
+ copy_offset,
+ copy,
+ cur_protection,
+ max_protection,
+ inheritance);
+ if (kr != KERN_SUCCESS) {
+ if (copy_entry->is_sub_map) {
+ vm_map_deallocate(copy_submap);
+ } else {
+ vm_object_deallocate(copy_object);
+ }
+ /* abort */
+ break;
+ }
+
+ /* next mapping */
+ copy_addr += copy_size;
+ }
+
+ if (kr == KERN_SUCCESS) {
+ if ((flags & VM_FLAGS_RETURN_DATA_ADDR) != 0) {
+ *address = map_addr + offset_in_mapping;
+ } else {
+ *address = map_addr;
+ }
+
+ if (offset) {
+ /*
+ * Trim in front, from 0 to "offset".
+ */
+ vm_map_remove(target_map,
+ map_addr,
+ map_addr + offset,
+ 0);
+ *address += offset;
+ }
+ if (offset + map_size < named_entry->size) {
+ /*
+ * Trim in back, from
+ * "offset + map_size" to
+ * "named_entry->size".
+ */
+ vm_map_remove(target_map,
+ (map_addr +
+ offset + map_size),
+ (map_addr +
+ named_entry->size),
+ 0);
+ }
+ }
+ named_entry_unlock(named_entry);
+
+ if (kr != KERN_SUCCESS) {
+ if (! (flags & VM_FLAGS_OVERWRITE)) {
+ /* deallocate the contiguous range */
+ (void) vm_deallocate(target_map,
+ map_addr,
+ map_size);
+ }
+ }
+
+ return kr;
+
} else {
/* This is the case where we are going to map */
/* an already mapped object. If the object is */
/* object cannot be mapped until it is ready */
/* we can therefore avoid the ready check */
/* in this case. */
+ if ((flags & VM_FLAGS_RETURN_DATA_ADDR) != 0) {
+ offset_in_mapping = offset - vm_object_trunc_page(offset);
+ offset = vm_object_trunc_page(offset);
+ map_size = vm_object_round_page(offset + offset_in_mapping + initial_size) - offset;
+ }
+
object = named_entry->backing.object;
assert(object != VM_OBJECT_NULL);
named_entry_unlock(named_entry);
* this case, the port isn't really a port at all, but
* instead is just a raw memory object.
*/
-
+ if ((flags & VM_FLAGS_RETURN_DATA_ADDR) != 0) {
+ panic("VM_FLAGS_RETURN_DATA_ADDR not expected for raw memory object.");
+ }
+
object = vm_object_enter((memory_object_t)port,
size, FALSE, FALSE, FALSE);
if (object == VM_OBJECT_NULL)
offset = new_offset;
}
+ /*
+ * If users want to try to prefault pages, the mapping and prefault
+	 * need to be atomic.
+ */
+ if (try_prefault)
+ flags |= VM_FLAGS_KEEP_MAP_LOCKED;
result = vm_map_enter(target_map,
&map_addr, map_size,
(vm_map_offset_t)mask,
cur_protection, max_protection, inheritance);
if (result != KERN_SUCCESS)
vm_object_deallocate(object);
- *address = map_addr;
- return result;
-}
+ /*
+ * Try to prefault, and do not forget to release the vm map lock.
+ */
+ if (result == KERN_SUCCESS && try_prefault) {
+ mach_vm_address_t va = map_addr;
+ kern_return_t kr = KERN_SUCCESS;
+ unsigned int i = 0;
+
+ for (i = 0; i < page_list_count; ++i) {
+ if (UPL_VALID_PAGE(page_list, i)) {
+ /*
+				 * If this function call failed, we should stop
+				 * trying to optimize; other calls are likely
+				 * going to fail too.
+				 *
+				 * We are not going to report an error for such
+				 * a failure, though. That's an optimization,
+				 * not something critical.
+ */
+ kr = pmap_enter_options(target_map->pmap,
+ va, UPL_PHYS_PAGE(page_list, i),
+ cur_protection, VM_PROT_NONE,
+ 0, TRUE, PMAP_OPTIONS_NOWAIT, NULL);
+ if (kr != KERN_SUCCESS) {
+ OSIncrementAtomic64(&vm_prefault_nb_bailout);
+ goto BailOut;
+ }
+ OSIncrementAtomic64(&vm_prefault_nb_pages);
+ }
+ /* Next virtual address */
+ va += PAGE_SIZE;
+ }
+BailOut:
+ vm_map_unlock(target_map);
+ }
+ if ((flags & VM_FLAGS_RETURN_DATA_ADDR) != 0) {
+ *address = map_addr + offset_in_mapping;
+ } else {
+ *address = map_addr;
+ }
+ return result;
+}
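+
+/*
+ * vm_map_enter_mem_object:
+ *
+ * Map the memory object represented by "port" (a named entry or a raw
+ * memory object) into "target_map".  Thin wrapper around
+ * vm_map_enter_mem_object_helper() with no prefault page list.
+ */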
kern_return_t
-vm_map_enter_mem_object_control(
+vm_map_enter_mem_object(
vm_map_t target_map,
vm_map_offset_t *address,
vm_map_size_t initial_size,
vm_map_offset_t mask,
int flags,
- memory_object_control_t control,
+ ipc_port_t port,
vm_object_offset_t offset,
boolean_t copy,
vm_prot_t cur_protection,
vm_prot_t max_protection,
vm_inherit_t inheritance)
{
- vm_map_address_t map_addr;
- vm_map_size_t map_size;
- vm_object_t object;
- vm_object_size_t size;
+ return vm_map_enter_mem_object_helper(target_map, address, initial_size, mask, flags,
+ port, offset, copy, cur_protection, max_protection,
+ inheritance, NULL, 0);
+}
+
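+/*
+ * vm_map_enter_mem_object_prefault:
+ *
+ * Like vm_map_enter_mem_object(), but also tries to pre-populate the pmap
+ * with the physical pages described by "page_list" while the map is still
+ * locked, so the mapping and the prefault appear atomic.
+ */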
+kern_return_t
+vm_map_enter_mem_object_prefault(
+ vm_map_t target_map,
+ vm_map_offset_t *address,
+ vm_map_size_t initial_size,
+ vm_map_offset_t mask,
+ int flags,
+ ipc_port_t port,
+ vm_object_offset_t offset,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ upl_page_list_ptr_t page_list,
+ unsigned int page_list_count)
+{
+ return vm_map_enter_mem_object_helper(target_map, address, initial_size, mask, flags,
+ port, offset, FALSE, cur_protection, max_protection,
+ VM_INHERIT_DEFAULT, page_list, page_list_count);
+}
+
+
+kern_return_t
+vm_map_enter_mem_object_control(
+ vm_map_t target_map,
+ vm_map_offset_t *address,
+ vm_map_size_t initial_size,
+ vm_map_offset_t mask,
+ int flags,
+ memory_object_control_t control,
+ vm_object_offset_t offset,
+ boolean_t copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
+{
+ vm_map_address_t map_addr;
+ vm_map_size_t map_size;
+ vm_object_t object;
+ vm_object_size_t size;
kern_return_t result;
memory_object_t pager;
vm_prot_t pager_prot;
initial_size == 0)
return KERN_INVALID_ARGUMENT;
- map_addr = vm_map_trunc_page(*address);
- map_size = vm_map_round_page(initial_size);
+ map_addr = vm_map_trunc_page(*address,
+ VM_MAP_PAGE_MASK(target_map));
+ map_size = vm_map_round_page(initial_size,
+ VM_MAP_PAGE_MASK(target_map));
size = vm_object_round_page(initial_size);
object = memory_object_control_to_vm_object(control);
kern_return_t kr;
vm_map_offset_t va, start, end, offset;
#if MACH_ASSERT
- vm_map_offset_t prev_addr;
+ vm_map_offset_t prev_addr = 0;
#endif /* MACH_ASSERT */
boolean_t anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
- if (!vm_allocate_cpm_enabled)
- return KERN_FAILURE;
-
if (size == 0) {
*addr = 0;
return KERN_SUCCESS;
if (anywhere)
*addr = vm_map_min(map);
else
- *addr = vm_map_trunc_page(*addr);
- size = vm_map_round_page(size);
+ *addr = vm_map_trunc_page(*addr,
+ VM_MAP_PAGE_MASK(map));
+ size = vm_map_round_page(size,
+ VM_MAP_PAGE_MASK(map));
/*
* LP64todo - cpm_allocate should probably allow
cpm_obj = vm_object_allocate((vm_object_size_t)size);
assert(cpm_obj != VM_OBJECT_NULL);
assert(cpm_obj->internal);
- assert(cpm_obj->size == (vm_object_size_t)size);
+ assert(cpm_obj->vo_size == (vm_object_size_t)size);
assert(cpm_obj->can_persist == FALSE);
assert(cpm_obj->pager_created == FALSE);
assert(cpm_obj->pageout == FALSE);
type_of_fault = DBG_ZERO_FILL_FAULT;
vm_fault_enter(m, pmap, va, VM_PROT_ALL, VM_PROT_WRITE,
- VM_PAGE_WIRED(m), FALSE, FALSE, FALSE,
+ VM_PAGE_WIRED(m), FALSE, FALSE, FALSE, 0, NULL,
&type_of_fault);
vm_object_unlock(cpm_obj);
m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
vm_object_unlock(cpm_obj);
if (m == VM_PAGE_NULL)
- panic("vm_allocate_cpm: obj 0x%x off 0x%x no page",
- cpm_obj, offset);
+ panic("vm_allocate_cpm: obj %p off 0x%llx no page",
+ cpm_obj, (uint64_t)offset);
assert(m->tabled);
assert(!m->busy);
assert(!m->wanted);
assert(!m->absent);
assert(!m->error);
assert(!m->cleaning);
+ assert(!m->laundry);
assert(!m->precious);
assert(!m->clustered);
if (offset != 0) {
if (m->phys_page != prev_addr + 1) {
- printf("start 0x%x end 0x%x va 0x%x\n",
- start, end, va);
- printf("obj 0x%x off 0x%x\n", cpm_obj, offset);
- printf("m 0x%x prev_address 0x%x\n", m,
- prev_addr);
+ printf("start 0x%llx end 0x%llx va 0x%llx\n",
+ (uint64_t)start, (uint64_t)end, (uint64_t)va);
+ printf("obj %p off 0x%llx\n", cpm_obj, (uint64_t)offset);
+ printf("m %p prev_address 0x%llx\n", m, (uint64_t)prev_addr);
panic("vm_allocate_cpm: pages not contig!");
}
}
assert(entry->is_sub_map);
assert(entry->object.sub_map != NULL);
+ assert(entry->use_pmap);
/*
* Query the platform for the optimal unnest range.
pmap_unnest(map->pmap,
entry->vme_start,
entry->vme_end - entry->vme_start);
- if ((map->mapped) && (map->ref_count)) {
+ if ((map->mapped_in_other_pmaps) && (map->ref_count)) {
/* clean up parent map/maps */
vm_map_submap_pmap_clean(
map, entry->vme_start,
entry->offset);
}
entry->use_pmap = FALSE;
+ if (entry->alias == VM_MEMORY_SHARED_PMAP) {
+ entry->alias = VM_MEMORY_UNSHARED_PMAP;
+ }
}
#endif /* NO_NESTED_PMAP */
* the specified address; if necessary,
* it splits the entry into two.
*/
-static void
+void
vm_map_clip_start(
vm_map_t map,
vm_map_entry_t entry,
vm_map_offset_t startaddr)
{
#ifndef NO_NESTED_PMAP
- if (entry->use_pmap &&
+ if (entry->is_sub_map &&
+ entry->use_pmap &&
startaddr >= entry->vme_start) {
vm_map_offset_t start_unnest, end_unnest;
* address.
*/
- new_entry = _vm_map_entry_create(map_header);
+ if (entry->map_aligned) {
+ assert(VM_MAP_PAGE_ALIGNED(start,
+ VM_MAP_HDR_PAGE_MASK(map_header)));
+ }
+
+ new_entry = _vm_map_entry_create(map_header, !map_header->entries_pageable);
vm_map_entry_copy_full(new_entry, entry);
new_entry->vme_end = start;
+ assert(new_entry->vme_start < new_entry->vme_end);
entry->offset += (start - entry->vme_start);
+ assert(start < entry->vme_end);
entry->vme_start = start;
_vm_map_store_entry_link(map_header, entry->vme_prev, new_entry);
* the specified address; if necessary,
* it splits the entry into two.
*/
-static void
+void
vm_map_clip_end(
vm_map_t map,
vm_map_entry_t entry,
endaddr = entry->vme_end;
}
#ifndef NO_NESTED_PMAP
- if (entry->use_pmap) {
+ if (entry->is_sub_map && entry->use_pmap) {
vm_map_offset_t start_unnest, end_unnest;
/*
* AFTER the specified entry
*/
- new_entry = _vm_map_entry_create(map_header);
+ if (entry->map_aligned) {
+ assert(VM_MAP_PAGE_ALIGNED(end,
+ VM_MAP_HDR_PAGE_MASK(map_header)));
+ }
+
+ new_entry = _vm_map_entry_create(map_header, !map_header->entries_pageable);
vm_map_entry_copy_full(new_entry, entry);
+ assert(entry->vme_start < end);
new_entry->vme_start = entry->vme_end = end;
new_entry->offset += (end - entry->vme_start);
+ assert(new_entry->vme_start < new_entry->vme_end);
_vm_map_store_entry_link(map_header, entry, new_entry);
*/
kern_return_t
vm_map_submap(
- vm_map_t map,
+ vm_map_t map,
vm_map_offset_t start,
vm_map_offset_t end,
- vm_map_t submap,
+ vm_map_t submap,
vm_map_offset_t offset,
#ifdef NO_NESTED_PMAP
__unused
#endif /* NO_NESTED_PMAP */
- boolean_t use_pmap)
+ boolean_t use_pmap)
{
vm_map_entry_t entry;
register kern_return_t result = KERN_INVALID_ARGUMENT;
return KERN_INVALID_ARGUMENT;
}
- assert(!entry->use_pmap); /* we don't want to unnest anything here */
vm_map_clip_start(map, entry, start);
vm_map_clip_end(map, entry, end);
entry->object.vm_object = VM_OBJECT_NULL;
vm_object_deallocate(object);
entry->is_sub_map = TRUE;
+ entry->use_pmap = FALSE;
entry->object.sub_map = submap;
vm_map_reference(submap);
- submap->mapped = TRUE;
+ if (submap->mapped_in_other_pmaps == FALSE &&
+ vm_map_pmap(submap) != PMAP_NULL &&
+ vm_map_pmap(submap) != vm_map_pmap(map)) {
+ /*
+ * This submap is being mapped in a map
+ * that uses a different pmap.
+ * Set its "mapped_in_other_pmaps" flag
+ * to indicate that we now need to
+ * remove mappings from all pmaps rather
+ * than just the submap's pmap.
+ */
+ submap->mapped_in_other_pmaps = TRUE;
+ }
#ifndef NO_NESTED_PMAP
if (use_pmap) {
/* nest if platform code will allow */
if(submap->pmap == NULL) {
- submap->pmap = pmap_create((vm_map_size_t) 0, FALSE);
+ ledger_t ledger = map->pmap->ledger;
+ submap->pmap = pmap_create(ledger,
+ (vm_map_size_t) 0, FALSE);
if(submap->pmap == PMAP_NULL) {
vm_map_unlock(map);
return(KERN_NO_SPACE);
}
}
-#if CONFIG_EMBEDDED
- if (new_prot & VM_PROT_WRITE) {
- if ((new_prot & VM_PROT_EXECUTE) && !(current->used_for_jit)) {
- printf("EMBEDDED: %s can't have both write and exec at the same time\n", __FUNCTION__);
- new_prot &= ~VM_PROT_EXECUTE;
- }
- }
-#endif
prev = current->vme_end;
current = current->vme_next;
vm_map_clip_end(map, current, end);
- assert(!current->use_pmap); /* clipping did unnest if needed */
+ if (current->is_sub_map) {
+ /* clipping did unnest if needed */
+ assert(!current->use_pmap);
+ }
old_prot = current->protection;
if (current->is_sub_map == FALSE && current->object.vm_object == VM_OBJECT_NULL){
current->object.vm_object = vm_object_allocate((vm_map_size_t)(current->vme_end - current->vme_start));
current->offset = 0;
+ assert(current->use_pmap);
}
current->needs_copy = TRUE;
current->max_protection |= VM_PROT_WRITE;
while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
vm_map_clip_end(map, entry, end);
- assert(!entry->use_pmap); /* clip did unnest if needed */
+ if (entry->is_sub_map) {
+ /* clip did unnest if needed */
+ assert(!entry->use_pmap);
+ }
entry->inheritance = new_inheritance;
register vm_prot_t access_type,
boolean_t user_wire,
pmap_t map_pmap,
- vm_map_offset_t pmap_addr)
+ vm_map_offset_t pmap_addr,
+ ppnum_t *physpage_p)
{
register vm_map_entry_t entry;
struct vm_map_entry *first_entry, tmp_entry;
thread_t cur_thread;
unsigned int last_timestamp;
vm_map_size_t size;
+ boolean_t wire_and_extract;
+
+ wire_and_extract = FALSE;
+ if (physpage_p != NULL) {
+ /*
+ * The caller wants the physical page number of the
+ * wired page. We return only one physical page number
+ * so this works for only one page at a time.
+ */
+ if ((end - start) != PAGE_SIZE) {
+ return KERN_INVALID_ARGUMENT;
+ }
+ wire_and_extract = TRUE;
+ *physpage_p = 0;
+ }
vm_map_lock(map);
if(map_pmap == NULL)
VM_MAP_RANGE_CHECK(map, start, end);
assert(page_aligned(start));
assert(page_aligned(end));
+ assert(VM_MAP_PAGE_ALIGNED(start, VM_MAP_PAGE_MASK(map)));
+ assert(VM_MAP_PAGE_ALIGNED(end, VM_MAP_PAGE_MASK(map)));
if (start == end) {
/* We wired what the caller asked for, zero pages */
vm_map_unlock(map);
vm_map_offset_t local_end;
pmap_t pmap;
+ if (wire_and_extract) {
+ /*
+ * Wiring would result in copy-on-write
+ * which would not be compatible with
+ * the sharing we have with the original
+ * provider of this memory.
+ */
+ rc = KERN_INVALID_ARGUMENT;
+ goto done;
+ }
+
vm_map_clip_start(map, entry, s);
vm_map_clip_end(map, entry, end);
s, user_wire);
return(KERN_FAILURE);
}
+ vm_object_unlock(object);
if(real_map != lookup_map)
vm_map_unlock(real_map);
vm_map_unlock_read(lookup_map);
vm_map_lock(map);
- vm_object_unlock(object);
/* we unlocked, so must re-lookup */
if (!vm_map_lookup_entry(map,
rc = vm_map_wire_nested(entry->object.sub_map,
sub_start, sub_end,
access_type,
- user_wire, pmap, pmap_addr);
+ user_wire, pmap, pmap_addr,
+ NULL);
vm_map_lock(map);
/*
* the appropriate wire reference count.
*/
if (entry->wired_count) {
+
+ if ((entry->protection & access_type) != access_type) {
+ /* found a protection problem */
+
+ /*
+ * XXX FBDP
+ * We should always return an error
+			 * in this case, but since we didn't
+ * enforce it before, let's do
+ * it only for the new "wire_and_extract"
+ * code path for now...
+ */
+ if (wire_and_extract) {
+ rc = KERN_PROTECTION_FAILURE;
+ goto done;
+ }
+ }
+
/*
* entry is already wired down, get our reference
* after clipping to our range.
if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS)
goto done;
+ if (wire_and_extract) {
+ vm_object_t object;
+ vm_object_offset_t offset;
+ vm_page_t m;
+
+ /*
+ * We don't have to "wire" the page again
+			 * but we still have to "extract" its
+ * physical page number, after some sanity
+ * checks.
+ */
+ assert((entry->vme_end - entry->vme_start)
+ == PAGE_SIZE);
+ assert(!entry->needs_copy);
+ assert(!entry->is_sub_map);
+ assert(entry->object.vm_object);
+ if (((entry->vme_end - entry->vme_start)
+ != PAGE_SIZE) ||
+ entry->needs_copy ||
+ entry->is_sub_map ||
+ entry->object.vm_object == VM_OBJECT_NULL) {
+ rc = KERN_INVALID_ARGUMENT;
+ goto done;
+ }
+
+ object = entry->object.vm_object;
+ offset = entry->offset;
+ /* need exclusive lock to update m->dirty */
+ if (entry->protection & VM_PROT_WRITE) {
+ vm_object_lock(object);
+ } else {
+ vm_object_lock_shared(object);
+ }
+ m = vm_page_lookup(object, offset);
+ assert(m != VM_PAGE_NULL);
+ assert(m->wire_count);
+ if (m != VM_PAGE_NULL && m->wire_count) {
+ *physpage_p = m->phys_page;
+ if (entry->protection & VM_PROT_WRITE) {
+ vm_object_lock_assert_exclusive(
+ m->object);
+ m->dirty = TRUE;
+ }
+ } else {
+ /* not already wired !? */
+ *physpage_p = 0;
+ }
+ vm_object_unlock(object);
+ }
+
/* map was not unlocked: no need to relookup */
entry = entry->vme_next;
s = entry->vme_start;
* This is aggressive, but once it's wired we can't move it.
*/
if (entry->needs_copy) {
+ if (wire_and_extract) {
+ /*
+ * We're supposed to share with the original
+ * provider so should not be "needs_copy"
+ */
+ rc = KERN_INVALID_ARGUMENT;
+ goto done;
+ }
+
vm_object_shadow(&entry->object.vm_object,
&entry->offset, size);
entry->needs_copy = FALSE;
} else if (entry->object.vm_object == VM_OBJECT_NULL) {
+ if (wire_and_extract) {
+ /*
+ * We're supposed to share with the original
+ * provider so should already have an object.
+ */
+ rc = KERN_INVALID_ARGUMENT;
+ goto done;
+ }
entry->object.vm_object = vm_object_allocate(size);
entry->offset = (vm_object_offset_t)0;
+ assert(entry->use_pmap);
}
vm_map_clip_start(map, entry, s);
if(map_pmap)
rc = vm_fault_wire(map,
- &tmp_entry, map_pmap, pmap_addr);
+ &tmp_entry, map_pmap, pmap_addr,
+ physpage_p);
else
rc = vm_fault_wire(map,
&tmp_entry, map->pmap,
- tmp_entry.vme_start);
+ tmp_entry.vme_start,
+ physpage_p);
if (!user_wire && cur_thread != THREAD_NULL)
thread_interrupt_level(interruptible_state);
if (rc != KERN_SUCCESS) {
/* undo what has been wired so far */
vm_map_unwire(map, start, s, user_wire);
+ if (physpage_p) {
+ *physpage_p = 0;
+ }
}
return rc;
kern_return_t kret;
kret = vm_map_wire_nested(map, start, end, access_type,
- user_wire, (pmap_t)NULL, 0);
+ user_wire, (pmap_t)NULL, 0, NULL);
+ return kret;
+}
+
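+/*
+ * vm_map_wire_and_extract:
+ *
+ * Wire the single map page starting at "start" and return its physical
+ * page number through "physpage_p" (0 on failure).
+ */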
+kern_return_t
+vm_map_wire_and_extract(
+ vm_map_t map,
+ vm_map_offset_t start,
+ vm_prot_t access_type,
+ boolean_t user_wire,
+ ppnum_t *physpage_p)
+{
+
+ kern_return_t kret;
+
+ kret = vm_map_wire_nested(map,
+ start,
+ start+VM_MAP_PAGE_SIZE(map),
+ access_type,
+ user_wire,
+ (pmap_t)NULL,
+ 0,
+ physpage_p);
+ if (kret != KERN_SUCCESS &&
+ physpage_p != NULL) {
+ *physpage_p = 0;
+ }
return kret;
}
VM_MAP_RANGE_CHECK(map, start, end);
assert(page_aligned(start));
assert(page_aligned(end));
+ assert(VM_MAP_PAGE_ALIGNED(start, VM_MAP_PAGE_MASK(map)));
+ assert(VM_MAP_PAGE_ALIGNED(end, VM_MAP_PAGE_MASK(map)));
if (start == end) {
/* We unwired what the caller asked for: zero pages */
e = entry->vme_end;
assert(page_aligned(s));
assert(page_aligned(e));
+ if (entry->map_aligned == TRUE) {
+ assert(VM_MAP_PAGE_ALIGNED(s, VM_MAP_PAGE_MASK(map)));
+ assert(VM_MAP_PAGE_ALIGNED(e, VM_MAP_PAGE_MASK(map)));
+ }
assert(entry->wired_count == 0);
assert(entry->user_wired_count == 0);
assert(!entry->permanent);
entry->offset);
} else {
- if((map->mapped) && (map->ref_count)
+ if((map->mapped_in_other_pmaps) && (map->ref_count)
&& (entry->object.vm_object != NULL)) {
vm_object_pmap_protect(
entry->object.vm_object,
entry->object.sub_map,
entry->offset);
} else {
- if((map->mapped) && (map->ref_count)
+ if((map->mapped_in_other_pmaps) && (map->ref_count)
&& (entry->object.vm_object != NULL)) {
vm_object_pmap_protect(
entry->object.vm_object,
*/
if (vm_map_lookup_entry(map, start, &first_entry)) {
entry = first_entry;
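+		/*
+		 * Sanity check for the kalloc_map: a deallocation must match
+		 * an existing entry exactly; anything else indicates a
+		 * corrupted or mismatched free.
+		 */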
+ if (map == kalloc_map &&
+ (entry->vme_start != start ||
+ entry->vme_end != end)) {
+ panic("vm_map_delete(%p,0x%llx,0x%llx): "
+ "mismatched entry %p [0x%llx:0x%llx]\n",
+ map,
+ (uint64_t)start,
+ (uint64_t)end,
+ entry,
+ (uint64_t)entry->vme_start,
+ (uint64_t)entry->vme_end);
+ }
	if (entry->superpage_size && (start & ~SUPERPAGE_MASK)) {
		/* extend request to whole entry */
		start = SUPERPAGE_ROUND_DOWN(start);
continue;
* any unnecessary unnesting in this case...
*/
} else {
+ if ((flags & VM_MAP_REMOVE_NO_MAP_ALIGN) &&
+ entry->map_aligned &&
+ !VM_MAP_PAGE_ALIGNED(
+ start,
+ VM_MAP_PAGE_MASK(map))) {
+ /*
+ * The entry will no longer be
+ * map-aligned after clipping
+ * and the caller said it's OK.
+ */
+ entry->map_aligned = FALSE;
+ }
+ if (map == kalloc_map) {
+ panic("vm_map_delete(%p,0x%llx,0x%llx):"
+ " clipping %p at 0x%llx\n",
+ map,
+ (uint64_t)start,
+ (uint64_t)end,
+ entry,
+ (uint64_t)start);
+ }
vm_map_clip_start(map, entry, start);
}
*/
SAVE_HINT_MAP_WRITE(map, entry->vme_prev);
} else {
+ if (map->pmap == kernel_pmap &&
+ map->ref_count != 0) {
+ panic("vm_map_delete(%p,0x%llx,0x%llx): "
+ "no map entry at 0x%llx\n",
+ map,
+ (uint64_t)start,
+ (uint64_t)end,
+ (uint64_t)start);
+ }
entry = first_entry->vme_next;
}
break;
* vm_map_simplify_entry(). We need to
* re-clip its start.
*/
+ if ((flags & VM_MAP_REMOVE_NO_MAP_ALIGN) &&
+ entry->map_aligned &&
+ !VM_MAP_PAGE_ALIGNED(s,
+ VM_MAP_PAGE_MASK(map))) {
+ /*
+ * The entry will no longer be map-aligned
+ * after clipping and the caller said it's OK.
+ */
+ entry->map_aligned = FALSE;
+ }
+ if (map == kalloc_map) {
+ panic("vm_map_delete(%p,0x%llx,0x%llx): "
+ "clipping %p at 0x%llx\n",
+ map,
+ (uint64_t)start,
+ (uint64_t)end,
+ entry,
+ (uint64_t)s);
+ }
vm_map_clip_start(map, entry, s);
}
if (entry->vme_end <= end) {
* to clip and possibly cause an unnecessary unnesting.
*/
} else {
+ if ((flags & VM_MAP_REMOVE_NO_MAP_ALIGN) &&
+ entry->map_aligned &&
+ !VM_MAP_PAGE_ALIGNED(end,
+ VM_MAP_PAGE_MASK(map))) {
+ /*
+ * The entry will no longer be map-aligned
+ * after clipping and the caller said it's OK.
+ */
+ entry->map_aligned = FALSE;
+ }
+ if (map == kalloc_map) {
+ panic("vm_map_delete(%p,0x%llx,0x%llx): "
+ "clipping %p at 0x%llx\n",
+ map,
+ (uint64_t)start,
+ (uint64_t)end,
+ entry,
+ (uint64_t)end);
+ }
vm_map_clip_end(map, entry, end);
}
* We do not clear the needs_wakeup flag,
* since we cannot tell if we were the only one.
*/
- vm_map_unlock(map);
return KERN_ABORTED;
}
* cannot tell if we were the
* only one.
*/
- vm_map_unlock(map);
return KERN_ABORTED;
}
pmap, pmap_addr);
} else {
+ if (tmp_entry.object.vm_object == kernel_object) {
+ pmap_protect_options(
+ map->pmap,
+ tmp_entry.vme_start,
+ tmp_entry.vme_end,
+ VM_PROT_NONE,
+ PMAP_OPTIONS_REMOVE,
+ NULL);
+ }
vm_fault_unwire(map, &tmp_entry,
tmp_entry.object.vm_object == kernel_object,
map->pmap, tmp_entry.vme_start);
(addr64_t)entry->vme_start,
entry->vme_end - entry->vme_start);
#endif /* NO_NESTED_PMAP */
- if ((map->mapped) && (map->ref_count)) {
+ if ((map->mapped_in_other_pmaps) && (map->ref_count)) {
/* clean up parent map/maps */
vm_map_submap_pmap_clean(
map, entry->vme_start,
entry->object.sub_map,
entry->offset);
}
- } else if (entry->object.vm_object != kernel_object) {
+ } else if (entry->object.vm_object != kernel_object &&
+ entry->object.vm_object != compressor_object) {
object = entry->object.vm_object;
- if((map->mapped) && (map->ref_count)) {
- vm_object_pmap_protect(
+ if ((map->mapped_in_other_pmaps) && (map->ref_count)) {
+ vm_object_pmap_protect_options(
object, entry->offset,
entry->vme_end - entry->vme_start,
PMAP_NULL,
entry->vme_start,
- VM_PROT_NONE);
- } else {
- pmap_remove(map->pmap,
- (addr64_t)entry->vme_start,
- (addr64_t)entry->vme_end);
+ VM_PROT_NONE,
+ PMAP_OPTIONS_REMOVE);
+ } else if ((entry->object.vm_object !=
+ VM_OBJECT_NULL) ||
+ (map->pmap == kernel_pmap)) {
+				/* Remove translations associated
+				 * with this range, unless the entry
+				 * has no object and this is not the
+				 * kernel map or a descendant: the
+				 * platform could potentially create
+				 * "backdoor" mappings invisible to
+				 * the VM, but objectless, non-kernel
+				 * ranges are expected not to have
+				 * such VM-invisible translations.
+				 */
+ pmap_remove_options(map->pmap,
+ (addr64_t)entry->vme_start,
+ (addr64_t)entry->vme_end,
+ PMAP_OPTIONS_REMOVE);
}
}
+ if (entry->iokit_acct) {
+ /* alternate accounting */
+ vm_map_iokit_unmapped_region(map,
+ (entry->vme_end -
+ entry->vme_start));
+ entry->iokit_acct = FALSE;
+ }
+
/*
* All pmap mappings for this map entry must have been
* cleared by now.
*/
+#if DEBUG
assert(vm_map_pmap_is_empty(map,
entry->vme_start,
entry->vme_end));
+#endif /* DEBUG */
next = entry->vme_next;
+
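+		/*
+		 * While deleting a range from the kernel map (still
+		 * referenced), the map entries must cover the range with no
+		 * holes; a gap here means the map is inconsistent, so panic.
+		 */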
+ if (map->pmap == kernel_pmap &&
+ map->ref_count != 0 &&
+ entry->vme_end < end &&
+ (next == vm_map_to_entry(map) ||
+ next->vme_start != entry->vme_end)) {
+ panic("vm_map_delete(%p,0x%llx,0x%llx): "
+ "hole after %p at 0x%llx\n",
+ map,
+ (uint64_t)start,
+ (uint64_t)end,
+ entry,
+ (uint64_t)entry->vme_end);
+ }
+
s = next->vme_start;
last_timestamp = map->timestamp;
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
+ /*
+ * For the zone_map, the kernel controls the allocation/freeing of memory.
+ * Any free to the zone_map should be within the bounds of the map and
+ * should free up memory. If the VM_MAP_RANGE_CHECK() silently converts a
+ * free to the zone_map into a no-op, there is a problem and we should
+ * panic.
+ */
+ if ((map == zone_map) && (start == end))
+ panic("Nothing being freed to the zone_map. start = end = %p\n", (void *)start);
result = vm_map_delete(map, start, end, flags, VM_MAP_NULL);
vm_map_unlock(map);
vm_map_entry_t entry = vm_map_copy_first_entry(copy);
vm_map_copy_entry_unlink(copy, entry);
- vm_object_deallocate(entry->object.vm_object);
+ if (entry->is_sub_map) {
+ vm_map_deallocate(entry->object.sub_map);
+ } else {
+ vm_object_deallocate(entry->object.vm_object);
+ }
vm_map_copy_entry_dispose(copy, entry);
}
break;
*/
new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+ new_copy->c_u.hdr.rb_head_store.rbh_root = (void*)(int)SKIP_RB_TREE;
*new_copy = *copy;
if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
* splitting entries in strange ways.
*/
- dst_end = vm_map_round_page(dst_addr + dst_size);
+ dst_end = vm_map_round_page(dst_addr + dst_size,
+ VM_MAP_PAGE_MASK(dst_map));
vm_map_lock(dst_map);
start_pass_1:
return(KERN_INVALID_ADDRESS);
}
- vm_map_clip_start(dst_map, tmp_entry, vm_map_trunc_page(dst_addr));
- assert(!tmp_entry->use_pmap); /* clipping did unnest if needed */
+ vm_map_clip_start(dst_map,
+ tmp_entry,
+ vm_map_trunc_page(dst_addr,
+ VM_MAP_PAGE_MASK(dst_map)));
+ if (tmp_entry->is_sub_map) {
+ /* clipping did unnest if needed */
+ assert(!tmp_entry->use_pmap);
+ }
for (entry = tmp_entry;;) {
vm_map_entry_t next;
if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) {
return(vm_map_copyout_kernel_buffer(
dst_map, &dst_addr,
- copy, TRUE));
+ copy, TRUE, discard_on_success));
}
/*
* splitting entries in strange ways.
*/
- if (!page_aligned(copy->size) ||
- !page_aligned (copy->offset) ||
- !page_aligned (dst_addr))
+ if (!VM_MAP_PAGE_ALIGNED(copy->size,
+ VM_MAP_PAGE_MASK(dst_map)) ||
+ !VM_MAP_PAGE_ALIGNED(copy->offset,
+ VM_MAP_PAGE_MASK(dst_map)) ||
+ !VM_MAP_PAGE_ALIGNED(dst_addr,
+ VM_MAP_PAGE_MASK(dst_map)))
{
aligned = FALSE;
- dst_end = vm_map_round_page(dst_addr + copy->size);
+ dst_end = vm_map_round_page(dst_addr + copy->size,
+ VM_MAP_PAGE_MASK(dst_map));
} else {
dst_end = dst_addr + copy->size;
}
vm_map_unlock(dst_map);
return(KERN_INVALID_ADDRESS);
}
- vm_map_clip_start(dst_map, tmp_entry, vm_map_trunc_page(dst_addr));
+ vm_map_clip_start(dst_map,
+ tmp_entry,
+ vm_map_trunc_page(dst_addr,
+ VM_MAP_PAGE_MASK(dst_map)));
for (entry = tmp_entry;;) {
vm_map_entry_t next = entry->vme_next;
/* destroyed after successful copy_overwrite */
copy = (vm_map_copy_t)
zalloc(vm_map_copy_zone);
+ copy->c_u.hdr.rb_head_store.rbh_root = (void*)(int)SKIP_RB_TREE;
vm_map_copy_first_entry(copy) =
vm_map_copy_last_entry(copy) =
vm_map_copy_to_entry(copy);
copy->type = VM_MAP_COPY_ENTRY_LIST;
copy->offset = new_offset;
+ /*
+ * XXX FBDP
+ * this does not seem to deal with
+ * the VM map store (R&B tree)
+ */
+
total_size -= copy_size;
copy_size = 0;
/* put back remainder of copy in container */
* bits of the region in this case !
*/
/* ALWAYS UNLOCKS THE dst_map MAP */
- if ((kr = vm_map_copy_overwrite_unaligned( dst_map,
- tmp_entry, copy, base_addr)) != KERN_SUCCESS) {
+ kr = vm_map_copy_overwrite_unaligned(
+ dst_map,
+ tmp_entry,
+ copy,
+ base_addr,
+ discard_on_success);
+ if (kr != KERN_SUCCESS) {
if(next_copy != NULL) {
copy->cpy_hdr.nentries +=
remaining_entries;
break;
}
}
- vm_map_clip_start(dst_map, tmp_entry, vm_map_trunc_page(base_addr));
+ vm_map_clip_start(dst_map,
+ tmp_entry,
+ vm_map_trunc_page(base_addr,
+ VM_MAP_PAGE_MASK(dst_map)));
entry = tmp_entry;
} /* while */
goto blunt_copy;
}
- if ((dst_addr & PAGE_MASK) != (copy->offset & PAGE_MASK)) {
+ if ((dst_addr & VM_MAP_PAGE_MASK(dst_map)) !=
+ (copy->offset & VM_MAP_PAGE_MASK(dst_map))) {
/*
* Incompatible mis-alignment of source and destination...
*/
*/
if (!page_aligned(dst_addr)) {
head_addr = dst_addr;
- head_size = PAGE_SIZE - (copy->offset & PAGE_MASK);
+ head_size = (VM_MAP_PAGE_SIZE(dst_map) -
+ (copy->offset & VM_MAP_PAGE_MASK(dst_map)));
}
if (!page_aligned(copy->offset + copy->size)) {
/*
* Do an aligned copy up to the last page and
* then an unaligned copy for the remaining bytes.
*/
- tail_size = (copy->offset + copy->size) & PAGE_MASK;
+ tail_size = ((copy->offset + copy->size) &
+ VM_MAP_PAGE_MASK(dst_map));
tail_addr = dst_addr + copy->size - tail_size;
}
* Extract "head_copy" out of "copy".
*/
head_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+ head_copy->c_u.hdr.rb_head_store.rbh_root = (void*)(int)SKIP_RB_TREE;
vm_map_copy_first_entry(head_copy) =
vm_map_copy_to_entry(head_copy);
vm_map_copy_last_entry(head_copy) =
* Extract "tail_copy" out of "copy".
*/
tail_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+ tail_copy->c_u.hdr.rb_head_store.rbh_root = (void*)(int)SKIP_RB_TREE;
vm_map_copy_first_entry(tail_copy) =
vm_map_copy_to_entry(tail_copy);
vm_map_copy_last_entry(tail_copy) =
vm_map_t dst_map,
vm_map_entry_t entry,
vm_map_copy_t copy,
- vm_map_offset_t start)
+ vm_map_offset_t start,
+ boolean_t discard_on_success)
{
- vm_map_entry_t copy_entry = vm_map_copy_first_entry(copy);
+ vm_map_entry_t copy_entry;
+ vm_map_entry_t copy_entry_next;
vm_map_version_t version;
vm_object_t dst_object;
vm_object_offset_t dst_offset;
amount_left;
kern_return_t kr = KERN_SUCCESS;
+
+ copy_entry = vm_map_copy_first_entry(copy);
+
vm_map_lock_write_to_read(dst_map);
src_offset = copy->offset - vm_object_trunc_page(copy->offset);
entry->vme_end - entry->vme_start);
entry->object.vm_object = dst_object;
entry->offset = 0;
+ assert(entry->use_pmap);
vm_map_lock_write_to_read(dst_map);
}
/*
/*
* all done with this copy entry, dispose.
*/
- vm_map_copy_entry_unlink(copy, copy_entry);
- vm_object_deallocate(copy_entry->object.vm_object);
- vm_map_copy_entry_dispose(copy, copy_entry);
+ copy_entry_next = copy_entry->vme_next;
+
+ if (discard_on_success) {
+ vm_map_copy_entry_unlink(copy, copy_entry);
+ assert(!copy_entry->is_sub_map);
+ vm_object_deallocate(
+ copy_entry->object.vm_object);
+ vm_map_copy_entry_dispose(copy, copy_entry);
+ }
- if ((copy_entry = vm_map_copy_first_entry(copy))
- == vm_map_copy_to_entry(copy) && amount_left) {
+ if (copy_entry_next == vm_map_copy_to_entry(copy) &&
+ amount_left) {
/*
* not finished copying but run out of source
*/
return KERN_INVALID_ADDRESS;
}
+
+ copy_entry = copy_entry_next;
+
src_offset = 0;
}
* to the above pass and make sure that no wiring is involved.
*/
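+
+/*
+ * Counters tracking how often the aligned-overwrite path below bypasses
+ * its virtual-copy optimization and falls back to the slow (physical) copy.
+ */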
+int vm_map_copy_overwrite_aligned_src_not_internal = 0;
+int vm_map_copy_overwrite_aligned_src_not_symmetric = 0;
+int vm_map_copy_overwrite_aligned_src_large = 0;
+
static kern_return_t
vm_map_copy_overwrite_aligned(
vm_map_t dst_map,
copy_size = (copy_entry->vme_end - copy_entry->vme_start);
entry = tmp_entry;
- assert(!entry->use_pmap); /* unnested when clipped earlier */
+ if (entry->is_sub_map) {
+ /* unnested when clipped earlier */
+ assert(!entry->use_pmap);
+ }
if (entry == vm_map_to_entry(dst_map)) {
vm_map_unlock(dst_map);
return KERN_INVALID_ADDRESS;
*/
if (copy_size < size) {
+ if (entry->map_aligned &&
+ !VM_MAP_PAGE_ALIGNED(entry->vme_start + copy_size,
+ VM_MAP_PAGE_MASK(dst_map))) {
+ /* no longer map-aligned */
+ entry->map_aligned = FALSE;
+ }
vm_map_clip_end(dst_map, entry, entry->vme_start + copy_size);
size = copy_size;
}
continue;
}
+#define __TRADEOFF1_OBJ_SIZE (64 * 1024 * 1024) /* 64 MB */
+#define __TRADEOFF1_COPY_SIZE (128 * 1024) /* 128 KB */
+ if (copy_entry->object.vm_object != VM_OBJECT_NULL &&
+ copy_entry->object.vm_object->vo_size >= __TRADEOFF1_OBJ_SIZE &&
+ copy_size <= __TRADEOFF1_COPY_SIZE) {
+ /*
+ * Virtual vs. Physical copy tradeoff #1.
+ *
+ * Copying only a few pages out of a large
+ * object: do a physical copy instead of
+ * a virtual copy, to avoid possibly keeping
+ * the entire large object alive because of
+ * those few copy-on-write pages.
+ */
+ vm_map_copy_overwrite_aligned_src_large++;
+ goto slow_copy;
+ }
+
+ if (entry->alias >= VM_MEMORY_MALLOC &&
+ entry->alias <= VM_MEMORY_MALLOC_LARGE_REUSED) {
+ vm_object_t new_object, new_shadow;
+
+ /*
+ * We're about to map something over a mapping
+ * established by malloc()...
+ */
+ new_object = copy_entry->object.vm_object;
+ if (new_object != VM_OBJECT_NULL) {
+ vm_object_lock_shared(new_object);
+ }
+ while (new_object != VM_OBJECT_NULL &&
+ !new_object->true_share &&
+ new_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC &&
+ new_object->internal) {
+ new_shadow = new_object->shadow;
+ if (new_shadow == VM_OBJECT_NULL) {
+ break;
+ }
+ vm_object_lock_shared(new_shadow);
+ vm_object_unlock(new_object);
+ new_object = new_shadow;
+ }
+ if (new_object != VM_OBJECT_NULL) {
+ if (!new_object->internal) {
+ /*
+ * The new mapping is backed
+ * by an external object. We
+ * don't want malloc'ed memory
+ * to be replaced with such a
+ * non-anonymous mapping, so
+ * let's go off the optimized
+ * path...
+ */
+ vm_map_copy_overwrite_aligned_src_not_internal++;
+ vm_object_unlock(new_object);
+ goto slow_copy;
+ }
+ if (new_object->true_share ||
+ new_object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC) {
+ /*
+ * Same if there's a "true_share"
+ * object in the shadow chain, or
+ * an object with a non-default
+ * (SYMMETRIC) copy strategy.
+ */
+ vm_map_copy_overwrite_aligned_src_not_symmetric++;
+ vm_object_unlock(new_object);
+ goto slow_copy;
+ }
+ vm_object_unlock(new_object);
+ }
+ /*
+ * The new mapping is still backed by
+ * anonymous (internal) memory, so it's
+ * OK to substitute it for the original
+ * malloc() mapping.
+ */
+ }
+
if (old_object != VM_OBJECT_NULL) {
if(entry->is_sub_map) {
if(entry->use_pmap) {
(addr64_t)entry->vme_start,
entry->vme_end - entry->vme_start);
#endif /* NO_NESTED_PMAP */
- if(dst_map->mapped) {
+ if(dst_map->mapped_in_other_pmaps) {
/* clean up parent */
/* map/maps */
vm_map_submap_pmap_clean(
vm_map_deallocate(
entry->object.sub_map);
} else {
- if(dst_map->mapped) {
- vm_object_pmap_protect(
+ if(dst_map->mapped_in_other_pmaps) {
+ vm_object_pmap_protect_options(
entry->object.vm_object,
entry->offset,
entry->vme_end
- entry->vme_start,
PMAP_NULL,
entry->vme_start,
- VM_PROT_NONE);
+ VM_PROT_NONE,
+ PMAP_OPTIONS_REMOVE);
} else {
- pmap_remove(dst_map->pmap,
- (addr64_t)(entry->vme_start),
- (addr64_t)(entry->vme_end));
+ pmap_remove_options(
+ dst_map->pmap,
+ (addr64_t)(entry->vme_start),
+ (addr64_t)(entry->vme_end),
+ PMAP_OPTIONS_REMOVE);
}
vm_object_deallocate(old_object);
}
tmp_entry = tmp_entry->vme_next;
} else {
vm_map_version_t version;
- vm_object_t dst_object = entry->object.vm_object;
- vm_object_offset_t dst_offset = entry->offset;
+ vm_object_t dst_object;
+ vm_object_offset_t dst_offset;
kern_return_t r;
+ slow_copy:
+ if (entry->needs_copy) {
+ vm_object_shadow(&entry->object.vm_object,
+ &entry->offset,
+ (entry->vme_end -
+ entry->vme_start));
+ entry->needs_copy = FALSE;
+ }
+
+ dst_object = entry->object.vm_object;
+ dst_offset = entry->offset;
+
/*
* Take an object reference, and record
* the map version information so that the
* map can be safely unlocked.
*/
+ if (dst_object == VM_OBJECT_NULL) {
+ /*
+ * We would usually have just taken the
+ * optimized path above if the destination
+ * object has not been allocated yet. But we
+ * now disable that optimization if the copy
+ * entry's object is not backed by anonymous
+ * memory to avoid replacing malloc'ed
+ * (i.e. re-usable) anonymous memory with a
+ * not-so-anonymous mapping.
+ * So we have to handle this case here and
+ * allocate a new VM object for this map entry.
+ */
+ dst_object = vm_object_allocate(
+ entry->vme_end - entry->vme_start);
+ dst_offset = 0;
+ entry->object.vm_object = dst_object;
+ entry->offset = dst_offset;
+ assert(entry->use_pmap);
+
+ }
+
vm_object_reference(dst_object);
/* account for unlock bumping up timestamp */
start += copy_size;
vm_map_lock(dst_map);
- if (version.main_timestamp == dst_map->timestamp) {
+ if (version.main_timestamp == dst_map->timestamp &&
+ copy_size != 0) {
/* We can safely use saved tmp_entry value */
+ if (tmp_entry->map_aligned &&
+ !VM_MAP_PAGE_ALIGNED(
+ start,
+ VM_MAP_PAGE_MASK(dst_map))) {
+ /* no longer map-aligned */
+ tmp_entry->map_aligned = FALSE;
+ }
vm_map_clip_end(dst_map, tmp_entry, start);
tmp_entry = tmp_entry->vme_next;
} else {
vm_map_unlock(dst_map);
return(KERN_INVALID_ADDRESS);
}
+ if (tmp_entry->map_aligned &&
+ !VM_MAP_PAGE_ALIGNED(
+ start,
+ VM_MAP_PAGE_MASK(dst_map))) {
+ /* no longer map-aligned */
+ tmp_entry->map_aligned = FALSE;
+ }
vm_map_clip_start(dst_map, tmp_entry, start);
}
}
return kr;
}
if (src_destroy) {
- (void) vm_map_remove(src_map, vm_map_trunc_page(src_addr),
- vm_map_round_page(src_addr + len),
- VM_MAP_REMOVE_INTERRUPTIBLE |
- VM_MAP_REMOVE_WAIT_FOR_KWIRE |
- (src_map == kernel_map) ?
- VM_MAP_REMOVE_KUNWIRE : 0);
+ (void) vm_map_remove(
+ src_map,
+ vm_map_trunc_page(src_addr,
+ VM_MAP_PAGE_MASK(src_map)),
+ vm_map_round_page(src_addr + len,
+ VM_MAP_PAGE_MASK(src_map)),
+ (VM_MAP_REMOVE_INTERRUPTIBLE |
+ VM_MAP_REMOVE_WAIT_FOR_KWIRE |
+ (src_map == kernel_map) ? VM_MAP_REMOVE_KUNWIRE : 0));
}
*copy_result = copy;
return KERN_SUCCESS;
vm_map_t map,
vm_map_address_t *addr, /* IN/OUT */
vm_map_copy_t copy,
- boolean_t overwrite)
+ boolean_t overwrite,
+ boolean_t consume_on_success)
{
kern_return_t kr = KERN_SUCCESS;
thread_t thread = current_thread();
*addr = 0;
kr = vm_map_enter(map,
addr,
- vm_map_round_page(copy->size),
+ vm_map_round_page(copy->size,
+ VM_MAP_PAGE_MASK(map)),
(vm_map_offset_t) 0,
VM_FLAGS_ANYWHERE,
VM_OBJECT_NULL,
/*
* Deallocate the space we allocated in the target map.
*/
- (void) vm_map_remove(map,
- vm_map_trunc_page(*addr),
- vm_map_round_page(*addr +
- vm_map_round_page(copy->size)),
- VM_MAP_NO_FLAGS);
+ (void) vm_map_remove(
+ map,
+ vm_map_trunc_page(*addr,
+ VM_MAP_PAGE_MASK(map)),
+ vm_map_round_page((*addr +
+ vm_map_round_page(copy->size,
+ VM_MAP_PAGE_MASK(map))),
+ VM_MAP_PAGE_MASK(map)),
+ VM_MAP_NO_FLAGS);
*addr = 0;
}
} else {
		/* copy was successful, discard the copy structure */
- kfree(copy, copy->cpy_kalloc_size);
+ if (consume_on_success) {
+ kfree(copy, copy->cpy_kalloc_size);
+ }
}
return kr;
zfree(vm_map_copy_zone, copy); \
MACRO_END
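+
+/*
+ * Routine: vm_map_copy_remap
+ *
+ * Description: Insert copies of the entries of "copy" into "map",
+ * after "where", with their addresses shifted by "adjustment" and
+ * with the given protections and inheritance.  The copy itself is
+ * not consumed; each new map entry takes its own reference on the
+ * backing VM object or submap.
+ */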
+void
+vm_map_copy_remap(
+ vm_map_t map,
+ vm_map_entry_t where,
+ vm_map_copy_t copy,
+ vm_map_offset_t adjustment,
+ vm_prot_t cur_prot,
+ vm_prot_t max_prot,
+ vm_inherit_t inheritance)
+{
+ vm_map_entry_t copy_entry, new_entry;
+
+ for (copy_entry = vm_map_copy_first_entry(copy);
+ copy_entry != vm_map_copy_to_entry(copy);
+ copy_entry = copy_entry->vme_next) {
+ /* get a new VM map entry for the map */
+ new_entry = vm_map_entry_create(map,
+ !map->hdr.entries_pageable);
+ /* copy the "copy entry" to the new entry */
+ vm_map_entry_copy(new_entry, copy_entry);
+ /* adjust "start" and "end" */
+ new_entry->vme_start += adjustment;
+ new_entry->vme_end += adjustment;
+ /* clear some attributes */
+ new_entry->inheritance = inheritance;
+ new_entry->protection = cur_prot;
+ new_entry->max_protection = max_prot;
+ new_entry->behavior = VM_BEHAVIOR_DEFAULT;
+ /* take an extra reference on the entry's "object" */
+ if (new_entry->is_sub_map) {
+ assert(!new_entry->use_pmap); /* not nested */
+ vm_map_lock(new_entry->object.sub_map);
+ vm_map_reference(new_entry->object.sub_map);
+ vm_map_unlock(new_entry->object.sub_map);
+ } else {
+ vm_object_reference(new_entry->object.vm_object);
+ }
+ /* insert the new entry in the map */
+ vm_map_store_entry_link(map, where, new_entry);
+ /* continue inserting the "copy entries" after the new entry */
+ where = new_entry;
+ }
+}
+
/*
* Routine: vm_map_copyout
*
* If successful, consumes the copy object.
* Otherwise, the caller is responsible for it.
*/
+
kern_return_t
vm_map_copyout(
vm_map_t dst_map,
vm_map_address_t *dst_addr, /* OUT */
vm_map_copy_t copy)
+{
+ return vm_map_copyout_internal(dst_map, dst_addr, copy,
+ TRUE, /* consume_on_success */
+ VM_PROT_DEFAULT,
+ VM_PROT_ALL,
+ VM_INHERIT_DEFAULT);
+}
+
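+/*
+ * Routine: vm_map_copyout_internal
+ *
+ * Description: Like vm_map_copyout(), but the caller chooses whether
+ * the copy is consumed on success ("consume_on_success") and which
+ * protections and inheritance the new mappings get.  When the copy
+ * is not consumed, its entries are duplicated into the destination
+ * map by vm_map_copy_remap() instead of being linked in directly.
+ */
+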
+kern_return_t
+vm_map_copyout_internal(
+ vm_map_t dst_map,
+ vm_map_address_t *dst_addr, /* OUT */
+ vm_map_copy_t copy,
+ boolean_t consume_on_success,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
{
vm_map_size_t size;
vm_map_size_t adjustment;
vm_map_offset_t start;
vm_object_offset_t vm_copy_start;
vm_map_entry_t last;
- register
vm_map_entry_t entry;
/*
vm_object_offset_t offset;
offset = vm_object_trunc_page(copy->offset);
- size = vm_map_round_page(copy->size +
- (vm_map_size_t)(copy->offset - offset));
+ size = vm_map_round_page((copy->size +
+ (vm_map_size_t)(copy->offset -
+ offset)),
+ VM_MAP_PAGE_MASK(dst_map));
*dst_addr = 0;
kr = vm_map_enter(dst_map, dst_addr, size,
(vm_map_offset_t) 0, VM_FLAGS_ANYWHERE,
return(kr);
/* Account for non-pagealigned copy object */
*dst_addr += (vm_map_offset_t)(copy->offset - offset);
- zfree(vm_map_copy_zone, copy);
+ if (consume_on_success)
+ zfree(vm_map_copy_zone, copy);
return(KERN_SUCCESS);
}
*/
if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) {
- return(vm_map_copyout_kernel_buffer(dst_map, dst_addr,
- copy, FALSE));
+ return vm_map_copyout_kernel_buffer(dst_map, dst_addr,
+ copy, FALSE,
+ consume_on_success);
}
+
/*
* Find space for the data
*/
- vm_copy_start = vm_object_trunc_page(copy->offset);
- size = vm_map_round_page((vm_map_size_t)copy->offset + copy->size)
+ vm_copy_start = vm_map_trunc_page((vm_map_size_t)copy->offset,
+ VM_MAP_COPY_PAGE_MASK(copy));
+ size = vm_map_round_page((vm_map_size_t)copy->offset + copy->size,
+ VM_MAP_COPY_PAGE_MASK(copy))
- vm_copy_start;
+
StartAgain: ;
vm_map_lock(dst_map);
assert(first_free_is_valid(dst_map));
start = ((last = dst_map->first_free) == vm_map_to_entry(dst_map)) ?
vm_map_min(dst_map) : last->vme_end;
+ start = vm_map_round_page(start,
+ VM_MAP_PAGE_MASK(dst_map));
}
while (TRUE) {
last = next;
start = last->vme_end;
+ start = vm_map_round_page(start,
+ VM_MAP_PAGE_MASK(dst_map));
+ }
+
+ adjustment = start - vm_copy_start;
+ if (! consume_on_success) {
+ /*
+ * We're not allowed to consume "copy", so we'll have to
+ * copy its map entries into the destination map below.
+ * No need to re-allocate map entries from the correct
+ * (pageable or not) zone, since we'll get new map entries
+ * during the transfer.
+		 * We'll also adjust the map entries' "start" and "end"
+ * during the transfer, to keep "copy"'s entries consistent
+ * with its "offset".
+ */
+ goto after_adjustments;
}
/*
/*
* Find the zone that the copies were allocated from
*/
- old_zone = (copy->cpy_hdr.entries_pageable)
- ? vm_map_entry_zone
- : vm_map_kentry_zone;
+
entry = vm_map_copy_first_entry(copy);
/*
* Copy each entry.
*/
while (entry != vm_map_copy_to_entry(copy)) {
- new = vm_map_copy_entry_create(copy);
+ new = vm_map_copy_entry_create(copy, !copy->cpy_hdr.entries_pageable);
vm_map_entry_copy_full(new, entry);
- new->use_pmap = FALSE; /* clr address space specifics */
+ assert(!new->iokit_acct);
+ if (new->is_sub_map) {
+ /* clr address space specifics */
+ new->use_pmap = FALSE;
+ }
vm_map_copy_entry_link(copy,
vm_map_copy_last_entry(copy),
new);
next = entry->vme_next;
+ old_zone = entry->from_reserved_zone ? vm_map_entry_reserved_zone : vm_map_entry_zone;
zfree(old_zone, entry);
entry = next;
}
* reset the region attributes.
*/
- adjustment = start - vm_copy_start;
for (entry = vm_map_copy_first_entry(copy);
entry != vm_map_copy_to_entry(copy);
entry = entry->vme_next) {
+ if (VM_MAP_PAGE_SHIFT(dst_map) == PAGE_SHIFT) {
+ /*
+ * We're injecting this copy entry into a map that
+ * has the standard page alignment, so clear
+ * "map_aligned" (which might have been inherited
+ * from the original map entry).
+ */
+ entry->map_aligned = FALSE;
+ }
+
entry->vme_start += adjustment;
entry->vme_end += adjustment;
+ if (entry->map_aligned) {
+ assert(VM_MAP_PAGE_ALIGNED(entry->vme_start,
+ VM_MAP_PAGE_MASK(dst_map)));
+ assert(VM_MAP_PAGE_ALIGNED(entry->vme_end,
+ VM_MAP_PAGE_MASK(dst_map)));
+ }
+
entry->inheritance = VM_INHERIT_DEFAULT;
entry->protection = VM_PROT_DEFAULT;
entry->max_protection = VM_PROT_ALL;
type_of_fault = DBG_CACHE_HIT_FAULT;
vm_fault_enter(m, dst_map->pmap, va, prot, prot,
- VM_PAGE_WIRED(m), FALSE, FALSE, FALSE,
- &type_of_fault);
+ VM_PAGE_WIRED(m), FALSE, FALSE,
+ FALSE, entry->alias,
+ ((entry->iokit_acct ||
+ (!entry->is_sub_map &&
+ !entry->use_pmap))
+ ? PMAP_OPTIONS_ALT_ACCT
+ : 0),
+ NULL, &type_of_fault);
vm_object_unlock(object);
}
}
+after_adjustments:
+
/*
* Correct the page alignment for the result
*/
* Update the hints and the map size
*/
- SAVE_HINT_MAP_WRITE(dst_map, vm_map_copy_last_entry(copy));
-
+ if (consume_on_success) {
+ SAVE_HINT_MAP_WRITE(dst_map, vm_map_copy_last_entry(copy));
+ } else {
+ SAVE_HINT_MAP_WRITE(dst_map, last);
+ }
+
dst_map->size += size;
/*
* Link in the copy
*/
- vm_map_copy_insert(dst_map, last, copy);
+ if (consume_on_success) {
+ vm_map_copy_insert(dst_map, last, copy);
+ } else {
+ vm_map_copy_remap(dst_map, last, copy, adjustment,
+ cur_protection, max_protection,
+ inheritance);
+ }
vm_map_unlock(dst_map);
register
vm_map_copy_t copy; /* Resulting copy */
- vm_map_address_t copy_addr;
+ vm_map_address_t copy_addr;
+ vm_map_size_t copy_size;
/*
* Check for copies of zero bytes.
/*
* Compute (page aligned) start and end of region
*/
- src_start = vm_map_trunc_page(src_addr);
- src_end = vm_map_round_page(src_end);
+ src_start = vm_map_trunc_page(src_addr,
+ VM_MAP_PAGE_MASK(src_map));
+ src_end = vm_map_round_page(src_end,
+ VM_MAP_PAGE_MASK(src_map));
XPR(XPR_VM_MAP, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", src_map, src_addr, len, src_destroy, 0);
*/
copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+ copy->c_u.hdr.rb_head_store.rbh_root = (void*)(int)SKIP_RB_TREE;
vm_map_copy_first_entry(copy) =
vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
copy->type = VM_MAP_COPY_ENTRY_LIST;
copy->cpy_hdr.nentries = 0;
copy->cpy_hdr.entries_pageable = TRUE;
+#if 00
+ copy->cpy_hdr.page_shift = src_map->hdr.page_shift;
+#else
+ /*
+ * The copy entries can be broken down for a variety of reasons,
+ * so we can't guarantee that they will remain map-aligned...
+ * Will need to adjust the first copy_entry's "vme_start" and
+ * the last copy_entry's "vme_end" to be rounded to PAGE_MASK
+ * rather than the original map's alignment.
+ */
+ copy->cpy_hdr.page_shift = PAGE_SHIFT;
+#endif
vm_map_store_init( &(copy->cpy_hdr) );
copy->offset = src_addr;
copy->size = len;
- new_entry = vm_map_copy_entry_create(copy);
+ new_entry = vm_map_copy_entry_create(copy, !copy->cpy_hdr.entries_pageable);
#define RETURN(x) \
MACRO_BEGIN \
vm_map_lock(src_map);
- if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry))
+ /*
+ * Lookup the original "src_addr" rather than the truncated
+ * "src_start", in case "src_start" falls in a non-map-aligned
+ * map entry *before* the map entry that contains "src_addr"...
+ */
+ if (!vm_map_lookup_entry(src_map, src_addr, &tmp_entry))
RETURN(KERN_INVALID_ADDRESS);
if(!tmp_entry->is_sub_map) {
+ /*
+ * ... but clip to the map-rounded "src_start" rather than
+ * "src_addr" to preserve map-alignment. We'll adjust the
+ * first copy entry at the end, if needed.
+ */
vm_map_clip_start(src_map, tmp_entry, src_start);
}
+ if (src_start < tmp_entry->vme_start) {
+ /*
+ * Move "src_start" up to the start of the
+ * first map entry to copy.
+ */
+ src_start = tmp_entry->vme_start;
+ }
/* set for later submap fix-up */
copy_addr = src_start;
version.main_timestamp = src_map->timestamp;
vm_map_unlock(src_map);
- new_entry = vm_map_copy_entry_create(copy);
+ new_entry = vm_map_copy_entry_create(copy, !copy->cpy_hdr.entries_pageable);
vm_map_lock(src_map);
if ((version.main_timestamp + 1) != src_map->timestamp) {
was_wired = (src_entry->wired_count != 0);
vm_map_entry_copy(new_entry, src_entry);
- new_entry->use_pmap = FALSE; /* clr address space specifics */
+ if (new_entry->is_sub_map) {
+ /* clr address space specifics */
+ new_entry->use_pmap = FALSE;
+ }
/*
* Attempt non-blocking copy-on-write optimizations.
new_entry->object.vm_object = new_object;
new_entry->needs_copy = TRUE;
+ assert(!new_entry->iokit_acct);
+ assert(new_object->purgable == VM_PURGABLE_DENY);
+ new_entry->use_pmap = TRUE;
result = KERN_SUCCESS;
} else {
*/
if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) {
+ if (result != KERN_MEMORY_RESTART_COPY) {
+ vm_object_deallocate(new_entry->object.vm_object);
+ new_entry->object.vm_object = VM_OBJECT_NULL;
+ assert(!new_entry->iokit_acct);
+ new_entry->use_pmap = TRUE;
+ }
RETURN(KERN_INVALID_ADDRESS);
}
((src_entry->max_protection & VM_PROT_READ) == 0))
goto VerificationFailed;
- if (src_entry->vme_end < new_entry->vme_end)
- src_size = (new_entry->vme_end = src_entry->vme_end) - src_start;
+ if (src_entry->vme_end < new_entry->vme_end) {
+ assert(VM_MAP_PAGE_ALIGNED(src_entry->vme_end,
+ VM_MAP_COPY_PAGE_MASK(copy)));
+ new_entry->vme_end = src_entry->vme_end;
+ src_size = new_entry->vme_end - src_start;
+ }
if ((src_entry->object.vm_object != src_object) ||
(src_entry->offset != src_offset) ) {
src_start = new_entry->vme_end;
new_entry = VM_MAP_ENTRY_NULL;
while ((src_start >= src_end) && (src_end != 0)) {
- if (src_map != base_map) {
- submap_map_t *ptr;
-
- ptr = parent_maps;
- assert(ptr != NULL);
- parent_maps = parent_maps->next;
-
- /* fix up the damage we did in that submap */
- vm_map_simplify_range(src_map,
- src_base,
- src_end);
-
- vm_map_unlock(src_map);
- vm_map_deallocate(src_map);
- vm_map_lock(ptr->parent_map);
- src_map = ptr->parent_map;
- src_base = ptr->base_start;
- src_start = ptr->base_start + ptr->base_len;
- src_end = ptr->base_end;
- if ((src_end > src_start) &&
- !vm_map_lookup_entry(
- src_map, src_start, &tmp_entry))
- RETURN(KERN_INVALID_ADDRESS);
- kfree(ptr, sizeof(submap_map_t));
- if(parent_maps == NULL)
- map_share = FALSE;
- src_entry = tmp_entry->vme_prev;
- } else
+ submap_map_t *ptr;
+
+ if (src_map == base_map) {
+ /* back to the top */
break;
+ }
+
+ ptr = parent_maps;
+ assert(ptr != NULL);
+ parent_maps = parent_maps->next;
+
+ /* fix up the damage we did in that submap */
+ vm_map_simplify_range(src_map,
+ src_base,
+ src_end);
+
+ vm_map_unlock(src_map);
+ vm_map_deallocate(src_map);
+ vm_map_lock(ptr->parent_map);
+ src_map = ptr->parent_map;
+ src_base = ptr->base_start;
+ src_start = ptr->base_start + ptr->base_len;
+ src_end = ptr->base_end;
+ if (!vm_map_lookup_entry(src_map,
+ src_start,
+ &tmp_entry) &&
+ (src_end > src_start)) {
+ RETURN(KERN_INVALID_ADDRESS);
+ }
+ kfree(ptr, sizeof(submap_map_t));
+ if (parent_maps == NULL)
+ map_share = FALSE;
+ src_entry = tmp_entry->vme_prev;
+ }
+
+ if ((VM_MAP_PAGE_SHIFT(src_map) != PAGE_SHIFT) &&
+ (src_start >= src_addr + len) &&
+ (src_addr + len != 0)) {
+ /*
+ * Stop copying now, even though we haven't reached
+ * "src_end". We'll adjust the end of the last copy
+ * entry at the end, if needed.
+ *
+			 * If src_map's alignment is different from the
+ * system's page-alignment, there could be
+ * extra non-map-aligned map entries between
+ * the original (non-rounded) "src_addr + len"
+ * and the rounded "src_end".
+ * We do not want to copy those map entries since
+ * they're not part of the copied range.
+ */
+ break;
}
+
if ((src_start >= src_end) && (src_end != 0))
break;
*/
tmp_entry = src_entry->vme_next;
- if ((tmp_entry->vme_start != src_start) ||
- (tmp_entry == vm_map_to_entry(src_map)))
+ if ((tmp_entry->vme_start != src_start) ||
+ (tmp_entry == vm_map_to_entry(src_map))) {
RETURN(KERN_INVALID_ADDRESS);
+ }
}
/*
* copy was successful.
*/
if (src_destroy) {
- (void) vm_map_delete(src_map,
- vm_map_trunc_page(src_addr),
- src_end,
- (src_map == kernel_map) ?
- VM_MAP_REMOVE_KUNWIRE :
- VM_MAP_NO_FLAGS,
- VM_MAP_NULL);
+ (void) vm_map_delete(
+ src_map,
+ vm_map_trunc_page(src_addr,
+ VM_MAP_PAGE_MASK(src_map)),
+ src_end,
+ ((src_map == kernel_map) ?
+ VM_MAP_REMOVE_KUNWIRE :
+ VM_MAP_NO_FLAGS),
+ VM_MAP_NULL);
} else {
/* fix up the damage we did in the base map */
- vm_map_simplify_range(src_map,
- vm_map_trunc_page(src_addr),
- vm_map_round_page(src_end));
+ vm_map_simplify_range(
+ src_map,
+ vm_map_trunc_page(src_addr,
+ VM_MAP_PAGE_MASK(src_map)),
+ vm_map_round_page(src_end,
+ VM_MAP_PAGE_MASK(src_map)));
}
vm_map_unlock(src_map);
+ if (VM_MAP_PAGE_SHIFT(src_map) != PAGE_SHIFT) {
+ vm_map_offset_t original_start, original_offset, original_end;
+
+ assert(VM_MAP_COPY_PAGE_MASK(copy) == PAGE_MASK);
+
+ /* adjust alignment of first copy_entry's "vme_start" */
+ tmp_entry = vm_map_copy_first_entry(copy);
+ if (tmp_entry != vm_map_copy_to_entry(copy)) {
+ vm_map_offset_t adjustment;
+
+ original_start = tmp_entry->vme_start;
+ original_offset = tmp_entry->offset;
+
+ /* map-align the start of the first copy entry... */
+ adjustment = (tmp_entry->vme_start -
+ vm_map_trunc_page(
+ tmp_entry->vme_start,
+ VM_MAP_PAGE_MASK(src_map)));
+ tmp_entry->vme_start -= adjustment;
+ tmp_entry->offset -= adjustment;
+ copy_addr -= adjustment;
+ assert(tmp_entry->vme_start < tmp_entry->vme_end);
+ /* ... adjust for mis-aligned start of copy range */
+ adjustment =
+ (vm_map_trunc_page(copy->offset,
+ PAGE_MASK) -
+ vm_map_trunc_page(copy->offset,
+ VM_MAP_PAGE_MASK(src_map)));
+ if (adjustment) {
+ assert(page_aligned(adjustment));
+ assert(adjustment < VM_MAP_PAGE_SIZE(src_map));
+ tmp_entry->vme_start += adjustment;
+ tmp_entry->offset += adjustment;
+ copy_addr += adjustment;
+ assert(tmp_entry->vme_start < tmp_entry->vme_end);
+ }
+
+ /*
+ * Assert that the adjustments haven't exposed
+ * more than was originally copied...
+ */
+ assert(tmp_entry->vme_start >= original_start);
+ assert(tmp_entry->offset >= original_offset);
+ /*
+			 * ... and that it did not adjust outside of
+ * a single 16K page.
+ */
+ assert(vm_map_trunc_page(tmp_entry->vme_start,
+ VM_MAP_PAGE_MASK(src_map)) ==
+ vm_map_trunc_page(original_start,
+ VM_MAP_PAGE_MASK(src_map)));
+ }
+
+ /* adjust alignment of last copy_entry's "vme_end" */
+ tmp_entry = vm_map_copy_last_entry(copy);
+ if (tmp_entry != vm_map_copy_to_entry(copy)) {
+ vm_map_offset_t adjustment;
+
+ original_end = tmp_entry->vme_end;
+
+ /* map-align the end of the last copy entry... */
+ tmp_entry->vme_end =
+ vm_map_round_page(tmp_entry->vme_end,
+ VM_MAP_PAGE_MASK(src_map));
+ /* ... adjust for mis-aligned end of copy range */
+ adjustment =
+ (vm_map_round_page((copy->offset +
+ copy->size),
+ VM_MAP_PAGE_MASK(src_map)) -
+ vm_map_round_page((copy->offset +
+ copy->size),
+ PAGE_MASK));
+ if (adjustment) {
+ assert(page_aligned(adjustment));
+ assert(adjustment < VM_MAP_PAGE_SIZE(src_map));
+ tmp_entry->vme_end -= adjustment;
+ assert(tmp_entry->vme_start < tmp_entry->vme_end);
+ }
+
+ /*
+ * Assert that the adjustments haven't exposed
+ * more than was originally copied...
+ */
+ assert(tmp_entry->vme_end <= original_end);
+ /*
+			 * ... and that it did not adjust outside of
+ * a single 16K page.
+ */
+ assert(vm_map_round_page(tmp_entry->vme_end,
+ VM_MAP_PAGE_MASK(src_map)) ==
+ vm_map_round_page(original_end,
+ VM_MAP_PAGE_MASK(src_map)));
+ }
+ }
+
/* Fix-up start and end points in copy. This is necessary */
/* when the various entries in the copy object were picked */
/* up from different sub-maps */
tmp_entry = vm_map_copy_first_entry(copy);
+ copy_size = 0; /* compute actual size */
while (tmp_entry != vm_map_copy_to_entry(copy)) {
+ assert(VM_MAP_PAGE_ALIGNED(
+ copy_addr + (tmp_entry->vme_end -
+ tmp_entry->vme_start),
+ VM_MAP_COPY_PAGE_MASK(copy)));
+ assert(VM_MAP_PAGE_ALIGNED(
+ copy_addr,
+ VM_MAP_COPY_PAGE_MASK(copy)));
+
+ /*
+ * The copy_entries will be injected directly into the
+ * destination map and might not be "map aligned" there...
+ */
+ tmp_entry->map_aligned = FALSE;
+
tmp_entry->vme_end = copy_addr +
(tmp_entry->vme_end - tmp_entry->vme_start);
tmp_entry->vme_start = copy_addr;
+ assert(tmp_entry->vme_start < tmp_entry->vme_end);
copy_addr += tmp_entry->vme_end - tmp_entry->vme_start;
+ copy_size += tmp_entry->vme_end - tmp_entry->vme_start;
tmp_entry = (struct vm_map_entry *)tmp_entry->vme_next;
}
+ if (VM_MAP_PAGE_SHIFT(src_map) != PAGE_SHIFT &&
+ copy_size < copy->size) {
+ /*
+ * The actual size of the VM map copy is smaller than what
+ * was requested by the caller. This must be because some
+ * PAGE_SIZE-sized pages are missing at the end of the last
+ * VM_MAP_PAGE_SIZE(src_map)-sized chunk of the range.
+ * The caller might not have been aware of those missing
+ * pages and might not want to be aware of it, which is
+ * fine as long as they don't try to access (and crash on)
+ * those missing pages.
+ * Let's adjust the size of the "copy", to avoid failing
+ * in vm_map_copyout() or vm_map_copy_overwrite().
+ */
+ assert(vm_map_round_page(copy_size,
+ VM_MAP_PAGE_MASK(src_map)) ==
+ vm_map_round_page(copy->size,
+ VM_MAP_PAGE_MASK(src_map)));
+ copy->size = copy_size;
+ }
+
*copy_result = copy;
return(KERN_SUCCESS);
#undef RETURN
}
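+
+/*
+ * Routine: vm_map_copy_extract
+ *
+ * Description: Build a VM map copy describing the given range of
+ * "src_map" by extracting (and sharing) its mappings through
+ * vm_map_remap_extract(), without copying the underlying memory or
+ * consuming the source mappings.  The protections of the extracted
+ * range are returned through "cur_prot" and "max_prot".
+ */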
+kern_return_t
+vm_map_copy_extract(
+ vm_map_t src_map,
+ vm_map_address_t src_addr,
+ vm_map_size_t len,
+ vm_map_copy_t *copy_result, /* OUT */
+ vm_prot_t *cur_prot, /* OUT */
+ vm_prot_t *max_prot)
+{
+ vm_map_offset_t src_start, src_end;
+ vm_map_copy_t copy;
+ kern_return_t kr;
+
+ /*
+ * Check for copies of zero bytes.
+ */
+
+ if (len == 0) {
+ *copy_result = VM_MAP_COPY_NULL;
+ return(KERN_SUCCESS);
+ }
+
+ /*
+ * Check that the end address doesn't overflow
+ */
+ src_end = src_addr + len;
+ if (src_end < src_addr)
+ return KERN_INVALID_ADDRESS;
+
+ /*
+ * Compute (page aligned) start and end of region
+ */
+ src_start = vm_map_trunc_page(src_addr, PAGE_MASK);
+ src_end = vm_map_round_page(src_end, PAGE_MASK);
+
+ /*
+ * Allocate a header element for the list.
+ *
+ * Use the start and end in the header to
+ * remember the endpoints prior to rounding.
+ */
+
+ copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+ copy->c_u.hdr.rb_head_store.rbh_root = (void*)(int)SKIP_RB_TREE;
+ vm_map_copy_first_entry(copy) =
+ vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
+ copy->type = VM_MAP_COPY_ENTRY_LIST;
+ copy->cpy_hdr.nentries = 0;
+ copy->cpy_hdr.entries_pageable = TRUE;
+
+	vm_map_store_init(&copy->cpy_hdr);
+
+ copy->offset = 0;
+ copy->size = len;
+
+ kr = vm_map_remap_extract(src_map,
+ src_addr,
+ len,
+ FALSE, /* copy */
+				   &copy->cpy_hdr,
+ cur_prot,
+ max_prot,
+ VM_INHERIT_SHARE,
+ TRUE); /* pageable */
+ if (kr != KERN_SUCCESS) {
+ vm_map_copy_discard(copy);
+ return kr;
+ }
+
+ *copy_result = copy;
+ return KERN_SUCCESS;
+}
+
/*
* vm_map_copyin_object:
*
*/
copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+ copy->c_u.hdr.rb_head_store.rbh_root = (void*)(int)SKIP_RB_TREE;
copy->type = VM_MAP_COPY_OBJECT;
copy->cpy_object = object;
copy->offset = offset;
old_entry->vme_start));
old_entry->offset = 0;
old_entry->object.vm_object = object;
+ old_entry->use_pmap = TRUE;
assert(!old_entry->needs_copy);
} else if (object->copy_strategy !=
MEMORY_OBJECT_COPY_SYMMETRIC) {
if (override_nx(old_map, old_entry->alias) && prot)
prot |= VM_PROT_EXECUTE;
- if (old_map->mapped) {
+ if (old_map->mapped_in_other_pmaps) {
vm_object_pmap_protect(
old_entry->object.vm_object,
old_entry->offset,
* Mark both entries as shared.
*/
- new_entry = vm_map_entry_create(new_map);
+ new_entry = vm_map_entry_create(new_map, FALSE); /* Never the kernel
+ * map or descendants */
vm_map_entry_copy(new_entry, old_entry);
old_entry->is_shared = TRUE;
new_entry->is_shared = TRUE;
*/
vm_map_t
vm_map_fork(
+ ledger_t ledger,
vm_map_t old_map)
{
pmap_t new_pmap;
boolean_t src_needs_copy;
boolean_t new_entry_needs_copy;
- new_pmap = pmap_create((vm_map_size_t) 0,
+ new_pmap = pmap_create(ledger, (vm_map_size_t) 0,
#if defined(__i386__) || defined(__x86_64__)
old_map->pmap->pm_task_map != TASK_MAP_32BIT
#else
- 0
+#error Unknown architecture.
#endif
);
-#if defined(__i386__)
- if (old_map->pmap->pm_task_map == TASK_MAP_64BIT_SHARED)
- pmap_set_4GB_pagezero(new_pmap);
-#endif
vm_map_reference_swap(old_map);
vm_map_lock(old_map);
old_map->min_offset,
old_map->max_offset,
old_map->hdr.entries_pageable);
+ /* inherit the parent map's page size */
+ vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(old_map));
for (
old_entry = vm_map_first_entry(old_map);
old_entry != vm_map_to_entry(old_map);
goto slow_vm_map_fork_copy;
}
- new_entry = vm_map_entry_create(new_map);
+ new_entry = vm_map_entry_create(new_map, FALSE); /* never the kernel map or descendants */
vm_map_entry_copy(new_entry, old_entry);
- /* clear address space specifics */
- new_entry->use_pmap = FALSE;
+ if (new_entry->is_sub_map) {
+ /* clear address space specifics */
+ new_entry->use_pmap = FALSE;
+ }
if (! vm_object_copy_quickly(
&new_entry->object.vm_object,
(old_entry->vme_end -
old_entry->vme_start),
((old_entry->is_shared
- || old_map->mapped)
+ || old_map->mapped_in_other_pmaps)
? PMAP_NULL :
old_map->pmap),
old_entry->vme_start,
old_entry = old_entry->vme_next;
}
+
new_map->size = new_size;
vm_map_unlock(old_map);
vm_map_deallocate(old_map);
{
SHARED_REGION_TRACE_DEBUG(
("shared_region: task %p: vm_map_exec(%p,%p,%p,0x%x): ->\n",
- current_task(), new_map, task, fsroot, cpu));
+ (void *)VM_KERNEL_ADDRPERM(current_task()),
+ (void *)VM_KERNEL_ADDRPERM(new_map),
+ (void *)VM_KERNEL_ADDRPERM(task),
+ (void *)VM_KERNEL_ADDRPERM(fsroot),
+ cpu));
(void) vm_commpage_enter(new_map, task);
(void) vm_shared_region_enter(new_map, task, fsroot, cpu);
SHARED_REGION_TRACE_DEBUG(
("shared_region: task %p: vm_map_exec(%p,%p,%p,0x%x): <-\n",
- current_task(), new_map, task, fsroot, cpu));
+ (void *)VM_KERNEL_ADDRPERM(current_task()),
+ (void *)VM_KERNEL_ADDRPERM(new_map),
+ (void *)VM_KERNEL_ADDRPERM(task),
+ (void *)VM_KERNEL_ADDRPERM(fsroot),
+ cpu));
return KERN_SUCCESS;
}
vm_map_offset_t old_end = 0;
register vm_prot_t prot;
boolean_t mask_protections;
+ boolean_t force_copy;
vm_prot_t original_fault_type;
/*
* absolute value.
*/
mask_protections = (fault_type & VM_PROT_IS_MASK) ? TRUE : FALSE;
- fault_type &= ~VM_PROT_IS_MASK;
+ force_copy = (fault_type & VM_PROT_COPY) ? TRUE : FALSE;
+ fault_type &= VM_PROT_ALL;
original_fault_type = fault_type;
*real_map = map;
if (!mapped_needs_copy) {
if (vm_map_lock_read_to_write(map)) {
vm_map_lock_read(map);
- /* XXX FBDP: entry still valid ? */
- if(*real_map == entry->object.sub_map)
- *real_map = map;
+ *real_map = map;
goto RetryLookup;
}
vm_map_lock_read(entry->object.sub_map);
+ *var_map = entry->object.sub_map;
cow_sub_map_parent = map;
/* reset base to map before cow object */
/* this is the map which will accept */
mapped_needs_copy = TRUE;
} else {
vm_map_lock_read(entry->object.sub_map);
+ *var_map = entry->object.sub_map;
if((cow_sub_map_parent != map) &&
(*real_map != map))
vm_map_unlock(map);
}
} else {
vm_map_lock_read(entry->object.sub_map);
+ *var_map = entry->object.sub_map;
/* leave map locked if it is a target */
/* cow sub_map above otherwise, just */
/* follow the maps down to the object */
vm_map_unlock_read(map);
}
- /* XXX FBDP: map has been unlocked, what protects "entry" !? */
- *var_map = map = entry->object.sub_map;
+ map = *var_map;
/* calculate the offset in the submap for vaddr */
local_vaddr = (local_vaddr - entry->vme_start) + entry->offset;
(old_end - cow_parent_vaddr);
vm_map_clip_start(map, submap_entry, local_start);
vm_map_clip_end(map, submap_entry, local_end);
- /* unnesting was done in vm_map_clip_start/end() */
- assert(!submap_entry->use_pmap);
+ if (submap_entry->is_sub_map) {
+ /* unnesting was done when clipping */
+ assert(!submap_entry->use_pmap);
+ }
/* This is the COW case, lets connect */
/* an entry in our space to the underlying */
prot = submap_entry->protection & ~VM_PROT_WRITE;
- if (override_nx(map, submap_entry->alias) && prot)
+ if (override_nx(old_map, submap_entry->alias) && prot)
prot |= VM_PROT_EXECUTE;
vm_object_pmap_protect(
submap_entry->vme_end -
submap_entry->vme_start,
(submap_entry->is_shared
- || map->mapped) ?
+ || map->mapped_in_other_pmaps) ?
PMAP_NULL : map->pmap,
submap_entry->vme_start,
prot);
vm_map_clip_start(map, entry, local_start);
vm_map_clip_end(map, entry, local_end);
- /* unnesting was done in vm_map_clip_start/end() */
- assert(!entry->use_pmap);
+ if (entry->is_sub_map) {
+ /* unnesting was done when clipping */
+ assert(!entry->use_pmap);
+ }
/* substitute copy object for */
/* shared map entry */
vm_map_deallocate(entry->object.sub_map);
+ assert(!entry->iokit_acct);
entry->is_sub_map = FALSE;
+ entry->use_pmap = TRUE;
entry->object.vm_object = copy_object;
/* propagate the submap entry's protections */
prot = entry->protection;
- if (override_nx(map, entry->alias) && prot) {
+ if (override_nx(old_map, entry->alias) && prot) {
/*
* HACK -- if not a stack, then allow execution
*/
* demote the permissions allowed.
*/
- if ((fault_type & VM_PROT_WRITE) || *wired) {
+ if ((fault_type & VM_PROT_WRITE) || *wired || force_copy) {
/*
* Make a new object, and place it in the
* object chain. Note that no new references
/* ... the caller will change "interruptible" if needed */
fault_info->cluster_size = 0;
fault_info->user_tag = entry->alias;
+ fault_info->pmap_options = 0;
+ if (entry->iokit_acct ||
+ (!entry->is_sub_map && !entry->use_pmap)) {
+ fault_info->pmap_options |= PMAP_OPTIONS_ALT_ACCT;
+ }
fault_info->behavior = entry->behavior;
fault_info->lo_offset = entry->offset;
fault_info->hi_offset = (entry->vme_end - entry->vme_start) + entry->offset;
fault_info->io_sync = FALSE;
fault_info->cs_bypass = (entry->used_for_jit)? TRUE : FALSE;
fault_info->mark_zf_absent = FALSE;
+ fault_info->batch_pmap_op = FALSE;
}
/*
vm_region_submap_info_64_t submap_info, /* IN/OUT */
mach_msg_type_number_t *count) /* IN/OUT */
{
+ mach_msg_type_number_t original_count;
vm_region_extended_info_data_t extended;
vm_map_entry_t tmp_entry;
vm_map_offset_t user_address;
return KERN_INVALID_ARGUMENT;
}
- if (*count < VM_REGION_SUBMAP_INFO_COUNT_64) {
- if (*count < VM_REGION_SUBMAP_SHORT_INFO_COUNT_64) {
- /*
- * "info" structure is not big enough and
- * would overflow
- */
- return KERN_INVALID_ARGUMENT;
- } else {
- look_for_pages = FALSE;
- *count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
- short_info = (vm_region_submap_short_info_64_t) submap_info;
- submap_info = NULL;
- }
+
+ if (*count < VM_REGION_SUBMAP_SHORT_INFO_COUNT_64) {
+ /*
+ * "info" structure is not big enough and
+ * would overflow
+ */
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ original_count = *count;
+
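+	/*
+	 * If the caller's buffer can only hold the short structure, fill
+	 * that in; otherwise use the V0 layout, or the V1 layout (which
+	 * adds "pages_reusable") if the buffer is large enough for it.
+	 */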
+ if (original_count < VM_REGION_SUBMAP_INFO_V0_COUNT_64) {
+ *count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
+ look_for_pages = FALSE;
+ short_info = (vm_region_submap_short_info_64_t) submap_info;
+ submap_info = NULL;
} else {
look_for_pages = TRUE;
- *count = VM_REGION_SUBMAP_INFO_COUNT_64;
+ *count = VM_REGION_SUBMAP_INFO_V0_COUNT_64;
short_info = NULL;
+
+ if (original_count >= VM_REGION_SUBMAP_INFO_V1_COUNT_64) {
+ *count = VM_REGION_SUBMAP_INFO_V1_COUNT_64;
+ }
}
-
-
+
user_address = *address;
user_max_depth = *nesting_depth;
// LP64todo: all the current tools are 32bit, obviously never worked for 64b
// so probably should be a real 32b ID vs. ptr.
// Current users just check for equality
-#define INFO_MAKE_OBJECT_ID(p) ((uint32_t)(uintptr_t)p)
+#define INFO_MAKE_OBJECT_ID(p) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM(p))
if (look_for_pages) {
submap_info->user_tag = curr_entry->alias;
extended.pages_swapped_out = 0;
extended.pages_shared_now_private = 0;
extended.pages_dirtied = 0;
+ extended.pages_reusable = 0;
extended.external_pager = 0;
extended.shadow_depth = 0;
curr_entry->vme_start)),
range_end - range_start,
&extended,
- look_for_pages);
+ look_for_pages, VM_REGION_EXTENDED_INFO_COUNT);
if (extended.external_pager &&
extended.ref_count == 2 &&
extended.share_mode == SM_SHARED) {
submap_info->shadow_depth = extended.shadow_depth;
submap_info->share_mode = extended.share_mode;
submap_info->ref_count = extended.ref_count;
+
+ if (original_count >= VM_REGION_SUBMAP_INFO_V1_COUNT_64) {
+ submap_info->pages_reusable = extended.pages_reusable;
+ }
} else {
short_info->external_pager = extended.external_pager;
short_info->shadow_depth = extended.shadow_depth;
return(KERN_SUCCESS);
}
case VM_REGION_EXTENDED_INFO:
- {
- vm_region_extended_info_t extended;
-
if (*count < VM_REGION_EXTENDED_INFO_COUNT)
return(KERN_INVALID_ARGUMENT);
+ /*fallthru*/
+ case VM_REGION_EXTENDED_INFO__legacy:
+ if (*count < VM_REGION_EXTENDED_INFO_COUNT__legacy)
+ return KERN_INVALID_ARGUMENT;
+
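+	/*
+	 * Both flavors share the code below; the legacy flavor simply
+	 * returns the smaller structure, without "pages_reusable".
+	 */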
+ {
+ vm_region_extended_info_t extended;
+ mach_msg_type_number_t original_count;
extended = (vm_region_extended_info_t) info;
- *count = VM_REGION_EXTENDED_INFO_COUNT;
vm_map_lock_read(map);
extended->external_pager = 0;
extended->shadow_depth = 0;
- vm_map_region_walk(map, start, entry, entry->offset, entry->vme_end - start, extended, TRUE);
+ original_count = *count;
+ if (flavor == VM_REGION_EXTENDED_INFO__legacy) {
+ *count = VM_REGION_EXTENDED_INFO_COUNT__legacy;
+ } else {
+ extended->pages_reusable = 0;
+ *count = VM_REGION_EXTENDED_INFO_COUNT;
+ }
+
+ vm_map_region_walk(map, start, entry, entry->offset, entry->vme_end - start, extended, TRUE, *count);
if (extended->external_pager && extended->ref_count == 2 && extended->share_mode == SM_SHARED)
extended->share_mode = SM_PRIVATE;
if (ref_count == 1 ||
(ref_count == 2 && !(obj->pager_trusted) && !(obj->internal))) {
top->share_mode = SM_PRIVATE;
- top->private_pages_resident =
- OBJ_RESIDENT_COUNT(obj,
- entry_size);
+ top->private_pages_resident =
+ OBJ_RESIDENT_COUNT(obj,
+ entry_size);
} else {
top->share_mode = SM_SHARED;
top->shared_pages_resident =
top->ref_count = ref_count;
}
/* XXX K64: obj_id will be truncated */
- top->obj_id = (unsigned int) (uintptr_t)obj;
+ top->obj_id = (unsigned int) (uintptr_t)VM_KERNEL_ADDRPERM(obj);
vm_object_unlock(obj);
}
vm_object_offset_t offset,
vm_object_size_t range,
vm_region_extended_info_t extended,
- boolean_t look_for_pages)
+ boolean_t look_for_pages,
+ mach_msg_type_number_t count)
{
register struct vm_object *obj, *tmp_obj;
register vm_map_offset_t last_offset;
if (look_for_pages) {
for (last_offset = offset + range;
offset < last_offset;
- offset += PAGE_SIZE_64, va += PAGE_SIZE)
- vm_map_region_look_for_page(map, va, obj,
- offset, ref_count,
- 0, extended);
+ offset += PAGE_SIZE_64, va += PAGE_SIZE) {
+ vm_map_region_look_for_page(map, va, obj,
+ offset, ref_count,
+ 0, extended, count);
+ }
} else {
shadow_object = obj->shadow;
shadow_depth = 0;
vm_object_offset_t offset,
int max_refcnt,
int depth,
- vm_region_extended_info_t extended)
+ vm_region_extended_info_t extended,
+ mach_msg_type_number_t count)
{
register vm_page_t p;
register vm_object_t shadow;
register int ref_count;
vm_object_t caller_object;
-#if MACH_PAGEMAP
kern_return_t kr;
-#endif
shadow = object->shadow;
caller_object = object;
if (shadow && (max_refcnt == 1))
extended->pages_shared_now_private++;
- if (!p->fictitious &&
+ if (!p->fictitious &&
(p->dirty || pmap_is_modified(p->phys_page)))
extended->pages_dirtied++;
+ else if (count >= VM_REGION_EXTENDED_INFO_COUNT) {
+ if (p->reusable || p->object->all_reusable) {
+ extended->pages_reusable++;
+ }
+ }
- extended->pages_resident++;
+ extended->pages_resident++;
if(object != caller_object)
vm_object_unlock(object);
return;
}
- } else if (object->internal &&
- object->alive &&
- !object->terminating &&
- object->pager_ready) {
-
- memory_object_t pager;
-
- vm_object_paging_begin(object);
- pager = object->pager;
- vm_object_unlock(object);
+ } else
+#endif /* MACH_PAGEMAP */
+ if (object->internal &&
+ object->alive &&
+ !object->terminating &&
+ object->pager_ready) {
+
+ if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
+ if (VM_COMPRESSOR_PAGER_STATE_GET(object,
+ offset)
+ == VM_EXTERNAL_STATE_EXISTS) {
+ /* the pager has that page */
+ extended->pages_swapped_out++;
+ if (object != caller_object)
+ vm_object_unlock(object);
+ return;
+ }
+ } else {
+ memory_object_t pager;
- kr = memory_object_data_request(
- pager,
- offset + object->paging_offset,
- 0, /* just poke the pager */
- VM_PROT_READ,
- NULL);
+ vm_object_paging_begin(object);
+ pager = object->pager;
+ vm_object_unlock(object);
- vm_object_lock(object);
- vm_object_paging_end(object);
+ kr = memory_object_data_request(
+ pager,
+ offset + object->paging_offset,
+ 0, /* just poke the pager */
+ VM_PROT_READ,
+ NULL);
- if (kr == KERN_SUCCESS) {
- /* the pager has that page */
- extended->pages_swapped_out++;
- if (object != caller_object)
- vm_object_unlock(object);
- return;
+ vm_object_lock(object);
+ vm_object_paging_end(object);
+
+ if (kr == KERN_SUCCESS) {
+ /* the pager has that page */
+ extended->pages_swapped_out++;
+ if (object != caller_object)
+ vm_object_unlock(object);
+ return;
+ }
}
}
-#endif /* MACH_PAGEMAP */
if (shadow) {
vm_object_lock(shadow);
(prev_entry->vme_end == this_entry->vme_start) &&
(prev_entry->is_sub_map == this_entry->is_sub_map) &&
-
(prev_entry->object.vm_object == this_entry->object.vm_object) &&
((prev_entry->offset + (prev_entry->vme_end -
prev_entry->vme_start))
== this_entry->offset) &&
- (prev_entry->inheritance == this_entry->inheritance) &&
+ (prev_entry->behavior == this_entry->behavior) &&
+ (prev_entry->needs_copy == this_entry->needs_copy) &&
(prev_entry->protection == this_entry->protection) &&
(prev_entry->max_protection == this_entry->max_protection) &&
- (prev_entry->behavior == this_entry->behavior) &&
+ (prev_entry->inheritance == this_entry->inheritance) &&
+ (prev_entry->use_pmap == this_entry->use_pmap) &&
(prev_entry->alias == this_entry->alias) &&
- (prev_entry->zero_wired_pages == this_entry->zero_wired_pages) &&
(prev_entry->no_cache == this_entry->no_cache) &&
+ (prev_entry->permanent == this_entry->permanent) &&
+ (prev_entry->map_aligned == this_entry->map_aligned) &&
+ (prev_entry->zero_wired_pages == this_entry->zero_wired_pages) &&
+ (prev_entry->used_for_jit == this_entry->used_for_jit) &&
+ /* from_reserved_zone: OK if that field doesn't match */
+ (prev_entry->iokit_acct == this_entry->iokit_acct) &&
+
(prev_entry->wired_count == this_entry->wired_count) &&
(prev_entry->user_wired_count == this_entry->user_wired_count) &&
- (prev_entry->needs_copy == this_entry->needs_copy) &&
- (prev_entry->permanent == this_entry->permanent) &&
-
- (prev_entry->use_pmap == FALSE) &&
- (this_entry->use_pmap == FALSE) &&
(prev_entry->in_transition == FALSE) &&
(this_entry->in_transition == FALSE) &&
(prev_entry->needs_wakeup == FALSE) &&
(this_entry->needs_wakeup == FALSE) &&
(prev_entry->is_shared == FALSE) &&
- (this_entry->is_shared == FALSE)
+ (this_entry->is_shared == FALSE) &&
+ (prev_entry->superpage_size == FALSE) &&
+ (this_entry->superpage_size == FALSE)
) {
- _vm_map_store_entry_unlink(&map->hdr, prev_entry);
+ vm_map_store_entry_unlink(map, prev_entry);
+ assert(prev_entry->vme_start < this_entry->vme_end);
+ if (prev_entry->map_aligned)
+ assert(VM_MAP_PAGE_ALIGNED(prev_entry->vme_start,
+ VM_MAP_PAGE_MASK(map)));
this_entry->vme_start = prev_entry->vme_start;
this_entry->offset = prev_entry->offset;
if (prev_entry->is_sub_map) {
return;
}
- start = vm_map_trunc_page(start);
- end = vm_map_round_page(end);
+ start = vm_map_trunc_page(start,
+ VM_MAP_PAGE_MASK(map));
+ end = vm_map_round_page(end,
+ VM_MAP_PAGE_MASK(map));
if (!vm_map_lookup_entry(map, start, &entry)) {
/* "start" is not mapped and "entry" ends before "start" */
while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
vm_map_clip_end(map, entry, end);
- assert(!entry->use_pmap);
+ if (entry->is_sub_map) {
+ assert(!entry->use_pmap);
+ }
if( new_behavior == VM_BEHAVIOR_ZERO_WIRED_PAGES ) {
entry->zero_wired_pages = TRUE;
fault_info.io_sync = FALSE;
fault_info.cs_bypass = FALSE;
fault_info.mark_zf_absent = FALSE;
+ fault_info.batch_pmap_op = FALSE;
/*
* The MADV_WILLNEED operation doesn't require any changes to the
fault_info.lo_offset = offset;
fault_info.hi_offset = offset + len;
fault_info.user_tag = entry->alias;
+ fault_info.pmap_options = 0;
+ if (entry->iokit_acct ||
+ (!entry->is_sub_map && !entry->use_pmap)) {
+ fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
+ }
/*
* If there's no read permission to this mapping, then just
*
* Note that memory_object_data_request() places limits on the
* amount of I/O it will do. Regardless of the len we
- * specified, it won't do more than MAX_UPL_TRANSFER and it
+ * specified, it won't do more than MAX_UPL_TRANSFER_BYTES and it
* silently truncates the len to that size. This isn't
* necessarily bad since madvise shouldn't really be used to
* page in unlimited amounts of data. Other Unix variants
{
vm_object_t object;
+ switch (entry->alias) {
+ case VM_MEMORY_MALLOC:
+ case VM_MEMORY_MALLOC_SMALL:
+ case VM_MEMORY_MALLOC_LARGE:
+ case VM_MEMORY_REALLOC:
+ case VM_MEMORY_MALLOC_TINY:
+ case VM_MEMORY_MALLOC_LARGE_REUSABLE:
+ case VM_MEMORY_MALLOC_LARGE_REUSED:
+ /*
+ * This is a malloc() memory region: check if it's still
+ * in its original state and can be re-used for more
+ * malloc() allocations.
+ */
+ break;
+ default:
+ /*
+ * Not a malloc() memory region: let the caller decide if
+ * it's re-usable.
+ */
+ return TRUE;
+ }
+
if (entry->is_shared ||
entry->is_sub_map ||
entry->in_transition ||
entry->inheritance != VM_INHERIT_DEFAULT ||
entry->no_cache ||
entry->permanent ||
- entry->superpage_size != 0 ||
+ entry->superpage_size != FALSE ||
entry->zero_wired_pages ||
entry->wired_count != 0 ||
entry->user_wired_count != 0) {
if (object == VM_OBJECT_NULL) {
return TRUE;
}
- if (object->ref_count == 1 &&
+ if (
+#if 0
+ /*
+ * Let's proceed even if the VM object is potentially
+ * shared.
+ * We check for this later when processing the actual
+ * VM pages, so the contents will be safe if shared.
+ *
+ * But we can still mark this memory region as "reusable" to
+ * acknowledge that the caller did let us know that the memory
+ * could be re-used and should not be penalized for holding
+ * on to it. This allows its "resident size" to not include
+ * the reusable range.
+ */
+ object->ref_count == 1 &&
+#endif
object->wired_page_count == 0 &&
object->copy == VM_OBJECT_NULL &&
object->shadow == VM_OBJECT_NULL &&
vm_object_lock(object);
- if (object->ref_count == 1 && !object->shadow)
+ if (object->ref_count == 1 &&
+ !object->shadow &&
+ /*
+ * "iokit_acct" entries are billed for their virtual size
+ * (rather than for their resident pages only), so they
+ * wouldn't benefit from making pages reusable, and it
+ * would be hard to keep track of pages that are both
+ * "iokit_acct" and "reusable" in the pmap stats and ledgers.
+ */
+ !(entry->iokit_acct ||
+ (!entry->is_sub_map && !entry->use_pmap)))
kill_pages = 1;
else
kill_pages = -1;
}
-
-#include <mach_kdb.h>
-#if MACH_KDB
-#include <ddb/db_output.h>
-#include <vm/vm_print.h>
-
-#define printf db_printf
-
/*
- * Forward declarations for internal functions.
+ * Routine: vm_map_entry_insert
+ *
+ * Description: This routine inserts a new vm_map_entry in a locked map.
*/
-extern void vm_map_links_print(
- struct vm_map_links *links);
-
-extern void vm_map_header_print(
- struct vm_map_header *header);
-
-extern void vm_map_entry_print(
- vm_map_entry_t entry);
+vm_map_entry_t
+vm_map_entry_insert(
+ vm_map_t map,
+ vm_map_entry_t insp_entry,
+ vm_map_offset_t start,
+ vm_map_offset_t end,
+ vm_object_t object,
+ vm_object_offset_t offset,
+ boolean_t needs_copy,
+ boolean_t is_shared,
+ boolean_t in_transition,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_behavior_t behavior,
+ vm_inherit_t inheritance,
+ unsigned wired_count,
+ boolean_t no_cache,
+ boolean_t permanent,
+ unsigned int superpage_size,
+ boolean_t clear_map_aligned,
+ boolean_t is_submap)
+{
+ vm_map_entry_t new_entry;
-extern void vm_follow_entry(
- vm_map_entry_t entry);
+ assert(insp_entry != (vm_map_entry_t)0);
-extern void vm_follow_map(
- vm_map_t map);
+ new_entry = vm_map_entry_create(map, !map->hdr.entries_pageable);
-/*
- * vm_map_links_print: [ debug ]
- */
-void
-vm_map_links_print(
- struct vm_map_links *links)
-{
- iprintf("prev = %08X next = %08X start = %016llX end = %016llX\n",
- links->prev,
- links->next,
- (unsigned long long)links->start,
- (unsigned long long)links->end);
-}
+ if (VM_MAP_PAGE_SHIFT(map) != PAGE_SHIFT) {
+ new_entry->map_aligned = TRUE;
+ } else {
+ new_entry->map_aligned = FALSE;
+ }
+ if (clear_map_aligned &&
+ (! VM_MAP_PAGE_ALIGNED(start, VM_MAP_PAGE_MASK(map)) ||
+ ! VM_MAP_PAGE_ALIGNED(end, VM_MAP_PAGE_MASK(map)))) {
+ new_entry->map_aligned = FALSE;
+ }
-/*
- * vm_map_header_print: [ debug ]
- */
-void
-vm_map_header_print(
- struct vm_map_header *header)
-{
- vm_map_links_print(&header->links);
- iprintf("nentries = %08X, %sentries_pageable\n",
- header->nentries,
- (header->entries_pageable ? "" : "!"));
-}
-
-/*
- * vm_follow_entry: [ debug ]
- */
-void
-vm_follow_entry(
- vm_map_entry_t entry)
-{
- int shadows;
-
- iprintf("map entry %08X\n", entry);
-
- db_indent += 2;
-
- shadows = vm_follow_object(entry->object.vm_object);
- iprintf("Total objects : %d\n",shadows);
-
- db_indent -= 2;
-}
-
-/*
- * vm_map_entry_print: [ debug ]
- */
-void
-vm_map_entry_print(
- register vm_map_entry_t entry)
-{
- static const char *inheritance_name[4] =
- { "share", "copy", "none", "?"};
- static const char *behavior_name[4] =
- { "dflt", "rand", "seqtl", "rseqntl" };
-
- iprintf("map entry %08X - prev = %08X next = %08X\n", entry, entry->vme_prev, entry->vme_next);
-
- db_indent += 2;
-
- vm_map_links_print(&entry->links);
-
- iprintf("start = %016llX end = %016llX - prot=%x/%x/%s\n",
- (unsigned long long)entry->vme_start,
- (unsigned long long)entry->vme_end,
- entry->protection,
- entry->max_protection,
- inheritance_name[(entry->inheritance & 0x3)]);
-
- iprintf("behavior = %s, wired_count = %d, user_wired_count = %d\n",
- behavior_name[(entry->behavior & 0x3)],
- entry->wired_count,
- entry->user_wired_count);
- iprintf("%sin_transition, %sneeds_wakeup\n",
- (entry->in_transition ? "" : "!"),
- (entry->needs_wakeup ? "" : "!"));
-
- if (entry->is_sub_map) {
- iprintf("submap = %08X - offset = %016llX\n",
- entry->object.sub_map,
- (unsigned long long)entry->offset);
- } else {
- iprintf("object = %08X offset = %016llX - ",
- entry->object.vm_object,
- (unsigned long long)entry->offset);
- printf("%sis_shared, %sneeds_copy\n",
- (entry->is_shared ? "" : "!"),
- (entry->needs_copy ? "" : "!"));
- }
-
- db_indent -= 2;
-}
-
-/*
- * vm_follow_map: [ debug ]
- */
-void
-vm_follow_map(
- vm_map_t map)
-{
- register vm_map_entry_t entry;
-
- iprintf("task map %08X\n", map);
-
- db_indent += 2;
-
- for (entry = vm_map_first_entry(map);
- entry && entry != vm_map_to_entry(map);
- entry = entry->vme_next) {
- vm_follow_entry(entry);
- }
-
- db_indent -= 2;
-}
-
-/*
- * vm_map_print: [ debug ]
- */
-void
-vm_map_print(
- db_addr_t inmap)
-{
- register vm_map_entry_t entry;
- vm_map_t map;
-#if TASK_SWAPPER
- char *swstate;
-#endif /* TASK_SWAPPER */
-
- map = (vm_map_t)(long)
- inmap; /* Make sure we have the right type */
-
- iprintf("task map %08X\n", map);
-
- db_indent += 2;
-
- vm_map_header_print(&map->hdr);
-
- iprintf("pmap = %08X size = %08X ref = %d hint = %08X first_free = %08X\n",
- map->pmap,
- map->size,
- map->ref_count,
- map->hint,
- map->first_free);
-
- iprintf("%swait_for_space, %swiring_required, timestamp = %d\n",
- (map->wait_for_space ? "" : "!"),
- (map->wiring_required ? "" : "!"),
- map->timestamp);
-
-#if TASK_SWAPPER
- switch (map->sw_state) {
- case MAP_SW_IN:
- swstate = "SW_IN";
- break;
- case MAP_SW_OUT:
- swstate = "SW_OUT";
- break;
- default:
- swstate = "????";
- break;
- }
- iprintf("res = %d, sw_state = %s\n", map->res_count, swstate);
-#endif /* TASK_SWAPPER */
-
- for (entry = vm_map_first_entry(map);
- entry && entry != vm_map_to_entry(map);
- entry = entry->vme_next) {
- vm_map_entry_print(entry);
- }
-
- db_indent -= 2;
-}
-
-/*
- * Routine: vm_map_copy_print
- * Purpose:
- * Pretty-print a copy object for ddb.
- */
-
-void
-vm_map_copy_print(
- db_addr_t incopy)
-{
- vm_map_copy_t copy;
- vm_map_entry_t entry;
-
- copy = (vm_map_copy_t)(long)
- incopy; /* Make sure we have the right type */
-
- printf("copy object 0x%x\n", copy);
-
- db_indent += 2;
-
- iprintf("type=%d", copy->type);
- switch (copy->type) {
- case VM_MAP_COPY_ENTRY_LIST:
- printf("[entry_list]");
- break;
-
- case VM_MAP_COPY_OBJECT:
- printf("[object]");
- break;
-
- case VM_MAP_COPY_KERNEL_BUFFER:
- printf("[kernel_buffer]");
- break;
-
- default:
- printf("[bad type]");
- break;
- }
- printf(", offset=0x%llx", (unsigned long long)copy->offset);
- printf(", size=0x%x\n", copy->size);
-
- switch (copy->type) {
- case VM_MAP_COPY_ENTRY_LIST:
- vm_map_header_print(&copy->cpy_hdr);
- for (entry = vm_map_copy_first_entry(copy);
- entry && entry != vm_map_copy_to_entry(copy);
- entry = entry->vme_next) {
- vm_map_entry_print(entry);
- }
- break;
-
- case VM_MAP_COPY_OBJECT:
- iprintf("object=0x%x\n", copy->cpy_object);
- break;
-
- case VM_MAP_COPY_KERNEL_BUFFER:
- iprintf("kernel buffer=0x%x", copy->cpy_kdata);
- printf(", kalloc_size=0x%x\n", copy->cpy_kalloc_size);
- break;
-
- }
-
- db_indent -=2;
-}
-
-/*
- * db_vm_map_total_size(map) [ debug ]
- *
- * return the total virtual size (in bytes) of the map
- */
-vm_map_size_t
-db_vm_map_total_size(
- db_addr_t inmap)
-{
- vm_map_entry_t entry;
- vm_map_size_t total;
- vm_map_t map;
-
- map = (vm_map_t)(long)
- inmap; /* Make sure we have the right type */
-
- total = 0;
- for (entry = vm_map_first_entry(map);
- entry != vm_map_to_entry(map);
- entry = entry->vme_next) {
- total += entry->vme_end - entry->vme_start;
- }
-
- return total;
-}
-
-#endif /* MACH_KDB */
-
-/*
- * Routine: vm_map_entry_insert
- *
- * Descritpion: This routine inserts a new vm_entry in a locked map.
- */
-vm_map_entry_t
-vm_map_entry_insert(
- vm_map_t map,
- vm_map_entry_t insp_entry,
- vm_map_offset_t start,
- vm_map_offset_t end,
- vm_object_t object,
- vm_object_offset_t offset,
- boolean_t needs_copy,
- boolean_t is_shared,
- boolean_t in_transition,
- vm_prot_t cur_protection,
- vm_prot_t max_protection,
- vm_behavior_t behavior,
- vm_inherit_t inheritance,
- unsigned wired_count,
- boolean_t no_cache,
- boolean_t permanent,
- unsigned int superpage_size)
-{
- vm_map_entry_t new_entry;
-
- assert(insp_entry != (vm_map_entry_t)0);
-
- new_entry = vm_map_entry_create(map);
-
- new_entry->vme_start = start;
- new_entry->vme_end = end;
- assert(page_aligned(new_entry->vme_start));
- assert(page_aligned(new_entry->vme_end));
+ new_entry->vme_start = start;
+ new_entry->vme_end = end;
+ assert(page_aligned(new_entry->vme_start));
+ assert(page_aligned(new_entry->vme_end));
+ if (new_entry->map_aligned) {
+ assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_start,
+ VM_MAP_PAGE_MASK(map)));
+ assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_end,
+ VM_MAP_PAGE_MASK(map)));
+ }
+ assert(new_entry->vme_start < new_entry->vme_end);
new_entry->object.vm_object = object;
new_entry->offset = offset;
new_entry->is_shared = is_shared;
- new_entry->is_sub_map = FALSE;
+ new_entry->is_sub_map = is_submap;
new_entry->needs_copy = needs_copy;
new_entry->in_transition = in_transition;
new_entry->needs_wakeup = FALSE;
new_entry->behavior = behavior;
new_entry->wired_count = wired_count;
new_entry->user_wired_count = 0;
- new_entry->use_pmap = FALSE;
+ if (is_submap) {
+ /*
+ * submap: "use_pmap" means "nested".
+ * default: false.
+ */
+ new_entry->use_pmap = FALSE;
+ } else {
+ /*
+ * object: "use_pmap" means "use pmap accounting" for footprint.
+ * default: true.
+ */
+ new_entry->use_pmap = TRUE;
+ }
new_entry->alias = 0;
new_entry->zero_wired_pages = FALSE;
new_entry->no_cache = no_cache;
new_entry->permanent = permanent;
- new_entry->superpage_size = superpage_size;
+ if (superpage_size)
+ new_entry->superpage_size = TRUE;
+ else
+ new_entry->superpage_size = FALSE;
new_entry->used_for_jit = FALSE;
+ new_entry->iokit_acct = FALSE;
/*
* Insert the new entry into the list.
boolean_t new_entry_needs_copy;
assert(map != VM_MAP_NULL);
- assert(size != 0 && size == vm_map_round_page(size));
+ assert(size != 0);
+ assert(size == vm_map_round_page(size, PAGE_MASK));
assert(inheritance == VM_INHERIT_NONE ||
inheritance == VM_INHERIT_COPY ||
inheritance == VM_INHERIT_SHARE);
/*
* Compute start and end of region.
*/
- src_start = vm_map_trunc_page(addr);
- src_end = vm_map_round_page(src_start + size);
+ src_start = vm_map_trunc_page(addr, PAGE_MASK);
+ src_end = vm_map_round_page(src_start + size, PAGE_MASK);
+
/*
* Initialize map_header.
map_header->links.prev = (struct vm_map_entry *)&map_header->links;
map_header->nentries = 0;
map_header->entries_pageable = pageable;
+ map_header->page_shift = PAGE_SHIFT;
vm_map_store_init( map_header );
object = VM_OBJECT_NULL;
} else {
object = src_entry->object.vm_object;
+ if (src_entry->iokit_acct) {
+ /*
+ * This entry uses "IOKit accounting".
+ */
+ } else if (object != VM_OBJECT_NULL &&
+ object->purgable != VM_PURGABLE_DENY) {
+ /*
+ * Purgeable objects have their own accounting:
+ * no pmap accounting for them.
+ */
+ assert(!src_entry->use_pmap);
+ } else {
+ /*
+ * Not IOKit or purgeable:
+ * must be accounted by pmap stats.
+ */
+ assert(src_entry->use_pmap);
+ }
if (object == VM_OBJECT_NULL) {
object = vm_object_allocate(entry_size);
if (override_nx(map, src_entry->alias) && prot)
prot |= VM_PROT_EXECUTE;
- if(map->mapped) {
+ if(map->mapped_in_other_pmaps) {
vm_object_pmap_protect(
src_entry->object.vm_object,
src_entry->offset,
offset = src_entry->offset + (src_start - src_entry->vme_start);
- new_entry = _vm_map_entry_create(map_header);
+ new_entry = _vm_map_entry_create(map_header, !map_header->entries_pageable);
vm_map_entry_copy(new_entry, src_entry);
- new_entry->use_pmap = FALSE; /* clr address space specifics */
+ if (new_entry->is_sub_map) {
+ /* clr address space specifics */
+ new_entry->use_pmap = FALSE;
+ }
+
+ new_entry->map_aligned = FALSE;
new_entry->vme_start = map_address;
new_entry->vme_end = map_address + tmp_size;
+ assert(new_entry->vme_start < new_entry->vme_end);
new_entry->inheritance = inheritance;
new_entry->offset = offset;
*/
RestartCopy:
if (!copy) {
+ /*
+ * Cannot allow an entry describing a JIT
+ * region to be shared across address spaces.
+ */
+ if (src_entry->used_for_jit == TRUE) {
+ result = KERN_INVALID_ARGUMENT;
+ break;
+ }
src_entry->is_shared = TRUE;
new_entry->is_shared = TRUE;
if (!(new_entry->is_sub_map))
offset,
entry_size,
((src_entry->is_shared
- || map->mapped) ?
+ || map->mapped_in_other_pmaps) ?
PMAP_NULL : map->pmap),
src_entry->vme_start,
prot);
src_entry = new_entry) {
new_entry = src_entry->vme_next;
_vm_map_store_entry_unlink(map_header, src_entry);
- vm_object_deallocate(src_entry->object.vm_object);
+ if (src_entry->is_sub_map) {
+ vm_map_deallocate(src_entry->object.sub_map);
+ } else {
+ vm_object_deallocate(src_entry->object.vm_object);
+ }
_vm_map_entry_dispose(map_header, src_entry);
}
}
vm_map_entry_t insp_entry = VM_MAP_ENTRY_NULL;
vm_map_entry_t new_entry;
struct vm_map_header map_header;
+ vm_map_offset_t offset_in_mapping;
if (target_map == VM_MAP_NULL)
return KERN_INVALID_ARGUMENT;
return KERN_INVALID_ARGUMENT;
}
- size = vm_map_round_page(size);
+ /*
+ * If the user is requesting that we return the address of the
+ * first byte of the data (rather than the base of the page),
+ * then we use different rounding semantics: specifically,
+ * we assume that (memory_address, size) describes a region
+ * all of whose pages we must cover, rather than a base to be truncated
+ * down and a size to be added to that base. So we figure out
+ * the highest page that the requested region includes and make
+ * sure that the size will cover it.
+ *
+ * The key example we're worried about is of the form:
+ *
+ * memory_address = 0x1ff0, size = 0x20
+ *
+ * With the old semantics, we round down the memory_address to 0x1000
+ * and round up the size to 0x1000, resulting in our covering *only*
+ * page 0x1000. With the new semantics, we'd realize that the region covers
+ * 0x1ff0-0x2010, and compute a size of 0x2000. Thus, we cover both page
+ * 0x1000 and page 0x2000 in the region we remap.
+ */
+ if ((flags & VM_FLAGS_RETURN_DATA_ADDR) != 0) {
+ offset_in_mapping = memory_address - vm_map_trunc_page(memory_address, PAGE_MASK);
+ size = vm_map_round_page(memory_address + size - vm_map_trunc_page(memory_address, PAGE_MASK), PAGE_MASK);
+ } else {
+ size = vm_map_round_page(size, PAGE_MASK);
+ }
result = vm_map_remap_extract(src_map, memory_address,
size, copy, &map_header,
cur_protection,
max_protection,
inheritance,
- target_map->hdr.
- entries_pageable);
+ target_map->hdr.entries_pageable);
if (result != KERN_SUCCESS) {
return result;
* Allocate/check a range of free virtual address
* space for the target
*/
- *address = vm_map_trunc_page(*address);
+ *address = vm_map_trunc_page(*address,
+ VM_MAP_PAGE_MASK(target_map));
vm_map_lock(target_map);
result = vm_map_remap_range_allocate(target_map, address, size,
mask, flags, &insp_entry);
if (result == KERN_SUCCESS) {
entry->vme_start += *address;
entry->vme_end += *address;
+ assert(!entry->map_aligned);
vm_map_store_entry_link(target_map, insp_entry, entry);
insp_entry = entry;
} else {
if (result == KERN_SUCCESS && target_map->wiring_required)
result = vm_map_wire(target_map, *address,
*address + size, *cur_protection, TRUE);
+
+ /*
+ * If requested, return the address of the data pointed to by the
+ * request, rather than the base of the resulting page.
+ */
+ if ((flags & VM_FLAGS_RETURN_DATA_ADDR) != 0) {
+ *address += offset_in_mapping;
+ }
+
return result;
}
start = tmp_entry->vme_end;
entry = tmp_entry;
}
+ start = vm_map_round_page(start,
+ VM_MAP_PAGE_MASK(map));
}
/*
*/
end = ((start + mask) & ~mask);
+ end = vm_map_round_page(end,
+ VM_MAP_PAGE_MASK(map));
if (end < start)
return(KERN_NO_SPACE);
start = end;
*/
zap_map = vm_map_create(PMAP_NULL,
start,
- end - start,
+ end,
map->hdr.entries_pageable);
if (zap_map == VM_MAP_NULL) {
return KERN_RESOURCE_SHORTAGE;
}
+ vm_map_set_page_shift(zap_map, VM_MAP_PAGE_SHIFT(map));
kr = vm_map_delete(map, start, end,
- VM_MAP_REMOVE_SAVE_ENTRIES,
+ (VM_MAP_REMOVE_SAVE_ENTRIES |
+ VM_MAP_REMOVE_NO_MAP_ALIGN),
zap_map);
if (kr == KERN_SUCCESS) {
vm_map_destroy(zap_map,
vm_map_entry_t entry;
vm_object_t object;
kern_return_t kr;
+ boolean_t was_nonvolatile;
/*
* Vet all the input parameters and current type and state of the
}
object = entry->object.vm_object;
- if (object == VM_OBJECT_NULL) {
+ if (object == VM_OBJECT_NULL ||
+ object->purgable == VM_PURGABLE_DENY) {
/*
- * Object must already be present or it can't be purgable.
+ * Object must already be present and be purgeable.
*/
vm_map_unlock_read(map);
return KERN_INVALID_ARGUMENT;
vm_object_lock(object);
+#if 00
if (entry->offset != 0 ||
entry->vme_end - entry->vme_start != object->vo_size) {
/*
vm_object_unlock(object);
return KERN_INVALID_ARGUMENT;
}
-
+#endif
+
+ assert(!entry->is_sub_map);
+ assert(!entry->use_pmap); /* purgeable has its own accounting */
+
vm_map_unlock_read(map);
+ was_nonvolatile = (object->purgable == VM_PURGABLE_NONVOLATILE);
+
kr = vm_object_purgable_control(object, control, state);
+ if (was_nonvolatile &&
+ object->purgable != VM_PURGABLE_NONVOLATILE &&
+ map->pmap == kernel_pmap) {
+#if DEBUG
+ object->vo_purgeable_volatilizer = kernel_task;
+#endif /* DEBUG */
+ }
+
vm_object_unlock(object);
return kr;
boolean_t top_object;
int disposition;
int ref_count;
- vm_object_id_t object_id;
vm_page_info_basic_t basic_info;
int depth;
vm_map_offset_t offset_in_page;
disposition = 0;
ref_count = 0;
- object_id = 0;
top_object = TRUE;
depth = 0;
retval = KERN_SUCCESS;
offset_in_page = offset & PAGE_MASK;
- offset = vm_map_trunc_page(offset);
+ offset = vm_map_trunc_page(offset, PAGE_MASK);
vm_map_lock_read(map);
}
} else
#endif
- {
- if (object->internal &&
- object->alive &&
- !object->terminating &&
- object->pager_ready) {
-
+ if (object->internal &&
+ object->alive &&
+ !object->terminating &&
+ object->pager_ready) {
+
+ if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
+ if (VM_COMPRESSOR_PAGER_STATE_GET(
+ object,
+ offset)
+ == VM_EXTERNAL_STATE_EXISTS) {
+ /* the pager has that page */
+ disposition |= VM_PAGE_QUERY_PAGE_PAGED_OUT;
+ break;
+ }
+ } else {
memory_object_t pager;
vm_object_paging_begin(object);
basic_info = (vm_page_info_basic_t) info;
basic_info->disposition = disposition;
basic_info->ref_count = ref_count;
- basic_info->object_id = (vm_object_id_t) (uintptr_t) object;
+ basic_info->object_id = (vm_object_id_t) (uintptr_t)
+ VM_KERNEL_ADDRPERM(object);
basic_info->offset =
(memory_object_offset_t) offset + offset_in_page;
basic_info->depth = depth;
/*
* align address and size on page boundaries
*/
- size = vm_map_round_page(address + size) - vm_map_trunc_page(address);
- address = vm_map_trunc_page(address);
+ size = (vm_map_round_page(address + size,
+ VM_MAP_PAGE_MASK(map)) -
+ vm_map_trunc_page(address,
+ VM_MAP_PAGE_MASK(map)));
+ address = vm_map_trunc_page(address,
+ VM_MAP_PAGE_MASK(map));
if (map == VM_MAP_NULL)
return(KERN_INVALID_TASK);
vm_map_lock(map);
if (!vm_map_lookup_entry(map,
- vm_map_trunc_page(address), &entry)) {
+ vm_map_trunc_page(
+ address,
+ VM_MAP_PAGE_MASK(map)),
+ &entry)) {
vm_map_size_t skip;
if (pager == MEMORY_OBJECT_NULL) {
vm_object_unlock(object);
vm_object_deallocate(object);
+ msync_req_free(new_msr);
+ new_msr = NULL;
continue;
}
convert_port_entry_to_object(
ipc_port_t port)
{
- vm_object_t object;
+ vm_object_t object = VM_OBJECT_NULL;
vm_named_entry_t named_entry;
- uint32_t try_failed_count = 0;
-
- if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
- while(TRUE) {
- ip_lock(port);
- if(ip_active(port) && (ip_kotype(port)
- == IKOT_NAMED_ENTRY)) {
- named_entry =
- (vm_named_entry_t)port->ip_kobject;
- if (!(lck_mtx_try_lock(&(named_entry)->Lock))) {
- ip_unlock(port);
-
- try_failed_count++;
- mutex_pause(try_failed_count);
- continue;
- }
- named_entry->ref_count++;
- lck_mtx_unlock(&(named_entry)->Lock);
+ uint32_t try_failed_count = 0;
+
+ if (IP_VALID(port) &&
+ (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
+ try_again:
+ ip_lock(port);
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
+ named_entry = (vm_named_entry_t)port->ip_kobject;
+ if (!(lck_mtx_try_lock(&(named_entry)->Lock))) {
ip_unlock(port);
- if ((!named_entry->is_sub_map) &&
- (!named_entry->is_pager) &&
- (named_entry->protection
- & VM_PROT_WRITE)) {
- object = named_entry->backing.object;
- } else {
- mach_destroy_memory_entry(port);
- return (vm_object_t)NULL;
- }
- vm_object_reference(named_entry->backing.object);
- mach_destroy_memory_entry(port);
- break;
+ try_failed_count++;
+ mutex_pause(try_failed_count);
+ goto try_again;
+ }
+ named_entry->ref_count++;
+ lck_mtx_unlock(&(named_entry)->Lock);
+ ip_unlock(port);
+ if (!(named_entry->is_sub_map) &&
+ !(named_entry->is_pager) &&
+ !(named_entry->is_copy) &&
+ (named_entry->protection & VM_PROT_WRITE)) {
+ object = named_entry->backing.object;
+ vm_object_reference(object);
}
- else
- return (vm_object_t)NULL;
+ mach_destroy_memory_entry(port);
}
- } else {
- return (vm_object_t)NULL;
}
return object;
return (is64 ? (vm_map_offset_t)MACH_VM_MAX_ADDRESS : (vm_map_offset_t)VM_MAX_ADDRESS);
}
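+/*
+ * Upper bound, in pages, on the ASLR slide for this map:
+ * 2^16 pages for a 64-bit map, 2^8 pages for a 32-bit map.
+ */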
+uint64_t
+vm_map_get_max_aslr_slide_pages(vm_map_t map)
+{
+ return (1 << (vm_map_is_64bit(map) ? 16 : 8));
+}
+
boolean_t
vm_map_is_64bit(
vm_map_t map)
}
boolean_t
-vm_map_has_4GB_pagezero(
- vm_map_t map)
+vm_map_has_hard_pagezero(
+ vm_map_t map,
+ vm_map_offset_t pagezero_size)
{
/*
* XXX FBDP
* VM map is being torn down, and when a new map is created via
* load_machfile()/execve().
*/
- return (map->min_offset >= 0x100000000ULL);
+ return (map->min_offset >= pagezero_size);
}
-void
-vm_map_set_4GB_pagezero(vm_map_t map)
+/*
+ * Raise a VM map's maximum offset.
+ */
+kern_return_t
+vm_map_raise_max_offset(
+ vm_map_t map,
+ vm_map_offset_t new_max_offset)
{
-#if defined(__i386__)
- pmap_set_4GB_pagezero(map->pmap);
-#else
-#pragma unused(map)
-#endif
+ kern_return_t ret;
-}
+ vm_map_lock(map);
+ ret = KERN_INVALID_ADDRESS;
-void
-vm_map_clear_4GB_pagezero(vm_map_t map)
-{
-#if defined(__i386__)
- pmap_clear_4GB_pagezero(map->pmap);
-#else
-#pragma unused(map)
-#endif
+ if (new_max_offset >= map->max_offset) {
+ if (!vm_map_is_64bit(map)) {
+ if (new_max_offset <= (vm_map_offset_t)VM_MAX_ADDRESS) {
+ map->max_offset = new_max_offset;
+ ret = KERN_SUCCESS;
+ }
+ } else {
+ if (new_max_offset <= (vm_map_offset_t)MACH_VM_MAX_ADDRESS) {
+ map->max_offset = new_max_offset;
+ ret = KERN_SUCCESS;
+ }
+ }
+ }
+
+ vm_map_unlock(map);
+ return ret;
}
+
/*
* Raise a VM map's minimum offset.
* To strictly enforce "page zero" reservation.
{
vm_map_entry_t first_entry;
- new_min_offset = vm_map_round_page(new_min_offset);
+ new_min_offset = vm_map_round_page(new_min_offset,
+ VM_MAP_PAGE_MASK(map));
vm_map_lock(map);
vm_map_unlock(map);
}
+/*
+ * IOKit has mapped a region into this map; adjust the pmap's ledgers appropriately.
+ * phys_footprint is a composite limit consisting of iokit + physmem, so we need to
+ * bump both counters.
+ */
+void
+vm_map_iokit_mapped_region(vm_map_t map, vm_size_t bytes)
+{
+ pmap_t pmap = vm_map_pmap(map);
+
+ ledger_credit(pmap->ledger, task_ledgers.iokit_mapped, bytes);
+ ledger_credit(pmap->ledger, task_ledgers.phys_footprint, bytes);
+}
+
+void
+vm_map_iokit_unmapped_region(vm_map_t map, vm_size_t bytes)
+{
+ pmap_t pmap = vm_map_pmap(map);
+
+ ledger_debit(pmap->ledger, task_ledgers.iokit_mapped, bytes);
+ ledger_debit(pmap->ledger, task_ledgers.phys_footprint, bytes);
+}
+
/* Add (generate) code signature for memory range */
#if CONFIG_DYNAMIC_CODE_SIGNING
kern_return_t vm_map_sign(vm_map_t map,
/* Pull the dirty status from the pmap, since we cleared the
* wpmapped bit */
if ((refmod & VM_MEM_MODIFIED) && !m->dirty) {
- m->dirty = TRUE;
+ SET_PAGE_DIRTY(m, FALSE);
}
/* On to the next page */
}
#endif
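+/*
+ * vm_map_partial_reap:
+ *
+ * Walk "map" and delete every entry whose VM object is internal and has a
+ * single reference, accumulating the counts of resident and compressed
+ * pages reclaimed.  A temporary "zap_map" collects the deleted entries so
+ * that "map" need not be unlocked inside vm_map_delete().
+ */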
+kern_return_t vm_map_partial_reap(vm_map_t map, unsigned int *reclaimed_resident, unsigned int *reclaimed_compressed)
+{
+ vm_map_entry_t entry = VM_MAP_ENTRY_NULL;
+ vm_map_entry_t next_entry;
+ kern_return_t kr = KERN_SUCCESS;
+ vm_map_t zap_map;
+
+ vm_map_lock(map);
+
+ /*
+ * We use a "zap_map" to avoid having to unlock
+ * the "map" in vm_map_delete().
+ */
+ zap_map = vm_map_create(PMAP_NULL,
+ map->min_offset,
+ map->max_offset,
+ map->hdr.entries_pageable);
+
+ if (zap_map == VM_MAP_NULL) {
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ vm_map_set_page_shift(zap_map,
+ VM_MAP_PAGE_SHIFT(map));
+
+ for (entry = vm_map_first_entry(map);
+ entry != vm_map_to_entry(map);
+ entry = next_entry) {
+ next_entry = entry->vme_next;
+
+ if (entry->object.vm_object && !entry->is_sub_map && (entry->object.vm_object->internal == TRUE)
+ && (entry->object.vm_object->ref_count == 1)) {
+
+ *reclaimed_resident += entry->object.vm_object->resident_page_count;
+ *reclaimed_compressed += vm_compressor_pager_get_count(entry->object.vm_object->pager);
+
+ (void)vm_map_delete(map,
+ entry->vme_start,
+ entry->vme_end,
+ VM_MAP_REMOVE_SAVE_ENTRIES,
+ zap_map);
+ }
+ }
+
+ vm_map_unlock(map);
+
+ /*
+ * Get rid of the "zap_map" and all the map entries that
+ * it may still contain.
+ */
+ if (zap_map != VM_MAP_NULL) {
+ vm_map_destroy(zap_map, VM_MAP_REMOVE_NO_PMAP_CLEANUP);
+ zap_map = VM_MAP_NULL;
+ }
+
+ return kr;
+}
+
#if CONFIG_FREEZE
kern_return_t vm_map_freeze_walk(
unsigned int *wired_count,
unsigned int *clean_count,
unsigned int *dirty_count,
+ unsigned int dirty_budget,
boolean_t *has_shared)
{
vm_map_entry_t entry;
continue;
}
- vm_object_pack(&purgeable, &wired, &clean, &dirty, &shared, entry->object.vm_object, VM_OBJECT_NULL, NULL, NULL);
+ default_freezer_pack(&purgeable, &wired, &clean, &dirty, dirty_budget, &shared, entry->object.vm_object, NULL);
*purgeable_count += purgeable;
*wired_count += wired;
if (shared) {
*has_shared = TRUE;
}
+
+ /* Adjust pageout budget and finish up if reached */
+ if (dirty_budget) {
+ dirty_budget -= dirty;
+ if (dirty_budget == 0) {
+ break;
+ }
+ }
}
vm_map_unlock_read(map);
unsigned int *wired_count,
unsigned int *clean_count,
unsigned int *dirty_count,
+ unsigned int dirty_budget,
boolean_t *has_shared)
{
- vm_map_entry_t entry2 = VM_MAP_ENTRY_NULL;
- vm_object_t compact_object = VM_OBJECT_NULL;
- vm_object_offset_t offset = 0x0;
- kern_return_t kr = KERN_SUCCESS;
- void *default_freezer_toc = NULL;
- boolean_t cleanup = FALSE;
+ vm_map_entry_t entry2 = VM_MAP_ENTRY_NULL;
+ kern_return_t kr = KERN_SUCCESS;
+ boolean_t default_freezer_active = TRUE;
*purgeable_count = *wired_count = *clean_count = *dirty_count = 0;
*has_shared = FALSE;
- /* Create our compact object */
- compact_object = vm_object_allocate((vm_map_offset_t)(VM_MAX_ADDRESS) - (vm_map_offset_t)(VM_MIN_ADDRESS));
- if (!compact_object) {
- kr = KERN_FAILURE;
- goto done;
- }
-
- default_freezer_toc = default_freezer_mapping_create(compact_object, offset);
- if (!default_freezer_toc) {
- kr = KERN_FAILURE;
- goto done;
- }
-
/*
* We need the exclusive lock here so that we can
* block any page faults or lookups while we are
*/
vm_map_lock(map);
- if (map->default_freezer_toc != NULL){
- /*
- * This map has already been frozen.
- */
- cleanup = TRUE;
- kr = KERN_SUCCESS;
- goto done;
+ if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
+ default_freezer_active = FALSE;
}
-
- /* Get a mapping in place for the freezing about to commence */
- map->default_freezer_toc = default_freezer_toc;
-
- vm_object_lock(compact_object);
-
+
+ if (default_freezer_active) {
+ if (map->default_freezer_handle == NULL) {
+ map->default_freezer_handle = default_freezer_handle_allocate();
+ }
+
+ if ((kr = default_freezer_handle_init(map->default_freezer_handle)) != KERN_SUCCESS) {
+ /*
+ * This can happen if the default_freezer_handle passed in is NULL,
+ * or if a table has already been allocated and associated
+ * with this handle, i.e. the map is already frozen.
+ */
+ goto done;
+ }
+ }
+
for (entry2 = vm_map_first_entry(map);
entry2 != vm_map_to_entry(map);
entry2 = entry2->vme_next) {
vm_object_t src_object = entry2->object.vm_object;
- /* If eligible, scan the entry, moving eligible pages over to our parent object */
if (entry2->object.vm_object && !entry2->is_sub_map && !entry2->object.vm_object->phys_contiguous) {
- unsigned int purgeable, clean, dirty, wired;
- boolean_t shared;
-
- vm_object_pack(&purgeable, &wired, &clean, &dirty, &shared,
- src_object, compact_object, &default_freezer_toc, &offset);
-
- *purgeable_count += purgeable;
- *wired_count += wired;
- *clean_count += clean;
- *dirty_count += dirty;
+ /* If eligible, scan the entry, moving eligible pages over to our parent object */
+ if (default_freezer_active) {
+ unsigned int purgeable, clean, dirty, wired;
+ boolean_t shared;
+
+ default_freezer_pack(&purgeable, &wired, &clean, &dirty, dirty_budget, &shared,
+ src_object, map->default_freezer_handle);
+
+ *purgeable_count += purgeable;
+ *wired_count += wired;
+ *clean_count += clean;
+ *dirty_count += dirty;
+
+ /* Adjust pageout budget and finish up if reached */
+ if (dirty_budget) {
+ dirty_budget -= dirty;
+ if (dirty_budget == 0) {
+ break;
+ }
+ }
- if (shared) {
- *has_shared = TRUE;
+ if (shared) {
+ *has_shared = TRUE;
+ }
+ } else {
+ /*
+ * To the compressor.
+ */
+ if (entry2->object.vm_object->internal == TRUE) {
+ vm_object_pageout(entry2->object.vm_object);
+ }
}
}
}
- vm_object_unlock(compact_object);
-
- /* Finally, throw out the pages to swap */
- vm_object_pageout(compact_object);
+ if (default_freezer_active) {
+ /* Finally, throw out the pages to swap */
+ default_freezer_pageout(map->default_freezer_handle);
+ }
done:
vm_map_unlock(map);
-
- /* Unwind if there was a failure */
- if ((cleanup) || (KERN_SUCCESS != kr)) {
- if (default_freezer_toc){
- default_freezer_mapping_free(&map->default_freezer_toc, TRUE);
- }
- if (compact_object){
- vm_object_deallocate(compact_object);
- }
- }
return kr;
}
-__private_extern__ vm_object_t default_freezer_get_compact_vm_object( void** );
-
-void
+kern_return_t
vm_map_thaw(
vm_map_t map)
{
- void **default_freezer_toc;
- vm_object_t compact_object;
+ kern_return_t kr = KERN_SUCCESS;
+
+ if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
+ /*
+ * We thaw pages on demand when the compressed pager is active.
+ */
+ return kr;
+ }
vm_map_lock(map);
- if (map->default_freezer_toc == NULL){
+ if (map->default_freezer_handle == NULL) {
/*
* This map is not in a frozen state.
*/
+ kr = KERN_FAILURE;
goto out;
}
-
- default_freezer_toc = &(map->default_freezer_toc);
-
- compact_object = default_freezer_get_compact_vm_object(default_freezer_toc);
-
- /* Bring the pages back in */
- vm_object_pagein(compact_object);
-
- /* Shift pages back to their original objects */
- vm_object_unpack(compact_object, default_freezer_toc);
- vm_object_deallocate(compact_object);
-
- map->default_freezer_toc = NULL;
-
+ kr = default_freezer_unpack(map->default_freezer_handle);
out:
vm_map_unlock(map);
+
+ return kr;
+}
+#endif
+
+/*
+ * vm_map_entry_should_cow_for_true_share:
+ *
+ * Determines if the map entry should be clipped and set up for copy-on-write
+ * to avoid applying "true_share" to a large VM object when only a subset is
+ * targeted.
+ *
+ * For now, we target only the map entries created for the Objective C
+ * Garbage Collector, which initially have the following properties:
+ * - alias == VM_MEMORY_MALLOC
+ * - wired_count == 0
+ * - !needs_copy
+ * and a VM object with:
+ * - internal
+ * - copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC
+ * - !true_share
+ * - vo_size == ANON_CHUNK_SIZE
+ */
+boolean_t
+vm_map_entry_should_cow_for_true_share(
+ vm_map_entry_t entry)
+{
+ vm_object_t object;
+
+ if (entry->is_sub_map) {
+ /* entry does not point at a VM object */
+ return FALSE;
+ }
+
+ if (entry->needs_copy) {
+ /* already set for copy_on_write: done! */
+ return FALSE;
+ }
+
+ if (entry->alias != VM_MEMORY_MALLOC &&
+ entry->alias != VM_MEMORY_MALLOC_SMALL) {
+ /* not a malloc heap or Obj-C Garbage Collector heap */
+ return FALSE;
+ }
+
+ if (entry->wired_count) {
+ /* wired: can't change the map entry... */
+ vm_counters.should_cow_but_wired++;
+ return FALSE;
+ }
+
+ object = entry->object.vm_object;
+
+ if (object == VM_OBJECT_NULL) {
+ /* no object yet... */
+ return FALSE;
+ }
+
+ if (!object->internal) {
+ /* not an internal object */
+ return FALSE;
+ }
+
+ if (object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC) {
+ /* not the default copy strategy */
+ return FALSE;
+ }
+
+ if (object->true_share) {
+ /* already true_share: too late to avoid it */
+ return FALSE;
+ }
+
+ if (entry->alias == VM_MEMORY_MALLOC &&
+ object->vo_size != ANON_CHUNK_SIZE) {
+ /* ... not an object created for the ObjC Garbage Collector */
+ return FALSE;
+ }
+
+ if (entry->alias == VM_MEMORY_MALLOC_SMALL &&
+ object->vo_size != 2048 * 4096) {
+ /* ... not a "MALLOC_SMALL" heap */
+ return FALSE;
+ }
+
+ /*
+ * All the criteria match: we have a large object being targeted for "true_share".
+ * To limit the adverse side effects of "true_share", tell the caller to
+ * try to avoid setting up the entire object for "true_share" by clipping the
+ * targeted range and setting it up for copy-on-write.
+ */
+ return TRUE;
+}
+
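+/*
+ * Page-geometry accessors: round or truncate an offset with an explicit
+ * page mask, and report a map's page shift, size and mask.
+ */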
+vm_map_offset_t
+vm_map_round_page_mask(
+ vm_map_offset_t offset,
+ vm_map_offset_t mask)
+{
+ return VM_MAP_ROUND_PAGE(offset, mask);
+}
+
+vm_map_offset_t
+vm_map_trunc_page_mask(
+ vm_map_offset_t offset,
+ vm_map_offset_t mask)
+{
+ return VM_MAP_TRUNC_PAGE(offset, mask);
+}
+
+int
+vm_map_page_shift(
+ vm_map_t map)
+{
+ return VM_MAP_PAGE_SHIFT(map);
+}
+
+int
+vm_map_page_size(
+ vm_map_t map)
+{
+ return VM_MAP_PAGE_SIZE(map);
+}
+
+int
+vm_map_page_mask(
+ vm_map_t map)
+{
+ return VM_MAP_PAGE_MASK(map);
}
+
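+/*
+ * Change a map's page shift.  Only permitted while the map is still empty;
+ * once entries exist, the page size can no longer be changed.
+ */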
+kern_return_t
+vm_map_set_page_shift(
+ vm_map_t map,
+ int pageshift)
+{
+ if (map->hdr.nentries != 0) {
+ /* too late to change page size */
+ return KERN_FAILURE;
+ }
+
+ map->hdr.page_shift = pageshift;
+
+ return KERN_SUCCESS;
+}
+
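+/*
+ * vm_map_purge:
+ *
+ * Force every writable, volatile purgeable object mapped in "map" into the
+ * EMPTY state, discarding its pages.  Returns the number of objects purged.
+ */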
+int
+vm_map_purge(
+ vm_map_t map)
+{
+ int num_object_purged;
+ vm_map_entry_t entry;
+ vm_map_offset_t next_address;
+ vm_object_t object;
+ int state;
+ kern_return_t kr;
+
+ num_object_purged = 0;
+
+ vm_map_lock_read(map);
+ entry = vm_map_first_entry(map);
+ while (entry != vm_map_to_entry(map)) {
+ if (entry->is_sub_map) {
+ goto next;
+ }
+ if (! (entry->protection & VM_PROT_WRITE)) {
+ goto next;
+ }
+ object = entry->object.vm_object;
+ if (object == VM_OBJECT_NULL) {
+ goto next;
+ }
+ if (object->purgable != VM_PURGABLE_VOLATILE) {
+ goto next;
+ }
+
+ vm_object_lock(object);
+#if 00
+ if (entry->offset != 0 ||
+ (entry->vme_end - entry->vme_start) != object->vo_size) {
+ vm_object_unlock(object);
+ goto next;
+ }
#endif
+ next_address = entry->vme_end;
+ vm_map_unlock_read(map);
+ state = VM_PURGABLE_EMPTY;
+ kr = vm_object_purgable_control(object,
+ VM_PURGABLE_SET_STATE,
+ &state);
+ if (kr == KERN_SUCCESS) {
+ num_object_purged++;
+ }
+ vm_object_unlock(object);
+
+ vm_map_lock_read(map);
+ if (vm_map_lookup_entry(map, next_address, &entry)) {
+ continue;
+ }
+ next:
+ entry = entry->vme_next;
+ }
+ vm_map_unlock_read(map);
+
+ return num_object_purged;
+}
+
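+/*
+ * vm_map_query_volatile:
+ *
+ * Sum the virtual, resident and pmap-resident sizes of all writable,
+ * volatile purgeable mappings in "map".  The map must be locked by the
+ * caller and is still locked on return.
+ */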
+kern_return_t
+vm_map_query_volatile(
+ vm_map_t map,
+ mach_vm_size_t *volatile_virtual_size_p,
+ mach_vm_size_t *volatile_resident_size_p,
+ mach_vm_size_t *volatile_pmap_size_p)
+{
+ mach_vm_size_t volatile_virtual_size;
+ mach_vm_size_t volatile_resident_count;
+ mach_vm_size_t volatile_pmap_count;
+ mach_vm_size_t resident_count;
+ vm_map_entry_t entry;
+ vm_object_t object;
+
+ /* map should be locked by caller */
+
+ volatile_virtual_size = 0;
+ volatile_resident_count = 0;
+ volatile_pmap_count = 0;
+
+ for (entry = vm_map_first_entry(map);
+ entry != vm_map_to_entry(map);
+ entry = entry->vme_next) {
+ if (entry->is_sub_map) {
+ continue;
+ }
+ if (! (entry->protection & VM_PROT_WRITE)) {
+ continue;
+ }
+ object = entry->object.vm_object;
+ if (object == VM_OBJECT_NULL) {
+ continue;
+ }
+ if (object->purgable != VM_PURGABLE_VOLATILE) {
+ continue;
+ }
+ if (entry->offset != 0) {
+ /*
+ * If the map entry has been split and the object now
+ * appears several times in the VM map, we don't want
+ * to count the object's resident_page_count more than
+ * once. We count it only for the first entry, which starts
+ * at offset 0, and ignore the other VM map entries.
+ */
+ continue;
+ }
+ resident_count = object->resident_page_count;
+ if ((entry->offset / PAGE_SIZE) >= resident_count) {
+ resident_count = 0;
+ } else {
+ resident_count -= (entry->offset / PAGE_SIZE);
+ }
+
+ volatile_virtual_size += entry->vme_end - entry->vme_start;
+ volatile_resident_count += resident_count;
+ volatile_pmap_count += pmap_query_resident(map->pmap,
+ entry->vme_start,
+ entry->vme_end);
+ }
+
+ /* map is still locked on return */
+
+ *volatile_virtual_size_p = volatile_virtual_size;
+ *volatile_resident_size_p = volatile_resident_count * PAGE_SIZE;
+ *volatile_pmap_size_p = volatile_pmap_count * PAGE_SIZE;
+
+ return KERN_SUCCESS;
+}
+
+#if VM_SCAN_FOR_SHADOW_CHAIN
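+/*
+ * vm_map_shadow_max:
+ *
+ * Debugging/statistics helper: walk every object mapped in "map" and
+ * return the length of the longest shadow chain found.
+ */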
+int vm_map_shadow_max(vm_map_t map);
+int vm_map_shadow_max(
+ vm_map_t map)
+{
+ int shadows, shadows_max;
+ vm_map_entry_t entry;
+ vm_object_t object, next_object;
+
+ if (map == NULL)
+ return 0;
+
+ shadows_max = 0;
+
+ vm_map_lock_read(map);
+
+ for (entry = vm_map_first_entry(map);
+ entry != vm_map_to_entry(map);
+ entry = entry->vme_next) {
+ if (entry->is_sub_map) {
+ continue;
+ }
+ object = entry->object.vm_object;
+ if (object == NULL) {
+ continue;
+ }
+ vm_object_lock_shared(object);
+ for (shadows = 0;
+ object->shadow != NULL;
+ shadows++, object = next_object) {
+ next_object = object->shadow;
+ vm_object_lock_shared(next_object);
+ vm_object_unlock(object);
+ }
+ vm_object_unlock(object);
+ if (shadows > shadows_max) {
+ shadows_max = shadows;
+ }
+ }
+
+ vm_map_unlock_read(map);
+
+ return shadows_max;
+}
+#endif /* VM_SCAN_FOR_SHADOW_CHAIN */