/*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
extern void vm_map_reference(vm_map_t map);
extern vm_map_t current_map(void);
+/* Set up reserved areas in a new VM map */
+extern kern_return_t vm_map_exec(
+ vm_map_t new_map,
+ task_t task,
+ void *fsroot,
+ cpu_type_t cpu);
+
__END_DECLS
#ifdef MACH_KERNEL_PRIVATE
#define current_map_fast() (current_thread()->map)
#define current_map() (current_map_fast())
+#include <vm/vm_map_store.h>
+
+
/*
* Types defined:
*
vm_map_t sub_map; /* belongs to another map */
} vm_map_object_t;
-#define named_entry_lock_init(object) mutex_init(&(object)->Lock, 0)
-#define named_entry_lock(object) mutex_lock(&(object)->Lock)
-#define named_entry_unlock(object) mutex_unlock(&(object)->Lock)
+#define named_entry_lock_init(object) lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
+#define named_entry_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
+#define named_entry_lock(object) lck_mtx_lock(&(object)->Lock)
+#define named_entry_unlock(object) lck_mtx_unlock(&(object)->Lock)
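As a usage illustration, a minimal sketch (the helper name is hypothetical) of serializing access to a named entry's reference count with the wrappers above:

/* Sketch: take a reference on a named entry under its lock. */
static void
named_entry_reference(vm_named_entry_t named_entry)
{
	named_entry_lock(named_entry);
	named_entry->ref_count++;
	named_entry_unlock(named_entry);
}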
/*
* Type: vm_named_entry_t [internal use only]
*/
struct vm_named_entry {
- decl_mutex_data(, Lock) /* Synchronization */
+ decl_lck_mtx_data(, Lock) /* Synchronization */
union {
vm_object_t object; /* object I point to */
memory_object_t pager; /* abstract memory object (amo) pager port */
vm_map_t map; /* map backing submap */
+ vm_map_copy_t copy; /* a VM map copy */
} backing;
vm_object_offset_t offset; /* offset into object */
vm_object_size_t size; /* size of region */
+ vm_object_offset_t data_offset; /* offset to first byte of data */
vm_prot_t protection; /* access permissions */
int ref_count; /* Number of references */
unsigned int /* Is backing.xxx : */
/* boolean_t */ internal:1, /* ... an internal object */
/* boolean_t */ is_sub_map:1, /* ... a submap? */
- /* boolean_t */ is_pager:1; /* ... a pager port */
+ /* boolean_t */ is_pager:1, /* ... a pager port */
+ /* boolean_t */ is_copy:1; /* ... a VM map copy */
};
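Because "backing" is a union, the is_sub_map/is_pager/is_copy bits decide which member is live. A hedged sketch of the access discipline (the helper name is hypothetical):

/* Sketch: only read backing.copy when the is_copy bit says so. */
static vm_map_copy_t
named_entry_copy(vm_named_entry_t named_entry)
{
	assert(named_entry->is_copy);
	assert(!named_entry->is_sub_map && !named_entry->is_pager);
	return named_entry->backing.copy;
}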
/*
* Control information for virtual copy operations is also
* stored in the address map entry.
*/
+
struct vm_map_links {
struct vm_map_entry *prev; /* previous entry */
struct vm_map_entry *next; /* next entry */
#define vme_next links.next
#define vme_start links.start
#define vme_end links.end
+
+ struct vm_map_store store;
union vm_map_object object; /* object I point to */
vm_object_offset_t offset; /* offset into object */
unsigned int
/* vm_prot_t */ max_protection:3,/* maximum protection */
/* vm_inherit_t */ inheritance:2, /* inheritance */
/* boolean_t */ use_pmap:1, /* nested pmaps */
+ /*
+ * IMPORTANT:
+ * The "alias" field can be updated while holding the VM map lock
+ * "shared". It's OK as along as it's the only field that can be
+ * updated without the VM map "exclusive" lock.
+ */
/* unsigned char */ alias:8, /* user alias */
- /* unsigned char */ pad:8; /* available bits */
+ /* boolean_t */ no_cache:1, /* should new pages be cached? */
+ /* boolean_t */ permanent:1, /* mapping cannot be removed */
+ /* boolean_t */ superpage_size:1, /* use superpages of a certain size */
+ /* boolean_t */ map_aligned:1, /* align to map's page size */
+ /* boolean_t */ zero_wired_pages:1, /* zero out the wired pages of this entry if it is being deleted without unwiring them */
+ /* boolean_t */ used_for_jit:1, /* mapping was created for JIT */
+ /* boolean_t */ from_reserved_zone:1, /* allocated from the
+ * kernel reserved zone */
+ __unused_bits:1;
unsigned short wired_count; /* can be paged if = 0 */
unsigned short user_wired_count; /* for vm_wire */
+#if DEBUG
+#define MAP_ENTRY_CREATION_DEBUG (1)
+#define MAP_ENTRY_INSERTION_DEBUG (1)
+#endif /* DEBUG */
+#if MAP_ENTRY_CREATION_DEBUG
+ struct vm_map_header *vme_creation_maphdr;
+ uintptr_t vme_creation_bt[16];
+#endif
+#if MAP_ENTRY_INSERTION_DEBUG
+ uintptr_t vme_insertion_bt[16];
+#endif
};
+/*
+ * Convenience macros for dealing with superpages
+ * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
+ */
+#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
+#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
+#define SUPERPAGE_ROUND_DOWN(a) ((a) & SUPERPAGE_MASK)
+#define SUPERPAGE_ROUND_UP(a) (((a) + SUPERPAGE_SIZE - 1) & SUPERPAGE_MASK)
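For concreteness, a small sketch (hypothetical helper) that widens a byte range outward to whole superpages using these macros:

/* Sketch: compute superpage-aligned bounds for [addr, addr + size). */
static void
superpage_bounds(vm_map_offset_t addr, vm_map_size_t size,
    vm_map_offset_t *startp, vm_map_offset_t *endp)
{
	*startp = SUPERPAGE_ROUND_DOWN(addr);
	*endp = SUPERPAGE_ROUND_UP(addr + size);
}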
+
/*
* wired_counts are unsigned short. This value is used to safeguard
* against any mishaps due to runaway user programs.
* Description:
* Header for a vm_map and a vm_map_copy.
*/
+
+
struct vm_map_header {
struct vm_map_links links; /* first, last, min, max */
int nentries; /* Number of entries */
boolean_t entries_pageable;
/* are map entries pageable? */
+ vm_map_offset_t highest_entry_end_addr; /* The ending address of the highest allocated vm_map_entry_t */
+#ifdef VM_MAP_STORE_USE_RB
+ struct rb_head rb_head_store;
+#endif
+ int page_shift; /* page shift */
};
+#define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift)
+#define VM_MAP_HDR_PAGE_SIZE(hdr) (1 << VM_MAP_HDR_PAGE_SHIFT((hdr)))
+#define VM_MAP_HDR_PAGE_MASK(hdr) (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1)
+
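A minimal sketch (hypothetical helper) of an alignment check against the page size recorded in a map header:

/* Sketch: is "offset" aligned to the header's page size? */
static boolean_t
hdr_offset_aligned(struct vm_map_header *hdr, vm_map_offset_t offset)
{
	return (boolean_t)((offset & VM_MAP_HDR_PAGE_MASK(hdr)) == 0);
}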
/*
* Type: vm_map_t [exported; contents invisible]
*
* insertion, or removal. Another hint is used to
* quickly find free space.
*/
-struct vm_map {
+struct _vm_map {
lock_t lock; /* uni- and smp-lock */
struct vm_map_header hdr; /* Map entry header */
#define min_offset hdr.links.start /* start of range */
#define max_offset hdr.links.end /* end of range */
+#define highest_entry_end hdr.highest_entry_end_addr
pmap_t pmap; /* Physical map */
vm_map_size_t size; /* virtual size */
+ vm_map_size_t user_wire_limit;/* rlimit on user locked memory */
+ vm_map_size_t user_wire_size; /* current size of user locked memory in this map */
int ref_count; /* Reference count */
#if TASK_SWAPPER
int res_count; /* Residence count (swap) */
int sw_state; /* Swap state */
#endif /* TASK_SWAPPER */
- decl_mutex_data(, s_lock) /* Lock ref, res, hint fields */
+ decl_lck_mtx_data(, s_lock) /* Lock ref, res fields */
+ lck_mtx_ext_t s_lock_ext;
vm_map_entry_t hint; /* hint for quick lookups */
vm_map_entry_t first_free; /* First free space hint */
- boolean_t wait_for_space; /* Should callers wait
- for space? */
- boolean_t wiring_required;/* All memory wired? */
- boolean_t no_zero_fill; /* No zero fill absent pages */
- boolean_t mapped; /* has this map been mapped */
+ unsigned int
+ /* boolean_t */ wait_for_space:1, /* Should callers wait for space? */
+ /* boolean_t */ wiring_required:1, /* All memory wired? */
+ /* boolean_t */ no_zero_fill:1, /* No zero fill absent pages */
+ /* boolean_t */ mapped_in_other_pmaps:1, /* has this submap been mapped in maps that use a different pmap */
+ /* boolean_t */ switch_protect:1, /* Protect map from write faults while switched */
+ /* boolean_t */ disable_vmentry_reuse:1, /* All vm entries should keep using newer and higher addresses in the map */
+ /* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
+ /* reserved */ pad:25;
unsigned int timestamp; /* Version number */
+ unsigned int color_rr; /* next color (not protected by a lock) */
+#if CONFIG_FREEZE
+ void *default_freezer_handle;
+#endif
+ boolean_t jit_entry_exists;
} ;
#define vm_map_to_entry(map) ((struct vm_map_entry *) &(map)->hdr.links)
#define cpy_kdata c_u.c_k.kdata
#define cpy_kalloc_size c_u.c_k.kalloc_size
+#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
+#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
+#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)
/*
* Useful macros for entry list copy objects
#define vm_map_unlock_read(map) lock_read_done(&(map)->lock)
#define vm_map_lock_write_to_read(map) \
((map)->timestamp++ , lock_write_to_read(&(map)->lock))
-#define vm_map_lock_read_to_write(map) lock_read_to_write(&(map)->lock)
+/* lock_read_to_write() returns FALSE on failure, so this macro
+ * evaluates to zero on success and non-zero on failure.
+ */
+#define vm_map_lock_read_to_write(map) (lock_read_to_write(&(map)->lock) != TRUE)
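The non-zero-on-failure convention implies a retry pattern like the following sketch (hypothetical helper); on a failed upgrade the read lock has already been dropped, so the caller must retake the lock and revalidate, typically against the map's timestamp:

/* Sketch: upgrade a read lock to a write lock, or relock exclusively. */
static void
upgrade_or_relock(vm_map_t map)
{
	if (vm_map_lock_read_to_write(map)) {
		/* Upgrade failed and the read lock was lost; the map
		 * may have changed, so retake it exclusively. Callers
		 * revalidate cached state against map->timestamp. */
		vm_map_lock(map);
	}
}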
/*
* Exported procedures that operate on vm_map_t.
/* Initialize the module */
extern void vm_map_init(void);
+extern void vm_kernel_reserved_entry_init(void);
+
/* Allocate a range in the specified virtual address map and
* return the entry allocated for that range. */
extern kern_return_t vm_map_find_space(
vm_map_address_t *address, /* OUT */
vm_map_size_t size,
vm_map_offset_t mask,
+ int flags,
vm_map_entry_t *o_entry); /* OUT */
+extern void vm_map_clip_start(
+ vm_map_t map,
+ vm_map_entry_t entry,
+ vm_map_offset_t startaddr);
+extern void vm_map_clip_end(
+ vm_map_t map,
+ vm_map_entry_t entry,
+ vm_map_offset_t endaddr);
+extern boolean_t vm_map_entry_should_cow_for_true_share(
+ vm_map_entry_t entry);
+
/* Lookup the map entry containing (or immediately preceding) the specified address in the given map */
extern boolean_t vm_map_lookup_entry(
vm_map_t map,
vm_map_address_t address,
vm_map_entry_t *entry); /* OUT */
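A short usage sketch (hypothetical helper): on a TRUE return the out entry contains the address; on FALSE it is the entry immediately preceding the address, if any:

/* Sketch: is "addr" covered by some entry in "map"? */
static boolean_t
addr_is_mapped(vm_map_t map, vm_map_address_t addr)
{
	vm_map_entry_t entry;
	boolean_t found;

	vm_map_lock_read(map);
	found = vm_map_lookup_entry(map, addr, &entry);
	vm_map_unlock_read(map);
	return found;
}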
+extern void vm_map_copy_remap(
+ vm_map_t map,
+ vm_map_entry_t where,
+ vm_map_copy_t copy,
+ vm_map_offset_t adjustment,
+ vm_prot_t cur_prot,
+ vm_prot_t max_prot,
+ vm_inherit_t inheritance);
+
/* Find the VM object, offset, and protection for a given virtual address
* in the specified map, assuming a page fault of the type specified. */
extern kern_return_t vm_map_lookup_locked(
vm_map_t *var_map, /* IN/OUT */
vm_map_address_t vaddr,
vm_prot_t fault_type,
+ int object_lock_type,
vm_map_version_t *out_version, /* OUT */
vm_object_t *object, /* OUT */
vm_object_offset_t *offset, /* OUT */
vm_prot_t *out_prot, /* OUT */
boolean_t *wired, /* OUT */
- int *behavior, /* OUT */
- vm_map_offset_t *lo_offset, /* OUT */
- vm_map_offset_t *hi_offset, /* OUT */
+ vm_object_fault_info_t fault_info, /* OUT */
vm_map_t *real_map); /* OUT */
/* Verifies that the map has not changed since the given version. */
vm_prot_t max_protection,
vm_behavior_t behavior,
vm_inherit_t inheritance,
- unsigned wired_count);
+ unsigned wired_count,
+ boolean_t no_cache,
+ boolean_t permanent,
+ unsigned int superpage_size,
+ boolean_t clear_map_aligned);
/*
MACRO_BEGIN \
vm_map_t Map = (map); \
if (Map) { \
- mutex_lock(&Map->s_lock); \
+ lck_mtx_lock(&Map->s_lock); \
Map->res_count++; \
Map->ref_count++; \
- mutex_unlock(&Map->s_lock); \
+ lck_mtx_unlock(&Map->s_lock); \
} \
MACRO_END
MACRO_BEGIN \
vm_map_t Lmap = (map); \
if (Lmap->res_count == 0) { \
- mutex_unlock(&Lmap->s_lock);\
+ lck_mtx_unlock(&Lmap->s_lock);\
vm_map_lock(Lmap); \
vm_map_swapin(Lmap); \
- mutex_lock(&Lmap->s_lock); \
+ lck_mtx_lock(&Lmap->s_lock); \
++Lmap->res_count; \
vm_map_unlock(Lmap); \
} else \
MACRO_BEGIN \
vm_map_t Map = (map); \
if (--Map->res_count == 0) { \
- mutex_unlock(&Map->s_lock); \
+ lck_mtx_unlock(&Map->s_lock); \
vm_map_lock(Map); \
vm_map_swapout(Map); \
vm_map_unlock(Map); \
- mutex_lock(&Map->s_lock); \
+ lck_mtx_lock(&Map->s_lock); \
} \
MACRO_END
#define vm_map_reference_swap(map) \
MACRO_BEGIN \
vm_map_t Map = (map); \
- mutex_lock(&Map->s_lock); \
+ lck_mtx_lock(&Map->s_lock); \
++Map->ref_count; \
vm_map_res_reference(Map); \
- mutex_unlock(&Map->s_lock); \
+ lck_mtx_unlock(&Map->s_lock); \
MACRO_END
#endif /* MACH_ASSERT */
MACRO_BEGIN \
vm_map_t Map = (map); \
if (Map) { \
- mutex_lock(&Map->s_lock); \
+ lck_mtx_lock(&Map->s_lock); \
Map->ref_count++; \
- mutex_unlock(&Map->s_lock); \
+ lck_mtx_unlock(&Map->s_lock); \
} \
MACRO_END
#define vm_map_ref_fast(map) \
MACRO_BEGIN \
- mutex_lock(&map->s_lock); \
+ lck_mtx_lock(&map->s_lock); \
map->ref_count++; \
vm_map_res_reference(map); \
- mutex_unlock(&map->s_lock); \
+ lck_mtx_unlock(&map->s_lock); \
MACRO_END
#define vm_map_dealloc_fast(map) \
MACRO_BEGIN \
register int c; \
\
- mutex_lock(&map->s_lock); \
+ lck_mtx_lock(&map->s_lock); \
c = --map->ref_count; \
if (c > 0) \
vm_map_res_deallocate(map); \
- mutex_unlock(&map->s_lock); \
+ lck_mtx_unlock(&map->s_lock); \
if (c == 0) \
vm_map_destroy(map); \
MACRO_END
vm_object_size_t size,
vm_map_copy_t *copy_result); /* OUT */
+extern kern_return_t vm_map_random_address_for_size(
+ vm_map_t map,
+ vm_map_offset_t *address,
+ vm_map_size_t size);
+
/* Enter a mapping */
extern kern_return_t vm_map_enter(
vm_map_t map,
vm_map_offset_t *address,
vm_map_size_t size,
vm_map_offset_t mask,
- boolean_t anywhere,
+ int flags,
vm_map_t src_map,
vm_map_offset_t memory_address,
boolean_t copy,
/* Create a new task map using an existing task map as a template. */
extern vm_map_t vm_map_fork(
+ ledger_t ledger,
vm_map_t old_map);
/* Change inheritance */
vm_region_submap_info_64_t info,
mach_msg_type_number_t *count);
-extern kern_return_t vm_map_page_info(
+extern kern_return_t vm_map_page_query_internal(
vm_map_t map,
vm_map_offset_t offset,
int *disposition,
int *ref_count);
+extern kern_return_t vm_map_query_volatile(
+ vm_map_t map,
+ mach_vm_size_t *volatile_virtual_size_p,
+ mach_vm_size_t *volatile_resident_size_p,
+ mach_vm_size_t *volatile_pmap_size_p);
+
extern kern_return_t vm_map_submap(
vm_map_t map,
vm_map_offset_t start,
ipc_port_t port);
+extern kern_return_t vm_map_set_cache_attr(
+ vm_map_t map,
+ vm_map_offset_t va);
+
+
+/* definitions related to overriding the NX behavior */
+
+#define VM_ABI_32 0x1
+#define VM_ABI_64 0x2
+
+extern int override_nx(vm_map_t map, uint32_t user_tag);
+
#endif /* MACH_KERNEL_PRIVATE */
__BEGIN_DECLS
/* Get rid of a map */
extern void vm_map_destroy(
- vm_map_t map);
+ vm_map_t map,
+ int flags);
+
/* Lose a reference */
extern void vm_map_deallocate(
vm_map_t map);
vm_map_offset_t end,
boolean_t user_wire);
+/* Enter a mapping of a memory object */
+extern kern_return_t vm_map_enter_mem_object(
+ vm_map_t map,
+ vm_map_offset_t *address,
+ vm_map_size_t size,
+ vm_map_offset_t mask,
+ int flags,
+ ipc_port_t port,
+ vm_object_offset_t offset,
+ boolean_t needs_copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance);
+
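As an illustration of the full parameter list, a hedged sketch (hypothetical helper; flags and protections chosen arbitrarily) that maps a memory object anywhere in a map:

/* Sketch: map "size" bytes of the object named by "port" at a
 * kernel-chosen address, read-only now, read/write at most. */
static kern_return_t
map_object_anywhere(vm_map_t map, ipc_port_t port,
    vm_object_offset_t offset, vm_map_size_t size,
    vm_map_offset_t *addrp)
{
	*addrp = 0;
	return vm_map_enter_mem_object(map, addrp, size,
	    (vm_map_offset_t)0,	/* mask: no alignment constraint */
	    VM_FLAGS_ANYWHERE,	/* let the kernel pick the address */
	    port, offset,
	    FALSE,		/* needs_copy */
	    VM_PROT_READ,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
}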
+/* Enter a mapping of a memory object */
+extern kern_return_t vm_map_enter_mem_object_control(
+ vm_map_t map,
+ vm_map_offset_t *address,
+ vm_map_size_t size,
+ vm_map_offset_t mask,
+ int flags,
+ memory_object_control_t control,
+ vm_object_offset_t offset,
+ boolean_t needs_copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance);
+
/* Deallocate a region */
extern kern_return_t vm_map_remove(
vm_map_t map,
vm_map_t dst_map,
vm_map_address_t dst_addr,
vm_map_copy_t copy,
- int interruptible);
+ boolean_t interruptible);
/* Place a copy into a map */
extern kern_return_t vm_map_copyout(
vm_map_address_t *dst_addr, /* OUT */
vm_map_copy_t copy);
+extern kern_return_t vm_map_copyout_internal(
+ vm_map_t dst_map,
+ vm_map_address_t *dst_addr, /* OUT */
+ vm_map_copy_t copy,
+ boolean_t consume_on_success,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance);
+
+extern kern_return_t vm_map_copyin(
+ vm_map_t src_map,
+ vm_map_address_t src_addr,
+ vm_map_size_t len,
+ boolean_t src_destroy,
+ vm_map_copy_t *copy_result); /* OUT */
+
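A hedged sketch pairing vm_map_copyin() with vm_map_copyout() (hypothetical helper; assumes vm_map_copy_discard(), which this header declares elsewhere, for cleanup on failure):

/* Sketch: copy [src_addr, src_addr + len) into dst_map, leaving the
 * source range intact. */
static kern_return_t
copy_range(vm_map_t src_map, vm_map_address_t src_addr,
    vm_map_size_t len, vm_map_t dst_map, vm_map_address_t *dst_addr)
{
	vm_map_copy_t copy;
	kern_return_t kr;

	kr = vm_map_copyin(src_map, src_addr, len,
	    FALSE /* src_destroy */, &copy);
	if (kr != KERN_SUCCESS)
		return kr;
	kr = vm_map_copyout(dst_map, dst_addr, copy);
	if (kr != KERN_SUCCESS)
		vm_map_copy_discard(copy); /* copyout did not consume it */
	return kr;
}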
extern kern_return_t vm_map_copyin_common(
vm_map_t src_map,
vm_map_address_t src_addr,
vm_map_copy_t *copy_result, /* OUT */
boolean_t use_maxprot);
+extern kern_return_t vm_map_copy_extract(
+ vm_map_t src_map,
+ vm_map_address_t src_addr,
+ vm_map_size_t len,
+ vm_map_copy_t *copy_result, /* OUT */
+ vm_prot_t *cur_prot, /* OUT */
+ vm_prot_t *max_prot);
+
+
+extern void vm_map_disable_NX(
+ vm_map_t map);
+
+extern void vm_map_disallow_data_exec(
+ vm_map_t map);
+
+extern void vm_map_set_64bit(
+ vm_map_t map);
+
+extern void vm_map_set_32bit(
+ vm_map_t map);
+
+extern boolean_t vm_map_has_hard_pagezero(
+ vm_map_t map,
+ vm_map_offset_t pagezero_size);
+
+extern boolean_t vm_map_is_64bit(
+ vm_map_t map);
+#define vm_map_has_4GB_pagezero(map) vm_map_has_hard_pagezero(map, (vm_map_offset_t)0x100000000ULL)
+
+
+extern void vm_map_set_4GB_pagezero(
+ vm_map_t map);
+
+extern void vm_map_clear_4GB_pagezero(
+ vm_map_t map);
+
+extern kern_return_t vm_map_raise_max_offset(
+ vm_map_t map,
+ vm_map_offset_t new_max_offset);
+
+extern kern_return_t vm_map_raise_min_offset(
+ vm_map_t map,
+ vm_map_offset_t new_min_offset);
+
+extern vm_map_offset_t vm_compute_max_offset(
+ unsigned is64);
+
+extern uint64_t vm_map_get_max_aslr_slide_pages(
+ vm_map_t map);
+
+extern void vm_map_set_user_wire_limit(
+ vm_map_t map,
+ vm_size_t limit);
+
+extern void vm_map_switch_protect(
+ vm_map_t map,
+ boolean_t val);
+
+extern void vm_map_iokit_mapped_region(
+ vm_map_t map,
+ vm_size_t bytes);
+
+extern void vm_map_iokit_unmapped_region(
+ vm_map_t map,
+ vm_size_t bytes);
+
+
+extern boolean_t first_free_is_valid(vm_map_t);
+
+extern int vm_map_page_shift(
+ vm_map_t map);
+
+extern int vm_map_page_mask(
+ vm_map_t map);
+
+extern int vm_map_page_size(
+ vm_map_t map);
+
+extern vm_map_offset_t vm_map_round_page_mask(
+ vm_map_offset_t offset,
+ vm_map_offset_t mask);
+
+extern vm_map_offset_t vm_map_trunc_page_mask(
+ vm_map_offset_t offset,
+ vm_map_offset_t mask);
+
+#ifdef XNU_KERNEL_PRIVATE
+extern kern_return_t vm_map_page_info(
+ vm_map_t map,
+ vm_map_offset_t offset,
+ vm_page_info_flavor_t flavor,
+ vm_page_info_t info,
+ mach_msg_type_number_t *count);
+#endif /* XNU_KERNEL_PRIVATE */
+
+
+#ifdef MACH_KERNEL_PRIVATE
+
/*
* Macros to invoke vm_map_copyin_common. vm_map_copyin is the
* usual form; it handles a copyin based on the current protection
vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
FALSE, copy_result, TRUE)
+
+/*
+ * Internal macros for rounding and truncation of vm_map offsets and sizes
+ */
+#define VM_MAP_ROUND_PAGE(x,pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
+#define VM_MAP_TRUNC_PAGE(x,pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))
+
/*
* Macros for rounding and truncation of vm_map offsets and sizes
*/
-#define vm_map_round_page(x) (((vm_map_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
-#define vm_map_trunc_page(x) ((vm_map_offset_t)(x) & ~((signed)PAGE_MASK))
+#define VM_MAP_PAGE_SHIFT(map) ((map) ? (map)->hdr.page_shift : PAGE_SHIFT)
+#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
+#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
+#define VM_MAP_PAGE_ALIGNED(x,pgmask) (((x) & (pgmask)) == 0)
+
+#endif /* MACH_KERNEL_PRIVATE */
+
+#ifdef XNU_KERNEL_PRIVATE
+extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
+#endif /* XNU_KERNEL_PRIVATE */
+
+#define vm_map_round_page(x,pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
+#define vm_map_trunc_page(x,pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))
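Since maps can now carry their own page size, callers are expected to round with the target map's mask rather than the global PAGE_MASK; a minimal sketch (hypothetical helper):

/* Sketch: widen [addr, addr + size) to the page size of "map". */
static void
range_for_map(vm_map_t map, vm_map_offset_t addr, vm_map_size_t size,
    vm_map_offset_t *startp, vm_map_offset_t *endp)
{
	*startp = vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(map));
	*endp = vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(map));
}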
/*
* Flags for vm_map_remove() and vm_map_delete()
#define VM_MAP_REMOVE_INTERRUPTIBLE 0x2
#define VM_MAP_REMOVE_WAIT_FOR_KWIRE 0x4
#define VM_MAP_REMOVE_SAVE_ENTRIES 0x8
-
-/* Support for shared regions */
-extern kern_return_t vm_region_clone(
- ipc_port_t src_region,
- ipc_port_t dst_region);
-
-extern kern_return_t vm_map_region_replace(
- vm_map_t target_map,
- ipc_port_t old_region,
- ipc_port_t new_region,
- vm_map_offset_t start,
- vm_map_offset_t end);
+#define VM_MAP_REMOVE_NO_PMAP_CLEANUP 0x10
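These flags are OR-ed into the final argument of vm_map_remove(); a short sketch under that assumption (hypothetical helper):

/* Sketch: remove a range, waiting for kernel-wired pages if needed. */
static kern_return_t
remove_range(vm_map_t map, vm_map_offset_t start, vm_map_offset_t end)
{
	return vm_map_remove(map, start, end,
	    VM_MAP_REMOVE_WAIT_FOR_KWIRE);
}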
/* Support for UPLs from vm_maps */
extern kern_return_t vm_map_get_upl(
vm_map_t target_map,
vm_map_offset_t map_offset,
- vm_size_t *size,
+ upl_size_t *size,
upl_t *upl,
upl_page_info_array_t page_info,
- mach_msg_type_number_t *page_infoCnt,
- integer_t *flags,
- integer_t force_data_sync);
+ unsigned int *page_infoCnt,
+ int *flags,
+ int force_data_sync);
+
+#if CONFIG_DYNAMIC_CODE_SIGNING
+extern kern_return_t vm_map_sign(vm_map_t map,
+ vm_map_offset_t start,
+ vm_map_offset_t end);
+#endif /* CONFIG_DYNAMIC_CODE_SIGNING */
+
+#if CONFIG_FREEZE
+void vm_map_freeze_thaw_init(void);
+void vm_map_freeze_thaw(void);
+void vm_map_demand_fault(void);
+
+extern kern_return_t vm_map_freeze_walk(
+ vm_map_t map,
+ unsigned int *purgeable_count,
+ unsigned int *wired_count,
+ unsigned int *clean_count,
+ unsigned int *dirty_count,
+ unsigned int dirty_budget,
+ boolean_t *has_shared);
+
+extern kern_return_t vm_map_freeze(
+ vm_map_t map,
+ unsigned int *purgeable_count,
+ unsigned int *wired_count,
+ unsigned int *clean_count,
+ unsigned int *dirty_count,
+ unsigned int dirty_budget,
+ boolean_t *has_shared);
+
+extern kern_return_t vm_map_thaw(
+ vm_map_t map);
+#endif /* CONFIG_FREEZE */
__END_DECLS