X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/d7e50217d7adf6e52786a38bcaa4cd698cb9a79e..d26ffc64f583ab2d29df48f13518685602bc8832:/osfmk/vm/vm_map.h diff --git a/osfmk/vm/vm_map.h b/osfmk/vm/vm_map.h index d0f12dacc..23592b8e4 100644 --- a/osfmk/vm/vm_map.h +++ b/osfmk/vm/vm_map.h @@ -1,16 +1,19 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2009 Apple Inc. All rights reserved. * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER @@ -20,7 +23,7 @@ * Please see the License for the specific language governing rights and * limitations under the License. * - * @APPLE_LICENSE_HEADER_END@ + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ @@ -74,38 +77,47 @@ #include #include #include +#include #include -#include - -#ifdef __APPLE_API_PRIVATE +#ifdef KERNEL_PRIVATE -#ifndef MACH_KERNEL_PRIVATE +#include -#ifdef __APPLE_API_OBSOLETE -extern void kernel_vm_map_reference(vm_map_t map); -#endif /* __APPLE_API_OBSOLETE */ +__BEGIN_DECLS extern void vm_map_reference(vm_map_t map); extern vm_map_t current_map(void); -#else /* MACH_KERNEL_PRIVATE */ +/* Setup reserved areas in a new VM map */ +extern kern_return_t vm_map_exec( + vm_map_t new_map, + task_t task, + boolean_t is64bit, + void *fsroot, + cpu_type_t cpu); + +__END_DECLS + +#ifdef MACH_KERNEL_PRIVATE -#include #include #include #include #include -#include +#include #include #include -#include +#include -#define current_map_fast() (current_act_fast()->map) +#define current_map_fast() (current_thread()->map) #define current_map() (current_map_fast()) +#include + + /* * Types defined: * @@ -116,6 +128,7 @@ extern vm_map_t current_map(void); * used for inter-map copy operations */ typedef struct vm_map_entry *vm_map_entry_t; +#define VM_MAP_ENTRY_NULL ((vm_map_entry_t) 0) /* @@ -126,13 +139,14 @@ typedef struct vm_map_entry *vm_map_entry_t; * memory object or a sub map (of the kernel map). 
*/ typedef union vm_map_object { - struct vm_object *vm_object; /* object object */ - struct vm_map *sub_map; /* belongs to another map */ + vm_object_t vmo_object; /* object object */ + vm_map_t vmo_submap; /* belongs to another map */ } vm_map_object_t; -#define named_entry_lock_init(object) mutex_init(&(object)->Lock, ETAP_VM_OBJ) -#define named_entry_lock(object) mutex_lock(&(object)->Lock) -#define named_entry_unlock(object) mutex_unlock(&(object)->Lock) +#define named_entry_lock_init(object) lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr) +#define named_entry_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp) +#define named_entry_lock(object) lck_mtx_lock(&(object)->Lock) +#define named_entry_unlock(object) lck_mtx_unlock(&(object)->Lock) /* * Type: vm_named_entry_t [internal use only] @@ -153,19 +167,21 @@ typedef union vm_map_object { */ struct vm_named_entry { - decl_mutex_data(, Lock) /* Synchronization */ - vm_object_t object; /* object I point to */ - vm_object_offset_t offset; /* offset into object */ + decl_lck_mtx_data(, Lock) /* Synchronization */ union { - memory_object_t pager; /* amo pager port */ - vm_map_t map; /* map backing submap */ + vm_object_t object; /* object I point to */ + vm_map_t map; /* map backing submap */ + vm_map_copy_t copy; /* a VM map copy */ } backing; - unsigned int size; /* size of region */ - unsigned int protection; /* access permissions */ + vm_object_offset_t offset; /* offset into object */ + vm_object_size_t size; /* size of region */ + vm_object_offset_t data_offset; /* offset to first byte of data */ + vm_prot_t protection; /* access permissions */ int ref_count; /* Number of references */ - unsigned int - /* boolean_t */ internal:1, /* is an internal object */ - /* boolean_t */ is_sub_map:1; /* is object is a submap? */ + unsigned int /* Is backing.xxx : */ + /* boolean_t */ internal:1, /* ... an internal object */ + /* boolean_t */ is_sub_map:1, /* ... a submap? */ + /* boolean_t */ is_copy:1; /* ... a VM map copy */ }; /* @@ -181,39 +197,167 @@ struct vm_named_entry { * Control information for virtual copy operations is also * stored in the address map entry. */ + struct vm_map_links { struct vm_map_entry *prev; /* previous entry */ struct vm_map_entry *next; /* next entry */ - vm_offset_t start; /* start address */ - vm_offset_t end; /* end address */ + vm_map_offset_t start; /* start address */ + vm_map_offset_t end; /* end address */ }; +/* + * IMPORTANT: + * The "alias" field can be updated while holding the VM map lock + * "shared". It's OK as along as it's the only field that can be + * updated without the VM map "exclusive" lock. 
+ */ +#define VME_OBJECT(entry) ((entry)->vme_object.vmo_object) +#define VME_OBJECT_SET(entry, object) \ + MACRO_BEGIN \ + (entry)->vme_object.vmo_object = (object); \ + MACRO_END +#define VME_SUBMAP(entry) ((entry)->vme_object.vmo_submap) +#define VME_SUBMAP_SET(entry, submap) \ + MACRO_BEGIN \ + (entry)->vme_object.vmo_submap = (submap); \ + MACRO_END +#define VME_OFFSET(entry) ((entry)->vme_offset & ~PAGE_MASK) +#define VME_OFFSET_SET(entry, offset) \ + MACRO_BEGIN \ + int __alias; \ + __alias = VME_ALIAS((entry)); \ + assert((offset & PAGE_MASK) == 0); \ + (entry)->vme_offset = offset | __alias; \ + MACRO_END +#define VME_OBJECT_SHADOW(entry, length) \ + MACRO_BEGIN \ + vm_object_t __object; \ + vm_object_offset_t __offset; \ + __object = VME_OBJECT((entry)); \ + __offset = VME_OFFSET((entry)); \ + vm_object_shadow(&__object, &__offset, (length)); \ + if (__object != VME_OBJECT((entry))) { \ + VME_OBJECT_SET((entry), __object); \ + (entry)->use_pmap = TRUE; \ + } \ + if (__offset != VME_OFFSET((entry))) { \ + VME_OFFSET_SET((entry), __offset); \ + } \ + MACRO_END + +#define VME_ALIAS_MASK (PAGE_MASK) +#define VME_ALIAS(entry) ((unsigned int)((entry)->vme_offset & VME_ALIAS_MASK)) +#define VME_ALIAS_SET(entry, alias) \ + MACRO_BEGIN \ + vm_map_offset_t __offset; \ + __offset = VME_OFFSET((entry)); \ + (entry)->vme_offset = __offset | ((alias) & VME_ALIAS_MASK); \ + MACRO_END + +/* + * FOOTPRINT ACCOUNTING: + * The "memory footprint" is better described in the pmap layer. + * + * At the VM level, these 2 vm_map_entry_t fields are relevant: + * iokit_mapped: + * For an "iokit_mapped" entry, we add the size of the entry to the + * footprint when the entry is entered into the map and we subtract that + * size when the entry is removed. No other accounting should take place. + * "use_pmap" should be FALSE but is not taken into account. + * use_pmap: (only when is_sub_map is FALSE) + * This indicates if we should ask the pmap layer to account for pages + * in this mapping. If FALSE, we expect that another form of accounting + * is being used (e.g. "iokit_mapped" or the explicit accounting of + * non-volatile purgable memory). + * + * So the logic is mostly: + * if entry->is_sub_map == TRUE + * anything in a submap does not count for the footprint + * else if entry->iokit_mapped == TRUE + * footprint includes the entire virtual size of this entry + * else if entry->use_pmap == FALSE + * tell pmap NOT to account for pages being pmap_enter()'d from this + * mapping (i.e. use "alternate accounting") + * else + * pmap will account for pages being pmap_enter()'d from this mapping + * as it sees fit (only if anonymous, etc...) + */ + struct vm_map_entry { struct vm_map_links links; /* links to other entries */ #define vme_prev links.prev #define vme_next links.next #define vme_start links.start #define vme_end links.end - union vm_map_object object; /* object I point to */ - vm_object_offset_t offset; /* offset into object */ + + struct vm_map_store store; + union vm_map_object vme_object; /* object I point to */ + vm_object_offset_t vme_offset; /* offset into object */ + unsigned int - /* boolean_t */ is_shared:1, /* region is shared */ - /* boolean_t */ is_sub_map:1, /* Is "object" a submap? */ - /* boolean_t */ in_transition:1, /* Entry being changed */ - /* boolean_t */ needs_wakeup:1, /* Waiters on in_transition */ - /* vm_behavior_t */ behavior:2, /* user paging behavior hint */ + /* boolean_t */ is_shared:1, /* region is shared */ + /* boolean_t */ is_sub_map:1, /* Is "object" a submap? 
*/ + /* boolean_t */ in_transition:1, /* Entry being changed */ + /* boolean_t */ needs_wakeup:1, /* Waiters on in_transition */ + /* vm_behavior_t */ behavior:2, /* user paging behavior hint */ /* behavior is not defined for submap type */ - /* boolean_t */ needs_copy:1, /* object need to be copied? */ + /* boolean_t */ needs_copy:1, /* object need to be copied? */ + /* Only in task maps: */ - /* vm_prot_t */ protection:3, /* protection code */ - /* vm_prot_t */ max_protection:3,/* maximum protection */ - /* vm_inherit_t */ inheritance:2, /* inheritance */ - /* nested pmap */ use_pmap:1, /* nested pmaps */ - /* user alias */ alias:8; + /* vm_prot_t */ protection:3, /* protection code */ + /* vm_prot_t */ max_protection:3, /* maximum protection */ + /* vm_inherit_t */ inheritance:2, /* inheritance */ + /* boolean_t */ use_pmap:1, /* + * use_pmap is overloaded: + * if "is_sub_map": + * use a nested pmap? + * else (i.e. if object): + * use pmap accounting + * for footprint? + */ + /* boolean_t */ no_cache:1, /* should new pages be cached? */ + /* boolean_t */ permanent:1, /* mapping can not be removed */ + /* boolean_t */ superpage_size:1, /* use superpages of a certain size */ + /* boolean_t */ map_aligned:1, /* align to map's page size */ + /* boolean_t */ zero_wired_pages:1, /* zero out the wired pages of + * this entry it is being deleted + * without unwiring them */ + /* boolean_t */ used_for_jit:1, + /* boolean_t */ from_reserved_zone:1, /* Allocated from + * kernel reserved zone */ + + /* iokit accounting: use the virtual size rather than resident size: */ + /* boolean_t */ iokit_acct:1, + /* boolean_t */ vme_resilient_codesign:1, + /* boolean_t */ vme_resilient_media:1, + /* boolean_t */ vme_atomic:1, /* entry cannot be split/coalesced */ + __unused:5; +; + unsigned short wired_count; /* can be paged if = 0 */ unsigned short user_wired_count; /* for vm_wire */ +#if DEBUG +#define MAP_ENTRY_CREATION_DEBUG (1) +#define MAP_ENTRY_INSERTION_DEBUG (1) +#endif +#if MAP_ENTRY_CREATION_DEBUG + struct vm_map_header *vme_creation_maphdr; + uintptr_t vme_creation_bt[16]; +#endif +#if MAP_ENTRY_INSERTION_DEBUG + uintptr_t vme_insertion_bt[16]; +#endif }; +/* + * Convenience macros for dealing with superpages + * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h + */ +#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES) +#define SUPERPAGE_MASK (-SUPERPAGE_SIZE) +#define SUPERPAGE_ROUND_DOWN(a) (a & SUPERPAGE_MASK) +#define SUPERPAGE_ROUND_UP(a) ((a + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK) + /* * wired_counts are unsigned short. This value is used to safeguard * against any mishaps due to runaway user programs. @@ -228,13 +372,23 @@ struct vm_map_entry { * Description: * Header for a vm_map and a vm_map_copy. */ + + struct vm_map_header { struct vm_map_links links; /* first, last, min, max */ int nentries; /* Number of entries */ boolean_t entries_pageable; /* are map entries pageable? */ +#ifdef VM_MAP_STORE_USE_RB + struct rb_head rb_head_store; +#endif + int page_shift; /* page shift */ }; +#define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift) +#define VM_MAP_HDR_PAGE_SIZE(hdr) (1 << VM_MAP_HDR_PAGE_SHIFT((hdr))) +#define VM_MAP_HDR_PAGE_MASK(hdr) (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1) + /* * Type: vm_map_t [exported; contents invisible] * @@ -250,27 +404,69 @@ struct vm_map_header { * insertion, or removal. Another hint is used to * quickly find free space. 
*/ -struct vm_map { - lock_t lock; /* uni- and smp-lock */ +struct _vm_map { + lck_rw_t lock; /* map lock */ struct vm_map_header hdr; /* Map entry header */ #define min_offset hdr.links.start /* start of range */ #define max_offset hdr.links.end /* end of range */ pmap_t pmap; /* Physical map */ - vm_size_t size; /* virtual size */ + vm_map_size_t size; /* virtual size */ + vm_map_size_t user_wire_limit;/* rlimit on user locked memory */ + vm_map_size_t user_wire_size; /* current size of user locked memory in this map */ +#if __x86_64__ + vm_map_offset_t vmmap_high_start; +#endif /* __x86_64__ */ + + union { + /* + * If map->disable_vmentry_reuse == TRUE: + * the end address of the highest allocated vm_map_entry_t. + */ + vm_map_offset_t vmu1_highest_entry_end; + /* + * For a nested VM map: + * the lowest address in this nested VM map that we would + * expect to be unnested under normal operation (i.e. for + * regular copy-on-write on DATA section). + */ + vm_map_offset_t vmu1_lowest_unnestable_start; + } vmu1; +#define highest_entry_end vmu1.vmu1_highest_entry_end +#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start + int ref_count; /* Reference count */ #if TASK_SWAPPER int res_count; /* Residence count (swap) */ int sw_state; /* Swap state */ #endif /* TASK_SWAPPER */ - decl_mutex_data(, s_lock) /* Lock ref, res, hint fields */ + decl_lck_mtx_data(, s_lock) /* Lock ref, res fields */ + lck_mtx_ext_t s_lock_ext; vm_map_entry_t hint; /* hint for quick lookups */ - vm_map_entry_t first_free; /* First free space hint */ - boolean_t wait_for_space; /* Should callers wait - for space? */ - boolean_t wiring_required;/* All memory wired? */ - boolean_t no_zero_fill; /* No zero fill absent pages */ - boolean_t mapped; /* has this map been mapped */ + struct vm_map_links* hole_hint; /* hint for quick hole lookups */ + union{ + vm_map_entry_t _first_free; /* First free space hint */ + struct vm_map_links* _holes; /* links all holes between entries */ + }f_s; /* Union for free space data structures being used */ + +#define first_free f_s._first_free +#define holes_list f_s._holes + + unsigned int + /* boolean_t */ wait_for_space:1, /* Should callers wait for space? */ + /* boolean_t */ wiring_required:1, /* All memory wired? */ + /* boolean_t */ no_zero_fill:1, /*No zero fill absent pages */ + /* boolean_t */ mapped_in_other_pmaps:1, /*has this submap been mapped in maps that use a different pmap */ + /* boolean_t */ switch_protect:1, /* Protect map from write faults while switched */ + /* boolean_t */ disable_vmentry_reuse:1, /* All vm entries should keep using newer and higher addresses in the map */ + /* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */ + /* boolean_t */ holelistenabled:1, + /* boolean_t */ is_nested_map:1, + /* boolean_t */ map_disallow_new_exec:1, /* Disallow new executable code */ + /* reserved */ pad:22; unsigned int timestamp; /* Version number */ + unsigned int color_rr; /* next color (not protected by a lock) */ + + boolean_t jit_entry_exists; } ; #define vm_map_to_entry(map) ((struct vm_map_entry *) &(map)->hdr.links) @@ -325,52 +521,18 @@ typedef struct vm_map_version { * entry onto which the other entries that represent * the region are chained. * - * The second format is a single vm object. This is used - * primarily in the pageout path. The third format is a - * list of vm pages. 
An optional continuation provides - * a hook to be called to obtain more of the memory, - * or perform other operations. The continuation takes 3 - * arguments, a saved arg buffer, a pointer to a new vm_map_copy - * (returned) and an abort flag (abort if TRUE). - */ - -#define VM_MAP_COPY_PAGE_LIST_MAX 20 -#define VM_MAP_COPY_PAGE_LIST_MAX_SIZE (VM_MAP_COPY_PAGE_LIST_MAX * PAGE_SIZE) - - -/* - * Options for vm_map_copyin_page_list. - */ - -#define VM_MAP_COPYIN_OPT_VM_PROT 0x7 -#define VM_MAP_COPYIN_OPT_SRC_DESTROY 0x8 -#define VM_MAP_COPYIN_OPT_STEAL_PAGES 0x10 -#define VM_MAP_COPYIN_OPT_PMAP_ENTER 0x20 -#define VM_MAP_COPYIN_OPT_NO_ZERO_FILL 0x40 - -/* - * Continuation structures for vm_map_copyin_page_list. - */ -typedef struct { - vm_map_t map; - vm_offset_t src_addr; - vm_size_t src_len; - vm_offset_t destroy_addr; - vm_size_t destroy_len; - int options; -} vm_map_copyin_args_data_t, *vm_map_copyin_args_t; - -#define VM_MAP_COPYIN_ARGS_NULL ((vm_map_copyin_args_t) 0) - - -/* vm_map_copy_cont_t is a type definition/prototype - * for the cont function pointer in vm_map_copy structure. + * The second format is a single vm object. This was used + * primarily in the pageout path - but is not currently used + * except for placeholder copy objects (see vm_map_copy_copy()). + * + * The third format is a kernel buffer copy object - for data + * small enough that physical copies were the most efficient + * method. This method uses a zero-sized array unioned with + * other format-specific data in the 'c_u' member. This unsized + * array overlaps the other elements and allows us to use this + * extra structure space for physical memory copies. On 64-bit + * systems this saves ~64 bytes per vm_map_copy. */ -typedef kern_return_t (*vm_map_copy_cont_t)( - vm_map_copyin_args_t, - vm_map_copy_t *); - -#define VM_MAP_COPY_CONT_NULL ((vm_map_copy_cont_t) 0) struct vm_map_copy { int type; @@ -378,34 +540,24 @@ struct vm_map_copy { #define VM_MAP_COPY_OBJECT 2 #define VM_MAP_COPY_KERNEL_BUFFER 3 vm_object_offset_t offset; - vm_size_t size; + vm_map_size_t size; union { - struct vm_map_header hdr; /* ENTRY_LIST */ - struct { /* OBJECT */ - vm_object_t object; - vm_size_t index; /* record progress as pages - * are moved from object to - * page list; must be zero - * when first invoking - * vm_map_object_to_page_list - */ - } c_o; - struct { /* KERNEL_BUFFER */ - vm_offset_t kdata; - vm_size_t kalloc_size; /* size of this copy_t */ - } c_k; + struct vm_map_header hdr; /* ENTRY_LIST */ + vm_object_t object; /* OBJECT */ + uint8_t kdata[0]; /* KERNEL_BUFFER */ } c_u; }; #define cpy_hdr c_u.hdr -#define cpy_object c_u.c_o.object -#define cpy_index c_u.c_o.index - -#define cpy_kdata c_u.c_k.kdata -#define cpy_kalloc_size c_u.c_k.kalloc_size +#define cpy_object c_u.object +#define cpy_kdata c_u.kdata +#define cpy_kdata_hdr_sz (offsetof(struct vm_map_copy, c_u.kdata)) +#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift) +#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy))) +#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1) /* * Useful macros for entry list copy objects @@ -428,18 +580,38 @@ struct vm_map_copy { #define vm_map_lock_init(map) \ ((map)->timestamp = 0 , \ - lock_init(&(map)->lock, TRUE, ETAP_VM_MAP, ETAP_VM_MAP_I)) + lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr)) -#define vm_map_lock(map) lock_write(&(map)->lock) +#define vm_map_lock(map) lck_rw_lock_exclusive(&(map)->lock) #define vm_map_unlock(map) \ - ((map)->timestamp++ , 
lock_write_done(&(map)->lock)) -#define vm_map_lock_read(map) lock_read(&(map)->lock) -#define vm_map_unlock_read(map) lock_read_done(&(map)->lock) + ((map)->timestamp++ , lck_rw_done(&(map)->lock)) +#define vm_map_lock_read(map) lck_rw_lock_shared(&(map)->lock) +#define vm_map_unlock_read(map) lck_rw_done(&(map)->lock) #define vm_map_lock_write_to_read(map) \ - ((map)->timestamp++ , lock_write_to_read(&(map)->lock)) -#define vm_map_lock_read_to_write(map) lock_read_to_write(&(map)->lock) - -extern zone_t vm_map_copy_zone; /* zone for vm_map_copy structures */ + ((map)->timestamp++ , lck_rw_lock_exclusive_to_shared(&(map)->lock)) +/* lock_read_to_write() returns FALSE on failure. Macro evaluates to + * zero on success and non-zero value on failure. + */ +#define vm_map_lock_read_to_write(map) (lck_rw_lock_shared_to_exclusive(&(map)->lock) != TRUE) + +#define vm_map_try_lock(map) lck_rw_try_lock_exclusive(&(map)->lock) +#define vm_map_try_lock_read(map) lck_rw_try_lock_shared(&(map)->lock) + +#if MACH_ASSERT || DEBUG +#define vm_map_lock_assert_held(map) \ + lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD) +#define vm_map_lock_assert_shared(map) \ + lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_SHARED) +#define vm_map_lock_assert_exclusive(map) \ + lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE) +#define vm_map_lock_assert_notheld(map) \ + lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_NOTHELD) +#else /* MACH_ASSERT || DEBUG */ +#define vm_map_lock_assert_held(map) +#define vm_map_lock_assert_shared(map) +#define vm_map_lock_assert_exclusive(map) +#define vm_map_lock_assert_notheld(map) +#endif /* MACH_ASSERT || DEBUG */ /* * Exported procedures that operate on vm_map_t. @@ -448,53 +620,71 @@ extern zone_t vm_map_copy_zone; /* zone for vm_map_copy structures */ /* Initialize the module */ extern void vm_map_init(void); +extern void vm_kernel_reserved_entry_init(void); + /* Allocate a range in the specified virtual address map and * return the entry allocated for that range. */ extern kern_return_t vm_map_find_space( - vm_map_t map, - vm_offset_t *address, /* OUT */ - vm_size_t size, - vm_offset_t mask, - vm_map_entry_t *o_entry); /* OUT */ + vm_map_t map, + vm_map_address_t *address, /* OUT */ + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + vm_map_entry_t *o_entry); /* OUT */ + +extern void vm_map_clip_start( + vm_map_t map, + vm_map_entry_t entry, + vm_map_offset_t endaddr); +extern void vm_map_clip_end( + vm_map_t map, + vm_map_entry_t entry, + vm_map_offset_t endaddr); +extern boolean_t vm_map_entry_should_cow_for_true_share( + vm_map_entry_t entry); /* Lookup map entry containing or the specified address in the given map */ extern boolean_t vm_map_lookup_entry( - vm_map_t map, - vm_offset_t address, - vm_map_entry_t *entry); /* OUT */ + vm_map_t map, + vm_map_address_t address, + vm_map_entry_t *entry); /* OUT */ + +extern void vm_map_copy_remap( + vm_map_t map, + vm_map_entry_t where, + vm_map_copy_t copy, + vm_map_offset_t adjustment, + vm_prot_t cur_prot, + vm_prot_t max_prot, + vm_inherit_t inheritance); /* Find the VM object, offset, and protection for a given virtual address * in the specified map, assuming a page fault of the type specified. 
*/ extern kern_return_t vm_map_lookup_locked( - vm_map_t *var_map, /* IN/OUT */ - vm_offset_t vaddr, - vm_prot_t fault_type, - vm_map_version_t *out_version, /* OUT */ - vm_object_t *object, /* OUT */ - vm_object_offset_t *offset, /* OUT */ - vm_prot_t *out_prot, /* OUT */ - boolean_t *wired, /* OUT */ - int *behavior, /* OUT */ - vm_object_offset_t *lo_offset, /* OUT */ - vm_object_offset_t *hi_offset, /* OUT */ - vm_map_t *pmap_map); /* OUT */ + vm_map_t *var_map, /* IN/OUT */ + vm_map_address_t vaddr, + vm_prot_t fault_type, + int object_lock_type, + vm_map_version_t *out_version, /* OUT */ + vm_object_t *object, /* OUT */ + vm_object_offset_t *offset, /* OUT */ + vm_prot_t *out_prot, /* OUT */ + boolean_t *wired, /* OUT */ + vm_object_fault_info_t fault_info, /* OUT */ + vm_map_t *real_map); /* OUT */ /* Verifies that the map has not changed since the given version. */ extern boolean_t vm_map_verify( - vm_map_t map, - vm_map_version_t *version); /* REF */ - -/* Split a vm_map_entry into 2 entries */ -extern void _vm_map_clip_start( - struct vm_map_header *map_header, - vm_map_entry_t entry, - vm_offset_t start); + vm_map_t map, + vm_map_version_t *version); /* REF */ extern vm_map_entry_t vm_map_entry_insert( vm_map_t map, vm_map_entry_t insp_entry, - vm_offset_t start, - vm_offset_t end, + vm_map_offset_t start, + vm_map_offset_t end, vm_object_t object, vm_object_offset_t offset, boolean_t needs_copy, @@ -504,65 +694,30 @@ extern vm_map_entry_t vm_map_entry_insert( vm_prot_t max_protection, vm_behavior_t behavior, vm_inherit_t inheritance, - unsigned wired_count); + unsigned wired_count, + boolean_t no_cache, + boolean_t permanent, + unsigned int superpage_size, + boolean_t clear_map_aligned, + boolean_t is_submap, + boolean_t used_for_jit, + int alias); -extern kern_return_t vm_remap_extract( - vm_map_t map, - vm_offset_t addr, - vm_size_t size, - boolean_t copy, - struct vm_map_header *map_header, - vm_prot_t *cur_protection, - vm_prot_t *max_protection, - vm_inherit_t inheritance, - boolean_t pageable); - -extern kern_return_t vm_remap_range_allocate( - vm_map_t map, - vm_offset_t *address, - vm_size_t size, - vm_offset_t mask, - boolean_t anywhere, - vm_map_entry_t *map_entry); - -extern kern_return_t vm_remap_extract( - vm_map_t map, - vm_offset_t addr, - vm_size_t size, - boolean_t copy, - struct vm_map_header *map_header, - vm_prot_t *cur_protection, - vm_prot_t *max_protection, - vm_inherit_t inheritance, - boolean_t pageable); - -extern kern_return_t vm_remap_range_allocate( - vm_map_t map, - vm_offset_t *address, - vm_size_t size, - vm_offset_t mask, - boolean_t anywhere, - vm_map_entry_t *map_entry); /* * Functions implemented as macros */ -#define vm_map_min(map) ((map)->min_offset) +#define vm_map_min(map) ((map)->min_offset) /* Lowest valid address in * a map */ -#define vm_map_max(map) ((map)->max_offset) +#define vm_map_max(map) ((map)->max_offset) /* Highest valid address */ #define vm_map_pmap(map) ((map)->pmap) /* Physical map associated * with this address map */ -#define vm_map_verify_done(map, version) vm_map_unlock_read(map) - /* Operation that required - * a verified lookup is - * now complete */ - /* * Macros/functions for map residence counts and swapin/out of vm maps */ @@ -584,25 +739,25 @@ extern void vm_map_reference_swap( #else /* MACH_ASSERT */ -#define vm_map_reference(map) \ +#define vm_map_reference(map) \ MACRO_BEGIN \ - vm_map_t Map = (map); \ + vm_map_t Map = (map); \ if (Map) { \ - mutex_lock(&Map->s_lock); \ + lck_mtx_lock(&Map->s_lock); \ 
Map->res_count++; \ Map->ref_count++; \ - mutex_unlock(&Map->s_lock); \ + lck_mtx_unlock(&Map->s_lock); \ } \ MACRO_END #define vm_map_res_reference(map) \ MACRO_BEGIN \ - vm_map_t Lmap = (map); \ + vm_map_t Lmap = (map); \ if (Lmap->res_count == 0) { \ - mutex_unlock(&Lmap->s_lock); \ + lck_mtx_unlock(&Lmap->s_lock);\ vm_map_lock(Lmap); \ vm_map_swapin(Lmap); \ - mutex_lock(&Lmap->s_lock); \ + lck_mtx_lock(&Lmap->s_lock); \ ++Lmap->res_count; \ vm_map_unlock(Lmap); \ } else \ @@ -611,23 +766,23 @@ MACRO_END #define vm_map_res_deallocate(map) \ MACRO_BEGIN \ - vm_map_t Map = (map); \ - if (--Map->res_count == 0) { \ - mutex_unlock(&Map->s_lock); \ + vm_map_t Map = (map); \ + if (--Map->res_count == 0) { \ + lck_mtx_unlock(&Map->s_lock); \ vm_map_lock(Map); \ vm_map_swapout(Map); \ vm_map_unlock(Map); \ - mutex_lock(&Map->s_lock); \ + lck_mtx_lock(&Map->s_lock); \ } \ MACRO_END #define vm_map_reference_swap(map) \ MACRO_BEGIN \ vm_map_t Map = (map); \ - mutex_lock(&Map->s_lock); \ + lck_mtx_lock(&Map->s_lock); \ ++Map->ref_count; \ vm_map_res_reference(Map); \ - mutex_unlock(&Map->s_lock); \ + lck_mtx_unlock(&Map->s_lock); \ MACRO_END #endif /* MACH_ASSERT */ @@ -643,9 +798,9 @@ extern void vm_map_swapout( MACRO_BEGIN \ vm_map_t Map = (map); \ if (Map) { \ - mutex_lock(&Map->s_lock); \ + lck_mtx_lock(&Map->s_lock); \ Map->ref_count++; \ - mutex_unlock(&Map->s_lock); \ + lck_mtx_unlock(&Map->s_lock); \ } \ MACRO_END @@ -666,58 +821,47 @@ extern vm_object_t vm_submap_object; */ #define vm_map_entry_wait(map, interruptible) \ ((map)->timestamp++ , \ - thread_sleep_lock_write((event_t)&(map)->hdr, \ - &(map)->lock, interruptible)) + lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \ + (event_t)&(map)->hdr, interruptible)) -#define vm_map_entry_wakeup(map) thread_wakeup((event_t)(&(map)->hdr)) +#define vm_map_entry_wakeup(map) \ + thread_wakeup((event_t)(&(map)->hdr)) -#define vm_map_ref_fast(map) \ +#define vm_map_ref_fast(map) \ MACRO_BEGIN \ - mutex_lock(&map->s_lock); \ + lck_mtx_lock(&map->s_lock); \ map->ref_count++; \ vm_map_res_reference(map); \ - mutex_unlock(&map->s_lock); \ + lck_mtx_unlock(&map->s_lock); \ MACRO_END -#define vm_map_dealloc_fast(map) \ +#define vm_map_dealloc_fast(map) \ MACRO_BEGIN \ - register int c; \ + int c; \ \ - mutex_lock(&map->s_lock); \ - c = --map->ref_count; \ + lck_mtx_lock(&map->s_lock); \ + c = --map->ref_count; \ if (c > 0) \ vm_map_res_deallocate(map); \ - mutex_unlock(&map->s_lock); \ + lck_mtx_unlock(&map->s_lock); \ if (c == 0) \ vm_map_destroy(map); \ MACRO_END /* simplify map entries */ +extern void vm_map_simplify_entry( + vm_map_t map, + vm_map_entry_t this_entry); extern void vm_map_simplify( - vm_map_t map, - vm_offset_t start); - -/* Steal all the pages from a vm_map_copy page_list */ -extern void vm_map_copy_steal_pages( - vm_map_copy_t copy); - -/* Discard a copy without using it */ -extern void vm_map_copy_discard( - vm_map_copy_t copy); + vm_map_t map, + vm_map_offset_t start); /* Move the information in a map copy object to a new map copy object */ extern vm_map_copy_t vm_map_copy_copy( - vm_map_copy_t copy); - -/* Overwrite existing memory with a copy */ -extern kern_return_t vm_map_copy_overwrite( - vm_map_t dst_map, - vm_offset_t dst_addr, - vm_map_copy_t copy, - int interruptible); + vm_map_copy_t copy); /* Create a copy object from an object. 
*/ extern kern_return_t vm_map_copyin_object( @@ -726,23 +870,20 @@ extern kern_return_t vm_map_copyin_object( vm_object_size_t size, vm_map_copy_t *copy_result); /* OUT */ -extern vm_map_t vm_map_switch( - vm_map_t map); - -extern int vm_map_copy_cont_is_valid( - vm_map_copy_t copy); - - -#define VM_MAP_ENTRY_NULL ((vm_map_entry_t) 0) - +extern kern_return_t vm_map_random_address_for_size( + vm_map_t map, + vm_map_offset_t *address, + vm_map_size_t size); /* Enter a mapping */ extern kern_return_t vm_map_enter( vm_map_t map, - vm_offset_t *address, - vm_size_t size, - vm_offset_t mask, + vm_map_offset_t *address, + vm_map_size_t size, + vm_map_offset_t mask, int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, vm_object_t object, vm_object_offset_t offset, boolean_t needs_copy, @@ -750,129 +891,533 @@ extern kern_return_t vm_map_enter( vm_prot_t max_protection, vm_inherit_t inheritance); +#if __arm64__ +extern kern_return_t vm_map_enter_fourk( + vm_map_t map, + vm_map_offset_t *address, + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + vm_object_t object, + vm_object_offset_t offset, + boolean_t needs_copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance); +#endif /* __arm64__ */ + +/* XXX should go away - replaced with regular enter of contig object */ +extern kern_return_t vm_map_enter_cpm( + vm_map_t map, + vm_map_address_t *addr, + vm_map_size_t size, + int flags); + +extern kern_return_t vm_map_remap( + vm_map_t target_map, + vm_map_offset_t *address, + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + vm_map_t src_map, + vm_map_offset_t memory_address, + boolean_t copy, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance); + + +/* + * Read and write from a kernel buffer to a specified map. + */ extern kern_return_t vm_map_write_user( - vm_map_t map, - vm_offset_t src_addr, - vm_offset_t dst_addr, - vm_size_t size); + vm_map_t map, + void *src_p, + vm_map_offset_t dst_addr, + vm_size_t size); extern kern_return_t vm_map_read_user( - vm_map_t map, - vm_offset_t src_addr, - vm_offset_t dst_addr, - vm_size_t size); + vm_map_t map, + vm_map_offset_t src_addr, + void *dst_p, + vm_size_t size); /* Create a new task map using an existing task map as a template. 
*/ extern vm_map_t vm_map_fork( - vm_map_t old_map); + ledger_t ledger, + vm_map_t old_map, + int options); +#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE 0x00000001 +#define VM_MAP_FORK_PRESERVE_PURGEABLE 0x00000002 /* Change inheritance */ extern kern_return_t vm_map_inherit( - vm_map_t map, - vm_offset_t start, - vm_offset_t end, - vm_inherit_t new_inheritance); + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_inherit_t new_inheritance); /* Add or remove machine-dependent attributes from map regions */ extern kern_return_t vm_map_machine_attribute( - vm_map_t map, - vm_offset_t address, - vm_size_t size, + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, vm_machine_attribute_t attribute, vm_machine_attribute_val_t* value); /* IN/OUT */ + +extern kern_return_t vm_map_msync( + vm_map_t map, + vm_map_address_t address, + vm_map_size_t size, + vm_sync_t sync_flags); + /* Set paging behavior */ extern kern_return_t vm_map_behavior_set( - vm_map_t map, - vm_offset_t start, - vm_offset_t end, - vm_behavior_t new_behavior); + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_behavior_t new_behavior); + +extern kern_return_t vm_map_region( + vm_map_t map, + vm_map_offset_t *address, + vm_map_size_t *size, + vm_region_flavor_t flavor, + vm_region_info_t info, + mach_msg_type_number_t *count, + mach_port_t *object_name); + +extern kern_return_t vm_map_region_recurse_64( + vm_map_t map, + vm_map_offset_t *address, + vm_map_size_t *size, + natural_t *nesting_depth, + vm_region_submap_info_64_t info, + mach_msg_type_number_t *count); + +extern kern_return_t vm_map_page_query_internal( + vm_map_t map, + vm_map_offset_t offset, + int *disposition, + int *ref_count); + +extern kern_return_t vm_map_query_volatile( + vm_map_t map, + mach_vm_size_t *volatile_virtual_size_p, + mach_vm_size_t *volatile_resident_size_p, + mach_vm_size_t *volatile_compressed_size_p, + mach_vm_size_t *volatile_pmap_size_p, + mach_vm_size_t *volatile_compressed_pmap_size_p); extern kern_return_t vm_map_submap( - vm_map_t map, - vm_offset_t start, - vm_offset_t end, - vm_map_t submap, - vm_offset_t offset, - boolean_t use_pmap); + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_map_t submap, + vm_map_offset_t offset, + boolean_t use_pmap); + +extern void vm_map_submap_pmap_clean( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_map_t sub_map, + vm_map_offset_t offset); + +/* Convert from a map entry port to a map */ +extern vm_map_t convert_port_entry_to_map( + ipc_port_t port); + +/* Convert from a port to a vm_object */ +extern vm_object_t convert_port_entry_to_object( + ipc_port_t port); + + +extern kern_return_t vm_map_set_cache_attr( + vm_map_t map, + vm_map_offset_t va); + + +/* definitions related to overriding the NX behavior */ + +#define VM_ABI_32 0x1 +#define VM_ABI_64 0x2 + +extern int override_nx(vm_map_t map, uint32_t user_tag); +extern void vm_map_region_top_walk( + vm_map_entry_t entry, + vm_region_top_info_t top); +extern void vm_map_region_walk( + vm_map_t map, + vm_map_offset_t va, + vm_map_entry_t entry, + vm_object_offset_t offset, + vm_object_size_t range, + vm_region_extended_info_t extended, + boolean_t look_for_pages, + mach_msg_type_number_t count); #endif /* MACH_KERNEL_PRIVATE */ +__BEGIN_DECLS + /* Create an empty map */ extern vm_map_t vm_map_create( - pmap_t pmap, - vm_offset_t min, - vm_offset_t max, - boolean_t pageable); + pmap_t pmap, + vm_map_offset_t min_off, + vm_map_offset_t max_off, + boolean_t pageable); + 
+extern void vm_map_disable_hole_optimization(vm_map_t map); /* Get rid of a map */ extern void vm_map_destroy( - vm_map_t map); + vm_map_t map, + int flags); + /* Lose a reference */ extern void vm_map_deallocate( - vm_map_t map); + vm_map_t map); + +extern vm_map_t vm_map_switch( + vm_map_t map); /* Change protection */ extern kern_return_t vm_map_protect( - vm_map_t map, - vm_offset_t start, - vm_offset_t end, - vm_prot_t new_prot, - boolean_t set_max); + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t new_prot, + boolean_t set_max); + +/* Check protection */ +extern boolean_t vm_map_check_protection( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t protection); /* wire down a region */ + +#ifdef XNU_KERNEL_PRIVATE + +extern kern_return_t vm_map_wire_kernel( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t access_type, + vm_tag_t tag, + boolean_t user_wire); + +extern kern_return_t vm_map_wire_and_extract_kernel( + vm_map_t map, + vm_map_offset_t start, + vm_prot_t access_type, + vm_tag_t tag, + boolean_t user_wire, + ppnum_t *physpage_p); + +/* kext exported versions */ + +extern kern_return_t vm_map_wire_external( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t access_type, + boolean_t user_wire); + +extern kern_return_t vm_map_wire_and_extract_external( + vm_map_t map, + vm_map_offset_t start, + vm_prot_t access_type, + boolean_t user_wire, + ppnum_t *physpage_p); + +#else /* XNU_KERNEL_PRIVATE */ + extern kern_return_t vm_map_wire( - vm_map_t map, - vm_offset_t start, - vm_offset_t end, - vm_prot_t access_type, - boolean_t user_wire); + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t access_type, + boolean_t user_wire); + +extern kern_return_t vm_map_wire_and_extract( + vm_map_t map, + vm_map_offset_t start, + vm_prot_t access_type, + boolean_t user_wire, + ppnum_t *physpage_p); + +#endif /* !XNU_KERNEL_PRIVATE */ /* unwire a region */ extern kern_return_t vm_map_unwire( - vm_map_t map, - vm_offset_t start, - vm_offset_t end, - boolean_t user_wire); + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + boolean_t user_wire); + +#ifdef XNU_KERNEL_PRIVATE + +/* Enter a mapping of a memory object */ +extern kern_return_t vm_map_enter_mem_object( + vm_map_t map, + vm_map_offset_t *address, + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + ipc_port_t port, + vm_object_offset_t offset, + boolean_t needs_copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance); + +/* Enter a mapping of a memory object */ +extern kern_return_t vm_map_enter_mem_object_prefault( + vm_map_t map, + vm_map_offset_t *address, + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + ipc_port_t port, + vm_object_offset_t offset, + vm_prot_t cur_protection, + vm_prot_t max_protection, + upl_page_list_ptr_t page_list, + unsigned int page_list_count); + +/* Enter a mapping of a memory object */ +extern kern_return_t vm_map_enter_mem_object_control( + vm_map_t map, + vm_map_offset_t *address, + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + memory_object_control_t control, + vm_object_offset_t offset, + boolean_t needs_copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance); + +#endif /* !XNU_KERNEL_PRIVATE */ /* Deallocate a region 
*/ extern kern_return_t vm_map_remove( - vm_map_t map, - vm_offset_t start, - vm_offset_t end, - boolean_t flags); + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + boolean_t flags); + +/* Deallocate a region when the map is already locked */ +extern kern_return_t vm_map_remove_locked( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + boolean_t flags); + +/* Discard a copy without using it */ +extern void vm_map_copy_discard( + vm_map_copy_t copy); + +/* Overwrite existing memory with a copy */ +extern kern_return_t vm_map_copy_overwrite( + vm_map_t dst_map, + vm_map_address_t dst_addr, + vm_map_copy_t copy, + boolean_t interruptible); + +/* returns TRUE if size of vm_map_copy == size parameter FALSE otherwise */ +extern boolean_t vm_map_copy_validate_size( + vm_map_t dst_map, + vm_map_copy_t copy, + vm_map_size_t *size); /* Place a copy into a map */ extern kern_return_t vm_map_copyout( - vm_map_t dst_map, - vm_offset_t *dst_addr, /* OUT */ - vm_map_copy_t copy); + vm_map_t dst_map, + vm_map_address_t *dst_addr, /* OUT */ + vm_map_copy_t copy); + +extern kern_return_t vm_map_copyout_size( + vm_map_t dst_map, + vm_map_address_t *dst_addr, /* OUT */ + vm_map_copy_t copy, + vm_map_size_t copy_size); + +extern kern_return_t vm_map_copyout_internal( + vm_map_t dst_map, + vm_map_address_t *dst_addr, /* OUT */ + vm_map_copy_t copy, + vm_map_size_t copy_size, + boolean_t consume_on_success, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance); + +extern kern_return_t vm_map_copyin( + vm_map_t src_map, + vm_map_address_t src_addr, + vm_map_size_t len, + boolean_t src_destroy, + vm_map_copy_t *copy_result); /* OUT */ extern kern_return_t vm_map_copyin_common( - vm_map_t src_map, - vm_offset_t src_addr, - vm_size_t len, - boolean_t src_destroy, - boolean_t src_volatile, - vm_map_copy_t *copy_result, /* OUT */ - boolean_t use_maxprot); - -extern kern_return_t vm_region_clone( - ipc_port_t src_region, - ipc_port_t dst_region); - -extern kern_return_t vm_map_region_replace( - vm_map_t target_map, - ipc_port_t old_region, - ipc_port_t new_region, - vm_offset_t start, - vm_offset_t end); + vm_map_t src_map, + vm_map_address_t src_addr, + vm_map_size_t len, + boolean_t src_destroy, + boolean_t src_volatile, + vm_map_copy_t *copy_result, /* OUT */ + boolean_t use_maxprot); + +#define VM_MAP_COPYIN_SRC_DESTROY 0x00000001 +#define VM_MAP_COPYIN_USE_MAXPROT 0x00000002 +#define VM_MAP_COPYIN_ENTRY_LIST 0x00000004 +#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008 +#define VM_MAP_COPYIN_ALL_FLAGS 0x0000000F +extern kern_return_t vm_map_copyin_internal( + vm_map_t src_map, + vm_map_address_t src_addr, + vm_map_size_t len, + int flags, + vm_map_copy_t *copy_result); /* OUT */ -extern boolean_t vm_map_check_protection( - vm_map_t map, - vm_offset_t start, - vm_offset_t end, - vm_prot_t protection); +extern kern_return_t vm_map_copy_extract( + vm_map_t src_map, + vm_map_address_t src_addr, + vm_map_size_t len, + vm_map_copy_t *copy_result, /* OUT */ + vm_prot_t *cur_prot, /* OUT */ + vm_prot_t *max_prot); + + +extern void vm_map_disable_NX( + vm_map_t map); + +extern void vm_map_disallow_data_exec( + vm_map_t map); + +extern void vm_map_set_64bit( + vm_map_t map); + +extern void vm_map_set_32bit( + vm_map_t map); + +extern void vm_map_set_jumbo( + vm_map_t map); + +extern boolean_t vm_map_has_hard_pagezero( + vm_map_t map, + vm_map_offset_t pagezero_size); +extern void vm_commit_pagezero_status(vm_map_t tmap); + +#ifdef __arm__ +static inline 
boolean_t vm_map_is_64bit(__unused vm_map_t map) { return 0; } +#else +extern boolean_t vm_map_is_64bit( + vm_map_t map); +#endif + + +extern kern_return_t vm_map_raise_max_offset( + vm_map_t map, + vm_map_offset_t new_max_offset); + +extern kern_return_t vm_map_raise_min_offset( + vm_map_t map, + vm_map_offset_t new_min_offset); +#if __x86_64__ +extern void vm_map_set_high_start( + vm_map_t map, + vm_map_offset_t high_start); +#endif /* __x86_64__ */ + +extern vm_map_offset_t vm_compute_max_offset( + boolean_t is64); + +extern void vm_map_get_max_aslr_slide_section( + vm_map_t map, + int64_t *max_sections, + int64_t *section_size); + +extern uint64_t vm_map_get_max_aslr_slide_pages( + vm_map_t map); + +extern uint64_t vm_map_get_max_loader_aslr_slide_pages( + vm_map_t map); + +extern void vm_map_set_user_wire_limit( + vm_map_t map, + vm_size_t limit); + +extern void vm_map_switch_protect( + vm_map_t map, + boolean_t val); + +extern void vm_map_iokit_mapped_region( + vm_map_t map, + vm_size_t bytes); + +extern void vm_map_iokit_unmapped_region( + vm_map_t map, + vm_size_t bytes); + + +extern boolean_t first_free_is_valid(vm_map_t); + +extern int vm_map_page_shift( + vm_map_t map); + +extern vm_map_offset_t vm_map_page_mask( + vm_map_t map); + +extern int vm_map_page_size( + vm_map_t map); + +extern vm_map_offset_t vm_map_round_page_mask( + vm_map_offset_t offset, + vm_map_offset_t mask); + +extern vm_map_offset_t vm_map_trunc_page_mask( + vm_map_offset_t offset, + vm_map_offset_t mask); + +extern boolean_t vm_map_page_aligned( + vm_map_offset_t offset, + vm_map_offset_t mask); + +#ifdef XNU_KERNEL_PRIVATE +extern kern_return_t vm_map_page_info( + vm_map_t map, + vm_map_offset_t offset, + vm_page_info_flavor_t flavor, + vm_page_info_t info, + mach_msg_type_number_t *count); +extern kern_return_t vm_map_page_range_info_internal( + vm_map_t map, + vm_map_offset_t start_offset, + vm_map_offset_t end_offset, + vm_page_info_flavor_t flavor, + vm_page_info_t info, + mach_msg_type_number_t *count); +#endif /* XNU_KERNEL_PRIVATE */ + + +#ifdef MACH_KERNEL_PRIVATE /* * Macros to invoke vm_map_copyin_common. vm_map_copyin is the @@ -892,6 +1437,45 @@ extern boolean_t vm_map_check_protection( vm_map_copyin_common(src_map, src_addr, len, src_destroy, \ FALSE, copy_result, TRUE) + +/* + * Internal macros for rounding and truncation of vm_map offsets and sizes + */ +#define VM_MAP_ROUND_PAGE(x,pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask))) +#define VM_MAP_TRUNC_PAGE(x,pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask))) + +/* + * Macros for rounding and truncation of vm_map offsets and sizes + */ +#define VM_MAP_PAGE_SHIFT(map) ((map) ? 
(map)->hdr.page_shift : PAGE_SHIFT) +#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map))) +#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1) +#define VM_MAP_PAGE_ALIGNED(x,pgmask) (((x) & (pgmask)) == 0) + +static inline void vm_prot_to_wimg(unsigned int prot, unsigned int *wimg) +{ + switch (prot) { + case MAP_MEM_NOOP: break; + case MAP_MEM_IO: *wimg = VM_WIMG_IO; break; + case MAP_MEM_COPYBACK: *wimg = VM_WIMG_USE_DEFAULT; break; + case MAP_MEM_INNERWBACK: *wimg = VM_WIMG_INNERWBACK; break; + case MAP_MEM_POSTED: *wimg = VM_WIMG_POSTED; break; + case MAP_MEM_WTHRU: *wimg = VM_WIMG_WTHRU; break; + case MAP_MEM_WCOMB: *wimg = VM_WIMG_WCOMB; break; + default: + panic("Unrecognized mapping type %u\n", prot); + } +} + +#endif /* MACH_KERNEL_PRIVATE */ + +#ifdef XNU_KERNEL_PRIVATE +extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift); +#endif /* XNU_KERNEL_PRIVATE */ + +#define vm_map_round_page(x,pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask))) +#define vm_map_trunc_page(x,pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask))) + /* * Flags for vm_map_remove() and vm_map_delete() */ @@ -899,8 +1483,77 @@ extern boolean_t vm_map_check_protection( #define VM_MAP_REMOVE_KUNWIRE 0x1 #define VM_MAP_REMOVE_INTERRUPTIBLE 0x2 #define VM_MAP_REMOVE_WAIT_FOR_KWIRE 0x4 +#define VM_MAP_REMOVE_SAVE_ENTRIES 0x8 +#define VM_MAP_REMOVE_NO_PMAP_CLEANUP 0x10 +#define VM_MAP_REMOVE_NO_MAP_ALIGN 0x20 +#define VM_MAP_REMOVE_NO_UNNESTING 0x40 +#define VM_MAP_REMOVE_IMMUTABLE 0x80 + +/* Support for UPLs from vm_maps */ + +#ifdef XNU_KERNEL_PRIVATE -#endif /* __APPLE_API_PRIVATE */ +extern kern_return_t vm_map_get_upl( + vm_map_t target_map, + vm_map_offset_t map_offset, + upl_size_t *size, + upl_t *upl, + upl_page_info_array_t page_info, + unsigned int *page_infoCnt, + upl_control_flags_t *flags, + vm_tag_t tag, + int force_data_sync); + +#endif /* XNU_KERNEL_PRIVATE */ + +extern void +vm_map_sizes(vm_map_t map, + vm_map_size_t * psize, + vm_map_size_t * pfree, + vm_map_size_t * plargest_free); + +#if CONFIG_DYNAMIC_CODE_SIGNING +extern kern_return_t vm_map_sign(vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end); +#endif + +extern kern_return_t vm_map_partial_reap( + vm_map_t map, + unsigned int *reclaimed_resident, + unsigned int *reclaimed_compressed); + + +#if DEVELOPMENT || DEBUG + +extern int vm_map_disconnect_page_mappings( + vm_map_t map, + boolean_t); +#endif + + +#if CONFIG_FREEZE + +extern kern_return_t vm_map_freeze( + vm_map_t map, + unsigned int *purgeable_count, + unsigned int *wired_count, + unsigned int *clean_count, + unsigned int *dirty_count, + unsigned int dirty_budget, + boolean_t *has_shared); +#endif + +__END_DECLS + +/* + * In some cases, we don't have a real VM object but still want to return a + * unique ID (to avoid a memory region looking like shared memory), so build + * a fake pointer based on the map's ledger and the index of the ledger being + * reported. + */ +#define INFO_MAKE_FAKE_OBJECT_ID(map,ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id))) + +#endif /* KERNEL_PRIVATE */ #endif /* _VM_VM_MAP_H_ */ -
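
The VME_OFFSET()/VME_ALIAS() accessors in this diff pack two values into the single vme_offset field: the page-aligned object offset lives in the high bits and the user tag ("alias") rides in the low PAGE_MASK bits, which is why the alias alone may be updated while the map lock is only held shared. A minimal sketch of the packing, assuming an entry in a suitably locked map; the helper name is hypothetical and VM_MEMORY_MALLOC (from mach/vm_statistics.h) is used purely as an example tag value:

/* hypothetical illustration of the offset/alias packing */
static void
tag_entry_sketch(vm_map_entry_t entry)
{
	VME_OFFSET_SET(entry, 0x40000);          /* offset must be page aligned (asserted) */
	VME_ALIAS_SET(entry, VM_MEMORY_MALLOC);  /* tag goes into the low PAGE_MASK bits   */

	assert(VME_OFFSET(entry) == 0x40000);            /* high bits: object offset */
	assert(VME_ALIAS(entry) == VM_MEMORY_MALLOC);    /* low bits: alias/tag      */
}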
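
The FOOTPRINT ACCOUNTING comment in the diff boils down to one decision per entry: submap contents never count, iokit-accounted entries are charged their full virtual size when entered and removed, and everything else is (or is not) counted page-by-page by the pmap layer according to use_pmap. A sketch of that rule as a predicate; the helper name is hypothetical and it simply restates the documented logic using the is_sub_map, iokit_acct and use_pmap bits of vm_map_entry:

/* hypothetical: should the pmap layer account pages entered from this entry? */
static boolean_t
entry_uses_pmap_accounting(vm_map_entry_t entry)
{
	if (entry->is_sub_map)
		return FALSE;    /* submap contents do not count toward the footprint   */
	if (entry->iokit_acct)
		return FALSE;    /* whole virtual size charged at map entry and removal */
	/* use_pmap == FALSE means "alternate accounting": pmap is told not to count */
	return entry->use_pmap ? TRUE : FALSE;
}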
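
For the KERNEL_BUFFER copy format described in the diff, the zero-length cpy_kdata array means the payload begins immediately after the fixed part of struct vm_map_copy, so an allocation of cpy_kdata_hdr_sz + len bytes holds both. A rough sketch of that layout with a hypothetical helper name; kalloc() and memcpy() are the usual kernel facilities, and error handling beyond the allocation check is omitted:

/* hypothetical: build a small KERNEL_BUFFER copy from a kernel buffer */
static vm_map_copy_t
kernel_buffer_copy_sketch(const void *src, vm_size_t len)
{
	vm_map_copy_t copy;

	copy = (vm_map_copy_t)kalloc(cpy_kdata_hdr_sz + len);
	if (copy == NULL)
		return NULL;
	copy->type   = VM_MAP_COPY_KERNEL_BUFFER;
	copy->offset = 0;
	copy->size   = len;
	memcpy(copy->cpy_kdata, src, len);   /* data overlays the c_u union space */
	return copy;
}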
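
The comment on vm_map_lock_read_to_write() is easy to misread: the macro evaluates to zero on success and to a non-zero value on failure, and on failure the shared hold has already been dropped. A usage sketch of that convention (hypothetical function; the timestamp re-check is the customary way to notice that the map changed while it was unlocked):

/* hypothetical: upgrade from a shared to an exclusive map lock */
static void
upgrade_sketch(vm_map_t map)
{
	unsigned int ts;

	vm_map_lock_read(map);
	ts = map->timestamp;
	/* ... examine entries under the shared lock ... */
	if (vm_map_lock_read_to_write(map)) {
		/* upgrade failed: the lock was released, so take it exclusively */
		vm_map_lock(map);
		if (map->timestamp != ts) {
			/* the map changed in the window: redo the lookup */
		}
	}
	/* exclusive access from here on */
	vm_map_unlock(map);
}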
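
Because a map may use a page size different from the kernel's (hdr.page_shift), the rounding helpers take the map's own mask rather than the global PAGE_MASK. A worked example with made-up values for a 16KB-page map (page_shift 14, mask 0x3fff):

	vm_map_offset_t mask  = 0x3fff;            /* VM_MAP_PAGE_MASK(map) for 16K pages */
	vm_map_offset_t addr  = 0x100005000ULL;    /* arbitrary unaligned offset          */
	vm_map_offset_t trunc = vm_map_trunc_page(addr, mask);   /* -> 0x100004000 */
	vm_map_offset_t round = vm_map_round_page(addr, mask);   /* -> 0x100008000 */

	assert(VM_MAP_PAGE_ALIGNED(trunc, mask));
	assert(VM_MAP_PAGE_ALIGNED(round, mask));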