/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/vm_map.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory map module definitions.
 */
#ifndef _VM_VM_MAP_H_
#define _VM_VM_MAP_H_

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>
#include <mach/sdt.h>
#include <vm/pmap.h>
#include <os/overflow.h>

#ifdef  KERNEL_PRIVATE

#include <sys/cdefs.h>

#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_protos.h>
#endif /* XNU_KERNEL_PRIVATE */
extern void     vm_map_reference(vm_map_t map);
extern vm_map_t current_map(void);

/* Setup reserved areas in a new VM map */
extern kern_return_t    vm_map_exec(
	vm_map_t        new_map,
	task_t          task,
	boolean_t       is64bit,
	void            *fsroot,
	cpu_type_t      cpu,
	cpu_subtype_t   cpu_subtype,
	boolean_t       reslide);
#ifdef  MACH_KERNEL_PRIVATE

#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>
#include <os/refcnt.h>

#define current_map_fast()      (current_thread()->map)
#define current_map()           (current_map_fast())

#include <vm/vm_map_store.h>
/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				 used for inter-map copy operations
 */
typedef struct vm_map_entry     *vm_map_entry_t;
#define VM_MAP_ENTRY_NULL       ((vm_map_entry_t) NULL)
/*
 *	Type:		vm_map_object_t [internal use only]
 *
 *	Description:
 *		The target of an address mapping, either a virtual
 *		memory object or a sub map (of the kernel map).
 */
typedef union vm_map_object {
	vm_object_t             vmo_object;     /* object object */
	vm_map_t                vmo_submap;     /* belongs to another map */
} vm_map_object_t;
#define named_entry_lock_init(object)           lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object)        lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object)                lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)              lck_mtx_unlock(&(object)->Lock)

#if VM_NAMED_ENTRY_LIST
extern queue_head_t vm_named_entry_list;
#endif /* VM_NAMED_ENTRY_LIST */
/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions (read, write)
 *		with which it can be mapped.
 */
struct vm_named_entry {
	decl_lck_mtx_data(, Lock);              /* Synchronization */
	union {
		vm_map_t        map;            /* map backing submap */
		vm_map_copy_t   copy;           /* a VM map copy */
	} backing;
	vm_object_offset_t      offset;         /* offset into object */
	vm_object_size_t        size;           /* size of region */
	vm_object_offset_t      data_offset;    /* offset to first byte of data */
	vm_prot_t               protection;     /* access permissions */
	int                     ref_count;      /* Number of references */
	unsigned int                            /* Is backing.xxx : */
	/* boolean_t */ is_object:1,            /* ... a VM object (wrapped in a VM map copy) */
	/* boolean_t */ internal:1,             /* ... an internal object */
	/* boolean_t */ is_sub_map:1,           /* ... a submap? */
	/* boolean_t */ is_copy:1;              /* ... a VM map copy */
#if VM_NAMED_ENTRY_LIST
	queue_chain_t           named_entry_list;
	int                     named_entry_alias;
	mach_port_t             named_entry_port;
#define NAMED_ENTRY_BT_DEPTH 16
	void                    *named_entry_bt[NAMED_ENTRY_BT_DEPTH];
#endif /* VM_NAMED_ENTRY_LIST */
};
/*
 *	Type:		vm_map_entry_t [internal use only]
 *
 *	Description:
 *		A single mapping within an address map.
 *
 *	Implementation:
 *		Address map entries consist of start and end addresses,
 *		a VM object (or sub map) and offset into that object,
 *		and user-exported inheritance and protection information.
 *		Control information for virtual copy operations is also
 *		stored in the address map entry.
 */
struct vm_map_links {
	struct vm_map_entry     *prev;          /* previous entry */
	struct vm_map_entry     *next;          /* next entry */
	vm_map_offset_t         start;          /* start address */
	vm_map_offset_t         end;            /* end address */
};
/*
 * FOOTPRINT ACCOUNTING:
 * The "memory footprint" is better described in the pmap layer.
 *
 * At the VM level, these 2 vm_map_entry_t fields are relevant:
 * iokit_mapped:
 *	For an "iokit_mapped" entry, we add the size of the entry to the
 *	footprint when the entry is entered into the map and we subtract that
 *	size when the entry is removed.  No other accounting should take place.
 *	"use_pmap" should be FALSE but is not taken into account.
 * use_pmap: (only when is_sub_map is FALSE)
 *	This indicates if we should ask the pmap layer to account for pages
 *	in this mapping.  If FALSE, we expect that another form of accounting
 *	is being used (e.g. "iokit_mapped" or the explicit accounting of
 *	non-volatile purgable memory).
 *
 * So the logic is mostly (see the sketch after struct vm_map_entry below):
 * if entry->is_sub_map == TRUE
 *	anything in a submap does not count for the footprint
 * else if entry->iokit_mapped == TRUE
 *	footprint includes the entire virtual size of this entry
 * else if entry->use_pmap == FALSE
 *	tell pmap NOT to account for pages being pmap_enter()'d from this
 *	mapping (i.e. use "alternate accounting")
 * else
 *	pmap will account for pages being pmap_enter()'d from this mapping
 *	as it sees fit (only if anonymous, etc...)
 */
struct vm_map_entry {
	struct vm_map_links     links;                  /* links to other entries */
#define vme_prev                links.prev
#define vme_next                links.next
#define vme_start               links.start
#define vme_end                 links.end

	struct vm_map_store     store;
	union vm_map_object     vme_object;             /* object I point to */
	vm_object_offset_t      vme_offset;             /* offset into object */

	unsigned int
	/* boolean_t */ is_shared:1,                    /* region is shared */
	/* boolean_t */ is_sub_map:1,                   /* Is "object" a submap? */
	/* boolean_t */ in_transition:1,                /* Entry being changed */
	/* boolean_t */ needs_wakeup:1,                 /* Waiters on in_transition */
	/* vm_behavior_t */ behavior:2,                 /* user paging behavior hint */
	/* behavior is not defined for submap type */
	/* boolean_t */ needs_copy:1,                   /* object need to be copied? */

	/* Only in task maps: */
	/* vm_prot_t */ protection:3,                   /* protection code */
	/* vm_prot_t */ max_protection:3,               /* maximum protection */
	/* vm_inherit_t */ inheritance:2,               /* inheritance */
	/* boolean_t */ use_pmap:1,                     /*
	                                                 * use_pmap is overloaded:
	                                                 * if "is_sub_map":
	                                                 *      use a nested pmap?
	                                                 * else (i.e. if object):
	                                                 *      use pmap accounting
	                                                 *      for footprint?
	                                                 */
	/* boolean_t */ no_cache:1,                     /* should new pages be cached? */
	/* boolean_t */ permanent:1,                    /* mapping can not be removed */
	/* boolean_t */ superpage_size:1,               /* use superpages of a certain size */
	/* boolean_t */ map_aligned:1,                  /* align to map's page size */
	/* boolean_t */ zero_wired_pages:1,             /* zero out the wired pages of
	                                                 * this entry if it is being deleted
	                                                 * without unwiring them */
	/* boolean_t */ used_for_jit:1,
	/* boolean_t */ pmap_cs_associated:1,           /* pmap_cs will validate */
	/* boolean_t */ from_reserved_zone:1,           /* Allocated from
	                                                 * kernel reserved zone */

	/* iokit accounting: use the virtual size rather than resident size: */
	/* boolean_t */ iokit_acct:1,
	/* boolean_t */ vme_resilient_codesign:1,
	/* boolean_t */ vme_resilient_media:1,
	/* boolean_t */ vme_atomic:1,                   /* entry cannot be split/coalesced */
	/* boolean_t */ vme_no_copy_on_read:1,
	/* boolean_t */ translated_allow_execute:1;     /* execute in translated processes */

	unsigned short          wired_count;            /* can be paged if = 0 */
	unsigned short          user_wired_count;       /* for vm_wire */
#if DEBUG
#define MAP_ENTRY_CREATION_DEBUG (1)
#define MAP_ENTRY_INSERTION_DEBUG (1)
#endif

#if MAP_ENTRY_CREATION_DEBUG
	struct vm_map_header    *vme_creation_maphdr;
	uintptr_t               vme_creation_bt[16];
#endif
#if MAP_ENTRY_INSERTION_DEBUG
	vm_map_offset_t         vme_start_original;
	vm_map_offset_t         vme_end_original;
	uintptr_t               vme_insertion_bt[16];
#endif
};
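/*
 * Illustrative sketch (not part of the original header): the FOOTPRINT
 * ACCOUNTING rules above, restated as a predicate over the fields of
 * struct vm_map_entry.  The helper name is hypothetical; the real
 * accounting is spread across the pmap layer and the map entry
 * insertion/removal paths.
 */
static inline boolean_t
vme_uses_pmap_accounting_example(vm_map_entry_t entry)
{
	if (entry->is_sub_map) {
		/* submap contents never count for the footprint */
		return FALSE;
	}
	if (entry->iokit_acct) {
		/* whole virtual size counted at map/unmap time instead */
		return FALSE;
	}
	/* otherwise, use_pmap decides pmap vs. "alternate" accounting */
	return entry->use_pmap;
}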
#define VME_SUBMAP_PTR(entry)                   \
	(&((entry)->vme_object.vmo_submap))
#define VME_SUBMAP(entry)                                       \
	((vm_map_t)((uintptr_t)0 + *VME_SUBMAP_PTR(entry)))
#define VME_OBJECT_PTR(entry)                   \
	(&((entry)->vme_object.vmo_object))
#define VME_OBJECT(entry)                                       \
	((vm_object_t)((uintptr_t)0 + *VME_OBJECT_PTR(entry)))
#define VME_OFFSET(entry)                       \
	((entry)->vme_offset & (vm_object_offset_t)~FOURK_PAGE_MASK)
#define VME_ALIAS_MASK                          (FOURK_PAGE_MASK)
#define VME_ALIAS(entry)                        \
	((unsigned int)((entry)->vme_offset & VME_ALIAS_MASK))
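/*
 * Illustrative sketch (not part of the original header): mapping offsets are
 * at least 4K-aligned, so the low bits of "vme_offset" double as storage for
 * the entry's alias (user tag).  A hypothetical round-trip check of that
 * packing, using only the accessors above:
 */
static inline void
vme_offset_alias_packing_check_example(vm_map_entry_t entry)
{
	/* the offset and alias halves occupy disjoint bits... */
	assert((VME_OFFSET(entry) & VME_ALIAS_MASK) == 0);
	/* ...and together they reconstruct the raw field exactly */
	assert((VME_OFFSET(entry) | VME_ALIAS(entry)) == entry->vme_offset);
}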
static inline void
VME_OBJECT_SET(
	vm_map_entry_t entry,
	vm_object_t object)
{
	entry->vme_object.vmo_object = object;
	if (object != VM_OBJECT_NULL && !object->internal) {
		entry->vme_resilient_media = FALSE;
	}
	entry->vme_resilient_codesign = FALSE;
	entry->used_for_jit = FALSE;
}

static inline void
VME_SUBMAP_SET(
	vm_map_entry_t entry,
	vm_map_t submap)
{
	entry->vme_object.vmo_submap = submap;
}

static inline void
VME_OFFSET_SET(
	vm_map_entry_t entry,
	vm_object_offset_t offset)
{
	unsigned int alias;
	alias = VME_ALIAS(entry);
	assert((offset & FOURK_PAGE_MASK) == 0);
	entry->vme_offset = offset | alias;
}

/*
 * IMPORTANT:
 * The "alias" field can be updated while holding the VM map lock
 * "shared".  It's OK as long as it's the only field that can be
 * updated without the VM map "exclusive" lock.
 */
static inline void
VME_ALIAS_SET(
	vm_map_entry_t entry,
	int alias)
{
	vm_object_offset_t offset;
	offset = VME_OFFSET(entry);
	entry->vme_offset = offset | ((unsigned int)alias & VME_ALIAS_MASK);
}

static inline void
VME_OBJECT_SHADOW(
	vm_map_entry_t entry,
	vm_object_size_t length)
{
	vm_object_t object;
	vm_object_offset_t offset;

	object = VME_OBJECT(entry);
	offset = VME_OFFSET(entry);
	vm_object_shadow(&object, &offset, length);
	if (object != VME_OBJECT(entry)) {
		VME_OBJECT_SET(entry, object);
		entry->use_pmap = TRUE;
	}
	if (offset != VME_OFFSET(entry)) {
		VME_OFFSET_SET(entry, offset);
	}
}
/*
 *	Convenience macros for dealing with superpages
 *	SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) (a & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) ((a + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)
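/*
 * Illustrative sketch (not part of the original header, and only meaningful
 * on configurations where pmap.h defines SUPERPAGE_NBASEPAGES): an address
 * is superpage-aligned exactly when rounding it down is a no-op.
 */
#ifdef SUPERPAGE_NBASEPAGES
static inline boolean_t
superpage_aligned_example(vm_map_offset_t addr)
{
	return SUPERPAGE_ROUND_DOWN(addr) == addr;
}
#endif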
/*
 *	wired_counts are unsigned short.  This value is used to safeguard
 *	against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT          65535
/*
 *	Type:		struct vm_map_header
 *
 *	Description:
 *		Header for a vm_map and a vm_map_copy.
 */
struct vm_map_header {
	struct vm_map_links     links;          /* first, last, min, max */
	int                     nentries;       /* Number of entries */
	boolean_t               entries_pageable;
	                                        /* are map entries pageable? */
#ifdef VM_MAP_STORE_USE_RB
	struct rb_head          rb_head_store;
#endif
	int                     page_shift;     /* page shift */
};

#define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift)
#define VM_MAP_HDR_PAGE_SIZE(hdr) (1 << VM_MAP_HDR_PAGE_SHIFT((hdr)))
#define VM_MAP_HDR_PAGE_MASK(hdr) (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1)
/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 */
struct _vm_map {
	lck_rw_t                lock;           /* map lock */
	struct vm_map_header    hdr;            /* Map entry header */
#define min_offset              hdr.links.start /* start of range */
#define max_offset              hdr.links.end   /* end of range */
	pmap_t                  XNU_PTRAUTH_SIGNED_PTR("_vm_map.pmap") pmap; /* Physical map */
	vm_map_size_t           size;           /* virtual size */
	vm_map_size_t           user_wire_limit;/* rlimit on user locked memory */
	vm_map_size_t           user_wire_size; /* current size of user locked memory in this map */
#if XNU_TARGET_OS_OSX
	vm_map_offset_t         vmmap_high_start;
#endif /* XNU_TARGET_OS_OSX */

	union {
		/*
		 * If map->disable_vmentry_reuse == TRUE:
		 * the end address of the highest allocated vm_map_entry_t.
		 */
		vm_map_offset_t         vmu1_highest_entry_end;
		/*
		 * For a nested VM map:
		 * the lowest address in this nested VM map that we would
		 * expect to be unnested under normal operation (i.e. for
		 * regular copy-on-write on DATA section).
		 */
		vm_map_offset_t         vmu1_lowest_unnestable_start;
	} vmu1;
#define highest_entry_end       vmu1.vmu1_highest_entry_end
#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start
	decl_lck_mtx_data(, s_lock);            /* Lock ref, res fields */
	lck_mtx_ext_t           s_lock_ext;
	vm_map_entry_t          hint;           /* hint for quick lookups */
	union {
		struct vm_map_links*    vmmap_hole_hint;       /* hint for quick hole lookups */
		struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
	} vmmap_u_1;
#define hole_hint vmmap_u_1.vmmap_hole_hint
#define vmmap_corpse_footprint vmmap_u_1.vmmap_corpse_footprint
	union {
		vm_map_entry_t          _first_free;    /* First free space hint */
		struct vm_map_links*    _holes;         /* links all holes between entries */
	} f_s;                                          /* Union for free space data structures being used */
#define first_free      f_s._first_free
#define holes_list      f_s._holes

	struct os_refcnt        map_refcnt;     /* Reference count */

	unsigned int
	/* boolean_t */ wait_for_space:1,        /* Should callers wait for space? */
	/* boolean_t */ wiring_required:1,       /* All memory wired? */
	/* boolean_t */ no_zero_fill:1,          /* No zero fill absent pages */
	/* boolean_t */ mapped_in_other_pmaps:1, /* has this submap been mapped in maps that use a different pmap */
	/* boolean_t */ switch_protect:1,        /* Protect map from write faults while switched */
	/* boolean_t */ disable_vmentry_reuse:1, /* All vm entries should keep using newer and higher addresses in the map */
	/* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
	/* boolean_t */ holelistenabled:1,
	/* boolean_t */ is_nested_map:1,
	/* boolean_t */ map_disallow_new_exec:1, /* Disallow new executable code */
	/* boolean_t */ jit_entry_exists:1,
	/* boolean_t */ has_corpse_footprint:1,
	/* boolean_t */ terminated:1,
	/* boolean_t */ is_alien:1,              /* for platform simulation, i.e. PLATFORM_IOS on OSX */
	/* boolean_t */ cs_enforcement:1,        /* code-signing enforcement */
	/* boolean_t */ reserved_regions:1,      /* has reserved regions. The map size that userspace sees should ignore these. */
	/* boolean_t */ single_jit:1,            /* only allow one JIT mapping */
	/* reserved */ pad:15;
	unsigned int            timestamp;       /* Version number */
};
#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
#define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
#define vm_map_first_entry(map) ((map)->hdr.links.next)
#define vm_map_last_entry(map)  ((map)->hdr.links.prev)
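/*
 * Illustrative sketch (not part of the original header): the entry list is
 * circular, with vm_map_to_entry(map) acting as the sentinel, so a walk runs
 * from vm_map_first_entry() until it comes back around to the sentinel.
 * "count_map_entries_example" is a hypothetical helper (the map already
 * caches this count in hdr.nentries); the caller is assumed to hold the map
 * lock, e.g. via vm_map_lock_read() below.
 */
static inline int
count_map_entries_example(vm_map_t map)
{
	vm_map_entry_t  entry;
	int             count = 0;

	for (entry = vm_map_first_entry(map);
	    entry != vm_map_to_entry(map);
	    entry = entry->vme_next) {
		count++;
	}
	return count;
}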
/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int    main_timestamp;
} vm_map_version_t;
/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit to its destination.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 *		There are three formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header links
 *		entry onto which the other entries that represent
 *		the region are chained.
 *
 *		The second format is a single vm object.  This was used
 *		primarily in the pageout path - but is not currently used
 *		except for placeholder copy objects (see vm_map_copy_copy()).
 *
 *		The third format is a kernel buffer copy object - for data
 *		small enough that physical copies were the most efficient
 *		method.  This method uses a zero-sized array unioned with
 *		other format-specific data in the 'c_u' member.  This unsized
 *		array overlaps the other elements and allows us to use this
 *		extra structure space for physical memory copies.  On 64-bit
 *		systems this saves ~64 bytes per vm_map_copy.
 */
struct vm_map_copy {
	int                     type;
#define VM_MAP_COPY_ENTRY_LIST          1
#define VM_MAP_COPY_OBJECT              2
#define VM_MAP_COPY_KERNEL_BUFFER       3
	vm_object_offset_t      offset;
	vm_map_size_t           size;
	union {
		struct vm_map_header    hdr;    /* ENTRY_LIST */
		vm_object_t             object; /* OBJECT */
		void                    *XNU_PTRAUTH_SIGNED_PTR("vm_map_copy.kdata") kdata; /* KERNEL_BUFFER */
	} c_u;
};
#define cpy_hdr         c_u.hdr

#define cpy_object      c_u.object
#define cpy_kdata       c_u.kdata

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)
/*
 *	Useful macros for entry list copy objects
 */
#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy)           \
	((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy)            \
	((copy)->cpy_hdr.links.prev)
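/*
 * Illustrative sketch (not part of the original header): consumers of a
 * vm_map_copy_t dispatch on "type" to find where the copied memory actually
 * lives.  The helper name is hypothetical.
 */
static inline boolean_t
vm_map_copy_is_kernel_buffer_example(vm_map_copy_t copy)
{
	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		/* region described by entries chained off cpy_hdr.links */
		return FALSE;
	case VM_MAP_COPY_OBJECT:
		/* region described by a single cpy_object */
		return FALSE;
	case VM_MAP_COPY_KERNEL_BUFFER:
		/* small data physically copied into the inline cpy_kdata buffer */
		return TRUE;
	default:
		return FALSE;
	}
}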
extern kern_return_t vm_map_copy_adjust_to_target(
	vm_map_copy_t           copy_map,
	vm_map_offset_t         offset,
	vm_map_size_t           size,
	vm_map_t                target_map,
	boolean_t               copy,
	vm_map_copy_t           *target_copy_map_p,     /* OUT */
	vm_map_offset_t         *overmap_start_p,       /* OUT */
	vm_map_offset_t         *overmap_end_p,         /* OUT */
	vm_map_offset_t         *trimmed_start_p);      /* OUT */
/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *
 *	Description:
 *		Perform locking on the data portion of a map.
 *		When multiple maps are to be locked, order by map address.
 *		(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map)                                   \
	((map)->timestamp = 0 ,                                 \
	lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))

#define vm_map_lock(map)                     \
	MACRO_BEGIN                          \
	DTRACE_VM(vm_map_lock_w);            \
	lck_rw_lock_exclusive(&(map)->lock); \
	MACRO_END

#define vm_map_unlock(map)          \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_w); \
	(map)->timestamp++;         \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_read(map)             \
	MACRO_BEGIN                       \
	DTRACE_VM(vm_map_lock_r);         \
	lck_rw_lock_shared(&(map)->lock); \
	MACRO_END

#define vm_map_unlock_read(map)     \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_r); \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_write_to_read(map)                 \
	MACRO_BEGIN                                    \
	DTRACE_VM(vm_map_lock_downgrade);              \
	(map)->timestamp++;                            \
	lck_rw_lock_exclusive_to_shared(&(map)->lock); \
	MACRO_END
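/*
 * Illustrative sketch (not part of the original header): the canonical
 * mutation pattern.  vm_map_unlock() bumps "timestamp", which is what lets
 * vm_map_version_t/vm_map_verify() detect that a map changed between a
 * lookup and a later re-lock.  The helper name is hypothetical.
 */
static inline void
vm_map_mutate_example(vm_map_t map)
{
	vm_map_lock(map);       /* exclusive writer lock */
	/* ... mutate the entry list while the lock is held ... */
	vm_map_unlock(map);     /* drops the lock and increments map->timestamp */
}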
__attribute__((always_inline))
int vm_map_lock_read_to_write(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock_read(vm_map_t map);

int vm_self_region_page_shift(vm_map_t target_map);
int vm_self_region_page_shift_safely(vm_map_t target_map);
#if MACH_ASSERT || DEBUG
#define vm_map_lock_assert_held(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD)
#define vm_map_lock_assert_shared(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_SHARED)
#define vm_map_lock_assert_exclusive(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_map_lock_assert_notheld(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
#else /* MACH_ASSERT || DEBUG */
#define vm_map_lock_assert_held(map)
#define vm_map_lock_assert_shared(map)
#define vm_map_lock_assert_exclusive(map)
#define vm_map_lock_assert_notheld(map)
#endif /* MACH_ASSERT || DEBUG */
/*
 *	Exported procedures that operate on vm_map_t.
 */

/* Initialize the module */
extern void             vm_map_init(void);

extern void             vm_kernel_reserved_entry_init(void);

/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t vm_map_find_space(
	vm_map_t                map,
	vm_map_address_t        *address,       /* OUT */
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_map_entry_t          *o_entry);      /* OUT */
/* flags for vm_map_find_space */
#define VM_MAP_FIND_LAST_FREE           0x01

extern void vm_map_clip_start(
	vm_map_t        map,
	vm_map_entry_t  entry,
	vm_map_offset_t endaddr);

extern void vm_map_clip_end(
	vm_map_t        map,
	vm_map_entry_t  entry,
	vm_map_offset_t endaddr);

extern boolean_t vm_map_entry_should_cow_for_true_share(
	vm_map_entry_t  entry);
/* Look up the map entry containing the specified address in the given map;
 * on failure, returns the entry immediately preceding the address. */
extern boolean_t        vm_map_lookup_entry(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry);        /* OUT */

extern void             vm_map_copy_remap(
	vm_map_t                map,
	vm_map_entry_t          where,
	vm_map_copy_t           copy,
	vm_map_offset_t         adjustment,
	vm_prot_t               cur_prot,
	vm_prot_t               max_prot,
	vm_inherit_t            inheritance);
/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t    vm_map_lookup_locked(
	vm_map_t                *var_map,       /* IN/OUT */
	vm_map_address_t        vaddr,
	vm_prot_t               fault_type,
	int                     object_lock_type,
	vm_map_version_t        *out_version,   /* OUT */
	vm_object_t             *object,        /* OUT */
	vm_object_offset_t      *offset,        /* OUT */
	vm_prot_t               *out_prot,      /* OUT */
	boolean_t               *wired,         /* OUT */
	vm_object_fault_info_t  fault_info,     /* OUT */
	vm_map_t                *real_map,      /* OUT */
	bool                    *contended);    /* OUT */
/* Verifies that the map has not changed since the given version. */
extern boolean_t        vm_map_verify(
	vm_map_t                map,
	vm_map_version_t        *version);      /* REF */
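/*
 * Illustrative sketch (not part of the original header): vm_map_verify()
 * compares a previously captured vm_map_version_t against the map's current
 * timestamp and must be called with the map lock held.  The helper name is
 * hypothetical.
 */
static inline boolean_t
vm_map_still_current_example(vm_map_t map, vm_map_version_t *saved_version)
{
	boolean_t       unchanged;

	vm_map_lock_read(map);
	unchanged = vm_map_verify(map, saved_version);
	vm_map_unlock_read(map);
	return unchanged;
}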
extern vm_map_entry_t   vm_map_entry_insert(
	vm_map_t                map,
	vm_map_entry_t          insp_entry,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_map_kernel_flags_t   vmk_flags,
	boolean_t               needs_copy,
	boolean_t               is_shared,
	boolean_t               in_transition,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_behavior_t           behavior,
	vm_inherit_t            inheritance,
	unsigned short          wired_count,
	boolean_t               no_cache,
	boolean_t               permanent,
	boolean_t               no_copy_on_read,
	unsigned int            superpage_size,
	boolean_t               clear_map_aligned,
	boolean_t               is_submap,
	boolean_t               used_for_jit,
	int                     alias,
	boolean_t               translated_allow_execute);
/*
 *	Functions implemented as macros
 */
#define vm_map_min(map) ((map)->min_offset)
                                /* Lowest valid address in
                                 * a map */

#define vm_map_max(map) ((map)->max_offset)
                                /* Highest valid address */

#define vm_map_pmap(map)        ((map)->pmap)
                                /* Physical map associated
                                 * with this address map */

/* Gain a reference to an existing map */
extern void             vm_map_reference(
	vm_map_t        map);

/*
 *	Submap object.  Must be used to create memory to be put
 *	in a submap by vm_map_submap.
 */
extern vm_object_t      vm_submap_object;
/*
 *	Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible)           \
	((map)->timestamp++ ,                           \
	lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \
	    (event_t)&(map)->hdr, interruptible))

#define vm_map_entry_wakeup(map)        \
	thread_wakeup((event_t)(&(map)->hdr))
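/*
 * Illustrative sketch (not part of the original header): the usual way
 * in_transition entries are handled.  The waiter asks for a wakeup and
 * sleeps; vm_map_entry_wait() drops the exclusive map lock while asleep
 * and re-takes it before returning.  THREAD_UNINT here is an assumption;
 * real callers pick an interruptibility, and they must also re-lookup the
 * entry after waking because the map may have changed (elided below).
 */
static inline void
wait_for_entry_example(vm_map_t map, vm_map_entry_t entry)
{
	vm_map_lock_assert_exclusive(map);
	while (entry->in_transition) {
		entry->needs_wakeup = TRUE;
		(void)vm_map_entry_wait(map, THREAD_UNINT);
		/* re-lookup "entry" here before trusting it again */
	}
}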
/* simplify map entries */
extern void             vm_map_simplify_entry(
	vm_map_t        map,
	vm_map_entry_t  this_entry);
extern void             vm_map_simplify(
	vm_map_t        map,
	vm_map_offset_t start);
/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t    vm_map_copy_copy(
	vm_map_copy_t   copy);

/* Create a copy object from an object. */
extern kern_return_t    vm_map_copyin_object(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_map_copy_t           *copy_result); /* OUT */

extern kern_return_t    vm_map_random_address_for_size(
	vm_map_t        map,
	vm_map_offset_t *address,
	vm_map_size_t   size);
/* Enter a mapping */
extern kern_return_t    vm_map_enter(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

#ifdef __arm64__
extern kern_return_t    vm_map_enter_fourk(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);
#endif /* __arm64__ */
/* XXX should go away - replaced with regular enter of contig object */
extern kern_return_t    vm_map_enter_cpm(
	vm_map_t                map,
	vm_map_address_t        *addr,
	vm_map_size_t           size,
	int                     flags);

extern kern_return_t vm_map_remap(
	vm_map_t                target_map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_map_t                src_map,
	vm_map_offset_t         memory_address,
	boolean_t               copy,
	vm_prot_t               *cur_protection,        /* IN/OUT */
	vm_prot_t               *max_protection,        /* IN/OUT */
	vm_inherit_t            inheritance);
/*
 *	Read and write from a kernel buffer to a specified map.
 */
extern kern_return_t    vm_map_write_user(
	vm_map_t                map,
	void                    *src_p,
	vm_map_offset_t         dst_addr,
	vm_size_t               size);

extern kern_return_t    vm_map_read_user(
	vm_map_t                map,
	vm_map_offset_t         src_addr,
	void                    *dst_p,
	vm_size_t               size);
/* Create a new task map using an existing task map as a template. */
extern vm_map_t         vm_map_fork(
	ledger_t                ledger,
	vm_map_t                old_map,
	int                     options);
#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE       0x00000001
#define VM_MAP_FORK_PRESERVE_PURGEABLE          0x00000002
#define VM_MAP_FORK_CORPSE_FOOTPRINT            0x00000004
/* Change inheritance */
extern kern_return_t    vm_map_inherit(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_inherit_t            new_inheritance);

/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t    vm_map_machine_attribute(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_machine_attribute_t  attribute,
	vm_machine_attribute_val_t *value); /* IN/OUT */

extern kern_return_t    vm_map_msync(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_size_t           size,
	vm_sync_t               sync_flags);

/* Set paging behavior */
extern kern_return_t    vm_map_behavior_set(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_behavior_t           new_behavior);
extern kern_return_t    vm_map_region(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           *size,
	vm_region_flavor_t      flavor,
	vm_region_info_t        info,
	mach_msg_type_number_t  *count,
	mach_port_t             *object_name);

extern kern_return_t    vm_map_region_recurse_64(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           *size,
	natural_t               *nesting_depth,
	vm_region_submap_info_64_t info,
	mach_msg_type_number_t  *count);
extern kern_return_t    vm_map_page_query_internal(
	vm_map_t                map,
	vm_map_offset_t         offset,
	int                     *disposition,
	int                     *ref_count);

extern kern_return_t    vm_map_query_volatile(
	vm_map_t                map,
	mach_vm_size_t          *volatile_virtual_size_p,
	mach_vm_size_t          *volatile_resident_size_p,
	mach_vm_size_t          *volatile_compressed_size_p,
	mach_vm_size_t          *volatile_pmap_size_p,
	mach_vm_size_t          *volatile_compressed_pmap_size_p);
extern kern_return_t    vm_map_submap(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_map_t                submap,
	vm_map_offset_t         offset,
	boolean_t               use_pmap);

extern void             vm_map_submap_pmap_clean(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_map_t                sub_map,
	vm_map_offset_t         offset);
/* Convert from a map entry port to a map */
extern vm_map_t convert_port_entry_to_map(
	ipc_port_t      port);

/* Convert from a port to a vm_object */
extern vm_object_t convert_port_entry_to_object(
	ipc_port_t      port);

extern kern_return_t vm_map_set_cache_attr(
	vm_map_t        map,
	vm_map_offset_t va);
/* definitions related to overriding the NX behavior */

#define VM_ABI_32       0x1
#define VM_ABI_64       0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);
extern void vm_map_region_top_walk(
	vm_map_entry_t          entry,
	vm_region_top_info_t    top);
extern void vm_map_region_walk(
	vm_map_t                map,
	vm_map_offset_t         va,
	vm_map_entry_t          entry,
	vm_object_offset_t      offset,
	vm_object_size_t        range,
	vm_region_extended_info_t extended,
	boolean_t               look_for_pages,
	mach_msg_type_number_t  count);
extern void vm_map_copy_footprint_ledgers(task_t old_task, task_t new_task);
extern void vm_map_copy_ledger(task_t old_task, task_t new_task, int ledger_entry);
/*
 * Represents a single region of virtual address space that should be reserved
 * (pre-mapped) in a user address space.
 */
struct vm_reserved_region {
	char            *vmrr_name;
	vm_map_offset_t vmrr_addr;
	vm_map_size_t   vmrr_size;
};

/*
 * Return back a machine-dependent array of address space regions that should be
 * reserved by the VM.  This function is defined in the machine-dependent
 * machine_routines.c files.
 */
extern size_t ml_get_vm_reserved_regions(
	bool            vm_is64bit,
	struct vm_reserved_region **regions);
#endif /* MACH_KERNEL_PRIVATE */

/* Create an empty map */
extern vm_map_t         vm_map_create(
	pmap_t                  pmap,
	vm_map_offset_t         min_off,
	vm_map_offset_t         max_off,
	boolean_t               pageable);
extern vm_map_t vm_map_create_options(
	pmap_t                  pmap,
	vm_map_offset_t         min_off,
	vm_map_offset_t         max_off,
	int                     options);
#define VM_MAP_CREATE_PAGEABLE          0x00000001
#define VM_MAP_CREATE_CORPSE_FOOTPRINT  0x00000002
#define VM_MAP_CREATE_ALL_OPTIONS (VM_MAP_CREATE_PAGEABLE | \
	                           VM_MAP_CREATE_CORPSE_FOOTPRINT)
extern vm_map_size_t    vm_map_adjusted_size(vm_map_t map);

extern void             vm_map_disable_hole_optimization(vm_map_t map);
/* Get rid of a map */
extern void             vm_map_destroy(
	vm_map_t                map,
	int                     flags);

/* Lose a reference */
extern void             vm_map_deallocate(
	vm_map_t                map);

/* Lose a reference */
extern void             vm_map_inspect_deallocate(
	vm_map_inspect_t        map);

/* Lose a reference */
extern void             vm_map_read_deallocate(
	vm_map_read_t           map);

extern vm_map_t         vm_map_switch(
	vm_map_t                map);
/* Change protection */
extern kern_return_t    vm_map_protect(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               new_prot,
	boolean_t               set_max);

/* Check protection */
extern boolean_t        vm_map_check_protection(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               protection);

extern boolean_t        vm_map_cs_enforcement(
	vm_map_t                map);
extern void             vm_map_cs_enforcement_set(
	vm_map_t                map,
	boolean_t               val);

extern kern_return_t    vm_map_cs_wx_enable(vm_map_t map);
/* wire down a region */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t    vm_map_wire_kernel(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	vm_tag_t                tag,
	boolean_t               user_wire);

extern kern_return_t    vm_map_wire_and_extract_kernel(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               access_type,
	vm_tag_t                tag,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p);
/* kext exported versions */

extern kern_return_t    vm_map_wire_external(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	boolean_t               user_wire);

extern kern_return_t    vm_map_wire_and_extract_external(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               access_type,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p);
#else /* XNU_KERNEL_PRIVATE */

extern kern_return_t    vm_map_wire(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	boolean_t               user_wire);

extern kern_return_t    vm_map_wire_and_extract(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               access_type,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p);

#endif /* !XNU_KERNEL_PRIVATE */
/* unwire a region */
extern kern_return_t    vm_map_unwire(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	boolean_t               user_wire);
#ifdef XNU_KERNEL_PRIVATE

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object_prefault(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	upl_page_list_ptr_t     page_list,
	unsigned int            page_list_count);

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object_control(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	memory_object_control_t control,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

extern kern_return_t    vm_map_terminate(
	vm_map_t                map);

extern void             vm_map_require(
	vm_map_t                map);

#endif /* !XNU_KERNEL_PRIVATE */
/* Deallocate a region */
extern kern_return_t    vm_map_remove(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	boolean_t               flags);

/* Deallocate a region when the map is already locked */
extern kern_return_t    vm_map_remove_locked(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	boolean_t               flags);

/* Discard a copy without using it */
extern void             vm_map_copy_discard(
	vm_map_copy_t           copy);
/* Overwrite existing memory with a copy */
extern kern_return_t    vm_map_copy_overwrite(
	vm_map_t                dst_map,
	vm_map_address_t        dst_addr,
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size,
	boolean_t               interruptible);

#define VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES      (3)
/* returns TRUE if size of vm_map_copy == size parameter FALSE otherwise */
extern boolean_t        vm_map_copy_validate_size(
	vm_map_t                dst_map,
	vm_map_copy_t           copy,
	vm_map_size_t           *size);
/* Place a copy into a map */
extern kern_return_t    vm_map_copyout(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,      /* OUT */
	vm_map_copy_t           copy);

extern kern_return_t    vm_map_copyout_size(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,      /* OUT */
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size);

extern kern_return_t    vm_map_copyout_internal(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,      /* OUT */
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size,
	boolean_t               consume_on_success,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);
extern kern_return_t    vm_map_copyin(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               src_destroy,
	vm_map_copy_t           *copy_result);  /* OUT */

extern kern_return_t    vm_map_copyin_common(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               src_destroy,
	boolean_t               src_volatile,
	vm_map_copy_t           *copy_result,   /* OUT */
	boolean_t               use_maxprot);

#define VM_MAP_COPYIN_SRC_DESTROY       0x00000001
#define VM_MAP_COPYIN_USE_MAXPROT       0x00000002
#define VM_MAP_COPYIN_ENTRY_LIST        0x00000004
#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
#define VM_MAP_COPYIN_ALL_FLAGS         0x0000000F
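/*
 * Illustrative sketch (not part of the original header): moving a region
 * between maps with the copyin/copyout pair declared above.  A copy object
 * that fails to be placed must be discarded explicitly or it leaks.  The
 * helper name is hypothetical.
 */
static inline kern_return_t
move_region_example(
	vm_map_t         src_map,
	vm_map_address_t src_addr,
	vm_map_size_t    len,
	vm_map_t         dst_map,
	vm_map_address_t *dst_addr)
{
	vm_map_copy_t   copy;
	kern_return_t   kr;

	kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	kr = vm_map_copyout(dst_map, dst_addr, copy);
	if (kr != KERN_SUCCESS) {
		vm_map_copy_discard(copy);      /* don't leak the copy object */
	}
	return kr;
}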
extern kern_return_t    vm_map_copyin_internal(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	int                     flags,
	vm_map_copy_t           *copy_result);  /* OUT */
extern kern_return_t    vm_map_copy_extract(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               copy,
	vm_map_copy_t           *copy_result,   /* OUT */
	vm_prot_t               *cur_prot,      /* OUT */
	vm_prot_t               *max_prot,      /* OUT */
	vm_inherit_t            inheritance,
	vm_map_kernel_flags_t   vmk_flags);
extern void             vm_map_disable_NX(
	vm_map_t                map);

extern void             vm_map_disallow_data_exec(
	vm_map_t                map);

extern void             vm_map_set_64bit(
	vm_map_t                map);

extern void             vm_map_set_32bit(
	vm_map_t                map);

extern void             vm_map_set_jumbo(
	vm_map_t                map);

extern void             vm_map_set_jit_entitled(
	vm_map_t                map);

extern void             vm_map_set_max_addr(
	vm_map_t                map,
	vm_map_offset_t         new_max_offset);

extern boolean_t        vm_map_has_hard_pagezero(
	vm_map_t                map,
	vm_map_offset_t         pagezero_size);
extern void             vm_commit_pagezero_status(vm_map_t tmap);
#ifdef __arm64__
static inline boolean_t
vm_map_is_64bit(__unused vm_map_t map)
{
	return TRUE;
}
#else /* __arm64__ */
extern boolean_t        vm_map_is_64bit(
	vm_map_t                map);
#endif /* __arm64__ */
extern kern_return_t    vm_map_raise_max_offset(
	vm_map_t                map,
	vm_map_offset_t         new_max_offset);

extern kern_return_t    vm_map_raise_min_offset(
	vm_map_t                map,
	vm_map_offset_t         new_min_offset);
#if XNU_TARGET_OS_OSX
extern void vm_map_set_high_start(
	vm_map_t        map,
	vm_map_offset_t high_start);
#endif /* XNU_TARGET_OS_OSX */
extern vm_map_offset_t  vm_compute_max_offset(
	boolean_t               is64);

extern void             vm_map_get_max_aslr_slide_section(
	vm_map_t                map,
	int64_t                 *max_sections,
	int64_t                 *section_size);

extern uint64_t         vm_map_get_max_aslr_slide_pages(
	vm_map_t                map);

extern uint64_t         vm_map_get_max_loader_aslr_slide_pages(
	vm_map_t                map);
extern void             vm_map_set_user_wire_limit(
	vm_map_t                map,
	vm_size_t               limit);

extern void vm_map_switch_protect(
	vm_map_t                map,
	boolean_t               val);

extern void vm_map_iokit_mapped_region(
	vm_map_t                map,
	vm_size_t               bytes);

extern void vm_map_iokit_unmapped_region(
	vm_map_t                map,
	vm_size_t               bytes);

extern boolean_t first_free_is_valid(vm_map_t);
extern int              vm_map_page_shift(
	vm_map_t                map);

extern vm_map_offset_t  vm_map_page_mask(
	vm_map_t                map);

extern int              vm_map_page_size(
	vm_map_t                map);

extern vm_map_offset_t  vm_map_round_page_mask(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);

extern vm_map_offset_t  vm_map_trunc_page_mask(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);

extern boolean_t        vm_map_page_aligned(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);
static inline bool
vm_map_range_overflows(vm_map_offset_t addr, vm_map_size_t size)
{
	vm_map_offset_t sum;
	return os_add_overflow(addr, size, &sum);
}

static inline bool
mach_vm_range_overflows(mach_vm_offset_t addr, mach_vm_size_t size)
{
	mach_vm_offset_t sum;
	return os_add_overflow(addr, size, &sum);
}
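/*
 * Illustrative sketch (not part of the original header): range checks should
 * use the overflow-safe helpers above rather than "addr + size <= limit",
 * which can silently wrap around.  The helper name is hypothetical.
 */
static inline bool
range_fits_below_example(
	vm_map_offset_t addr,
	vm_map_size_t   size,
	vm_map_offset_t limit)
{
	vm_map_offset_t end;

	if (os_add_overflow(addr, size, &end)) {
		return false;   /* addr + size wraps around */
	}
	return end <= limit;
}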
#ifdef XNU_KERNEL_PRIVATE

#if XNU_TARGET_OS_OSX
extern void vm_map_mark_alien(vm_map_t map);
extern void vm_map_single_jit(vm_map_t map);
#endif /* XNU_TARGET_OS_OSX */
extern kern_return_t vm_map_page_info(
	vm_map_t                map,
	vm_map_offset_t         offset,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);
extern kern_return_t vm_map_page_range_info_internal(
	vm_map_t                map,
	vm_map_offset_t         start_offset,
	vm_map_offset_t         end_offset,
	int                     effective_page_shift,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);
#endif /* XNU_KERNEL_PRIVATE */
#ifdef  MACH_KERNEL_PRIVATE

/*
 *	Macros to invoke vm_map_copyin_common.  vm_map_copyin is the
 *	usual form; it handles a copyin based on the current protection
 *	(a region whose current protection is VM_PROT_NONE fails).
 *	vm_map_copyin_maxprot handles a copyin based on maximum possible
 *	access.  The difference is that a region with no current access
 *	BUT possible maximum access is rejected by vm_map_copyin(), but
 *	returned by vm_map_copyin_maxprot.
 */
#define vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \
	vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
	    FALSE, copy_result, FALSE)

#define vm_map_copyin_maxprot(src_map, \
	    src_addr, len, src_destroy, copy_result) \
	vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
	    FALSE, copy_result, TRUE)
/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_PAGE_SHIFT(map) ((map) ? (map)->hdr.page_shift : PAGE_SHIFT)
#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x, pgmask) (((x) & (pgmask)) == 0)
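/*
 * Illustrative sketch (not part of the original header): clipping a user
 * range to the containing map's own page size, which may differ from the
 * kernel's PAGE_SIZE (e.g. a 4K user map on a 16K-page kernel).  The helper
 * name is hypothetical.
 */
static inline void
clip_range_to_map_pages_example(
	vm_map_t        map,
	vm_map_offset_t *start,
	vm_map_offset_t *end)
{
	*start = VM_MAP_TRUNC_PAGE(*start, VM_MAP_PAGE_MASK(map));
	*end = VM_MAP_ROUND_PAGE(*end, VM_MAP_PAGE_MASK(map));
}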
static inline boolean_t
VM_MAP_IS_EXOTIC(
	vm_map_t map __unused)
{
#if __arm64__
	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT ||
	    pmap_is_exotic(map->pmap)) {
		return TRUE;
	}
#endif /* __arm64__ */
	return FALSE;
}

static inline boolean_t
VM_MAP_IS_ALIEN(
	vm_map_t map __unused)
{
	/*
	 * An "alien" process/task/map/pmap should mostly behave
	 * as it currently would on iOS.
	 */
#if XNU_TARGET_OS_OSX
	if (map->is_alien) {
		return TRUE;
	}
	return FALSE;
#else /* XNU_TARGET_OS_OSX */
	return TRUE;
#endif /* XNU_TARGET_OS_OSX */
}

static inline boolean_t
VM_MAP_POLICY_WX_FAIL(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return FALSE;
	}
	return TRUE;
}

static inline boolean_t
VM_MAP_POLICY_WX_STRIP_X(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return TRUE;
	}
	return FALSE;
}

static inline boolean_t
VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map) || map->single_jit) {
		return FALSE;
	}
	return TRUE;
}

static inline boolean_t
VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(
	vm_map_t map)
{
	return VM_MAP_IS_ALIEN(map);
}

static inline boolean_t
VM_MAP_POLICY_ALLOW_JIT_INHERIT(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return FALSE;
	}
	return TRUE;
}

static inline boolean_t
VM_MAP_POLICY_ALLOW_JIT_SHARING(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return FALSE;
	}
	return TRUE;
}

static inline boolean_t
VM_MAP_POLICY_ALLOW_JIT_COPY(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return FALSE;
	}
	return TRUE;
}

static inline boolean_t
VM_MAP_POLICY_WRITABLE_SHARED_REGION(
	vm_map_t map __unused)
{
#if __x86_64__
	return TRUE;
#else /* __x86_64__ */
	if (VM_MAP_IS_EXOTIC(map)) {
		return TRUE;
	}
	return FALSE;
#endif /* __x86_64__ */
}
static inline void
vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
{
	switch (prot) {
	case MAP_MEM_NOOP:                      break;
	case MAP_MEM_IO:                        *wimg = VM_WIMG_IO; break;
	case MAP_MEM_COPYBACK:                  *wimg = VM_WIMG_USE_DEFAULT; break;
	case MAP_MEM_INNERWBACK:                *wimg = VM_WIMG_INNERWBACK; break;
	case MAP_MEM_POSTED:                    *wimg = VM_WIMG_POSTED; break;
	case MAP_MEM_POSTED_REORDERED:          *wimg = VM_WIMG_POSTED_REORDERED; break;
	case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break;
	case MAP_MEM_WTHRU:                     *wimg = VM_WIMG_WTHRU; break;
	case MAP_MEM_WCOMB:                     *wimg = VM_WIMG_WCOMB; break;
	case MAP_MEM_RT:                        *wimg = VM_WIMG_RT; break;
	default:                                break;
	}
}

#endif /* MACH_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
extern bool vm_map_is_exotic(vm_map_t map);
extern bool vm_map_is_alien(vm_map_t map);
#endif /* XNU_KERNEL_PRIVATE */
#define vm_map_round_page(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define vm_map_trunc_page(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))
/*
 * Flags for vm_map_remove() and vm_map_delete()
 */
#define VM_MAP_REMOVE_NO_FLAGS          0x0
#define VM_MAP_REMOVE_KUNWIRE           0x1
#define VM_MAP_REMOVE_INTERRUPTIBLE     0x2
#define VM_MAP_REMOVE_WAIT_FOR_KWIRE    0x4
#define VM_MAP_REMOVE_SAVE_ENTRIES      0x8
#define VM_MAP_REMOVE_NO_PMAP_CLEANUP   0x10
#define VM_MAP_REMOVE_NO_MAP_ALIGN      0x20
#define VM_MAP_REMOVE_NO_UNNESTING      0x40
#define VM_MAP_REMOVE_IMMUTABLE         0x80
#define VM_MAP_REMOVE_GAPS_OK           0x100
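/*
 * Illustrative sketch (not part of the original header): the flags above
 * combine bitwise and are passed as the last argument of vm_map_remove();
 * VM_MAP_REMOVE_KUNWIRE additionally drops a kernel wiring as part of the
 * removal.  The helper name is hypothetical.
 */
static inline kern_return_t
remove_wired_range_example(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end)
{
	return vm_map_remove(map, start, end,
	           VM_MAP_REMOVE_KUNWIRE | VM_MAP_REMOVE_NO_FLAGS);
}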
/* Support for UPLs from vm_maps */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_get_upl(
	vm_map_t                target_map,
	vm_map_offset_t         map_offset,
	upl_size_t              *size,
	upl_t                   *upl,
	upl_page_info_array_t   page_info,
	unsigned int            *page_infoCnt,
	upl_control_flags_t     *flags,
	vm_tag_t                tag,
	int                     force_data_sync);

#endif /* XNU_KERNEL_PRIVATE */
extern void
vm_map_sizes(vm_map_t map,
    vm_map_size_t * psize,
    vm_map_size_t * pfree,
    vm_map_size_t * plargest_free);
#if CONFIG_DYNAMIC_CODE_SIGNING
extern kern_return_t vm_map_sign(vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end);
#endif

extern kern_return_t vm_map_partial_reap(
	vm_map_t map,
	unsigned int *reclaimed_resident,
	unsigned int *reclaimed_compressed);
#if DEVELOPMENT || DEBUG

extern int vm_map_disconnect_page_mappings(
	vm_map_t map,
	boolean_t);

extern kern_return_t vm_map_inject_error(vm_map_t map, vm_map_offset_t vaddr);

#endif /* DEVELOPMENT || DEBUG */
#if CONFIG_FREEZE
extern kern_return_t vm_map_freeze(
	task_t       task,
	unsigned int *purgeable_count,
	unsigned int *wired_count,
	unsigned int *clean_count,
	unsigned int *dirty_count,
	unsigned int dirty_budget,
	unsigned int *shared_count,
	int          *freezer_error_code,
	boolean_t    eval_only);

#define FREEZER_ERROR_GENERIC                   (-1)
#define FREEZER_ERROR_EXCESS_SHARED_MEMORY      (-2)
#define FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO  (-3)
#define FREEZER_ERROR_NO_COMPRESSOR_SPACE       (-4)
#define FREEZER_ERROR_NO_SWAP_SPACE             (-5)
#endif /* CONFIG_FREEZE */
/*
 * In some cases, we don't have a real VM object but still want to return a
 * unique ID (to avoid a memory region looking like shared memory), so build
 * a fake pointer based on the map's ledger and the index of the ledger being
 * reported.
 */
#define VM_OBJECT_ID_FAKE(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id)))

#endif /* KERNEL_PRIVATE */

#endif /* _VM_VM_MAP_H_ */