/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/vm_map.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory map module definitions.
 */
#ifndef _VM_VM_MAP_H_
#define _VM_VM_MAP_H_

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>

#include <os/overflow.h>

#ifdef KERNEL_PRIVATE

#include <sys/cdefs.h>

#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_protos.h>
#endif /* XNU_KERNEL_PRIVATE */
extern void     vm_map_reference(vm_map_t map);
extern vm_map_t current_map(void);

/* Setup reserved areas in a new VM map */
extern kern_return_t vm_map_exec(
	vm_map_t        new_map,
	task_t          task,
	boolean_t       is64bit,
	void            *fsroot,
	cpu_type_t      cpu,
	cpu_subtype_t   cpu_subtype,
	boolean_t       reslide);
#ifdef  MACH_KERNEL_PRIVATE

#include <task_swapper.h>
#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>
#include <os/refcnt.h>

#define current_map_fast()      (current_thread()->map)
#define current_map()           (current_map_fast())

#include <vm/vm_map_store.h>
/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				used for inter-map copy operations
 */
typedef struct vm_map_entry     *vm_map_entry_t;
#define VM_MAP_ENTRY_NULL       ((vm_map_entry_t) NULL)

/*
 *	Type:		vm_map_object_t [internal use only]
 *
 *	Description:
 *		The target of an address mapping, either a virtual
 *		memory object or a sub map (of the kernel map).
 */
typedef union vm_map_object {
	vm_object_t     vmo_object;     /* object object */
	vm_map_t        vmo_submap;     /* belongs to another map */
} vm_map_object_t;

#define named_entry_lock_init(object)           lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object)        lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object)                lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)              lck_mtx_unlock(&(object)->Lock)

#if VM_NAMED_ENTRY_LIST
extern queue_head_t vm_named_entry_list;
#endif /* VM_NAMED_ENTRY_LIST */
/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions, (read, write)
 *		with which it can be mapped.
 */

struct vm_named_entry {
	decl_lck_mtx_data(, Lock);              /* Synchronization */
	union {
		vm_map_t        map;            /* map backing submap */
		vm_map_copy_t   copy;           /* a VM map copy */
	} backing;
	vm_object_offset_t      offset;         /* offset into object */
	vm_object_size_t        size;           /* size of region */
	vm_object_offset_t      data_offset;    /* offset to first byte of data */
	vm_prot_t               protection;     /* access permissions */
	int                     ref_count;      /* Number of references */
	unsigned int                            /* Is backing.xxx : */
	/* boolean_t */ is_object:1,            /* ... a VM object (wrapped in a VM map copy) */
	/* boolean_t */ internal:1,             /* ... an internal object */
	/* boolean_t */ is_sub_map:1,           /* ... a submap? */
	/* boolean_t */ is_copy:1;              /* ... a VM map copy */
#if VM_NAMED_ENTRY_LIST
	queue_chain_t           named_entry_list;
	int                     named_entry_alias;
	mach_port_t             named_entry_port;
#define NAMED_ENTRY_BT_DEPTH 16
	void                    *named_entry_bt[NAMED_ENTRY_BT_DEPTH];
#endif /* VM_NAMED_ENTRY_LIST */
};
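/*
 * Illustrative sketch (added for exposition; the accessor names are
 * hypothetical, not part of this header): the "backing" union above is
 * discriminated by the is_sub_map/is_copy/is_object bits, so callers are
 * expected to check a flag before touching the corresponding member.
 */
#if 0 /* example only */
static inline vm_map_t
vm_named_entry_backing_map_example(struct vm_named_entry *ne)
{
	assert(ne->is_sub_map);         /* backing.map is only valid for submaps */
	return ne->backing.map;
}

static inline vm_map_copy_t
vm_named_entry_backing_copy_example(struct vm_named_entry *ne)
{
	/* both "is_copy" and "is_object" entries are carried in a VM map copy */
	assert(ne->is_copy || ne->is_object);
	return ne->backing.copy;
}
#endif /* example only */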
/*
 *	Type:		vm_map_entry_t [internal use only]
 *
 *	Description:
 *		A single mapping within an address map.
 *
 *	Implementation:
 *		Address map entries consist of start and end addresses,
 *		a VM object (or sub map) and offset into that object,
 *		and user-exported inheritance and protection information.
 *		Control information for virtual copy operations is also
 *		stored in the address map entry.
 */

struct vm_map_links {
	struct vm_map_entry     *prev;  /* previous entry */
	struct vm_map_entry     *next;  /* next entry */
	vm_map_offset_t         start;  /* start address */
	vm_map_offset_t         end;    /* end address */
};

/*
 * FOOTPRINT ACCOUNTING:
 * The "memory footprint" is better described in the pmap layer.
 *
 * At the VM level, these 2 vm_map_entry_t fields are relevant:
 * iokit_mapped:
 *	For an "iokit_mapped" entry, we add the size of the entry to the
 *	footprint when the entry is entered into the map and we subtract that
 *	size when the entry is removed.  No other accounting should take place.
 *	"use_pmap" should be FALSE but is not taken into account.
 * use_pmap: (only when is_sub_map is FALSE)
 *	This indicates if we should ask the pmap layer to account for pages
 *	in this mapping.  If FALSE, we expect that another form of accounting
 *	is being used (e.g. "iokit_mapped" or the explicit accounting of
 *	non-volatile purgable memory).
 *
 * So the logic is mostly:
 * if entry->is_sub_map == TRUE
 *	anything in a submap does not count for the footprint
 * else if entry->iokit_mapped == TRUE
 *	footprint includes the entire virtual size of this entry
 * else if entry->use_pmap == FALSE
 *	tell pmap NOT to account for pages being pmap_enter()'d from this
 *	mapping (i.e. use "alternate accounting")
 * else
 *	pmap will account for pages being pmap_enter()'d from this mapping
 *	as it sees fit (only if anonymous, etc...)
 */
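/*
 * Illustrative sketch (added for exposition; the helper name is
 * hypothetical): the decision logic above written out as code.  This
 * assumes the "iokit_mapped" case corresponds to the iokit_acct bit of
 * struct vm_map_entry below.
 */
#if 0 /* example only */
static inline bool
vm_map_entry_uses_pmap_accounting_example(vm_map_entry_t entry)
{
	if (entry->is_sub_map) {
		/* anything in a submap does not count for the footprint */
		return false;
	}
	if (entry->iokit_acct) {
		/* entire virtual size was added to the footprint at entry time */
		return false;
	}
	/* otherwise use_pmap decides: FALSE means "alternate accounting" */
	return entry->use_pmap;
}
#endif /* example only */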
struct vm_map_entry {
	struct vm_map_links     links;          /* links to other entries */
#define vme_prev                links.prev
#define vme_next                links.next
#define vme_start               links.start
#define vme_end                 links.end

	struct vm_map_store     store;
	union vm_map_object     vme_object;     /* object I point to */
	vm_object_offset_t      vme_offset;     /* offset into object */

	unsigned int
	/* boolean_t */ is_shared:1,            /* region is shared */
	/* boolean_t */ is_sub_map:1,           /* Is "object" a submap? */
	/* boolean_t */ in_transition:1,        /* Entry being changed */
	/* boolean_t */ needs_wakeup:1,         /* Waiters on in_transition */
	/* vm_behavior_t */ behavior:2,         /* user paging behavior hint */
	/* behavior is not defined for submap type */
	/* boolean_t */ needs_copy:1,           /* object need to be copied? */

	/* Only in task maps: */
	/* vm_prot_t */ protection:3,           /* protection code */
	/* vm_prot_t */ max_protection:3,       /* maximum protection */
	/* vm_inherit_t */ inheritance:2,       /* inheritance */
	/* boolean_t */ use_pmap:1,
	/*
	 * use_pmap is overloaded:
	 * if "is_sub_map":
	 *	use a nested pmap?
	 * else (i.e. if object):
	 *	use pmap accounting
	 *	for footprint?
	 */
	/* boolean_t */ no_cache:1,             /* should new pages be cached? */
	/* boolean_t */ permanent:1,            /* mapping can not be removed */
	/* boolean_t */ superpage_size:1,       /* use superpages of a certain size */
	/* boolean_t */ map_aligned:1,          /* align to map's page size */
	/* boolean_t */ zero_wired_pages:1,     /* zero out the wired pages of
	                                         * this entry if it is being deleted
	                                         * without unwiring them */
	/* boolean_t */ used_for_jit:1,
	/* boolean_t */ pmap_cs_associated:1,   /* pmap_cs will validate */
	/* boolean_t */ from_reserved_zone:1,   /* Allocated from
	                                         * kernel reserved zone */

	/* iokit accounting: use the virtual size rather than resident size: */
	/* boolean_t */ iokit_acct:1,
	/* boolean_t */ vme_resilient_codesign:1,
	/* boolean_t */ vme_resilient_media:1,
	/* boolean_t */ vme_atomic:1,           /* entry cannot be split/coalesced */
	/* boolean_t */ vme_no_copy_on_read:1,
	/* boolean_t */ translated_allow_execute:1; /* execute in translated processes */

	unsigned short          wired_count;    /* can be paged if = 0 */
	unsigned short          user_wired_count; /* for vm_wire */

#define MAP_ENTRY_CREATION_DEBUG (1)
#define MAP_ENTRY_INSERTION_DEBUG (1)

#if MAP_ENTRY_CREATION_DEBUG
	struct vm_map_header    *vme_creation_maphdr;
	uintptr_t               vme_creation_bt[16];
#endif /* MAP_ENTRY_CREATION_DEBUG */
#if MAP_ENTRY_INSERTION_DEBUG
	vm_map_offset_t         vme_start_original;
	vm_map_offset_t         vme_end_original;
	uintptr_t               vme_insertion_bt[16];
#endif /* MAP_ENTRY_INSERTION_DEBUG */
};
#define VME_SUBMAP_PTR(entry)                   \
	(&((entry)->vme_object.vmo_submap))
#define VME_SUBMAP(entry)                       \
	((vm_map_t)((uintptr_t)0 + *VME_SUBMAP_PTR(entry)))
#define VME_OBJECT_PTR(entry)                   \
	(&((entry)->vme_object.vmo_object))
#define VME_OBJECT(entry)                       \
	((vm_object_t)((uintptr_t)0 + *VME_OBJECT_PTR(entry)))
#define VME_OFFSET(entry)                       \
	((entry)->vme_offset & (vm_object_offset_t)~FOURK_PAGE_MASK)
#define VME_ALIAS_MASK (FOURK_PAGE_MASK)
#define VME_ALIAS(entry)                        \
	((unsigned int)((entry)->vme_offset & VME_ALIAS_MASK))
static inline void
VME_OBJECT_SET(
	vm_map_entry_t entry,
	vm_object_t object)
{
	entry->vme_object.vmo_object = object;
	if (object != VM_OBJECT_NULL && !object->internal) {
		entry->vme_resilient_media = FALSE;
	}
	entry->vme_resilient_codesign = FALSE;
	entry->used_for_jit = FALSE;
}

static inline void
VME_SUBMAP_SET(
	vm_map_entry_t entry,
	vm_map_t submap)
{
	entry->vme_object.vmo_submap = submap;
}

static inline void
VME_OFFSET_SET(
	vm_map_entry_t entry,
	vm_object_offset_t offset)
{
	unsigned int alias;

	alias = VME_ALIAS(entry);
	assert((offset & FOURK_PAGE_MASK) == 0);
	entry->vme_offset = offset | alias;
}

/*
 * The "alias" field can be updated while holding the VM map lock
 * "shared".  It's OK as long as it's the only field that can be
 * updated without the VM map "exclusive" lock.
 */
static inline void
VME_ALIAS_SET(
	vm_map_entry_t entry,
	int alias)
{
	vm_object_offset_t offset;

	offset = VME_OFFSET(entry);
	entry->vme_offset = offset | ((unsigned int)alias & VME_ALIAS_MASK);
}
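/*
 * Illustrative sketch (added for exposition; the function name is
 * hypothetical): VME_OFFSET_SET() and VME_ALIAS_SET() pack a 4K-aligned
 * object offset and a small alias/tag into the single vme_offset field,
 * so both round-trip independently.
 */
#if 0 /* example only */
static void
vme_offset_alias_packing_example(vm_map_entry_t entry)
{
	VME_OFFSET_SET(entry, 0x10000);         /* must be FOURK-aligned */
	VME_ALIAS_SET(entry, 5);                /* stored in the low bits */
	assert(VME_OFFSET(entry) == 0x10000);   /* offset is preserved */
	assert(VME_ALIAS(entry) == 5);          /* alias coexists with it */
}
#endif /* example only */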
static inline void
VME_OBJECT_SHADOW(
	vm_map_entry_t entry,
	vm_object_size_t length)
{
	vm_object_t object;
	vm_object_offset_t offset;

	object = VME_OBJECT(entry);
	offset = VME_OFFSET(entry);
	vm_object_shadow(&object, &offset, length);
	if (object != VME_OBJECT(entry)) {
		VME_OBJECT_SET(entry, object);
		entry->use_pmap = TRUE;
	}
	if (offset != VME_OFFSET(entry)) {
		VME_OFFSET_SET(entry, offset);
	}
}
/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) (a & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) ((a + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)
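/*
 * Illustrative example (added for exposition; the function name is
 * hypothetical and 4K base pages with SUPERPAGE_NBASEPAGES == 512,
 * i.e. 2MB superpages, are assumed):
 */
#if 0 /* example only */
static void
superpage_rounding_example(void)
{
	assert(SUPERPAGE_SIZE == 0x200000);
	assert(SUPERPAGE_ROUND_DOWN(0x234567) == 0x200000);
	assert(SUPERPAGE_ROUND_UP(0x234567) == 0x400000);
}
#endif /* example only */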
/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT          65535

/*
 *	Type:		struct vm_map_header
 *
 *	Description:
 *		Header for a vm_map and a vm_map_copy.
 */

struct vm_map_header {
	struct vm_map_links     links;          /* first, last, min, max */
	int                     nentries;       /* Number of entries */
	boolean_t               entries_pageable;
	                                        /* are map entries pageable? */
#ifdef VM_MAP_STORE_USE_RB
	struct rb_head          rb_head_store;
#endif
	int                     page_shift;     /* page shift */
};

#define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift)
#define VM_MAP_HDR_PAGE_SIZE(hdr) (1 << VM_MAP_HDR_PAGE_SHIFT((hdr)))
#define VM_MAP_HDR_PAGE_MASK(hdr) (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1)
/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 */
struct _vm_map {
	lck_rw_t                lock;           /* map lock */
	struct vm_map_header    hdr;            /* Map entry header */
#define min_offset              hdr.links.start /* start of range */
#define max_offset              hdr.links.end   /* end of range */
	pmap_t                  XNU_PTRAUTH_SIGNED_PTR("_vm_map.pmap") pmap; /* Physical map */
	vm_map_size_t           size;           /* virtual size */
	vm_map_size_t           user_wire_limit;/* rlimit on user locked memory */
	vm_map_size_t           user_wire_size; /* current size of user locked memory in this map */
	vm_map_offset_t         vmmap_high_start;

	union {
		/*
		 * If map->disable_vmentry_reuse == TRUE:
		 * the end address of the highest allocated vm_map_entry_t.
		 */
		vm_map_offset_t         vmu1_highest_entry_end;
		/*
		 * For a nested VM map:
		 * the lowest address in this nested VM map that we would
		 * expect to be unnested under normal operation (i.e. for
		 * regular copy-on-write on DATA section).
		 */
		vm_map_offset_t         vmu1_lowest_unnestable_start;
	} vmu1;
#define highest_entry_end       vmu1.vmu1_highest_entry_end
#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start

	decl_lck_mtx_data(, s_lock);            /* Lock ref, res fields */
	lck_mtx_ext_t           s_lock_ext;
	vm_map_entry_t          hint;           /* hint for quick lookups */
	union {
		struct vm_map_links     *vmmap_hole_hint;       /* hint for quick hole lookups */
		struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
	} vmmap_u_1;
#define hole_hint               vmmap_u_1.vmmap_hole_hint
#define vmmap_corpse_footprint  vmmap_u_1.vmmap_corpse_footprint
	union {
		vm_map_entry_t          _first_free;    /* First free space hint */
		struct vm_map_links     *_holes;        /* links all holes between entries */
	} f_s;                                  /* Union for free space data structures being used */

#define first_free              f_s._first_free
#define holes_list              f_s._holes

	struct os_refcnt        map_refcnt;     /* Reference count */

#if TASK_SWAPPER
	int                     res_count;      /* Residence count (swap) */
	int                     sw_state;       /* Swap state */
#endif /* TASK_SWAPPER */

	unsigned int
	/* boolean_t */ wait_for_space:1,       /* Should callers wait for space? */
	/* boolean_t */ wiring_required:1,      /* All memory wired? */
	/* boolean_t */ no_zero_fill:1,         /* No zero fill absent pages */
	/* boolean_t */ mapped_in_other_pmaps:1, /* has this submap been mapped in maps that use a different pmap */
	/* boolean_t */ switch_protect:1,       /* Protect map from write faults while switched */
	/* boolean_t */ disable_vmentry_reuse:1, /* All vm entries should keep using newer and higher addresses in the map */
	/* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
	/* boolean_t */ holelistenabled:1,
	/* boolean_t */ is_nested_map:1,
	/* boolean_t */ map_disallow_new_exec:1, /* Disallow new executable code */
	/* boolean_t */ jit_entry_exists:1,
	/* boolean_t */ has_corpse_footprint:1,
	/* boolean_t */ terminated:1,
	/* boolean_t */ is_alien:1,             /* for platform simulation, i.e. PLATFORM_IOS on OSX */
	/* boolean_t */ cs_enforcement:1,       /* code-signing enforcement */
	/* boolean_t */ reserved_regions:1,     /* has reserved regions. The map size that userspace sees should ignore these. */
	/* reserved */  pad:16;
	unsigned int            timestamp;      /* Version number */
};

#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
#define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
#define vm_map_first_entry(map) ((map)->hdr.links.next)
#define vm_map_last_entry(map)  ((map)->hdr.links.prev)
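/*
 * Illustrative sketch (added for exposition; the function name is
 * hypothetical): walking a map's sorted entry list with the macros above.
 * vm_map_to_entry() is the sentinel "head" pseudo-entry, so iteration
 * stops when the walk wraps back around to it.  Assumes the caller holds
 * the map lock.
 */
#if 0 /* example only */
static void
vm_map_walk_entries_example(vm_map_t map)
{
	vm_map_entry_t entry;

	for (entry = vm_map_first_entry(map);
	    entry != vm_map_to_entry(map);
	    entry = entry->vme_next) {
		printf("entry [0x%llx, 0x%llx)\n",
		    (uint64_t)entry->vme_start,
		    (uint64_t)entry->vme_end);
	}
}
#endif /* example only */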
#if TASK_SWAPPER
/*
 *	VM map swap states.  There are no transition states.
 */
#define MAP_SW_IN       1       /* map is swapped in; residence count > 0 */
#define MAP_SW_OUT      2       /* map is out (res_count == 0) */
#endif /* TASK_SWAPPER */

/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage note:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int    main_timestamp;
} vm_map_version_t;
/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 *		There are three formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header links
 *		entry onto which the other entries that represent
 *		the region are chained.
 *
 *		The second format is a single vm object.  This was used
 *		primarily in the pageout path - but is not currently used
 *		except for placeholder copy objects (see vm_map_copy_copy()).
 *
 *		The third format is a kernel buffer copy object - for data
 *		small enough that physical copies were the most efficient
 *		method.  This method uses a zero-sized array unioned with
 *		other format-specific data in the 'c_u' member.  This unsized
 *		array overlaps the other elements and allows us to use this
 *		extra structure space for physical memory copies.  On 64-bit
 *		systems this saves ~64 bytes per vm_map_copy.
 */
struct vm_map_copy {
	int                     type;
#define VM_MAP_COPY_ENTRY_LIST          1
#define VM_MAP_COPY_OBJECT              2
#define VM_MAP_COPY_KERNEL_BUFFER       3
	vm_object_offset_t      offset;
	vm_map_size_t           size;
	union {
		struct vm_map_header    hdr;    /* ENTRY_LIST */
		vm_object_t             object; /* OBJECT */
		void *XNU_PTRAUTH_SIGNED_PTR("vm_map_copy.kdata") kdata; /* KERNEL_BUFFER */
	} c_u;
};

#define cpy_hdr                 c_u.hdr
#define cpy_object              c_u.object
#define cpy_kdata               c_u.kdata

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)
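/*
 * Illustrative sketch (added for exposition; the function name is
 * hypothetical): dispatching on the three copy-object formats described
 * above via the "type" discriminant.
 */
#if 0 /* example only */
static void
vm_map_copy_format_example(vm_map_copy_t copy)
{
	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		/* entries are chained off copy->cpy_hdr.links */
		break;
	case VM_MAP_COPY_OBJECT:
		/* a single VM object: copy->cpy_object */
		break;
	case VM_MAP_COPY_KERNEL_BUFFER:
		/* small data staged in a kernel buffer: copy->cpy_kdata */
		break;
	}
}
#endif /* example only */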
/*
 *	Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy)           \
	((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy)            \
	((copy)->cpy_hdr.links.prev)

extern kern_return_t
vm_map_copy_adjust_to_target(
	vm_map_copy_t           copy_map,
	vm_map_offset_t         offset,
	vm_map_size_t           size,
	vm_map_t                target_map,
	boolean_t               copy,
	vm_map_copy_t           *target_copy_map_p,
	vm_map_offset_t         *overmap_start_p,
	vm_map_offset_t         *overmap_end_p,
	vm_map_offset_t         *trimmed_start_p);
/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *
 *	Description:
 *		Perform locking on the data portion of a map.
 *		When multiple maps are to be locked, order by map address.
 *		(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map)                                           \
	((map)->timestamp = 0 ,                                         \
	lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))

#define vm_map_lock(map)                     \
	MACRO_BEGIN                          \
	DTRACE_VM(vm_map_lock_w);            \
	lck_rw_lock_exclusive(&(map)->lock); \
	MACRO_END

#define vm_map_unlock(map)          \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_w); \
	(map)->timestamp++;         \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_read(map)             \
	MACRO_BEGIN                       \
	DTRACE_VM(vm_map_lock_r);         \
	lck_rw_lock_shared(&(map)->lock); \
	MACRO_END

#define vm_map_unlock_read(map)     \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_r); \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_write_to_read(map)                 \
	MACRO_BEGIN                                    \
	DTRACE_VM(vm_map_lock_downgrade);              \
	(map)->timestamp++;                            \
	lck_rw_lock_exclusive_to_shared(&(map)->lock); \
	MACRO_END
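/*
 * Illustrative sketch (added for exposition; the function name is
 * hypothetical): vm_map_unlock() bumps the map timestamp, which is what
 * vm_map_version_t/vm_map_verify() comparisons rely on.
 */
#if 0 /* example only */
static void
vm_map_lock_example(vm_map_t map)
{
	unsigned int ts;

	vm_map_lock(map);       /* exclusive */
	ts = map->timestamp;
	/* ... modify the map ... */
	vm_map_unlock(map);     /* timestamp is now ts + 1 */
}
#endif /* example only */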
__attribute__((always_inline))
int vm_map_lock_read_to_write(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock_read(vm_map_t map);

int vm_self_region_page_shift(vm_map_t target_map);
int vm_self_region_page_shift_safely(vm_map_t target_map);

#if MACH_ASSERT || DEBUG
#define vm_map_lock_assert_held(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD)
#define vm_map_lock_assert_shared(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_SHARED)
#define vm_map_lock_assert_exclusive(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_map_lock_assert_notheld(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
#else /* MACH_ASSERT || DEBUG */
#define vm_map_lock_assert_held(map)
#define vm_map_lock_assert_shared(map)
#define vm_map_lock_assert_exclusive(map)
#define vm_map_lock_assert_notheld(map)
#endif /* MACH_ASSERT || DEBUG */
/*
 *	Exported procedures that operate on vm_map_t.
 */

/* Initialize the module */
extern void vm_map_init(void);

extern void vm_kernel_reserved_entry_init(void);

/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t vm_map_find_space(
	vm_map_t                map,
	vm_map_address_t        *address,       /* OUT */
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_map_entry_t          *o_entry);      /* OUT */

/* flags for vm_map_find_space */
#define VM_MAP_FIND_LAST_FREE   0x01

extern void vm_map_clip_start(
	vm_map_t                map,
	vm_map_entry_t          entry,
	vm_map_offset_t         endaddr);

extern void vm_map_clip_end(
	vm_map_t                map,
	vm_map_entry_t          entry,
	vm_map_offset_t         endaddr);

extern boolean_t vm_map_entry_should_cow_for_true_share(
	vm_map_entry_t          entry);

/* Look up the map entry containing the specified address in the given map */
extern boolean_t vm_map_lookup_entry(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry);        /* OUT */

extern void vm_map_copy_remap(
	vm_map_t                map,
	vm_map_entry_t          where,
	vm_map_copy_t           copy,
	vm_map_offset_t         adjustment,
	vm_prot_t               cur_prot,
	vm_prot_t               max_prot,
	vm_inherit_t            inheritance);
/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t vm_map_lookup_locked(
	vm_map_t                *var_map,       /* IN/OUT */
	vm_map_address_t        vaddr,
	vm_prot_t               fault_type,
	int                     object_lock_type,
	vm_map_version_t        *out_version,   /* OUT */
	vm_object_t             *object,        /* OUT */
	vm_object_offset_t      *offset,        /* OUT */
	vm_prot_t               *out_prot,      /* OUT */
	boolean_t               *wired,         /* OUT */
	vm_object_fault_info_t  fault_info,     /* OUT */
	vm_map_t                *real_map,      /* OUT */
	bool                    *contended);    /* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t vm_map_verify(
	vm_map_t                map,
	vm_map_version_t        *version);      /* REF */
extern vm_map_entry_t vm_map_entry_insert(
	vm_map_t                map,
	vm_map_entry_t          insp_entry,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_map_kernel_flags_t   vmk_flags,
	boolean_t               needs_copy,
	boolean_t               is_shared,
	boolean_t               in_transition,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_behavior_t           behavior,
	vm_inherit_t            inheritance,
	unsigned short          wired_count,
	boolean_t               no_cache,
	boolean_t               permanent,
	boolean_t               no_copy_on_read,
	unsigned int            superpage_size,
	boolean_t               clear_map_aligned,
	boolean_t               is_submap,
	boolean_t               used_for_jit,
	int                     alias,
	boolean_t               translated_allow_execute);

/*
 *	Functions implemented as macros
 */
#define vm_map_min(map) ((map)->min_offset)
/* Lowest valid address in a map */

#define vm_map_max(map) ((map)->max_offset)
/* Highest valid address */

#define vm_map_pmap(map) ((map)->pmap)
/* Physical map associated with this address map */
/*
 * Macros/functions for map residence counts and swapin/out of vm maps
 */
#if TASK_SWAPPER

#if MACH_ASSERT
/* Gain a reference to an existing map */
extern void vm_map_reference(
	vm_map_t        map);
/* Lose a residence count */
extern void vm_map_res_deallocate(
	vm_map_t        map);
/* Gain a residence count on a map */
extern void vm_map_res_reference(
	vm_map_t        map);
/* Gain reference & residence counts to possibly swapped-out map */
extern void vm_map_reference_swap(
	vm_map_t        map);

#else /* MACH_ASSERT */

#define vm_map_reference(map)                    \
	MACRO_BEGIN                              \
	vm_map_t Map = (map);                    \
	if (Map) {                               \
	        lck_mtx_lock(&Map->s_lock);      \
	        Map->res_count++;                \
	        os_ref_retain(&Map->map_refcnt); \
	        lck_mtx_unlock(&Map->s_lock);    \
	}                                        \
	MACRO_END

#define vm_map_res_reference(map)               \
	MACRO_BEGIN                             \
	vm_map_t Lmap = (map);                  \
	if (Lmap->res_count == 0) {             \
	        lck_mtx_unlock(&Lmap->s_lock);  \
	        vm_map_lock(Lmap);              \
	        vm_map_swapin(Lmap);            \
	        lck_mtx_lock(&Lmap->s_lock);    \
	        ++Lmap->res_count;              \
	        vm_map_unlock(Lmap);            \
	} else {                                \
	        ++Lmap->res_count;              \
	}                                       \
	MACRO_END

#define vm_map_res_deallocate(map)              \
	MACRO_BEGIN                             \
	vm_map_t Map = (map);                   \
	if (--Map->res_count == 0) {            \
	        lck_mtx_unlock(&Map->s_lock);   \
	        vm_map_lock(Map);               \
	        vm_map_swapout(Map);            \
	        vm_map_unlock(Map);             \
	        lck_mtx_lock(&Map->s_lock);     \
	}                                       \
	MACRO_END

#define vm_map_reference_swap(map)              \
	MACRO_BEGIN                             \
	vm_map_t Map = (map);                   \
	lck_mtx_lock(&Map->s_lock);             \
	os_ref_retain(&Map->map_refcnt);        \
	vm_map_res_reference(Map);              \
	lck_mtx_unlock(&Map->s_lock);           \
	MACRO_END
#endif /* MACH_ASSERT */

extern void vm_map_swapin(
	vm_map_t        map);

extern void vm_map_swapout(
	vm_map_t        map);

#else /* TASK_SWAPPER */

#define vm_map_reference(map)                    \
	MACRO_BEGIN                              \
	vm_map_t Map = (map);                    \
	if (Map) {                               \
	        lck_mtx_lock(&Map->s_lock);      \
	        os_ref_retain(&Map->map_refcnt); \
	        lck_mtx_unlock(&Map->s_lock);    \
	}                                        \
	MACRO_END

#define vm_map_reference_swap(map)      vm_map_reference(map)
#define vm_map_res_reference(map)
#define vm_map_res_deallocate(map)

#endif /* TASK_SWAPPER */
/*
 *	Submap object.  Must be used to create memory to be put
 *	in a submap by vm_map_submap.
 */
extern vm_object_t      vm_submap_object;

/*
 *	Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible)           \
	((map)->timestamp++ ,                           \
	lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \
	    (event_t)&(map)->hdr, interruptible))

#define vm_map_entry_wakeup(map)        \
	thread_wakeup((event_t)(&(map)->hdr))
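/*
 * Illustrative sketch (added for exposition; the function name is
 * hypothetical): the canonical in_transition wait loop.  The sleep drops
 * and retakes the exclusive map lock, so the entry must be looked up
 * again after each wakeup.
 */
#if 0 /* example only */
static void
vm_map_entry_wait_example(vm_map_t map, vm_map_entry_t entry)
{
	while (entry->in_transition) {
		entry->needs_wakeup = TRUE;
		vm_map_entry_wait(map, THREAD_UNINT);
		/* re-lookup "entry" here: the map may have changed */
	}
}
#endif /* example only */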
#define vm_map_ref_fast(map)                    \
	MACRO_BEGIN                             \
	lck_mtx_lock(&map->s_lock);             \
	map->ref_count++;                       \
	vm_map_res_reference(map);              \
	lck_mtx_unlock(&map->s_lock);           \
	MACRO_END

#define vm_map_dealloc_fast(map)                \
	MACRO_BEGIN                             \
	int c;                                  \
	                                        \
	lck_mtx_lock(&map->s_lock);             \
	c = --map->ref_count;                   \
	if (c > 0)                              \
	        vm_map_res_deallocate(map);     \
	lck_mtx_unlock(&map->s_lock);           \
	if (c == 0)                             \
	        vm_map_destroy(map);            \
	MACRO_END

/* simplify map entries */
extern void vm_map_simplify_entry(
	vm_map_t        map,
	vm_map_entry_t  this_entry);
extern void vm_map_simplify(
	vm_map_t        map,
	vm_map_offset_t start);

/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t vm_map_copy_copy(
	vm_map_copy_t   copy);

/* Create a copy object from an object. */
extern kern_return_t vm_map_copyin_object(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_map_copy_t           *copy_result);  /* OUT */

extern kern_return_t vm_map_random_address_for_size(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size);
/* Enter a mapping */
extern kern_return_t vm_map_enter(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

#ifdef __arm64__
extern kern_return_t vm_map_enter_fourk(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);
#endif /* __arm64__ */

/* XXX should go away - replaced with regular enter of contig object */
extern kern_return_t vm_map_enter_cpm(
	vm_map_t                map,
	vm_map_address_t        *addr,
	vm_map_size_t           size,
	int                     flags);
extern kern_return_t vm_map_remap(
	vm_map_t                target_map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_map_t                src_map,
	vm_map_offset_t         memory_address,
	boolean_t               copy,
	vm_prot_t               *cur_protection,        /* IN/OUT */
	vm_prot_t               *max_protection,        /* IN/OUT */
	vm_inherit_t            inheritance);

/*
 * Read and write from a kernel buffer to a specified map.
 */
extern kern_return_t vm_map_write_user(
	vm_map_t                map,
	void                    *src_p,
	vm_map_offset_t         dst_addr,
	vm_size_t               size);

extern kern_return_t vm_map_read_user(
	vm_map_t                map,
	vm_map_offset_t         src_addr,
	void                    *dst_p,
	vm_size_t               size);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t vm_map_fork(
	ledger_t                ledger,
	vm_map_t                old_map,
	int                     options);
#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE       0x00000001
#define VM_MAP_FORK_PRESERVE_PURGEABLE          0x00000002
#define VM_MAP_FORK_CORPSE_FOOTPRINT            0x00000004
/* Change inheritance */
extern kern_return_t vm_map_inherit(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_inherit_t            new_inheritance);

/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t vm_map_machine_attribute(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_machine_attribute_t  attribute,
	vm_machine_attribute_val_t *value);     /* IN/OUT */

extern kern_return_t vm_map_msync(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_size_t           size,
	vm_sync_t               sync_flags);

/* Set paging behavior */
extern kern_return_t vm_map_behavior_set(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_behavior_t           new_behavior);
extern kern_return_t vm_map_region(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           *size,
	vm_region_flavor_t      flavor,
	vm_region_info_t        info,
	mach_msg_type_number_t  *count,
	mach_port_t             *object_name);

extern kern_return_t vm_map_region_recurse_64(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           *size,
	natural_t               *nesting_depth,
	vm_region_submap_info_64_t info,
	mach_msg_type_number_t  *count);

extern kern_return_t vm_map_page_query_internal(
	vm_map_t                map,
	vm_map_offset_t         offset,
	int                     *disposition,
	int                     *ref_count);

extern kern_return_t vm_map_query_volatile(
	vm_map_t                map,
	mach_vm_size_t          *volatile_virtual_size_p,
	mach_vm_size_t          *volatile_resident_size_p,
	mach_vm_size_t          *volatile_compressed_size_p,
	mach_vm_size_t          *volatile_pmap_size_p,
	mach_vm_size_t          *volatile_compressed_pmap_size_p);
extern kern_return_t vm_map_submap(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_map_t                submap,
	vm_map_offset_t         offset,
	boolean_t               use_pmap);

extern void vm_map_submap_pmap_clean(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_map_t                sub_map,
	vm_map_offset_t         offset);

/* Convert from a map entry port to a map */
extern vm_map_t convert_port_entry_to_map(
	ipc_port_t              port);

/* Convert from a port to a vm_object */
extern vm_object_t convert_port_entry_to_object(
	ipc_port_t              port);

extern kern_return_t vm_map_set_cache_attr(
	vm_map_t                map,
	vm_map_offset_t         va);

/* definitions related to overriding the NX behavior */

#define VM_ABI_32       0x1
#define VM_ABI_64       0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);
extern void vm_map_region_top_walk(
	vm_map_entry_t          entry,
	vm_region_top_info_t    top);
extern void vm_map_region_walk(
	vm_map_t                map,
	vm_map_offset_t         va,
	vm_map_entry_t          entry,
	vm_object_offset_t      offset,
	vm_object_size_t        range,
	vm_region_extended_info_t extended,
	boolean_t               look_for_pages,
	mach_msg_type_number_t  count);

extern void vm_map_copy_footprint_ledgers(
	task_t                  old_task,
	task_t                  new_task);
extern void vm_map_copy_ledger(
	task_t                  old_task,
	task_t                  new_task,
	int                     ledger_entry);

/*
 * Represents a single region of virtual address space that should be reserved
 * (pre-mapped) in a user address space.
 */
struct vm_reserved_region {
	char                    *vmrr_name;
	vm_map_offset_t         vmrr_addr;
	vm_map_size_t           vmrr_size;
};

/*
 * Return back a machine-dependent array of address space regions that should be
 * reserved by the VM. This function is defined in the machine-dependent
 * machine_routines.c files.
 */
extern size_t ml_get_vm_reserved_regions(
	bool                    vm_is64bit,
	struct vm_reserved_region **regions);
#endif /* MACH_KERNEL_PRIVATE */

/* Create an empty map */
extern vm_map_t vm_map_create(
	pmap_t                  pmap,
	vm_map_offset_t         min_off,
	vm_map_offset_t         max_off,
	boolean_t               pageable);
extern vm_map_t vm_map_create_options(
	pmap_t                  pmap,
	vm_map_offset_t         min_off,
	vm_map_offset_t         max_off,
	int                     options);
#define VM_MAP_CREATE_PAGEABLE          0x00000001
#define VM_MAP_CREATE_CORPSE_FOOTPRINT  0x00000002
#define VM_MAP_CREATE_ALL_OPTIONS (VM_MAP_CREATE_PAGEABLE | \
	                           VM_MAP_CREATE_CORPSE_FOOTPRINT)

extern vm_map_size_t vm_map_adjusted_size(vm_map_t map);

extern void vm_map_disable_hole_optimization(vm_map_t map);
/* Get rid of a map */
extern void vm_map_destroy(
	vm_map_t                map,
	int                     flags);

/* Lose a reference */
extern void vm_map_deallocate(
	vm_map_t                map);

/* Lose a reference */
extern void vm_map_inspect_deallocate(
	vm_map_inspect_t        map);

/* Lose a reference */
extern void vm_map_read_deallocate(
	vm_map_read_t           map);

extern vm_map_t vm_map_switch(
	vm_map_t                map);

/* Change protection */
extern kern_return_t vm_map_protect(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               new_prot,
	boolean_t               set_max);

/* Check protection */
extern boolean_t vm_map_check_protection(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               protection);

extern boolean_t vm_map_cs_enforcement(
	vm_map_t                map);
extern void vm_map_cs_enforcement_set(
	vm_map_t                map,
	boolean_t               val);

extern kern_return_t vm_map_cs_wx_enable(vm_map_t map);
/* wire down a region */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_wire_kernel(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	vm_tag_t                tag,
	boolean_t               user_wire);

extern kern_return_t vm_map_wire_and_extract_kernel(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               access_type,
	vm_tag_t                tag,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p);

/* kext exported versions */

extern kern_return_t vm_map_wire_external(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	boolean_t               user_wire);

extern kern_return_t vm_map_wire_and_extract_external(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               access_type,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p);

#else /* XNU_KERNEL_PRIVATE */

extern kern_return_t vm_map_wire(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	boolean_t               user_wire);

extern kern_return_t vm_map_wire_and_extract(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               access_type,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p);

#endif /* !XNU_KERNEL_PRIVATE */

/* unwire a region */
extern kern_return_t vm_map_unwire(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	boolean_t               user_wire);
#ifdef XNU_KERNEL_PRIVATE

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object_prefault(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	upl_page_list_ptr_t     page_list,
	unsigned int            page_list_count);

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object_control(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	memory_object_control_t control,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

extern kern_return_t vm_map_terminate(
	vm_map_t                map);

#endif /* XNU_KERNEL_PRIVATE */
/* Deallocate a region */
extern kern_return_t vm_map_remove(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	boolean_t               flags);

/* Deallocate a region when the map is already locked */
extern kern_return_t vm_map_remove_locked(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	boolean_t               flags);

/* Discard a copy without using it */
extern void vm_map_copy_discard(
	vm_map_copy_t           copy);

/* Overwrite existing memory with a copy */
extern kern_return_t vm_map_copy_overwrite(
	vm_map_t                dst_map,
	vm_map_address_t        dst_addr,
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size,
	boolean_t               interruptible);

#define VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES      (3)

/* returns TRUE if size of vm_map_copy == size parameter FALSE otherwise */
extern boolean_t vm_map_copy_validate_size(
	vm_map_t                dst_map,
	vm_map_copy_t           copy,
	vm_map_size_t           *size);
/* Place a copy into a map */
extern kern_return_t vm_map_copyout(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,      /* OUT */
	vm_map_copy_t           copy);

extern kern_return_t vm_map_copyout_size(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,      /* OUT */
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size);

extern kern_return_t vm_map_copyout_internal(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,      /* OUT */
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size,
	boolean_t               consume_on_success,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);
extern kern_return_t vm_map_copyin(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               src_destroy,
	vm_map_copy_t           *copy_result);  /* OUT */

extern kern_return_t vm_map_copyin_common(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               src_destroy,
	boolean_t               src_volatile,
	vm_map_copy_t           *copy_result,   /* OUT */
	boolean_t               use_maxprot);

#define VM_MAP_COPYIN_SRC_DESTROY       0x00000001
#define VM_MAP_COPYIN_USE_MAXPROT       0x00000002
#define VM_MAP_COPYIN_ENTRY_LIST        0x00000004
#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
#define VM_MAP_COPYIN_ALL_FLAGS         0x0000000F
extern kern_return_t vm_map_copyin_internal(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	int                     flags,
	vm_map_copy_t           *copy_result);  /* OUT */

extern kern_return_t vm_map_copy_extract(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	vm_prot_t               required_prot,
	boolean_t               do_copy,
	vm_map_copy_t           *copy_result,   /* OUT */
	vm_prot_t               *cur_prot,      /* OUT */
	vm_prot_t               *max_prot,      /* OUT */
	vm_inherit_t            inheritance,
	vm_map_kernel_flags_t   vmk_flags);
extern void vm_map_disable_NX(
	vm_map_t                map);

extern void vm_map_disallow_data_exec(
	vm_map_t                map);

extern void vm_map_set_64bit(
	vm_map_t                map);

extern void vm_map_set_32bit(
	vm_map_t                map);

extern void vm_map_set_jumbo(
	vm_map_t                map);

extern void vm_map_set_jit_entitled(
	vm_map_t                map);

extern void vm_map_set_max_addr(
	vm_map_t                map, vm_map_offset_t new_max_offset);

extern boolean_t vm_map_has_hard_pagezero(
	vm_map_t                map,
	vm_map_offset_t         pagezero_size);

extern void vm_commit_pagezero_status(vm_map_t tmap);
#ifdef __arm64__
static inline boolean_t
vm_map_is_64bit(__unused vm_map_t map)
{
	return TRUE;
}
#else /* __arm64__ */
extern boolean_t vm_map_is_64bit(
	vm_map_t                map);
#endif /* __arm64__ */
extern kern_return_t vm_map_raise_max_offset(
	vm_map_t                map,
	vm_map_offset_t         new_max_offset);

extern kern_return_t vm_map_raise_min_offset(
	vm_map_t                map,
	vm_map_offset_t         new_min_offset);

#if !CONFIG_EMBEDDED
extern void vm_map_set_high_start(
	vm_map_t                map,
	vm_map_offset_t         high_start);
#endif /* !CONFIG_EMBEDDED */

extern vm_map_offset_t vm_compute_max_offset(
	boolean_t               is64);

extern void vm_map_get_max_aslr_slide_section(
	vm_map_t                map,
	int64_t                 *max_sections,
	int64_t                 *section_size);

extern uint64_t vm_map_get_max_aslr_slide_pages(
	vm_map_t                map);

extern uint64_t vm_map_get_max_loader_aslr_slide_pages(
	vm_map_t                map);

extern void vm_map_set_user_wire_limit(
	vm_map_t                map,
	vm_size_t               limit);

extern void vm_map_switch_protect(
	vm_map_t                map,
	boolean_t               val);

extern void vm_map_iokit_mapped_region(
	vm_map_t                map,
	vm_size_t               bytes);

extern void vm_map_iokit_unmapped_region(
	vm_map_t                map,
	vm_size_t               bytes);

extern boolean_t first_free_is_valid(vm_map_t);
extern int vm_map_page_shift(
	vm_map_t                map);

extern vm_map_offset_t vm_map_page_mask(
	vm_map_t                map);

extern int vm_map_page_size(
	vm_map_t                map);

extern vm_map_offset_t vm_map_round_page_mask(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);

extern vm_map_offset_t vm_map_trunc_page_mask(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);

extern boolean_t vm_map_page_aligned(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);
static inline bool
vm_map_range_overflows(vm_map_offset_t addr, vm_map_size_t size)
{
	vm_map_offset_t sum;
	return os_add_overflow(addr, size, &sum);
}

static inline bool
mach_vm_range_overflows(mach_vm_offset_t addr, mach_vm_size_t size)
{
	mach_vm_offset_t sum;
	return os_add_overflow(addr, size, &sum);
}
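/*
 * Illustrative usage (added for exposition; the function name is
 * hypothetical): reject a caller-supplied range before doing any
 * vm_map work with it.
 */
#if 0 /* example only */
static kern_return_t
vm_map_range_check_example(vm_map_offset_t addr, vm_map_size_t size)
{
	if (vm_map_range_overflows(addr, size)) {
		return KERN_INVALID_ARGUMENT;   /* addr + size wraps around */
	}
	return KERN_SUCCESS;
}
#endif /* example only */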
#ifdef XNU_KERNEL_PRIVATE

#if XNU_TARGET_OS_OSX
extern void vm_map_mark_alien(vm_map_t map);
#endif /* XNU_TARGET_OS_OSX */

extern kern_return_t vm_map_page_info(
	vm_map_t                map,
	vm_map_offset_t         offset,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);
extern kern_return_t vm_map_page_range_info_internal(
	vm_map_t                map,
	vm_map_offset_t         start_offset,
	vm_map_offset_t         end_offset,
	int                     effective_page_shift,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);
#endif /* XNU_KERNEL_PRIVATE */
#ifdef  MACH_KERNEL_PRIVATE

/*
 *	Macros to invoke vm_map_copyin_common.  vm_map_copyin is the
 *	usual form; it handles a copyin based on the current protection
 *	(a region whose current protection is VM_PROT_NONE is a failure).
 *	vm_map_copyin_maxprot handles a copyin based on maximum possible
 *	access.  The difference is that a region with no current access
 *	BUT possible maximum access is rejected by vm_map_copyin(), but
 *	returned by vm_map_copyin_maxprot.
 */
#define vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \
	vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
	    FALSE, copy_result, FALSE)

#define vm_map_copyin_maxprot(src_map, \
	src_addr, len, src_destroy, copy_result) \
	vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
	    FALSE, copy_result, TRUE)
/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_PAGE_SHIFT(map) ((map) ? (map)->hdr.page_shift : PAGE_SHIFT)
#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x, pgmask) (((x) & (pgmask)) == 0)
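/*
 * Illustrative example (added for exposition; the function name is
 * hypothetical, and a 16K map, i.e. page_shift == 14, is assumed):
 */
#if 0 /* example only */
static void
vm_map_page_macro_example(vm_map_t map)
{
	assert(VM_MAP_PAGE_SIZE(map) == 0x4000);
	assert(VM_MAP_PAGE_MASK(map) == 0x3fff);
	assert(VM_MAP_ROUND_PAGE(0x4001, VM_MAP_PAGE_MASK(map)) == 0x8000);
	assert(VM_MAP_TRUNC_PAGE(0x4001, VM_MAP_PAGE_MASK(map)) == 0x4000);
}
#endif /* example only */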
static inline bool
VM_MAP_IS_EXOTIC(
	vm_map_t map __unused)
{
#if __arm64__
	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT ||
	    pmap_is_exotic(map->pmap)) {
		return true;
	}
#endif /* __arm64__ */
	return false;
}

static inline bool
VM_MAP_IS_ALIEN(
	vm_map_t map __unused)
{
	/*
	 * An "alien" process/task/map/pmap should mostly behave
	 * as it currently would on iOS.
	 */
#if XNU_TARGET_OS_OSX
	if (map->is_alien) {
		return true;
	}
	return false;
#else /* XNU_TARGET_OS_OSX */
	return true;
#endif /* XNU_TARGET_OS_OSX */
}

static inline bool
VM_MAP_POLICY_WX_FAIL(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_WX_STRIP_X(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return true;
	}
	return false;
}

static inline bool
VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(
	vm_map_t map)
{
	return VM_MAP_IS_ALIEN(map);
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_INHERIT(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_SHARING(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_COPY(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_WRITABLE_SHARED_REGION(
	vm_map_t map __unused)
{
#if __x86_64__
	return true;
#else /* __x86_64__ */
	if (VM_MAP_IS_EXOTIC(map)) {
		return true;
	}
	return false;
#endif /* __x86_64__ */
}
static inline void
vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
{
	switch (prot) {
	case MAP_MEM_NOOP:                      break;
	case MAP_MEM_IO:                        *wimg = VM_WIMG_IO; break;
	case MAP_MEM_COPYBACK:                  *wimg = VM_WIMG_USE_DEFAULT; break;
	case MAP_MEM_INNERWBACK:                *wimg = VM_WIMG_INNERWBACK; break;
	case MAP_MEM_POSTED:                    *wimg = VM_WIMG_POSTED; break;
	case MAP_MEM_POSTED_REORDERED:          *wimg = VM_WIMG_POSTED_REORDERED; break;
	case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break;
	case MAP_MEM_WTHRU:                     *wimg = VM_WIMG_WTHRU; break;
	case MAP_MEM_WCOMB:                     *wimg = VM_WIMG_WCOMB; break;
	case MAP_MEM_RT:                        *wimg = VM_WIMG_RT; break;
	default:                                break;
	}
}
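/*
 * Illustrative usage (added for exposition; the function name is
 * hypothetical): translating a MAP_MEM_* cache mode into a WIMG
 * attribute, e.g. for a later pmap_enter().
 */
#if 0 /* example only */
static unsigned int
vm_prot_to_wimg_example(void)
{
	unsigned int wimg = VM_WIMG_USE_DEFAULT;

	vm_prot_to_wimg(MAP_MEM_WCOMB, &wimg);  /* wimg becomes VM_WIMG_WCOMB */
	return wimg;
}
#endif /* example only */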
#endif /* MACH_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
extern bool vm_map_is_exotic(vm_map_t map);
extern bool vm_map_is_alien(vm_map_t map);
#endif /* XNU_KERNEL_PRIVATE */

#define vm_map_round_page(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define vm_map_trunc_page(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Flags for vm_map_remove() and vm_map_delete()
 */
#define VM_MAP_REMOVE_NO_FLAGS          0x0
#define VM_MAP_REMOVE_KUNWIRE           0x1
#define VM_MAP_REMOVE_INTERRUPTIBLE     0x2
#define VM_MAP_REMOVE_WAIT_FOR_KWIRE    0x4
#define VM_MAP_REMOVE_SAVE_ENTRIES      0x8
#define VM_MAP_REMOVE_NO_PMAP_CLEANUP   0x10
#define VM_MAP_REMOVE_NO_MAP_ALIGN      0x20
#define VM_MAP_REMOVE_NO_UNNESTING      0x40
#define VM_MAP_REMOVE_IMMUTABLE         0x80
#define VM_MAP_REMOVE_GAPS_OK           0x100
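/*
 * Illustrative usage (added for exposition; the function name is
 * hypothetical): the VM_MAP_REMOVE_* flags form a bitmask and may be
 * OR'd together in a single call.
 */
#if 0 /* example only */
static kern_return_t
vm_map_remove_flags_example(vm_map_t map, vm_map_offset_t start,
    vm_map_offset_t end)
{
	return vm_map_remove(map, start, end,
	    VM_MAP_REMOVE_KUNWIRE | VM_MAP_REMOVE_NO_MAP_ALIGN);
}
#endif /* example only */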
/* Support for UPLs from vm_maps */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_get_upl(
	vm_map_t                target_map,
	vm_map_offset_t         map_offset,
	upl_size_t              *size,
	upl_t                   *upl,
	upl_page_info_array_t   page_info,
	unsigned int            *page_infoCnt,
	upl_control_flags_t     *flags,
	vm_tag_t                tag,
	int                     force_data_sync);

#endif /* XNU_KERNEL_PRIVATE */

extern void
vm_map_sizes(vm_map_t map,
    vm_map_size_t * psize,
    vm_map_size_t * pfree,
    vm_map_size_t * plargest_free);

#if CONFIG_DYNAMIC_CODE_SIGNING
extern kern_return_t vm_map_sign(vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end);
#endif /* CONFIG_DYNAMIC_CODE_SIGNING */

extern kern_return_t vm_map_partial_reap(
	vm_map_t                map,
	unsigned int            *reclaimed_resident,
	unsigned int            *reclaimed_compressed);
#if DEVELOPMENT || DEBUG

extern int vm_map_disconnect_page_mappings(
	vm_map_t                map,
	boolean_t);

extern kern_return_t vm_map_inject_error(vm_map_t map, vm_map_offset_t vaddr);

#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_FREEZE

extern kern_return_t vm_map_freeze(
	task_t                  task,
	unsigned int            *purgeable_count,
	unsigned int            *wired_count,
	unsigned int            *clean_count,
	unsigned int            *dirty_count,
	unsigned int            dirty_budget,
	unsigned int            *shared_count,
	int                     *freezer_error_code,
	boolean_t               eval_only);

#define FREEZER_ERROR_GENERIC                   (-1)
#define FREEZER_ERROR_EXCESS_SHARED_MEMORY      (-2)
#define FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO  (-3)
#define FREEZER_ERROR_NO_COMPRESSOR_SPACE       (-4)
#define FREEZER_ERROR_NO_SWAP_SPACE             (-5)

#endif /* CONFIG_FREEZE */
/*
 * In some cases, we don't have a real VM object but still want to return a
 * unique ID (to avoid a memory region looking like shared memory), so build
 * a fake pointer based on the map's ledger and the index of the ledger being
 * reported.
 */
#define VM_OBJECT_ID_FAKE(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id)))

#endif /* KERNEL_PRIVATE */

#endif /* _VM_VM_MAP_H_ */
1898 #endif /* _VM_VM_MAP_H_ */