/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory map module definitions.
 */
#ifndef _VM_VM_MAP_H_
#define _VM_VM_MAP_H_

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>
#include <os/overflow.h>

#ifdef KERNEL_PRIVATE

#include <sys/cdefs.h>

__BEGIN_DECLS
extern void     vm_map_reference(vm_map_t map);
extern vm_map_t current_map(void);

/* Setup reserved areas in a new VM map */
extern kern_return_t    vm_map_exec(
	vm_map_t        new_map,
	task_t          task,
	boolean_t       is64bit,
	void            *fsroot,
	cpu_type_t      cpu,
	cpu_subtype_t   cpu_subtype);
#ifdef MACH_KERNEL_PRIVATE

#include <task_swapper.h>
#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>
#include <os/refcnt.h>

#define current_map_fast()      (current_thread()->map)
#define current_map()           (current_map_fast())

#include <vm/vm_map_store.h>
/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				 used for inter-map copy operations
 */
typedef struct vm_map_entry     *vm_map_entry_t;
#define VM_MAP_ENTRY_NULL       ((vm_map_entry_t) NULL)
/*
 *	Type:		vm_map_object_t [internal use only]
 *
 *	Description:
 *		The target of an address mapping, either a virtual
 *		memory object or a sub map (of the kernel map).
 */
typedef union vm_map_object {
	vm_object_t     vmo_object;     /* object object */
	vm_map_t        vmo_submap;     /* belongs to another map */
} vm_map_object_t;
#define named_entry_lock_init(object)    lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object)         lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)       lck_mtx_unlock(&(object)->Lock)
#if VM_NAMED_ENTRY_LIST
extern queue_head_t vm_named_entry_list;
#endif /* VM_NAMED_ENTRY_LIST */
/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions, (read, write)
 *		with which it can be mapped.
 */

struct vm_named_entry {
	decl_lck_mtx_data(, Lock);              /* Synchronization */
	union {
		vm_object_t     object;         /* object I point to */
		vm_map_t        map;            /* map backing submap */
		vm_map_copy_t   copy;           /* a VM map copy */
	} backing;
	vm_object_offset_t      offset;         /* offset into object */
	vm_object_size_t        size;           /* size of region */
	vm_object_offset_t      data_offset;    /* offset to first byte of data */
	vm_prot_t               protection;     /* access permissions */
	int                     ref_count;      /* Number of references */
	unsigned int                            /* Is backing.xxx : */
	/* boolean_t */ internal:1,             /* ... an internal object */
	/* boolean_t */ is_sub_map:1,           /* ... a submap? */
	/* boolean_t */ is_copy:1;              /* ... a VM map copy */
#if VM_NAMED_ENTRY_LIST
	queue_chain_t           named_entry_list;
	int                     named_entry_alias;
	mach_port_t             named_entry_port;
#define NAMED_ENTRY_BT_DEPTH 16
	void                    *named_entry_bt[NAMED_ENTRY_BT_DEPTH];
#endif /* VM_NAMED_ENTRY_LIST */
};
/*
 *	Type:		vm_map_entry_t [internal use only]
 *
 *	Description:
 *		A single mapping within an address map.
 *
 *	Implementation:
 *		Address map entries consist of start and end addresses,
 *		a VM object (or sub map) and offset into that object,
 *		and user-exported inheritance and protection information.
 *		Control information for virtual copy operations is also
 *		stored in the address map entry.
 */

struct vm_map_links {
	struct vm_map_entry     *prev;          /* previous entry */
	struct vm_map_entry     *next;          /* next entry */
	vm_map_offset_t         start;          /* start address */
	vm_map_offset_t         end;            /* end address */
};
/*
 * FOOTPRINT ACCOUNTING:
 * The "memory footprint" is better described in the pmap layer.
 *
 * At the VM level, these 2 vm_map_entry_t fields are relevant:
 * iokit_mapped:
 *	For an "iokit_mapped" entry, we add the size of the entry to the
 *	footprint when the entry is entered into the map and we subtract that
 *	size when the entry is removed.  No other accounting should take place.
 *	"use_pmap" should be FALSE but is not taken into account.
 * use_pmap: (only when is_sub_map is FALSE)
 *	This indicates if we should ask the pmap layer to account for pages
 *	in this mapping.  If FALSE, we expect that another form of accounting
 *	is being used (e.g. "iokit_mapped" or the explicit accounting of
 *	non-volatile purgable memory).
 *
 * So the logic is mostly:
 * if entry->is_sub_map == TRUE
 *	anything in a submap does not count for the footprint
 * else if entry->iokit_mapped == TRUE
 *	footprint includes the entire virtual size of this entry
 * else if entry->use_pmap == FALSE
 *	tell pmap NOT to account for pages being pmap_enter()'d from this
 *	mapping (i.e. use "alternate accounting")
 * else
 *	pmap will account for pages being pmap_enter()'d from this mapping
 *	as it sees fit (only if anonymous, etc...)
 *
 * (A sketch of this decision, as code, follows the VME_* accessors below.)
 */
struct vm_map_entry {
	struct vm_map_links     links;          /* links to other entries */
#define vme_prev                links.prev
#define vme_next                links.next
#define vme_start               links.start
#define vme_end                 links.end

	struct vm_map_store     store;
	union vm_map_object     vme_object;     /* object I point to */
	vm_object_offset_t      vme_offset;     /* offset into object */

	unsigned int
	/* boolean_t */ is_shared:1,            /* region is shared */
	/* boolean_t */ is_sub_map:1,           /* Is "object" a submap? */
	/* boolean_t */ in_transition:1,        /* Entry being changed */
	/* boolean_t */ needs_wakeup:1,         /* Waiters on in_transition */
	/* vm_behavior_t */ behavior:2,         /* user paging behavior hint */
	/* behavior is not defined for submap type */
	/* boolean_t */ needs_copy:1,           /* object need to be copied? */

	/* Only in task maps: */
	/* vm_prot_t */ protection:3,           /* protection code */
	/* vm_prot_t */ max_protection:3,       /* maximum protection */
	/* vm_inherit_t */ inheritance:2,       /* inheritance */
	/* boolean_t */ use_pmap:1,             /*
	                                         * use_pmap is overloaded:
	                                         * if "is_sub_map":
	                                         *	use a nested pmap?
	                                         * else (i.e. if object):
	                                         *	use pmap accounting
	                                         *	for footprint?
	                                         */
	/* boolean_t */ no_cache:1,             /* should new pages be cached? */
	/* boolean_t */ permanent:1,            /* mapping can not be removed */
	/* boolean_t */ superpage_size:1,       /* use superpages of a certain size */
	/* boolean_t */ map_aligned:1,          /* align to map's page size */
	/* boolean_t */ zero_wired_pages:1,     /* zero out the wired pages of
	                                         * this entry if it is being deleted
	                                         * without unwiring them */
	/* boolean_t */ used_for_jit:1,
	/* boolean_t */ pmap_cs_associated:1,   /* pmap_cs will validate */
	/* boolean_t */ from_reserved_zone:1,   /* Allocated from
	                                         * kernel reserved zone */

	/* iokit accounting: use the virtual size rather than resident size: */
	/* boolean_t */ iokit_acct:1,
	/* boolean_t */ vme_resilient_codesign:1,
	/* boolean_t */ vme_resilient_media:1,
	/* boolean_t */ vme_atomic:1,           /* entry cannot be split/coalesced */
	/* boolean_t */ vme_no_copy_on_read:1,
	    __unused:3;

	unsigned short          wired_count;    /* can be paged if = 0 */
	unsigned short          user_wired_count; /* for vm_wire */

#define MAP_ENTRY_CREATION_DEBUG (1)
#define MAP_ENTRY_INSERTION_DEBUG (1)

#if MAP_ENTRY_CREATION_DEBUG
	struct vm_map_header    *vme_creation_maphdr;
	uintptr_t               vme_creation_bt[16];
#endif
#if MAP_ENTRY_INSERTION_DEBUG
	uintptr_t               vme_insertion_bt[16];
#endif
};
#define VME_SUBMAP_PTR(entry)                   \
	(&((entry)->vme_object.vmo_submap))
#define VME_SUBMAP(entry)                                       \
	((vm_map_t)((uintptr_t)0 + *VME_SUBMAP_PTR(entry)))
#define VME_OBJECT_PTR(entry)                   \
	(&((entry)->vme_object.vmo_object))
#define VME_OBJECT(entry)                                       \
	((vm_object_t)((uintptr_t)0 + *VME_OBJECT_PTR(entry)))
#define VME_OFFSET(entry)                       \
	((entry)->vme_offset & ~PAGE_MASK)
#define VME_ALIAS_MASK (PAGE_MASK)
#define VME_ALIAS(entry)                                        \
	((unsigned int)((entry)->vme_offset & VME_ALIAS_MASK))
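
/*
 * Illustrative sketch (not part of the original interface): the rules from
 * the FOOTPRINT ACCOUNTING comment above, written as a predicate over the
 * vm_map_entry bitfields.  The helper name is hypothetical: submaps never
 * count here, "iokit_acct" entries are charged their entire virtual size,
 * and everything else is left to the pmap layer (use_pmap == FALSE requests
 * the "alternate accounting" path).
 */
static inline boolean_t
vme_counts_fully_toward_footprint_sketch(vm_map_entry_t entry)
{
	if (entry->is_sub_map) {
		return FALSE;   /* submap contents never count here */
	}
	if (entry->iokit_acct) {
		return TRUE;    /* entire virtual size is charged */
	}
	return FALSE;           /* per-page decision deferred to the pmap */
}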
static inline void
VME_OBJECT_SET(
	vm_map_entry_t entry,
	vm_object_t object)
{
	entry->vme_object.vmo_object = object;
	if (object != VM_OBJECT_NULL && !object->internal) {
		entry->vme_resilient_media = FALSE;
	}
	entry->vme_resilient_codesign = FALSE;
	entry->used_for_jit = FALSE;
}

static inline void
VME_SUBMAP_SET(
	vm_map_entry_t entry,
	vm_map_t submap)
{
	entry->vme_object.vmo_submap = submap;
}

static inline void
VME_OFFSET_SET(
	vm_map_entry_t entry,
	vm_map_offset_t offset)
{
	int alias;
	alias = VME_ALIAS(entry);
	assert((offset & PAGE_MASK) == 0);
	entry->vme_offset = offset | alias;
}
363 * The "alias" field can be updated while holding the VM map lock
364 * "shared". It's OK as along as it's the only field that can be
365 * updated without the VM map "exclusive" lock.
static inline void
VME_ALIAS_SET(
	vm_map_entry_t entry,
	int alias)
{
	vm_map_offset_t offset;
	offset = VME_OFFSET(entry);
	entry->vme_offset = offset | (alias & VME_ALIAS_MASK);
}

static inline void
VME_OBJECT_SHADOW(
	vm_map_entry_t entry,
	vm_object_size_t length)
{
	vm_object_t object;
	vm_object_offset_t offset;

	object = VME_OBJECT(entry);
	offset = VME_OFFSET(entry);
	vm_object_shadow(&object, &offset, length);
	if (object != VME_OBJECT(entry)) {
		VME_OBJECT_SET(entry, object);
		entry->use_pmap = TRUE;
	}
	if (offset != VME_OFFSET(entry)) {
		VME_OFFSET_SET(entry, offset);
	}
}
/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) (a & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) ((a + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)
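
/*
 * Worked example (illustrative): with 4KB base pages and
 * SUPERPAGE_NBASEPAGES == 512, SUPERPAGE_SIZE is 2MB (0x200000), so
 * SUPERPAGE_ROUND_DOWN(0x234567) == 0x200000 and
 * SUPERPAGE_ROUND_UP(0x234567) == 0x400000.
 */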
/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT          65535
/*
 *	Type:		struct vm_map_header
 *
 *	Description:
 *		Header for a vm_map and a vm_map_copy.
 */

struct vm_map_header {
	struct vm_map_links     links;          /* first, last, min, max */
	int                     nentries;       /* Number of entries */
	boolean_t               entries_pageable;
	                                        /* are map entries pageable? */
#ifdef VM_MAP_STORE_USE_RB
	struct rb_head          rb_head_store;
#endif
	int                     page_shift;     /* page shift */
};

#define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift)
#define VM_MAP_HDR_PAGE_SIZE(hdr) (1 << VM_MAP_HDR_PAGE_SHIFT((hdr)))
#define VM_MAP_HDR_PAGE_MASK(hdr) (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1)
/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 */
struct _vm_map {
	lck_rw_t                lock;           /* map lock */
	struct vm_map_header    hdr;            /* Map entry header */
#define min_offset              hdr.links.start /* start of range */
#define max_offset              hdr.links.end   /* end of range */
	pmap_t                  pmap;           /* Physical map */
	vm_map_size_t           size;           /* virtual size */
	vm_map_size_t           user_wire_limit;/* rlimit on user locked memory */
	vm_map_size_t           user_wire_size; /* current size of user locked memory in this map */
#if !CONFIG_EMBEDDED
	vm_map_offset_t         vmmap_high_start;
#endif

	union {
		/*
		 * If map->disable_vmentry_reuse == TRUE:
		 * the end address of the highest allocated vm_map_entry_t.
		 */
		vm_map_offset_t         vmu1_highest_entry_end;
		/*
		 * For a nested VM map:
		 * the lowest address in this nested VM map that we would
		 * expect to be unnested under normal operation (i.e. for
		 * regular copy-on-write on DATA section).
		 */
		vm_map_offset_t         vmu1_lowest_unnestable_start;
	} vmu1;
#define highest_entry_end       vmu1.vmu1_highest_entry_end
#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start

	decl_lck_mtx_data(, s_lock);            /* Lock ref, res fields */
	lck_mtx_ext_t           s_lock_ext;
	vm_map_entry_t          hint;           /* hint for quick lookups */
	union {
		struct vm_map_links *vmmap_hole_hint;   /* hint for quick hole lookups */
		struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
	} vmmap_u_1;
#define hole_hint               vmmap_u_1.vmmap_hole_hint
#define vmmap_corpse_footprint  vmmap_u_1.vmmap_corpse_footprint
	union {
		vm_map_entry_t          _first_free;    /* First free space hint */
		struct vm_map_links     *_holes;        /* links all holes between entries */
	} f_s;                                  /* Union for free space data structures being used */

#define first_free              f_s._first_free
#define holes_list              f_s._holes

	struct os_refcnt        map_refcnt;     /* Reference count */

#if TASK_SWAPPER
	int                     res_count;      /* Residence count (swap) */
	int                     sw_state;       /* Swap state */
#endif /* TASK_SWAPPER */

	unsigned int
	/* boolean_t */ wait_for_space:1,       /* Should callers wait for space? */
	/* boolean_t */ wiring_required:1,      /* All memory wired? */
	/* boolean_t */ no_zero_fill:1,         /* No zero fill absent pages */
	/* boolean_t */ mapped_in_other_pmaps:1,/* has this submap been mapped in maps that use a different pmap */
	/* boolean_t */ switch_protect:1,       /* Protect map from write faults while switched */
	/* boolean_t */ disable_vmentry_reuse:1,/* All vm entries should keep using newer and higher addresses in the map */
	/* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
	/* boolean_t */ holelistenabled:1,
	/* boolean_t */ is_nested_map:1,
	/* boolean_t */ map_disallow_new_exec:1, /* Disallow new executable code */
	/* boolean_t */ jit_entry_exists:1,
	/* boolean_t */ has_corpse_footprint:1,
	/* reserved */ pad:20;
	unsigned int            timestamp;      /* Version number */
};

#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
#define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
#define vm_map_first_entry(map) ((map)->hdr.links.next)
#define vm_map_last_entry(map)  ((map)->hdr.links.prev)
#if TASK_SWAPPER
/*
 *	VM map swap states.  There are no transition states.
 */
#define MAP_SW_IN       1       /* map is swapped in; residence count > 0 */
#define MAP_SW_OUT      2       /* map is out (res_count == 0) */
#endif /* TASK_SWAPPER */
/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage note:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int    main_timestamp;
} vm_map_version_t;
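
/*
 * Typical use (illustrative sketch only, under assumptions about the
 * caller's locking): record the version during a lookup, then revalidate
 * with vm_map_verify() before relying on the result; a changed timestamp
 * means the map was modified in between and the lookup must be redone.
 *
 *	vm_map_version_t version;
 *	version.main_timestamp = map->timestamp;   // taken during a lookup
 *	...
 *	if (!vm_map_verify(map, &version)) {
 *		// map changed underneath us: redo the lookup
 *	}
 */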
/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 *		There are three formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header links
 *		entry onto which the other entries that represent
 *		the region are chained.
 *
 *		The second format is a single vm object.  This was used
 *		primarily in the pageout path - but is not currently used
 *		except for placeholder copy objects (see vm_map_copy_copy()).
 *
 *		The third format is a kernel buffer copy object - for data
 *		small enough that physical copies were the most efficient
 *		method.  This method uses a zero-sized array unioned with
 *		other format-specific data in the 'c_u' member.  This unsized
 *		array overlaps the other elements and allows us to use this
 *		extra structure space for physical memory copies.  On 64-bit
 *		systems this saves ~64 bytes per vm_map_copy.
 */
struct vm_map_copy {
	int                     type;
#define VM_MAP_COPY_ENTRY_LIST          1
#define VM_MAP_COPY_OBJECT              2
#define VM_MAP_COPY_KERNEL_BUFFER       3
	vm_object_offset_t      offset;
	vm_map_size_t           size;
	union {
		struct vm_map_header    hdr;            /* ENTRY_LIST */
		vm_object_t             object;         /* OBJECT */
		uint8_t                 kdata[0];       /* KERNEL_BUFFER */
	} c_u;
};


#define cpy_hdr                 c_u.hdr

#define cpy_object              c_u.object
#define cpy_kdata               c_u.kdata
#define cpy_kdata_hdr_sz        (offsetof(struct vm_map_copy, c_u.kdata))

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)
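
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): for a VM_MAP_COPY_KERNEL_BUFFER copy, the payload overlays the
 * tail of struct vm_map_copy at cpy_kdata, so the required allocation is
 * the header size plus the data length.
 */
static inline vm_size_t
vm_map_copy_kernel_buffer_size_sketch(vm_map_size_t len)
{
	return (vm_size_t)(cpy_kdata_hdr_sz + len);
}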
/*
 *	Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy)           \
	((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy)            \
	((copy)->cpy_hdr.links.prev)
/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *	Description:
 *		Perform locking on the data portion of a map.
 *	When multiple maps are to be locked, order by map address.
 *	(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map)                                           \
	((map)->timestamp = 0 ,                                         \
	lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))

#define vm_map_lock(map)                     \
	MACRO_BEGIN                          \
	DTRACE_VM(vm_map_lock_w);            \
	lck_rw_lock_exclusive(&(map)->lock); \
	MACRO_END

#define vm_map_unlock(map)          \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_w); \
	(map)->timestamp++;         \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_read(map)             \
	MACRO_BEGIN                       \
	DTRACE_VM(vm_map_lock_r);         \
	lck_rw_lock_shared(&(map)->lock); \
	MACRO_END

#define vm_map_unlock_read(map)     \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_r); \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_write_to_read(map)                 \
	MACRO_BEGIN                                    \
	DTRACE_VM(vm_map_lock_downgrade);              \
	(map)->timestamp++;                            \
	lck_rw_lock_exclusive_to_shared(&(map)->lock); \
	MACRO_END
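
/*
 * Illustrative sketch of the documented ordering rule (hypothetical
 * helper, not part of the original interface): when two distinct maps
 * must both be locked, lock the lower-addressed one first, as done in
 * vm_map.c::vm_remap().
 */
static inline void
vm_map_lock_two_sketch(vm_map_t a, vm_map_t b)
{
	assert(a != b);         /* caller handles the identical-map case */
	if ((uintptr_t)a < (uintptr_t)b) {
		vm_map_lock(a);
		vm_map_lock(b);
	} else {
		vm_map_lock(b);
		vm_map_lock(a);
	}
}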
__attribute__((always_inline))
int vm_map_lock_read_to_write(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock_read(vm_map_t map);

#if MACH_ASSERT || DEBUG
#define vm_map_lock_assert_held(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD)
#define vm_map_lock_assert_shared(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_SHARED)
#define vm_map_lock_assert_exclusive(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_map_lock_assert_notheld(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
#else /* MACH_ASSERT || DEBUG */
#define vm_map_lock_assert_held(map)
#define vm_map_lock_assert_shared(map)
#define vm_map_lock_assert_exclusive(map)
#define vm_map_lock_assert_notheld(map)
#endif /* MACH_ASSERT || DEBUG */
/*
 *	Exported procedures that operate on vm_map_t.
 */

/* Initialize the module */
extern void             vm_map_init(void);

extern void             vm_kernel_reserved_entry_init(void);

/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t vm_map_find_space(
	vm_map_t                map,
	vm_map_address_t        *address,       /* OUT */
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_map_entry_t          *o_entry);      /* OUT */
extern void vm_map_clip_start(
	vm_map_t        map,
	vm_map_entry_t  entry,
	vm_map_offset_t endaddr);
extern void vm_map_clip_end(
	vm_map_t        map,
	vm_map_entry_t  entry,
	vm_map_offset_t endaddr);
extern boolean_t vm_map_entry_should_cow_for_true_share(
	vm_map_entry_t  entry);

/* Lookup the map entry containing (or, if none, immediately preceding)
 * the specified address in the given map; returns TRUE only if the
 * address falls within an existing entry. */
extern boolean_t        vm_map_lookup_entry(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry);        /* OUT */

extern void             vm_map_copy_remap(
	vm_map_t                map,
	vm_map_entry_t          where,
	vm_map_copy_t           copy,
	vm_map_offset_t         adjustment,
	vm_prot_t               cur_prot,
	vm_prot_t               max_prot,
	vm_inherit_t            inheritance);
/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t    vm_map_lookup_locked(
	vm_map_t                *var_map,       /* IN/OUT */
	vm_map_address_t        vaddr,
	vm_prot_t               fault_type,
	int                     object_lock_type,
	vm_map_version_t        *out_version,   /* OUT */
	vm_object_t             *object,        /* OUT */
	vm_object_offset_t      *offset,        /* OUT */
	vm_prot_t               *out_prot,      /* OUT */
	boolean_t               *wired,         /* OUT */
	vm_object_fault_info_t  fault_info,     /* OUT */
	vm_map_t                *real_map);     /* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t        vm_map_verify(
	vm_map_t                map,
	vm_map_version_t        *version);      /* REF */
extern vm_map_entry_t   vm_map_entry_insert(
	vm_map_t                map,
	vm_map_entry_t          insp_entry,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	boolean_t               is_shared,
	boolean_t               in_transition,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_behavior_t           behavior,
	vm_inherit_t            inheritance,
	unsigned                wired_count,
	boolean_t               no_cache,
	boolean_t               permanent,
	boolean_t               no_copy_on_read,
	unsigned int            superpage_size,
	boolean_t               clear_map_aligned,
	boolean_t               is_submap,
	boolean_t               used_for_jit,
	int                     alias);
/*
 *	Functions implemented as macros
 */
#define vm_map_min(map) ((map)->min_offset)
	                                /* Lowest valid address in
	                                 * a map */

#define vm_map_max(map) ((map)->max_offset)
	                                /* Highest valid address */

#define vm_map_pmap(map)        ((map)->pmap)
	                                /* Physical map associated
	                                 * with this address map */
#if TASK_SWAPPER
/*
 * Macros/functions for map residence counts and swapin/out of vm maps
 */
#if MACH_ASSERT
/* Gain a reference to an existing map */
extern void             vm_map_reference(
	vm_map_t        map);
/* Lose a residence count */
extern void             vm_map_res_deallocate(
	vm_map_t        map);
/* Gain a residence count on a map */
extern void             vm_map_res_reference(
	vm_map_t        map);
/* Gain reference & residence counts to possibly swapped-out map */
extern void             vm_map_reference_swap(
	vm_map_t        map);

#else /* MACH_ASSERT */

#define vm_map_reference(map)                   \
MACRO_BEGIN                                     \
	vm_map_t Map = (map);                   \
	if (Map) {                              \
	        lck_mtx_lock(&Map->s_lock);     \
	        Map->res_count++;               \
	        os_ref_retain(&Map->map_refcnt);\
	        lck_mtx_unlock(&Map->s_lock);   \
	}                                       \
MACRO_END

#define vm_map_res_reference(map)               \
MACRO_BEGIN                                     \
	vm_map_t Lmap = (map);                  \
	if (Lmap->res_count == 0) {             \
	        lck_mtx_unlock(&Lmap->s_lock);  \
	        vm_map_lock(Lmap);              \
	        vm_map_swapin(Lmap);            \
	        lck_mtx_lock(&Lmap->s_lock);    \
	        ++Lmap->res_count;              \
	        vm_map_unlock(Lmap);            \
	} else {                                \
	        ++Lmap->res_count;              \
	}                                       \
MACRO_END

#define vm_map_res_deallocate(map)              \
MACRO_BEGIN                                     \
	vm_map_t Map = (map);                   \
	if (--Map->res_count == 0) {            \
	        lck_mtx_unlock(&Map->s_lock);   \
	        vm_map_lock(Map);               \
	        vm_map_swapout(Map);            \
	        vm_map_unlock(Map);             \
	        lck_mtx_lock(&Map->s_lock);     \
	}                                       \
MACRO_END

#define vm_map_reference_swap(map)      \
MACRO_BEGIN                             \
	vm_map_t Map = (map);           \
	lck_mtx_lock(&Map->s_lock);     \
	os_ref_retain(&Map->map_refcnt);\
	vm_map_res_reference(Map);      \
	lck_mtx_unlock(&Map->s_lock);   \
MACRO_END
#endif /* MACH_ASSERT */

extern void             vm_map_swapin(
	vm_map_t        map);

extern void             vm_map_swapout(
	vm_map_t        map);

#else /* TASK_SWAPPER */

#define vm_map_reference(map)                   \
MACRO_BEGIN                                     \
	vm_map_t Map = (map);                   \
	if (Map) {                              \
	        lck_mtx_lock(&Map->s_lock);     \
	        os_ref_retain(&Map->map_refcnt);\
	        lck_mtx_unlock(&Map->s_lock);   \
	}                                       \
MACRO_END

#define vm_map_reference_swap(map)      vm_map_reference(map)
#define vm_map_res_reference(map)
#define vm_map_res_deallocate(map)

#endif /* TASK_SWAPPER */
/*
 *	Submap object.  Must be used to create memory to be put
 *	in a submap by vm_map_submap.
 */
extern vm_object_t      vm_submap_object;

/*
 *	Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible)           \
	((map)->timestamp++ ,                           \
	lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \
	    (event_t)&(map)->hdr, interruptible))


#define vm_map_entry_wakeup(map)        \
	thread_wakeup((event_t)(&(map)->hdr))
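
/*
 * Typical use (illustrative sketch only): a thread that finds an entry
 * with in_transition set asks for a wakeup and sleeps.  vm_map_entry_wait()
 * bumps the timestamp and drops the map lock while sleeping, so the entry
 * must be looked up again afterwards:
 *
 *	while (entry->in_transition) {
 *		entry->needs_wakeup = TRUE;
 *		vm_map_entry_wait(map, THREAD_UNINT);
 *		// map lock was released: re-lookup "entry" before reuse
 *	}
 */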
#define vm_map_ref_fast(map)            \
	MACRO_BEGIN                     \
	lck_mtx_lock(&map->s_lock);     \
	map->ref_count++;               \
	vm_map_res_reference(map);      \
	lck_mtx_unlock(&map->s_lock);   \
	MACRO_END

#define vm_map_dealloc_fast(map)        \
	MACRO_BEGIN                     \
	int c;                          \
	                                \
	lck_mtx_lock(&map->s_lock);     \
	c = --map->ref_count;           \
	if (c > 0)                      \
	        vm_map_res_deallocate(map); \
	lck_mtx_unlock(&map->s_lock);   \
	if (c == 0)                     \
	        vm_map_destroy(map);    \
	MACRO_END
/* simplify map entries */
extern void             vm_map_simplify_entry(
	vm_map_t        map,
	vm_map_entry_t  this_entry);
extern void             vm_map_simplify(
	vm_map_t        map,
	vm_map_offset_t start);

/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t    vm_map_copy_copy(
	vm_map_copy_t   copy);

/* Create a copy object from an object. */
extern kern_return_t    vm_map_copyin_object(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_map_copy_t           *copy_result); /* OUT */

extern kern_return_t    vm_map_random_address_for_size(
	vm_map_t        map,
	vm_map_offset_t *address,
	vm_map_size_t   size);
/* Enter a mapping */
extern kern_return_t    vm_map_enter(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

#if __arm64__
extern kern_return_t    vm_map_enter_fourk(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);
#endif /* __arm64__ */
/* XXX should go away - replaced with regular enter of contig object */
extern kern_return_t    vm_map_enter_cpm(
	vm_map_t                map,
	vm_map_address_t        *addr,
	vm_map_size_t           size,
	int                     flags);

extern kern_return_t vm_map_remap(
	vm_map_t                target_map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_map_t                src_map,
	vm_map_offset_t         memory_address,
	boolean_t               copy,
	vm_prot_t               *cur_protection,
	vm_prot_t               *max_protection,
	vm_inherit_t            inheritance);
/*
 * Read and write from a kernel buffer to a specified map.
 */
extern kern_return_t    vm_map_write_user(
	vm_map_t        map,
	void            *src_p,
	vm_map_offset_t dst_addr,
	vm_size_t       size);

extern kern_return_t    vm_map_read_user(
	vm_map_t        map,
	vm_map_offset_t src_addr,
	void            *dst_p,
	vm_size_t       size);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t         vm_map_fork(
	ledger_t        ledger,
	vm_map_t        old_map,
	int             options);
#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE       0x00000001
#define VM_MAP_FORK_PRESERVE_PURGEABLE          0x00000002
#define VM_MAP_FORK_CORPSE_FOOTPRINT            0x00000004
/* Change inheritance */
extern kern_return_t    vm_map_inherit(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_inherit_t    new_inheritance);

/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t    vm_map_machine_attribute(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_machine_attribute_t  attribute,
	vm_machine_attribute_val_t *value);     /* IN/OUT */

extern kern_return_t    vm_map_msync(
	vm_map_t        map,
	vm_map_address_t address,
	vm_map_size_t   size,
	vm_sync_t       sync_flags);

/* Set paging behavior */
extern kern_return_t    vm_map_behavior_set(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_behavior_t   new_behavior);
extern kern_return_t vm_map_region(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           *size,
	vm_region_flavor_t      flavor,
	vm_region_info_t        info,
	mach_msg_type_number_t  *count,
	mach_port_t             *object_name);

extern kern_return_t vm_map_region_recurse_64(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           *size,
	natural_t               *nesting_depth,
	vm_region_submap_info_64_t info,
	mach_msg_type_number_t  *count);

extern kern_return_t vm_map_page_query_internal(
	vm_map_t        map,
	vm_map_offset_t offset,
	int             *disposition,
	int             *ref_count);

extern kern_return_t vm_map_query_volatile(
	vm_map_t        map,
	mach_vm_size_t  *volatile_virtual_size_p,
	mach_vm_size_t  *volatile_resident_size_p,
	mach_vm_size_t  *volatile_compressed_size_p,
	mach_vm_size_t  *volatile_pmap_size_p,
	mach_vm_size_t  *volatile_compressed_pmap_size_p);
extern kern_return_t    vm_map_submap(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_map_t        submap,
	vm_map_offset_t offset,
	boolean_t       use_pmap);

extern void vm_map_submap_pmap_clean(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_map_t        sub_map,
	vm_map_offset_t offset);

/* Convert from a map entry port to a map */
extern vm_map_t convert_port_entry_to_map(
	ipc_port_t      port);

/* Convert from a port to a vm_object */
extern vm_object_t convert_port_entry_to_object(
	ipc_port_t      port);

extern kern_return_t vm_map_set_cache_attr(
	vm_map_t        map,
	vm_map_offset_t va);
/* definitions related to overriding the NX behavior */

#define VM_ABI_32       0x1
#define VM_ABI_64       0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);

#if PMAP_CS
extern kern_return_t vm_map_entry_cs_associate(
	vm_map_t                map,
	vm_map_entry_t          entry,
	vm_map_kernel_flags_t   vmk_flags);
#endif /* PMAP_CS */
extern void vm_map_region_top_walk(
	vm_map_entry_t          entry,
	vm_region_top_info_t    top);
extern void vm_map_region_walk(
	vm_map_t                map,
	vm_map_offset_t         va,
	vm_map_entry_t          entry,
	vm_object_offset_t      offset,
	vm_object_size_t        range,
	vm_region_extended_info_t extended,
	boolean_t               look_for_pages,
	mach_msg_type_number_t  count);
struct vm_map_corpse_footprint_header {
	vm_size_t       cf_size;        /* allocated buffer size */
	uint32_t        cf_last_region; /* offset of last region in buffer */
	union {
		uint32_t cfu_last_zeroes; /* during creation:
		                           * number of "zero" dispositions at
		                           * end of last region */
		uint32_t cfu_hint_region; /* during lookup:
		                           * offset of last looked up region */
#define cf_last_zeroes cfu.cfu_last_zeroes
#define cf_hint_region cfu.cfu_hint_region
	} cfu;
};
struct vm_map_corpse_footprint_region {
	vm_map_offset_t cfr_vaddr;      /* region start virtual address */
	uint32_t        cfr_num_pages;  /* number of pages in this "region" */
	unsigned char   cfr_disposition[0];     /* disposition of each page */
} __attribute__((packed));
extern kern_return_t vm_map_corpse_footprint_collect(
	vm_map_t        old_map,
	vm_map_entry_t  old_entry,
	vm_map_t        new_map);
extern void vm_map_corpse_footprint_collect_done(
	vm_map_t        new_map);

extern kern_return_t vm_map_corpse_footprint_query_page_info(
	vm_map_t        map,
	vm_map_offset_t va,
	int             *disp);

extern void vm_map_copy_footprint_ledgers(
	task_t  old_task,
	task_t  new_task);
extern void vm_map_copy_ledger(
	task_t  old_task,
	task_t  new_task,
	int     ledger_entry);

#endif /* MACH_KERNEL_PRIVATE */
/* Create an empty map */
extern vm_map_t         vm_map_create(
	pmap_t          pmap,
	vm_map_offset_t min_off,
	vm_map_offset_t max_off,
	boolean_t       pageable);
extern vm_map_t vm_map_create_options(
	pmap_t          pmap,
	vm_map_offset_t min_off,
	vm_map_offset_t max_off,
	int             options);
#define VM_MAP_CREATE_PAGEABLE          0x00000001
#define VM_MAP_CREATE_CORPSE_FOOTPRINT  0x00000002
#define VM_MAP_CREATE_ALL_OPTIONS (VM_MAP_CREATE_PAGEABLE | \
	                           VM_MAP_CREATE_CORPSE_FOOTPRINT)
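
/*
 * Illustrative sketch (hypothetical helper and values): create a pageable
 * map for a given address range with corpse-footprint support.  PMAP_NULL
 * stands in for "no physical map yet"; real callers supply a pmap as
 * appropriate.
 */
static inline vm_map_t
make_pageable_footprint_map_sketch(vm_map_offset_t min_off,
    vm_map_offset_t max_off)
{
	return vm_map_create_options(PMAP_NULL, min_off, max_off,
	           VM_MAP_CREATE_PAGEABLE | VM_MAP_CREATE_CORPSE_FOOTPRINT);
}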
extern void             vm_map_disable_hole_optimization(vm_map_t map);

/* Get rid of a map */
extern void             vm_map_destroy(
	vm_map_t        map,
	int             flags);

/* Lose a reference */
extern void             vm_map_deallocate(
	vm_map_t        map);

extern vm_map_t         vm_map_switch(
	vm_map_t        map);

/* Change protection */
extern kern_return_t    vm_map_protect(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       new_prot,
	boolean_t       set_max);

/* Check protection */
extern boolean_t vm_map_check_protection(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       protection);
/* wire down a region */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t    vm_map_wire_kernel(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       access_type,
	vm_tag_t        tag,
	boolean_t       user_wire);

extern kern_return_t    vm_map_wire_and_extract_kernel(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_prot_t       access_type,
	vm_tag_t        tag,
	boolean_t       user_wire,
	ppnum_t         *physpage_p);

/* kext exported versions */

extern kern_return_t    vm_map_wire_external(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       access_type,
	boolean_t       user_wire);

extern kern_return_t    vm_map_wire_and_extract_external(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_prot_t       access_type,
	boolean_t       user_wire,
	ppnum_t         *physpage_p);

#else /* XNU_KERNEL_PRIVATE */

extern kern_return_t    vm_map_wire(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       access_type,
	boolean_t       user_wire);

extern kern_return_t    vm_map_wire_and_extract(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_prot_t       access_type,
	boolean_t       user_wire,
	ppnum_t         *physpage_p);

#endif /* !XNU_KERNEL_PRIVATE */

/* unwire a region */
extern kern_return_t    vm_map_unwire(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	boolean_t       user_wire);
#ifdef XNU_KERNEL_PRIVATE

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object_prefault(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	upl_page_list_ptr_t     page_list,
	unsigned int            page_list_count);

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object_control(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	memory_object_control_t control,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

#endif /* XNU_KERNEL_PRIVATE */
/* Deallocate a region */
extern kern_return_t    vm_map_remove(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	boolean_t       flags);

/* Deallocate a region when the map is already locked */
extern kern_return_t    vm_map_remove_locked(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	boolean_t       flags);

/* Discard a copy without using it */
extern void             vm_map_copy_discard(
	vm_map_copy_t   copy);

/* Overwrite existing memory with a copy */
extern kern_return_t    vm_map_copy_overwrite(
	vm_map_t                dst_map,
	vm_map_address_t        dst_addr,
	vm_map_copy_t           copy,
	boolean_t               interruptible);

/* returns TRUE if size of vm_map_copy == size parameter FALSE otherwise */
extern boolean_t        vm_map_copy_validate_size(
	vm_map_t        dst_map,
	vm_map_copy_t   copy,
	vm_map_size_t   *size);
/* Place a copy into a map */
extern kern_return_t    vm_map_copyout(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,      /* OUT */
	vm_map_copy_t           copy);

extern kern_return_t vm_map_copyout_size(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,      /* OUT */
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size);

extern kern_return_t vm_map_copyout_internal(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,      /* OUT */
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size,
	boolean_t               consume_on_success,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);
extern kern_return_t    vm_map_copyin(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               src_destroy,
	vm_map_copy_t           *copy_result);  /* OUT */

extern kern_return_t    vm_map_copyin_common(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               src_destroy,
	boolean_t               src_volatile,
	vm_map_copy_t           *copy_result,   /* OUT */
	boolean_t               use_maxprot);

#define VM_MAP_COPYIN_SRC_DESTROY       0x00000001
#define VM_MAP_COPYIN_USE_MAXPROT       0x00000002
#define VM_MAP_COPYIN_ENTRY_LIST        0x00000004
#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
#define VM_MAP_COPYIN_ALL_FLAGS         0x0000000F
extern kern_return_t    vm_map_copyin_internal(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	int                     flags,
	vm_map_copy_t           *copy_result);  /* OUT */

extern kern_return_t    vm_map_copy_extract(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	vm_map_copy_t           *copy_result,   /* OUT */
	vm_prot_t               *cur_prot,      /* OUT */
	vm_prot_t               *max_prot);
extern void             vm_map_disable_NX(
	vm_map_t        map);

extern void             vm_map_disallow_data_exec(
	vm_map_t        map);

extern void             vm_map_set_64bit(
	vm_map_t        map);

extern void             vm_map_set_32bit(
	vm_map_t        map);

extern void             vm_map_set_jumbo(
	vm_map_t        map);

extern void             vm_map_set_jit_entitled(
	vm_map_t        map);

extern void             vm_map_set_max_addr(
	vm_map_t map, vm_map_offset_t new_max_offset);

extern boolean_t        vm_map_has_hard_pagezero(
	vm_map_t        map,
	vm_map_offset_t pagezero_size);
extern void             vm_commit_pagezero_status(vm_map_t tmap);
#ifdef __arm64__
static inline boolean_t
vm_map_is_64bit(__unused vm_map_t map)
{
	return TRUE;
}
#else
extern boolean_t        vm_map_is_64bit(
	vm_map_t        map);
#endif

extern kern_return_t    vm_map_raise_max_offset(
	vm_map_t        map,
	vm_map_offset_t new_max_offset);

extern kern_return_t    vm_map_raise_min_offset(
	vm_map_t        map,
	vm_map_offset_t new_min_offset);
#if !CONFIG_EMBEDDED
extern void vm_map_set_high_start(
	vm_map_t        map,
	vm_map_offset_t high_start);
#endif

extern vm_map_offset_t  vm_compute_max_offset(
	boolean_t       is64);

extern void             vm_map_get_max_aslr_slide_section(
	vm_map_t        map,
	int64_t         *max_sections,
	int64_t         *section_size);

extern uint64_t         vm_map_get_max_aslr_slide_pages(
	vm_map_t        map);

extern uint64_t         vm_map_get_max_loader_aslr_slide_pages(
	vm_map_t        map);

extern void             vm_map_set_user_wire_limit(
	vm_map_t        map,
	vm_size_t       limit);

extern void vm_map_switch_protect(
	vm_map_t        map,
	boolean_t       val);

extern void vm_map_iokit_mapped_region(
	vm_map_t        map,
	vm_size_t       bytes);

extern void vm_map_iokit_unmapped_region(
	vm_map_t        map,
	vm_size_t       bytes);


extern boolean_t first_free_is_valid(vm_map_t);
extern int              vm_map_page_shift(
	vm_map_t        map);

extern vm_map_offset_t  vm_map_page_mask(
	vm_map_t        map);

extern int              vm_map_page_size(
	vm_map_t        map);

extern vm_map_offset_t  vm_map_round_page_mask(
	vm_map_offset_t offset,
	vm_map_offset_t mask);

extern vm_map_offset_t  vm_map_trunc_page_mask(
	vm_map_offset_t offset,
	vm_map_offset_t mask);

extern boolean_t        vm_map_page_aligned(
	vm_map_offset_t offset,
	vm_map_offset_t mask);
static inline bool
vm_map_range_overflows(vm_map_offset_t addr, vm_map_size_t size)
{
	vm_map_offset_t sum;
	return os_add_overflow(addr, size, &sum);
}

static inline bool
mach_vm_range_overflows(mach_vm_offset_t addr, mach_vm_size_t size)
{
	mach_vm_offset_t sum;
	return os_add_overflow(addr, size, &sum);
}
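
/*
 * Example (illustrative, hypothetical helper): reject a caller-supplied
 * range whose end would wrap around, before doing any rounding or lookup.
 */
static inline kern_return_t
validate_user_range_sketch(vm_map_offset_t addr, vm_map_size_t size)
{
	if (vm_map_range_overflows(addr, size)) {
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}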
#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t    vm_map_page_info(
	vm_map_t                map,
	vm_map_offset_t         offset,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);
extern kern_return_t    vm_map_page_range_info_internal(
	vm_map_t                map,
	vm_map_offset_t         start_offset,
	vm_map_offset_t         end_offset,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);
#endif /* XNU_KERNEL_PRIVATE */
#ifdef MACH_KERNEL_PRIVATE

/*
 *	Macros to invoke vm_map_copyin_common.  vm_map_copyin is the
 *	usual form; it handles a copyin based on the current protection
 *	(a current protection of VM_PROT_NONE is a failure).
 *	vm_map_copyin_maxprot handles a copyin based on maximum possible
 *	access.  The difference is that a region with no current access
 *	BUT possible maximum access is rejected by vm_map_copyin(), but
 *	returned by vm_map_copyin_maxprot.
 */
#define vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \
	vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
	    FALSE, copy_result, FALSE)

#define vm_map_copyin_maxprot(src_map, \
	    src_addr, len, src_destroy, copy_result) \
	vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
	    FALSE, copy_result, TRUE)
/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_PAGE_SHIFT(map) ((map) ? (map)->hdr.page_shift : PAGE_SHIFT)
#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x, pgmask) (((x) & (pgmask)) == 0)
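
/*
 * Worked example (illustrative): for a map with 16KB pages
 * (page_shift == 14), VM_MAP_PAGE_MASK(map) == 0x3fff, so
 * VM_MAP_ROUND_PAGE(0x4001, 0x3fff) == 0x8000 and
 * VM_MAP_TRUNC_PAGE(0x4001, 0x3fff) == 0x4000.
 */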
static inline void
vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
{
	switch (prot) {
	case MAP_MEM_NOOP:                      break;
	case MAP_MEM_IO:                        *wimg = VM_WIMG_IO; break;
	case MAP_MEM_COPYBACK:                  *wimg = VM_WIMG_USE_DEFAULT; break;
	case MAP_MEM_INNERWBACK:                *wimg = VM_WIMG_INNERWBACK; break;
	case MAP_MEM_POSTED:                    *wimg = VM_WIMG_POSTED; break;
	case MAP_MEM_POSTED_REORDERED:          *wimg = VM_WIMG_POSTED_REORDERED; break;
	case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break;
	case MAP_MEM_WTHRU:                     *wimg = VM_WIMG_WTHRU; break;
	case MAP_MEM_WCOMB:                     *wimg = VM_WIMG_WCOMB; break;
	case MAP_MEM_RT:                        *wimg = VM_WIMG_RT; break;
	default:
		panic("Unrecognized mapping type %u\n", prot);
	}
}

#endif /* MACH_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
#endif /* XNU_KERNEL_PRIVATE */

#define vm_map_round_page(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define vm_map_trunc_page(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))
/*
 * Flags for vm_map_remove() and vm_map_delete()
 */
#define VM_MAP_REMOVE_NO_FLAGS          0x0
#define VM_MAP_REMOVE_KUNWIRE           0x1
#define VM_MAP_REMOVE_INTERRUPTIBLE     0x2
#define VM_MAP_REMOVE_WAIT_FOR_KWIRE    0x4
#define VM_MAP_REMOVE_SAVE_ENTRIES      0x8
#define VM_MAP_REMOVE_NO_PMAP_CLEANUP   0x10
#define VM_MAP_REMOVE_NO_MAP_ALIGN      0x20
#define VM_MAP_REMOVE_NO_UNNESTING      0x40
#define VM_MAP_REMOVE_IMMUTABLE         0x80
#define VM_MAP_REMOVE_GAPS_OK           0x100
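
/*
 * These flags combine bitwise; e.g. an interruptible removal that also
 * drops kernel wirings would pass (illustrative call, not from this header):
 *
 *	vm_map_remove(map, start, end,
 *	    VM_MAP_REMOVE_KUNWIRE | VM_MAP_REMOVE_INTERRUPTIBLE);
 */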
/* Support for UPLs from vm_maps */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_get_upl(
	vm_map_t                target_map,
	vm_map_offset_t         map_offset,
	upl_size_t              *size,
	upl_t                   *upl,
	upl_page_info_array_t   page_info,
	unsigned int            *page_infoCnt,
	upl_control_flags_t     *flags,
	vm_tag_t                tag,
	int                     force_data_sync);

#endif /* XNU_KERNEL_PRIVATE */

extern void
vm_map_sizes(vm_map_t map,
    vm_map_size_t * psize,
    vm_map_size_t * pfree,
    vm_map_size_t * plargest_free);
#if CONFIG_DYNAMIC_CODE_SIGNING
extern kern_return_t vm_map_sign(vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end);
#endif

extern kern_return_t vm_map_partial_reap(
	vm_map_t map,
	unsigned int *reclaimed_resident,
	unsigned int *reclaimed_compressed);


#if DEVELOPMENT || DEBUG

extern int vm_map_disconnect_page_mappings(
	vm_map_t map,
	boolean_t);
#endif


#if CONFIG_FREEZE

extern kern_return_t vm_map_freeze(
	vm_map_t        map,
	unsigned int    *purgeable_count,
	unsigned int    *wired_count,
	unsigned int    *clean_count,
	unsigned int    *dirty_count,
	unsigned int    dirty_budget,
	unsigned int    *shared_count,
	int             *freezer_error_code,
	boolean_t       eval_only);
#endif
#define FREEZER_ERROR_GENERIC                   (-1)
#define FREEZER_ERROR_EXCESS_SHARED_MEMORY      (-2)
#define FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO  (-3)
#define FREEZER_ERROR_NO_COMPRESSOR_SPACE       (-4)
#define FREEZER_ERROR_NO_SWAP_SPACE             (-5)

/*
 * In some cases, we don't have a real VM object but still want to return a
 * unique ID (to avoid a memory region looking like shared memory), so build
 * a fake pointer based on the map's ledger and the index of the ledger being
 * reported.
 */
#define INFO_MAKE_FAKE_OBJECT_ID(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id)))

__END_DECLS

#endif /* KERNEL_PRIVATE */

#endif /* _VM_VM_MAP_H_ */