/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/vm_map.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory map module definitions.
 */
#ifndef	_VM_VM_MAP_H_
#define	_VM_VM_MAP_H_

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>

#ifdef	KERNEL_PRIVATE

#include <sys/cdefs.h>

__BEGIN_DECLS

extern void	vm_map_reference(vm_map_t	map);
extern vm_map_t current_map(void);

/* Setup reserved areas in a new VM map */
extern kern_return_t	vm_map_exec(
				vm_map_t	new_map,
				task_t		task,
				boolean_t	is64bit,
				void		*fsroot,
				cpu_type_t	cpu);
#ifdef	MACH_KERNEL_PRIVATE

#include <task_swapper.h>
#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>

#define current_map_fast()	(current_thread()->map)
#define current_map()		(current_map_fast())

#include <vm/vm_map_store.h>
/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				used for inter-map copy operations
 */
typedef struct vm_map_entry	*vm_map_entry_t;
#define VM_MAP_ENTRY_NULL	((vm_map_entry_t) 0)

/*
 *	Type:		vm_map_object_t [internal use only]
 *
 *	Description:
 *		The target of an address mapping, either a virtual
 *		memory object or a sub map (of the kernel map).
 */
typedef union vm_map_object {
	vm_object_t		vmo_object;	/* object object */
	vm_map_t		vmo_submap;	/* belongs to another map */
} vm_map_object_t;

#define named_entry_lock_init(object)	lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object)	lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)	lck_mtx_unlock(&(object)->Lock)
/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions (read, write)
 *		with which it can be mapped.
 */

struct vm_named_entry {
	decl_lck_mtx_data(,	Lock)		/* Synchronization */
	union {
		vm_object_t	object;		/* object I point to */
		vm_map_t	map;		/* map backing submap */
		vm_map_copy_t	copy;		/* a VM map copy */
	} backing;
	vm_object_offset_t	offset;		/* offset into object */
	vm_object_size_t	size;		/* size of region */
	vm_object_offset_t	data_offset;	/* offset to first byte of data */
	vm_prot_t		protection;	/* access permissions */
	int			ref_count;	/* Number of references */
	unsigned int				/* Is backing.xxx : */
	/* boolean_t */		internal:1,	/* ... an internal object */
	/* boolean_t */		is_sub_map:1,	/* ... a submap? */
	/* boolean_t */		is_copy:1;	/* ... a VM map copy */
};
/*
 *	Type:		vm_map_entry_t [internal use only]
 *
 *	Description:
 *		A single mapping within an address map.
 *
 *	Implementation:
 *		Address map entries consist of start and end addresses,
 *		a VM object (or sub map) and offset into that object,
 *		and user-exported inheritance and protection information.
 *		Control information for virtual copy operations is also
 *		stored in the address map entry.
 */

struct vm_map_links {
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vm_map_offset_t		start;		/* start address */
	vm_map_offset_t		end;		/* end address */
};
/*
 * IMPORTANT:
 * The "alias" field can be updated while holding the VM map lock
 * "shared".  It's OK as long as it's the only field that can be
 * updated without the VM map "exclusive" lock.
 */
#define VME_OBJECT(entry) ((entry)->vme_object.vmo_object)
#define VME_OBJECT_SET(entry, object)				\
	MACRO_BEGIN						\
	(entry)->vme_object.vmo_object = (object);		\
	MACRO_END
#define VME_SUBMAP(entry) ((entry)->vme_object.vmo_submap)
#define VME_SUBMAP_SET(entry, submap)				\
	MACRO_BEGIN						\
	(entry)->vme_object.vmo_submap = (submap);		\
	MACRO_END
#define VME_OFFSET(entry) ((entry)->vme_offset & ~PAGE_MASK)
#define VME_OFFSET_SET(entry, offset)				\
	MACRO_BEGIN						\
	int __alias;						\
	__alias = VME_ALIAS((entry));				\
	assert((offset & PAGE_MASK) == 0);			\
	(entry)->vme_offset = offset | __alias;			\
	MACRO_END
#define VME_OBJECT_SHADOW(entry, length)			\
	MACRO_BEGIN						\
	vm_object_t __object;					\
	vm_object_offset_t __offset;				\
	__object = VME_OBJECT((entry));				\
	__offset = VME_OFFSET((entry));				\
	vm_object_shadow(&__object, &__offset, (length));	\
	if (__object != VME_OBJECT((entry))) {			\
		VME_OBJECT_SET((entry), __object);		\
		(entry)->use_pmap = TRUE;			\
	}							\
	if (__offset != VME_OFFSET((entry))) {			\
		VME_OFFSET_SET((entry), __offset);		\
	}							\
	MACRO_END

#define VME_ALIAS_MASK (PAGE_MASK)
#define VME_ALIAS(entry) ((unsigned int)((entry)->vme_offset & VME_ALIAS_MASK))
#define VME_ALIAS_SET(entry, alias)				\
	MACRO_BEGIN						\
	vm_map_offset_t __offset;				\
	__offset = VME_OFFSET((entry));				\
	(entry)->vme_offset = __offset | ((alias) & VME_ALIAS_MASK); \
	MACRO_END
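/*
 * Worked example (illustrative only): object offsets are always
 * page-aligned, so the low PAGE_MASK bits of "vme_offset" are free
 * to carry the alias.  With 4K pages, an entry at object offset
 * 0x5000 with alias 7 stores vme_offset == 0x5007; VME_OFFSET()
 * then yields 0x5000 and VME_ALIAS() yields 7.
 */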
/*
 * FOOTPRINT ACCOUNTING:
 * The "memory footprint" is better described in the pmap layer.
 *
 * At the VM level, these 2 vm_map_entry_t fields are relevant:
 * iokit_mapped:
 *	For an "iokit_mapped" entry, we add the size of the entry to the
 *	footprint when the entry is entered into the map and we subtract that
 *	size when the entry is removed.  No other accounting should take place.
 *	"use_pmap" should be FALSE but is not taken into account.
 * use_pmap: (only when is_sub_map is FALSE)
 *	This indicates if we should ask the pmap layer to account for pages
 *	in this mapping.  If FALSE, we expect that another form of accounting
 *	is being used (e.g. "iokit_mapped" or the explicit accounting of
 *	non-volatile purgable memory).
 *
 * So the logic is mostly:
 * if entry->is_sub_map == TRUE
 *	anything in a submap does not count for the footprint
 * else if entry->iokit_mapped == TRUE
 *	footprint includes the entire virtual size of this entry
 * else if entry->use_pmap == FALSE
 *	tell pmap NOT to account for pages being pmap_enter()'d from this
 *	mapping (i.e. use "alternate accounting")
 * else
 *	pmap will account for pages being pmap_enter()'d from this mapping
 *	as it sees fit (only if anonymous, etc...)
 */
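/*
 * The same decision, as a minimal sketch in code form (illustrative
 * only; "footprint" stands in for the pmap-level ledger):
 *
 *	if (entry->is_sub_map) {
 *		// contents of submaps never count
 *	} else if (entry->iokit_mapped) {
 *		footprint += entry->vme_end - entry->vme_start;
 *	} else if (!entry->use_pmap) {
 *		// "alternate accounting": pmap told not to count
 *	} else {
 *		// pmap accounts at pmap_enter() time, as it sees fit
 *	}
 */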
struct vm_map_entry {
	struct vm_map_links	links;		/* links to other entries */
#define vme_prev		links.prev
#define vme_next		links.next
#define vme_start		links.start
#define vme_end			links.end

	struct vm_map_store	store;
	union vm_map_object	vme_object;	/* object I point to */
	vm_object_offset_t	vme_offset;	/* offset into object */

	unsigned int
	/* boolean_t */	is_shared:1,	/* region is shared */
	/* boolean_t */	is_sub_map:1,	/* Is "object" a submap? */
	/* boolean_t */	in_transition:1, /* Entry being changed */
	/* boolean_t */	needs_wakeup:1,	/* Waiters on in_transition */
	/* vm_behavior_t */ behavior:2,	/* user paging behavior hint */
	/* behavior is not defined for submap type */
	/* boolean_t */	needs_copy:1,	/* object need to be copied? */

	/* Only in task maps: */
	/* vm_prot_t */	protection:3,	/* protection code */
	/* vm_prot_t */	max_protection:3, /* maximum protection */
	/* vm_inherit_t */ inheritance:2, /* inheritance */
	/* boolean_t */	use_pmap:1,	/*
					 * use_pmap is overloaded:
					 * if "is_sub_map":
					 *	use a nested pmap?
					 * else (i.e. if object):
					 *	use pmap accounting
					 *	for footprint?
					 */
	/* boolean_t */	no_cache:1,	/* should new pages be cached? */
	/* boolean_t */	permanent:1,	/* mapping can not be removed */
	/* boolean_t */	superpage_size:1, /* use superpages of a certain size */
	/* boolean_t */	map_aligned:1,	/* align to map's page size */
	/* boolean_t */	zero_wired_pages:1, /* zero out the wired pages of
					     * this entry if it is being deleted
					     * without unwiring them */
	/* boolean_t */	used_for_jit:1,
	/* boolean_t */	from_reserved_zone:1, /* Allocated from
					       * kernel reserved zone */

	/* iokit accounting: use the virtual size rather than resident size: */
	/* boolean_t */ iokit_acct:1,
	/* boolean_t */ vme_resilient_codesign:1,
	/* boolean_t */ vme_resilient_media:1,
	/* boolean_t */ vme_atomic:1,	/* entry cannot be split/coalesced */
	__unused:4;

	unsigned short		wired_count;	/* can be paged if = 0 */
	unsigned short		user_wired_count; /* for vm_wire */
#define	MAP_ENTRY_CREATION_DEBUG (1)
#define	MAP_ENTRY_INSERTION_DEBUG (1)

#if	MAP_ENTRY_CREATION_DEBUG
	struct vm_map_header	*vme_creation_maphdr;
	uintptr_t		vme_creation_bt[16];
#endif
#if	MAP_ENTRY_INSERTION_DEBUG
	uintptr_t		vme_insertion_bt[16];
#endif
};
/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) (a & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) ((a + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)
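/*
 * Worked example (illustrative only): assuming 4K base pages and
 * SUPERPAGE_NBASEPAGES == 512 (as on x86_64), SUPERPAGE_SIZE is
 * 2MB (0x200000), so SUPERPAGE_ROUND_DOWN(0x3fffff) == 0x200000
 * and SUPERPAGE_ROUND_UP(0x200001) == 0x400000.
 */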
/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT		65535

/*
 *	Type:		struct vm_map_header
 *
 *	Description:
 *		Header for a vm_map and a vm_map_copy.
 */

struct vm_map_header {
	struct vm_map_links	links;		/* first, last, min, max */
	int			nentries;	/* Number of entries */
	boolean_t		entries_pageable;
						/* are map entries pageable? */
#ifdef VM_MAP_STORE_USE_RB
	struct rb_head		rb_head_store;
#endif
	int			page_shift;	/* page shift */
};
#define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift)
#define VM_MAP_HDR_PAGE_SIZE(hdr) (1 << VM_MAP_HDR_PAGE_SHIFT((hdr)))
#define VM_MAP_HDR_PAGE_MASK(hdr) (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1)
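/*
 * Worked example (illustrative only): a header with page_shift == 14
 * describes 16K pages, so VM_MAP_HDR_PAGE_SIZE(hdr) == 0x4000 and
 * VM_MAP_HDR_PAGE_MASK(hdr) == 0x3fff.
 */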
/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 */
struct _vm_map {
	lck_rw_t		lock;		/* map lock */
	struct vm_map_header	hdr;		/* Map entry header */
#define min_offset		hdr.links.start	/* start of range */
#define max_offset		hdr.links.end	/* end of range */
	pmap_t			pmap;		/* Physical map */
	vm_map_size_t		size;		/* virtual size */
	vm_map_size_t		user_wire_limit;/* rlimit on user locked memory */
	vm_map_size_t		user_wire_size;	/* current size of user locked memory in this map */
#if __x86_64__
	vm_map_offset_t		vmmap_high_start;
#endif /* __x86_64__ */

	union {
		/*
		 * If map->disable_vmentry_reuse == TRUE:
		 * the end address of the highest allocated vm_map_entry_t.
		 */
		vm_map_offset_t		vmu1_highest_entry_end;
		/*
		 * For a nested VM map:
		 * the lowest address in this nested VM map that we would
		 * expect to be unnested under normal operation (i.e. for
		 * regular copy-on-write on DATA section).
		 */
		vm_map_offset_t		vmu1_lowest_unnestable_start;
	} vmu1;
#define highest_entry_end	vmu1.vmu1_highest_entry_end
#define lowest_unnestable_start	vmu1.vmu1_lowest_unnestable_start

	int			ref_count;	/* Reference count */
#if	TASK_SWAPPER
	int			res_count;	/* Residence count (swap) */
	int			sw_state;	/* Swap state */
#endif	/* TASK_SWAPPER */
	decl_lck_mtx_data(,	s_lock)		/* Lock ref, res fields */
	lck_mtx_ext_t		s_lock_ext;
	vm_map_entry_t		hint;		/* hint for quick lookups */
	struct vm_map_links*	hole_hint;	/* hint for quick hole lookups */
	union {
		vm_map_entry_t		_first_free;	/* First free space hint */
		struct vm_map_links*	_holes;		/* links all holes between entries */
	} f_s;		/* Union for free space data structures being used */

#define first_free		f_s._first_free
#define holes_list		f_s._holes

	unsigned int
	/* boolean_t */		wait_for_space:1, /* Should callers wait for space? */
	/* boolean_t */		wiring_required:1, /* All memory wired? */
	/* boolean_t */		no_zero_fill:1,	/* No zero fill absent pages */
	/* boolean_t */		mapped_in_other_pmaps:1, /* has this submap been mapped in maps that use a different pmap */
	/* boolean_t */		switch_protect:1, /* Protect map from write faults while switched */
	/* boolean_t */		disable_vmentry_reuse:1, /* All vm entries should keep using newer and higher addresses in the map */
	/* boolean_t */		map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
	/* boolean_t */		holelistenabled:1,
	/* boolean_t */		is_nested_map:1,
	/* boolean_t */		map_disallow_new_exec:1, /* Disallow new executable code */
	/* reserved */		pad:22;
	unsigned int		timestamp;	/* Version number */
	unsigned int		color_rr;	/* next color (not protected by a lock) */

	boolean_t		jit_entry_exists;
};
#define vm_map_to_entry(map) ((struct vm_map_entry *) &(map)->hdr.links)
#define vm_map_first_entry(map)	((map)->hdr.links.next)
#define vm_map_last_entry(map)	((map)->hdr.links.prev)
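/*
 * A minimal traversal sketch (illustrative only): the entry links form
 * a circular list whose sentinel is the map header itself, so a walk
 * terminates when it comes back around to vm_map_to_entry(map).
 *
 *	vm_map_entry_t entry;
 *
 *	for (entry = vm_map_first_entry(map);
 *	     entry != vm_map_to_entry(map);
 *	     entry = entry->vme_next) {
 *		// examine [entry->vme_start, entry->vme_end)
 *	}
 */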
#if	TASK_SWAPPER
/*
 *	VM map swap states.  There are no transition states.
 */
#define MAP_SW_IN	1	/* map is swapped in; residence count > 0 */
#define MAP_SW_OUT	2	/* map is out (res_count == 0) */
#endif	/* TASK_SWAPPER */

/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage note:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int	main_timestamp;
} vm_map_version_t;
/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 *		There are three formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header links
 *		entry onto which the other entries that represent
 *		the region are chained.
 *
 *		The second format is a single vm object.  This was used
 *		primarily in the pageout path - but is not currently used
 *		except for placeholder copy objects (see vm_map_copy_copy()).
 *
 *		The third format is a kernel buffer copy object - for data
 *		small enough that physical copies were the most efficient
 *		method.  This method uses a zero-sized array unioned with
 *		other format-specific data in the 'c_u' member.  This unsized
 *		array overlaps the other elements and allows us to use this
 *		extra structure space for physical memory copies.  On 64-bit
 *		systems this saves ~64 bytes per vm_map_copy.
 */

struct vm_map_copy {
	int			type;
#define VM_MAP_COPY_ENTRY_LIST		1
#define VM_MAP_COPY_OBJECT		2
#define VM_MAP_COPY_KERNEL_BUFFER	3
	vm_object_offset_t	offset;
	vm_map_size_t		size;
	union {
		struct vm_map_header	hdr;	  /* ENTRY_LIST */
		vm_object_t		object;	  /* OBJECT */
		uint8_t			kdata[0]; /* KERNEL_BUFFER */
	} c_u;
};

#define cpy_hdr			c_u.hdr

#define cpy_object		c_u.object
#define cpy_kdata		c_u.kdata
#define cpy_kdata_hdr_sz	(offsetof(struct vm_map_copy, c_u.kdata))

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)
/*
 *	Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy)		\
		((struct vm_map_entry *) &(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy)		\
		((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy)		\
		((copy)->cpy_hdr.links.prev)
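/*
 * A minimal traversal sketch for an ENTRY_LIST copy (illustrative
 * only), mirroring the map walk above:
 *
 *	vm_map_entry_t entry;
 *
 *	for (entry = vm_map_copy_first_entry(copy);
 *	     entry != vm_map_copy_to_entry(copy);
 *	     entry = entry->vme_next) {
 *		// process one copied range
 *	}
 */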
/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *
 *	Description:
 *		Perform locking on the data portion of a map.
 *		When multiple maps are to be locked, order by map address.
 *		(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map)						\
	((map)->timestamp = 0 ,						\
	 lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))

#define vm_map_lock(map)	lck_rw_lock_exclusive(&(map)->lock)
#define vm_map_unlock(map)						\
	((map)->timestamp++ ,	lck_rw_done(&(map)->lock))
#define vm_map_lock_read(map)	lck_rw_lock_shared(&(map)->lock)
#define vm_map_unlock_read(map)	lck_rw_done(&(map)->lock)
#define vm_map_lock_write_to_read(map)					\
	((map)->timestamp++ ,	lck_rw_lock_exclusive_to_shared(&(map)->lock))

/*
 * lock_read_to_write() returns FALSE on failure.  This macro therefore
 * evaluates to zero on success and to a non-zero value on failure.
 */
#define vm_map_lock_read_to_write(map) (lck_rw_lock_shared_to_exclusive(&(map)->lock) != TRUE)
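/*
 * A minimal upgrade sketch (illustrative only): on failure the shared
 * lock has already been dropped, so the caller must take the exclusive
 * lock from scratch and re-validate anything it observed.
 *
 *	vm_map_lock_read(map);
 *	...
 *	if (vm_map_lock_read_to_write(map)) {
 *		vm_map_lock(map);
 *		// state may have changed while unlocked: re-check
 *	}
 *	...
 *	vm_map_unlock(map);
 */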
#define vm_map_try_lock(map)		lck_rw_try_lock_exclusive(&(map)->lock)
#define vm_map_try_lock_read(map)	lck_rw_try_lock_shared(&(map)->lock)

#if MACH_ASSERT || DEBUG
#define vm_map_lock_assert_held(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD)
#define vm_map_lock_assert_shared(map)	\
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_SHARED)
#define vm_map_lock_assert_exclusive(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_map_lock_assert_notheld(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
#else  /* MACH_ASSERT || DEBUG */
#define vm_map_lock_assert_held(map)
#define vm_map_lock_assert_shared(map)
#define vm_map_lock_assert_exclusive(map)
#define vm_map_lock_assert_notheld(map)
#endif /* MACH_ASSERT || DEBUG */
/*
 *	Exported procedures that operate on vm_map_t.
 */

/* Initialize the module */
extern void		vm_map_init(void);

extern void		vm_kernel_reserved_entry_init(void);

/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t	vm_map_find_space(
				vm_map_t		map,
				vm_map_address_t	*address,	/* OUT */
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				vm_map_kernel_flags_t	vmk_flags,
				vm_tag_t		tag,
				vm_map_entry_t		*o_entry);	/* OUT */

extern void		vm_map_clip_start(
				vm_map_t	map,
				vm_map_entry_t	entry,
				vm_map_offset_t	endaddr);

extern void		vm_map_clip_end(
				vm_map_t	map,
				vm_map_entry_t	entry,
				vm_map_offset_t	endaddr);

extern boolean_t	vm_map_entry_should_cow_for_true_share(
				vm_map_entry_t	entry);

/* Lookup the map entry containing the specified address (or, failing
 * that, the entry immediately preceding it) in the given map */
extern boolean_t	vm_map_lookup_entry(
				vm_map_t	map,
				vm_map_address_t address,
				vm_map_entry_t	*entry);	/* OUT */

extern void		vm_map_copy_remap(
				vm_map_t	map,
				vm_map_entry_t	where,
				vm_map_copy_t	copy,
				vm_map_offset_t	adjustment,
				vm_prot_t	cur_prot,
				vm_prot_t	max_prot,
				vm_inherit_t	inheritance);
/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t	vm_map_lookup_locked(
				vm_map_t	*var_map,	/* IN/OUT */
				vm_map_address_t vaddr,
				vm_prot_t	fault_type,
				int		object_lock_type,
				vm_map_version_t *out_version,	/* OUT */
				vm_object_t	*object,	/* OUT */
				vm_object_offset_t *offset,	/* OUT */
				vm_prot_t	*out_prot,	/* OUT */
				boolean_t	*wired,		/* OUT */
				vm_object_fault_info_t fault_info, /* OUT */
				vm_map_t	*real_map);	/* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t	vm_map_verify(
				vm_map_t	map,
				vm_map_version_t *version);	/* REF */
extern vm_map_entry_t	vm_map_entry_insert(
				vm_map_t		map,
				vm_map_entry_t		insp_entry,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_object_t		object,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				boolean_t		is_shared,
				boolean_t		in_transition,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_behavior_t		behavior,
				vm_inherit_t		inheritance,
				unsigned		wired_count,
				boolean_t		no_cache,
				boolean_t		permanent,
				unsigned int		superpage_size,
				boolean_t		clear_map_aligned,
				boolean_t		is_submap,
				boolean_t		used_for_jit,
				int			alias);
/*
 *	Functions implemented as macros
 */
#define		vm_map_min(map)	((map)->min_offset)
						/* Lowest valid address in
						 * a map */

#define		vm_map_max(map)	((map)->max_offset)
						/* Highest valid address */

#define		vm_map_pmap(map)	((map)->pmap)
						/* Physical map associated
						 * with this address map */

/*
 *	Macros/functions for map residence counts and swapin/out of vm maps
 */
#if	TASK_SWAPPER
#if	MACH_ASSERT

/* Gain a reference to an existing map */
extern void		vm_map_reference(
				vm_map_t	map);
/* Lose a residence count */
extern void		vm_map_res_deallocate(
				vm_map_t	map);
/* Gain a residence count on a map */
extern void		vm_map_res_reference(
				vm_map_t	map);
/* Gain reference & residence counts to possibly swapped-out map */
extern void		vm_map_reference_swap(
				vm_map_t	map);
#else	/* MACH_ASSERT */

#define vm_map_reference(map)			\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	if (Map) {				\
		lck_mtx_lock(&Map->s_lock);	\
		Map->res_count++;		\
		Map->ref_count++;		\
		lck_mtx_unlock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_res_reference(map)		\
MACRO_BEGIN					\
	vm_map_t Lmap = (map);			\
	if (Lmap->res_count == 0) {		\
		lck_mtx_unlock(&Lmap->s_lock);	\
		vm_map_lock(Lmap);		\
		vm_map_swapin(Lmap);		\
		lck_mtx_lock(&Lmap->s_lock);	\
		++Lmap->res_count;		\
		vm_map_unlock(Lmap);		\
	} else					\
		++Lmap->res_count;		\
MACRO_END

#define vm_map_res_deallocate(map)		\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	if (--Map->res_count == 0) {		\
		lck_mtx_unlock(&Map->s_lock);	\
		vm_map_lock(Map);		\
		vm_map_swapout(Map);		\
		vm_map_unlock(Map);		\
		lck_mtx_lock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_reference_swap(map)		\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	lck_mtx_lock(&Map->s_lock);		\
	++Map->ref_count;			\
	vm_map_res_reference(Map);		\
	lck_mtx_unlock(&Map->s_lock);		\
MACRO_END
#endif	/* MACH_ASSERT */

extern void		vm_map_swapin(
				vm_map_t	map);

extern void		vm_map_swapout(
				vm_map_t	map);

#else	/* TASK_SWAPPER */

#define vm_map_reference(map)			\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	if (Map) {				\
		lck_mtx_lock(&Map->s_lock);	\
		Map->ref_count++;		\
		lck_mtx_unlock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_reference_swap(map)	vm_map_reference(map)
#define vm_map_res_reference(map)
#define vm_map_res_deallocate(map)

#endif	/* TASK_SWAPPER */
/*
 *	Submap object.  Must be used to create memory to be put
 *	in a submap by vm_map_submap.
 */
extern vm_object_t	vm_submap_object;

/*
 *	Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible)		\
	((map)->timestamp++ ,				\
	 lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \
		      (event_t)&(map)->hdr, interruptible))

#define vm_map_entry_wakeup(map)	\
	thread_wakeup((event_t)(&(map)->hdr))
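/*
 * A minimal wait sketch (illustrative only): callers that find an
 * entry "in_transition" mark it so the changing thread knows to issue
 * a wakeup, then sleep on the map.  The map lock is dropped while
 * asleep and reacquired on wakeup, so the entry must be looked up
 * again afterwards.
 *
 *	while (entry->in_transition) {
 *		entry->needs_wakeup = TRUE;
 *		vm_map_entry_wait(map, THREAD_UNINT);
 *		// map lock reacquired: re-lookup the entry
 *	}
 */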
#define	vm_map_ref_fast(map)			\
	MACRO_BEGIN				\
	lck_mtx_lock(&map->s_lock);		\
	map->ref_count++;			\
	vm_map_res_reference(map);		\
	lck_mtx_unlock(&map->s_lock);		\
	MACRO_END

#define	vm_map_dealloc_fast(map)		\
	MACRO_BEGIN				\
	int c;					\
	lck_mtx_lock(&map->s_lock);		\
	c = --map->ref_count;			\
	if (c > 0)				\
		vm_map_res_deallocate(map);	\
	lck_mtx_unlock(&map->s_lock);		\
	if (c == 0)				\
		vm_map_destroy(map);		\
	MACRO_END
/* simplify map entries */
extern void		vm_map_simplify_entry(
				vm_map_t	map,
				vm_map_entry_t	this_entry);
extern void		vm_map_simplify(
				vm_map_t	map,
				vm_map_offset_t	start);

/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t	vm_map_copy_copy(
				vm_map_copy_t	copy);

/* Create a copy object from an object. */
extern kern_return_t	vm_map_copyin_object(
				vm_object_t	object,
				vm_object_offset_t offset,
				vm_object_size_t size,
				vm_map_copy_t	*copy_result);	/* OUT */

extern kern_return_t	vm_map_random_address_for_size(
				vm_map_t	map,
				vm_map_offset_t	*address,
				vm_map_size_t	size);
/* Enter a mapping */
extern kern_return_t	vm_map_enter(
				vm_map_t	map,
				vm_map_offset_t	*address,
				vm_map_size_t	size,
				vm_map_offset_t	mask,
				int		flags,
				vm_map_kernel_flags_t vmk_flags,
				vm_tag_t	tag,
				vm_object_t	object,
				vm_object_offset_t offset,
				boolean_t	needs_copy,
				vm_prot_t	cur_protection,
				vm_prot_t	max_protection,
				vm_inherit_t	inheritance);

#if __arm64__
extern kern_return_t	vm_map_enter_fourk(
				vm_map_t	map,
				vm_map_offset_t	*address,
				vm_map_size_t	size,
				vm_map_offset_t	mask,
				int		flags,
				vm_map_kernel_flags_t vmk_flags,
				vm_tag_t	tag,
				vm_object_t	object,
				vm_object_offset_t offset,
				boolean_t	needs_copy,
				vm_prot_t	cur_protection,
				vm_prot_t	max_protection,
				vm_inherit_t	inheritance);
#endif /* __arm64__ */

/* XXX should go away - replaced with regular enter of contig object */
extern kern_return_t	vm_map_enter_cpm(
				vm_map_t	map,
				vm_map_address_t *addr,
				vm_map_size_t	size,
				int		flags);
extern kern_return_t	vm_map_remap(
				vm_map_t	target_map,
				vm_map_offset_t	*address,
				vm_map_size_t	size,
				vm_map_offset_t	mask,
				int		flags,
				vm_map_kernel_flags_t vmk_flags,
				vm_tag_t	tag,
				vm_map_t	src_map,
				vm_map_offset_t	memory_address,
				boolean_t	copy,
				vm_prot_t	*cur_protection,
				vm_prot_t	*max_protection,
				vm_inherit_t	inheritance);

/*
 *	Read and write from a kernel buffer to a specified map.
 */
extern kern_return_t	vm_map_write_user(
				vm_map_t	map,
				void		*src_p,
				vm_map_offset_t	dst_addr,
				vm_size_t	size);

extern kern_return_t	vm_map_read_user(
				vm_map_t	map,
				vm_map_offset_t	src_addr,
				void		*dst_p,
				vm_size_t	size);
/* Create a new task map using an existing task map as a template. */
extern vm_map_t		vm_map_fork(
				ledger_t	ledger,
				vm_map_t	old_map,
				int		options);
#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE	0x00000001
#define VM_MAP_FORK_PRESERVE_PURGEABLE		0x00000002

/* Change inheritance */
extern kern_return_t	vm_map_inherit(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_inherit_t	new_inheritance);

/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t	vm_map_machine_attribute(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_machine_attribute_t attribute,
				vm_machine_attribute_val_t *value); /* IN/OUT */

extern kern_return_t	vm_map_msync(
				vm_map_t	map,
				vm_map_address_t address,
				vm_map_size_t	size,
				vm_sync_t	sync_flags);

/* Set paging behavior */
extern kern_return_t	vm_map_behavior_set(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_behavior_t	new_behavior);
extern kern_return_t	vm_map_region(
				vm_map_t	map,
				vm_map_offset_t	*address,
				vm_map_size_t	*size,
				vm_region_flavor_t flavor,
				vm_region_info_t info,
				mach_msg_type_number_t *count,
				mach_port_t	*object_name);

extern kern_return_t	vm_map_region_recurse_64(
				vm_map_t	map,
				vm_map_offset_t	*address,
				vm_map_size_t	*size,
				natural_t	*nesting_depth,
				vm_region_submap_info_64_t info,
				mach_msg_type_number_t *count);

extern kern_return_t	vm_map_page_query_internal(
				vm_map_t	map,
				vm_map_offset_t	offset,
				int		*disposition,
				int		*ref_count);

extern kern_return_t	vm_map_query_volatile(
				vm_map_t	map,
				mach_vm_size_t	*volatile_virtual_size_p,
				mach_vm_size_t	*volatile_resident_size_p,
				mach_vm_size_t	*volatile_compressed_size_p,
				mach_vm_size_t	*volatile_pmap_size_p,
				mach_vm_size_t	*volatile_compressed_pmap_size_p);
extern kern_return_t	vm_map_submap(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_map_t	submap,
				vm_map_offset_t	offset,
				boolean_t	use_pmap);

extern void		vm_map_submap_pmap_clean(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_map_t	sub_map,
				vm_map_offset_t	offset);

/* Convert from a map entry port to a map */
extern vm_map_t		convert_port_entry_to_map(
				ipc_port_t	port);

/* Convert from a port to a vm_object */
extern vm_object_t	convert_port_entry_to_object(
				ipc_port_t	port);

extern kern_return_t	vm_map_set_cache_attr(
				vm_map_t	map,
				vm_map_offset_t	va);

/* definitions related to overriding the NX behavior */

#define VM_ABI_32	0x1
#define VM_ABI_64	0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);
extern void		vm_map_region_top_walk(
				vm_map_entry_t	entry,
				vm_region_top_info_t top);
extern void		vm_map_region_walk(
				vm_map_t	map,
				vm_map_offset_t	va,
				vm_map_entry_t	entry,
				vm_object_offset_t offset,
				vm_object_size_t range,
				vm_region_extended_info_t extended,
				boolean_t	look_for_pages,
				mach_msg_type_number_t count);

#endif /* MACH_KERNEL_PRIVATE */
/* Create an empty map */
extern vm_map_t		vm_map_create(
				pmap_t		pmap,
				vm_map_offset_t	min_off,
				vm_map_offset_t	max_off,
				boolean_t	pageable);

extern void		vm_map_disable_hole_optimization(vm_map_t map);

/* Get rid of a map */
extern void		vm_map_destroy(
				vm_map_t	map,
				int		flags);

/* Lose a reference */
extern void		vm_map_deallocate(
				vm_map_t	map);

extern vm_map_t		vm_map_switch(
				vm_map_t	map);

/* Change protection */
extern kern_return_t	vm_map_protect(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_prot_t	new_prot,
				boolean_t	set_max);

/* Check protection */
extern boolean_t	vm_map_check_protection(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_prot_t	protection);
/* wire down a region */

#ifdef	XNU_KERNEL_PRIVATE

extern kern_return_t	vm_map_wire_kernel(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_prot_t	access_type,
				vm_tag_t	tag,
				boolean_t	user_wire);

extern kern_return_t	vm_map_wire_and_extract_kernel(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_prot_t	access_type,
				vm_tag_t	tag,
				boolean_t	user_wire,
				ppnum_t		*physpage_p);

/* kext exported versions */

extern kern_return_t	vm_map_wire_external(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_prot_t	access_type,
				boolean_t	user_wire);

extern kern_return_t	vm_map_wire_and_extract_external(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_prot_t	access_type,
				boolean_t	user_wire,
				ppnum_t		*physpage_p);

#else /* XNU_KERNEL_PRIVATE */

extern kern_return_t	vm_map_wire(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_prot_t	access_type,
				boolean_t	user_wire);

extern kern_return_t	vm_map_wire_and_extract(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_prot_t	access_type,
				boolean_t	user_wire,
				ppnum_t		*physpage_p);

#endif /* !XNU_KERNEL_PRIVATE */

/* unwire a region */
extern kern_return_t	vm_map_unwire(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				boolean_t	user_wire);
#ifdef	XNU_KERNEL_PRIVATE

/* Enter a mapping of a memory object */
extern kern_return_t	vm_map_enter_mem_object(
				vm_map_t	map,
				vm_map_offset_t	*address,
				vm_map_size_t	size,
				vm_map_offset_t	mask,
				int		flags,
				vm_map_kernel_flags_t vmk_flags,
				vm_tag_t	tag,
				ipc_port_t	port,
				vm_object_offset_t offset,
				boolean_t	needs_copy,
				vm_prot_t	cur_protection,
				vm_prot_t	max_protection,
				vm_inherit_t	inheritance);

/* Enter a mapping of a memory object */
extern kern_return_t	vm_map_enter_mem_object_prefault(
				vm_map_t	map,
				vm_map_offset_t	*address,
				vm_map_size_t	size,
				vm_map_offset_t	mask,
				int		flags,
				vm_map_kernel_flags_t vmk_flags,
				vm_tag_t	tag,
				ipc_port_t	port,
				vm_object_offset_t offset,
				vm_prot_t	cur_protection,
				vm_prot_t	max_protection,
				upl_page_list_ptr_t page_list,
				unsigned int	page_list_count);

/* Enter a mapping of a memory object */
extern kern_return_t	vm_map_enter_mem_object_control(
				vm_map_t	map,
				vm_map_offset_t	*address,
				vm_map_size_t	size,
				vm_map_offset_t	mask,
				int		flags,
				vm_map_kernel_flags_t vmk_flags,
				vm_tag_t	tag,
				memory_object_control_t control,
				vm_object_offset_t offset,
				boolean_t	needs_copy,
				vm_prot_t	cur_protection,
				vm_prot_t	max_protection,
				vm_inherit_t	inheritance);

#endif /* XNU_KERNEL_PRIVATE */
/* Deallocate a region */
extern kern_return_t	vm_map_remove(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				boolean_t	flags);

/* Deallocate a region when the map is already locked */
extern kern_return_t	vm_map_remove_locked(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				boolean_t	flags);

/* Discard a copy without using it */
extern void		vm_map_copy_discard(
				vm_map_copy_t	copy);

/* Overwrite existing memory with a copy */
extern kern_return_t	vm_map_copy_overwrite(
				vm_map_t	dst_map,
				vm_map_address_t dst_addr,
				vm_map_copy_t	copy,
				boolean_t	interruptible);

/* Returns TRUE if the size of the vm_map_copy equals the size parameter,
 * FALSE otherwise */
extern boolean_t	vm_map_copy_validate_size(
				vm_map_t	dst_map,
				vm_map_copy_t	copy,
				vm_map_size_t	*size);
/* Place a copy into a map */
extern kern_return_t	vm_map_copyout(
				vm_map_t	dst_map,
				vm_map_address_t *dst_addr,	/* OUT */
				vm_map_copy_t	copy);

extern kern_return_t	vm_map_copyout_size(
				vm_map_t	dst_map,
				vm_map_address_t *dst_addr,	/* OUT */
				vm_map_copy_t	copy,
				vm_map_size_t	copy_size);

extern kern_return_t	vm_map_copyout_internal(
				vm_map_t	dst_map,
				vm_map_address_t *dst_addr,	/* OUT */
				vm_map_copy_t	copy,
				vm_map_size_t	copy_size,
				boolean_t	consume_on_success,
				vm_prot_t	cur_protection,
				vm_prot_t	max_protection,
				vm_inherit_t	inheritance);

extern kern_return_t	vm_map_copyin(
				vm_map_t	src_map,
				vm_map_address_t src_addr,
				vm_map_size_t	len,
				boolean_t	src_destroy,
				vm_map_copy_t	*copy_result);	/* OUT */

extern kern_return_t	vm_map_copyin_common(
				vm_map_t	src_map,
				vm_map_address_t src_addr,
				vm_map_size_t	len,
				boolean_t	src_destroy,
				boolean_t	src_volatile,
				vm_map_copy_t	*copy_result,	/* OUT */
				boolean_t	use_maxprot);

#define VM_MAP_COPYIN_SRC_DESTROY	0x00000001
#define VM_MAP_COPYIN_USE_MAXPROT	0x00000002
#define VM_MAP_COPYIN_ENTRY_LIST	0x00000004
#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
#define VM_MAP_COPYIN_ALL_FLAGS		0x0000000F
extern kern_return_t	vm_map_copyin_internal(
				vm_map_t	src_map,
				vm_map_address_t src_addr,
				vm_map_size_t	len,
				int		flags,
				vm_map_copy_t	*copy_result);	/* OUT */

extern kern_return_t	vm_map_copy_extract(
				vm_map_t	src_map,
				vm_map_address_t src_addr,
				vm_map_size_t	len,
				vm_map_copy_t	*copy_result,	/* OUT */
				vm_prot_t	*cur_prot,	/* OUT */
				vm_prot_t	*max_prot);
extern void		vm_map_disable_NX(
				vm_map_t	map);

extern void		vm_map_disallow_data_exec(
				vm_map_t	map);

extern void		vm_map_set_64bit(
				vm_map_t	map);

extern void		vm_map_set_32bit(
				vm_map_t	map);

extern void		vm_map_set_jumbo(
				vm_map_t	map);

extern boolean_t	vm_map_has_hard_pagezero(
				vm_map_t	map,
				vm_map_offset_t	pagezero_size);

extern void		vm_commit_pagezero_status(vm_map_t tmap);

#ifdef __arm__
static inline boolean_t vm_map_is_64bit(__unused vm_map_t map) { return 0; }
#else
extern boolean_t	vm_map_is_64bit(
				vm_map_t	map);
#endif

extern kern_return_t	vm_map_raise_max_offset(
				vm_map_t	map,
				vm_map_offset_t	new_max_offset);

extern kern_return_t	vm_map_raise_min_offset(
				vm_map_t	map,
				vm_map_offset_t	new_min_offset);

#if __x86_64__
extern void		vm_map_set_high_start(
				vm_map_t	map,
				vm_map_offset_t	high_start);
#endif /* __x86_64__ */

extern vm_map_offset_t	vm_compute_max_offset(
				boolean_t	is64);

extern void		vm_map_get_max_aslr_slide_section(
				vm_map_t	map,
				int64_t		*max_sections,
				int64_t		*section_size);

extern uint64_t		vm_map_get_max_aslr_slide_pages(
				vm_map_t	map);

extern uint64_t		vm_map_get_max_loader_aslr_slide_pages(
				vm_map_t	map);

extern void		vm_map_set_user_wire_limit(
				vm_map_t	map,
				vm_size_t	limit);

extern void		vm_map_switch_protect(
				vm_map_t	map,
				boolean_t	val);

extern void		vm_map_iokit_mapped_region(
				vm_map_t	map,
				vm_size_t	bytes);

extern void		vm_map_iokit_unmapped_region(
				vm_map_t	map,
				vm_size_t	bytes);

extern boolean_t	first_free_is_valid(vm_map_t map);

extern int		vm_map_page_shift(
				vm_map_t	map);

extern vm_map_offset_t	vm_map_page_mask(
				vm_map_t	map);

extern int		vm_map_page_size(
				vm_map_t	map);
extern vm_map_offset_t	vm_map_round_page_mask(
				vm_map_offset_t	offset,
				vm_map_offset_t	mask);

extern vm_map_offset_t	vm_map_trunc_page_mask(
				vm_map_offset_t	offset,
				vm_map_offset_t	mask);

extern boolean_t	vm_map_page_aligned(
				vm_map_offset_t	offset,
				vm_map_offset_t	mask);
#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t	vm_map_page_info(
				vm_map_t	map,
				vm_map_offset_t	offset,
				vm_page_info_flavor_t flavor,
				vm_page_info_t	info,
				mach_msg_type_number_t *count);
extern kern_return_t	vm_map_page_range_info_internal(
				vm_map_t	map,
				vm_map_offset_t	start_offset,
				vm_map_offset_t	end_offset,
				vm_page_info_flavor_t flavor,
				vm_page_info_t	info,
				mach_msg_type_number_t *count);
#endif /* XNU_KERNEL_PRIVATE */
#ifdef	MACH_KERNEL_PRIVATE

/*
 *	Macros to invoke vm_map_copyin_common.  vm_map_copyin is the
 *	usual form; it handles a copyin based on the current protection
 *	(a current protection of VM_PROT_NONE is a failure).
 *	vm_map_copyin_maxprot handles a copyin based on maximum possible
 *	access.  The difference is that a region with no current access
 *	BUT possible maximum access is rejected by vm_map_copyin(), but
 *	returned by vm_map_copyin_maxprot.
 */
#define	vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \
		vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
					FALSE, copy_result, FALSE)

#define vm_map_copyin_maxprot(src_map, \
			      src_addr, len, src_destroy, copy_result) \
		vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
					FALSE, copy_result, TRUE)
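/*
 * A minimal usage sketch (illustrative only): moving a region between
 * maps by pairing vm_map_copyin() with vm_map_copyout().  On copyout
 * failure the copy object is not consumed and must be discarded.
 *
 *	vm_map_copy_t copy;
 *	kern_return_t kr;
 *
 *	kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
 *	if (kr == KERN_SUCCESS) {
 *		kr = vm_map_copyout(dst_map, &dst_addr, copy);
 *		if (kr != KERN_SUCCESS)
 *			vm_map_copy_discard(copy);
 *	}
 */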
/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x,pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x,pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_PAGE_SHIFT(map) ((map) ? (map)->hdr.page_shift : PAGE_SHIFT)
#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x,pgmask) (((x) & (pgmask)) == 0)
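/*
 * Worked example (illustrative only): with 4K pages
 * VM_MAP_PAGE_MASK(map) is 0xfff, so
 * VM_MAP_ROUND_PAGE(0x1001, 0xfff) == 0x2000,
 * VM_MAP_TRUNC_PAGE(0x1fff, 0xfff) == 0x1000, and
 * VM_MAP_PAGE_ALIGNED(0x2000, 0xfff) is true.
 */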
static inline void vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
{
	switch (prot) {
	case MAP_MEM_NOOP:		break;
	case MAP_MEM_IO:		*wimg = VM_WIMG_IO; break;
	case MAP_MEM_COPYBACK:		*wimg = VM_WIMG_USE_DEFAULT; break;
	case MAP_MEM_INNERWBACK:	*wimg = VM_WIMG_INNERWBACK; break;
	case MAP_MEM_POSTED:		*wimg = VM_WIMG_POSTED; break;
	case MAP_MEM_WTHRU:		*wimg = VM_WIMG_WTHRU; break;
	case MAP_MEM_WCOMB:		*wimg = VM_WIMG_WCOMB; break;
	default:
		panic("Unrecognized mapping type %u\n", prot);
	}
}
#endif /* MACH_KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
#endif /* XNU_KERNEL_PRIVATE */
#define vm_map_round_page(x,pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define vm_map_trunc_page(x,pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Flags for vm_map_remove() and vm_map_delete()
 */
#define VM_MAP_NO_FLAGS			0x0
#define VM_MAP_REMOVE_KUNWIRE		0x1
#define VM_MAP_REMOVE_INTERRUPTIBLE	0x2
#define VM_MAP_REMOVE_WAIT_FOR_KWIRE	0x4
#define VM_MAP_REMOVE_SAVE_ENTRIES	0x8
#define VM_MAP_REMOVE_NO_PMAP_CLEANUP	0x10
#define VM_MAP_REMOVE_NO_MAP_ALIGN	0x20
#define VM_MAP_REMOVE_NO_UNNESTING	0x40
#define VM_MAP_REMOVE_IMMUTABLE		0x80
/* Support for UPLs from vm_maps */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t	vm_map_get_upl(
				vm_map_t	target_map,
				vm_map_offset_t	map_offset,
				upl_size_t	*size,
				upl_t		*upl,
				upl_page_info_array_t page_info,
				unsigned int	*page_infoCnt,
				upl_control_flags_t *flags,
				vm_tag_t	tag,
				int		force_data_sync);

#endif /* XNU_KERNEL_PRIVATE */
extern void
vm_map_sizes(vm_map_t map,
	     vm_map_size_t *psize,
	     vm_map_size_t *pfree,
	     vm_map_size_t *plargest_free);

#if CONFIG_DYNAMIC_CODE_SIGNING
extern kern_return_t	vm_map_sign(vm_map_t map,
				    vm_map_offset_t start,
				    vm_map_offset_t end);
#endif
extern kern_return_t	vm_map_partial_reap(
				vm_map_t map,
				unsigned int *reclaimed_resident,
				unsigned int *reclaimed_compressed);

#if DEVELOPMENT || DEBUG

extern int		vm_map_disconnect_page_mappings(
				vm_map_t map,
				boolean_t);

#endif

#if CONFIG_FREEZE

extern kern_return_t	vm_map_freeze(
				vm_map_t map,
				unsigned int *purgeable_count,
				unsigned int *wired_count,
				unsigned int *clean_count,
				unsigned int *dirty_count,
				unsigned int dirty_budget,
				boolean_t *has_shared);

#endif
/*
 * In some cases, we don't have a real VM object but still want to return a
 * unique ID (to avoid a memory region looking like shared memory), so build
 * a fake pointer based on the map's ledger and the index of the ledger being
 * reported.
 */
#define INFO_MAKE_FAKE_OBJECT_ID(map,ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id)))

__END_DECLS

#endif	/* KERNEL_PRIVATE */

#endif	/* _VM_VM_MAP_H_ */