/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory map module definitions.
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>

#include <sys/cdefs.h>
extern void		vm_map_reference(vm_map_t map);
extern vm_map_t		current_map(void);

/* Setup reserved areas in a new VM map */
extern kern_return_t	vm_map_exec(

#ifdef	MACH_KERNEL_PRIVATE

#include <task_swapper.h>
#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/lock.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>

#define current_map_fast()	(current_thread()->map)
#define	current_map()		(current_map_fast())

#include <vm/vm_map_store.h>
/*
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup.
 *	vm_map_copy_t		represents memory copied from an address map,
 *				used for inter-map copy operations.
 */
typedef struct vm_map_entry	*vm_map_entry_t;
#define VM_MAP_ENTRY_NULL	((vm_map_entry_t) 0)
/*
 *	Type:		vm_map_object_t [internal use only]
 *
 *		The target of an address mapping, either a virtual
 *		memory object or a sub map (of the kernel map).
 */
typedef union vm_map_object {
	vm_object_t		vm_object;	/* a virtual memory object */
	vm_map_t		sub_map;	/* belongs to another map */
} vm_map_object_t;
#define named_entry_lock_init(object)	lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object)	lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object)	lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)	lck_mtx_unlock(&(object)->Lock)
/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *		Description of a mapping to a memory cache object.
 *
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions (read, write)
 *		with which it can be mapped.
 */
struct vm_named_entry {
	decl_lck_mtx_data(,	Lock)		/* Synchronization */
	union {					/* backing storage */
		vm_object_t	object;		/* object I point to */
		memory_object_t	pager;		/* abstract memory object (pager) port */
		vm_map_t	map;		/* map backing submap */
	} backing;
	vm_object_offset_t	offset;		/* offset into object */
	vm_object_size_t	size;		/* size of region */
	vm_prot_t		protection;	/* access permissions */
	int			ref_count;	/* Number of references */
	unsigned int				/* Is backing.xxx : */
	/* boolean_t */		internal:1,	/* ... an internal object */
	/* boolean_t */		is_sub_map:1,	/* ... a submap? */
	/* boolean_t */		is_pager:1;	/* ... a pager port */
};
/*
 *	Type:		vm_map_entry_t [internal use only]
 *
 *		A single mapping within an address map.
 *
 *		Address map entries consist of start and end addresses,
 *		a VM object (or sub map) and offset into that object,
 *		and user-exported inheritance and protection information.
 *		Control information for virtual copy operations is also
 *		stored in the address map entry.
 */
struct vm_map_links {
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vm_map_offset_t		start;		/* start address */
	vm_map_offset_t		end;		/* end address */
};
struct vm_map_entry {
	struct vm_map_links	links;		/* links to other entries */
#define vme_prev		links.prev
#define vme_next		links.next
#define vme_start		links.start
#define vme_end			links.end

	struct vm_map_store	store;
	union vm_map_object	object;		/* object I point to */
	vm_object_offset_t	offset;		/* offset into object */
	unsigned int
	/* boolean_t */		is_shared:1,	/* region is shared */
	/* boolean_t */		is_sub_map:1,	/* Is "object" a submap? */
	/* boolean_t */		in_transition:1, /* Entry being changed */
	/* boolean_t */		needs_wakeup:1,	/* Waiters on in_transition */
	/* vm_behavior_t */	behavior:2,	/* user paging behavior hint */
						/* behavior is not defined for submap type */
	/* boolean_t */		needs_copy:1,	/* does the object need to be copied? */
	/* Only in task maps: */
	/* vm_prot_t */		protection:3,	/* protection code */
	/* vm_prot_t */		max_protection:3, /* maximum protection */
	/* vm_inherit_t */	inheritance:2,	/* inheritance */
	/* boolean_t */		use_pmap:1,	/* nested pmaps */
	/*
	 * The "alias" field can be updated while holding the VM map lock
	 * "shared".  That is OK as long as it is the only field that can be
	 * updated without the VM map "exclusive" lock.
	 */
	/* unsigned char */	alias:8,	/* user alias */
	/* boolean_t */		no_cache:1,	/* should new pages be cached? */
	/* boolean_t */		permanent:1,	/* mapping cannot be removed */
	/* boolean_t */		superpage_size:3, /* use superpages of a certain size */
	/* boolean_t */		zero_wired_pages:1, /* zero out the wired pages of this entry if it is being deleted without unwiring them */
	/* boolean_t */		used_for_jit:1,
	/* boolean_t */		from_reserved_zone:1; /* allocated from the kernel reserved zone */

	unsigned short		wired_count;	/* can be paged if == 0 */
	unsigned short		user_wired_count; /* for vm_wire */

#define MAP_ENTRY_CREATION_DEBUG (1)

#if	MAP_ENTRY_CREATION_DEBUG
	uintptr_t		vme_bt[16];
#endif
};
/*
 * Convenience macros for dealing with superpages.
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h.
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) ((a) & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) (((a) + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)
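/*
 * Illustrative arithmetic only (not part of the interface): assuming
 * 4 KB base pages and SUPERPAGE_NBASEPAGES == 512 (e.g. x86_64),
 * SUPERPAGE_SIZE is 2 MB, so:
 *
 *	SUPERPAGE_ROUND_DOWN(0x200ffff) == 0x2000000
 *	SUPERPAGE_ROUND_UP(0x2000001)   == 0x2200000
 *
 * i.e. addresses are aligned down/up to a 2 MB superpage boundary.
 */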
/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT	65535
/*
 *	Type:		struct vm_map_header
 *
 *		Header for a vm_map and a vm_map_copy.
 */
struct vm_map_header {
	struct vm_map_links	links;		/* first, last, min, max */
	int			nentries;	/* Number of entries */
	boolean_t		entries_pageable;
						/* are map entries pageable? */
	vm_map_offset_t		highest_entry_end_addr;	/* The ending address of the highest allocated vm_map_entry_t */
#ifdef VM_MAP_STORE_USE_RB
	struct rb_head		rb_head_store;
#endif
};
/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 */
struct _vm_map {
	lock_t			lock;		/* uni- and smp-lock */
	struct vm_map_header	hdr;		/* Map entry header */
#define min_offset		hdr.links.start	/* start of range */
#define max_offset		hdr.links.end	/* end of range */
#define highest_entry_end	hdr.highest_entry_end_addr
	pmap_t			pmap;		/* Physical map */
	vm_map_size_t		size;		/* virtual size */
	vm_map_size_t		user_wire_limit; /* rlimit on user locked memory */
	vm_map_size_t		user_wire_size;	/* current size of user locked memory in this map */
	int			ref_count;	/* Reference count */
#if	TASK_SWAPPER
	int			res_count;	/* Residence count (swap) */
	int			sw_state;	/* Swap state */
#endif	/* TASK_SWAPPER */
	decl_lck_mtx_data(,	s_lock)		/* Lock for ref and res count fields */
	lck_mtx_ext_t		s_lock_ext;
	vm_map_entry_t		hint;		/* hint for quick lookups */
	vm_map_entry_t		first_free;	/* First free space hint */
	unsigned int
	/* boolean_t */		wait_for_space:1, /* Should callers wait for space? */
	/* boolean_t */		wiring_required:1, /* All memory wired? */
	/* boolean_t */		no_zero_fill:1,	/* No zero fill of absent pages */
	/* boolean_t */		mapped_in_other_pmaps:1, /* has this submap been mapped in maps that use a different pmap? */
	/* boolean_t */		switch_protect:1, /* Protect map from write faults while switched */
	/* boolean_t */		disable_vmentry_reuse:1, /* All vm entries should keep using newer and higher addresses in the map */
	/* boolean_t */		map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
	/* reserved */		pad:25;
	unsigned int		timestamp;	/* Version number */
	unsigned int		color_rr;	/* next color (not protected by a lock) */
	void			*default_freezer_handle;
	boolean_t		jit_entry_exists;
};
#define vm_map_to_entry(map)	((struct vm_map_entry *) &(map)->hdr.links)
#define vm_map_first_entry(map)	((map)->hdr.links.next)
#define vm_map_last_entry(map)	((map)->hdr.links.prev)
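/*
 * Usage sketch (illustrative only; assumes the map lock is already held
 * by the caller): walk every entry in a map, using vm_map_to_entry() as
 * the list sentinel.
 *
 *	vm_map_entry_t entry;
 *
 *	for (entry = vm_map_first_entry(map);
 *	     entry != vm_map_to_entry(map);
 *	     entry = entry->vme_next) {
 *		... inspect entry->vme_start / entry->vme_end ...
 *	}
 */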
#if	TASK_SWAPPER
/*
 *	VM map swap states.  There are no transition states.
 */
#define MAP_SW_IN	1	/* map is swapped in; residence count > 0 */
#define MAP_SW_OUT	2	/* map is out (res_count == 0) */
#endif	/* TASK_SWAPPER */
/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *		Implementation: just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int	main_timestamp;
} vm_map_version_t;
/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *		There are three formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header links
 *		entry onto which the other entries that represent
 *		the region are chained.
 *
 *		The second format is a single vm object.  This was used
 *		primarily in the pageout path - but is not currently used
 *		except for placeholder copy objects (see vm_map_copy_copy()).
 *
 *		The third format is a kernel buffer copy object - for data
 *		small enough that physical copies were the most efficient
 *		method.
 */
struct vm_map_copy {
	int			type;
#define VM_MAP_COPY_ENTRY_LIST		1
#define VM_MAP_COPY_OBJECT		2
#define VM_MAP_COPY_KERNEL_BUFFER	3
	vm_object_offset_t	offset;
	vm_map_size_t		size;
	union {
	    struct vm_map_header	hdr;	/* ENTRY_LIST */
	    vm_object_t			object;	/* OBJECT */
	    struct {
		void			*kdata;	      /* KERNEL_BUFFER */
		vm_size_t		kalloc_size;  /* size of this copy_t */
	    } c_k;
	} c_u;
};

#define cpy_hdr			c_u.hdr
#define cpy_object		c_u.object
#define cpy_kdata		c_u.c_k.kdata
#define cpy_kalloc_size		c_u.c_k.kalloc_size
/*
 *	Useful macros for entry list copy objects
 */
#define vm_map_copy_to_entry(copy)		\
		((struct vm_map_entry *) &(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy)		\
		((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy)		\
		((copy)->cpy_hdr.links.prev)
/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *
 *		Perform locking on the data portion of a map.
 *		When multiple maps are to be locked, order by map address.
 *		(See vm_map.c::vm_remap())
 */
#define vm_map_lock_init(map)						\
	((map)->timestamp = 0 ,						\
	 lock_init(&(map)->lock, TRUE, 0, 0))

#define vm_map_lock(map)		lock_write(&(map)->lock)
#define vm_map_unlock(map)						\
	((map)->timestamp++ ,	lock_write_done(&(map)->lock))
#define vm_map_lock_read(map)		lock_read(&(map)->lock)
#define vm_map_unlock_read(map)		lock_read_done(&(map)->lock)
#define vm_map_lock_write_to_read(map)					\
	((map)->timestamp++ ,	lock_write_to_read(&(map)->lock))
/*
 * lock_read_to_write() returns FALSE on failure.  This macro therefore
 * evaluates to zero on success and to a non-zero value on failure.
 */
#define vm_map_lock_read_to_write(map)	(lock_read_to_write(&(map)->lock) != TRUE)
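/*
 * Usage sketch (illustrative only): upgrading a read lock to a write lock
 * can fail, in which case the read lock has been dropped and the caller
 * typically re-takes the lock and revalidates whatever it looked up.
 *
 *	vm_map_lock_read(map);
 *	... read-only inspection of the map ...
 *	if (vm_map_lock_read_to_write(map)) {
 *		// upgrade failed: the read lock is gone, start over
 *		vm_map_lock(map);
 *		... revalidate anything looked up under the read lock ...
 *	}
 *	... modify the map ...
 *	vm_map_unlock(map);
 */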
/*
 *	Exported procedures that operate on vm_map_t.
 */

/* Initialize the module */
extern void		vm_map_init(void) __attribute__((section("__TEXT, initcode")));

extern void		vm_kernel_reserved_entry_init(void) __attribute__((section("__TEXT, initcode")));
/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t	vm_map_find_space(
				vm_map_address_t	*address,	/* OUT */
				vm_map_offset_t		mask,
				vm_map_entry_t		*o_entry);	/* OUT */
extern void		vm_map_clip_start(
				vm_map_entry_t		entry,
				vm_map_offset_t		endaddr);

extern void		vm_map_clip_end(
				vm_map_entry_t		entry,
				vm_map_offset_t		endaddr);

extern boolean_t	vm_map_entry_should_cow_for_true_share(
				vm_map_entry_t		entry);
#endif /* !CONFIG_EMBEDDED */
/* Look up the map entry containing (or immediately preceding) the
 * specified address in the given map */
extern boolean_t	vm_map_lookup_entry(
				vm_map_address_t	address,
				vm_map_entry_t		*entry);	/* OUT */
/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t	vm_map_lookup_locked(
				vm_map_t		*var_map,	/* IN/OUT */
				vm_map_address_t	vaddr,
				vm_prot_t		fault_type,
				int			object_lock_type,
				vm_map_version_t	*out_version,	/* OUT */
				vm_object_t		*object,	/* OUT */
				vm_object_offset_t	*offset,	/* OUT */
				vm_prot_t		*out_prot,	/* OUT */
				boolean_t		*wired,		/* OUT */
				vm_object_fault_info_t	fault_info,	/* OUT */
				vm_map_t		*real_map);	/* OUT */
/* Verifies that the map has not changed since the given version. */
extern boolean_t	vm_map_verify(
				vm_map_version_t	*version);	/* REF */
extern vm_map_entry_t	vm_map_entry_insert(
				vm_map_entry_t		insp_entry,
				vm_map_offset_t		start,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				boolean_t		in_transition,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_behavior_t		behavior,
				vm_inherit_t		inheritance,
				unsigned		wired_count,
				unsigned int		superpage_size);
/*
 *	Functions implemented as macros
 */
#define		vm_map_min(map)	((map)->min_offset)
						/* Lowest valid address in
						 * a map */

#define		vm_map_max(map)	((map)->max_offset)
						/* Highest valid address */

#define		vm_map_pmap(map)	((map)->pmap)
						/* Physical map associated
						 * with this address map */

#define		vm_map_verify_done(map, version)	vm_map_unlock_read(map)
						/* Operation that required
						 * a verified lookup is
						 * now complete */
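/*
 * Usage sketch (illustrative only; parameter lists abbreviated): the
 * version returned by vm_map_lookup_locked() can be used to check that
 * the map has not changed before committing to the result of the lookup.
 *
 *	vm_map_version_t version;
 *
 *	... vm_map_lookup_locked(&map, vaddr, ..., &version, &object, ...); ...
 *	... drop the map lock, do slow work (e.g. resolve a fault) ...
 *
 *	vm_map_lock_read(map);
 *	if (!vm_map_verify(map, &version)) {
 *		// the map changed underneath us: redo the lookup
 *	}
 *	... use the earlier lookup result ...
 *	vm_map_verify_done(map, &version);	// releases the read lock
 */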
/*
 *	Macros/functions for map residence counts and swapin/out of vm maps
 */
#if	TASK_SWAPPER

#if	MACH_ASSERT
/* Gain a reference to an existing map */
extern void		vm_map_reference(
				vm_map_t	map);
/* Lose a residence count */
extern void		vm_map_res_deallocate(
				vm_map_t	map);
/* Gain a residence count on a map */
extern void		vm_map_res_reference(
				vm_map_t	map);
/* Gain reference & residence counts to possibly swapped-out map */
extern void		vm_map_reference_swap(
				vm_map_t	map);
#else	/* MACH_ASSERT */

#define vm_map_reference(map)			\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	if (Map) {				\
		lck_mtx_lock(&Map->s_lock);	\
		Map->res_count++;		\
		Map->ref_count++;		\
		lck_mtx_unlock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_res_reference(map)		\
MACRO_BEGIN					\
	vm_map_t Lmap = (map);			\
	if (Lmap->res_count == 0) {		\
		lck_mtx_unlock(&Lmap->s_lock);	\
		vm_map_lock(Lmap);		\
		vm_map_swapin(Lmap);		\
		lck_mtx_lock(&Lmap->s_lock);	\
		++Lmap->res_count;		\
		vm_map_unlock(Lmap);		\
	} else					\
		++Lmap->res_count;		\
MACRO_END

#define vm_map_res_deallocate(map)		\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	if (--Map->res_count == 0) {		\
		lck_mtx_unlock(&Map->s_lock);	\
		vm_map_lock(Map);		\
		vm_map_swapout(Map);		\
		vm_map_unlock(Map);		\
		lck_mtx_lock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_reference_swap(map)		\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	lck_mtx_lock(&Map->s_lock);		\
	++Map->ref_count;			\
	vm_map_res_reference(Map);		\
	lck_mtx_unlock(&Map->s_lock);		\
MACRO_END

#endif	/* MACH_ASSERT */
extern void		vm_map_swapin(
				vm_map_t	map);

extern void		vm_map_swapout(
				vm_map_t	map);

#else	/* TASK_SWAPPER */

#define vm_map_reference(map)			\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	if (Map) {				\
		lck_mtx_lock(&Map->s_lock);	\
		Map->ref_count++;		\
		lck_mtx_unlock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_reference_swap(map)	vm_map_reference(map)
#define vm_map_res_reference(map)
#define vm_map_res_deallocate(map)

#endif	/* TASK_SWAPPER */
/*
 *	Submap object.  Must be used to create memory to be put
 *	in a submap by vm_map_submap.
 */
extern vm_object_t	vm_submap_object;
/*
 *	Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible)		\
	((map)->timestamp++ ,				\
	 thread_sleep_lock_write((event_t)&(map)->hdr,	\
			 &(map)->lock, interruptible))

#define vm_map_entry_wakeup(map)	\
	thread_wakeup((event_t)(&(map)->hdr))
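/*
 * Usage sketch (illustrative only): a typical pattern when an entry has
 * been marked in_transition by another thread.  The map must be locked
 * for write; vm_map_entry_wait() drops that lock while sleeping, so the
 * lock must be re-taken and the entry looked up again afterwards.
 *
 *	while (entry->in_transition) {
 *		entry->needs_wakeup = TRUE;
 *		vm_map_entry_wait(map, THREAD_UNINT);
 *		vm_map_lock(map);		// re-acquire the map lock
 *		... re-lookup the entry, it may have changed ...
 *	}
 */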
#define	vm_map_ref_fast(map)				\
	MACRO_BEGIN					\
	lck_mtx_lock(&map->s_lock);			\
	map->ref_count++;				\
	vm_map_res_reference(map);			\
	lck_mtx_unlock(&map->s_lock);			\
	MACRO_END

#define	vm_map_dealloc_fast(map)			\
	MACRO_BEGIN					\
	register int c;					\
							\
	lck_mtx_lock(&map->s_lock);			\
	c = --map->ref_count;				\
	if (c > 0)					\
		vm_map_res_deallocate(map);		\
	lck_mtx_unlock(&map->s_lock);			\
	if (c == 0)					\
		vm_map_destroy(map);			\
	MACRO_END
/* simplify map entries */
extern void		vm_map_simplify_entry(
				vm_map_entry_t		this_entry);
extern void		vm_map_simplify(
				vm_map_offset_t		start);
/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t	vm_map_copy_copy(

/* Create a copy object from an object. */
extern kern_return_t	vm_map_copyin_object(
				vm_object_offset_t	offset,
				vm_object_size_t	size,
				vm_map_copy_t		*copy_result);	/* OUT */
extern kern_return_t	vm_map_random_address_for_size(
				vm_map_offset_t		*address,
/* Enter a mapping */
extern kern_return_t	vm_map_enter(
				vm_map_offset_t		*address,
				vm_map_offset_t		mask,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_inherit_t		inheritance);
/* XXX should go away - replaced with regular enter of contig object */
extern kern_return_t	vm_map_enter_cpm(
				vm_map_address_t	*addr,
extern kern_return_t	vm_map_remap(
				vm_map_offset_t		*address,
				vm_map_offset_t		mask,
				vm_map_offset_t		memory_address,
				vm_prot_t		*cur_protection,
				vm_prot_t		*max_protection,
				vm_inherit_t		inheritance);
/*
 * Read and write from a kernel buffer to a specified map.
 */
extern kern_return_t	vm_map_write_user(
				vm_map_offset_t		dst_addr,

extern kern_return_t	vm_map_read_user(
				vm_map_offset_t		src_addr,
/* Create a new task map using an existing task map as a template. */
extern vm_map_t		vm_map_fork(

/* Change inheritance */
extern kern_return_t	vm_map_inherit(
				vm_map_offset_t		start,
				vm_inherit_t		new_inheritance);
/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t	vm_map_machine_attribute(
				vm_map_offset_t		start,
				vm_machine_attribute_t	attribute,
				vm_machine_attribute_val_t *value);	/* IN/OUT */

extern kern_return_t	vm_map_msync(
				vm_map_address_t	address,
				vm_sync_t		sync_flags);
/* Set paging behavior */
extern kern_return_t	vm_map_behavior_set(
				vm_map_offset_t		start,
				vm_behavior_t		new_behavior);

extern kern_return_t	vm_map_purgable_control(
				vm_map_offset_t		address,
				vm_purgable_t		control,
extern kern_return_t	vm_map_region(
				vm_map_offset_t		*address,
				vm_region_flavor_t	flavor,
				vm_region_info_t	info,
				mach_msg_type_number_t	*count,
				mach_port_t		*object_name);

extern kern_return_t	vm_map_region_recurse_64(
				vm_map_offset_t		*address,
				natural_t		*nesting_depth,
				vm_region_submap_info_64_t info,
				mach_msg_type_number_t	*count);
extern kern_return_t	vm_map_page_query_internal(
				vm_map_offset_t		offset,

extern kern_return_t	vm_map_submap(
				vm_map_offset_t		start,
				vm_map_offset_t		offset,

extern void		vm_map_submap_pmap_clean(
				vm_map_offset_t		start,
				vm_map_offset_t		offset);
/* Convert from a map entry port to a map */
extern vm_map_t		convert_port_entry_to_map(

/* Convert from a port to a vm_object */
extern vm_object_t	convert_port_entry_to_object(

extern kern_return_t	vm_map_set_cache_attr(

/* definitions related to overriding the NX behavior */

#define VM_ABI_32	0x1
#define VM_ABI_64	0x2

extern int		override_nx(vm_map_t map, uint32_t user_tag);
#endif	/* MACH_KERNEL_PRIVATE */
/* Create an empty map */
extern vm_map_t		vm_map_create(
				vm_map_offset_t		min_off,
				vm_map_offset_t		max_off,

/* Get rid of a map */
extern void		vm_map_destroy(

/* Lose a reference */
extern void		vm_map_deallocate(

extern vm_map_t		vm_map_switch(
/* Change protection */
extern kern_return_t	vm_map_protect(
				vm_map_offset_t		start,

/* Check protection */
extern boolean_t	vm_map_check_protection(
				vm_map_offset_t		start,
				vm_prot_t		protection);
/* wire down a region */
extern kern_return_t	vm_map_wire(
				vm_map_offset_t		start,
				vm_prot_t		access_type,
				boolean_t		user_wire);

/* unwire a region */
extern kern_return_t	vm_map_unwire(
				vm_map_offset_t		start,
				boolean_t		user_wire);
/* Enter a mapping of a memory object */
extern kern_return_t	vm_map_enter_mem_object(
				vm_map_offset_t		*address,
				vm_map_offset_t		mask,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_inherit_t		inheritance);

/* Enter a mapping of a memory object */
extern kern_return_t	vm_map_enter_mem_object_control(
				vm_map_offset_t		*address,
				vm_map_offset_t		mask,
				memory_object_control_t	control,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_inherit_t		inheritance);
/* Deallocate a region */
extern kern_return_t	vm_map_remove(
				vm_map_offset_t		start,

/* Discard a copy without using it */
extern void		vm_map_copy_discard(

/* Overwrite existing memory with a copy */
extern kern_return_t	vm_map_copy_overwrite(
				vm_map_address_t	dst_addr,
				boolean_t		interruptible);
/* Place a copy into a map */
extern kern_return_t	vm_map_copyout(
				vm_map_address_t	*dst_addr,	/* OUT */

extern kern_return_t	vm_map_copyin(
				vm_map_address_t	src_addr,
				boolean_t		src_destroy,
				vm_map_copy_t		*copy_result);	/* OUT */
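/*
 * Usage sketch (illustrative only; parameter lists are abbreviated here,
 * see the full prototypes): a region is typically moved between maps by
 * capturing it into a vm_map_copy_t and then either placing it in the
 * destination map or discarding it on failure.
 *
 *	vm_map_copy_t	copy;
 *	kern_return_t	kr;
 *
 *	kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
 *	if (kr != KERN_SUCCESS)
 *		return kr;
 *
 *	kr = vm_map_copyout(dst_map, &dst_addr, copy);
 *	if (kr != KERN_SUCCESS)
 *		vm_map_copy_discard(copy);	// nobody consumed the copy
 */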
extern kern_return_t	vm_map_copyin_common(
				vm_map_address_t	src_addr,
				boolean_t		src_destroy,
				boolean_t		src_volatile,
				vm_map_copy_t		*copy_result,	/* OUT */
				boolean_t		use_maxprot);
extern void		vm_map_disable_NX(

extern void		vm_map_disallow_data_exec(

extern void		vm_map_set_64bit(

extern void		vm_map_set_32bit(

extern boolean_t	vm_map_has_hard_pagezero(
				vm_map_offset_t		pagezero_size);

extern boolean_t	vm_map_is_64bit(

#define vm_map_has_4GB_pagezero(map)	vm_map_has_hard_pagezero(map, (vm_map_offset_t)0x100000000ULL)
extern void		vm_map_set_4GB_pagezero(

extern void		vm_map_clear_4GB_pagezero(

extern kern_return_t	vm_map_raise_max_offset(
				vm_map_offset_t		new_max_offset);

extern kern_return_t	vm_map_raise_min_offset(
				vm_map_offset_t		new_min_offset);

extern vm_map_offset_t	vm_compute_max_offset(

extern void		vm_map_set_user_wire_limit(

extern void		vm_map_switch_protect(

extern boolean_t	first_free_is_valid(vm_map_t);
#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t	vm_map_page_info(
				vm_map_offset_t		offset,
				vm_page_info_flavor_t	flavor,
				vm_page_info_t		info,
				mach_msg_type_number_t	*count);
#endif /* XNU_KERNEL_PRIVATE */
#ifdef	MACH_KERNEL_PRIVATE

/*
 *	Macros to invoke vm_map_copyin_common.  vm_map_copyin is the
 *	usual form; it handles a copyin based on the current protection
 *	(a region whose current protection is VM_PROT_NONE is a failure).
 *	vm_map_copyin_maxprot handles a copyin based on maximum possible
 *	access.  The difference is that a region with no current access
 *	BUT possible maximum access is rejected by vm_map_copyin(), but
 *	returned by vm_map_copyin_maxprot.
 */
#define	vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)	\
		vm_map_copyin_common(src_map, src_addr, len, src_destroy,	\
					FALSE, copy_result, FALSE)

#define vm_map_copyin_maxprot(src_map,					\
			      src_addr, len, src_destroy, copy_result)	\
		vm_map_copyin_common(src_map, src_addr, len, src_destroy,	\
					FALSE, copy_result, TRUE)
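/*
 * Illustrative difference (not compiled): for a region currently mapped
 * with no access but whose maximum protection allows reading,
 *
 *	vm_map_copyin(map, addr, len, FALSE, &copy);          // fails
 *	vm_map_copyin_maxprot(map, addr, len, FALSE, &copy);  // can succeed
 *
 * because only the maxprot variant judges the region by its maximum
 * possible access rather than its current protection.
 */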
#endif	/* MACH_KERNEL_PRIVATE */
/*
 *	Macros for rounding and truncation of vm_map offsets and sizes
 */
#define vm_map_round_page(x) (((vm_map_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_map_trunc_page(x) ((vm_map_offset_t)(x) & ~((signed)PAGE_MASK))
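/*
 * Illustrative arithmetic only: with 4 KB pages (PAGE_MASK == 0xFFF),
 *
 *	vm_map_trunc_page(0x12345) == 0x12000
 *	vm_map_round_page(0x12345) == 0x13000
 *	vm_map_round_page(0x12000) == 0x12000	// already page aligned
 */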
/*
 *	Flags for vm_map_remove() and vm_map_delete()
 */
#define	VM_MAP_NO_FLAGS			0x0
#define	VM_MAP_REMOVE_KUNWIRE		0x1
#define	VM_MAP_REMOVE_INTERRUPTIBLE	0x2
#define	VM_MAP_REMOVE_WAIT_FOR_KWIRE	0x4
#define VM_MAP_REMOVE_SAVE_ENTRIES	0x8
#define VM_MAP_REMOVE_NO_PMAP_CLEANUP	0x10
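/*
 * Usage sketch (illustrative only; the vm_map_remove() parameter list is
 * abbreviated above): the flags form a bit mask and can be OR'd together,
 * e.g. to unwire kernel wirings while tearing down a page-aligned range:
 *
 *	kr = vm_map_remove(map,
 *			   vm_map_trunc_page(start),
 *			   vm_map_round_page(end),
 *			   VM_MAP_REMOVE_KUNWIRE | VM_MAP_REMOVE_NO_PMAP_CLEANUP);
 */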
/* Support for UPLs from vm_maps */

extern kern_return_t	vm_map_get_upl(
				vm_map_t		target_map,
				vm_map_offset_t		map_offset,
				upl_page_info_array_t	page_info,
				unsigned int		*page_infoCnt,
				int			force_data_sync);
#if CONFIG_DYNAMIC_CODE_SIGNING
extern kern_return_t	vm_map_sign(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end);
#endif
void vm_map_freeze_thaw_init(void);
void vm_map_freeze_thaw(void);
void vm_map_demand_fault(void);

extern kern_return_t	vm_map_freeze_walk(
				unsigned int	*purgeable_count,
				unsigned int	*wired_count,
				unsigned int	*clean_count,
				unsigned int	*dirty_count,
				unsigned int	dirty_budget,
				boolean_t	*has_shared);

extern kern_return_t	vm_map_freeze(
				unsigned int	*purgeable_count,
				unsigned int	*wired_count,
				unsigned int	*clean_count,
				unsigned int	*dirty_count,
				unsigned int	dirty_budget,
				boolean_t	*has_shared);

extern kern_return_t	vm_map_thaw(
#endif	/* KERNEL_PRIVATE */

#endif	/* _VM_VM_MAP_H_ */