/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory map module definitions.
 */
#ifndef	_VM_VM_MAP_H_
#define	_VM_VM_MAP_H_

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>

#include <sys/cdefs.h>
#ifdef	KERNEL_PRIVATE

__BEGIN_DECLS

extern void	vm_map_reference(vm_map_t	map);
extern vm_map_t current_map(void);

/* Setup reserved areas in a new VM map */
extern kern_return_t	vm_map_exec(
				vm_map_t	new_map,
				task_t		task,
				boolean_t	is64bit,
				void		*fsroot,
				cpu_type_t	cpu);

__END_DECLS
#ifdef	MACH_KERNEL_PRIVATE

#include <task_swapper.h>
#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/lock.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>

#define current_map_fast()	(current_thread()->map)
#define	current_map()		(current_map_fast())

#include <vm/vm_map_store.h>
/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				 used for inter-map copy operations
 */
typedef struct vm_map_entry	*vm_map_entry_t;
#define VM_MAP_ENTRY_NULL	((vm_map_entry_t) 0)
/*
 *	Type:		vm_map_object_t		[internal use only]
 *
 *	Description:
 *		The target of an address mapping, either a virtual
 *		memory object or a sub map (of the kernel map).
 */
typedef union vm_map_object {
	vm_object_t		vm_object;	/* object object */
	vm_map_t		sub_map;	/* belongs to another map */
} vm_map_object_t;

#define named_entry_lock_init(object)	lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock(object)	lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)	lck_mtx_unlock(&(object)->Lock)
/*
 *	Type:		vm_named_entry_t	[internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions, (read, write)
 *		with which it can be mapped.
 */

struct vm_named_entry {
	decl_lck_mtx_data(,	Lock)		/* Synchronization */
	union {
		vm_object_t	object;		/* object I point to */
		memory_object_t	pager;		/* amo pager port */
		vm_map_t	map;		/* map backing submap */
	} backing;
	vm_object_offset_t	offset;		/* offset into object */
	vm_object_size_t	size;		/* size of region */
	vm_prot_t		protection;	/* access permissions */
	int			ref_count;	/* Number of references */
	unsigned int				/* Is backing.xxx : */
	/* boolean_t */		internal:1,	/* ... an internal object */
	/* boolean_t */		is_sub_map:1,	/* ... a submap? */
	/* boolean_t */		is_pager:1;	/* ... a pager port */
};
/*
 *	Type:		vm_map_entry_t		[internal use only]
 *
 *	Description:
 *		A single mapping within an address map.
 *
 *	Implementation:
 *		Address map entries consist of start and end addresses,
 *		a VM object (or sub map) and offset into that object,
 *		and user-exported inheritance and protection information.
 *		Control information for virtual copy operations is also
 *		stored in the address map entry.
 */

struct vm_map_links {
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vm_map_offset_t		start;		/* start address */
	vm_map_offset_t		end;		/* end address */
};

struct vm_map_entry {
	struct vm_map_links	links;		/* links to other entries */
#define vme_prev		links.prev
#define vme_next		links.next
#define vme_start		links.start
#define vme_end			links.end

	struct vm_map_store	store;
	union vm_map_object	object;		/* object I point to */
	vm_object_offset_t	offset;		/* offset into object */
	unsigned int
	/* boolean_t */		is_shared:1,	/* region is shared */
	/* boolean_t */		is_sub_map:1,	/* Is "object" a submap? */
	/* boolean_t */		in_transition:1, /* Entry being changed */
	/* boolean_t */		needs_wakeup:1,	/* Waiters on in_transition */
	/* vm_behavior_t */	behavior:2,	/* user paging behavior hint */
				/* behavior is not defined for submap type */
	/* boolean_t */		needs_copy:1,	/* object need to be copied? */
				/* Only in task maps: */
	/* vm_prot_t */		protection:3,	/* protection code */
	/* vm_prot_t */		max_protection:3, /* maximum protection */
	/* vm_inherit_t */	inheritance:2,	/* inheritance */
	/* boolean_t */		use_pmap:1,	/* nested pmaps */
	/*
	 * The "alias" field can be updated while holding the VM map lock
	 * "shared".  It's OK as long as it's the only field that can be
	 * updated without the VM map "exclusive" lock.
	 */
	/* unsigned char */	alias:8,	/* user alias */
	/* boolean_t */		no_cache:1,	/* should new pages be cached? */
	/* boolean_t */		permanent:1,	/* mapping can not be removed */
	/* boolean_t */		superpage_size:3, /* use superpages of a certain size */
	/* boolean_t */		zero_wired_pages:1, /* zero out the wired pages of this entry if it is being deleted without unwiring them */
	/* boolean_t */		used_for_jit:1,
	/* unsigned char */	pad:1;		/* available bits */
	unsigned short		wired_count;	/* can be paged if = 0 */
	unsigned short		user_wired_count; /* for vm_wire */
};
/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) (a & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) ((a + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)
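
/*
 * Illustrative example (not part of this interface): with 4 KB base
 * pages and SUPERPAGE_NBASEPAGES assumed to be 512 (a 2 MB superpage),
 * the macros above align an address as follows:
 *
 *	SUPERPAGE_SIZE                 == 4096 * 512 == 0x200000
 *	SUPERPAGE_ROUND_DOWN(0x234567) == 0x200000
 *	SUPERPAGE_ROUND_UP(0x234567)   == 0x400000
 */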
/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT		65535
/*
 *	Type:		struct vm_map_header
 *
 *	Description:
 *		Header for a vm_map and a vm_map_copy.
 */

struct vm_map_header {
	struct vm_map_links	links;		/* first, last, min, max */
	int			nentries;	/* Number of entries */
	boolean_t		entries_pageable;
						/* are map entries pageable? */
	vm_map_offset_t		highest_entry_end_addr;	/* The ending address of the highest allocated vm_map_entry_t */
#ifdef VM_MAP_STORE_USE_RB
	struct rb_head		rb_head_store;
#endif
};
/*
 *	Type:		vm_map_t		[exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 */
struct _vm_map {
	lock_t			lock;		/* uni- and smp-lock */
	struct vm_map_header	hdr;		/* Map entry header */
#define min_offset		hdr.links.start	/* start of range */
#define max_offset		hdr.links.end	/* end of range */
#define highest_entry_end	hdr.highest_entry_end_addr
	pmap_t			pmap;		/* Physical map */
	vm_map_size_t		size;		/* virtual size */
	vm_map_size_t		user_wire_limit;/* rlimit on user locked memory */
	vm_map_size_t		user_wire_size;	/* current size of user locked memory in this map */
	int			ref_count;	/* Reference count */
#if	TASK_SWAPPER
	int			res_count;	/* Residence count (swap) */
	int			sw_state;	/* Swap state */
#endif	/* TASK_SWAPPER */
	decl_lck_mtx_data(,	s_lock)		/* Lock ref, res fields */
	lck_mtx_ext_t		s_lock_ext;
	vm_map_entry_t		hint;		/* hint for quick lookups */
	vm_map_entry_t		first_free;	/* First free space hint */
	unsigned int
	/* boolean_t */		wait_for_space:1, /* Should callers wait for space? */
	/* boolean_t */		wiring_required:1, /* All memory wired? */
	/* boolean_t */		no_zero_fill:1,	/* No zero fill absent pages */
	/* boolean_t */		mapped:1,	/* has this map been mapped */
	/* boolean_t */		switch_protect:1, /* Protect map from write faults while switched */
	/* boolean_t */		disable_vmentry_reuse:1, /* All vm entries should keep using newer and higher addresses in the map */
	/* boolean_t */		map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
	/* reserved */		pad:25;
	unsigned int		timestamp;	/* Version number */
	unsigned int		color_rr;	/* next color (not protected by a lock) */
#if CONFIG_FREEZE
	void			*default_freezer_toc;
#endif
	boolean_t		jit_entry_exists;
};
#define vm_map_to_entry(map)	((struct vm_map_entry *) &(map)->hdr.links)
#define vm_map_first_entry(map)	((map)->hdr.links.next)
#define vm_map_last_entry(map)	((map)->hdr.links.prev)
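
/*
 * Usage sketch (illustrative, not part of this header): walking every
 * entry in a map with the macros above.  vm_map_to_entry() yields the
 * list sentinel; "map" is assumed to be a valid vm_map_t and the map
 * lock must be held across the walk.
 *
 *	vm_map_entry_t entry;
 *
 *	vm_map_lock_read(map);
 *	for (entry = vm_map_first_entry(map);
 *	     entry != vm_map_to_entry(map);
 *	     entry = entry->vme_next) {
 *		// [entry->vme_start, entry->vme_end) is one mapping
 *	}
 *	vm_map_unlock_read(map);
 */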
#if	TASK_SWAPPER
/*
 *	VM map swap states.  There are no transition states.
 */
#define MAP_SW_IN	 1	/* map is swapped in; residence count > 0 */
#define MAP_SW_OUT	 2	/* map is out (res_count == 0) */
#endif	/* TASK_SWAPPER */
/*
 *	Type:		vm_map_version_t	[exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage note:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int	main_timestamp;
} vm_map_version_t;
/*
 *	Type:		vm_map_copy_t		[exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 *		There are three formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header links
 *		entry onto which the other entries that represent
 *		the region are chained.
 *
 *		The second format is a single vm object.  This was used
 *		primarily in the pageout path - but is not currently used
 *		except for placeholder copy objects (see vm_map_copy_copy()).
 *
 *		The third format is a kernel buffer copy object - for data
 *		small enough that physical copies were the most efficient
 *		method.
 */

struct vm_map_copy {
	int			type;
#define VM_MAP_COPY_ENTRY_LIST		1
#define VM_MAP_COPY_OBJECT		2
#define VM_MAP_COPY_KERNEL_BUFFER	3
	vm_object_offset_t	offset;
	vm_map_size_t		size;
	union {
	    struct vm_map_header	hdr;	/* ENTRY_LIST */
	    vm_object_t			object;	/* OBJECT */
	    struct {
		void			*kdata;	      /* KERNEL_BUFFER */
		vm_size_t		kalloc_size;  /* size of this copy_t */
	    } c_k;
	} c_u;
};

#define cpy_hdr			c_u.hdr

#define cpy_object		c_u.object

#define cpy_kdata		c_u.c_k.kdata
#define cpy_kalloc_size		c_u.c_k.kalloc_size
/*
 *	Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy)		\
		((struct vm_map_entry *) &(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy)		\
		((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy)		\
		((copy)->cpy_hdr.links.prev)
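
/*
 * Usage sketch: an ENTRY_LIST copy is walked like a map, with the
 * sentinel from vm_map_copy_to_entry().  "copy" is assumed to be a
 * vm_map_copy_t whose type is VM_MAP_COPY_ENTRY_LIST.
 *
 *	vm_map_entry_t entry;
 *
 *	for (entry = vm_map_copy_first_entry(copy);
 *	     entry != vm_map_copy_to_entry(copy);
 *	     entry = entry->vme_next) {
 *		// one copied region
 *	}
 */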
/*
 *	Macros:		vm_map_lock, etc.	[internal use only]
 *
 *	Description:
 *		Perform locking on the data portion of a map.
 *		When multiple maps are to be locked, order by map address.
 *		(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map)						\
	((map)->timestamp = 0 ,						\
	 lock_init(&(map)->lock, TRUE, 0, 0))

#define vm_map_lock(map)	lock_write(&(map)->lock)
#define vm_map_unlock(map)						\
	((map)->timestamp++ ,	lock_write_done(&(map)->lock))
#define vm_map_lock_read(map)	lock_read(&(map)->lock)
#define vm_map_unlock_read(map)	lock_read_done(&(map)->lock)
#define vm_map_lock_write_to_read(map)					\
	((map)->timestamp++ ,	lock_write_to_read(&(map)->lock))
/* lock_read_to_write() returns FALSE on failure.  This macro evaluates to
 * zero on success and a non-zero value on failure.
 */
#define vm_map_lock_read_to_write(map) (lock_read_to_write(&(map)->lock) != TRUE)
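
/*
 * Usage sketch: upgrading a read lock to a write lock.  Because
 * vm_map_lock_read_to_write() evaluates non-zero on *failure* (at
 * which point the read lock has already been dropped), callers
 * typically fall back to taking the write lock directly:
 *
 *	vm_map_lock_read(map);
 *	...
 *	if (vm_map_lock_read_to_write(map)) {
 *		// upgrade failed; read lock was released, so
 *		// re-acquire exclusively and re-validate state
 *		vm_map_lock(map);
 *	}
 *	... modify the map ...
 *	vm_map_unlock(map);
 */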
/*
 *	Exported procedures that operate on vm_map_t.
 */

/* Initialize the module */
extern void		vm_map_init(void) __attribute__((section("__TEXT, initcode")));

/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t	vm_map_find_space(
				vm_map_t		map,
				vm_map_address_t	*address,	/* OUT */
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				vm_map_entry_t		*o_entry);	/* OUT */
/* Lookup the map entry containing the specified address in the given map */
extern boolean_t	vm_map_lookup_entry(
				vm_map_t		map,
				vm_map_address_t	address,
				vm_map_entry_t		*entry);	/* OUT */
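
/*
 * Usage sketch (hypothetical caller): the map must be locked.  On
 * TRUE, *entry is the entry containing "addr"; on FALSE, *entry is
 * the entry immediately preceding the hole that contains it.
 *
 *	vm_map_entry_t entry;
 *
 *	vm_map_lock_read(map);
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		// addr falls in [entry->vme_start, entry->vme_end)
 *	}
 *	vm_map_unlock_read(map);
 */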
/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t	vm_map_lookup_locked(
				vm_map_t		*var_map,	/* IN/OUT */
				vm_map_address_t	vaddr,
				vm_prot_t		fault_type,
				int			object_lock_type,
				vm_map_version_t	*out_version,	/* OUT */
				vm_object_t		*object,	/* OUT */
				vm_object_offset_t	*offset,	/* OUT */
				vm_prot_t		*out_prot,	/* OUT */
				boolean_t		*wired,		/* OUT */
				vm_object_fault_info_t	fault_info,	/* OUT */
				vm_map_t		*real_map);	/* OUT */
/* Verifies that the map has not changed since the given version. */
extern boolean_t	vm_map_verify(
				vm_map_t		map,
				vm_map_version_t	*version);	/* REF */
extern vm_map_entry_t	vm_map_entry_insert(
				vm_map_t		map,
				vm_map_entry_t		insp_entry,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_object_t		object,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				boolean_t		is_shared,
				boolean_t		in_transition,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_behavior_t		behavior,
				vm_inherit_t		inheritance,
				unsigned		wired_count,
				boolean_t		no_cache,
				boolean_t		permanent,
				unsigned int		superpage_size);
/*
 *	Functions implemented as macros
 */
#define		vm_map_min(map)	((map)->min_offset)
						/* Lowest valid address in
						 * a map */

#define		vm_map_max(map)	((map)->max_offset)
						/* Highest valid address */

#define		vm_map_pmap(map)	((map)->pmap)
						/* Physical map associated
						 * with this address map */
#define		vm_map_verify_done(map, version)    vm_map_unlock_read(map)
						/* Operation that required
						 * a verified lookup is
						 * now complete */
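
/*
 * Usage sketch: the lookup/verify protocol.  A version saved by
 * vm_map_lookup_locked() can be checked later; vm_map_verify() leaves
 * the map read-locked on success, and vm_map_verify_done() releases
 * that lock.
 *
 *	if (vm_map_verify(map, &version)) {
 *		// map unchanged since the lookup; results still valid
 *		vm_map_verify_done(map, &version);
 *	} else {
 *		// map changed; redo the lookup
 *	}
 */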
/*
 *	Macros/functions for map residence counts and swapin/out of vm maps
 */
#if	TASK_SWAPPER

#if	MACH_ASSERT
/* Gain a reference to an existing map */
extern void		vm_map_reference(
				vm_map_t	map);
/* Lose a residence count */
extern void		vm_map_res_deallocate(
				vm_map_t	map);
/* Gain a residence count on a map */
extern void		vm_map_res_reference(
				vm_map_t	map);
/* Gain reference & residence counts to possibly swapped-out map */
extern void		vm_map_reference_swap(
				vm_map_t	map);
#else	/* MACH_ASSERT */

#define vm_map_reference(map)			\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	if (Map) {				\
		lck_mtx_lock(&Map->s_lock);	\
		Map->res_count++;		\
		Map->ref_count++;		\
		lck_mtx_unlock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_res_reference(map)		\
MACRO_BEGIN					\
	vm_map_t Lmap = (map);			\
	if (Lmap->res_count == 0) {		\
		lck_mtx_unlock(&Lmap->s_lock);	\
		vm_map_lock(Lmap);		\
		vm_map_swapin(Lmap);		\
		lck_mtx_lock(&Lmap->s_lock);	\
		++Lmap->res_count;		\
		vm_map_unlock(Lmap);		\
	} else					\
		++Lmap->res_count;		\
MACRO_END

#define vm_map_res_deallocate(map)		\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	if (--Map->res_count == 0) {		\
		lck_mtx_unlock(&Map->s_lock);	\
		vm_map_lock(Map);		\
		vm_map_swapout(Map);		\
		vm_map_unlock(Map);		\
		lck_mtx_lock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_reference_swap(map)		\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	lck_mtx_lock(&Map->s_lock);		\
	++Map->ref_count;			\
	vm_map_res_reference(Map);		\
	lck_mtx_unlock(&Map->s_lock);		\
MACRO_END

#endif	/* MACH_ASSERT */
extern void		vm_map_swapin(
				vm_map_t	map);

extern void		vm_map_swapout(
				vm_map_t	map);

#else	/* TASK_SWAPPER */

#define vm_map_reference(map)			\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	if (Map) {				\
		lck_mtx_lock(&Map->s_lock);	\
		Map->ref_count++;		\
		lck_mtx_unlock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_reference_swap(map)	vm_map_reference(map)
#define vm_map_res_reference(map)
#define vm_map_res_deallocate(map)

#endif	/* TASK_SWAPPER */
/*
 *	Submap object.  Must be used to create memory to be put
 *	in a submap by vm_map_submap.
 */
extern vm_object_t	vm_submap_object;
/*
 *	Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible)		\
	((map)->timestamp++ ,				\
	 thread_sleep_lock_write((event_t)&(map)->hdr,	\
				 &(map)->lock, interruptible))

#define vm_map_entry_wakeup(map)	\
	thread_wakeup((event_t)(&(map)->hdr))
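
/*
 * Usage sketch: waiting for an in_transition entry.  The caller holds
 * the map lock; vm_map_entry_wait() drops it while sleeping, so the
 * lock must be re-acquired and the entry looked up again afterwards:
 *
 *	while (entry->in_transition) {
 *		entry->needs_wakeup = TRUE;
 *		vm_map_entry_wait(map, THREAD_UNINT);
 *		vm_map_lock(map);	// re-acquire, then re-lookup entry
 *		...
 *	}
 */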
#define	vm_map_ref_fast(map)			\
	MACRO_BEGIN				\
	lck_mtx_lock(&map->s_lock);		\
	map->ref_count++;			\
	vm_map_res_reference(map);		\
	lck_mtx_unlock(&map->s_lock);		\
	MACRO_END

#define	vm_map_dealloc_fast(map)		\
	MACRO_BEGIN				\
	register int c;				\
						\
	lck_mtx_lock(&map->s_lock);		\
	c = --map->ref_count;			\
	if (c > 0)				\
		vm_map_res_deallocate(map);	\
	lck_mtx_unlock(&map->s_lock);		\
	if (c == 0)				\
		vm_map_destroy(map);		\
	MACRO_END
/* simplify map entries */
extern void		vm_map_simplify_entry(
				vm_map_t	map,
				vm_map_entry_t	this_entry);
extern void		vm_map_simplify(
				vm_map_t	map,
				vm_map_offset_t	start);
/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t	vm_map_copy_copy(
				vm_map_copy_t	copy);

/* Create a copy object from an object. */
extern kern_return_t	vm_map_copyin_object(
				vm_object_t		object,
				vm_object_offset_t	offset,
				vm_object_size_t	size,
				vm_map_copy_t		*copy_result);	/* OUT */
/* Enter a mapping */
extern kern_return_t	vm_map_enter(
				vm_map_t		map,
				vm_map_offset_t		*address,
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				vm_object_t		object,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_inherit_t		inheritance);
/* XXX should go away - replaced with regular enter of contig object */
extern kern_return_t	vm_map_enter_cpm(
				vm_map_t		map,
				vm_map_address_t	*addr,
				vm_map_size_t		size,
				int			flags);
extern kern_return_t	vm_map_remap(
				vm_map_t		target_map,
				vm_map_offset_t		*address,
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				boolean_t		anywhere,
				vm_map_t		src_map,
				vm_map_offset_t		memory_address,
				boolean_t		copy,
				vm_prot_t		*cur_protection,
				vm_prot_t		*max_protection,
				vm_inherit_t		inheritance);
/*
 * Read and write from a kernel buffer to a specified map.
 */
extern kern_return_t	vm_map_write_user(
				vm_map_t		map,
				void			*src_p,
				vm_map_offset_t		dst_addr,
				vm_size_t		size);

extern kern_return_t	vm_map_read_user(
				vm_map_t		map,
				vm_map_offset_t		src_addr,
				void			*dst_p,
				vm_size_t		size);
/* Create a new task map using an existing task map as a template. */
extern vm_map_t		vm_map_fork(
				vm_map_t	old_map);
/* Change inheritance */
extern kern_return_t	vm_map_inherit(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_inherit_t	new_inheritance);
/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t	vm_map_machine_attribute(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_machine_attribute_t	attribute,
				vm_machine_attribute_val_t *value);	/* IN/OUT */
extern kern_return_t	vm_map_msync(
				vm_map_t		map,
				vm_map_address_t	address,
				vm_map_size_t		size,
				vm_sync_t		sync_flags);
/* Set paging behavior */
extern kern_return_t	vm_map_behavior_set(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_behavior_t	new_behavior);
extern kern_return_t	vm_map_purgable_control(
				vm_map_t		map,
				vm_map_offset_t		address,
				vm_purgable_t		control,
				int			*state);
extern kern_return_t	vm_map_region(
				vm_map_t		map,
				vm_map_offset_t		*address,
				vm_map_size_t		*size,
				vm_region_flavor_t	flavor,
				vm_region_info_t	info,
				mach_msg_type_number_t	*count,
				mach_port_t		*object_name);
extern kern_return_t	vm_map_region_recurse_64(
				vm_map_t		map,
				vm_map_offset_t		*address,
				vm_map_size_t		*size,
				natural_t		*nesting_depth,
				vm_region_submap_info_64_t info,
				mach_msg_type_number_t	*count);
extern kern_return_t	vm_map_page_query_internal(
				vm_map_t	map,
				vm_map_offset_t	offset,
				int		*disposition,
				int		*ref_count);
extern kern_return_t	vm_map_submap(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_map_t	submap,
				vm_map_offset_t	offset,
				boolean_t	use_pmap);
extern void		vm_map_submap_pmap_clean(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_map_t	sub_map,
				vm_map_offset_t	offset);
/* Convert from a map entry port to a map */
extern vm_map_t		convert_port_entry_to_map(
				ipc_port_t	port);

/* Convert from a port to a vm_object */
extern vm_object_t	convert_port_entry_to_object(
				ipc_port_t	port);
extern kern_return_t	vm_map_set_cache_attr(
				vm_map_t	map,
				vm_map_offset_t	va);
/* definitions related to overriding the NX behavior */

#define VM_ABI_32	0x1
#define VM_ABI_64	0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);

#endif /* MACH_KERNEL_PRIVATE */
__BEGIN_DECLS

/* Create an empty map */
extern vm_map_t		vm_map_create(
				pmap_t			pmap,
				vm_map_offset_t		min_off,
				vm_map_offset_t		max_off,
				boolean_t		pageable);
/* Get rid of a map */
extern void		vm_map_destroy(
				vm_map_t		map,
				int			flags);

/* Lose a reference */
extern void		vm_map_deallocate(
				vm_map_t		map);

extern vm_map_t		vm_map_switch(
				vm_map_t		map);
/* Change protection */
extern kern_return_t	vm_map_protect(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_prot_t		new_prot,
				boolean_t		set_max);
/* Check protection */
extern boolean_t	vm_map_check_protection(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_prot_t		protection);
/* wire down a region */
extern kern_return_t	vm_map_wire(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_prot_t		access_type,
				boolean_t		user_wire);
/* unwire a region */
extern kern_return_t	vm_map_unwire(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				boolean_t		user_wire);
/* Enter a mapping of a memory object */
extern kern_return_t	vm_map_enter_mem_object(
				vm_map_t		target_map,
				vm_map_offset_t		*address,
				vm_map_size_t		initial_size,
				vm_map_offset_t		mask,
				int			flags,
				ipc_port_t		port,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_inherit_t		inheritance);
/* Enter a mapping of a memory object */
extern kern_return_t	vm_map_enter_mem_object_control(
				vm_map_t		target_map,
				vm_map_offset_t		*address,
				vm_map_size_t		initial_size,
				vm_map_offset_t		mask,
				int			flags,
				memory_object_control_t	control,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_inherit_t		inheritance);
/* Deallocate a region */
extern kern_return_t	vm_map_remove(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				boolean_t		flags);
/* Discard a copy without using it */
extern void		vm_map_copy_discard(
				vm_map_copy_t		copy);
/* Overwrite existing memory with a copy */
extern kern_return_t	vm_map_copy_overwrite(
				vm_map_t		dst_map,
				vm_map_address_t	dst_addr,
				vm_map_copy_t		copy,
				boolean_t		interruptible);
/* Place a copy into a map */
extern kern_return_t	vm_map_copyout(
				vm_map_t		dst_map,
				vm_map_address_t	*dst_addr,	/* OUT */
				vm_map_copy_t		copy);
extern kern_return_t	vm_map_copyin(
				vm_map_t		src_map,
				vm_map_address_t	src_addr,
				vm_map_size_t		len,
				boolean_t		src_destroy,
				vm_map_copy_t		*copy_result);	/* OUT */
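
/*
 * Usage sketch (hypothetical caller, error handling elided): moving a
 * region between two maps with the copy interface declared above.  On
 * success vm_map_copyout() consumes the copy object; on failure the
 * caller still owns it and must discard it.
 *
 *	vm_map_copy_t    copy;
 *	vm_map_address_t dst;
 *
 *	if (vm_map_copyin(src_map, src_addr, len, FALSE, &copy) ==
 *	    KERN_SUCCESS) {
 *		if (vm_map_copyout(dst_map, &dst, copy) != KERN_SUCCESS)
 *			vm_map_copy_discard(copy);  // copy not consumed
 *	}
 */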
extern kern_return_t	vm_map_copyin_common(
				vm_map_t		src_map,
				vm_map_address_t	src_addr,
				vm_map_size_t		len,
				boolean_t		src_destroy,
				boolean_t		src_volatile,
				vm_map_copy_t		*copy_result,	/* OUT */
				boolean_t		use_maxprot);
extern void		vm_map_disable_NX(
				vm_map_t		map);

extern void		vm_map_disallow_data_exec(
				vm_map_t		map);

extern void		vm_map_set_64bit(
				vm_map_t		map);

extern void		vm_map_set_32bit(
				vm_map_t		map);

extern boolean_t	vm_map_is_64bit(
				vm_map_t		map);

extern boolean_t	vm_map_has_4GB_pagezero(
				vm_map_t		map);

extern void		vm_map_set_4GB_pagezero(
				vm_map_t		map);

extern void		vm_map_clear_4GB_pagezero(
				vm_map_t		map);

extern kern_return_t	vm_map_raise_min_offset(
				vm_map_t		map,
				vm_map_offset_t		new_min_offset);

extern vm_map_offset_t	vm_compute_max_offset(
				unsigned		is64);

extern void		vm_map_set_user_wire_limit(
				vm_map_t		map,
				vm_size_t		limit);

extern void		vm_map_switch_protect(
				vm_map_t		map,
				boolean_t		val);

extern boolean_t	first_free_is_valid(vm_map_t);
#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t	vm_map_page_info(
				vm_map_t		map,
				vm_map_offset_t		offset,
				vm_page_info_flavor_t	flavor,
				vm_page_info_t		info,
				mach_msg_type_number_t	*count);
#endif /* XNU_KERNEL_PRIVATE */
#ifdef	MACH_KERNEL_PRIVATE

/*
 *	Macros to invoke vm_map_copyin_common.  vm_map_copyin is the
 *	usual form; it performs a copyin based on the current protection
 *	(current protection == VM_PROT_NONE is a failure).
 *	vm_map_copyin_maxprot handles a copyin based on maximum possible
 *	access.  The difference is that a region with no current access
 *	BUT possible maximum access is rejected by vm_map_copyin(), but
 *	returned by vm_map_copyin_maxprot.
 */
#define	vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)	\
		vm_map_copyin_common(src_map, src_addr, len, src_destroy,	\
					FALSE, copy_result, FALSE)

#define vm_map_copyin_maxprot(src_map,					\
			      src_addr, len, src_destroy, copy_result)	\
		vm_map_copyin_common(src_map, src_addr, len, src_destroy,	\
					FALSE, copy_result, TRUE)
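
/*
 * Expansion sketch: the two macros differ only in the final
 * use_maxprot argument to vm_map_copyin_common(), e.g.
 *
 *	vm_map_copyin(map, addr, len, FALSE, &copy)
 *	  => vm_map_copyin_common(map, addr, len, FALSE, FALSE, &copy, FALSE)
 *	vm_map_copyin_maxprot(map, addr, len, FALSE, &copy)
 *	  => vm_map_copyin_common(map, addr, len, FALSE, FALSE, &copy, TRUE)
 */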
#endif	/* MACH_KERNEL_PRIVATE */

/*
 *	Macros for rounding and truncation of vm_map offsets and sizes
 */
#define vm_map_round_page(x) (((vm_map_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_map_trunc_page(x) ((vm_map_offset_t)(x) & ~((signed)PAGE_MASK))
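
/*
 * Illustrative example (assuming 4 KB pages, PAGE_MASK == 0xFFF):
 *
 *	vm_map_trunc_page(0x12345) == 0x12000
 *	vm_map_round_page(0x12345) == 0x13000
 *	vm_map_round_page(0x12000) == 0x12000
 */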
/*
 * Flags for vm_map_remove() and vm_map_delete()
 */
#define	VM_MAP_NO_FLAGS			0x0
#define	VM_MAP_REMOVE_KUNWIRE		0x1
#define	VM_MAP_REMOVE_INTERRUPTIBLE	0x2
#define	VM_MAP_REMOVE_WAIT_FOR_KWIRE	0x4
#define VM_MAP_REMOVE_SAVE_ENTRIES	0x8
#define VM_MAP_REMOVE_NO_PMAP_CLEANUP	0x10
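
/*
 * Usage sketch (hypothetical values): the flags may be OR'd together,
 * e.g. removing a kernel-wired region while skipping pmap cleanup:
 *
 *	kern_return_t kr;
 *
 *	kr = vm_map_remove(map, start, end,
 *			   VM_MAP_REMOVE_KUNWIRE |
 *			   VM_MAP_REMOVE_NO_PMAP_CLEANUP);
 */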
/* Support for UPLs from vm_maps */

extern kern_return_t	vm_map_get_upl(
				vm_map_t		target_map,
				vm_map_offset_t		map_offset,
				upl_size_t		*upl_size,
				upl_t			*upl,
				upl_page_info_array_t	page_info,
				unsigned int		*page_infoCnt,
				int			*flags,
				int			force_data_sync);
#if CONFIG_DYNAMIC_CODE_SIGNING
extern kern_return_t	vm_map_sign(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end);
#endif	/* CONFIG_DYNAMIC_CODE_SIGNING */
#if CONFIG_FREEZE
extern kern_return_t	vm_map_freeze_walk(
				vm_map_t	map,
				unsigned int	*purgeable_count,
				unsigned int	*wired_count,
				unsigned int	*clean_count,
				unsigned int	*dirty_count,
				boolean_t	*has_shared);

extern kern_return_t	vm_map_freeze(
				vm_map_t	map,
				unsigned int	*purgeable_count,
				unsigned int	*wired_count,
				unsigned int	*clean_count,
				unsigned int	*dirty_count,
				boolean_t	*has_shared);

extern void		vm_map_thaw(
				vm_map_t	map);
#endif	/* CONFIG_FREEZE */

__END_DECLS

#endif	/* KERNEL_PRIVATE */

#endif	/* _VM_VM_MAP_H_ */