/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * File:    vm/vm_map.h
 * Author:  Avadis Tevanian, Jr., Michael Wayne Young
 * Date:    1985
 *
 * Virtual memory map module definitions.
 *
 * Contributors:
 *  avie, dlb, mwyoung
 */

#ifndef _VM_VM_MAP_H_
#define _VM_VM_MAP_H_

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>
#include <mach/sdt.h>
#include <vm/pmap.h>
#include <os/overflow.h>

#ifdef KERNEL_PRIVATE

#include <sys/cdefs.h>

#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_protos.h>
#endif /* XNU_KERNEL_PRIVATE */

__BEGIN_DECLS

extern void vm_map_reference(vm_map_t map);
extern vm_map_t current_map(void);

/* Set up reserved areas in a new VM map */
extern kern_return_t vm_map_exec(
    vm_map_t new_map,
    task_t task,
    boolean_t is64bit,
    void *fsroot,
    cpu_type_t cpu,
    cpu_subtype_t cpu_subtype,
    boolean_t reslide);

__END_DECLS

#ifdef MACH_KERNEL_PRIVATE

#include <task_swapper.h>
#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>
#include <os/refcnt.h>

#define current_map_fast()  (current_thread()->map)
#define current_map()       (current_map_fast())

#include <vm/vm_map_store.h>


/*
 * Types defined:
 *
 *  vm_map_t            the high-level address map data structure.
 *  vm_map_entry_t      an entry in an address map.
 *  vm_map_version_t    a timestamp of a map, for use with vm_map_lookup
 *  vm_map_copy_t       represents memory copied from an address map,
 *                      used for inter-map copy operations
 */
typedef struct vm_map_entry *vm_map_entry_t;
#define VM_MAP_ENTRY_NULL ((vm_map_entry_t) NULL)


/*
 * Type:        vm_map_object_t [internal use only]
 *
 * Description:
 *      The target of an address mapping, either a virtual
 *      memory object or a sub map (of the kernel map).
 */
typedef union vm_map_object {
    vm_object_t vmo_object; /* a VM object */
    vm_map_t vmo_submap;    /* belongs to another map */
} vm_map_object_t;

#define named_entry_lock_init(object)    lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object)         lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)       lck_mtx_unlock(&(object)->Lock)
#if VM_NAMED_ENTRY_LIST
extern queue_head_t vm_named_entry_list;
#endif /* VM_NAMED_ENTRY_LIST */

/*
 * Type:        vm_named_entry_t [internal use only]
 *
 * Description:
 *      Description of a mapping to a memory cache object.
 *
 * Implementation:
 *      While the handle to this object is used as a means to map
 *      and pass around the right to map regions backed by pagers
 *      of all sorts, the named_entry itself is only manipulated
 *      by the kernel.  Named entries hold information on the
 *      right to map a region of a cached object.  Namely,
 *      the target cache object, the beginning and ending of the
 *      region to be mapped, and the permissions (read, write)
 *      with which it can be mapped.
 *
 */

struct vm_named_entry {
    decl_lck_mtx_data(, Lock);          /* Synchronization */
    union {
        vm_map_t map;                   /* map backing submap */
        vm_map_copy_t copy;             /* a VM map copy */
    } backing;
    vm_object_offset_t offset;          /* offset into object */
    vm_object_size_t size;              /* size of region */
    vm_object_offset_t data_offset;     /* offset to first byte of data */
    vm_prot_t protection;               /* access permissions */
    int ref_count;                      /* Number of references */
    unsigned int                        /* Is backing.xxx : */
    /* boolean_t */ is_object:1,        /* ... a VM object (wrapped in a VM map copy) */
    /* boolean_t */ internal:1,         /* ... an internal object */
    /* boolean_t */ is_sub_map:1,       /* ... a submap? */
    /* boolean_t */ is_copy:1;          /* ... a VM map copy */
#if VM_NAMED_ENTRY_LIST
    queue_chain_t named_entry_list;
    int named_entry_alias;
    mach_port_t named_entry_port;
#define NAMED_ENTRY_BT_DEPTH 16
    void *named_entry_bt[NAMED_ENTRY_BT_DEPTH];
#endif /* VM_NAMED_ENTRY_LIST */
};

/*
 * Type:        vm_map_entry_t [internal use only]
 *
 * Description:
 *      A single mapping within an address map.
 *
 * Implementation:
 *      Address map entries consist of start and end addresses,
 *      a VM object (or sub map) and offset into that object,
 *      and user-exported inheritance and protection information.
 *      Control information for virtual copy operations is also
 *      stored in the address map entry.
 */

struct vm_map_links {
    struct vm_map_entry *prev;  /* previous entry */
    struct vm_map_entry *next;  /* next entry */
    vm_map_offset_t start;      /* start address */
    vm_map_offset_t end;        /* end address */
};

/*
 * FOOTPRINT ACCOUNTING:
 * The "memory footprint" is better described in the pmap layer.
 *
 * At the VM level, these 2 vm_map_entry_t fields are relevant:
 * iokit_mapped:
 *      For an "iokit_mapped" entry, we add the size of the entry to the
 *      footprint when the entry is entered into the map and we subtract that
 *      size when the entry is removed.  No other accounting should take place.
 *      "use_pmap" should be FALSE but is not taken into account.
 * use_pmap: (only when is_sub_map is FALSE)
 *      This indicates if we should ask the pmap layer to account for pages
 *      in this mapping.  If FALSE, we expect that another form of accounting
 *      is being used (e.g. "iokit_mapped" or the explicit accounting of
 *      non-volatile purgable memory).
 *
 * So the logic is mostly:
 * if entry->is_sub_map == TRUE
 *      anything in a submap does not count for the footprint
 * else if entry->iokit_mapped == TRUE
 *      footprint includes the entire virtual size of this entry
 * else if entry->use_pmap == FALSE
 *      tell pmap NOT to account for pages being pmap_enter()'d from this
 *      mapping (i.e. use "alternate accounting")
 * else
 *      pmap will account for pages being pmap_enter()'d from this mapping
 *      as it sees fit (only if anonymous, etc...)
 */

struct vm_map_entry {
    struct vm_map_links links;          /* links to other entries */
#define vme_prev  links.prev
#define vme_next  links.next
#define vme_start links.start
#define vme_end   links.end

    struct vm_map_store store;
    union vm_map_object vme_object;     /* object I point to */
    vm_object_offset_t vme_offset;      /* offset into object */

    unsigned int
    /* boolean_t */ is_shared:1,        /* region is shared */
    /* boolean_t */ is_sub_map:1,       /* Is "object" a submap? */
    /* boolean_t */ in_transition:1,    /* Entry being changed */
    /* boolean_t */ needs_wakeup:1,     /* Waiters on in_transition */
    /* vm_behavior_t */ behavior:2,     /* user paging behavior hint */
    /* behavior is not defined for submap type */
    /* boolean_t */ needs_copy:1,       /* object need to be copied? */

    /* Only in task maps: */
    /* vm_prot_t */ protection:3,       /* protection code */
    /* vm_prot_t */ max_protection:3,   /* maximum protection */
    /* vm_inherit_t */ inheritance:2,   /* inheritance */
    /* boolean_t */ use_pmap:1,         /*
                                         * use_pmap is overloaded:
                                         * if "is_sub_map":
                                         *      use a nested pmap?
                                         * else (i.e. if object):
                                         *      use pmap accounting
                                         *      for footprint?
                                         */
    /* boolean_t */ no_cache:1,         /* should new pages be cached? */
    /* boolean_t */ permanent:1,        /* mapping cannot be removed */
    /* boolean_t */ superpage_size:1,   /* use superpages of a certain size */
    /* boolean_t */ map_aligned:1,      /* align to map's page size */
    /* boolean_t */ zero_wired_pages:1, /* zero out the wired pages of
                                         * this entry if it is being deleted
                                         * without unwiring them */
    /* boolean_t */ used_for_jit:1,
    /* boolean_t */ pmap_cs_associated:1, /* pmap_cs will validate */
    /* boolean_t */ from_reserved_zone:1, /* Allocated from
                                           * kernel reserved zone */

    /* iokit accounting: use the virtual size rather than resident size: */
    /* boolean_t */ iokit_acct:1,
    /* boolean_t */ vme_resilient_codesign:1,
    /* boolean_t */ vme_resilient_media:1,
    /* boolean_t */ vme_atomic:1,       /* entry cannot be split/coalesced */
    /* boolean_t */ vme_no_copy_on_read:1,
    /* boolean_t */ translated_allow_execute:1, /* execute in translated processes */
    __unused:2;

    unsigned short wired_count;         /* can be paged if = 0 */
    unsigned short user_wired_count;    /* for vm_wire */
#if DEBUG
#define MAP_ENTRY_CREATION_DEBUG (1)
#define MAP_ENTRY_INSERTION_DEBUG (1)
#endif
#if MAP_ENTRY_CREATION_DEBUG
    struct vm_map_header *vme_creation_maphdr;
    uintptr_t vme_creation_bt[16];
#endif
#if MAP_ENTRY_INSERTION_DEBUG
    vm_map_offset_t vme_start_original;
    vm_map_offset_t vme_end_original;
    uintptr_t vme_insertion_bt[16];
#endif
};

#define VME_SUBMAP_PTR(entry) \
    (&((entry)->vme_object.vmo_submap))
#define VME_SUBMAP(entry) \
    ((vm_map_t)((uintptr_t)0 + *VME_SUBMAP_PTR(entry)))
#define VME_OBJECT_PTR(entry) \
    (&((entry)->vme_object.vmo_object))
#define VME_OBJECT(entry) \
    ((vm_object_t)((uintptr_t)0 + *VME_OBJECT_PTR(entry)))
#define VME_OFFSET(entry) \
    ((entry)->vme_offset & (vm_object_offset_t)~FOURK_PAGE_MASK)
#define VME_ALIAS_MASK (FOURK_PAGE_MASK)
#define VME_ALIAS(entry) \
    ((unsigned int)((entry)->vme_offset & VME_ALIAS_MASK))

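/*
 * Illustrative sketch (editorial addition, not in the original header):
 * "vme_offset" packs two values into one field.  The offset is always
 * FOURK_PAGE_MASK-aligned, so its low bits are free to carry the
 * entry's alias (user tag).  With a 4K mask of 0xFFF:
 *
 *      VME_OFFSET_SET(entry, 0x20000);      offset = 0x20000
 *      VME_ALIAS_SET(entry, 5);             alias  = 5
 *      entry->vme_offset is now 0x20005, so
 *      VME_OFFSET(entry) == 0x20000 and VME_ALIAS(entry) == 5.
 */
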
static inline void
VME_OBJECT_SET(
    vm_map_entry_t entry,
    vm_object_t object)
{
    entry->vme_object.vmo_object = object;
    if (object != VM_OBJECT_NULL && !object->internal) {
        entry->vme_resilient_media = FALSE;
    }
    entry->vme_resilient_codesign = FALSE;
    entry->used_for_jit = FALSE;
}
static inline void
VME_SUBMAP_SET(
    vm_map_entry_t entry,
    vm_map_t submap)
{
    entry->vme_object.vmo_submap = submap;
}
static inline void
VME_OFFSET_SET(
    vm_map_entry_t entry,
    vm_object_offset_t offset)
{
    unsigned int alias;
    alias = VME_ALIAS(entry);
    assert((offset & FOURK_PAGE_MASK) == 0);
    entry->vme_offset = offset | alias;
}
/*
 * IMPORTANT:
 * The "alias" field can be updated while holding the VM map lock
 * "shared".  That is OK as long as it is the only field that can be
 * updated without the VM map "exclusive" lock.
 */
static inline void
VME_ALIAS_SET(
    vm_map_entry_t entry,
    int alias)
{
    vm_object_offset_t offset;
    offset = VME_OFFSET(entry);
    entry->vme_offset = offset | ((unsigned int)alias & VME_ALIAS_MASK);
}

static inline void
VME_OBJECT_SHADOW(
    vm_map_entry_t entry,
    vm_object_size_t length)
{
    vm_object_t object;
    vm_object_offset_t offset;

    object = VME_OBJECT(entry);
    offset = VME_OFFSET(entry);
    vm_object_shadow(&object, &offset, length);
    if (object != VME_OBJECT(entry)) {
        VME_OBJECT_SET(entry, object);
        entry->use_pmap = TRUE;
    }
    if (offset != VME_OFFSET(entry)) {
        VME_OFFSET_SET(entry, offset);
    }
}

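/*
 * Illustrative sketch (editorial addition): VME_OBJECT_SHADOW() is the
 * usual way an entry is given a private copy-on-write object.  A caller
 * holding the map lock exclusively would typically do:
 *
 *      if (entry->needs_copy) {
 *              VME_OBJECT_SHADOW(entry,
 *                  entry->vme_end - entry->vme_start);
 *              entry->needs_copy = FALSE;
 *      }
 *
 * If vm_object_shadow() created a new object, the entry is repointed at
 * the shadow and pmap footprint accounting is re-enabled (use_pmap).
 */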

/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) ((a) & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) (((a) + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)

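/*
 * Worked example (editorial addition): with 4K base pages and, e.g.,
 * SUPERPAGE_NBASEPAGES == 512 (x86_64), SUPERPAGE_SIZE is 2MB
 * (0x200000) and SUPERPAGE_MASK is ~0x1FFFFF, so:
 *
 *      SUPERPAGE_ROUND_DOWN(0x345678) == 0x200000
 *      SUPERPAGE_ROUND_UP(0x345678)   == 0x400000
 */
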
/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT 65535



/*
 * Type:        struct vm_map_header
 *
 * Description:
 *      Header for a vm_map and a vm_map_copy.
 */


struct vm_map_header {
    struct vm_map_links links;  /* first, last, min, max */
    int nentries;               /* Number of entries */
    boolean_t entries_pageable; /* are map entries pageable? */
#ifdef VM_MAP_STORE_USE_RB
    struct rb_head rb_head_store;
#endif
    int page_shift;             /* page shift */
};

#define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift)
#define VM_MAP_HDR_PAGE_SIZE(hdr) (1 << VM_MAP_HDR_PAGE_SHIFT((hdr)))
#define VM_MAP_HDR_PAGE_MASK(hdr) (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1)

/*
 * Type:        vm_map_t [exported; contents invisible]
 *
 * Description:
 *      An address map -- a directory relating valid
 *      regions of a task's address space to the corresponding
 *      virtual memory objects.
 *
 * Implementation:
 *      Maps are doubly-linked lists of map entries, sorted
 *      by address.  One hint is used to start
 *      searches again from the last successful search,
 *      insertion, or removal.  Another hint is used to
 *      quickly find free space.
 */
struct _vm_map {
    lck_rw_t lock;              /* map lock */
    struct vm_map_header hdr;   /* Map entry header */
#define min_offset hdr.links.start  /* start of range */
#define max_offset hdr.links.end    /* end of range */
    pmap_t XNU_PTRAUTH_SIGNED_PTR("_vm_map.pmap") pmap; /* Physical map */
    vm_map_size_t size;         /* virtual size */
    vm_map_size_t user_wire_limit;  /* rlimit on user locked memory */
    vm_map_size_t user_wire_size;   /* current size of user locked memory in this map */
#if !CONFIG_EMBEDDED
    vm_map_offset_t vmmap_high_start;
#endif

    union {
        /*
         * If map->disable_vmentry_reuse == TRUE:
         * the end address of the highest allocated vm_map_entry_t.
         */
        vm_map_offset_t vmu1_highest_entry_end;
        /*
         * For a nested VM map:
         * the lowest address in this nested VM map that we would
         * expect to be unnested under normal operation (i.e. for
         * regular copy-on-write on DATA section).
         */
        vm_map_offset_t vmu1_lowest_unnestable_start;
    } vmu1;
#define highest_entry_end vmu1.vmu1_highest_entry_end
#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start
    decl_lck_mtx_data(, s_lock);    /* Lock ref, res fields */
    lck_mtx_ext_t s_lock_ext;
    vm_map_entry_t hint;        /* hint for quick lookups */
    union {
        struct vm_map_links* vmmap_hole_hint;   /* hint for quick hole lookups */
        struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
    } vmmap_u_1;
#define hole_hint vmmap_u_1.vmmap_hole_hint
#define vmmap_corpse_footprint vmmap_u_1.vmmap_corpse_footprint
    union {
        vm_map_entry_t _first_free;     /* First free space hint */
        struct vm_map_links* _holes;    /* links all holes between entries */
    } f_s;                      /* Union for free space data structures being used */

#define first_free f_s._first_free
#define holes_list f_s._holes

    struct os_refcnt map_refcnt;    /* Reference count */

#if TASK_SWAPPER
    int res_count;              /* Residence count (swap) */
    int sw_state;               /* Swap state */
#endif /* TASK_SWAPPER */

    unsigned int
    /* boolean_t */ wait_for_space:1,         /* Should callers wait for space? */
    /* boolean_t */ wiring_required:1,        /* All memory wired? */
    /* boolean_t */ no_zero_fill:1,           /* No zero fill absent pages */
    /* boolean_t */ mapped_in_other_pmaps:1,  /* has this submap been mapped in maps that use a different pmap */
    /* boolean_t */ switch_protect:1,         /* Protect map from write faults while switched */
    /* boolean_t */ disable_vmentry_reuse:1,  /* All vm entries should keep using newer and higher addresses in the map */
    /* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
    /* boolean_t */ holelistenabled:1,
    /* boolean_t */ is_nested_map:1,
    /* boolean_t */ map_disallow_new_exec:1,  /* Disallow new executable code */
    /* boolean_t */ jit_entry_exists:1,
    /* boolean_t */ has_corpse_footprint:1,
    /* boolean_t */ terminated:1,
    /* boolean_t */ is_alien:1,               /* for platform simulation, i.e. PLATFORM_IOS on OSX */
    /* boolean_t */ cs_enforcement:1,         /* code-signing enforcement */
    /* boolean_t */ reserved_regions:1,       /* has reserved regions. The map size that userspace sees should ignore these. */
    /* reserved */ pad:16;
    unsigned int timestamp;     /* Version number */
};

#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
#define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
#define vm_map_first_entry(map) ((map)->hdr.links.next)
#define vm_map_last_entry(map) ((map)->hdr.links.prev)

#if TASK_SWAPPER
/*
 * VM map swap states.  There are no transition states.
 */
#define MAP_SW_IN  1    /* map is swapped in; residence count > 0 */
#define MAP_SW_OUT 2    /* map is out (res_count == 0) */
#endif /* TASK_SWAPPER */

/*
 * Type:        vm_map_version_t [exported; contents invisible]
 *
 * Description:
 *      Map versions may be used to quickly validate a previous
 *      lookup operation.
 *
 * Usage note:
 *      Because they are bulky objects, map versions are usually
 *      passed by reference.
 *
 * Implementation:
 *      Just a timestamp for the main map.
 */
typedef struct vm_map_version {
    unsigned int main_timestamp;
} vm_map_version_t;
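
/*
 * Illustrative sketch (editorial addition): the lookup/verify pattern.
 * A caller saves the version filled in by vm_map_lookup_locked(), drops
 * the map lock for slow work, then re-takes it and checks that the map
 * did not change in the meantime:
 *
 *      vm_map_version_t version;
 *      ... vm_map_lookup_locked(&map, vaddr, ..., &version, ...) ...
 *      vm_map_unlock_read(map);
 *      ... slow path (e.g. pager I/O) ...
 *      vm_map_lock_read(map);
 *      if (!vm_map_verify(map, &version)) {
 *              ... the map changed: redo the lookup ...
 *      }
 */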

/*
 * Type:        vm_map_copy_t [exported; contents invisible]
 *
 * Description:
 *      A map copy object represents a region of virtual memory
 *      that has been copied from an address map but is still
 *      in transit.
 *
 *      A map copy object may only be used by a single thread
 *      at a time.
 *
 * Implementation:
 *      There are three formats for map copy objects.
 *      The first is very similar to the main
 *      address map in structure, and as a result, some
 *      of the internal maintenance functions/macros can
 *      be used with either address maps or map copy objects.
 *
 *      The map copy object contains a header links
 *      entry onto which the other entries that represent
 *      the region are chained.
 *
 *      The second format is a single vm object.  This was used
 *      primarily in the pageout path - but is not currently used
 *      except for placeholder copy objects (see vm_map_copy_copy()).
 *
 *      The third format is a kernel buffer copy object - for data
 *      small enough that physical copies were the most efficient
 *      method.  This method uses a zero-sized array unioned with
 *      other format-specific data in the 'c_u' member.  This unsized
 *      array overlaps the other elements and allows us to use this
 *      extra structure space for physical memory copies.  On 64-bit
 *      systems this saves ~64 bytes per vm_map_copy.
 */

struct vm_map_copy {
    int type;
#define VM_MAP_COPY_ENTRY_LIST    1
#define VM_MAP_COPY_OBJECT        2
#define VM_MAP_COPY_KERNEL_BUFFER 3
    vm_object_offset_t offset;
    vm_map_size_t size;
    union {
        struct vm_map_header hdr;   /* ENTRY_LIST */
        vm_object_t object;         /* OBJECT */
        void *XNU_PTRAUTH_SIGNED_PTR("vm_map_copy.kdata") kdata; /* KERNEL_BUFFER */
    } c_u;
};


#define cpy_hdr c_u.hdr

#define cpy_object c_u.object
#define cpy_kdata c_u.kdata

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)

/*
 * Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy) \
    ((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy) \
    ((copy)->cpy_hdr.links.prev)
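
/*
 * Illustrative sketch (editorial addition): walking an ENTRY_LIST copy.
 * The header's links act as a sentinel, so iteration stops when the
 * cursor wraps back to vm_map_copy_to_entry(copy):
 *
 *      vm_map_entry_t entry;
 *
 *      assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
 *      for (entry = vm_map_copy_first_entry(copy);
 *           entry != vm_map_copy_to_entry(copy);
 *           entry = entry->vme_next) {
 *              ... examine entry ...
 *      }
 */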

extern kern_return_t
vm_map_copy_adjust_to_target(
    vm_map_copy_t copy_map,
    vm_map_offset_t offset,
    vm_map_size_t size,
    vm_map_t target_map,
    boolean_t copy,
    vm_map_copy_t *target_copy_map_p,
    vm_map_offset_t *overmap_start_p,
    vm_map_offset_t *overmap_end_p,
    vm_map_offset_t *trimmed_start_p);

/*
 * Macros:      vm_map_lock, etc. [internal use only]
 * Description:
 *      Perform locking on the data portion of a map.
 *      When multiple maps are to be locked, order by map address.
 *      (See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map) \
    ((map)->timestamp = 0, \
    lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))

#define vm_map_lock(map) \
    MACRO_BEGIN \
    DTRACE_VM(vm_map_lock_w); \
    lck_rw_lock_exclusive(&(map)->lock); \
    MACRO_END

#define vm_map_unlock(map) \
    MACRO_BEGIN \
    DTRACE_VM(vm_map_unlock_w); \
    (map)->timestamp++; \
    lck_rw_done(&(map)->lock); \
    MACRO_END

#define vm_map_lock_read(map) \
    MACRO_BEGIN \
    DTRACE_VM(vm_map_lock_r); \
    lck_rw_lock_shared(&(map)->lock); \
    MACRO_END

#define vm_map_unlock_read(map) \
    MACRO_BEGIN \
    DTRACE_VM(vm_map_unlock_r); \
    lck_rw_done(&(map)->lock); \
    MACRO_END

#define vm_map_lock_write_to_read(map) \
    MACRO_BEGIN \
    DTRACE_VM(vm_map_lock_downgrade); \
    (map)->timestamp++; \
    lck_rw_lock_exclusive_to_shared(&(map)->lock); \
    MACRO_END

__attribute__((always_inline))
int vm_map_lock_read_to_write(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock_read(vm_map_t map);
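
/*
 * Illustrative sketch (editorial addition): upgrading a shared map
 * lock.  vm_map_lock_read_to_write() is expected to return 0 on a
 * successful upgrade; on failure the shared lock has already been
 * dropped, so the caller must re-lock and re-validate:
 *
 *      vm_map_lock_read(map);
 *      ... decide the map must be modified ...
 *      if (vm_map_lock_read_to_write(map)) {
 *              vm_map_lock(map);
 *              ... re-validate: the map may have changed ...
 *      }
 *      ... modify the map ...
 *      vm_map_unlock(map);
 */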

int vm_self_region_page_shift(vm_map_t target_map);
int vm_self_region_page_shift_safely(vm_map_t target_map);

#if MACH_ASSERT || DEBUG
#define vm_map_lock_assert_held(map) \
    lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD)
#define vm_map_lock_assert_shared(map) \
    lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_SHARED)
#define vm_map_lock_assert_exclusive(map) \
    lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_map_lock_assert_notheld(map) \
    lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
#else /* MACH_ASSERT || DEBUG */
#define vm_map_lock_assert_held(map)
#define vm_map_lock_assert_shared(map)
#define vm_map_lock_assert_exclusive(map)
#define vm_map_lock_assert_notheld(map)
#endif /* MACH_ASSERT || DEBUG */

/*
 * Exported procedures that operate on vm_map_t.
 */

/* Initialize the module */
extern void vm_map_init(void);

extern void vm_kernel_reserved_entry_init(void);

/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t vm_map_find_space(
    vm_map_t map,
    vm_map_address_t *address,  /* OUT */
    vm_map_size_t size,
    vm_map_offset_t mask,
    int flags,
    vm_map_kernel_flags_t vmk_flags,
    vm_tag_t tag,
    vm_map_entry_t *o_entry);   /* OUT */

/* flags for vm_map_find_space */
#define VM_MAP_FIND_LAST_FREE 0x01

extern void vm_map_clip_start(
    vm_map_t map,
    vm_map_entry_t entry,
    vm_map_offset_t endaddr);
extern void vm_map_clip_end(
    vm_map_t map,
    vm_map_entry_t entry,
    vm_map_offset_t endaddr);
extern boolean_t vm_map_entry_should_cow_for_true_share(
    vm_map_entry_t entry);
/* Look up the map entry containing (or immediately preceding) the specified address in the given map */
extern boolean_t vm_map_lookup_entry(
    vm_map_t map,
    vm_map_address_t address,
    vm_map_entry_t *entry);     /* OUT */

extern void vm_map_copy_remap(
    vm_map_t map,
    vm_map_entry_t where,
    vm_map_copy_t copy,
    vm_map_offset_t adjustment,
    vm_prot_t cur_prot,
    vm_prot_t max_prot,
    vm_inherit_t inheritance);

/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t vm_map_lookup_locked(
    vm_map_t *var_map,          /* IN/OUT */
    vm_map_address_t vaddr,
    vm_prot_t fault_type,
    int object_lock_type,
    vm_map_version_t *out_version,  /* OUT */
    vm_object_t *object,        /* OUT */
    vm_object_offset_t *offset, /* OUT */
    vm_prot_t *out_prot,        /* OUT */
    boolean_t *wired,           /* OUT */
    vm_object_fault_info_t fault_info, /* OUT */
    vm_map_t *real_map,         /* OUT */
    bool *contended);           /* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t vm_map_verify(
    vm_map_t map,
    vm_map_version_t *version); /* REF */

extern vm_map_entry_t vm_map_entry_insert(
    vm_map_t map,
    vm_map_entry_t insp_entry,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_object_t object,
    vm_object_offset_t offset,
    boolean_t needs_copy,
    boolean_t is_shared,
    boolean_t in_transition,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_behavior_t behavior,
    vm_inherit_t inheritance,
    unsigned short wired_count,
    boolean_t no_cache,
    boolean_t permanent,
    boolean_t no_copy_on_read,
    unsigned int superpage_size,
    boolean_t clear_map_aligned,
    boolean_t is_submap,
    boolean_t used_for_jit,
    int alias,
    boolean_t translated_allow_execute);


/*
 * Functions implemented as macros
 */
#define vm_map_min(map) ((map)->min_offset)
                                /* Lowest valid address in
                                 * a map */

#define vm_map_max(map) ((map)->max_offset)
                                /* Highest valid address */

#define vm_map_pmap(map) ((map)->pmap)
                                /* Physical map associated
                                 * with this address map */

/*
 * Macros/functions for map residence counts and swapin/out of vm maps
 */
#if TASK_SWAPPER

#if MACH_ASSERT
/* Gain a reference to an existing map */
extern void vm_map_reference(
    vm_map_t map);
/* Lose a residence count */
extern void vm_map_res_deallocate(
    vm_map_t map);
/* Gain a residence count on a map */
extern void vm_map_res_reference(
    vm_map_t map);
/* Gain reference & residence counts to possibly swapped-out map */
extern void vm_map_reference_swap(
    vm_map_t map);

#else /* MACH_ASSERT */

#define vm_map_reference(map) \
    MACRO_BEGIN \
    vm_map_t Map = (map); \
    if (Map) { \
        lck_mtx_lock(&Map->s_lock); \
        Map->res_count++; \
        os_ref_retain(&Map->map_refcnt); \
        lck_mtx_unlock(&Map->s_lock); \
    } \
    MACRO_END

#define vm_map_res_reference(map) \
    MACRO_BEGIN \
    vm_map_t Lmap = (map); \
    if (Lmap->res_count == 0) { \
        lck_mtx_unlock(&Lmap->s_lock); \
        vm_map_lock(Lmap); \
        vm_map_swapin(Lmap); \
        lck_mtx_lock(&Lmap->s_lock); \
        ++Lmap->res_count; \
        vm_map_unlock(Lmap); \
    } else \
        ++Lmap->res_count; \
    MACRO_END

#define vm_map_res_deallocate(map) \
    MACRO_BEGIN \
    vm_map_t Map = (map); \
    if (--Map->res_count == 0) { \
        lck_mtx_unlock(&Map->s_lock); \
        vm_map_lock(Map); \
        vm_map_swapout(Map); \
        vm_map_unlock(Map); \
        lck_mtx_lock(&Map->s_lock); \
    } \
    MACRO_END

#define vm_map_reference_swap(map) \
    MACRO_BEGIN \
    vm_map_t Map = (map); \
    lck_mtx_lock(&Map->s_lock); \
    os_ref_retain(&Map->map_refcnt); \
    vm_map_res_reference(Map); \
    lck_mtx_unlock(&Map->s_lock); \
    MACRO_END
#endif /* MACH_ASSERT */

extern void vm_map_swapin(
    vm_map_t map);

extern void vm_map_swapout(
    vm_map_t map);

#else /* TASK_SWAPPER */

#define vm_map_reference(map) \
    MACRO_BEGIN \
    vm_map_t Map = (map); \
    if (Map) { \
        lck_mtx_lock(&Map->s_lock); \
        os_ref_retain(&Map->map_refcnt); \
        lck_mtx_unlock(&Map->s_lock); \
    } \
    MACRO_END

#define vm_map_reference_swap(map) vm_map_reference(map)
#define vm_map_res_reference(map)
#define vm_map_res_deallocate(map)

#endif /* TASK_SWAPPER */

/*
 * Submap object.  Must be used to create memory to be put
 * in a submap by vm_map_submap.
 */
extern vm_object_t vm_submap_object;

/*
 * Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible) \
    ((map)->timestamp++, \
    lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \
        (event_t)&(map)->hdr, interruptible))


#define vm_map_entry_wakeup(map) \
    thread_wakeup((event_t)(&(map)->hdr))

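/*
 * Illustrative sketch (editorial addition): waiting out an
 * in_transition entry.  vm_map_entry_wait() is called with the map
 * locked exclusively; the lock is dropped while sleeping (hence the
 * timestamp bump) and re-taken on wakeup, so the entry must be looked
 * up again:
 *
 *      while (entry->in_transition) {
 *              entry->needs_wakeup = TRUE;
 *              vm_map_entry_wait(map, THREAD_UNINT);
 *              if (!vm_map_lookup_entry(map, addr, &entry)) {
 *                      ... the range disappeared while unlocked ...
 *              }
 *      }
 */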

#define vm_map_ref_fast(map) \
    MACRO_BEGIN \
    lck_mtx_lock(&map->s_lock); \
    map->ref_count++; \
    vm_map_res_reference(map); \
    lck_mtx_unlock(&map->s_lock); \
    MACRO_END

#define vm_map_dealloc_fast(map) \
    MACRO_BEGIN \
    int c; \
    \
    lck_mtx_lock(&map->s_lock); \
    c = --map->ref_count; \
    if (c > 0) \
        vm_map_res_deallocate(map); \
    lck_mtx_unlock(&map->s_lock); \
    if (c == 0) \
        vm_map_destroy(map); \
    MACRO_END


/* simplify map entries */
extern void vm_map_simplify_entry(
    vm_map_t map,
    vm_map_entry_t this_entry);
extern void vm_map_simplify(
    vm_map_t map,
    vm_map_offset_t start);

/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t vm_map_copy_copy(
    vm_map_copy_t copy);

/* Create a copy object from an object. */
extern kern_return_t vm_map_copyin_object(
    vm_object_t object,
    vm_object_offset_t offset,
    vm_object_size_t size,
    vm_map_copy_t *copy_result); /* OUT */

extern kern_return_t vm_map_random_address_for_size(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t size);

/* Enter a mapping */
extern kern_return_t vm_map_enter(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t size,
    vm_map_offset_t mask,
    int flags,
    vm_map_kernel_flags_t vmk_flags,
    vm_tag_t tag,
    vm_object_t object,
    vm_object_offset_t offset,
    boolean_t needs_copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);

#if __arm64__
extern kern_return_t vm_map_enter_fourk(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t size,
    vm_map_offset_t mask,
    int flags,
    vm_map_kernel_flags_t vmk_flags,
    vm_tag_t tag,
    vm_object_t object,
    vm_object_offset_t offset,
    boolean_t needs_copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);
#endif /* __arm64__ */

/* XXX should go away - replaced with regular enter of contig object */
extern kern_return_t vm_map_enter_cpm(
    vm_map_t map,
    vm_map_address_t *addr,
    vm_map_size_t size,
    int flags);

extern kern_return_t vm_map_remap(
    vm_map_t target_map,
    vm_map_offset_t *address,
    vm_map_size_t size,
    vm_map_offset_t mask,
    int flags,
    vm_map_kernel_flags_t vmk_flags,
    vm_tag_t tag,
    vm_map_t src_map,
    vm_map_offset_t memory_address,
    boolean_t copy,
    vm_prot_t *cur_protection,
    vm_prot_t *max_protection,
    vm_inherit_t inheritance);


/*
 * Read and write from a kernel buffer to a specified map.
 */
extern kern_return_t vm_map_write_user(
    vm_map_t map,
    void *src_p,
    vm_map_offset_t dst_addr,
    vm_size_t size);

extern kern_return_t vm_map_read_user(
    vm_map_t map,
    vm_map_offset_t src_addr,
    void *dst_p,
    vm_size_t size);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t vm_map_fork(
    ledger_t ledger,
    vm_map_t old_map,
    int options);
#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE 0x00000001
#define VM_MAP_FORK_PRESERVE_PURGEABLE    0x00000002
#define VM_MAP_FORK_CORPSE_FOOTPRINT      0x00000004

/* Change inheritance */
extern kern_return_t vm_map_inherit(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_inherit_t new_inheritance);

/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t vm_map_machine_attribute(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_machine_attribute_t attribute,
    vm_machine_attribute_val_t* value); /* IN/OUT */

extern kern_return_t vm_map_msync(
    vm_map_t map,
    vm_map_address_t address,
    vm_map_size_t size,
    vm_sync_t sync_flags);

/* Set paging behavior */
extern kern_return_t vm_map_behavior_set(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_behavior_t new_behavior);

extern kern_return_t vm_map_region(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t *size,
    vm_region_flavor_t flavor,
    vm_region_info_t info,
    mach_msg_type_number_t *count,
    mach_port_t *object_name);

extern kern_return_t vm_map_region_recurse_64(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t *size,
    natural_t *nesting_depth,
    vm_region_submap_info_64_t info,
    mach_msg_type_number_t *count);

extern kern_return_t vm_map_page_query_internal(
    vm_map_t map,
    vm_map_offset_t offset,
    int *disposition,
    int *ref_count);

extern kern_return_t vm_map_query_volatile(
    vm_map_t map,
    mach_vm_size_t *volatile_virtual_size_p,
    mach_vm_size_t *volatile_resident_size_p,
    mach_vm_size_t *volatile_compressed_size_p,
    mach_vm_size_t *volatile_pmap_size_p,
    mach_vm_size_t *volatile_compressed_pmap_size_p);

extern kern_return_t vm_map_submap(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_map_t submap,
    vm_map_offset_t offset,
    boolean_t use_pmap);

extern void vm_map_submap_pmap_clean(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_map_t sub_map,
    vm_map_offset_t offset);

/* Convert from a map entry port to a map */
extern vm_map_t convert_port_entry_to_map(
    ipc_port_t port);

/* Convert from a port to a vm_object */
extern vm_object_t convert_port_entry_to_object(
    ipc_port_t port);


extern kern_return_t vm_map_set_cache_attr(
    vm_map_t map,
    vm_map_offset_t va);


/* definitions related to overriding the NX behavior */

#define VM_ABI_32 0x1
#define VM_ABI_64 0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);

#if PMAP_CS
extern kern_return_t vm_map_entry_cs_associate(
    vm_map_t map,
    vm_map_entry_t entry,
    vm_map_kernel_flags_t vmk_flags);
#endif /* PMAP_CS */

extern void vm_map_region_top_walk(
    vm_map_entry_t entry,
    vm_region_top_info_t top);
extern void vm_map_region_walk(
    vm_map_t map,
    vm_map_offset_t va,
    vm_map_entry_t entry,
    vm_object_offset_t offset,
    vm_object_size_t range,
    vm_region_extended_info_t extended,
    boolean_t look_for_pages,
    mach_msg_type_number_t count);



extern void vm_map_copy_footprint_ledgers(
    task_t old_task,
    task_t new_task);
extern void vm_map_copy_ledger(
    task_t old_task,
    task_t new_task,
    int ledger_entry);

/**
 * Represents a single region of virtual address space that should be reserved
 * (pre-mapped) in a user address space.
 */
struct vm_reserved_region {
    char *vmrr_name;
    vm_map_offset_t vmrr_addr;
    vm_map_size_t vmrr_size;
};

/**
 * Return a machine-dependent array of address space regions that should be
 * reserved by the VM.  This function is defined in the machine-dependent
 * machine_routines.c files.
 */
extern size_t ml_get_vm_reserved_regions(
    bool vm_is64bit,
    struct vm_reserved_region **regions);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

/* Create an empty map */
extern vm_map_t vm_map_create(
    pmap_t pmap,
    vm_map_offset_t min_off,
    vm_map_offset_t max_off,
    boolean_t pageable);
extern vm_map_t vm_map_create_options(
    pmap_t pmap,
    vm_map_offset_t min_off,
    vm_map_offset_t max_off,
    int options);
#define VM_MAP_CREATE_PAGEABLE         0x00000001
#define VM_MAP_CREATE_CORPSE_FOOTPRINT 0x00000002
#define VM_MAP_CREATE_ALL_OPTIONS (VM_MAP_CREATE_PAGEABLE | \
    VM_MAP_CREATE_CORPSE_FOOTPRINT)

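/*
 * Illustrative sketch (editorial addition): creating a pageable map
 * covering a 64-bit user address range; option bits are OR-ed together
 * and are expected to stay within VM_MAP_CREATE_ALL_OPTIONS:
 *
 *      vm_map_t map;
 *
 *      map = vm_map_create_options(pmap, 0,
 *          vm_compute_max_offset(TRUE), VM_MAP_CREATE_PAGEABLE);
 */
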
extern vm_map_size_t vm_map_adjusted_size(vm_map_t map);

extern void vm_map_disable_hole_optimization(vm_map_t map);

/* Get rid of a map */
extern void vm_map_destroy(
    vm_map_t map,
    int flags);

/* Lose a reference */
extern void vm_map_deallocate(
    vm_map_t map);

/* Lose a reference */
extern void vm_map_inspect_deallocate(
    vm_map_inspect_t map);

/* Lose a reference */
extern void vm_map_read_deallocate(
    vm_map_read_t map);

extern vm_map_t vm_map_switch(
    vm_map_t map);

/* Change protection */
extern kern_return_t vm_map_protect(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t new_prot,
    boolean_t set_max);

/* Check protection */
extern boolean_t vm_map_check_protection(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t protection);

extern boolean_t vm_map_cs_enforcement(
    vm_map_t map);
extern void vm_map_cs_enforcement_set(
    vm_map_t map,
    boolean_t val);

/* wire down a region */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_wire_kernel(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t access_type,
    vm_tag_t tag,
    boolean_t user_wire);

extern kern_return_t vm_map_wire_and_extract_kernel(
    vm_map_t map,
    vm_map_offset_t start,
    vm_prot_t access_type,
    vm_tag_t tag,
    boolean_t user_wire,
    ppnum_t *physpage_p);

/* kext exported versions */

extern kern_return_t vm_map_wire_external(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t access_type,
    boolean_t user_wire);

extern kern_return_t vm_map_wire_and_extract_external(
    vm_map_t map,
    vm_map_offset_t start,
    vm_prot_t access_type,
    boolean_t user_wire,
    ppnum_t *physpage_p);

#else /* XNU_KERNEL_PRIVATE */

extern kern_return_t vm_map_wire(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t access_type,
    boolean_t user_wire);

extern kern_return_t vm_map_wire_and_extract(
    vm_map_t map,
    vm_map_offset_t start,
    vm_prot_t access_type,
    boolean_t user_wire,
    ppnum_t *physpage_p);

#endif /* !XNU_KERNEL_PRIVATE */

/* unwire a region */
extern kern_return_t vm_map_unwire(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    boolean_t user_wire);

#ifdef XNU_KERNEL_PRIVATE

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t size,
    vm_map_offset_t mask,
    int flags,
    vm_map_kernel_flags_t vmk_flags,
    vm_tag_t tag,
    ipc_port_t port,
    vm_object_offset_t offset,
    boolean_t needs_copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object_prefault(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t size,
    vm_map_offset_t mask,
    int flags,
    vm_map_kernel_flags_t vmk_flags,
    vm_tag_t tag,
    ipc_port_t port,
    vm_object_offset_t offset,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    upl_page_list_ptr_t page_list,
    unsigned int page_list_count);

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object_control(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t size,
    vm_map_offset_t mask,
    int flags,
    vm_map_kernel_flags_t vmk_flags,
    vm_tag_t tag,
    memory_object_control_t control,
    vm_object_offset_t offset,
    boolean_t needs_copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);

extern kern_return_t vm_map_terminate(
    vm_map_t map);

#endif /* XNU_KERNEL_PRIVATE */

/* Deallocate a region */
extern kern_return_t vm_map_remove(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    boolean_t flags);

/* Deallocate a region when the map is already locked */
extern kern_return_t vm_map_remove_locked(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    boolean_t flags);

/* Discard a copy without using it */
extern void vm_map_copy_discard(
    vm_map_copy_t copy);

/* Overwrite existing memory with a copy */
extern kern_return_t vm_map_copy_overwrite(
    vm_map_t dst_map,
    vm_map_address_t dst_addr,
    vm_map_copy_t copy,
    vm_map_size_t copy_size,
    boolean_t interruptible);

#define VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES (3)


/* Returns TRUE if the size of the vm_map_copy equals the size parameter, FALSE otherwise */
extern boolean_t vm_map_copy_validate_size(
    vm_map_t dst_map,
    vm_map_copy_t copy,
    vm_map_size_t *size);

/* Place a copy into a map */
extern kern_return_t vm_map_copyout(
    vm_map_t dst_map,
    vm_map_address_t *dst_addr, /* OUT */
    vm_map_copy_t copy);

extern kern_return_t vm_map_copyout_size(
    vm_map_t dst_map,
    vm_map_address_t *dst_addr, /* OUT */
    vm_map_copy_t copy,
    vm_map_size_t copy_size);

extern kern_return_t vm_map_copyout_internal(
    vm_map_t dst_map,
    vm_map_address_t *dst_addr, /* OUT */
    vm_map_copy_t copy,
    vm_map_size_t copy_size,
    boolean_t consume_on_success,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);

extern kern_return_t vm_map_copyin(
    vm_map_t src_map,
    vm_map_address_t src_addr,
    vm_map_size_t len,
    boolean_t src_destroy,
    vm_map_copy_t *copy_result); /* OUT */

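/*
 * Illustrative sketch (editorial addition): the classic copyin/copyout
 * round trip for an inter-map transfer.  vm_map_copyout() consumes the
 * copy object only on success, so a failed placement must be discarded
 * explicitly:
 *
 *      vm_map_copy_t copy;
 *      vm_map_address_t dst_addr;
 *      kern_return_t kr;
 *
 *      kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
 *      if (kr != KERN_SUCCESS)
 *              return kr;
 *      kr = vm_map_copyout(dst_map, &dst_addr, copy);
 *      if (kr != KERN_SUCCESS)
 *              vm_map_copy_discard(copy);
 */
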
extern kern_return_t vm_map_copyin_common(
    vm_map_t src_map,
    vm_map_address_t src_addr,
    vm_map_size_t len,
    boolean_t src_destroy,
    boolean_t src_volatile,
    vm_map_copy_t *copy_result, /* OUT */
    boolean_t use_maxprot);

#define VM_MAP_COPYIN_SRC_DESTROY        0x00000001
#define VM_MAP_COPYIN_USE_MAXPROT        0x00000002
#define VM_MAP_COPYIN_ENTRY_LIST         0x00000004
#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
#define VM_MAP_COPYIN_ALL_FLAGS          0x0000000F
extern kern_return_t vm_map_copyin_internal(
    vm_map_t src_map,
    vm_map_address_t src_addr,
    vm_map_size_t len,
    int flags,
    vm_map_copy_t *copy_result); /* OUT */

extern kern_return_t vm_map_copy_extract(
    vm_map_t src_map,
    vm_map_address_t src_addr,
    vm_map_size_t len,
    vm_prot_t required_prot,
    boolean_t copy,
    vm_map_copy_t *copy_result, /* OUT */
    vm_prot_t *cur_prot,        /* OUT */
    vm_prot_t *max_prot,        /* OUT */
    vm_inherit_t inheritance,
    vm_map_kernel_flags_t vmk_flags);


extern void vm_map_disable_NX(
    vm_map_t map);

extern void vm_map_disallow_data_exec(
    vm_map_t map);

extern void vm_map_set_64bit(
    vm_map_t map);

extern void vm_map_set_32bit(
    vm_map_t map);

extern void vm_map_set_jumbo(
    vm_map_t map);

extern void vm_map_set_jit_entitled(
    vm_map_t map);

extern void vm_map_set_max_addr(
    vm_map_t map, vm_map_offset_t new_max_offset);

extern boolean_t vm_map_has_hard_pagezero(
    vm_map_t map,
    vm_map_offset_t pagezero_size);
extern void vm_commit_pagezero_status(vm_map_t tmap);

#ifdef __arm__
static inline boolean_t
vm_map_is_64bit(__unused vm_map_t map)
{
    return 0;
}
#else
extern boolean_t vm_map_is_64bit(
    vm_map_t map);
#endif


extern kern_return_t vm_map_raise_max_offset(
    vm_map_t map,
    vm_map_offset_t new_max_offset);

extern kern_return_t vm_map_raise_min_offset(
    vm_map_t map,
    vm_map_offset_t new_min_offset);
#if !CONFIG_EMBEDDED
extern void vm_map_set_high_start(
    vm_map_t map,
    vm_map_offset_t high_start);
#endif

extern vm_map_offset_t vm_compute_max_offset(
    boolean_t is64);

extern void vm_map_get_max_aslr_slide_section(
    vm_map_t map,
    int64_t *max_sections,
    int64_t *section_size);

extern uint64_t vm_map_get_max_aslr_slide_pages(
    vm_map_t map);

extern uint64_t vm_map_get_max_loader_aslr_slide_pages(
    vm_map_t map);

extern void vm_map_set_user_wire_limit(
    vm_map_t map,
    vm_size_t limit);

extern void vm_map_switch_protect(
    vm_map_t map,
    boolean_t val);

extern void vm_map_iokit_mapped_region(
    vm_map_t map,
    vm_size_t bytes);

extern void vm_map_iokit_unmapped_region(
    vm_map_t map,
    vm_size_t bytes);


extern boolean_t first_free_is_valid(vm_map_t);

extern int vm_map_page_shift(
    vm_map_t map);

extern vm_map_offset_t vm_map_page_mask(
    vm_map_t map);

extern int vm_map_page_size(
    vm_map_t map);

extern vm_map_offset_t vm_map_round_page_mask(
    vm_map_offset_t offset,
    vm_map_offset_t mask);

extern vm_map_offset_t vm_map_trunc_page_mask(
    vm_map_offset_t offset,
    vm_map_offset_t mask);

extern boolean_t vm_map_page_aligned(
    vm_map_offset_t offset,
    vm_map_offset_t mask);

static inline int
vm_map_range_overflows(vm_map_offset_t addr, vm_map_size_t size)
{
    vm_map_offset_t sum;
    return os_add_overflow(addr, size, &sum);
}

static inline int
mach_vm_range_overflows(mach_vm_offset_t addr, mach_vm_size_t size)
{
    mach_vm_offset_t sum;
    return os_add_overflow(addr, size, &sum);
}
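
/*
 * Illustrative sketch (editorial addition): these helpers sanitize
 * (addr, size) pairs from userspace before any page rounding, since
 * addr + size can wrap around the address space:
 *
 *      if (vm_map_range_overflows(start, size))
 *              return KERN_INVALID_ARGUMENT;
 *      end = vm_map_round_page(start + size, VM_MAP_PAGE_MASK(map));
 */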

#ifdef XNU_KERNEL_PRIVATE

#if XNU_TARGET_OS_OSX
extern void vm_map_mark_alien(vm_map_t map);
#endif /* XNU_TARGET_OS_OSX */

extern kern_return_t vm_map_page_info(
    vm_map_t map,
    vm_map_offset_t offset,
    vm_page_info_flavor_t flavor,
    vm_page_info_t info,
    mach_msg_type_number_t *count);
extern kern_return_t vm_map_page_range_info_internal(
    vm_map_t map,
    vm_map_offset_t start_offset,
    vm_map_offset_t end_offset,
    int effective_page_shift,
    vm_page_info_flavor_t flavor,
    vm_page_info_t info,
    mach_msg_type_number_t *count);
#endif /* XNU_KERNEL_PRIVATE */


#ifdef MACH_KERNEL_PRIVATE

/*
 * Macros to invoke vm_map_copyin_common.  vm_map_copyin is the
 * usual form; it performs a copyin based on the current protection
 * (a region whose current protection is VM_PROT_NONE fails).
 * vm_map_copyin_maxprot performs a copyin based on the maximum
 * possible access.  The difference is that a region with no current
 * access BUT possible maximum access is rejected by vm_map_copyin(),
 * but returned by vm_map_copyin_maxprot.
 */
#define vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \
    vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
        FALSE, copy_result, FALSE)

#define vm_map_copyin_maxprot(src_map, \
        src_addr, len, src_destroy, copy_result) \
    vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
        FALSE, copy_result, TRUE)


/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_PAGE_SHIFT(map) ((map) ? (map)->hdr.page_shift : PAGE_SHIFT)
#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x, pgmask) (((x) & (pgmask)) == 0)

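/*
 * Worked example (editorial addition): for a map with 16K pages,
 * VM_MAP_PAGE_SHIFT(map) == 14, VM_MAP_PAGE_SIZE(map) == 0x4000 and
 * VM_MAP_PAGE_MASK(map) == 0x3FFF, so:
 *
 *      VM_MAP_ROUND_PAGE(0x4001, 0x3FFF)   == 0x8000
 *      VM_MAP_TRUNC_PAGE(0x7FFF, 0x3FFF)   == 0x4000
 *      VM_MAP_PAGE_ALIGNED(0x8000, 0x3FFF) == TRUE
 */
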
static inline bool
VM_MAP_IS_EXOTIC(
    vm_map_t map __unused)
{
#if __arm64__
    if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT ||
        pmap_is_exotic(map->pmap)) {
        return true;
    }
#endif /* __arm64__ */
    return false;
}

static inline bool
VM_MAP_IS_ALIEN(
    vm_map_t map __unused)
{
    /*
     * An "alien" process/task/map/pmap should mostly behave
     * as it currently would on iOS.
     */
#if XNU_TARGET_OS_OSX
    if (map->is_alien) {
        return true;
    }
    return false;
#else /* XNU_TARGET_OS_OSX */
    return true;
#endif /* XNU_TARGET_OS_OSX */
}

static inline bool
VM_MAP_POLICY_WX_FAIL(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map)) {
        return false;
    }
    return true;
}

static inline bool
VM_MAP_POLICY_WX_STRIP_X(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map)) {
        return true;
    }
    return false;
}

static inline bool
VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map)) {
        return false;
    }
    return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(
    vm_map_t map)
{
    return VM_MAP_IS_ALIEN(map);
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_INHERIT(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map)) {
        return false;
    }
    return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_SHARING(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map)) {
        return false;
    }
    return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_COPY(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map)) {
        return false;
    }
    return true;
}

static inline bool
VM_MAP_POLICY_WRITABLE_SHARED_REGION(
    vm_map_t map __unused)
{
#if __x86_64__
    return true;
#else /* __x86_64__ */
    if (VM_MAP_IS_EXOTIC(map)) {
        return true;
    }
    return false;
#endif /* __x86_64__ */
}

static inline void
vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
{
    switch (prot) {
    case MAP_MEM_NOOP:                      break;
    case MAP_MEM_IO:                        *wimg = VM_WIMG_IO; break;
    case MAP_MEM_COPYBACK:                  *wimg = VM_WIMG_USE_DEFAULT; break;
    case MAP_MEM_INNERWBACK:                *wimg = VM_WIMG_INNERWBACK; break;
    case MAP_MEM_POSTED:                    *wimg = VM_WIMG_POSTED; break;
    case MAP_MEM_POSTED_REORDERED:          *wimg = VM_WIMG_POSTED_REORDERED; break;
    case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break;
    case MAP_MEM_WTHRU:                     *wimg = VM_WIMG_WTHRU; break;
    case MAP_MEM_WCOMB:                     *wimg = VM_WIMG_WCOMB; break;
    case MAP_MEM_RT:                        *wimg = VM_WIMG_RT; break;
    default:                                break;
    }
}

#endif /* MACH_KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
extern bool vm_map_is_exotic(vm_map_t map);
extern bool vm_map_is_alien(vm_map_t map);
#endif /* XNU_KERNEL_PRIVATE */

#define vm_map_round_page(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define vm_map_trunc_page(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Flags for vm_map_remove() and vm_map_delete()
 */
#define VM_MAP_REMOVE_NO_FLAGS          0x0
#define VM_MAP_REMOVE_KUNWIRE           0x1
#define VM_MAP_REMOVE_INTERRUPTIBLE     0x2
#define VM_MAP_REMOVE_WAIT_FOR_KWIRE    0x4
#define VM_MAP_REMOVE_SAVE_ENTRIES      0x8
#define VM_MAP_REMOVE_NO_PMAP_CLEANUP   0x10
#define VM_MAP_REMOVE_NO_MAP_ALIGN      0x20
#define VM_MAP_REMOVE_NO_UNNESTING      0x40
#define VM_MAP_REMOVE_IMMUTABLE         0x80
#define VM_MAP_REMOVE_GAPS_OK           0x100

/* Support for UPLs from vm_maps */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_get_upl(
    vm_map_t target_map,
    vm_map_offset_t map_offset,
    upl_size_t *size,
    upl_t *upl,
    upl_page_info_array_t page_info,
    unsigned int *page_infoCnt,
    upl_control_flags_t *flags,
    vm_tag_t tag,
    int force_data_sync);

#endif /* XNU_KERNEL_PRIVATE */

extern void
vm_map_sizes(vm_map_t map,
    vm_map_size_t *psize,
    vm_map_size_t *pfree,
    vm_map_size_t *plargest_free);

#if CONFIG_DYNAMIC_CODE_SIGNING
extern kern_return_t vm_map_sign(vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end);
#endif

extern kern_return_t vm_map_partial_reap(
    vm_map_t map,
    unsigned int *reclaimed_resident,
    unsigned int *reclaimed_compressed);


#if DEVELOPMENT || DEBUG

extern int vm_map_disconnect_page_mappings(
    vm_map_t map,
    boolean_t);

extern kern_return_t vm_map_inject_error(vm_map_t map, vm_map_offset_t vaddr);

#endif


#if CONFIG_FREEZE

extern kern_return_t vm_map_freeze(
    task_t task,
    unsigned int *purgeable_count,
    unsigned int *wired_count,
    unsigned int *clean_count,
    unsigned int *dirty_count,
    unsigned int dirty_budget,
    unsigned int *shared_count,
    int *freezer_error_code,
    boolean_t eval_only);

#define FREEZER_ERROR_GENERIC                  (-1)
#define FREEZER_ERROR_EXCESS_SHARED_MEMORY     (-2)
#define FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO (-3)
#define FREEZER_ERROR_NO_COMPRESSOR_SPACE      (-4)
#define FREEZER_ERROR_NO_SWAP_SPACE            (-5)

#endif

__END_DECLS

/*
 * In some cases, we don't have a real VM object but still want to return a
 * unique ID (to avoid a memory region looking like shared memory), so build
 * a fake pointer based on the map's ledger and the index of the ledger being
 * reported.
 */
#define VM_OBJECT_ID_FAKE(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id)))

#endif /* KERNEL_PRIVATE */

#endif /* _VM_VM_MAP_H_ */