/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	vm/vm_map.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory map module definitions.
 *
 * Contributors:
 *	avie, dlb, mwyoung
 */

#ifndef _VM_VM_MAP_H_
#define _VM_VM_MAP_H_

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>
#include <mach/sdt.h>
#include <vm/pmap.h>
#include <os/overflow.h>

#ifdef KERNEL_PRIVATE

#include <sys/cdefs.h>

__BEGIN_DECLS

extern void     vm_map_reference(vm_map_t map);
extern vm_map_t current_map(void);

/* Setup reserved areas in a new VM map */
extern kern_return_t vm_map_exec(
	vm_map_t        new_map,
	task_t          task,
	boolean_t       is64bit,
	void            *fsroot,
	cpu_type_t      cpu,
	cpu_subtype_t   cpu_subtype);

__END_DECLS

#ifdef MACH_KERNEL_PRIVATE

#include <task_swapper.h>
#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>
#include <os/refcnt.h>

#define current_map_fast()      (current_thread()->map)
#define current_map()           (current_map_fast())

#include <vm/vm_map_store.h>


/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				used for inter-map copy operations
 */
typedef struct vm_map_entry     *vm_map_entry_t;
#define VM_MAP_ENTRY_NULL       ((vm_map_entry_t) NULL)

/*
 *	Type:		vm_map_object_t [internal use only]
 *
 *	Description:
 *		The target of an address mapping, either a virtual
 *		memory object or a sub map (of the kernel map).
 */
typedef union vm_map_object {
	vm_object_t     vmo_object;     /* the mapped VM object */
	vm_map_t        vmo_submap;     /* belongs to another map */
} vm_map_object_t;

#define named_entry_lock_init(object)   lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object)        lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)      lck_mtx_unlock(&(object)->Lock)
#if VM_NAMED_ENTRY_LIST
extern queue_head_t vm_named_entry_list;
#endif /* VM_NAMED_ENTRY_LIST */

/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions, (read, write)
 *		with which it can be mapped.
 */

struct vm_named_entry {
	decl_lck_mtx_data(, Lock);              /* Synchronization */
	union {
		vm_object_t     object;         /* object I point to */
		vm_map_t        map;            /* map backing submap */
		vm_map_copy_t   copy;           /* a VM map copy */
	} backing;
	vm_object_offset_t      offset;         /* offset into object */
	vm_object_size_t        size;           /* size of region */
	vm_object_offset_t      data_offset;    /* offset to first byte of data */
	vm_prot_t               protection;     /* access permissions */
	int                     ref_count;      /* Number of references */
	unsigned int                            /* Is backing.xxx : */
	/* boolean_t */ internal:1,             /* ... an internal object */
	/* boolean_t */ is_sub_map:1,           /* ... a submap? */
	/* boolean_t */ is_copy:1;              /* ... a VM map copy */
#if VM_NAMED_ENTRY_LIST
	queue_chain_t           named_entry_list;
	int                     named_entry_alias;
	mach_port_t             named_entry_port;
#define NAMED_ENTRY_BT_DEPTH 16
	void                    *named_entry_bt[NAMED_ENTRY_BT_DEPTH];
#endif /* VM_NAMED_ENTRY_LIST */
};

/*
 *	Type:		vm_map_entry_t [internal use only]
 *
 *	Description:
 *		A single mapping within an address map.
 *
 *	Implementation:
 *		Address map entries consist of start and end addresses,
 *		a VM object (or sub map) and offset into that object,
 *		and user-exported inheritance and protection information.
 *		Control information for virtual copy operations is also
 *		stored in the address map entry.
 */

struct vm_map_links {
	struct vm_map_entry     *prev;          /* previous entry */
	struct vm_map_entry     *next;          /* next entry */
	vm_map_offset_t         start;          /* start address */
	vm_map_offset_t         end;            /* end address */
};

/*
 * FOOTPRINT ACCOUNTING:
 * The "memory footprint" is better described in the pmap layer.
 *
 * At the VM level, these 2 vm_map_entry_t fields are relevant:
 * iokit_mapped:
 *	For an "iokit_mapped" entry, we add the size of the entry to the
 *	footprint when the entry is entered into the map and we subtract that
 *	size when the entry is removed.  No other accounting should take place.
 *	"use_pmap" should be FALSE but is not taken into account.
 * use_pmap: (only when is_sub_map is FALSE)
 *	This indicates if we should ask the pmap layer to account for pages
 *	in this mapping.  If FALSE, we expect that another form of accounting
 *	is being used (e.g. "iokit_mapped" or the explicit accounting of
 *	non-volatile purgable memory).
 *
 * So the logic is mostly:
 * if entry->is_sub_map == TRUE
 *	anything in a submap does not count for the footprint
 * else if entry->iokit_mapped == TRUE
 *	footprint includes the entire virtual size of this entry
 * else if entry->use_pmap == FALSE
 *	tell pmap NOT to account for pages being pmap_enter()'d from this
 *	mapping (i.e. use "alternate accounting")
 * else
 *	pmap will account for pages being pmap_enter()'d from this mapping
 *	as it sees fit (only if anonymous, etc...)
 */

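/*
 * Illustrative sketch (not part of XNU): the decision chain above,
 * written out as a hypothetical helper.  "iokit_mapped" in the comment
 * corresponds to the "iokit_acct" bit in struct vm_map_entry below;
 * the helper name is invented for exposition only.
 *
 *	static boolean_t
 *	vme_counts_toward_footprint(vm_map_entry_t entry)
 *	{
 *		if (entry->is_sub_map) {
 *			return FALSE;    // submap contents never count
 *		}
 *		if (entry->iokit_acct) {
 *			return TRUE;     // entire virtual size counted
 *		}
 *		if (!entry->use_pmap) {
 *			return FALSE;    // "alternate accounting" in use
 *		}
 *		return TRUE;             // pmap-level accounting applies
 *	}
 */
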
struct vm_map_entry {
	struct vm_map_links     links;          /* links to other entries */
#define vme_prev                links.prev
#define vme_next                links.next
#define vme_start               links.start
#define vme_end                 links.end

	struct vm_map_store     store;
	union vm_map_object     vme_object;     /* object I point to */
	vm_object_offset_t      vme_offset;     /* offset into object */

	unsigned int
	/* boolean_t */ is_shared:1,            /* region is shared */
	/* boolean_t */ is_sub_map:1,           /* Is "object" a submap? */
	/* boolean_t */ in_transition:1,        /* Entry being changed */
	/* boolean_t */ needs_wakeup:1,         /* Waiters on in_transition */
	/* vm_behavior_t */ behavior:2,         /* user paging behavior hint */
	/* behavior is not defined for submap type */
	/* boolean_t */ needs_copy:1,           /* object need to be copied? */

	/* Only in task maps: */
	/* vm_prot_t */ protection:3,           /* protection code */
	/* vm_prot_t */ max_protection:3,       /* maximum protection */
	/* vm_inherit_t */ inheritance:2,       /* inheritance */
	/* boolean_t */ use_pmap:1,             /*
	                                         * use_pmap is overloaded:
	                                         * if "is_sub_map":
	                                         *	use a nested pmap?
	                                         * else (i.e. if object):
	                                         *	use pmap accounting
	                                         *	for footprint?
	                                         */
	/* boolean_t */ no_cache:1,             /* should new pages be cached? */
	/* boolean_t */ permanent:1,            /* mapping cannot be removed */
	/* boolean_t */ superpage_size:1,       /* use superpages of a certain size */
	/* boolean_t */ map_aligned:1,          /* align to map's page size */
	/* boolean_t */ zero_wired_pages:1,     /* zero out the wired pages of
	                                         * this entry if it is being deleted
	                                         * without unwiring them */
	/* boolean_t */ used_for_jit:1,
	/* boolean_t */ pmap_cs_associated:1,   /* pmap_cs will validate */
	/* boolean_t */ from_reserved_zone:1,   /* Allocated from
	                                         * kernel reserved zone */

	/* iokit accounting: use the virtual size rather than resident size: */
	/* boolean_t */ iokit_acct:1,
	/* boolean_t */ vme_resilient_codesign:1,
	/* boolean_t */ vme_resilient_media:1,
	/* boolean_t */ vme_atomic:1,           /* entry cannot be split/coalesced */
	/* boolean_t */ vme_no_copy_on_read:1,
	__unused:3;

	unsigned short          wired_count;    /* can be paged if = 0 */
	unsigned short          user_wired_count; /* for vm_wire */
#if DEBUG
#define MAP_ENTRY_CREATION_DEBUG (1)
#define MAP_ENTRY_INSERTION_DEBUG (1)
#endif
#if MAP_ENTRY_CREATION_DEBUG
	struct vm_map_header    *vme_creation_maphdr;
	uintptr_t               vme_creation_bt[16];
#endif
#if MAP_ENTRY_INSERTION_DEBUG
	uintptr_t               vme_insertion_bt[16];
#endif
};

#define VME_SUBMAP_PTR(entry)                   \
	(&((entry)->vme_object.vmo_submap))
#define VME_SUBMAP(entry)                       \
	((vm_map_t)((uintptr_t)0 + *VME_SUBMAP_PTR(entry)))
#define VME_OBJECT_PTR(entry)                   \
	(&((entry)->vme_object.vmo_object))
#define VME_OBJECT(entry)                       \
	((vm_object_t)((uintptr_t)0 + *VME_OBJECT_PTR(entry)))
#define VME_OFFSET(entry)                       \
	((entry)->vme_offset & ~PAGE_MASK)
#define VME_ALIAS_MASK                          (PAGE_MASK)
#define VME_ALIAS(entry)                        \
	((unsigned int)((entry)->vme_offset & VME_ALIAS_MASK))

static inline void
VME_OBJECT_SET(
	vm_map_entry_t entry,
	vm_object_t object)
{
	entry->vme_object.vmo_object = object;
	if (object != VM_OBJECT_NULL && !object->internal) {
		entry->vme_resilient_media = FALSE;
	}
	entry->vme_resilient_codesign = FALSE;
	entry->used_for_jit = FALSE;
}
static inline void
VME_SUBMAP_SET(
	vm_map_entry_t entry,
	vm_map_t submap)
{
	entry->vme_object.vmo_submap = submap;
}
static inline void
VME_OFFSET_SET(
	vm_map_entry_t entry,
	vm_map_offset_t offset)
{
	int alias;
	alias = VME_ALIAS(entry);
	assert((offset & PAGE_MASK) == 0);
	entry->vme_offset = offset | alias;
}
/*
 * IMPORTANT:
 * The "alias" field can be updated while holding the VM map lock
 * "shared".  It's OK as long as it's the only field that can be
 * updated without the VM map "exclusive" lock.
 */
static inline void
VME_ALIAS_SET(
	vm_map_entry_t entry,
	int alias)
{
	vm_map_offset_t offset;
	offset = VME_OFFSET(entry);
	entry->vme_offset = offset | (alias & VME_ALIAS_MASK);
}

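/*
 * Illustrative sketch (not part of XNU): "vme_offset" packs the
 * page-aligned object offset together with a small alias/tag in the
 * low PAGE_MASK bits, so both fit in one word.  VM_MEMORY_MALLOC is
 * the user-tag constant from <mach/vm_statistics.h>; the concrete
 * offset value is an assumption for the example.
 *
 *	VME_OFFSET_SET(entry, 0x5000);          // must be page-aligned
 *	VME_ALIAS_SET(entry, VM_MEMORY_MALLOC); // tag in low bits
 *	assert(VME_OFFSET(entry) == 0x5000);
 *	assert(VME_ALIAS(entry) == VM_MEMORY_MALLOC);
 */
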
static inline void
VME_OBJECT_SHADOW(
	vm_map_entry_t entry,
	vm_object_size_t length)
{
	vm_object_t object;
	vm_object_offset_t offset;

	object = VME_OBJECT(entry);
	offset = VME_OFFSET(entry);
	vm_object_shadow(&object, &offset, length);
	if (object != VME_OBJECT(entry)) {
		VME_OBJECT_SET(entry, object);
		entry->use_pmap = TRUE;
	}
	if (offset != VME_OFFSET(entry)) {
		VME_OFFSET_SET(entry, offset);
	}
}


/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) (a & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) ((a + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)

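/*
 * Example (assuming SUPERPAGE_NBASEPAGES == 512 with 4K base pages,
 * i.e. 2MB superpages, as on x86_64):
 *
 *	SUPERPAGE_ROUND_DOWN(0x212345) == 0x200000
 *	SUPERPAGE_ROUND_UP(0x212345)   == 0x400000
 */
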
/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT          65535



/*
 *	Type:		struct vm_map_header
 *
 *	Description:
 *		Header for a vm_map and a vm_map_copy.
 */


struct vm_map_header {
	struct vm_map_links     links;          /* first, last, min, max */
	int                     nentries;       /* Number of entries */
	boolean_t               entries_pageable;
	                                        /* are map entries pageable? */
#ifdef VM_MAP_STORE_USE_RB
	struct rb_head          rb_head_store;
#endif
	int                     page_shift;     /* page shift */
};

#define VM_MAP_HDR_PAGE_SHIFT(hdr)      ((hdr)->page_shift)
#define VM_MAP_HDR_PAGE_SIZE(hdr)       (1 << VM_MAP_HDR_PAGE_SHIFT((hdr)))
#define VM_MAP_HDR_PAGE_MASK(hdr)       (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1)

/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 */
struct _vm_map {
	lck_rw_t                lock;           /* map lock */
	struct vm_map_header    hdr;            /* Map entry header */
#define min_offset              hdr.links.start /* start of range */
#define max_offset              hdr.links.end   /* end of range */
	pmap_t                  pmap;           /* Physical map */
	vm_map_size_t           size;           /* virtual size */
	vm_map_size_t           user_wire_limit;/* rlimit on user locked memory */
	vm_map_size_t           user_wire_size; /* current size of user locked memory in this map */
#if !CONFIG_EMBEDDED
	vm_map_offset_t         vmmap_high_start;
#endif

	union {
		/*
		 * If map->disable_vmentry_reuse == TRUE:
		 * the end address of the highest allocated vm_map_entry_t.
		 */
		vm_map_offset_t vmu1_highest_entry_end;
		/*
		 * For a nested VM map:
		 * the lowest address in this nested VM map that we would
		 * expect to be unnested under normal operation (i.e. for
		 * regular copy-on-write on DATA section).
		 */
		vm_map_offset_t vmu1_lowest_unnestable_start;
	} vmu1;
#define highest_entry_end       vmu1.vmu1_highest_entry_end
#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start
	decl_lck_mtx_data(, s_lock);            /* Lock ref, res fields */
	lck_mtx_ext_t           s_lock_ext;
	vm_map_entry_t          hint;           /* hint for quick lookups */
	union {
		struct vm_map_links *vmmap_hole_hint;   /* hint for quick hole lookups */
		struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
	} vmmap_u_1;
#define hole_hint               vmmap_u_1.vmmap_hole_hint
#define vmmap_corpse_footprint  vmmap_u_1.vmmap_corpse_footprint
	union {
		vm_map_entry_t      _first_free;    /* First free space hint */
		struct vm_map_links *_holes;        /* links all holes between entries */
	} f_s;                                  /* Union for free space data structures being used */

#define first_free              f_s._first_free
#define holes_list              f_s._holes

	struct os_refcnt        map_refcnt;     /* Reference count */

#if TASK_SWAPPER
	int                     res_count;      /* Residence count (swap) */
	int                     sw_state;       /* Swap state */
#endif  /* TASK_SWAPPER */

	unsigned int
	/* boolean_t */ wait_for_space:1,       /* Should callers wait for space? */
	/* boolean_t */ wiring_required:1,      /* All memory wired? */
	/* boolean_t */ no_zero_fill:1,         /* No zero fill absent pages */
	/* boolean_t */ mapped_in_other_pmaps:1, /* has this submap been mapped in maps that use a different pmap */
	/* boolean_t */ switch_protect:1,       /* Protect map from write faults while switched */
	/* boolean_t */ disable_vmentry_reuse:1, /* All vm entries should keep using newer and higher addresses in the map */
	/* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
	/* boolean_t */ holelistenabled:1,
	/* boolean_t */ is_nested_map:1,
	/* boolean_t */ map_disallow_new_exec:1, /* Disallow new executable code */
	/* boolean_t */ jit_entry_exists:1,
	/* boolean_t */ has_corpse_footprint:1,
	/* boolean_t */ terminated:1,
	/* reserved */ pad:19;
	unsigned int            timestamp;      /* Version number */
};

#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
#define vm_map_to_entry(map)    CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
#define vm_map_first_entry(map) ((map)->hdr.links.next)
#define vm_map_last_entry(map)  ((map)->hdr.links.prev)

#if TASK_SWAPPER
/*
 * VM map swap states.  There are no transition states.
 */
#define MAP_SW_IN       1       /* map is swapped in; residence count > 0 */
#define MAP_SW_OUT      2       /* map is out (res_count == 0) */
#endif  /* TASK_SWAPPER */

/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage note:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int    main_timestamp;
} vm_map_version_t;

/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 *		There are three formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header links
 *		entry onto which the other entries that represent
 *		the region are chained.
 *
 *		The second format is a single vm object.  This was used
 *		primarily in the pageout path - but is not currently used
 *		except for placeholder copy objects (see vm_map_copy_copy()).
 *
 *		The third format is a kernel buffer copy object - for data
 *		small enough that physical copies were the most efficient
 *		method.  This method uses a zero-sized array unioned with
 *		other format-specific data in the 'c_u' member.  This unsized
 *		array overlaps the other elements and allows us to use this
 *		extra structure space for physical memory copies.  On 64-bit
 *		systems this saves ~64 bytes per vm_map_copy.
 */

struct vm_map_copy {
	int                     type;
#define VM_MAP_COPY_ENTRY_LIST          1
#define VM_MAP_COPY_OBJECT              2
#define VM_MAP_COPY_KERNEL_BUFFER       3
	vm_object_offset_t      offset;
	vm_map_size_t           size;
	union {
		struct vm_map_header    hdr;      /* ENTRY_LIST */
		vm_object_t             object;   /* OBJECT */
		uint8_t                 kdata[0]; /* KERNEL_BUFFER */
	} c_u;
};


#define cpy_hdr                 c_u.hdr

#define cpy_object              c_u.object
#define cpy_kdata               c_u.kdata
#define cpy_kdata_hdr_sz        (offsetof(struct vm_map_copy, c_u.kdata))

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)

/*
 * Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy)           \
	((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy)            \
	((copy)->cpy_hdr.links.prev)

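/*
 * Illustrative sketch (not part of XNU): walking the entries of an
 * ENTRY_LIST-format copy object with the macros above.  As with a
 * map, the header's links act as a sentinel terminating the list:
 *
 *	vm_map_entry_t entry;
 *
 *	assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
 *	for (entry = vm_map_copy_first_entry(copy);
 *	     entry != vm_map_copy_to_entry(copy);
 *	     entry = entry->vme_next) {
 *		// each entry spans [vme_start, vme_end)
 *	}
 */
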
/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *	Description:
 *		Perform locking on the data portion of a map.
 *		When multiple maps are to be locked, order by map address.
 *		(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map)                                           \
	((map)->timestamp = 0,                                          \
	lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))

#define vm_map_lock(map)                        \
	MACRO_BEGIN                             \
	DTRACE_VM(vm_map_lock_w);               \
	lck_rw_lock_exclusive(&(map)->lock);    \
	MACRO_END

#define vm_map_unlock(map)                      \
	MACRO_BEGIN                             \
	DTRACE_VM(vm_map_unlock_w);             \
	(map)->timestamp++;                     \
	lck_rw_done(&(map)->lock);              \
	MACRO_END

#define vm_map_lock_read(map)                   \
	MACRO_BEGIN                             \
	DTRACE_VM(vm_map_lock_r);               \
	lck_rw_lock_shared(&(map)->lock);       \
	MACRO_END

#define vm_map_unlock_read(map)                 \
	MACRO_BEGIN                             \
	DTRACE_VM(vm_map_unlock_r);             \
	lck_rw_done(&(map)->lock);              \
	MACRO_END

#define vm_map_lock_write_to_read(map)                  \
	MACRO_BEGIN                                     \
	DTRACE_VM(vm_map_lock_downgrade);               \
	(map)->timestamp++;                             \
	lck_rw_lock_exclusive_to_shared(&(map)->lock);  \
	MACRO_END

__attribute__((always_inline))
int vm_map_lock_read_to_write(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock_read(vm_map_t map);

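/*
 * Illustrative sketch (not part of XNU): the read-to-write upgrade
 * returns nonzero on failure, and on failure the lock has been
 * dropped entirely, so any state observed under the shared lock must
 * be re-validated.  A common shape of the pattern in vm_map.c:
 *
 *	vm_map_lock_read(map);
 *	// ... lookups under the shared lock ...
 *	if (vm_map_lock_read_to_write(map)) {
 *		// upgrade failed: lock was released, the map may have
 *		// changed; take the exclusive lock and start over
 *		vm_map_lock(map);
 *		// ... redo the lookups ...
 *	}
 *	// exclusive lock held here
 *	vm_map_unlock(map);
 */
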
#if MACH_ASSERT || DEBUG
#define vm_map_lock_assert_held(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD)
#define vm_map_lock_assert_shared(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_SHARED)
#define vm_map_lock_assert_exclusive(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_map_lock_assert_notheld(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
#else  /* MACH_ASSERT || DEBUG */
#define vm_map_lock_assert_held(map)
#define vm_map_lock_assert_shared(map)
#define vm_map_lock_assert_exclusive(map)
#define vm_map_lock_assert_notheld(map)
#endif /* MACH_ASSERT || DEBUG */

/*
 * Exported procedures that operate on vm_map_t.
 */

/* Initialize the module */
extern void vm_map_init(void);

extern void vm_kernel_reserved_entry_init(void);

/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t vm_map_find_space(
	vm_map_t                map,
	vm_map_address_t        *address,       /* OUT */
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_map_entry_t          *o_entry);      /* OUT */

extern void vm_map_clip_start(
	vm_map_t        map,
	vm_map_entry_t  entry,
	vm_map_offset_t endaddr);
extern void vm_map_clip_end(
	vm_map_t        map,
	vm_map_entry_t  entry,
	vm_map_offset_t endaddr);
extern boolean_t vm_map_entry_should_cow_for_true_share(
	vm_map_entry_t  entry);

/* Look up the map entry containing the specified address in the given map */
extern boolean_t vm_map_lookup_entry(
	vm_map_t        map,
	vm_map_address_t address,
	vm_map_entry_t  *entry);        /* OUT */

extern void vm_map_copy_remap(
	vm_map_t        map,
	vm_map_entry_t  where,
	vm_map_copy_t   copy,
	vm_map_offset_t adjustment,
	vm_prot_t       cur_prot,
	vm_prot_t       max_prot,
	vm_inherit_t    inheritance);

/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t vm_map_lookup_locked(
	vm_map_t        *var_map,       /* IN/OUT */
	vm_map_address_t vaddr,
	vm_prot_t       fault_type,
	int             object_lock_type,
	vm_map_version_t *out_version,  /* OUT */
	vm_object_t     *object,        /* OUT */
	vm_object_offset_t *offset,     /* OUT */
	vm_prot_t       *out_prot,      /* OUT */
	boolean_t       *wired,         /* OUT */
	vm_object_fault_info_t fault_info, /* OUT */
	vm_map_t        *real_map);     /* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t vm_map_verify(
	vm_map_t        map,
	vm_map_version_t *version);     /* REF */

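/*
 * Illustrative sketch (not part of XNU): a version taken at lookup
 * time can later confirm that the map is unchanged, a pattern used
 * by the page-fault path (details elided):
 *
 *	vm_map_version_t version;
 *
 *	// ... vm_map_lookup_locked(..., &version, ...) fills "version"
 *	// map unlocked while slow work happens ...
 *	vm_map_lock_read(map);
 *	if (!vm_map_verify(map, &version)) {
 *		// map changed since the lookup: redo it
 *	}
 */
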
extern vm_map_entry_t vm_map_entry_insert(
	vm_map_t                map,
	vm_map_entry_t          insp_entry,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	boolean_t               is_shared,
	boolean_t               in_transition,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_behavior_t           behavior,
	vm_inherit_t            inheritance,
	unsigned                wired_count,
	boolean_t               no_cache,
	boolean_t               permanent,
	boolean_t               no_copy_on_read,
	unsigned int            superpage_size,
	boolean_t               clear_map_aligned,
	boolean_t               is_submap,
	boolean_t               used_for_jit,
	int                     alias);


/*
 * Functions implemented as macros
 */
#define vm_map_min(map)         ((map)->min_offset)
				/* Lowest valid address in a map */

#define vm_map_max(map)         ((map)->max_offset)
				/* Highest valid address */

#define vm_map_pmap(map)        ((map)->pmap)
				/* Physical map associated with this address map */

/*
 * Macros/functions for map residence counts and swapin/out of vm maps
 */
#if TASK_SWAPPER

#if MACH_ASSERT
/* Gain a reference to an existing map */
extern void vm_map_reference(
	vm_map_t        map);
/* Lose a residence count */
extern void vm_map_res_deallocate(
	vm_map_t        map);
/* Gain a residence count on a map */
extern void vm_map_res_reference(
	vm_map_t        map);
/* Gain reference & residence counts to possibly swapped-out map */
extern void vm_map_reference_swap(
	vm_map_t        map);

#else  /* MACH_ASSERT */

#define vm_map_reference(map)                   \
	MACRO_BEGIN                             \
	vm_map_t Map = (map);                   \
	if (Map) {                              \
		lck_mtx_lock(&Map->s_lock);     \
		Map->res_count++;               \
		os_ref_retain(&Map->map_refcnt); \
		lck_mtx_unlock(&Map->s_lock);   \
	}                                       \
	MACRO_END

#define vm_map_res_reference(map)               \
	MACRO_BEGIN                             \
	vm_map_t Lmap = (map);                  \
	if (Lmap->res_count == 0) {             \
		lck_mtx_unlock(&Lmap->s_lock);  \
		vm_map_lock(Lmap);              \
		vm_map_swapin(Lmap);            \
		lck_mtx_lock(&Lmap->s_lock);    \
		++Lmap->res_count;              \
		vm_map_unlock(Lmap);            \
	} else                                  \
		++Lmap->res_count;              \
	MACRO_END

#define vm_map_res_deallocate(map)              \
	MACRO_BEGIN                             \
	vm_map_t Map = (map);                   \
	if (--Map->res_count == 0) {            \
		lck_mtx_unlock(&Map->s_lock);   \
		vm_map_lock(Map);               \
		vm_map_swapout(Map);            \
		vm_map_unlock(Map);             \
		lck_mtx_lock(&Map->s_lock);     \
	}                                       \
	MACRO_END

#define vm_map_reference_swap(map)              \
	MACRO_BEGIN                             \
	vm_map_t Map = (map);                   \
	lck_mtx_lock(&Map->s_lock);             \
	os_ref_retain(&Map->map_refcnt);        \
	vm_map_res_reference(Map);              \
	lck_mtx_unlock(&Map->s_lock);           \
	MACRO_END
#endif  /* MACH_ASSERT */

extern void vm_map_swapin(
	vm_map_t        map);

extern void vm_map_swapout(
	vm_map_t        map);

#else  /* TASK_SWAPPER */

#define vm_map_reference(map)                   \
	MACRO_BEGIN                             \
	vm_map_t Map = (map);                   \
	if (Map) {                              \
		lck_mtx_lock(&Map->s_lock);     \
		os_ref_retain(&Map->map_refcnt); \
		lck_mtx_unlock(&Map->s_lock);   \
	}                                       \
	MACRO_END

#define vm_map_reference_swap(map)      vm_map_reference(map)
#define vm_map_res_reference(map)
#define vm_map_res_deallocate(map)

#endif  /* TASK_SWAPPER */

/*
 * Submap object.  Must be used to create memory to be put
 * in a submap by vm_map_submap.
 */
extern vm_object_t vm_submap_object;

/*
 * Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible)           \
	((map)->timestamp++,                            \
	lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \
	    (event_t)&(map)->hdr, interruptible))


#define vm_map_entry_wakeup(map)        \
	thread_wakeup((event_t)(&(map)->hdr))


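/*
 * Illustrative sketch (not part of XNU): waiting for an entry that is
 * "in_transition".  The sleep drops the map lock and re-takes it
 * exclusive on wakeup (LCK_SLEEP_EXCLUSIVE), so the entry must be
 * looked up again afterwards:
 *
 *	while (entry->in_transition) {
 *		entry->needs_wakeup = TRUE;     // ask for a wakeup
 *		vm_map_entry_wait(map, THREAD_UNINT);
 *		// map may have changed while unlocked: look up again
 *		if (!vm_map_lookup_entry(map, addr, &entry))
 *			break;
 *	}
 */
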
#define vm_map_ref_fast(map)                    \
	MACRO_BEGIN                             \
	lck_mtx_lock(&map->s_lock);             \
	map->ref_count++;                       \
	vm_map_res_reference(map);              \
	lck_mtx_unlock(&map->s_lock);           \
	MACRO_END

#define vm_map_dealloc_fast(map)                \
	MACRO_BEGIN                             \
	int c;                                  \
	                                        \
	lck_mtx_lock(&map->s_lock);             \
	c = --map->ref_count;                   \
	if (c > 0)                              \
		vm_map_res_deallocate(map);     \
	lck_mtx_unlock(&map->s_lock);           \
	if (c == 0)                             \
		vm_map_destroy(map);            \
	MACRO_END


/* simplify map entries */
extern void vm_map_simplify_entry(
	vm_map_t        map,
	vm_map_entry_t  this_entry);
extern void vm_map_simplify(
	vm_map_t        map,
	vm_map_offset_t start);

/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t vm_map_copy_copy(
	vm_map_copy_t   copy);

/* Create a copy object from an object. */
extern kern_return_t vm_map_copyin_object(
	vm_object_t     object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	vm_map_copy_t   *copy_result);  /* OUT */

extern kern_return_t vm_map_random_address_for_size(
	vm_map_t        map,
	vm_map_offset_t *address,
	vm_map_size_t   size);

/* Enter a mapping */
extern kern_return_t vm_map_enter(
	vm_map_t        map,
	vm_map_offset_t *address,
	vm_map_size_t   size,
	vm_map_offset_t mask,
	int             flags,
	vm_map_kernel_flags_t vmk_flags,
	vm_tag_t        tag,
	vm_object_t     object,
	vm_object_offset_t offset,
	boolean_t       needs_copy,
	vm_prot_t       cur_protection,
	vm_prot_t       max_protection,
	vm_inherit_t    inheritance);

#if __arm64__
extern kern_return_t vm_map_enter_fourk(
	vm_map_t        map,
	vm_map_offset_t *address,
	vm_map_size_t   size,
	vm_map_offset_t mask,
	int             flags,
	vm_map_kernel_flags_t vmk_flags,
	vm_tag_t        tag,
	vm_object_t     object,
	vm_object_offset_t offset,
	boolean_t       needs_copy,
	vm_prot_t       cur_protection,
	vm_prot_t       max_protection,
	vm_inherit_t    inheritance);
#endif /* __arm64__ */

/* XXX should go away - replaced with regular enter of contig object */
extern kern_return_t vm_map_enter_cpm(
	vm_map_t        map,
	vm_map_address_t *addr,
	vm_map_size_t   size,
	int             flags);

extern kern_return_t vm_map_remap(
	vm_map_t        target_map,
	vm_map_offset_t *address,
	vm_map_size_t   size,
	vm_map_offset_t mask,
	int             flags,
	vm_map_kernel_flags_t vmk_flags,
	vm_tag_t        tag,
	vm_map_t        src_map,
	vm_map_offset_t memory_address,
	boolean_t       copy,
	vm_prot_t       *cur_protection,
	vm_prot_t       *max_protection,
	vm_inherit_t    inheritance);


/*
 * Read and write from a kernel buffer to a specified map.
 */
extern kern_return_t vm_map_write_user(
	vm_map_t        map,
	void            *src_p,
	vm_map_offset_t dst_addr,
	vm_size_t       size);

extern kern_return_t vm_map_read_user(
	vm_map_t        map,
	vm_map_offset_t src_addr,
	void            *dst_p,
	vm_size_t       size);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t vm_map_fork(
	ledger_t        ledger,
	vm_map_t        old_map,
	int             options);
#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE       0x00000001
#define VM_MAP_FORK_PRESERVE_PURGEABLE          0x00000002
#define VM_MAP_FORK_CORPSE_FOOTPRINT            0x00000004

/* Change inheritance */
extern kern_return_t vm_map_inherit(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_inherit_t    new_inheritance);

/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t vm_map_machine_attribute(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_machine_attribute_t attribute,
	vm_machine_attribute_val_t *value); /* IN/OUT */

extern kern_return_t vm_map_msync(
	vm_map_t        map,
	vm_map_address_t address,
	vm_map_size_t   size,
	vm_sync_t       sync_flags);

/* Set paging behavior */
extern kern_return_t vm_map_behavior_set(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_behavior_t   new_behavior);

extern kern_return_t vm_map_region(
	vm_map_t        map,
	vm_map_offset_t *address,
	vm_map_size_t   *size,
	vm_region_flavor_t flavor,
	vm_region_info_t info,
	mach_msg_type_number_t *count,
	mach_port_t     *object_name);

extern kern_return_t vm_map_region_recurse_64(
	vm_map_t        map,
	vm_map_offset_t *address,
	vm_map_size_t   *size,
	natural_t       *nesting_depth,
	vm_region_submap_info_64_t info,
	mach_msg_type_number_t *count);

extern kern_return_t vm_map_page_query_internal(
	vm_map_t        map,
	vm_map_offset_t offset,
	int             *disposition,
	int             *ref_count);

extern kern_return_t vm_map_query_volatile(
	vm_map_t        map,
	mach_vm_size_t  *volatile_virtual_size_p,
	mach_vm_size_t  *volatile_resident_size_p,
	mach_vm_size_t  *volatile_compressed_size_p,
	mach_vm_size_t  *volatile_pmap_size_p,
	mach_vm_size_t  *volatile_compressed_pmap_size_p);

extern kern_return_t vm_map_submap(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_map_t        submap,
	vm_map_offset_t offset,
	boolean_t       use_pmap);

extern void vm_map_submap_pmap_clean(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_map_t        sub_map,
	vm_map_offset_t offset);

/* Convert from a map entry port to a map */
extern vm_map_t convert_port_entry_to_map(
	ipc_port_t      port);

/* Convert from a port to a vm_object */
extern vm_object_t convert_port_entry_to_object(
	ipc_port_t      port);


extern kern_return_t vm_map_set_cache_attr(
	vm_map_t        map,
	vm_map_offset_t va);


/* definitions related to overriding the NX behavior */

#define VM_ABI_32       0x1
#define VM_ABI_64       0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);

#if PMAP_CS
extern kern_return_t vm_map_entry_cs_associate(
	vm_map_t        map,
	vm_map_entry_t  entry,
	vm_map_kernel_flags_t vmk_flags);
#endif /* PMAP_CS */

extern void vm_map_region_top_walk(
	vm_map_entry_t  entry,
	vm_region_top_info_t top);
extern void vm_map_region_walk(
	vm_map_t        map,
	vm_map_offset_t va,
	vm_map_entry_t  entry,
	vm_object_offset_t offset,
	vm_object_size_t range,
	vm_region_extended_info_t extended,
	boolean_t       look_for_pages,
	mach_msg_type_number_t count);


struct vm_map_corpse_footprint_header {
	vm_size_t       cf_size;        /* allocated buffer size */
	uint32_t        cf_last_region; /* offset of last region in buffer */
	union {
		uint32_t cfu_last_zeroes; /* during creation:
		                           * number of "zero" dispositions at
		                           * end of last region */
		uint32_t cfu_hint_region; /* during lookup:
		                           * offset of last looked up region */
#define cf_last_zeroes cfu.cfu_last_zeroes
#define cf_hint_region cfu.cfu_hint_region
	} cfu;
};
struct vm_map_corpse_footprint_region {
	vm_map_offset_t cfr_vaddr;      /* region start virtual address */
	uint32_t        cfr_num_pages;  /* number of pages in this "region" */
	unsigned char   cfr_disposition[0]; /* disposition of each page */
} __attribute__((packed));

extern kern_return_t vm_map_corpse_footprint_collect(
	vm_map_t        old_map,
	vm_map_entry_t  old_entry,
	vm_map_t        new_map);
extern void vm_map_corpse_footprint_collect_done(
	vm_map_t        new_map);

extern kern_return_t vm_map_corpse_footprint_query_page_info(
	vm_map_t        map,
	vm_map_offset_t va,
	int             *disp);

extern void vm_map_copy_footprint_ledgers(
	task_t          old_task,
	task_t          new_task);
extern void vm_map_copy_ledger(
	task_t          old_task,
	task_t          new_task,
	int             ledger_entry);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

/* Create an empty map */
extern vm_map_t vm_map_create(
	pmap_t          pmap,
	vm_map_offset_t min_off,
	vm_map_offset_t max_off,
	boolean_t       pageable);
extern vm_map_t vm_map_create_options(
	pmap_t          pmap,
	vm_map_offset_t min_off,
	vm_map_offset_t max_off,
	int             options);
#define VM_MAP_CREATE_PAGEABLE          0x00000001
#define VM_MAP_CREATE_CORPSE_FOOTPRINT  0x00000002
#define VM_MAP_CREATE_ALL_OPTIONS (VM_MAP_CREATE_PAGEABLE | \
	                           VM_MAP_CREATE_CORPSE_FOOTPRINT)

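/*
 * Illustrative sketch (not part of XNU): creating a map with pageable
 * entries over a given range.  "some_pmap" and the address bounds are
 * assumptions for the example:
 *
 *	vm_map_t map;
 *
 *	map = vm_map_create_options(some_pmap,
 *	    0x100000000ULL,             // min_off (hypothetical)
 *	    0x200000000ULL,             // max_off (hypothetical)
 *	    VM_MAP_CREATE_PAGEABLE);
 */
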
extern void vm_map_disable_hole_optimization(vm_map_t map);

/* Get rid of a map */
extern void vm_map_destroy(
	vm_map_t        map,
	int             flags);

/* Lose a reference */
extern void vm_map_deallocate(
	vm_map_t        map);

extern vm_map_t vm_map_switch(
	vm_map_t        map);

/* Change protection */
extern kern_return_t vm_map_protect(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       new_prot,
	boolean_t       set_max);

/* Check protection */
extern boolean_t vm_map_check_protection(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       protection);

/* wire down a region */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_wire_kernel(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       access_type,
	vm_tag_t        tag,
	boolean_t       user_wire);

extern kern_return_t vm_map_wire_and_extract_kernel(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_prot_t       access_type,
	vm_tag_t        tag,
	boolean_t       user_wire,
	ppnum_t         *physpage_p);

/* kext exported versions */

extern kern_return_t vm_map_wire_external(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       access_type,
	boolean_t       user_wire);

extern kern_return_t vm_map_wire_and_extract_external(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_prot_t       access_type,
	boolean_t       user_wire,
	ppnum_t         *physpage_p);

#else /* XNU_KERNEL_PRIVATE */

extern kern_return_t vm_map_wire(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       access_type,
	boolean_t       user_wire);

extern kern_return_t vm_map_wire_and_extract(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_prot_t       access_type,
	boolean_t       user_wire,
	ppnum_t         *physpage_p);

#endif /* !XNU_KERNEL_PRIVATE */

/* unwire a region */
extern kern_return_t vm_map_unwire(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	boolean_t       user_wire);

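/*
 * Illustrative sketch (not part of XNU): wiring a range on the
 * kernel's behalf and undoing it.  The same user_wire value must be
 * used for both calls so the matching wired count is adjusted; the
 * tag choice (VM_KERN_MEMORY_OSFMK, from <mach/vm_statistics.h>) is
 * an assumption for the example:
 *
 *	kr = vm_map_wire_kernel(map, start, end,
 *	    VM_PROT_READ | VM_PROT_WRITE,
 *	    VM_KERN_MEMORY_OSFMK,
 *	    FALSE);                     // kernel (not user) wiring
 *	if (kr == KERN_SUCCESS) {
 *		// ... touch the wired pages ...
 *		kr = vm_map_unwire(map, start, end, FALSE);
 *	}
 */
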
#ifdef XNU_KERNEL_PRIVATE

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object(
	vm_map_t        map,
	vm_map_offset_t *address,
	vm_map_size_t   size,
	vm_map_offset_t mask,
	int             flags,
	vm_map_kernel_flags_t vmk_flags,
	vm_tag_t        tag,
	ipc_port_t      port,
	vm_object_offset_t offset,
	boolean_t       needs_copy,
	vm_prot_t       cur_protection,
	vm_prot_t       max_protection,
	vm_inherit_t    inheritance);

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object_prefault(
	vm_map_t        map,
	vm_map_offset_t *address,
	vm_map_size_t   size,
	vm_map_offset_t mask,
	int             flags,
	vm_map_kernel_flags_t vmk_flags,
	vm_tag_t        tag,
	ipc_port_t      port,
	vm_object_offset_t offset,
	vm_prot_t       cur_protection,
	vm_prot_t       max_protection,
	upl_page_list_ptr_t page_list,
	unsigned int    page_list_count);

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object_control(
	vm_map_t        map,
	vm_map_offset_t *address,
	vm_map_size_t   size,
	vm_map_offset_t mask,
	int             flags,
	vm_map_kernel_flags_t vmk_flags,
	vm_tag_t        tag,
	memory_object_control_t control,
	vm_object_offset_t offset,
	boolean_t       needs_copy,
	vm_prot_t       cur_protection,
	vm_prot_t       max_protection,
	vm_inherit_t    inheritance);

extern kern_return_t vm_map_terminate(
	vm_map_t        map);

#endif /* XNU_KERNEL_PRIVATE */

/* Deallocate a region */
extern kern_return_t vm_map_remove(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	boolean_t       flags);

/* Deallocate a region when the map is already locked */
extern kern_return_t vm_map_remove_locked(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	boolean_t       flags);

/* Discard a copy without using it */
extern void vm_map_copy_discard(
	vm_map_copy_t   copy);

/* Overwrite existing memory with a copy */
extern kern_return_t vm_map_copy_overwrite(
	vm_map_t        dst_map,
	vm_map_address_t dst_addr,
	vm_map_copy_t   copy,
	boolean_t       interruptible);

/* Returns TRUE if the size of the vm_map_copy equals the size parameter, FALSE otherwise */
extern boolean_t vm_map_copy_validate_size(
	vm_map_t        dst_map,
	vm_map_copy_t   copy,
	vm_map_size_t   *size);

/* Place a copy into a map */
extern kern_return_t vm_map_copyout(
	vm_map_t        dst_map,
	vm_map_address_t *dst_addr,     /* OUT */
	vm_map_copy_t   copy);

extern kern_return_t vm_map_copyout_size(
	vm_map_t        dst_map,
	vm_map_address_t *dst_addr,     /* OUT */
	vm_map_copy_t   copy,
	vm_map_size_t   copy_size);

extern kern_return_t vm_map_copyout_internal(
	vm_map_t        dst_map,
	vm_map_address_t *dst_addr,     /* OUT */
	vm_map_copy_t   copy,
	vm_map_size_t   copy_size,
	boolean_t       consume_on_success,
	vm_prot_t       cur_protection,
	vm_prot_t       max_protection,
	vm_inherit_t    inheritance);

extern kern_return_t vm_map_copyin(
	vm_map_t        src_map,
	vm_map_address_t src_addr,
	vm_map_size_t   len,
	boolean_t       src_destroy,
	vm_map_copy_t   *copy_result);  /* OUT */

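/*
 * Illustrative sketch (not part of XNU): moving a region between two
 * maps through a copy object.  vm_map_copyout consumes the copy on
 * success; on failure the caller still owns it and must discard it:
 *
 *	vm_map_copy_t copy;
 *	vm_map_address_t dst;
 *
 *	kr = vm_map_copyin(src_map, src_addr, len,
 *	    FALSE,                      // keep the source mapping
 *	    &copy);
 *	if (kr != KERN_SUCCESS)
 *		return kr;
 *	kr = vm_map_copyout(dst_map, &dst, copy);
 *	if (kr != KERN_SUCCESS)
 *		vm_map_copy_discard(copy);
 */
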
extern kern_return_t vm_map_copyin_common(
	vm_map_t        src_map,
	vm_map_address_t src_addr,
	vm_map_size_t   len,
	boolean_t       src_destroy,
	boolean_t       src_volatile,
	vm_map_copy_t   *copy_result,   /* OUT */
	boolean_t       use_maxprot);

#define VM_MAP_COPYIN_SRC_DESTROY       0x00000001
#define VM_MAP_COPYIN_USE_MAXPROT       0x00000002
#define VM_MAP_COPYIN_ENTRY_LIST        0x00000004
#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
#define VM_MAP_COPYIN_ALL_FLAGS         0x0000000F
extern kern_return_t vm_map_copyin_internal(
	vm_map_t        src_map,
	vm_map_address_t src_addr,
	vm_map_size_t   len,
	int             flags,
	vm_map_copy_t   *copy_result);  /* OUT */

extern kern_return_t vm_map_copy_extract(
	vm_map_t        src_map,
	vm_map_address_t src_addr,
	vm_map_size_t   len,
	vm_map_copy_t   *copy_result,   /* OUT */
	vm_prot_t       *cur_prot,      /* OUT */
	vm_prot_t       *max_prot);


extern void vm_map_disable_NX(
	vm_map_t        map);

extern void vm_map_disallow_data_exec(
	vm_map_t        map);

extern void vm_map_set_64bit(
	vm_map_t        map);

extern void vm_map_set_32bit(
	vm_map_t        map);

extern void vm_map_set_jumbo(
	vm_map_t        map);

extern void vm_map_set_jit_entitled(
	vm_map_t        map);

extern void vm_map_set_max_addr(
	vm_map_t        map, vm_map_offset_t new_max_offset);

extern boolean_t vm_map_has_hard_pagezero(
	vm_map_t        map,
	vm_map_offset_t pagezero_size);
extern void vm_commit_pagezero_status(vm_map_t tmap);

#ifdef __arm__
static inline boolean_t
vm_map_is_64bit(__unused vm_map_t map)
{
	return 0;
}
#else
extern boolean_t vm_map_is_64bit(
	vm_map_t        map);
#endif


extern kern_return_t vm_map_raise_max_offset(
	vm_map_t        map,
	vm_map_offset_t new_max_offset);

extern kern_return_t vm_map_raise_min_offset(
	vm_map_t        map,
	vm_map_offset_t new_min_offset);
#if !CONFIG_EMBEDDED
extern void vm_map_set_high_start(
	vm_map_t        map,
	vm_map_offset_t high_start);
#endif

extern vm_map_offset_t vm_compute_max_offset(
	boolean_t       is64);

extern void vm_map_get_max_aslr_slide_section(
	vm_map_t        map,
	int64_t         *max_sections,
	int64_t         *section_size);

extern uint64_t vm_map_get_max_aslr_slide_pages(
	vm_map_t        map);

extern uint64_t vm_map_get_max_loader_aslr_slide_pages(
	vm_map_t        map);

extern void vm_map_set_user_wire_limit(
	vm_map_t        map,
	vm_size_t       limit);

extern void vm_map_switch_protect(
	vm_map_t        map,
	boolean_t       val);

extern void vm_map_iokit_mapped_region(
	vm_map_t        map,
	vm_size_t       bytes);

extern void vm_map_iokit_unmapped_region(
	vm_map_t        map,
	vm_size_t       bytes);


extern boolean_t first_free_is_valid(vm_map_t);

extern int vm_map_page_shift(
	vm_map_t        map);

extern vm_map_offset_t vm_map_page_mask(
	vm_map_t        map);

extern int vm_map_page_size(
	vm_map_t        map);

extern vm_map_offset_t vm_map_round_page_mask(
	vm_map_offset_t offset,
	vm_map_offset_t mask);

extern vm_map_offset_t vm_map_trunc_page_mask(
	vm_map_offset_t offset,
	vm_map_offset_t mask);

extern boolean_t vm_map_page_aligned(
	vm_map_offset_t offset,
	vm_map_offset_t mask);

static inline int
vm_map_range_overflows(vm_map_offset_t addr, vm_map_size_t size)
{
	vm_map_offset_t sum;
	return os_add_overflow(addr, size, &sum);
}

static inline int
mach_vm_range_overflows(mach_vm_offset_t addr, mach_vm_size_t size)
{
	mach_vm_offset_t sum;
	return os_add_overflow(addr, size, &sum);
}

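/*
 * Illustrative sketch (not part of XNU): rejecting a caller-supplied
 * (address, size) pair whose end would wrap around, before doing any
 * rounding or lookups with it:
 *
 *	if (vm_map_range_overflows(addr, size))
 *		return KERN_INVALID_ARGUMENT;
 *	start = vm_map_trunc_page_mask(addr, vm_map_page_mask(map));
 *	end   = vm_map_round_page_mask(addr + size, vm_map_page_mask(map));
 */
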
#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_page_info(
	vm_map_t        map,
	vm_map_offset_t offset,
	vm_page_info_flavor_t flavor,
	vm_page_info_t  info,
	mach_msg_type_number_t *count);
extern kern_return_t vm_map_page_range_info_internal(
	vm_map_t        map,
	vm_map_offset_t start_offset,
	vm_map_offset_t end_offset,
	vm_page_info_flavor_t flavor,
	vm_page_info_t  info,
	mach_msg_type_number_t *count);
#endif /* XNU_KERNEL_PRIVATE */


#ifdef  MACH_KERNEL_PRIVATE

/*
 * Macros to invoke vm_map_copyin_common.  vm_map_copyin is the
 * usual form; it handles a copyin based on the current protection
 * (current protection == VM_PROT_NONE is a failure).
 * vm_map_copyin_maxprot handles a copyin based on maximum possible
 * access.  The difference is that a region with no current access
 * BUT possible maximum access is rejected by vm_map_copyin(), but
 * returned by vm_map_copyin_maxprot.
 */
#define vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \
	vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
	    FALSE, copy_result, FALSE)

#define vm_map_copyin_maxprot(src_map, \
	    src_addr, len, src_destroy, copy_result) \
	vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
	    FALSE, copy_result, TRUE)


/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x, pgmask)    (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x, pgmask)    ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_PAGE_SHIFT(map) ((map) ? (map)->hdr.page_shift : PAGE_SHIFT)
#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x, pgmask) (((x) & (pgmask)) == 0)

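/*
 * Example (assuming a 16K map page size, i.e. page_shift == 14):
 *
 *	VM_MAP_PAGE_SIZE(map) == 0x4000
 *	VM_MAP_PAGE_MASK(map) == 0x3fff
 *	VM_MAP_ROUND_PAGE(0x4001, 0x3fff) == 0x8000
 *	VM_MAP_TRUNC_PAGE(0x7fff, 0x3fff) == 0x4000
 */
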
static inline void
vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
{
	switch (prot) {
	case MAP_MEM_NOOP:                      break;
	case MAP_MEM_IO:                        *wimg = VM_WIMG_IO; break;
	case MAP_MEM_COPYBACK:                  *wimg = VM_WIMG_USE_DEFAULT; break;
	case MAP_MEM_INNERWBACK:                *wimg = VM_WIMG_INNERWBACK; break;
	case MAP_MEM_POSTED:                    *wimg = VM_WIMG_POSTED; break;
	case MAP_MEM_POSTED_REORDERED:          *wimg = VM_WIMG_POSTED_REORDERED; break;
	case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break;
	case MAP_MEM_WTHRU:                     *wimg = VM_WIMG_WTHRU; break;
	case MAP_MEM_WCOMB:                     *wimg = VM_WIMG_WCOMB; break;
	case MAP_MEM_RT:                        *wimg = VM_WIMG_RT; break;
	default:
		panic("Unrecognized mapping type %u\n", prot);
	}
}

#endif /* MACH_KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
#endif /* XNU_KERNEL_PRIVATE */

#define vm_map_round_page(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define vm_map_trunc_page(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Flags for vm_map_remove() and vm_map_delete()
 */
#define VM_MAP_REMOVE_NO_FLAGS          0x0
#define VM_MAP_REMOVE_KUNWIRE           0x1
#define VM_MAP_REMOVE_INTERRUPTIBLE     0x2
#define VM_MAP_REMOVE_WAIT_FOR_KWIRE    0x4
#define VM_MAP_REMOVE_SAVE_ENTRIES      0x8
#define VM_MAP_REMOVE_NO_PMAP_CLEANUP   0x10
#define VM_MAP_REMOVE_NO_MAP_ALIGN      0x20
#define VM_MAP_REMOVE_NO_UNNESTING      0x40
#define VM_MAP_REMOVE_IMMUTABLE         0x80
#define VM_MAP_REMOVE_GAPS_OK           0x100

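/*
 * Illustrative sketch (not part of XNU): a plain removal versus one
 * that also drops kernel wirings on the way out:
 *
 *	kr = vm_map_remove(map, start, end, VM_MAP_REMOVE_NO_FLAGS);
 *	kr = vm_map_remove(kernel_map, kstart, kend,
 *	    VM_MAP_REMOVE_KUNWIRE);
 */
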
/* Support for UPLs from vm_maps */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_get_upl(
	vm_map_t                target_map,
	vm_map_offset_t         map_offset,
	upl_size_t              *size,
	upl_t                   *upl,
	upl_page_info_array_t   page_info,
	unsigned int            *page_infoCnt,
	upl_control_flags_t     *flags,
	vm_tag_t                tag,
	int                     force_data_sync);

#endif /* XNU_KERNEL_PRIVATE */

extern void
vm_map_sizes(vm_map_t map,
    vm_map_size_t *psize,
    vm_map_size_t *pfree,
    vm_map_size_t *plargest_free);

#if CONFIG_DYNAMIC_CODE_SIGNING
extern kern_return_t vm_map_sign(vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end);
#endif

extern kern_return_t vm_map_partial_reap(
	vm_map_t        map,
	unsigned int    *reclaimed_resident,
	unsigned int    *reclaimed_compressed);


#if DEVELOPMENT || DEBUG

extern int vm_map_disconnect_page_mappings(
	vm_map_t        map,
	boolean_t);
#endif


#if CONFIG_FREEZE

extern kern_return_t vm_map_freeze(
	task_t          task,
	unsigned int    *purgeable_count,
	unsigned int    *wired_count,
	unsigned int    *clean_count,
	unsigned int    *dirty_count,
	unsigned int    dirty_budget,
	unsigned int    *shared_count,
	int             *freezer_error_code,
	boolean_t       eval_only);


#define FREEZER_ERROR_GENERIC                   (-1)
#define FREEZER_ERROR_EXCESS_SHARED_MEMORY      (-2)
#define FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO  (-3)
#define FREEZER_ERROR_NO_COMPRESSOR_SPACE       (-4)
#define FREEZER_ERROR_NO_SWAP_SPACE             (-5)

#endif

__END_DECLS

/*
 * In some cases, we don't have a real VM object but still want to return a
 * unique ID (to avoid a memory region looking like shared memory), so build
 * a fake pointer based on the map's ledger and the index of the ledger being
 * reported.
 */
#define INFO_MAKE_FAKE_OBJECT_ID(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id)))

#endif /* KERNEL_PRIVATE */

#endif  /* _VM_VM_MAP_H_ */