/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	vm/vm_map.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory map module definitions.
 *
 * Contributors:
 *	avie, dlb, mwyoung
 */

#ifndef _VM_VM_MAP_H_
#define _VM_VM_MAP_H_

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>
#include <mach/sdt.h>
#include <vm/pmap.h>

#ifdef KERNEL_PRIVATE

#include <sys/cdefs.h>

__BEGIN_DECLS

extern void vm_map_reference(vm_map_t map);
extern vm_map_t current_map(void);

/* Set up reserved areas in a new VM map */
extern kern_return_t vm_map_exec(
	vm_map_t        new_map,
	task_t          task,
	boolean_t       is64bit,
	void            *fsroot,
	cpu_type_t      cpu,
	cpu_subtype_t   cpu_subtype);

__END_DECLS

#ifdef MACH_KERNEL_PRIVATE

#include <task_swapper.h>
#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>

#define current_map_fast()      (current_thread()->map)
#define current_map()           (current_map_fast())

#include <vm/vm_map_store.h>


/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				 used for inter-map copy operations
 */
typedef struct vm_map_entry     *vm_map_entry_t;
#define VM_MAP_ENTRY_NULL       ((vm_map_entry_t) 0)


/*
 *	Type:		vm_map_object_t [internal use only]
 *
 *	Description:
 *		The target of an address mapping, either a virtual
 *		memory object or a sub map (of the kernel map).
 */
typedef union vm_map_object {
	vm_object_t     vmo_object;     /* VM object */
	vm_map_t        vmo_submap;     /* belongs to another map */
} vm_map_object_t;

#define named_entry_lock_init(object)    lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object)         lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)       lck_mtx_unlock(&(object)->Lock)
#if VM_NAMED_ENTRY_LIST
extern queue_head_t vm_named_entry_list;
#endif /* VM_NAMED_ENTRY_LIST */

/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions (read, write)
 *		with which it can be mapped.
 */

struct vm_named_entry {
	decl_lck_mtx_data(, Lock)               /* Synchronization */
	union {
		vm_object_t     object;         /* object I point to */
		vm_map_t        map;            /* map backing submap */
		vm_map_copy_t   copy;           /* a VM map copy */
	} backing;
	vm_object_offset_t      offset;         /* offset into object */
	vm_object_size_t        size;           /* size of region */
	vm_object_offset_t      data_offset;    /* offset to first byte of data */
	vm_prot_t               protection;     /* access permissions */
	int                     ref_count;      /* Number of references */
	unsigned int                            /* Is backing.xxx : */
	/* boolean_t */ internal:1,             /* ... an internal object */
	/* boolean_t */ is_sub_map:1,           /* ... a submap? */
	/* boolean_t */ is_copy:1;              /* ... a VM map copy */
#if VM_NAMED_ENTRY_LIST
	queue_chain_t           named_entry_list;
	int                     named_entry_alias;
	mach_port_t             named_entry_port;
#define NAMED_ENTRY_BT_DEPTH 16
	void                    *named_entry_bt[NAMED_ENTRY_BT_DEPTH];
#endif /* VM_NAMED_ENTRY_LIST */
};

/*
 *	Type:		vm_map_entry_t [internal use only]
 *
 *	Description:
 *		A single mapping within an address map.
 *
 *	Implementation:
 *		Address map entries consist of start and end addresses,
 *		a VM object (or sub map) and offset into that object,
 *		and user-exported inheritance and protection information.
 *		Control information for virtual copy operations is also
 *		stored in the address map entry.
 */

struct vm_map_links {
	struct vm_map_entry     *prev;          /* previous entry */
	struct vm_map_entry     *next;          /* next entry */
	vm_map_offset_t         start;          /* start address */
	vm_map_offset_t         end;            /* end address */
};

/*
 * IMPORTANT:
 * The "alias" field can be updated while holding the VM map lock
 * "shared".  It's OK as long as it's the only field that can be
 * updated without the VM map "exclusive" lock.
 */
#define VME_OBJECT(entry) ((entry)->vme_object.vmo_object)
#define VME_OBJECT_SET(entry, object)                   \
	MACRO_BEGIN                                     \
	(entry)->vme_object.vmo_object = (object);      \
	MACRO_END
#define VME_SUBMAP(entry) ((entry)->vme_object.vmo_submap)
#define VME_SUBMAP_SET(entry, submap)                   \
	MACRO_BEGIN                                     \
	(entry)->vme_object.vmo_submap = (submap);      \
	MACRO_END
#define VME_OFFSET(entry) ((entry)->vme_offset & ~PAGE_MASK)
#define VME_OFFSET_SET(entry, offset)                   \
	MACRO_BEGIN                                     \
	int __alias;                                    \
	__alias = VME_ALIAS((entry));                   \
	assert(((offset) & PAGE_MASK) == 0);            \
	(entry)->vme_offset = (offset) | __alias;       \
	MACRO_END
#define VME_OBJECT_SHADOW(entry, length)                \
	MACRO_BEGIN                                     \
	vm_object_t __object;                           \
	vm_object_offset_t __offset;                    \
	__object = VME_OBJECT((entry));                 \
	__offset = VME_OFFSET((entry));                 \
	vm_object_shadow(&__object, &__offset, (length)); \
	if (__object != VME_OBJECT((entry))) {          \
	        VME_OBJECT_SET((entry), __object);      \
	        (entry)->use_pmap = TRUE;               \
	}                                               \
	if (__offset != VME_OFFSET((entry))) {          \
	        VME_OFFSET_SET((entry), __offset);      \
	}                                               \
	MACRO_END

#define VME_ALIAS_MASK (PAGE_MASK)
#define VME_ALIAS(entry) ((unsigned int)((entry)->vme_offset & VME_ALIAS_MASK))
#define VME_ALIAS_SET(entry, alias)                     \
	MACRO_BEGIN                                     \
	vm_map_offset_t __offset;                       \
	__offset = VME_OFFSET((entry));                 \
	(entry)->vme_offset = __offset | ((alias) & VME_ALIAS_MASK); \
	MACRO_END
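
/*
 * Editor's sketch (not part of the original header): VME_OFFSET() and
 * VME_ALIAS() share the single vme_offset field.  The offset is always
 * page-aligned, so its low PAGE_MASK bits are reused to hold the alias
 * tag.  The function name below is hypothetical.
 */
#if 0 /* illustrative example only */
static void
vme_offset_alias_demo(vm_map_entry_t entry)
{
	VME_OFFSET_SET(entry, 0x10000);         /* must be page-aligned */
	VME_ALIAS_SET(entry, 5);                /* tag lives in the low bits */
	assert(VME_OFFSET(entry) == 0x10000);   /* alias bits masked out */
	assert(VME_ALIAS(entry) == 5);          /* offset bits masked out */
}
#endif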

/*
 * FOOTPRINT ACCOUNTING:
 * The "memory footprint" is better described in the pmap layer.
 *
 * At the VM level, these 2 vm_map_entry_t fields are relevant:
 * iokit_mapped:
 *	For an "iokit_mapped" entry, we add the size of the entry to the
 *	footprint when the entry is entered into the map and we subtract that
 *	size when the entry is removed.  No other accounting should take place.
 *	"use_pmap" should be FALSE but is not taken into account.
 * use_pmap: (only when is_sub_map is FALSE)
 *	This indicates if we should ask the pmap layer to account for pages
 *	in this mapping.  If FALSE, we expect that another form of accounting
 *	is being used (e.g. "iokit_mapped" or the explicit accounting of
 *	non-volatile purgable memory).
 *
 * So the logic is mostly:
 * if entry->is_sub_map == TRUE
 *	anything in a submap does not count for the footprint
 * else if entry->iokit_mapped == TRUE
 *	footprint includes the entire virtual size of this entry
 * else if entry->use_pmap == FALSE
 *	tell pmap NOT to account for pages being pmap_enter()'d from this
 *	mapping (i.e. use "alternate accounting")
 * else
 *	pmap will account for pages being pmap_enter()'d from this mapping
 *	as it sees fit (only if anonymous, etc...)
 */
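
/*
 * Editor's sketch (not part of the original header): the decision logic
 * above, written out as code.  Note that the comment's "iokit_mapped"
 * corresponds to the "iokit_acct" bit in struct vm_map_entry below; the
 * helper name is hypothetical.
 */
#if 0 /* illustrative example only */
static boolean_t
entry_uses_pmap_accounting(vm_map_entry_t entry)
{
	if (entry->is_sub_map) {
		return FALSE;   /* submap contents never count */
	}
	if (entry->iokit_acct) {
		return FALSE;   /* whole virtual size counted on entry/removal */
	}
	/* otherwise the pmap layer accounts per page, iff use_pmap is set */
	return entry->use_pmap;
}
#endif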

struct vm_map_entry {
	struct vm_map_links     links;          /* links to other entries */
#define vme_prev                links.prev
#define vme_next                links.next
#define vme_start               links.start
#define vme_end                 links.end

	struct vm_map_store     store;
	union vm_map_object     vme_object;     /* object I point to */
	vm_object_offset_t      vme_offset;     /* offset into object */

	unsigned int
	/* boolean_t */ is_shared:1,            /* region is shared */
	/* boolean_t */ is_sub_map:1,           /* Is "object" a submap? */
	/* boolean_t */ in_transition:1,        /* Entry being changed */
	/* boolean_t */ needs_wakeup:1,         /* Waiters on in_transition */
	/* vm_behavior_t */ behavior:2,         /* user paging behavior hint */
	/* behavior is not defined for submap type */
	/* boolean_t */ needs_copy:1,           /* object need to be copied? */

	/* Only in task maps: */
	/* vm_prot_t */ protection:3,           /* protection code */
	/* vm_prot_t */ max_protection:3,       /* maximum protection */
	/* vm_inherit_t */ inheritance:2,       /* inheritance */
	/* boolean_t */ use_pmap:1,             /*
	                                         * use_pmap is overloaded:
	                                         * if "is_sub_map":
	                                         *	use a nested pmap?
	                                         * else (i.e. if object):
	                                         *	use pmap accounting
	                                         *	for footprint?
	                                         */
	/* boolean_t */ no_cache:1,             /* should new pages be cached? */
	/* boolean_t */ permanent:1,            /* mapping cannot be removed */
	/* boolean_t */ superpage_size:1,       /* use superpages of a certain size */
	/* boolean_t */ map_aligned:1,          /* align to map's page size */
	/* boolean_t */ zero_wired_pages:1,     /* zero out the wired pages of
	                                         * this entry if it is being deleted
	                                         * without unwiring them */
	/* boolean_t */ used_for_jit:1,
	/* boolean_t */ pmap_cs_associated:1,   /* pmap_cs will validate */
	/* boolean_t */ from_reserved_zone:1,   /* Allocated from
	                                         * kernel reserved zone */

	/* iokit accounting: use the virtual size rather than resident size: */
	/* boolean_t */ iokit_acct:1,
	/* boolean_t */ vme_resilient_codesign:1,
	/* boolean_t */ vme_resilient_media:1,
	/* boolean_t */ vme_atomic:1,           /* entry cannot be split/coalesced */
	__unused:4;

	unsigned short          wired_count;    /* can be paged if = 0 */
	unsigned short          user_wired_count; /* for vm_wire */
#if DEBUG
#define MAP_ENTRY_CREATION_DEBUG (1)
#define MAP_ENTRY_INSERTION_DEBUG (1)
#endif
#if MAP_ENTRY_CREATION_DEBUG
	struct vm_map_header    *vme_creation_maphdr;
	uintptr_t               vme_creation_bt[16];
#endif
#if MAP_ENTRY_INSERTION_DEBUG
	uintptr_t               vme_insertion_bt[16];
#endif
};

/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) ((a) & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) (((a) + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)
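
/*
 * Editor's sketch (not part of the original header): with 4K base pages
 * and SUPERPAGE_NBASEPAGES == 512 (i.e. 2MB superpages), the rounding
 * macros behave as asserted below; the function name is hypothetical.
 */
#if 0 /* illustrative example only */
static void
superpage_round_demo(void)
{
	assert(SUPERPAGE_SIZE == 0x200000);
	assert(SUPERPAGE_ROUND_DOWN(0x234567) == 0x200000);
	assert(SUPERPAGE_ROUND_UP(0x234567) == 0x400000);
}
#endif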

/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT          65535


/*
 *	Type:		struct vm_map_header
 *
 *	Description:
 *		Header for a vm_map and a vm_map_copy.
 */


struct vm_map_header {
	struct vm_map_links     links;          /* first, last, min, max */
	int                     nentries;       /* Number of entries */
	boolean_t               entries_pageable;
	                                        /* are map entries pageable? */
#ifdef VM_MAP_STORE_USE_RB
	struct rb_head          rb_head_store;
#endif
	int                     page_shift;     /* page shift */
};

#define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift)
#define VM_MAP_HDR_PAGE_SIZE(hdr) (1 << VM_MAP_HDR_PAGE_SHIFT((hdr)))
#define VM_MAP_HDR_PAGE_MASK(hdr) (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1)
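
/*
 * Editor's sketch (not part of the original header): the page geometry of
 * a header is fully determined by page_shift, e.g. a 16K map has
 * page_shift 14, page size 0x4000 and page mask 0x3fff.  The function
 * name is hypothetical.
 */
#if 0 /* illustrative example only */
static void
hdr_page_geometry_demo(struct vm_map_header *hdr)
{
	assert(VM_MAP_HDR_PAGE_SIZE(hdr) == (1 << hdr->page_shift));
	assert(VM_MAP_HDR_PAGE_MASK(hdr) == VM_MAP_HDR_PAGE_SIZE(hdr) - 1);
}
#endif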

/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 */
struct _vm_map {
	lck_rw_t                lock;           /* map lock */
	struct vm_map_header    hdr;            /* Map entry header */
#define min_offset              hdr.links.start /* start of range */
#define max_offset              hdr.links.end   /* end of range */
	pmap_t                  pmap;           /* Physical map */
	vm_map_size_t           size;           /* virtual size */
	vm_map_size_t           user_wire_limit;/* rlimit on user locked memory */
	vm_map_size_t           user_wire_size; /* current size of user locked memory in this map */
#if __x86_64__
	vm_map_offset_t         vmmap_high_start;
#endif /* __x86_64__ */

	union {
		/*
		 * If map->disable_vmentry_reuse == TRUE:
		 * the end address of the highest allocated vm_map_entry_t.
		 */
		vm_map_offset_t         vmu1_highest_entry_end;
		/*
		 * For a nested VM map:
		 * the lowest address in this nested VM map that we would
		 * expect to be unnested under normal operation (i.e. for
		 * regular copy-on-write on DATA section).
		 */
		vm_map_offset_t         vmu1_lowest_unnestable_start;
	} vmu1;
#define highest_entry_end       vmu1.vmu1_highest_entry_end
#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start
	decl_lck_mtx_data(, s_lock)             /* Lock ref, res fields */
	lck_mtx_ext_t           s_lock_ext;
	vm_map_entry_t          hint;           /* hint for quick lookups */
	union {
		struct vm_map_links* vmmap_hole_hint;   /* hint for quick hole lookups */
		struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
	} vmmap_u_1;
#define hole_hint vmmap_u_1.vmmap_hole_hint
#define vmmap_corpse_footprint vmmap_u_1.vmmap_corpse_footprint
	union {
		vm_map_entry_t          _first_free;    /* First free space hint */
		struct vm_map_links*    _holes;         /* links all holes between entries */
	} f_s;                                  /* Union for free space data structures being used */

#define first_free              f_s._first_free
#define holes_list              f_s._holes

	int                     map_refcnt;     /* Reference count */

#if TASK_SWAPPER
	int                     res_count;      /* Residence count (swap) */
	int                     sw_state;       /* Swap state */
#endif /* TASK_SWAPPER */

	unsigned int
	/* boolean_t */ wait_for_space:1,       /* Should callers wait for space? */
	/* boolean_t */ wiring_required:1,      /* All memory wired? */
	/* boolean_t */ no_zero_fill:1,         /* No zero fill absent pages */
	/* boolean_t */ mapped_in_other_pmaps:1, /* has this submap been mapped in maps that use a different pmap */
	/* boolean_t */ switch_protect:1,       /* Protect map from write faults while switched */
	/* boolean_t */ disable_vmentry_reuse:1, /* All vm entries should keep using newer and higher addresses in the map */
	/* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
	/* boolean_t */ holelistenabled:1,
	/* boolean_t */ is_nested_map:1,
	/* boolean_t */ map_disallow_new_exec:1, /* Disallow new executable code */
	/* boolean_t */ jit_entry_exists:1,
	/* boolean_t */ has_corpse_footprint:1,
	/* boolean_t */ warned_delete_gap:1,
	/* reserved */ pad:19;
	unsigned int            timestamp;      /* Version number */
};

#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
#define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
#define vm_map_first_entry(map) ((map)->hdr.links.next)
#define vm_map_last_entry(map)  ((map)->hdr.links.prev)

#if TASK_SWAPPER
/*
 * VM map swap states.  There are no transition states.
 */
#define MAP_SW_IN       1       /* map is swapped in; residence count > 0 */
#define MAP_SW_OUT      2       /* map is out (res_count == 0) */
#endif /* TASK_SWAPPER */

/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage note:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int    main_timestamp;
} vm_map_version_t;
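
/*
 * Editor's sketch (not part of the original header): the typical pattern
 * is to capture a version during a lookup, drop the map lock to do slow
 * work, then re-lock and verify that no mutation bumped the timestamp.
 * The helper name is hypothetical.
 */
#if 0 /* illustrative example only */
static boolean_t
lookup_still_valid(vm_map_t map, vm_map_version_t *version)
{
	boolean_t ok;

	vm_map_lock_read(map);
	ok = vm_map_verify(map, version);       /* compares timestamps */
	vm_map_unlock_read(map);
	return ok;
}
#endif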

/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 *		There are three formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header "links"
 *		entry onto which the other entries that represent
 *		the region are chained.
 *
 *		The second format is a single vm object.  This was used
 *		primarily in the pageout path - but is not currently used
 *		except for placeholder copy objects (see vm_map_copy_copy()).
 *
 *		The third format is a kernel buffer copy object - for data
 *		small enough that physical copies were the most efficient
 *		method.  This method uses a zero-sized array unioned with
 *		other format-specific data in the 'c_u' member.  This unsized
 *		array overlaps the other elements and allows us to use this
 *		extra structure space for physical memory copies.  On 64-bit
 *		systems this saves ~64 bytes per vm_map_copy.
 */

struct vm_map_copy {
	int                     type;
#define VM_MAP_COPY_ENTRY_LIST          1
#define VM_MAP_COPY_OBJECT              2
#define VM_MAP_COPY_KERNEL_BUFFER       3
	vm_object_offset_t      offset;
	vm_map_size_t           size;
	union {
		struct vm_map_header    hdr;      /* ENTRY_LIST */
		vm_object_t             object;   /* OBJECT */
		uint8_t                 kdata[0]; /* KERNEL_BUFFER */
	} c_u;
};


#define cpy_hdr                 c_u.hdr

#define cpy_object              c_u.object
#define cpy_kdata               c_u.kdata
#define cpy_kdata_hdr_sz        (offsetof(struct vm_map_copy, c_u.kdata))

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)

/*
 * Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy)           \
	((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy)            \
	((copy)->cpy_hdr.links.prev)
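
/*
 * Editor's sketch (not part of the original header): walking the entries
 * of an ENTRY_LIST copy object uses the same idiom as walking a map, with
 * vm_map_copy_to_entry() as the list sentinel.  The function name is
 * hypothetical.
 */
#if 0 /* illustrative example only */
static int
copy_count_entries(vm_map_copy_t copy)
{
	vm_map_entry_t entry;
	int n = 0;

	assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
	for (entry = vm_map_copy_first_entry(copy);
	    entry != vm_map_copy_to_entry(copy);
	    entry = entry->vme_next) {
		n++;
	}
	return n;
}
#endif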

/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *	Description:
 *		Perform locking on the data portion of a map.
 *	When multiple maps are to be locked, order by map address.
 *	(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map)                                           \
	((map)->timestamp = 0 ,                                         \
	lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))

#define vm_map_lock(map)                     \
	MACRO_BEGIN                          \
	DTRACE_VM(vm_map_lock_w);            \
	lck_rw_lock_exclusive(&(map)->lock); \
	MACRO_END

#define vm_map_unlock(map)          \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_w); \
	(map)->timestamp++;         \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_read(map)             \
	MACRO_BEGIN                       \
	DTRACE_VM(vm_map_lock_r);         \
	lck_rw_lock_shared(&(map)->lock); \
	MACRO_END

#define vm_map_unlock_read(map)     \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_r); \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_write_to_read(map)                 \
	MACRO_BEGIN                                    \
	DTRACE_VM(vm_map_lock_downgrade);              \
	(map)->timestamp++;                            \
	lck_rw_lock_exclusive_to_shared(&(map)->lock); \
	MACRO_END

/*
 * lck_rw_lock_shared_to_exclusive() returns FALSE on failure, and in that
 * case the shared lock is dropped.  This function therefore evaluates to
 * zero on a successful upgrade and non-zero on failure, in which case the
 * caller no longer holds the map lock at all.
 */
static inline int
vm_map_lock_read_to_write(vm_map_t map)
{
	if (lck_rw_lock_shared_to_exclusive(&(map)->lock)) {
		DTRACE_VM(vm_map_lock_upgrade);
		return 0;
	}
	return 1;
}
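
/*
 * Editor's sketch (not part of the original header): a failed upgrade
 * leaves the caller with no lock at all, so the exclusive lock must be
 * re-taken from scratch and any lookup redone.  The function name is
 * hypothetical.
 */
#if 0 /* illustrative example only */
static void
upgrade_or_retake(vm_map_t map)
{
	vm_map_lock_read(map);
	/* ... read-side work ... */
	if (vm_map_lock_read_to_write(map)) {
		/* upgrade failed and the shared lock was dropped: start over */
		vm_map_lock(map);
		/* ... state may have changed: revalidate before writing ... */
	}
	/* ... write-side work ... */
	vm_map_unlock(map);
}
#endif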

static inline boolean_t
vm_map_try_lock(vm_map_t map)
{
	if (lck_rw_try_lock_exclusive(&(map)->lock)) {
		DTRACE_VM(vm_map_lock_w);
		return TRUE;
	}
	return FALSE;
}

static inline boolean_t
vm_map_try_lock_read(vm_map_t map)
{
	if (lck_rw_try_lock_shared(&(map)->lock)) {
		DTRACE_VM(vm_map_lock_r);
		return TRUE;
	}
	return FALSE;
}

#if MACH_ASSERT || DEBUG
#define vm_map_lock_assert_held(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD)
#define vm_map_lock_assert_shared(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_SHARED)
#define vm_map_lock_assert_exclusive(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_map_lock_assert_notheld(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
#else /* MACH_ASSERT || DEBUG */
#define vm_map_lock_assert_held(map)
#define vm_map_lock_assert_shared(map)
#define vm_map_lock_assert_exclusive(map)
#define vm_map_lock_assert_notheld(map)
#endif /* MACH_ASSERT || DEBUG */

/*
 *	Exported procedures that operate on vm_map_t.
 */

/* Initialize the module */
extern void vm_map_init(void);

extern void vm_kernel_reserved_entry_init(void);

/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t vm_map_find_space(
	vm_map_t                map,
	vm_map_address_t        *address,       /* OUT */
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_map_entry_t          *o_entry);      /* OUT */

extern void vm_map_clip_start(
	vm_map_t        map,
	vm_map_entry_t  entry,
	vm_map_offset_t endaddr);
extern void vm_map_clip_end(
	vm_map_t        map,
	vm_map_entry_t  entry,
	vm_map_offset_t endaddr);
extern boolean_t vm_map_entry_should_cow_for_true_share(
	vm_map_entry_t  entry);

/* Look up the map entry containing the specified address in the given map */
extern boolean_t vm_map_lookup_entry(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry);        /* OUT */
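
/*
 * Editor's sketch (not part of the original header): vm_map_lookup_entry()
 * returns TRUE and the containing entry when the address is mapped, or
 * FALSE and the entry immediately preceding the gap when it is not.  The
 * function name is hypothetical.
 */
#if 0 /* illustrative example only */
static boolean_t
address_is_mapped(vm_map_t map, vm_map_address_t addr)
{
	vm_map_entry_t entry;
	boolean_t mapped;

	vm_map_lock_read(map);
	mapped = vm_map_lookup_entry(map, addr, &entry);
	vm_map_unlock_read(map);
	return mapped;
}
#endif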

extern void vm_map_copy_remap(
	vm_map_t        map,
	vm_map_entry_t  where,
	vm_map_copy_t   copy,
	vm_map_offset_t adjustment,
	vm_prot_t       cur_prot,
	vm_prot_t       max_prot,
	vm_inherit_t    inheritance);

/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t vm_map_lookup_locked(
	vm_map_t                *var_map,       /* IN/OUT */
	vm_map_address_t        vaddr,
	vm_prot_t               fault_type,
	int                     object_lock_type,
	vm_map_version_t        *out_version,   /* OUT */
	vm_object_t             *object,        /* OUT */
	vm_object_offset_t      *offset,        /* OUT */
	vm_prot_t               *out_prot,      /* OUT */
	boolean_t               *wired,         /* OUT */
	vm_object_fault_info_t  fault_info,     /* OUT */
	vm_map_t                *real_map);     /* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t vm_map_verify(
	vm_map_t                map,
	vm_map_version_t        *version);      /* REF */

extern vm_map_entry_t vm_map_entry_insert(
	vm_map_t                map,
	vm_map_entry_t          insp_entry,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	boolean_t               is_shared,
	boolean_t               in_transition,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_behavior_t           behavior,
	vm_inherit_t            inheritance,
	unsigned                wired_count,
	boolean_t               no_cache,
	boolean_t               permanent,
	unsigned int            superpage_size,
	boolean_t               clear_map_aligned,
	boolean_t               is_submap,
	boolean_t               used_for_jit,
	int                     alias);


/*
 *	Functions implemented as macros
 */
#define vm_map_min(map) ((map)->min_offset)
                                        /* Lowest valid address in
                                         * a map */

#define vm_map_max(map) ((map)->max_offset)
                                        /* Highest valid address */

#define vm_map_pmap(map)        ((map)->pmap)
                                        /* Physical map associated
                                         * with this address map */

/*
 * Macros/functions for map residence counts and swapin/out of vm maps
 */
#if TASK_SWAPPER

#if MACH_ASSERT
/* Gain a reference to an existing map */
extern void vm_map_reference(
	vm_map_t        map);
/* Lose a residence count */
extern void vm_map_res_deallocate(
	vm_map_t        map);
/* Gain a residence count on a map */
extern void vm_map_res_reference(
	vm_map_t        map);
/* Gain reference & residence counts to possibly swapped-out map */
extern void vm_map_reference_swap(
	vm_map_t        map);

#else /* MACH_ASSERT */

#define vm_map_reference(map)                   \
	MACRO_BEGIN                             \
	vm_map_t Map = (map);                   \
	if (Map) {                              \
	        lck_mtx_lock(&Map->s_lock);     \
	        Map->res_count++;               \
	        Map->map_refcnt++;              \
	        lck_mtx_unlock(&Map->s_lock);   \
	}                                       \
	MACRO_END

#define vm_map_res_reference(map)               \
	MACRO_BEGIN                             \
	vm_map_t Lmap = (map);                  \
	if (Lmap->res_count == 0) {             \
	        lck_mtx_unlock(&Lmap->s_lock);  \
	        vm_map_lock(Lmap);              \
	        vm_map_swapin(Lmap);            \
	        lck_mtx_lock(&Lmap->s_lock);    \
	        ++Lmap->res_count;              \
	        vm_map_unlock(Lmap);            \
	} else                                  \
	        ++Lmap->res_count;              \
	MACRO_END

#define vm_map_res_deallocate(map)              \
	MACRO_BEGIN                             \
	vm_map_t Map = (map);                   \
	if (--Map->res_count == 0) {            \
	        lck_mtx_unlock(&Map->s_lock);   \
	        vm_map_lock(Map);               \
	        vm_map_swapout(Map);            \
	        vm_map_unlock(Map);             \
	        lck_mtx_lock(&Map->s_lock);     \
	}                                       \
	MACRO_END

#define vm_map_reference_swap(map)      \
	MACRO_BEGIN                     \
	vm_map_t Map = (map);           \
	lck_mtx_lock(&Map->s_lock);     \
	++Map->map_refcnt;              \
	vm_map_res_reference(Map);      \
	lck_mtx_unlock(&Map->s_lock);   \
	MACRO_END
#endif /* MACH_ASSERT */

extern void vm_map_swapin(
	vm_map_t        map);

extern void vm_map_swapout(
	vm_map_t        map);

#else /* TASK_SWAPPER */

#define vm_map_reference(map)                   \
	MACRO_BEGIN                             \
	vm_map_t Map = (map);                   \
	if (Map) {                              \
	        lck_mtx_lock(&Map->s_lock);     \
	        Map->map_refcnt++;              \
	        lck_mtx_unlock(&Map->s_lock);   \
	}                                       \
	MACRO_END

#define vm_map_reference_swap(map)      vm_map_reference(map)
#define vm_map_res_reference(map)
#define vm_map_res_deallocate(map)

#endif /* TASK_SWAPPER */

/*
 *	Submap object.  Must be used to create memory to be put
 *	in a submap by vm_map_submap.
 */
extern vm_object_t      vm_submap_object;

/*
 *	Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible)           \
	((map)->timestamp++ ,                           \
	lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \
	             (event_t)&(map)->hdr, interruptible))


#define vm_map_entry_wakeup(map)        \
	thread_wakeup((event_t)(&(map)->hdr))
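
/*
 * Editor's sketch (not part of the original header): how a writer waits
 * for an in_transition entry.  vm_map_entry_wait() drops and retakes the
 * map lock around the sleep, so the entry may have been clipped or freed
 * by the time it returns.  The function name is hypothetical.
 */
#if 0 /* illustrative example only */
static void
wait_for_entry(vm_map_t map, vm_map_entry_t entry)
{
	/* caller holds the map lock exclusive */
	if (entry->in_transition) {
		entry->needs_wakeup = TRUE;
		vm_map_entry_wait(map, THREAD_UNINT); /* unlocks, sleeps, relocks */
		/* the entry may be gone: the caller must look it up again */
	}
}
#endif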

#define vm_map_ref_fast(map)            \
	MACRO_BEGIN                     \
	lck_mtx_lock(&map->s_lock);     \
	map->map_refcnt++;              \
	vm_map_res_reference(map);      \
	lck_mtx_unlock(&map->s_lock);   \
	MACRO_END

#define vm_map_dealloc_fast(map)        \
	MACRO_BEGIN                     \
	int c;                          \
	                                \
	lck_mtx_lock(&map->s_lock);     \
	c = --map->map_refcnt;          \
	if (c > 0)                      \
	        vm_map_res_deallocate(map); \
	lck_mtx_unlock(&map->s_lock);   \
	if (c == 0)                     \
	        vm_map_destroy(map, VM_MAP_REMOVE_NO_FLAGS); \
	MACRO_END


/* simplify map entries */
extern void vm_map_simplify_entry(
	vm_map_t        map,
	vm_map_entry_t  this_entry);
extern void vm_map_simplify(
	vm_map_t        map,
	vm_map_offset_t start);

/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t vm_map_copy_copy(
	vm_map_copy_t   copy);

/* Create a copy object from an object. */
extern kern_return_t vm_map_copyin_object(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_map_copy_t           *copy_result);  /* OUT */

extern kern_return_t vm_map_random_address_for_size(
	vm_map_t        map,
	vm_map_offset_t *address,
	vm_map_size_t   size);

/* Enter a mapping */
extern kern_return_t vm_map_enter(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

#if __arm64__
extern kern_return_t vm_map_enter_fourk(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);
#endif /* __arm64__ */

/* XXX should go away - replaced with regular enter of contig object */
extern kern_return_t vm_map_enter_cpm(
	vm_map_t                map,
	vm_map_address_t        *addr,
	vm_map_size_t           size,
	int                     flags);

extern kern_return_t vm_map_remap(
	vm_map_t                target_map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_map_t                src_map,
	vm_map_offset_t         memory_address,
	boolean_t               copy,
	vm_prot_t               *cur_protection,
	vm_prot_t               *max_protection,
	vm_inherit_t            inheritance);


/*
 * Read and write from a kernel buffer to a specified map.
 */
extern kern_return_t vm_map_write_user(
	vm_map_t        map,
	void            *src_p,
	vm_map_offset_t dst_addr,
	vm_size_t       size);

extern kern_return_t vm_map_read_user(
	vm_map_t        map,
	vm_map_offset_t src_addr,
	void            *dst_p,
	vm_size_t       size);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t vm_map_fork(
	ledger_t        ledger,
	vm_map_t        old_map,
	int             options);
#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE       0x00000001
#define VM_MAP_FORK_PRESERVE_PURGEABLE          0x00000002
#define VM_MAP_FORK_CORPSE_FOOTPRINT            0x00000004

/* Change inheritance */
extern kern_return_t vm_map_inherit(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_inherit_t    new_inheritance);

/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t vm_map_machine_attribute(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_machine_attribute_t  attribute,
	vm_machine_attribute_val_t* value);     /* IN/OUT */

extern kern_return_t vm_map_msync(
	vm_map_t        map,
	vm_map_address_t address,
	vm_map_size_t   size,
	vm_sync_t       sync_flags);

/* Set paging behavior */
extern kern_return_t vm_map_behavior_set(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_behavior_t   new_behavior);

extern kern_return_t vm_map_region(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           *size,
	vm_region_flavor_t      flavor,
	vm_region_info_t        info,
	mach_msg_type_number_t  *count,
	mach_port_t             *object_name);

extern kern_return_t vm_map_region_recurse_64(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           *size,
	natural_t               *nesting_depth,
	vm_region_submap_info_64_t info,
	mach_msg_type_number_t  *count);

extern kern_return_t vm_map_page_query_internal(
	vm_map_t        map,
	vm_map_offset_t offset,
	int             *disposition,
	int             *ref_count);

extern kern_return_t vm_map_query_volatile(
	vm_map_t        map,
	mach_vm_size_t  *volatile_virtual_size_p,
	mach_vm_size_t  *volatile_resident_size_p,
	mach_vm_size_t  *volatile_compressed_size_p,
	mach_vm_size_t  *volatile_pmap_size_p,
	mach_vm_size_t  *volatile_compressed_pmap_size_p);

extern kern_return_t vm_map_submap(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_map_t        submap,
	vm_map_offset_t offset,
	boolean_t       use_pmap);

extern void vm_map_submap_pmap_clean(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_map_t        sub_map,
	vm_map_offset_t offset);

/* Convert from a map entry port to a map */
extern vm_map_t convert_port_entry_to_map(
	ipc_port_t      port);

/* Convert from a port to a vm_object */
extern vm_object_t convert_port_entry_to_object(
	ipc_port_t      port);


extern kern_return_t vm_map_set_cache_attr(
	vm_map_t        map,
	vm_map_offset_t va);


/* definitions related to overriding the NX behavior */

#define VM_ABI_32       0x1
#define VM_ABI_64       0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);

#if PMAP_CS
extern kern_return_t vm_map_entry_cs_associate(
	vm_map_t                map,
	vm_map_entry_t          entry,
	vm_map_kernel_flags_t   vmk_flags);
#endif /* PMAP_CS */

extern void vm_map_region_top_walk(
	vm_map_entry_t          entry,
	vm_region_top_info_t    top);
extern void vm_map_region_walk(
	vm_map_t                map,
	vm_map_offset_t         va,
	vm_map_entry_t          entry,
	vm_object_offset_t      offset,
	vm_object_size_t        range,
	vm_region_extended_info_t extended,
	boolean_t               look_for_pages,
	mach_msg_type_number_t  count);


struct vm_map_corpse_footprint_header {
	vm_size_t       cf_size;        /* allocated buffer size */
	uint32_t        cf_last_region; /* offset of last region in buffer */
	union {
		uint32_t cfu_last_zeroes; /* during creation:
		                           * number of "zero" dispositions at
		                           * end of last region */
		uint32_t cfu_hint_region; /* during lookup:
		                           * offset of last looked up region */
#define cf_last_zeroes cfu.cfu_last_zeroes
#define cf_hint_region cfu.cfu_hint_region
	} cfu;
};
struct vm_map_corpse_footprint_region {
	vm_map_offset_t cfr_vaddr;      /* region start virtual address */
	uint32_t        cfr_num_pages;  /* number of pages in this "region" */
	unsigned char   cfr_disposition[0]; /* disposition of each page */
} __attribute__((packed));

extern kern_return_t vm_map_corpse_footprint_collect(
	vm_map_t        old_map,
	vm_map_entry_t  old_entry,
	vm_map_t        new_map);
extern void vm_map_corpse_footprint_collect_done(
	vm_map_t        new_map);

extern kern_return_t vm_map_corpse_footprint_query_page_info(
	vm_map_t        map,
	vm_map_offset_t va,
	int             *disp);

extern void vm_map_copy_footprint_ledgers(
	task_t  old_task,
	task_t  new_task);
extern void vm_map_copy_ledger(
	task_t  old_task,
	task_t  new_task,
	int     ledger_entry);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

/* Create an empty map */
extern vm_map_t vm_map_create(
	pmap_t          pmap,
	vm_map_offset_t min_off,
	vm_map_offset_t max_off,
	boolean_t       pageable);
extern vm_map_t vm_map_create_options(
	pmap_t          pmap,
	vm_map_offset_t min_off,
	vm_map_offset_t max_off,
	int             options);
#define VM_MAP_CREATE_PAGEABLE          0x00000001
#define VM_MAP_CREATE_CORPSE_FOOTPRINT  0x00000002
#define VM_MAP_CREATE_ALL_OPTIONS (VM_MAP_CREATE_PAGEABLE | \
	                           VM_MAP_CREATE_CORPSE_FOOTPRINT)

extern void vm_map_disable_hole_optimization(vm_map_t map);

/* Get rid of a map */
extern void vm_map_destroy(
	vm_map_t        map,
	int             flags);

/* Lose a reference */
extern void vm_map_deallocate(
	vm_map_t        map);

extern vm_map_t vm_map_switch(
	vm_map_t        map);

/* Change protection */
extern kern_return_t vm_map_protect(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       new_prot,
	boolean_t       set_max);

/* Check protection */
extern boolean_t vm_map_check_protection(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       protection);

/* wire down a region */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_wire_kernel(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       access_type,
	vm_tag_t        tag,
	boolean_t       user_wire);

extern kern_return_t vm_map_wire_and_extract_kernel(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_prot_t       access_type,
	vm_tag_t        tag,
	boolean_t       user_wire,
	ppnum_t         *physpage_p);

/* kext exported versions */

extern kern_return_t vm_map_wire_external(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       access_type,
	boolean_t       user_wire);

extern kern_return_t vm_map_wire_and_extract_external(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_prot_t       access_type,
	boolean_t       user_wire,
	ppnum_t         *physpage_p);

#else /* XNU_KERNEL_PRIVATE */

extern kern_return_t vm_map_wire(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       access_type,
	boolean_t       user_wire);

extern kern_return_t vm_map_wire_and_extract(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_prot_t       access_type,
	boolean_t       user_wire,
	ppnum_t         *physpage_p);

#endif /* !XNU_KERNEL_PRIVATE */

/* unwire a region */
extern kern_return_t vm_map_unwire(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	boolean_t       user_wire);
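
/*
 * Editor's sketch (not part of the original header): a kernel wire paired
 * with its unwire.  user_wire == FALSE keeps the pages off the user wire
 * limit; the tag and function name here are illustrative assumptions.
 */
#if 0 /* illustrative example only */
static kern_return_t
with_wired_range(vm_map_t map, vm_map_offset_t start, vm_map_offset_t end)
{
	kern_return_t kr;

	kr = vm_map_wire_kernel(map, start, end, VM_PROT_READ,
	    VM_KERN_MEMORY_OSFMK, FALSE);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* ... pages are resident and pinned here ... */
	return vm_map_unwire(map, start, end, FALSE);
}
#endif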

#ifdef XNU_KERNEL_PRIVATE

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object_prefault(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	upl_page_list_ptr_t     page_list,
	unsigned int            page_list_count);

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object_control(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	memory_object_control_t control,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

#endif /* XNU_KERNEL_PRIVATE */

/* Deallocate a region */
extern kern_return_t vm_map_remove(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	boolean_t       flags);

/* Deallocate a region when the map is already locked */
extern kern_return_t vm_map_remove_locked(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	boolean_t       flags);

/* Discard a copy without using it */
extern void vm_map_copy_discard(
	vm_map_copy_t   copy);

/* Overwrite existing memory with a copy */
extern kern_return_t vm_map_copy_overwrite(
	vm_map_t        dst_map,
	vm_map_address_t dst_addr,
	vm_map_copy_t   copy,
	boolean_t       interruptible);

/* Returns TRUE if the size of the vm_map_copy equals the size parameter, FALSE otherwise */
extern boolean_t vm_map_copy_validate_size(
	vm_map_t        dst_map,
	vm_map_copy_t   copy,
	vm_map_size_t   *size);

/* Place a copy into a map */
extern kern_return_t vm_map_copyout(
	vm_map_t        dst_map,
	vm_map_address_t *dst_addr,     /* OUT */
	vm_map_copy_t   copy);

extern kern_return_t vm_map_copyout_size(
	vm_map_t        dst_map,
	vm_map_address_t *dst_addr,     /* OUT */
	vm_map_copy_t   copy,
	vm_map_size_t   copy_size);

extern kern_return_t vm_map_copyout_internal(
	vm_map_t        dst_map,
	vm_map_address_t *dst_addr,     /* OUT */
	vm_map_copy_t   copy,
	vm_map_size_t   copy_size,
	boolean_t       consume_on_success,
	vm_prot_t       cur_protection,
	vm_prot_t       max_protection,
	vm_inherit_t    inheritance);

extern kern_return_t vm_map_copyin(
	vm_map_t        src_map,
	vm_map_address_t src_addr,
	vm_map_size_t   len,
	boolean_t       src_destroy,
	vm_map_copy_t   *copy_result);  /* OUT */
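
/*
 * Editor's sketch (not part of the original header): the usual copyin /
 * copyout round trip between two maps.  A copy object that ends up unused
 * must be discarded, or its pages leak.  The function name is hypothetical.
 */
#if 0 /* illustrative example only */
static kern_return_t
move_region(vm_map_t src_map, vm_map_address_t src_addr,
    vm_map_t dst_map, vm_map_address_t *dst_addr, vm_map_size_t len)
{
	vm_map_copy_t copy;
	kern_return_t kr;

	/* src_destroy == FALSE: leave the source mapping in place */
	kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	kr = vm_map_copyout(dst_map, dst_addr, copy);
	if (kr != KERN_SUCCESS) {
		vm_map_copy_discard(copy);      /* copy was not consumed */
	}
	return kr;
}
#endif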

extern kern_return_t vm_map_copyin_common(
	vm_map_t        src_map,
	vm_map_address_t src_addr,
	vm_map_size_t   len,
	boolean_t       src_destroy,
	boolean_t       src_volatile,
	vm_map_copy_t   *copy_result,   /* OUT */
	boolean_t       use_maxprot);

#define VM_MAP_COPYIN_SRC_DESTROY       0x00000001
#define VM_MAP_COPYIN_USE_MAXPROT       0x00000002
#define VM_MAP_COPYIN_ENTRY_LIST        0x00000004
#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
#define VM_MAP_COPYIN_ALL_FLAGS         0x0000000F
extern kern_return_t vm_map_copyin_internal(
	vm_map_t        src_map,
	vm_map_address_t src_addr,
	vm_map_size_t   len,
	int             flags,
	vm_map_copy_t   *copy_result);  /* OUT */

extern kern_return_t vm_map_copy_extract(
	vm_map_t        src_map,
	vm_map_address_t src_addr,
	vm_map_size_t   len,
	vm_map_copy_t   *copy_result,   /* OUT */
	vm_prot_t       *cur_prot,      /* OUT */
	vm_prot_t       *max_prot);


extern void vm_map_disable_NX(
	vm_map_t        map);

extern void vm_map_disallow_data_exec(
	vm_map_t        map);

extern void vm_map_set_64bit(
	vm_map_t        map);

extern void vm_map_set_32bit(
	vm_map_t        map);

extern void vm_map_set_jumbo(
	vm_map_t        map);

extern void vm_map_set_max_addr(
	vm_map_t        map, vm_map_offset_t new_max_offset);

extern boolean_t vm_map_has_hard_pagezero(
	vm_map_t        map,
	vm_map_offset_t pagezero_size);
extern void vm_commit_pagezero_status(vm_map_t tmap);

#ifdef __arm__
static inline boolean_t
vm_map_is_64bit(__unused vm_map_t map)
{
	return FALSE;
}
#else
extern boolean_t vm_map_is_64bit(
	vm_map_t        map);
#endif


extern kern_return_t vm_map_raise_max_offset(
	vm_map_t        map,
	vm_map_offset_t new_max_offset);

extern kern_return_t vm_map_raise_min_offset(
	vm_map_t        map,
	vm_map_offset_t new_min_offset);
#if __x86_64__
extern void vm_map_set_high_start(
	vm_map_t        map,
	vm_map_offset_t high_start);
#endif /* __x86_64__ */

extern vm_map_offset_t vm_compute_max_offset(
	boolean_t       is64);

extern void vm_map_get_max_aslr_slide_section(
	vm_map_t        map,
	int64_t         *max_sections,
	int64_t         *section_size);

extern uint64_t vm_map_get_max_aslr_slide_pages(
	vm_map_t        map);

extern uint64_t vm_map_get_max_loader_aslr_slide_pages(
	vm_map_t        map);

extern void vm_map_set_user_wire_limit(
	vm_map_t        map,
	vm_size_t       limit);

extern void vm_map_switch_protect(
	vm_map_t        map,
	boolean_t       val);

extern void vm_map_iokit_mapped_region(
	vm_map_t        map,
	vm_size_t       bytes);

extern void vm_map_iokit_unmapped_region(
	vm_map_t        map,
	vm_size_t       bytes);


extern boolean_t first_free_is_valid(vm_map_t);

extern int vm_map_page_shift(
	vm_map_t        map);

extern vm_map_offset_t vm_map_page_mask(
	vm_map_t        map);

extern int vm_map_page_size(
	vm_map_t        map);

extern vm_map_offset_t vm_map_round_page_mask(
	vm_map_offset_t offset,
	vm_map_offset_t mask);

extern vm_map_offset_t vm_map_trunc_page_mask(
	vm_map_offset_t offset,
	vm_map_offset_t mask);

extern boolean_t vm_map_page_aligned(
	vm_map_offset_t offset,
	vm_map_offset_t mask);

#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_page_info(
	vm_map_t                map,
	vm_map_offset_t         offset,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);
extern kern_return_t vm_map_page_range_info_internal(
	vm_map_t                map,
	vm_map_offset_t         start_offset,
	vm_map_offset_t         end_offset,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);
#endif /* XNU_KERNEL_PRIVATE */


#ifdef MACH_KERNEL_PRIVATE

/*
 *	Macros to invoke vm_map_copyin_common.  vm_map_copyin is the
 *	usual form; it handles a copyin based on the current protection,
 *	so a region whose current protection is VM_PROT_NONE fails.
 *	vm_map_copyin_maxprot handles a copyin based on maximum possible
 *	access.  The difference is that a region with no current access
 *	BUT possible maximum access is rejected by vm_map_copyin(), but
 *	returned by vm_map_copyin_maxprot.
 */
#define vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \
	vm_map_copyin_common(src_map, src_addr, len, src_destroy,       \
	    FALSE, copy_result, FALSE)

#define vm_map_copyin_maxprot(src_map, \
	src_addr, len, src_destroy, copy_result) \
	vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
	    FALSE, copy_result, TRUE)


/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_PAGE_SHIFT(map) ((map) ? (map)->hdr.page_shift : PAGE_SHIFT)
#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x, pgmask) (((x) & (pgmask)) == 0)
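
/*
 * Editor's sketch (not part of the original header): per-map rounding with
 * these macros, e.g. for a 16K map (page mask 0x3fff) an offset of 0x4001
 * truncates to 0x4000 and rounds up to 0x8000.  The function name is
 * hypothetical.
 */
#if 0 /* illustrative example only */
static void
map_page_rounding_demo(vm_map_t map)
{
	vm_map_offset_t mask = VM_MAP_PAGE_MASK(map);

	assert(VM_MAP_TRUNC_PAGE(0x4001, mask) <= 0x4001);
	assert(VM_MAP_ROUND_PAGE(0x4001, mask) >= 0x4001);
	assert(VM_MAP_PAGE_ALIGNED(VM_MAP_ROUND_PAGE(0x4001, mask), mask));
}
#endif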

static inline void
vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
{
	switch (prot) {
	case MAP_MEM_NOOP:              break;
	case MAP_MEM_IO:                *wimg = VM_WIMG_IO; break;
	case MAP_MEM_COPYBACK:          *wimg = VM_WIMG_USE_DEFAULT; break;
	case MAP_MEM_INNERWBACK:        *wimg = VM_WIMG_INNERWBACK; break;
	case MAP_MEM_POSTED:            *wimg = VM_WIMG_POSTED; break;
	case MAP_MEM_WTHRU:             *wimg = VM_WIMG_WTHRU; break;
	case MAP_MEM_WCOMB:             *wimg = VM_WIMG_WCOMB; break;
	default:
		panic("Unrecognized mapping type %u\n", prot);
	}
}

#endif /* MACH_KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
#endif /* XNU_KERNEL_PRIVATE */

#define vm_map_round_page(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define vm_map_trunc_page(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Flags for vm_map_remove() and vm_map_delete()
 */
#define VM_MAP_REMOVE_NO_FLAGS          0x0
#define VM_MAP_REMOVE_KUNWIRE           0x1
#define VM_MAP_REMOVE_INTERRUPTIBLE     0x2
#define VM_MAP_REMOVE_WAIT_FOR_KWIRE    0x4
#define VM_MAP_REMOVE_SAVE_ENTRIES      0x8
#define VM_MAP_REMOVE_NO_PMAP_CLEANUP   0x10
#define VM_MAP_REMOVE_NO_MAP_ALIGN      0x20
#define VM_MAP_REMOVE_NO_UNNESTING      0x40
#define VM_MAP_REMOVE_IMMUTABLE         0x80
#define VM_MAP_REMOVE_GAPS_OK           0x100

/* Support for UPLs from vm_maps */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_get_upl(
	vm_map_t                target_map,
	vm_map_offset_t         map_offset,
	upl_size_t              *size,
	upl_t                   *upl,
	upl_page_info_array_t   page_info,
	unsigned int            *page_infoCnt,
	upl_control_flags_t     *flags,
	vm_tag_t                tag,
	int                     force_data_sync);

#endif /* XNU_KERNEL_PRIVATE */

extern void
vm_map_sizes(vm_map_t map,
    vm_map_size_t * psize,
    vm_map_size_t * pfree,
    vm_map_size_t * plargest_free);

#if CONFIG_DYNAMIC_CODE_SIGNING
extern kern_return_t vm_map_sign(vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end);
#endif

extern kern_return_t vm_map_partial_reap(
	vm_map_t        map,
	unsigned int    *reclaimed_resident,
	unsigned int    *reclaimed_compressed);


#if DEVELOPMENT || DEBUG

extern int vm_map_disconnect_page_mappings(
	vm_map_t        map,
	boolean_t);
#endif


#if CONFIG_FREEZE

extern kern_return_t vm_map_freeze(
	vm_map_t        map,
	unsigned int    *purgeable_count,
	unsigned int    *wired_count,
	unsigned int    *clean_count,
	unsigned int    *dirty_count,
	unsigned int    dirty_budget,
	unsigned int    *shared_count,
	int             *freezer_error_code,
	boolean_t       eval_only);


#define FREEZER_ERROR_GENERIC                   (-1)
#define FREEZER_ERROR_EXCESS_SHARED_MEMORY      (-2)
#define FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO  (-3)
#define FREEZER_ERROR_NO_COMPRESSOR_SPACE       (-4)
#define FREEZER_ERROR_NO_SWAP_SPACE             (-5)

#endif

__END_DECLS

/*
 * In some cases, we don't have a real VM object but still want to return a
 * unique ID (to avoid a memory region looking like shared memory), so build
 * a fake pointer based on the map's ledger and the index of the ledger being
 * reported.
 */
#define INFO_MAKE_FAKE_OBJECT_ID(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id)))

#endif /* KERNEL_PRIVATE */

#endif /* _VM_VM_MAP_H_ */