/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	vm/vm_map.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory map module definitions.
 *
 * Contributors:
 *	avie, dlb, mwyoung
 */

#ifndef	_VM_VM_MAP_H_
#define _VM_VM_MAP_H_

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>
#include <vm/pmap.h>

#ifdef	KERNEL_PRIVATE

#include <sys/cdefs.h>

__BEGIN_DECLS

extern void	vm_map_reference(vm_map_t map);
extern vm_map_t current_map(void);

/* Set up reserved areas in a new VM map */
extern kern_return_t	vm_map_exec(
				vm_map_t	new_map,
				task_t		task,
				void		*fsroot,
				cpu_type_t	cpu);

__END_DECLS

#ifdef	MACH_KERNEL_PRIVATE

#include <task_swapper.h>
#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/lock.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>

#define current_map_fast()	(current_thread()->map)
#define current_map()		(current_map_fast())

#include <vm/vm_map_store.h>


/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				used for inter-map copy operations
 */
typedef struct vm_map_entry	*vm_map_entry_t;
#define VM_MAP_ENTRY_NULL	((vm_map_entry_t) 0)


/*
 *	Type:		vm_map_object_t [internal use only]
 *
 *	Description:
 *		The target of an address mapping, either a virtual
 *		memory object or a sub map (of the kernel map).
 */
typedef union vm_map_object {
	vm_object_t		vm_object;	/* the mapped VM object */
	vm_map_t		sub_map;	/* belongs to another map */
} vm_map_object_t;
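
/*
 * Illustrative sketch (not part of the original header): which arm of the
 * union is valid is recorded by the "is_sub_map" bit of the map entry that
 * holds it, so callers typically dispatch like this:
 *
 *	if (entry->is_sub_map)
 *		submap = entry->object.sub_map;
 *	else
 *		object = entry->object.vm_object;
 */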

#define named_entry_lock_init(object)	lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object)	lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)	lck_mtx_unlock(&(object)->Lock)

/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions (read, write)
 *		with which it can be mapped.
 *
 */

struct vm_named_entry {
	decl_lck_mtx_data(,	Lock)		/* Synchronization */
	union {
		vm_object_t	object;		/* object I point to */
		memory_object_t	pager;		/* amo pager port */
		vm_map_t	map;		/* map backing submap */
		vm_map_copy_t	copy;		/* a VM map copy */
	} backing;
	vm_object_offset_t	offset;		/* offset into object */
	vm_object_size_t	size;		/* size of region */
	vm_object_offset_t	data_offset;	/* offset to first byte of data */
	vm_prot_t		protection;	/* access permissions */
	int			ref_count;	/* Number of references */
	unsigned int				/* Is backing.xxx : */
	/* boolean_t */		internal:1,	/* ... an internal object */
	/* boolean_t */		is_sub_map:1,	/* ... a submap? */
	/* boolean_t */		is_pager:1,	/* ... a pager port */
	/* boolean_t */		is_copy:1;	/* ... a VM map copy */
};

/*
 *	Type:		vm_map_entry_t [internal use only]
 *
 *	Description:
 *		A single mapping within an address map.
 *
 *	Implementation:
 *		Address map entries consist of start and end addresses,
 *		a VM object (or sub map) and offset into that object,
 *		and user-exported inheritance and protection information.
 *		Control information for virtual copy operations is also
 *		stored in the address map entry.
 */

struct vm_map_links {
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vm_map_offset_t		start;		/* start address */
	vm_map_offset_t		end;		/* end address */
};

struct vm_map_entry {
	struct vm_map_links	links;		/* links to other entries */
#define vme_prev		links.prev
#define vme_next		links.next
#define vme_start		links.start
#define vme_end			links.end

	struct vm_map_store	store;
	union vm_map_object	object;		/* object I point to */
	vm_object_offset_t	offset;		/* offset into object */
	unsigned int
	/* boolean_t */		is_shared:1,	/* region is shared */
	/* boolean_t */		is_sub_map:1,	/* Is "object" a submap? */
	/* boolean_t */		in_transition:1, /* Entry being changed */
	/* boolean_t */		needs_wakeup:1,	/* Waiters on in_transition */
	/* vm_behavior_t */	behavior:2,	/* user paging behavior hint */
				/* behavior is not defined for submap type */
	/* boolean_t */		needs_copy:1,	/* object need to be copied? */
				/* Only in task maps: */
	/* vm_prot_t */		protection:3,	/* protection code */
	/* vm_prot_t */		max_protection:3, /* maximum protection */
	/* vm_inherit_t */	inheritance:2,	/* inheritance */
	/* boolean_t */		use_pmap:1,	/* nested pmaps */
	/*
	 * IMPORTANT:
	 * The "alias" field can be updated while holding the VM map lock
	 * "shared".  That is OK as long as it is the only field that can be
	 * updated without the VM map "exclusive" lock.
	 */
	/* unsigned char */	alias:8,	/* user alias */
	/* boolean_t */		no_cache:1,	/* should new pages be cached? */
	/* boolean_t */		permanent:1,	/* mapping cannot be removed */
	/* boolean_t */		superpage_size:1, /* use superpages of a certain size */
	/* boolean_t */		map_aligned:1,	/* align to map's page size */
	/* boolean_t */		zero_wired_pages:1, /* zero out the wired pages of this entry if it is being deleted without unwiring them */
	/* boolean_t */		used_for_jit:1,
	/* boolean_t */		from_reserved_zone:1, /* Allocated from
						       * kernel reserved zone */
	__unused_bits:1;
	unsigned short		wired_count;	/* can be paged if = 0 */
	unsigned short		user_wired_count; /* for vm_wire */
#if	DEBUG
#define	MAP_ENTRY_CREATION_DEBUG (1)
#define	MAP_ENTRY_INSERTION_DEBUG (1)
#endif
#if	MAP_ENTRY_CREATION_DEBUG
	struct vm_map_header	*vme_creation_maphdr;
	uintptr_t		vme_creation_bt[16];
#endif
#if	MAP_ENTRY_INSERTION_DEBUG
	uintptr_t		vme_insertion_bt[16];
#endif
};
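
/*
 * Illustrative sketch (not part of the original header): the vme_* macros
 * above let code address an entry's links by role, e.g.:
 *
 *	vm_map_size_t	entry_size = entry->vme_end - entry->vme_start;
 *	vm_map_entry_t	next = entry->vme_next;
 */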

/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a)	((a) & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a)	(((a) + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)
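
/*
 * Worked example (illustrative only; assumes an architecture where
 * SUPERPAGE_NBASEPAGES yields 2MB superpages, i.e. SUPERPAGE_SIZE ==
 * 0x200000):
 *
 *	SUPERPAGE_ROUND_DOWN(0x240A000) == 0x2400000
 *	SUPERPAGE_ROUND_UP(0x240A000)   == 0x2600000
 */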

/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT		65535


/*
 *	Type:		struct vm_map_header
 *
 *	Description:
 *		Header for a vm_map and a vm_map_copy.
 */


struct vm_map_header {
	struct vm_map_links	links;		/* first, last, min, max */
	int			nentries;	/* Number of entries */
	boolean_t		entries_pageable;
						/* are map entries pageable? */
	vm_map_offset_t		highest_entry_end_addr;	/* The ending address of the highest allocated vm_map_entry_t */
#ifdef VM_MAP_STORE_USE_RB
	struct rb_head	rb_head_store;
#endif
	int		page_shift;	/* page shift */
};

#define VM_MAP_HDR_PAGE_SHIFT(hdr)	((hdr)->page_shift)
#define VM_MAP_HDR_PAGE_SIZE(hdr)	(1 << VM_MAP_HDR_PAGE_SHIFT((hdr)))
#define VM_MAP_HDR_PAGE_MASK(hdr)	(VM_MAP_HDR_PAGE_SIZE((hdr)) - 1)

/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 */
struct _vm_map {
	lock_t			lock;		/* uni- and smp-lock */
	struct vm_map_header	hdr;		/* Map entry header */
#define min_offset		hdr.links.start	/* start of range */
#define max_offset		hdr.links.end	/* end of range */
#define highest_entry_end	hdr.highest_entry_end_addr
	pmap_t			pmap;		/* Physical map */
	vm_map_size_t		size;		/* virtual size */
	vm_map_size_t		user_wire_limit; /* rlimit on user locked memory */
	vm_map_size_t		user_wire_size;	/* current size of user locked memory in this map */
	int			ref_count;	/* Reference count */
#if	TASK_SWAPPER
	int			res_count;	/* Residence count (swap) */
	int			sw_state;	/* Swap state */
#endif	/* TASK_SWAPPER */
	decl_lck_mtx_data(,	s_lock)		/* Lock ref, res fields */
	lck_mtx_ext_t		s_lock_ext;
	vm_map_entry_t		hint;		/* hint for quick lookups */
	vm_map_entry_t		first_free;	/* First free space hint */
	unsigned int
	/* boolean_t */		wait_for_space:1, /* Should callers wait for space? */
	/* boolean_t */		wiring_required:1, /* All memory wired? */
	/* boolean_t */		no_zero_fill:1,	/* No zero-fill absent pages */
	/* boolean_t */		mapped_in_other_pmaps:1, /* has this submap been mapped in maps that use a different pmap */
	/* boolean_t */		switch_protect:1, /* Protect map from write faults while switched */
	/* boolean_t */		disable_vmentry_reuse:1, /* All vm entries should keep using newer and higher addresses in the map */
	/* boolean_t */		map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
	/* reserved */		pad:25;
	unsigned int		timestamp;	/* Version number */
	unsigned int		color_rr;	/* next color (not protected by a lock) */
#if CONFIG_FREEZE
	void			*default_freezer_handle;
#endif
	boolean_t		jit_entry_exists;
};

#define vm_map_to_entry(map)	((struct vm_map_entry *) &(map)->hdr.links)
#define vm_map_first_entry(map)	((map)->hdr.links.next)
#define vm_map_last_entry(map)	((map)->hdr.links.prev)
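
/*
 * Illustrative sketch (not part of the original header): walking a map's
 * entry list with the macros above.  vm_map_to_entry() yields the list
 * sentinel, so the loop stops after the last real entry.  Assumes the
 * caller holds a reference on "map".
 *
 *	vm_map_entry_t entry;
 *
 *	vm_map_lock_read(map);
 *	for (entry = vm_map_first_entry(map);
 *	     entry != vm_map_to_entry(map);
 *	     entry = entry->vme_next) {
 *		(inspect entry->vme_start .. entry->vme_end here)
 *	}
 *	vm_map_unlock_read(map);
 */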

#if	TASK_SWAPPER
/*
 *	VM map swap states.  There are no transition states.
 */
#define MAP_SW_IN	1	/* map is swapped in; residence count > 0 */
#define MAP_SW_OUT	2	/* map is out (res_count == 0) */
#endif	/* TASK_SWAPPER */

/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage note:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int	main_timestamp;
} vm_map_version_t;
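
/*
 * Illustrative sketch (not part of the original header) of the usual
 * lookup/verify pattern: a version captured at lookup time is later
 * compared against the map's timestamp to detect intervening changes.
 *
 *	vm_map_version_t version;
 *
 *	(vm_map_lookup_locked(&map, vaddr, ..., &version, ...) fills it in)
 *	(operate without the map lock)
 *	vm_map_lock_read(map);
 *	if (vm_map_verify(map, &version)) {
 *		(map unchanged; the earlier lookup is still valid)
 *		vm_map_verify_done(map, &version);   (drops the read lock)
 *	} else {
 *		vm_map_unlock_read(map);
 *		(redo the lookup)
 *	}
 */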

/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 *		There are three formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header (a links
 *		structure) onto which the other entries that represent
 *		the copied region are chained.
 *
 *		The second format is a single vm object.  This was used
 *		primarily in the pageout path - but is not currently used
 *		except for placeholder copy objects (see vm_map_copy_copy()).
 *
 *		The third format is a kernel buffer copy object - for data
 *		small enough that physical copies were the most efficient
 *		method.
 */

struct vm_map_copy {
	int			type;
#define VM_MAP_COPY_ENTRY_LIST		1
#define VM_MAP_COPY_OBJECT		2
#define VM_MAP_COPY_KERNEL_BUFFER	3
	vm_object_offset_t	offset;
	vm_map_size_t		size;
	union {
		struct vm_map_header	hdr;	/* ENTRY_LIST */
		vm_object_t		object;	/* OBJECT */
		struct {
			void		*kdata;	     /* KERNEL_BUFFER */
			vm_size_t	kalloc_size; /* size of this copy_t */
		} c_k;
	} c_u;
};


#define cpy_hdr			c_u.hdr

#define cpy_object		c_u.object

#define cpy_kdata		c_u.c_k.kdata
#define cpy_kalloc_size		c_u.c_k.kalloc_size

#define VM_MAP_COPY_PAGE_SHIFT(copy)	((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy)	(1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy)	(VM_MAP_COPY_PAGE_SIZE((copy)) - 1)

/*
 *	Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy)		\
		((struct vm_map_entry *) &(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy)		\
		((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy)		\
		((copy)->cpy_hdr.links.prev)

/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *	Description:
 *		Perform locking on the data portion of a map.
 *		When multiple maps are to be locked, order by map address.
 *		(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map)				\
	((map)->timestamp = 0 ,				\
	 lock_init(&(map)->lock, TRUE, 0, 0))

#define vm_map_lock(map)		lock_write(&(map)->lock)
#define vm_map_unlock(map)				\
	((map)->timestamp++ , lock_write_done(&(map)->lock))
#define vm_map_lock_read(map)		lock_read(&(map)->lock)
#define vm_map_unlock_read(map)		lock_read_done(&(map)->lock)
#define vm_map_lock_write_to_read(map)			\
	((map)->timestamp++ , lock_write_to_read(&(map)->lock))
/* lock_read_to_write() returns FALSE on failure.  This macro evaluates to
 * zero on success and to a non-zero value on failure.
 */
#define vm_map_lock_read_to_write(map)	(lock_read_to_write(&(map)->lock) != TRUE)
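
/*
 * Illustrative sketch (not part of the original header): upgrading a read
 * lock.  On failure the read lock has already been lost, so the caller
 * must re-take the lock and revalidate whatever it had looked up:
 *
 *	vm_map_lock_read(map);
 *	...
 *	if (vm_map_lock_read_to_write(map)) {
 *		(upgrade failed and the lock was dropped: retry)
 *		vm_map_lock_read(map);
 *		goto retry;
 *	}
 *	(write lock now held)
 *	vm_map_unlock(map);
 */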

/*
 *	Exported procedures that operate on vm_map_t.
 */

/* Initialize the module */
extern void		vm_map_init(void);

extern void		vm_kernel_reserved_entry_init(void);

/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t	vm_map_find_space(
				vm_map_t	map,
				vm_map_address_t *address,	/* OUT */
				vm_map_size_t	size,
				vm_map_offset_t	mask,
				int		flags,
				vm_map_entry_t	*o_entry);	/* OUT */

extern void		vm_map_clip_start(
				vm_map_t	map,
				vm_map_entry_t	entry,
				vm_map_offset_t	endaddr);
extern void		vm_map_clip_end(
				vm_map_t	map,
				vm_map_entry_t	entry,
				vm_map_offset_t	endaddr);
extern boolean_t	vm_map_entry_should_cow_for_true_share(
				vm_map_entry_t	entry);

/* Look up the map entry containing the specified address in the given map */
extern boolean_t	vm_map_lookup_entry(
				vm_map_t	map,
				vm_map_address_t address,
				vm_map_entry_t	*entry);	/* OUT */
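
/*
 * Illustrative sketch (not part of the original header): with the map
 * locked, vm_map_lookup_entry() returns TRUE when *entry contains
 * "address" and FALSE when *entry is merely the preceding entry
 * (possibly the list sentinel):
 *
 *	vm_map_entry_t entry;
 *
 *	if (vm_map_lookup_entry(map, address, &entry)) {
 *		(address falls within [entry->vme_start, entry->vme_end))
 *	} else {
 *		(nothing mapped at address; entry is its predecessor)
 *	}
 */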

extern void		vm_map_copy_remap(
				vm_map_t	map,
				vm_map_entry_t	where,
				vm_map_copy_t	copy,
				vm_map_offset_t	adjustment,
				vm_prot_t	cur_prot,
				vm_prot_t	max_prot,
				vm_inherit_t	inheritance);

/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t	vm_map_lookup_locked(
				vm_map_t	*var_map,	/* IN/OUT */
				vm_map_address_t vaddr,
				vm_prot_t	fault_type,
				int		object_lock_type,
				vm_map_version_t *out_version,	/* OUT */
				vm_object_t	*object,	/* OUT */
				vm_object_offset_t *offset,	/* OUT */
				vm_prot_t	*out_prot,	/* OUT */
				boolean_t	*wired,		/* OUT */
				vm_object_fault_info_t fault_info, /* OUT */
				vm_map_t	*real_map);	/* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t	vm_map_verify(
				vm_map_t	map,
				vm_map_version_t *version);	/* REF */

extern vm_map_entry_t	vm_map_entry_insert(
				vm_map_t	map,
				vm_map_entry_t	insp_entry,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_object_t	object,
				vm_object_offset_t offset,
				boolean_t	needs_copy,
				boolean_t	is_shared,
				boolean_t	in_transition,
				vm_prot_t	cur_protection,
				vm_prot_t	max_protection,
				vm_behavior_t	behavior,
				vm_inherit_t	inheritance,
				unsigned	wired_count,
				boolean_t	no_cache,
				boolean_t	permanent,
				unsigned int	superpage_size,
				boolean_t	clear_map_aligned);


/*
 *	Functions implemented as macros
 */
#define	vm_map_min(map)		((map)->min_offset)
						/* Lowest valid address in
						 * a map */

#define	vm_map_max(map)		((map)->max_offset)
						/* Highest valid address */

#define	vm_map_pmap(map)	((map)->pmap)
						/* Physical map associated
						 * with this address map */

#define	vm_map_verify_done(map, version)    vm_map_unlock_read(map)
						/* Operation that required
						 * a verified lookup is
						 * now complete */

/*
 * Macros/functions for map residence counts and swapin/out of vm maps
 */
#if	TASK_SWAPPER

#if	MACH_ASSERT
/* Gain a reference to an existing map */
extern void		vm_map_reference(
				vm_map_t	map);
/* Lose a residence count */
extern void		vm_map_res_deallocate(
				vm_map_t	map);
/* Gain a residence count on a map */
extern void		vm_map_res_reference(
				vm_map_t	map);
/* Gain reference & residence counts to possibly swapped-out map */
extern void		vm_map_reference_swap(
				vm_map_t	map);

#else	/* MACH_ASSERT */

#define vm_map_reference(map)			\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	if (Map) {				\
		lck_mtx_lock(&Map->s_lock);	\
		Map->res_count++;		\
		Map->ref_count++;		\
		lck_mtx_unlock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_res_reference(map)		\
MACRO_BEGIN					\
	vm_map_t Lmap = (map);			\
	if (Lmap->res_count == 0) {		\
		lck_mtx_unlock(&Lmap->s_lock);	\
		vm_map_lock(Lmap);		\
		vm_map_swapin(Lmap);		\
		lck_mtx_lock(&Lmap->s_lock);	\
		++Lmap->res_count;		\
		vm_map_unlock(Lmap);		\
	} else					\
		++Lmap->res_count;		\
MACRO_END

#define vm_map_res_deallocate(map)		\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	if (--Map->res_count == 0) {		\
		lck_mtx_unlock(&Map->s_lock);	\
		vm_map_lock(Map);		\
		vm_map_swapout(Map);		\
		vm_map_unlock(Map);		\
		lck_mtx_lock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_reference_swap(map)		\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	lck_mtx_lock(&Map->s_lock);		\
	++Map->ref_count;			\
	vm_map_res_reference(Map);		\
	lck_mtx_unlock(&Map->s_lock);		\
MACRO_END
#endif	/* MACH_ASSERT */

extern void		vm_map_swapin(
				vm_map_t	map);

extern void		vm_map_swapout(
				vm_map_t	map);

#else	/* TASK_SWAPPER */

#define vm_map_reference(map)			\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	if (Map) {				\
		lck_mtx_lock(&Map->s_lock);	\
		Map->ref_count++;		\
		lck_mtx_unlock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_reference_swap(map)	vm_map_reference(map)
#define vm_map_res_reference(map)
#define vm_map_res_deallocate(map)

#endif	/* TASK_SWAPPER */

/*
 *	Submap object.  Must be used to create memory to be put
 *	in a submap by vm_map_submap.
 */
extern vm_object_t	vm_submap_object;

/*
 *	Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible)		\
	((map)->timestamp++ ,				\
	 thread_sleep_lock_write((event_t)&(map)->hdr,	\
				 &(map)->lock, interruptible))


#define vm_map_entry_wakeup(map)	\
	thread_wakeup((event_t)(&(map)->hdr))
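
/*
 * Illustrative sketch (not part of the original header): the usual wait
 * loop for in_transition entries.  vm_map_entry_wait() drops the map lock
 * while sleeping, so the caller must re-lock and re-look-up the entry:
 *
 *	while (entry->in_transition) {
 *		entry->needs_wakeup = TRUE;
 *		vm_map_entry_wait(map, THREAD_UNINT);
 *		vm_map_lock(map);
 *		(re-lookup "entry"; it may have been clipped or freed)
 *	}
 */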


#define	vm_map_ref_fast(map)			\
	MACRO_BEGIN				\
	lck_mtx_lock(&map->s_lock);		\
	map->ref_count++;			\
	vm_map_res_reference(map);		\
	lck_mtx_unlock(&map->s_lock);		\
	MACRO_END

#define	vm_map_dealloc_fast(map)		\
	MACRO_BEGIN				\
	register int c;				\
						\
	lck_mtx_lock(&map->s_lock);		\
	c = --map->ref_count;			\
	if (c > 0)				\
		vm_map_res_deallocate(map);	\
	lck_mtx_unlock(&map->s_lock);		\
	if (c == 0)				\
		vm_map_destroy(map);		\
	MACRO_END


/* simplify map entries */
extern void		vm_map_simplify_entry(
				vm_map_t	map,
				vm_map_entry_t	this_entry);
extern void		vm_map_simplify(
				vm_map_t	map,
				vm_map_offset_t	start);

/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t	vm_map_copy_copy(
				vm_map_copy_t	copy);

/* Create a copy object from an object. */
extern kern_return_t	vm_map_copyin_object(
				vm_object_t	object,
				vm_object_offset_t offset,
				vm_object_size_t size,
				vm_map_copy_t	*copy_result);	/* OUT */

extern kern_return_t	vm_map_random_address_for_size(
				vm_map_t	map,
				vm_map_offset_t	*address,
				vm_map_size_t	size);

/* Enter a mapping */
extern kern_return_t	vm_map_enter(
				vm_map_t	map,
				vm_map_offset_t	*address,
				vm_map_size_t	size,
				vm_map_offset_t	mask,
				int		flags,
				vm_object_t	object,
				vm_object_offset_t offset,
				boolean_t	needs_copy,
				vm_prot_t	cur_protection,
				vm_prot_t	max_protection,
				vm_inherit_t	inheritance);

/* XXX should go away - replaced with regular enter of contig object */
extern kern_return_t	vm_map_enter_cpm(
				vm_map_t	map,
				vm_map_address_t *addr,
				vm_map_size_t	size,
				int		flags);

extern kern_return_t	vm_map_remap(
				vm_map_t	target_map,
				vm_map_offset_t	*address,
				vm_map_size_t	size,
				vm_map_offset_t	mask,
				int		flags,
				vm_map_t	src_map,
				vm_map_offset_t	memory_address,
				boolean_t	copy,
				vm_prot_t	*cur_protection,
				vm_prot_t	*max_protection,
				vm_inherit_t	inheritance);


/*
 * Read and write from a kernel buffer to a specified map.
 */
extern kern_return_t	vm_map_write_user(
				vm_map_t	map,
				void		*src_p,
				vm_map_offset_t	dst_addr,
				vm_size_t	size);

extern kern_return_t	vm_map_read_user(
				vm_map_t	map,
				vm_map_offset_t	src_addr,
				void		*dst_p,
				vm_size_t	size);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t		vm_map_fork(
				ledger_t	ledger,
				vm_map_t	old_map);

/* Change inheritance */
extern kern_return_t	vm_map_inherit(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_inherit_t	new_inheritance);

/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t	vm_map_machine_attribute(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_machine_attribute_t attribute,
				vm_machine_attribute_val_t *value); /* IN/OUT */

extern kern_return_t	vm_map_msync(
				vm_map_t	map,
				vm_map_address_t address,
				vm_map_size_t	size,
				vm_sync_t	sync_flags);

/* Set paging behavior */
extern kern_return_t	vm_map_behavior_set(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_behavior_t	new_behavior);

extern kern_return_t	vm_map_purgable_control(
				vm_map_t	map,
				vm_map_offset_t	address,
				vm_purgable_t	control,
				int		*state);

extern kern_return_t	vm_map_region(
				vm_map_t	map,
				vm_map_offset_t	*address,
				vm_map_size_t	*size,
				vm_region_flavor_t flavor,
				vm_region_info_t info,
				mach_msg_type_number_t *count,
				mach_port_t	*object_name);

extern kern_return_t	vm_map_region_recurse_64(
				vm_map_t	map,
				vm_map_offset_t	*address,
				vm_map_size_t	*size,
				natural_t	*nesting_depth,
				vm_region_submap_info_64_t info,
				mach_msg_type_number_t *count);

extern kern_return_t	vm_map_page_query_internal(
				vm_map_t	map,
				vm_map_offset_t	offset,
				int		*disposition,
				int		*ref_count);

extern kern_return_t	vm_map_query_volatile(
				vm_map_t	map,
				mach_vm_size_t	*volatile_virtual_size_p,
				mach_vm_size_t	*volatile_resident_size_p,
				mach_vm_size_t	*volatile_pmap_size_p);

extern kern_return_t	vm_map_submap(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_map_t	submap,
				vm_map_offset_t	offset,
				boolean_t	use_pmap);

extern void		vm_map_submap_pmap_clean(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_map_t	sub_map,
				vm_map_offset_t	offset);

/* Convert from a map entry port to a map */
extern vm_map_t		convert_port_entry_to_map(
				ipc_port_t	port);

/* Convert from a port to a vm_object */
extern vm_object_t	convert_port_entry_to_object(
				ipc_port_t	port);


extern kern_return_t	vm_map_set_cache_attr(
				vm_map_t	map,
				vm_map_offset_t	va);


/* definitions related to overriding the NX behavior */

#define VM_ABI_32	0x1
#define VM_ABI_64	0x2

extern int		override_nx(vm_map_t map, uint32_t user_tag);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

/* Create an empty map */
extern vm_map_t		vm_map_create(
				pmap_t		pmap,
				vm_map_offset_t	min_off,
				vm_map_offset_t	max_off,
				boolean_t	pageable);

/* Get rid of a map */
extern void		vm_map_destroy(
				vm_map_t	map,
				int		flags);

/* Lose a reference */
extern void		vm_map_deallocate(
				vm_map_t	map);

extern vm_map_t		vm_map_switch(
				vm_map_t	map);

/* Change protection */
extern kern_return_t	vm_map_protect(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_prot_t	new_prot,
				boolean_t	set_max);

/* Check protection */
extern boolean_t	vm_map_check_protection(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_prot_t	protection);

/* wire down a region */
extern kern_return_t	vm_map_wire(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_prot_t	access_type,
				boolean_t	user_wire);

/* unwire a region */
extern kern_return_t	vm_map_unwire(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				boolean_t	user_wire);
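
/*
 * Illustrative sketch (not part of the original header): wiring a range
 * on behalf of the kernel and unwiring it afterwards.  user_wire selects
 * whether user_wired_count or the kernel wired_count is charged:
 *
 *	kr = vm_map_wire(map, start, end,
 *		VM_PROT_READ | VM_PROT_WRITE, FALSE);
 *	if (kr == KERN_SUCCESS) {
 *		(access the wired range)
 *		(void) vm_map_unwire(map, start, end, FALSE);
 *	}
 */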

/* Enter a mapping of a memory object */
extern kern_return_t	vm_map_enter_mem_object(
				vm_map_t	map,
				vm_map_offset_t	*address,
				vm_map_size_t	size,
				vm_map_offset_t	mask,
				int		flags,
				ipc_port_t	port,
				vm_object_offset_t offset,
				boolean_t	needs_copy,
				vm_prot_t	cur_protection,
				vm_prot_t	max_protection,
				vm_inherit_t	inheritance);

/* Enter a mapping of a memory object */
extern kern_return_t	vm_map_enter_mem_object_control(
				vm_map_t	map,
				vm_map_offset_t	*address,
				vm_map_size_t	size,
				vm_map_offset_t	mask,
				int		flags,
				memory_object_control_t	control,
				vm_object_offset_t offset,
				boolean_t	needs_copy,
				vm_prot_t	cur_protection,
				vm_prot_t	max_protection,
				vm_inherit_t	inheritance);

/* Deallocate a region */
extern kern_return_t	vm_map_remove(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				boolean_t	flags);

/* Discard a copy without using it */
extern void		vm_map_copy_discard(
				vm_map_copy_t	copy);

/* Overwrite existing memory with a copy */
extern kern_return_t	vm_map_copy_overwrite(
				vm_map_t	dst_map,
				vm_map_address_t dst_addr,
				vm_map_copy_t	copy,
				boolean_t	interruptible);

/* Place a copy into a map */
extern kern_return_t	vm_map_copyout(
				vm_map_t	dst_map,
				vm_map_address_t *dst_addr,	/* OUT */
				vm_map_copy_t	copy);

extern kern_return_t	vm_map_copyout_internal(
				vm_map_t	dst_map,
				vm_map_address_t *dst_addr,	/* OUT */
				vm_map_copy_t	copy,
				boolean_t	consume_on_success,
				vm_prot_t	cur_protection,
				vm_prot_t	max_protection,
				vm_inherit_t	inheritance);

extern kern_return_t	vm_map_copyin(
				vm_map_t	src_map,
				vm_map_address_t src_addr,
				vm_map_size_t	len,
				boolean_t	src_destroy,
				vm_map_copy_t	*copy_result);	/* OUT */
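
/*
 * Illustrative sketch (not part of the original header): moving a region
 * between maps with the copyin/copyout pair.  The copy object holds the
 * memory "in transit"; if the copyout fails, the copy object was not
 * consumed and must be discarded explicitly:
 *
 *	vm_map_copy_t copy;
 *	vm_map_address_t target;
 *
 *	kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
 *	if (kr == KERN_SUCCESS) {
 *		kr = vm_map_copyout(dst_map, &target, copy);
 *		if (kr != KERN_SUCCESS)
 *			vm_map_copy_discard(copy);
 *	}
 */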

extern kern_return_t	vm_map_copyin_common(
				vm_map_t	src_map,
				vm_map_address_t src_addr,
				vm_map_size_t	len,
				boolean_t	src_destroy,
				boolean_t	src_volatile,
				vm_map_copy_t	*copy_result,	/* OUT */
				boolean_t	use_maxprot);

extern kern_return_t	vm_map_copy_extract(
				vm_map_t	src_map,
				vm_map_address_t src_addr,
				vm_map_size_t	len,
				vm_map_copy_t	*copy_result,	/* OUT */
				vm_prot_t	*cur_prot,	/* OUT */
				vm_prot_t	*max_prot);


extern void		vm_map_disable_NX(
				vm_map_t	map);

extern void		vm_map_disallow_data_exec(
				vm_map_t	map);

extern void		vm_map_set_64bit(
				vm_map_t	map);

extern void		vm_map_set_32bit(
				vm_map_t	map);

extern boolean_t	vm_map_has_hard_pagezero(
				vm_map_t	map,
				vm_map_offset_t	pagezero_size);

extern boolean_t	vm_map_is_64bit(
				vm_map_t	map);
#define vm_map_has_4GB_pagezero(map)	vm_map_has_hard_pagezero(map, (vm_map_offset_t)0x100000000ULL)


extern void		vm_map_set_4GB_pagezero(
				vm_map_t	map);

extern void		vm_map_clear_4GB_pagezero(
				vm_map_t	map);

extern kern_return_t	vm_map_raise_max_offset(
				vm_map_t	map,
				vm_map_offset_t	new_max_offset);

extern kern_return_t	vm_map_raise_min_offset(
				vm_map_t	map,
				vm_map_offset_t	new_min_offset);

extern vm_map_offset_t	vm_compute_max_offset(
				unsigned	is64);

extern uint64_t		vm_map_get_max_aslr_slide_pages(
				vm_map_t	map);

extern void		vm_map_set_user_wire_limit(
				vm_map_t	map,
				vm_size_t	limit);

extern void		vm_map_switch_protect(
				vm_map_t	map,
				boolean_t	val);

extern void		vm_map_iokit_mapped_region(
				vm_map_t	map,
				vm_size_t	bytes);

extern void		vm_map_iokit_unmapped_region(
				vm_map_t	map,
				vm_size_t	bytes);


extern boolean_t	first_free_is_valid(vm_map_t);

extern int		vm_map_page_shift(
				vm_map_t	map);

extern int		vm_map_page_mask(
				vm_map_t	map);

extern int		vm_map_page_size(
				vm_map_t	map);

extern vm_map_offset_t	vm_map_round_page_mask(
				vm_map_offset_t	offset,
				vm_map_offset_t	mask);

extern vm_map_offset_t	vm_map_trunc_page_mask(
				vm_map_offset_t	offset,
				vm_map_offset_t	mask);

#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t	vm_map_page_info(
				vm_map_t	map,
				vm_map_offset_t	offset,
				vm_page_info_flavor_t flavor,
				vm_page_info_t	info,
				mach_msg_type_number_t *count);
#endif /* XNU_KERNEL_PRIVATE */


#ifdef	MACH_KERNEL_PRIVATE

/*
 *	Macros to invoke vm_map_copyin_common.  vm_map_copyin is the
 *	usual form; it handles a copyin based on the current protection
 *	(a current protection of VM_PROT_NONE is a failure).
 *	vm_map_copyin_maxprot handles a copyin based on maximum possible
 *	access.  The difference is that a region with no current access
 *	BUT possible maximum access is rejected by vm_map_copyin(), but
 *	returned by vm_map_copyin_maxprot.
 */
#define	vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \
		vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
					FALSE, copy_result, FALSE)

#define vm_map_copyin_maxprot(src_map, \
			      src_addr, len, src_destroy, copy_result) \
		vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
					FALSE, copy_result, TRUE)


/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x,pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x,pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_PAGE_SHIFT(map) ((map) ? (map)->hdr.page_shift : PAGE_SHIFT)
#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x,pgmask) (((x) & (pgmask)) == 0)

#endif /* MACH_KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
#endif /* XNU_KERNEL_PRIVATE */

#define vm_map_round_page(x,pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define vm_map_trunc_page(x,pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))
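
/*
 * Worked example (illustrative): with 4KB pages the mask is 0xFFF, so
 *
 *	vm_map_round_page(0x1234, 0xFFF) == 0x2000
 *	vm_map_trunc_page(0x1234, 0xFFF) == 0x1000
 */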

/*
 * Flags for vm_map_remove() and vm_map_delete()
 */
#define	VM_MAP_NO_FLAGS			0x0
#define	VM_MAP_REMOVE_KUNWIRE		0x1
#define	VM_MAP_REMOVE_INTERRUPTIBLE	0x2
#define	VM_MAP_REMOVE_WAIT_FOR_KWIRE	0x4
#define	VM_MAP_REMOVE_SAVE_ENTRIES	0x8
#define	VM_MAP_REMOVE_NO_PMAP_CLEANUP	0x10
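
/*
 * Illustrative sketch (not part of the original header): the flags may be
 * OR'd together, e.g. a kernel-internal teardown that unwires kernel-wired
 * pages and skips the pmap cleanup:
 *
 *	kr = vm_map_remove(map, start, end,
 *		VM_MAP_REMOVE_KUNWIRE | VM_MAP_REMOVE_NO_PMAP_CLEANUP);
 */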

/* Support for UPLs from vm_maps */

extern kern_return_t	vm_map_get_upl(
				vm_map_t		target_map,
				vm_map_offset_t		map_offset,
				upl_size_t		*size,
				upl_t			*upl,
				upl_page_info_array_t	page_info,
				unsigned int		*page_infoCnt,
				int			*flags,
				int			force_data_sync);

#if CONFIG_DYNAMIC_CODE_SIGNING
extern kern_return_t	vm_map_sign(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end);
#endif

#if CONFIG_FREEZE
void	vm_map_freeze_thaw_init(void);
void	vm_map_freeze_thaw(void);
void	vm_map_demand_fault(void);

extern kern_return_t	vm_map_freeze_walk(
				vm_map_t	map,
				unsigned int	*purgeable_count,
				unsigned int	*wired_count,
				unsigned int	*clean_count,
				unsigned int	*dirty_count,
				unsigned int	dirty_budget,
				boolean_t	*has_shared);

extern kern_return_t	vm_map_freeze(
				vm_map_t	map,
				unsigned int	*purgeable_count,
				unsigned int	*wired_count,
				unsigned int	*clean_count,
				unsigned int	*dirty_count,
				unsigned int	dirty_budget,
				boolean_t	*has_shared);

extern kern_return_t	vm_map_thaw(
				vm_map_t	map);
#endif

__END_DECLS

#endif	/* KERNEL_PRIVATE */

#endif	/* _VM_VM_MAP_H_ */