/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	vm/vm_map.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory map module definitions.
 *
 *	Contributors:
 *		avie, dlb, mwyoung
 */

#ifndef	_VM_VM_MAP_H_
#define	_VM_VM_MAP_H_

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <vm/pmap.h>

typedef struct vm_map_entry	*vm_map_entry_t;

extern void	kernel_vm_map_reference(vm_map_t map);

#ifndef	MACH_KERNEL_PRIVATE

struct vm_map_entry {};

extern void	vm_map_reference(vm_map_t map);
extern vm_map_t	current_map(void);

#else /* MACH_KERNEL_PRIVATE */

#include <cpus.h>
#include <task_swapper.h>
#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/lock.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#define shared_region_mapping_lock_init(object)	\
		mutex_init(&(object)->Lock, ETAP_VM_OBJ)
#define shared_region_mapping_lock(object)	mutex_lock(&(object)->Lock)
#define shared_region_mapping_unlock(object)	mutex_unlock(&(object)->Lock)
#include <kern/thread_act.h>

#define current_map_fast()	(current_act_fast()->map)
#define	current_map()		(current_map_fast())

/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				used for inter-map copy operations
 */

/*
 *	Type:		vm_map_object_t [internal use only]
 *
 *	Description:
 *		The target of an address mapping, either a virtual
 *		memory object or a sub map (of the kernel map).
 */
typedef union vm_map_object {
	struct vm_object	*vm_object;	/* virtual memory object */
	struct vm_map		*sub_map;	/* belongs to another map */
} vm_map_object_t;

#define named_entry_lock_init(object)	mutex_init(&(object)->Lock, ETAP_VM_OBJ)
#define named_entry_lock(object)	mutex_lock(&(object)->Lock)
#define named_entry_unlock(object)	mutex_unlock(&(object)->Lock)

/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions, (read, write)
 *		with which it can be mapped.
 *
 */

struct vm_named_entry {
	decl_mutex_data(,	Lock)		/* Synchronization */
	vm_object_t		object;		/* object I point to */
	vm_object_offset_t	offset;		/* offset into object */
	union {
		memory_object_t	pager;		/* amo pager port */
		vm_map_t	map;		/* map backing submap */
	} backing;
	unsigned int		size;		/* size of region */
	unsigned int		protection;	/* access permissions */
	int			ref_count;	/* Number of references */
	unsigned int
	/* boolean_t */		internal:1,	/* is an internal object */
	/* boolean_t */		is_sub_map:1;	/* is "object" a submap? */
};

/*
 *	Type:		vm_map_entry_t [internal use only]
 *
 *	Description:
 *		A single mapping within an address map.
 *
 *	Implementation:
 *		Address map entries consist of start and end addresses,
 *		a VM object (or sub map) and offset into that object,
 *		and user-exported inheritance and protection information.
 *		Control information for virtual copy operations is also
 *		stored in the address map entry.
 */
struct vm_map_links {
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vm_offset_t		start;		/* start address */
	vm_offset_t		end;		/* end address */
};

struct vm_map_entry {
	struct vm_map_links	links;		/* links to other entries */
#define vme_prev		links.prev
#define vme_next		links.next
#define vme_start		links.start
#define vme_end			links.end
	union vm_map_object	object;		/* object I point to */
	vm_object_offset_t	offset;		/* offset into object */
	unsigned int
	/* boolean_t */		is_shared:1,	/* region is shared */
	/* boolean_t */		is_sub_map:1,	/* Is "object" a submap? */
	/* boolean_t */		in_transition:1, /* Entry being changed */
	/* boolean_t */		needs_wakeup:1,	/* Waiters on in_transition */
	/* vm_behavior_t */	behavior:2,	/* user paging behavior hint */
		/* behavior is not defined for submap type */
	/* boolean_t */		needs_copy:1,	/* object need to be copied? */
		/* Only in task maps: */
	/* vm_prot_t */		protection:3,	/* protection code */
	/* vm_prot_t */		max_protection:3, /* maximum protection */
	/* vm_inherit_t */	inheritance:2,	/* inheritance */
	/* nested pmap */	use_pmap:1,	/* nested pmaps */
	/* user alias */	alias:8;
	unsigned short		wired_count;	/* can be paged if = 0 */
	unsigned short		user_wired_count; /* for vm_wire */
};

/*
 *	The wired counts are unsigned shorts.  MAX_WIRE_COUNT bounds them
 *	to safeguard against mishaps due to runaway user programs.
 */
#define	MAX_WIRE_COUNT		65535


/*
 *	Type:		struct vm_map_header
 *
 *	Description:
 *		Header for a vm_map and a vm_map_copy.
 */
struct vm_map_header {
	struct vm_map_links	links;		/* first, last, min, max */
	int			nentries;	/* Number of entries */
	boolean_t		entries_pageable;
						/* are map entries pageable? */
};

/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 */
struct vm_map {
	lock_t			lock;		/* uni- and smp-lock */
	struct vm_map_header	hdr;		/* Map entry header */
#define min_offset		hdr.links.start	/* start of range */
#define max_offset		hdr.links.end	/* end of range */
	pmap_t			pmap;		/* Physical map */
	vm_size_t		size;		/* virtual size */
	int			ref_count;	/* Reference count */
#if	TASK_SWAPPER
	int			res_count;	/* Residence count (swap) */
	int			sw_state;	/* Swap state */
#endif	/* TASK_SWAPPER */
	decl_mutex_data(,	s_lock)		/* Lock ref, res, hint fields */
	vm_map_entry_t		hint;		/* hint for quick lookups */
	vm_map_entry_t		first_free;	/* First free space hint */
	boolean_t		wait_for_space;	/* Should callers wait
						   for space? */
	boolean_t		wiring_required; /* All memory wired? */
	boolean_t		no_zero_fill;	/* No zero fill absent pages */
	unsigned int		timestamp;	/* Version number */
};

#define vm_map_to_entry(map)	((struct vm_map_entry *) &(map)->hdr.links)
#define vm_map_first_entry(map)	((map)->hdr.links.next)
#define vm_map_last_entry(map)	((map)->hdr.links.prev)

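/*
 * An illustrative sketch of the list structure above (not part of the
 * interface): all entries of a map can be visited by starting at
 * vm_map_first_entry() and following vme_next until the header
 * sentinel, vm_map_to_entry(), is reached.  The caller is assumed to
 * hold at least a read lock on the map (see the locking macros below).
 *
 *	vm_map_entry_t entry;
 *
 *	vm_map_lock_read(map);
 *	for (entry = vm_map_first_entry(map);
 *	     entry != vm_map_to_entry(map);
 *	     entry = entry->vme_next) {
 *		... examine [entry->vme_start, entry->vme_end) ...
 *	}
 *	vm_map_unlock_read(map);
 */
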
#if	TASK_SWAPPER
/*
 *	VM map swap states.  There are no transition states.
 */
#define MAP_SW_IN	1	/* map is swapped in; residence count > 0 */
#define MAP_SW_OUT	2	/* map is out (res_count == 0) */
#endif	/* TASK_SWAPPER */

/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage note:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int	main_timestamp;
} vm_map_version_t;

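/*
 * An illustrative sketch of the version mechanism (an assumption about
 * typical use, not normative): vm_map_lookup_locked() fills in a
 * vm_map_version_t; after dropping the map lock to do work that may
 * block, the caller retakes a read lock and uses vm_map_verify() to
 * check that the map is unchanged before relying on the old lookup.
 *
 *	vm_map_version_t version;
 *
 *	... vm_map_lookup_locked(&map, vaddr, fault_type, &version,
 *	        &object, &offset, &prot, &wired, &behavior,
 *	        &lo_offset, &hi_offset, &pmap_map) ...
 *	... unlock, then do work that may block ...
 *	vm_map_lock_read(map);
 *	if (!vm_map_verify(map, &version)) {
 *		... the map changed; redo the lookup ...
 *	}
 *	... on success, finish with vm_map_verify_done(map, &version) ...
 */
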
/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 *		There are three formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header (links) entry
 *		onto which the other entries that represent
 *		the region are chained.
 *
 *		The second format is a single vm object.  This is used
 *		primarily in the pageout path.  The third format is a
 *		list of vm pages.  An optional continuation provides
 *		a hook to be called to obtain more of the memory,
 *		or perform other operations.  The continuation takes two
 *		arguments: a saved arg buffer and a pointer through which
 *		a new vm_map_copy is returned (see vm_map_copy_cont_t below).
 */

#define VM_MAP_COPY_PAGE_LIST_MAX	20
#define	VM_MAP_COPY_PAGE_LIST_MAX_SIZE	(VM_MAP_COPY_PAGE_LIST_MAX * PAGE_SIZE)


/*
 *	Options for vm_map_copyin_page_list.
 */

#define	VM_MAP_COPYIN_OPT_VM_PROT	0x7
#define	VM_MAP_COPYIN_OPT_SRC_DESTROY	0x8
#define	VM_MAP_COPYIN_OPT_STEAL_PAGES	0x10
#define	VM_MAP_COPYIN_OPT_PMAP_ENTER	0x20
#define	VM_MAP_COPYIN_OPT_NO_ZERO_FILL	0x40

/*
 *	Continuation structures for vm_map_copyin_page_list.
 */
typedef	struct {
	vm_map_t	map;
	vm_offset_t	src_addr;
	vm_size_t	src_len;
	vm_offset_t	destroy_addr;
	vm_size_t	destroy_len;
	int		options;
} vm_map_copyin_args_data_t, *vm_map_copyin_args_t;

#define	VM_MAP_COPYIN_ARGS_NULL	((vm_map_copyin_args_t) 0)

/* vm_map_copy_cont_t is a type definition/prototype
 * for the cont function pointer in the vm_map_copy structure.
 */
typedef	kern_return_t (*vm_map_copy_cont_t)(
				vm_map_copyin_args_t,
				vm_map_copy_t *);

#define	VM_MAP_COPY_CONT_NULL	((vm_map_copy_cont_t) 0)

struct vm_map_copy {
	int			type;
#define VM_MAP_COPY_ENTRY_LIST		1
#define VM_MAP_COPY_OBJECT		2
#define VM_MAP_COPY_KERNEL_BUFFER	3
	vm_object_offset_t	offset;
	vm_size_t		size;
	union {
		struct vm_map_header	hdr;	/* ENTRY_LIST */
		struct {			/* OBJECT */
			vm_object_t	object;
			vm_size_t	index;	/* record progress as pages
						 * are moved from object to
						 * page list; must be zero
						 * when first invoking
						 * vm_map_object_to_page_list
						 */
		} c_o;
		struct {			/* KERNEL_BUFFER */
			vm_offset_t	kdata;
			vm_size_t	kalloc_size;	/* size of this copy_t */
		} c_k;
	} c_u;
};


#define cpy_hdr			c_u.hdr

#define cpy_object		c_u.c_o.object
#define cpy_index		c_u.c_o.index

#define cpy_kdata		c_u.c_k.kdata
#define cpy_kalloc_size		c_u.c_k.kalloc_size


/*
 *	Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy)		\
		((struct vm_map_entry *) &(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy)		\
		((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy)		\
		((copy)->cpy_hdr.links.prev)

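/*
 * An illustrative sketch (an assumption about typical use, not kernel
 * source): code receiving a vm_map_copy_t generally dispatches on its
 * format before touching the union members above.
 *
 *	switch (copy->type) {
 *	case VM_MAP_COPY_ENTRY_LIST:
 *		... walk vm_map_copy_first_entry(copy) via vme_next
 *		    up to the sentinel vm_map_copy_to_entry(copy) ...
 *		break;
 *	case VM_MAP_COPY_OBJECT:
 *		... use copy->cpy_object with copy->offset/copy->size ...
 *		break;
 *	case VM_MAP_COPY_KERNEL_BUFFER:
 *		... data resides at copy->cpy_kdata; the allocation
 *		    size is in copy->cpy_kalloc_size ...
 *		break;
 *	}
 */
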
/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *	Description:
 *		Perform locking on the data portion of a map.
 *		When multiple maps are to be locked, order by map address.
 *		(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map)						\
MACRO_BEGIN								\
	lock_init(&(map)->lock, TRUE, ETAP_VM_MAP, ETAP_VM_MAP_I);	\
	(map)->timestamp = 0;						\
MACRO_END
#define vm_map_lock(map)						\
MACRO_BEGIN								\
	lock_write(&(map)->lock);					\
	(map)->timestamp++;						\
MACRO_END

#define vm_map_unlock(map)		lock_write_done(&(map)->lock)
#define vm_map_lock_read(map)		lock_read(&(map)->lock)
#define vm_map_unlock_read(map)		lock_read_done(&(map)->lock)
#define vm_map_lock_write_to_read(map)	\
		lock_write_to_read(&(map)->lock)
#define vm_map_lock_read_to_write(map)	\
		(lock_read_to_write(&(map)->lock) || (((map)->timestamp++), 0))

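/*
 * A minimal sketch of the ordering convention above (an illustration,
 * not kernel source): when two distinct maps must be held at once,
 * take the lock of the map at the lower address first so that
 * concurrent lockers cannot deadlock.
 *
 *	if (map_a < map_b) {
 *		vm_map_lock(map_a);
 *		vm_map_lock(map_b);
 *	} else {
 *		vm_map_lock(map_b);
 *		vm_map_lock(map_a);
 *	}
 *	... operate on both maps ...
 *	vm_map_unlock(map_b);
 *	vm_map_unlock(map_a);
 */
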
extern zone_t		vm_map_copy_zone; /* zone for vm_map_copy structures */

/*
 *	Exported procedures that operate on vm_map_t.
 */

/* Initialize the module */
extern void		vm_map_init(void);

/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t	vm_map_find_space(
				vm_map_t	map,
				vm_offset_t	*address,	/* OUT */
				vm_size_t	size,
				vm_offset_t	mask,
				vm_map_entry_t	*o_entry);	/* OUT */

/* Find the map entry containing (or immediately preceding) the
 * specified address in the given map. */
extern boolean_t	vm_map_lookup_entry(
				vm_map_t	map,
				vm_offset_t	address,
				vm_map_entry_t	*entry);	/* OUT */

/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t	vm_map_lookup_locked(
				vm_map_t	*var_map,	/* IN/OUT */
				vm_offset_t	vaddr,
				vm_prot_t	fault_type,
				vm_map_version_t *out_version,	/* OUT */
				vm_object_t	*object,	/* OUT */
				vm_object_offset_t *offset,	/* OUT */
				vm_prot_t	*out_prot,	/* OUT */
				boolean_t	*wired,		/* OUT */
				int		*behavior,	/* OUT */
				vm_object_offset_t *lo_offset,	/* OUT */
				vm_object_offset_t *hi_offset,	/* OUT */
				vm_map_t	*pmap_map);	/* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t	vm_map_verify(
				vm_map_t	map,
				vm_map_version_t *version);	/* REF */

/* Split a vm_map_entry into 2 entries */
extern void		_vm_map_clip_start(
				struct vm_map_header *map_header,
				vm_map_entry_t	entry,
				vm_offset_t	start);

extern vm_map_entry_t	vm_map_entry_insert(
				vm_map_t	map,
				vm_map_entry_t	insp_entry,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_object_t	object,
				vm_object_offset_t offset,
				boolean_t	needs_copy,
				boolean_t	is_shared,
				boolean_t	in_transition,
				vm_prot_t	cur_protection,
				vm_prot_t	max_protection,
				vm_behavior_t	behavior,
				vm_inherit_t	inheritance,
				unsigned	wired_count);

extern kern_return_t	vm_remap_extract(
				vm_map_t	map,
				vm_offset_t	addr,
				vm_size_t	size,
				boolean_t	copy,
				struct vm_map_header *map_header,
				vm_prot_t	*cur_protection,
				vm_prot_t	*max_protection,
				vm_inherit_t	inheritance,
				boolean_t	pageable);

extern kern_return_t	vm_remap_range_allocate(
				vm_map_t	map,
				vm_offset_t	*address,
				vm_size_t	size,
				vm_offset_t	mask,
				boolean_t	anywhere,
				vm_map_entry_t	*map_entry);

/*
 *	Functions implemented as macros
 */
#define	vm_map_min(map)		((map)->min_offset)
						/* Lowest valid address in
						 * a map */

#define	vm_map_max(map)		((map)->max_offset)
						/* Highest valid address */

#define	vm_map_pmap(map)	((map)->pmap)
						/* Physical map associated
						 * with this address map */

#define	vm_map_verify_done(map, version)	vm_map_unlock_read(map)
						/* Operation that required
						 * a verified lookup is
						 * now complete */

/*
 * Macros/functions for map residence counts and swapin/out of vm maps
 */
#if	TASK_SWAPPER

#if	MACH_ASSERT
/* Gain a reference to an existing map */
extern void		vm_map_reference(
				vm_map_t	map);
/* Lose a residence count */
extern void		vm_map_res_deallocate(
				vm_map_t	map);
/* Gain a residence count on a map */
extern void		vm_map_res_reference(
				vm_map_t	map);
/* Gain reference & residence counts to possibly swapped-out map */
extern void		vm_map_reference_swap(
				vm_map_t	map);

#else	/* MACH_ASSERT */

#define vm_map_reference(map)			\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	if (Map) {				\
		mutex_lock(&Map->s_lock);	\
		Map->res_count++;		\
		Map->ref_count++;		\
		mutex_unlock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_res_reference(map)		\
MACRO_BEGIN					\
	vm_map_t Lmap = (map);			\
	if (Lmap->res_count == 0) {		\
		mutex_unlock(&Lmap->s_lock);	\
		vm_map_lock(Lmap);		\
		vm_map_swapin(Lmap);		\
		mutex_lock(&Lmap->s_lock);	\
		++Lmap->res_count;		\
		vm_map_unlock(Lmap);		\
	} else					\
		++Lmap->res_count;		\
MACRO_END

#define vm_map_res_deallocate(map)		\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	if (--Map->res_count == 0) {		\
		mutex_unlock(&Map->s_lock);	\
		vm_map_lock(Map);		\
		vm_map_swapout(Map);		\
		vm_map_unlock(Map);		\
		mutex_lock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_reference_swap(map)		\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	mutex_lock(&Map->s_lock);		\
	++Map->ref_count;			\
	vm_map_res_reference(Map);		\
	mutex_unlock(&Map->s_lock);		\
MACRO_END
#endif	/* MACH_ASSERT */

extern void		vm_map_swapin(
				vm_map_t	map);

extern void		vm_map_swapout(
				vm_map_t	map);

#else	/* TASK_SWAPPER */

#define vm_map_reference(map)			\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	if (Map) {				\
		mutex_lock(&Map->s_lock);	\
		Map->ref_count++;		\
		mutex_unlock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_reference_swap(map)	vm_map_reference(map)
#define vm_map_res_reference(map)
#define vm_map_res_deallocate(map)

#endif	/* TASK_SWAPPER */

/*
 *	Submap object.  Must be used to create memory to be put
 *	in a submap by vm_map_submap.
 */
extern vm_object_t	vm_submap_object;

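/*
 * A hedged sketch of the intended use (an assumption based on the
 * comment above, not a verified recipe): a range of the parent map is
 * first entered against vm_submap_object, and vm_map_submap() then
 * installs the submap as the backing for that range.
 *
 *	vm_map_t submap = vm_map_create(vm_map_pmap(parent_map),
 *	                                start, end, FALSE);
 *	vm_offset_t addr = start;
 *
 *	... vm_map_enter(parent_map, &addr, end - start, 0, flags,
 *	        vm_submap_object, 0, FALSE,
 *	        VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT) ...
 *	... vm_map_submap(parent_map, addr, addr + (end - start),
 *	        submap, 0, use_pmap) ...
 */
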
/*
 *	Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible)		\
MACRO_BEGIN						\
	assert_wait((event_t)&(map)->hdr, interruptible); \
	vm_map_unlock(map);				\
	thread_block((void (*)(void))0);		\
MACRO_END

#define vm_map_entry_wakeup(map)	thread_wakeup((event_t)(&(map)->hdr))

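/*
 * An illustrative sketch of the in_transition protocol (an assumption
 * about typical use, not kernel source): a holder of the map lock that
 * finds an entry in transition marks it needs_wakeup, sleeps via
 * vm_map_entry_wait() (which drops the map lock), and re-looks the
 * entry up afterwards, since it may have been clipped or freed.
 *
 *	while (entry->in_transition) {
 *		entry->needs_wakeup = TRUE;
 *		vm_map_entry_wait(map, interruptible);
 *		vm_map_lock(map);
 *		... re-lookup the entry at the address of interest ...
 *	}
 */
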


#define	vm_map_ref_fast(map)			\
	MACRO_BEGIN				\
	mutex_lock(&map->s_lock);		\
	map->ref_count++;			\
	vm_map_res_reference(map);		\
	mutex_unlock(&map->s_lock);		\
	MACRO_END

#define	vm_map_dealloc_fast(map)		\
	MACRO_BEGIN				\
	register int c;				\
						\
	mutex_lock(&map->s_lock);		\
	c = --map->ref_count;			\
	if (c > 0)				\
		vm_map_res_deallocate(map);	\
	mutex_unlock(&map->s_lock);		\
	if (c == 0)				\
		vm_map_destroy(map);		\
	MACRO_END


/* simplify map entries */
extern void		vm_map_simplify(
				vm_map_t	map,
				vm_offset_t	start);

/* Steal all the pages from a vm_map_copy page_list */
extern void		vm_map_copy_steal_pages(
				vm_map_copy_t	copy);

/* Discard a copy without using it */
extern void		vm_map_copy_discard(
				vm_map_copy_t	copy);

/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t	vm_map_copy_copy(
				vm_map_copy_t	copy);

/* Overwrite existing memory with a copy */
extern kern_return_t	vm_map_copy_overwrite(
				vm_map_t	dst_map,
				vm_offset_t	dst_addr,
				vm_map_copy_t	copy,
				int		interruptible);

/* Create a copy object from an object. */
extern kern_return_t	vm_map_copyin_object(
				vm_object_t	object,
				vm_object_offset_t offset,
				vm_object_size_t size,
				vm_map_copy_t	*copy_result);	/* OUT */

extern vm_map_t		vm_map_switch(
				vm_map_t	map);

extern int		vm_map_copy_cont_is_valid(
				vm_map_copy_t	copy);


#endif	/* MACH_KERNEL_PRIVATE */

/* Get rid of a map */
extern void		vm_map_destroy(
				vm_map_t	map);
/* Lose a reference */
extern void		vm_map_deallocate(
				vm_map_t	map);

/* Create an empty map */
extern vm_map_t		vm_map_create(
				pmap_t		pmap,
				vm_offset_t	min,
				vm_offset_t	max,
				boolean_t	pageable);


/* Enter a mapping */
extern kern_return_t	vm_map_enter(
				vm_map_t	map,
				vm_offset_t	*address,
				vm_size_t	size,
				vm_offset_t	mask,
				int		flags,
				vm_object_t	object,
				vm_object_offset_t offset,
				boolean_t	needs_copy,
				vm_prot_t	cur_protection,
				vm_prot_t	max_protection,
				vm_inherit_t	inheritance);

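/*
 * A minimal sketch of vm_map_enter() use (illustrative; the flag name
 * VM_FLAGS_ANYWHERE is an assumption from mach/vm_statistics.h): map
 * "size" bytes of "object" at any address the map can supply.  On
 * success, *address holds the chosen start.
 *
 *	vm_offset_t addr = 0;
 *	kern_return_t kr;
 *
 *	kr = vm_map_enter(map, &addr, size, (vm_offset_t) 0,
 *	        VM_FLAGS_ANYWHERE, object, (vm_object_offset_t) 0,
 *	        FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
 */
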
extern kern_return_t	vm_map_write_user(
				vm_map_t	map,
				vm_offset_t	src_addr,
				vm_offset_t	dst_addr,
				vm_size_t	size);

extern kern_return_t	vm_map_read_user(
				vm_map_t	map,
				vm_offset_t	src_addr,
				vm_offset_t	dst_addr,
				vm_size_t	size);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t		vm_map_fork(
				vm_map_t	old_map);

/* Change protection */
extern kern_return_t	vm_map_protect(
				vm_map_t	map,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_prot_t	new_prot,
				boolean_t	set_max);

/* Change inheritance */
extern kern_return_t	vm_map_inherit(
				vm_map_t	map,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_inherit_t	new_inheritance);

/* wire down a region */
extern kern_return_t	vm_map_wire(
				vm_map_t	map,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_prot_t	access_type,
				boolean_t	user_wire);

/* unwire a region */
extern kern_return_t	vm_map_unwire(
				vm_map_t	map,
				vm_offset_t	start,
				vm_offset_t	end,
				boolean_t	user_wire);

/* Deallocate a region */
extern kern_return_t	vm_map_remove(
				vm_map_t	map,
				vm_offset_t	start,
				vm_offset_t	end,
				boolean_t	flags);

/* Place a copy into a map */
extern kern_return_t	vm_map_copyout(
				vm_map_t	dst_map,
				vm_offset_t	*dst_addr,	/* OUT */
				vm_map_copy_t	copy);


/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t	vm_map_machine_attribute(
				vm_map_t	map,
				vm_offset_t	address,
				vm_size_t	size,
				vm_machine_attribute_t	attribute,
				vm_machine_attribute_val_t *value); /* IN/OUT */

/* Set paging behavior */
extern kern_return_t	vm_map_behavior_set(
				vm_map_t	map,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_behavior_t	new_behavior);

extern kern_return_t	vm_map_copyin_common(
				vm_map_t	src_map,
				vm_offset_t	src_addr,
				vm_size_t	len,
				boolean_t	src_destroy,
				boolean_t	src_volatile,
				vm_map_copy_t	*copy_result,	/* OUT */
				boolean_t	use_maxprot);

extern kern_return_t	vm_map_submap(
				vm_map_t	map,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_map_t	submap,
				vm_offset_t	offset,
				boolean_t	use_pmap);

extern kern_return_t	vm_region_clone(
				ipc_port_t	src_region,
				ipc_port_t	dst_region);

extern kern_return_t	vm_map_region_replace(
				vm_map_t	target_map,
				ipc_port_t	old_region,
				ipc_port_t	new_region,
				vm_offset_t	start,
				vm_offset_t	end);

/*
 *	Macros to invoke vm_map_copyin_common.  vm_map_copyin is the
 *	usual form; it handles a copyin based on the current protection
 *	(a region with current protection == VM_PROT_NONE fails).
 *	vm_map_copyin_maxprot handles a copyin based on maximum possible
 *	access.  The difference is that a region with no current access
 *	BUT possible maximum access is rejected by vm_map_copyin(), but
 *	returned by vm_map_copyin_maxprot.
 */
#define	vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)	\
		vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
					FALSE, copy_result, FALSE)

#define vm_map_copyin_maxprot(src_map, \
			      src_addr, len, src_destroy, copy_result) \
		vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
					FALSE, copy_result, TRUE)

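/*
 * A minimal sketch of moving memory between maps with these macros (an
 * illustration, not kernel source): copy a range out of src_map and
 * place it at a new address in dst_map, discarding the copy object if
 * the copyout cannot consume it.
 *
 *	vm_map_copy_t copy;
 *	vm_offset_t dst_addr;
 *	kern_return_t kr;
 *
 *	kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
 *	if (kr == KERN_SUCCESS) {
 *		kr = vm_map_copyout(dst_map, &dst_addr, copy);
 *		if (kr != KERN_SUCCESS)
 *			vm_map_copy_discard(copy);
 *	}
 */
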
#define	VM_MAP_ENTRY_NULL	((vm_map_entry_t) 0)

/*
 *	Flags for vm_map_remove() and vm_map_delete()
 */
#define	VM_MAP_NO_FLAGS			0x0
#define	VM_MAP_REMOVE_KUNWIRE		0x1
#define	VM_MAP_REMOVE_INTERRUPTIBLE	0x2
#define	VM_MAP_REMOVE_WAIT_FOR_KWIRE	0x4


#ifdef	MACH_KERNEL_PRIVATE

/* address space shared region descriptor */

struct shared_region_mapping {
	decl_mutex_data(,	Lock)	/* Synchronization */
	int			ref_count;
	ipc_port_t		text_region;
	vm_size_t		text_size;
	ipc_port_t		data_region;
	vm_size_t		data_size;
	vm_offset_t		region_mappings;
	vm_offset_t		client_base;
	vm_offset_t		alternate_base;
	vm_offset_t		alternate_next;
	int			flags;
	int			depth;
	struct shared_region_object_chain *object_chain;
	struct shared_region_mapping *self;
	struct shared_region_mapping *next;
};

typedef struct shared_region_mapping *shared_region_mapping_t;

struct shared_region_object_chain {
	shared_region_mapping_t	object_chain_region;
	int			depth;
	struct shared_region_object_chain *next;
};

typedef struct shared_region_object_chain *shared_region_object_chain_t;

#else	/* !MACH_KERNEL_PRIVATE */

typedef	void *shared_region_mapping_t;

#endif	/* MACH_KERNEL_PRIVATE */

/* address space shared region descriptor */

extern kern_return_t	shared_region_mapping_info(
				shared_region_mapping_t	shared_region,
				ipc_port_t		*text_region,
				vm_size_t		*text_size,
				ipc_port_t		*data_region,
				vm_size_t		*data_size,
				vm_offset_t		*region_mappings,
				vm_offset_t		*client_base,
				vm_offset_t		*alternate_base,
				vm_offset_t		*alternate_next,
				int			*flags,
				shared_region_mapping_t	*next);

extern kern_return_t	shared_region_mapping_create(
				ipc_port_t		text_region,
				vm_size_t		text_size,
				ipc_port_t		data_region,
				vm_size_t		data_size,
				vm_offset_t		region_mappings,
				vm_offset_t		client_base,
				shared_region_mapping_t	*shared_region,
				vm_offset_t		alt_base,
				vm_offset_t		alt_next);

extern kern_return_t	shared_region_mapping_ref(
				shared_region_mapping_t	shared_region);

extern kern_return_t	shared_region_mapping_dealloc(
				shared_region_mapping_t	shared_region);

extern kern_return_t
shared_region_object_chain_attach(
				shared_region_mapping_t	target_region,
				shared_region_mapping_t	object_chain);

/*
extern kern_return_t vm_get_shared_region(
				task_t	task,
				shared_region_mapping_t	*shared_region);

extern kern_return_t vm_set_shared_region(
				task_t	task,
				shared_region_mapping_t	shared_region);
*/

#endif	/* _VM_VM_MAP_H_ */