Commit | Line | Data |
---|---|---|
1c79356b | 1 | /* |
9bccf70c | 2 | * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. |
1c79356b | 3 | * |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
43866e37 | 6 | * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. |
1c79356b | 7 | * |
43866e37 | 8 | * This file contains Original Code and/or Modifications of Original Code |
9 | * as defined in and that are subject to the Apple Public Source License | |
10 | * Version 2.0 (the 'License'). You may not use this file except in | |
11 | * compliance with the License. Please obtain a copy of the License at | |
12 | * http://www.opensource.apple.com/apsl/ and read it before using this | |
13 | * file. | |
14 | * | |
15 | * The Original Code and all software distributed under the License are | |
16 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
1c79356b | 17 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
18 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
43866e37 | 19 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
20 | * Please see the License for the specific language governing rights and | |
21 | * limitations under the License. | |
1c79356b | 22 | * |
23 | * @APPLE_LICENSE_HEADER_END@ | |
24 | */ | |
25 | /* | |
26 | * @OSF_COPYRIGHT@ | |
27 | */ | |
28 | /* | |
29 | * Mach Operating System | |
30 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University | |
31 | * All Rights Reserved. | |
32 | * | |
33 | * Permission to use, copy, modify and distribute this software and its | |
34 | * documentation is hereby granted, provided that both the copyright | |
35 | * notice and this permission notice appear in all copies of the | |
36 | * software, derivative works or modified versions, and any portions | |
37 | * thereof, and that both notices appear in supporting documentation. | |
38 | * | |
39 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" | |
40 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR | |
41 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | |
42 | * | |
43 | * Carnegie Mellon requests users of this software to return to | |
44 | * | |
45 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU | |
46 | * School of Computer Science | |
47 | * Carnegie Mellon University | |
48 | * Pittsburgh PA 15213-3890 | |
49 | * | |
50 | * any improvements or extensions that they make and grant Carnegie Mellon | |
51 | * the rights to redistribute these changes. | |
52 | */ | |
53 | /* | |
54 | */ | |
55 | ||
56 | /* | |
57 | * File: vm/vm_map.h | |
58 | * Author: Avadis Tevanian, Jr., Michael Wayne Young | |
59 | * Date: 1985 | |
60 | * | |
61 | * Virtual memory map module definitions. | |
62 | * | |
63 | * Contributors: | |
64 | * avie, dlb, mwyoung | |
65 | */ | |
66 | ||
67 | #ifndef _VM_VM_MAP_H_ | |
68 | #define _VM_VM_MAP_H_ | |
69 | ||
70 | #include <mach/mach_types.h> | |
71 | #include <mach/kern_return.h> | |
72 | #include <mach/boolean.h> | |
73 | #include <mach/vm_types.h> | |
74 | #include <mach/vm_prot.h> | |
75 | #include <mach/vm_inherit.h> | |
76 | #include <mach/vm_behavior.h> | |
77 | #include <vm/pmap.h> | |
78 | ||
9bccf70c | 79 | #include <sys/appleapiopts.h> |
1c79356b | 80 | |
9bccf70c | 81 | #ifdef __APPLE_API_PRIVATE |
1c79356b | 82 | |
83 | #ifndef MACH_KERNEL_PRIVATE | |
84 | ||
9bccf70c | 85 | #ifdef __APPLE_API_OBSOLETE |
86 | extern void kernel_vm_map_reference(vm_map_t map); | |
87 | #endif /* __APPLE_API_OBSOLETE */ | |
1c79356b | 88 | |
89 | extern void vm_map_reference(vm_map_t map); | |
90 | extern vm_map_t current_map(void); | |
91 | ||
92 | #else /* MACH_KERNEL_PRIVATE */ | |
93 | ||
94 | #include <cpus.h> | |
95 | #include <task_swapper.h> | |
96 | #include <mach_assert.h> | |
97 | ||
98 | #include <vm/vm_object.h> | |
99 | #include <vm/vm_page.h> | |
100 | #include <kern/lock.h> | |
101 | #include <kern/zalloc.h> | |
102 | #include <kern/macro_help.h> | |
103 | ||
1c79356b | 104 | #include <kern/thread_act.h> |
105 | ||
106 | #define current_map_fast() (current_act_fast()->map) | |
107 | #define current_map() (current_map_fast()) | |
108 | ||
109 | /* | |
110 | * Types defined: | |
111 | * | |
112 | * vm_map_t the high-level address map data structure. | |
113 | * vm_map_entry_t an entry in an address map. | |
114 | * vm_map_version_t a timestamp of a map, for use with vm_map_lookup | |
115 | * vm_map_copy_t represents memory copied from an address map, | |
116 | * used for inter-map copy operations | |
117 | */ | |
9bccf70c | 118 | typedef struct vm_map_entry *vm_map_entry_t; |
119 | ||
1c79356b | 120 | |
121 | /* | |
122 | * Type: vm_map_object_t [internal use only] | |
123 | * | |
124 | * Description: | |
125 | * The target of an address mapping, either a virtual | |
126 | * memory object or a sub map (of the kernel map). | |
127 | */ | |
128 | typedef union vm_map_object { | |
129 | struct vm_object *vm_object; /* virtual memory object */ |
130 | struct vm_map *sub_map; /* belongs to another map */ | |
131 | } vm_map_object_t; | |
132 | ||
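The union above is untagged; which member is valid is recorded by the `is_sub_map` bit carried in the enclosing map entry (defined further down in this header). A minimal sketch of how a caller might select the right member, assuming the usual `VM_OBJECT_NULL` null value from `<vm/vm_object.h>`; the helper name is hypothetical:

```c
/*
 * Sketch only: picking the valid member of a vm_map_object union,
 * using the is_sub_map bit of the enclosing vm_map_entry.
 */
#include <vm/vm_map.h>

static vm_object_t
entry_backing_object(vm_map_entry_t entry)
{
	if (entry->is_sub_map)
		return VM_OBJECT_NULL;	/* caller must use entry->object.sub_map */
	return entry->object.vm_object;
}
```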
133 | #define named_entry_lock_init(object) mutex_init(&(object)->Lock, ETAP_VM_OBJ) | |
134 | #define named_entry_lock(object) mutex_lock(&(object)->Lock) | |
135 | #define named_entry_unlock(object) mutex_unlock(&(object)->Lock) | |
136 | ||
137 | /* | |
138 | * Type: vm_named_entry_t [internal use only] | |
139 | * | |
140 | * Description: | |
141 | * Description of a mapping to a memory cache object. | |
142 | * | |
143 | * Implementation: | |
144 | * While the handle to this object is used as a means to map | |
145 | * and pass around the right to map regions backed by pagers | |
146 | * of all sorts, the named_entry itself is only manipulated | |
147 | * by the kernel. Named entries hold information on the | |
148 | * right to map a region of a cached object. Namely, | |
149 | * the target cache object, the beginning and ending of the | |
150 | * region to be mapped, and the permissions (read, write) | |
151 | * with which it can be mapped. | |
152 | * | |
153 | */ | |
154 | ||
155 | struct vm_named_entry { | |
156 | decl_mutex_data(, Lock) /* Synchronization */ | |
157 | vm_object_t object; /* object I point to */ | |
158 | vm_object_offset_t offset; /* offset into object */ | |
159 | union { | |
0b4e3aa0 | 160 | memory_object_t pager; /* amo pager port */ |
1c79356b | 161 | vm_map_t map; /* map backing submap */ |
162 | } backing; | |
163 | unsigned int size; /* size of region */ | |
164 | unsigned int protection; /* access permissions */ | |
165 | int ref_count; /* Number of references */ | |
166 | unsigned int | |
167 | /* boolean_t */ internal:1, /* is an internal object */ | |
168 | /* boolean_t */ is_sub_map:1; /* is the backing object a submap? */ | |
169 | }; | |
170 | ||
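A minimal sketch of how the `named_entry_lock()` / `named_entry_unlock()` macros defined above would guard an update to a named entry; the helper name is hypothetical:

```c
/* Sketch only: take an extra reference on a named entry under its lock. */
#include <vm/vm_map.h>

static void
named_entry_reference(struct vm_named_entry *named_entry)
{
	named_entry_lock(named_entry);		/* mutex_lock(&named_entry->Lock) */
	named_entry->ref_count++;
	named_entry_unlock(named_entry);
}
```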
1c79356b | 171 | /* |
172 | * Type: vm_map_entry_t [internal use only] | |
173 | * | |
174 | * Description: | |
175 | * A single mapping within an address map. | |
176 | * | |
177 | * Implementation: | |
178 | * Address map entries consist of start and end addresses, | |
179 | * a VM object (or sub map) and offset into that object, | |
180 | * and user-exported inheritance and protection information. | |
181 | * Control information for virtual copy operations is also | |
182 | * stored in the address map entry. | |
183 | */ | |
184 | struct vm_map_links { | |
185 | struct vm_map_entry *prev; /* previous entry */ | |
186 | struct vm_map_entry *next; /* next entry */ | |
187 | vm_offset_t start; /* start address */ | |
188 | vm_offset_t end; /* end address */ | |
189 | }; | |
190 | ||
191 | struct vm_map_entry { | |
192 | struct vm_map_links links; /* links to other entries */ | |
193 | #define vme_prev links.prev | |
194 | #define vme_next links.next | |
195 | #define vme_start links.start | |
196 | #define vme_end links.end | |
197 | union vm_map_object object; /* object I point to */ | |
198 | vm_object_offset_t offset; /* offset into object */ | |
199 | unsigned int | |
200 | /* boolean_t */ is_shared:1, /* region is shared */ | |
201 | /* boolean_t */ is_sub_map:1, /* Is "object" a submap? */ | |
202 | /* boolean_t */ in_transition:1, /* Entry being changed */ | |
203 | /* boolean_t */ needs_wakeup:1, /* Waiters on in_transition */ | |
204 | /* vm_behavior_t */ behavior:2, /* user paging behavior hint */ | |
205 | /* behavior is not defined for submap type */ | |
206 | /* boolean_t */ needs_copy:1, /* object need to be copied? */ | |
207 | /* Only in task maps: */ | |
208 | /* vm_prot_t */ protection:3, /* protection code */ | |
209 | /* vm_prot_t */ max_protection:3,/* maximum protection */ | |
210 | /* vm_inherit_t */ inheritance:2, /* inheritance */ | |
211 | /* nested pmap */ use_pmap:1, /* nested pmaps */ | |
212 | /* user alias */ alias:8; | |
213 | unsigned short wired_count; /* can be paged if = 0 */ | |
214 | unsigned short user_wired_count; /* for vm_wire */ | |
215 | }; | |
216 | ||
217 | /* | |
218 | * wired_count fields are unsigned short. MAX_WIRE_COUNT (below) caps them |
219 | * to safeguard against mishaps due to runaway user programs. |
220 | */ | |
221 | #define MAX_WIRE_COUNT 65535 | |
222 | ||
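A hedged sketch (not the kernel's actual wiring path) of how MAX_WIRE_COUNT would bound the counters before they are incremented:

```c
/*
 * Sketch only: refuse to wire an entry any further once either counter
 * would exceed MAX_WIRE_COUNT, so the unsigned short fields cannot wrap.
 */
#include <vm/vm_map.h>

static kern_return_t
entry_add_wire(vm_map_entry_t entry, boolean_t user_wire)
{
	if (entry->wired_count >= MAX_WIRE_COUNT ||
	    (user_wire && entry->user_wired_count >= MAX_WIRE_COUNT))
		return KERN_FAILURE;

	entry->wired_count++;
	if (user_wire)
		entry->user_wired_count++;
	return KERN_SUCCESS;
}
```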
223 | ||
224 | ||
225 | /* | |
226 | * Type: struct vm_map_header | |
227 | * | |
228 | * Description: | |
229 | * Header for a vm_map and a vm_map_copy. | |
230 | */ | |
231 | struct vm_map_header { | |
232 | struct vm_map_links links; /* first, last, min, max */ | |
233 | int nentries; /* Number of entries */ | |
234 | boolean_t entries_pageable; | |
235 | /* are map entries pageable? */ | |
236 | }; | |
237 | ||
238 | /* | |
239 | * Type: vm_map_t [exported; contents invisible] | |
240 | * | |
241 | * Description: | |
242 | * An address map -- a directory relating valid | |
243 | * regions of a task's address space to the corresponding | |
244 | * virtual memory objects. | |
245 | * | |
246 | * Implementation: | |
247 | * Maps are doubly-linked lists of map entries, sorted | |
248 | * by address. One hint is used to start | |
249 | * searches again from the last successful search, | |
250 | * insertion, or removal. Another hint is used to | |
251 | * quickly find free space. | |
252 | */ | |
253 | struct vm_map { | |
254 | lock_t lock; /* uni- and smp-lock */ | |
255 | struct vm_map_header hdr; /* Map entry header */ | |
256 | #define min_offset hdr.links.start /* start of range */ | |
257 | #define max_offset hdr.links.end /* end of range */ | |
258 | pmap_t pmap; /* Physical map */ | |
259 | vm_size_t size; /* virtual size */ | |
260 | int ref_count; /* Reference count */ | |
261 | #if TASK_SWAPPER | |
262 | int res_count; /* Residence count (swap) */ | |
263 | int sw_state; /* Swap state */ | |
264 | #endif /* TASK_SWAPPER */ | |
265 | decl_mutex_data(, s_lock) /* Lock ref, res, hint fields */ | |
266 | vm_map_entry_t hint; /* hint for quick lookups */ | |
267 | vm_map_entry_t first_free; /* First free space hint */ | |
268 | boolean_t wait_for_space; /* Should callers wait | |
269 | for space? */ | |
270 | boolean_t wiring_required;/* All memory wired? */ | |
271 | boolean_t no_zero_fill; /* No zero fill absent pages */ | |
9bccf70c | 272 | boolean_t mapped; /* has this map been mapped */ |
1c79356b | 273 | unsigned int timestamp; /* Version number */ |
274 | } ; | |
275 | ||
276 | #define vm_map_to_entry(map) ((struct vm_map_entry *) &(map)->hdr.links) | |
277 | #define vm_map_first_entry(map) ((map)->hdr.links.next) | |
278 | #define vm_map_last_entry(map) ((map)->hdr.links.prev) | |
279 | ||
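A minimal sketch of walking the sorted entry list with the accessor macros above; `vm_map_to_entry()` yields the sentinel header, so the walk ends when the links wrap back to it. The caller is assumed to hold the map lock (see the locking macros below).

```c
/* Sketch only: sum the virtual size covered by a map's entries. */
#include <vm/vm_map.h>

static vm_size_t
map_total_entry_size(vm_map_t map)
{
	vm_map_entry_t	entry;
	vm_size_t	total = 0;

	for (entry = vm_map_first_entry(map);
	     entry != vm_map_to_entry(map);
	     entry = entry->vme_next) {
		total += entry->vme_end - entry->vme_start;
	}
	return total;
}
```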
280 | #if TASK_SWAPPER | |
281 | /* | |
282 | * VM map swap states. There are no transition states. | |
283 | */ | |
284 | #define MAP_SW_IN 1 /* map is swapped in; residence count > 0 */ | |
285 | #define MAP_SW_OUT 2 /* map is out (res_count == 0) */ |
286 | #endif /* TASK_SWAPPER */ | |
287 | ||
288 | /* | |
289 | * Type: vm_map_version_t [exported; contents invisible] | |
290 | * | |
291 | * Description: | |
292 | * Map versions may be used to quickly validate a previous | |
293 | * lookup operation. | |
294 | * | |
295 | * Usage note: | |
296 | * Because they are bulky objects, map versions are usually | |
297 | * passed by reference. | |
298 | * | |
299 | * Implementation: | |
300 | * Just a timestamp for the main map. | |
301 | */ | |
302 | typedef struct vm_map_version { | |
303 | unsigned int main_timestamp; | |
304 | } vm_map_version_t; | |
305 | ||
306 | /* | |
307 | * Type: vm_map_copy_t [exported; contents invisible] | |
308 | * | |
309 | * Description: | |
310 | * A map copy object represents a region of virtual memory | |
311 | * that has been copied from an address map but is still | |
312 | * in transit. | |
313 | * | |
314 | * A map copy object may only be used by a single thread | |
315 | * at a time. | |
316 | * | |
317 | * Implementation: | |
318 | * There are three formats for map copy objects. | |
319 | * The first is very similar to the main | |
320 | * address map in structure, and as a result, some | |
321 | * of the internal maintenance functions/macros can | |
322 | * be used with either address maps or map copy objects. | |
323 | * | |
324 | * The map copy object contains a header "links" |
325 | * entry onto which the entries that represent the |
326 | * copied region are chained. |
327 | * | |
328 | * The second format is a single vm object. This is used | |
329 | * primarily in the pageout path. The third format is a | |
330 | * list of vm pages. An optional continuation provides | |
331 | * a hook to be called to obtain more of the memory, | |
332 | * or perform other operations. The continuation takes 3 | |
333 | * arguments, a saved arg buffer, a pointer to a new vm_map_copy | |
334 | * (returned) and an abort flag (abort if TRUE). | |
335 | */ | |
336 | ||
337 | #define VM_MAP_COPY_PAGE_LIST_MAX 20 | |
338 | #define VM_MAP_COPY_PAGE_LIST_MAX_SIZE (VM_MAP_COPY_PAGE_LIST_MAX * PAGE_SIZE) | |
339 | ||
340 | ||
341 | /* | |
342 | * Options for vm_map_copyin_page_list. | |
343 | */ | |
344 | ||
345 | #define VM_MAP_COPYIN_OPT_VM_PROT 0x7 | |
346 | #define VM_MAP_COPYIN_OPT_SRC_DESTROY 0x8 | |
347 | #define VM_MAP_COPYIN_OPT_STEAL_PAGES 0x10 | |
348 | #define VM_MAP_COPYIN_OPT_PMAP_ENTER 0x20 | |
349 | #define VM_MAP_COPYIN_OPT_NO_ZERO_FILL 0x40 | |
350 | ||
351 | /* | |
352 | * Continuation structures for vm_map_copyin_page_list. | |
353 | */ | |
354 | typedef struct { | |
355 | vm_map_t map; | |
356 | vm_offset_t src_addr; | |
357 | vm_size_t src_len; | |
358 | vm_offset_t destroy_addr; | |
359 | vm_size_t destroy_len; | |
360 | int options; | |
361 | } vm_map_copyin_args_data_t, *vm_map_copyin_args_t; | |
362 | ||
363 | #define VM_MAP_COPYIN_ARGS_NULL ((vm_map_copyin_args_t) 0) | |
364 | ||
365 | ||
366 | /* vm_map_copy_cont_t is a type definition/prototype | |
367 | * for the cont function pointer in vm_map_copy structure. | |
368 | */ | |
369 | typedef kern_return_t (*vm_map_copy_cont_t)( | |
370 | vm_map_copyin_args_t, | |
371 | vm_map_copy_t *); | |
372 | ||
373 | #define VM_MAP_COPY_CONT_NULL ((vm_map_copy_cont_t) 0) | |
374 | ||
375 | struct vm_map_copy { | |
376 | int type; | |
377 | #define VM_MAP_COPY_ENTRY_LIST 1 | |
378 | #define VM_MAP_COPY_OBJECT 2 | |
0b4e3aa0 | 379 | #define VM_MAP_COPY_KERNEL_BUFFER 3 |
1c79356b | 380 | vm_object_offset_t offset; |
381 | vm_size_t size; | |
382 | union { | |
383 | struct vm_map_header hdr; /* ENTRY_LIST */ | |
384 | struct { /* OBJECT */ | |
385 | vm_object_t object; | |
386 | vm_size_t index; /* record progress as pages | |
387 | * are moved from object to | |
388 | * page list; must be zero | |
389 | * when first invoking | |
390 | * vm_map_object_to_page_list | |
391 | */ | |
392 | } c_o; | |
1c79356b | 393 | struct { /* KERNEL_BUFFER */ |
394 | vm_offset_t kdata; | |
395 | vm_size_t kalloc_size; /* size of this copy_t */ | |
396 | } c_k; | |
397 | } c_u; | |
398 | }; | |
399 | ||
400 | ||
401 | #define cpy_hdr c_u.hdr | |
402 | ||
403 | #define cpy_object c_u.c_o.object | |
404 | #define cpy_index c_u.c_o.index | |
405 | ||
1c79356b | 406 | #define cpy_kdata c_u.c_k.kdata |
407 | #define cpy_kalloc_size c_u.c_k.kalloc_size | |
408 | ||
409 | ||
410 | /* | |
411 | * Useful macros for entry list copy objects | |
412 | */ | |
413 | ||
414 | #define vm_map_copy_to_entry(copy) \ | |
415 | ((struct vm_map_entry *) &(copy)->cpy_hdr.links) | |
416 | #define vm_map_copy_first_entry(copy) \ | |
417 | ((copy)->cpy_hdr.links.next) | |
418 | #define vm_map_copy_last_entry(copy) \ | |
419 | ((copy)->cpy_hdr.links.prev) | |
420 | ||
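A hedged sketch of dispatching on the three copy formats with the type constants and `cpy_*` accessors defined above; the helper is illustrative only.

```c
/* Sketch only: is a vm_map_copy_t empty, per its format? */
#include <vm/vm_map.h>

static boolean_t
copy_is_empty(vm_map_copy_t copy)
{
	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		/* entries are chained off the embedded header */
		return vm_map_copy_first_entry(copy) == vm_map_copy_to_entry(copy);
	case VM_MAP_COPY_OBJECT:
		/* a single backing VM object covering copy->size bytes */
		return copy->cpy_object == VM_OBJECT_NULL;
	case VM_MAP_COPY_KERNEL_BUFFER:
		/* data already sits in a kernel buffer at copy->cpy_kdata */
		return copy->size == 0;
	default:
		return TRUE;
	}
}
```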
1c79356b | 421 | /* |
422 | * Macros: vm_map_lock, etc. [internal use only] | |
423 | * Description: | |
424 | * Perform locking on the data portion of a map. | |
425 | * When multiple maps are to be locked, order by map address. | |
426 | * (See vm_map.c::vm_remap()) | |
427 | */ | |
428 | ||
429 | #define vm_map_lock_init(map) \ | |
9bccf70c | 430 | ((map)->timestamp = 0 , \ |
431 | lock_init(&(map)->lock, TRUE, ETAP_VM_MAP, ETAP_VM_MAP_I)) | |
432 | ||
433 | #define vm_map_lock(map) lock_write(&(map)->lock) | |
434 | #define vm_map_unlock(map) \ | |
435 | ((map)->timestamp++ , lock_write_done(&(map)->lock)) | |
436 | #define vm_map_lock_read(map) lock_read(&(map)->lock) | |
437 | #define vm_map_unlock_read(map) lock_read_done(&(map)->lock) | |
1c79356b | 438 | #define vm_map_lock_write_to_read(map) \ |
9bccf70c | 439 | ((map)->timestamp++ , lock_write_to_read(&(map)->lock)) |
440 | #define vm_map_lock_read_to_write(map) lock_read_to_write(&(map)->lock) | |
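A minimal sketch of the "order by map address" rule stated in the comment above, for the case where two maps must be write-locked at once (compare vm_map.c::vm_remap()); the helper name is hypothetical:

```c
/* Sketch only: lock two maps in address order to avoid lock-order deadlock. */
#include <vm/vm_map.h>

static void
lock_two_maps(vm_map_t a, vm_map_t b)
{
	if (a == b) {
		vm_map_lock(a);
	} else if ((vm_offset_t) a < (vm_offset_t) b) {
		vm_map_lock(a);
		vm_map_lock(b);
	} else {
		vm_map_lock(b);
		vm_map_lock(a);
	}
}
```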
1c79356b | 441 | |
442 | extern zone_t vm_map_copy_zone; /* zone for vm_map_copy structures */ | |
443 | ||
444 | /* | |
445 | * Exported procedures that operate on vm_map_t. | |
446 | */ | |
447 | ||
448 | /* Initialize the module */ | |
449 | extern void vm_map_init(void); | |
450 | ||
451 | /* Allocate a range in the specified virtual address map and | |
452 | * return the entry allocated for that range. */ | |
453 | extern kern_return_t vm_map_find_space( | |
454 | vm_map_t map, | |
455 | vm_offset_t *address, /* OUT */ | |
456 | vm_size_t size, | |
457 | vm_offset_t mask, | |
458 | vm_map_entry_t *o_entry); /* OUT */ | |
459 | ||
460 | /* Look up the map entry containing (or, failing that, preceding) the specified address in the given map */ |
461 | extern boolean_t vm_map_lookup_entry( | |
462 | vm_map_t map, | |
463 | vm_offset_t address, | |
464 | vm_map_entry_t *entry); /* OUT */ | |
465 | ||
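A minimal sketch of a read-locked lookup. On TRUE, *entry is the entry containing the address; on FALSE, *entry is the preceding entry (or the header sentinel).

```c
/* Sketch only: does any entry in the map contain this address? */
#include <vm/vm_map.h>

static boolean_t
address_is_mapped(vm_map_t map, vm_offset_t addr)
{
	vm_map_entry_t	entry;
	boolean_t	found;

	vm_map_lock_read(map);
	found = vm_map_lookup_entry(map, addr, &entry);
	vm_map_unlock_read(map);
	return found;
}
```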
1c79356b | 466 | /* Find the VM object, offset, and protection for a given virtual address |
467 | * in the specified map, assuming a page fault of the type specified. */ | |
468 | extern kern_return_t vm_map_lookup_locked( | |
469 | vm_map_t *var_map, /* IN/OUT */ | |
470 | vm_offset_t vaddr, | |
471 | vm_prot_t fault_type, | |
472 | vm_map_version_t *out_version, /* OUT */ | |
473 | vm_object_t *object, /* OUT */ | |
474 | vm_object_offset_t *offset, /* OUT */ | |
475 | vm_prot_t *out_prot, /* OUT */ | |
476 | boolean_t *wired, /* OUT */ | |
477 | int *behavior, /* OUT */ | |
478 | vm_object_offset_t *lo_offset, /* OUT */ | |
479 | vm_object_offset_t *hi_offset, /* OUT */ | |
480 | vm_map_t *pmap_map); /* OUT */ | |
481 | ||
482 | /* Verifies that the map has not changed since the given version. */ | |
483 | extern boolean_t vm_map_verify( | |
484 | vm_map_t map, | |
485 | vm_map_version_t *version); /* REF */ | |
486 | ||
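A hedged sketch of the version-validation idiom, assuming (as the fault path uses it) that vm_map_verify() takes the map read lock itself, keeps it held on success, and drops it on failure; vm_map_verify_done() (a macro defined further down) releases that read lock.

```c
/* Sketch only: re-validate a map against a version captured earlier. */
#include <vm/vm_map.h>

static boolean_t
map_unchanged_since(vm_map_t map, vm_map_version_t *version)
{
	if (!vm_map_verify(map, version))
		return FALSE;			/* timestamp changed; lock already dropped */

	/* ... safely re-use the results of the earlier lookup here ... */

	vm_map_verify_done(map, version);	/* vm_map_unlock_read(map) */
	return TRUE;
}
```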
487 | /* Split a vm_map_entry into 2 entries */ | |
488 | extern void _vm_map_clip_start( | |
489 | struct vm_map_header *map_header, | |
490 | vm_map_entry_t entry, | |
491 | vm_offset_t start); | |
492 | ||
493 | extern vm_map_entry_t vm_map_entry_insert( | |
494 | vm_map_t map, | |
495 | vm_map_entry_t insp_entry, | |
496 | vm_offset_t start, | |
497 | vm_offset_t end, | |
498 | vm_object_t object, | |
499 | vm_object_offset_t offset, | |
500 | boolean_t needs_copy, | |
501 | boolean_t is_shared, | |
502 | boolean_t in_transition, | |
503 | vm_prot_t cur_protection, | |
504 | vm_prot_t max_protection, | |
505 | vm_behavior_t behavior, | |
506 | vm_inherit_t inheritance, | |
507 | unsigned wired_count); | |
508 | ||
509 | extern kern_return_t vm_remap_extract( | |
510 | vm_map_t map, | |
511 | vm_offset_t addr, | |
512 | vm_size_t size, | |
513 | boolean_t copy, | |
514 | struct vm_map_header *map_header, | |
515 | vm_prot_t *cur_protection, | |
516 | vm_prot_t *max_protection, | |
517 | vm_inherit_t inheritance, | |
518 | boolean_t pageable); | |
519 | ||
520 | extern kern_return_t vm_remap_range_allocate( | |
521 | vm_map_t map, | |
522 | vm_offset_t *address, | |
523 | vm_size_t size, | |
524 | vm_offset_t mask, | |
525 | boolean_t anywhere, | |
526 | vm_map_entry_t *map_entry); | |
527 | ||
547 | /* | |
548 | * Functions implemented as macros | |
549 | */ | |
550 | #define vm_map_min(map) ((map)->min_offset) | |
551 | /* Lowest valid address in | |
552 | * a map */ | |
553 | ||
554 | #define vm_map_max(map) ((map)->max_offset) | |
555 | /* Highest valid address */ | |
556 | ||
557 | #define vm_map_pmap(map) ((map)->pmap) | |
558 | /* Physical map associated | |
559 | * with this address map */ | |
560 | ||
561 | #define vm_map_verify_done(map, version) vm_map_unlock_read(map) | |
562 | /* Operation that required | |
563 | * a verified lookup is | |
564 | * now complete */ | |
565 | ||
566 | /* | |
567 | * Macros/functions for map residence counts and swapin/out of vm maps | |
568 | */ | |
569 | #if TASK_SWAPPER | |
570 | ||
571 | #if MACH_ASSERT | |
572 | /* Gain a reference to an existing map */ | |
573 | extern void vm_map_reference( | |
574 | vm_map_t map); | |
575 | /* Lose a residence count */ | |
576 | extern void vm_map_res_deallocate( | |
577 | vm_map_t map); | |
578 | /* Gain a residence count on a map */ | |
579 | extern void vm_map_res_reference( | |
580 | vm_map_t map); | |
581 | /* Gain reference & residence counts to possibly swapped-out map */ | |
582 | extern void vm_map_reference_swap( | |
583 | vm_map_t map); | |
584 | ||
585 | #else /* MACH_ASSERT */ | |
586 | ||
587 | #define vm_map_reference(map) \ | |
588 | MACRO_BEGIN \ | |
589 | vm_map_t Map = (map); \ | |
590 | if (Map) { \ | |
591 | mutex_lock(&Map->s_lock); \ | |
592 | Map->res_count++; \ | |
593 | Map->ref_count++; \ | |
594 | mutex_unlock(&Map->s_lock); \ | |
595 | } \ | |
596 | MACRO_END | |
597 | ||
598 | #define vm_map_res_reference(map) \ | |
599 | MACRO_BEGIN \ | |
600 | vm_map_t Lmap = (map); \ | |
601 | if (Lmap->res_count == 0) { \ | |
602 | mutex_unlock(&Lmap->s_lock); \ | |
603 | vm_map_lock(Lmap); \ | |
604 | vm_map_swapin(Lmap); \ | |
605 | mutex_lock(&Lmap->s_lock); \ | |
606 | ++Lmap->res_count; \ | |
607 | vm_map_unlock(Lmap); \ | |
608 | } else \ | |
609 | ++Lmap->res_count; \ | |
610 | MACRO_END | |
611 | ||
612 | #define vm_map_res_deallocate(map) \ | |
613 | MACRO_BEGIN \ | |
614 | vm_map_t Map = (map); \ | |
615 | if (--Map->res_count == 0) { \ | |
616 | mutex_unlock(&Map->s_lock); \ | |
617 | vm_map_lock(Map); \ | |
618 | vm_map_swapout(Map); \ | |
619 | vm_map_unlock(Map); \ | |
620 | mutex_lock(&Map->s_lock); \ | |
621 | } \ | |
622 | MACRO_END | |
623 | ||
624 | #define vm_map_reference_swap(map) \ | |
625 | MACRO_BEGIN \ | |
626 | vm_map_t Map = (map); \ | |
627 | mutex_lock(&Map->s_lock); \ | |
628 | ++Map->ref_count; \ | |
629 | vm_map_res_reference(Map); \ | |
630 | mutex_unlock(&Map->s_lock); \ | |
631 | MACRO_END | |
632 | #endif /* MACH_ASSERT */ | |
633 | ||
634 | extern void vm_map_swapin( | |
635 | vm_map_t map); | |
636 | ||
637 | extern void vm_map_swapout( | |
638 | vm_map_t map); | |
639 | ||
640 | #else /* TASK_SWAPPER */ | |
641 | ||
642 | #define vm_map_reference(map) \ | |
643 | MACRO_BEGIN \ | |
644 | vm_map_t Map = (map); \ | |
645 | if (Map) { \ | |
646 | mutex_lock(&Map->s_lock); \ | |
647 | Map->ref_count++; \ | |
648 | mutex_unlock(&Map->s_lock); \ | |
649 | } \ | |
650 | MACRO_END | |
651 | ||
652 | #define vm_map_reference_swap(map) vm_map_reference(map) | |
653 | #define vm_map_res_reference(map) | |
654 | #define vm_map_res_deallocate(map) | |
655 | ||
656 | #endif /* TASK_SWAPPER */ | |
657 | ||
658 | /* | |
659 | * Submap object. Must be used to create memory to be put | |
660 | * in a submap by vm_map_submap. | |
661 | */ | |
662 | extern vm_object_t vm_submap_object; | |
663 | ||
664 | /* | |
665 | * Wait and wakeup macros for in_transition map entries. | |
666 | */ | |
667 | #define vm_map_entry_wait(map, interruptible) \ | |
9bccf70c | 668 | ((map)->timestamp++ , \ |
669 | thread_sleep_lock_write((event_t)&(map)->hdr, \ | |
670 | &(map)->lock, interruptible)) | |
1c79356b | 671 | |
1c79356b | 672 | |
9bccf70c | 673 | #define vm_map_entry_wakeup(map) thread_wakeup((event_t)(&(map)->hdr)) |
1c79356b | 674 | |
675 | ||
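A hedged sketch of the in_transition protocol these macros support: the caller holds the map write lock, asks for a wakeup, and sleeps via vm_map_entry_wait(), which bumps the timestamp and releases that lock, so the entry must be looked up again after re-locking. THREAD_UNINT is assumed to be the uninterruptible wait constant from the scheduling primitives.

```c
/* Sketch only: block until no entry covering addr is in transition. */
#include <vm/vm_map.h>

static void
wait_while_in_transition(vm_map_t map, vm_offset_t addr)
{
	vm_map_entry_t	entry;

	vm_map_lock(map);
	while (vm_map_lookup_entry(map, addr, &entry) &&
	       entry->in_transition) {
		entry->needs_wakeup = TRUE;		/* ask for vm_map_entry_wakeup() */
		vm_map_entry_wait(map, THREAD_UNINT);	/* sleeps; drops the write lock */
		vm_map_lock(map);			/* re-take and re-check */
	}
	vm_map_unlock(map);
}
```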
676 | #define vm_map_ref_fast(map) \ | |
677 | MACRO_BEGIN \ | |
678 | mutex_lock(&map->s_lock); \ | |
679 | map->ref_count++; \ | |
680 | vm_map_res_reference(map); \ | |
681 | mutex_unlock(&map->s_lock); \ | |
682 | MACRO_END | |
683 | ||
684 | #define vm_map_dealloc_fast(map) \ | |
685 | MACRO_BEGIN \ | |
686 | register int c; \ | |
687 | \ | |
688 | mutex_lock(&map->s_lock); \ | |
689 | c = --map->ref_count; \ | |
690 | if (c > 0) \ | |
691 | vm_map_res_deallocate(map); \ | |
692 | mutex_unlock(&map->s_lock); \ | |
693 | if (c == 0) \ | |
694 | vm_map_destroy(map); \ | |
695 | MACRO_END | |
696 | ||
697 | ||
698 | /* simplify map entries */ | |
699 | extern void vm_map_simplify( | |
700 | vm_map_t map, | |
701 | vm_offset_t start); | |
702 | ||
703 | /* Steal all the pages from a vm_map_copy page_list */ | |
704 | extern void vm_map_copy_steal_pages( | |
705 | vm_map_copy_t copy); | |
706 | ||
707 | /* Discard a copy without using it */ | |
708 | extern void vm_map_copy_discard( | |
709 | vm_map_copy_t copy); | |
710 | ||
711 | /* Move the information in a map copy object to a new map copy object */ | |
712 | extern vm_map_copy_t vm_map_copy_copy( | |
713 | vm_map_copy_t copy); | |
714 | ||
715 | /* Overwrite existing memory with a copy */ | |
716 | extern kern_return_t vm_map_copy_overwrite( | |
717 | vm_map_t dst_map, | |
718 | vm_offset_t dst_addr, | |
719 | vm_map_copy_t copy, | |
720 | int interruptible); | |
721 | ||
1c79356b | 722 | /* Create a copy object from an object. */ |
723 | extern kern_return_t vm_map_copyin_object( | |
724 | vm_object_t object, | |
725 | vm_object_offset_t offset, | |
726 | vm_object_size_t size, | |
727 | vm_map_copy_t *copy_result); /* OUT */ | |
728 | ||
1c79356b | 729 | extern vm_map_t vm_map_switch( |
730 | vm_map_t map); | |
731 | ||
732 | extern int vm_map_copy_cont_is_valid( | |
733 | vm_map_copy_t copy); | |
734 | ||
735 | ||
9bccf70c | 736 | #define VM_MAP_ENTRY_NULL ((vm_map_entry_t) 0) |
1c79356b | 737 | |
738 | ||
739 | /* Enter a mapping */ | |
740 | extern kern_return_t vm_map_enter( | |
741 | vm_map_t map, | |
742 | vm_offset_t *address, | |
743 | vm_size_t size, | |
744 | vm_offset_t mask, | |
745 | int flags, | |
746 | vm_object_t object, | |
747 | vm_object_offset_t offset, | |
748 | boolean_t needs_copy, | |
749 | vm_prot_t cur_protection, | |
750 | vm_prot_t max_protection, | |
751 | vm_inherit_t inheritance); | |
752 | ||
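A hedged sketch of a vm_map_enter() call that maps fresh zero-fill memory anywhere in a map. VM_FLAGS_ANYWHERE (assumed to come from <mach/vm_statistics.h>) and a null object are the conventional arguments for anonymous memory.

```c
/* Sketch only: enter an anonymous, zero-filled region of the given size. */
#include <vm/vm_map.h>

static kern_return_t
map_anonymous(vm_map_t map, vm_size_t size, vm_offset_t *addr)
{
	*addr = 0;
	return vm_map_enter(map,
			    addr,			/* OUT: chosen address */
			    size,
			    (vm_offset_t) 0,		/* no alignment mask */
			    VM_FLAGS_ANYWHERE,		/* assumed flag name */
			    VM_OBJECT_NULL,		/* zero-fill backing */
			    (vm_object_offset_t) 0,
			    FALSE,			/* needs_copy */
			    VM_PROT_DEFAULT,
			    VM_PROT_ALL,
			    VM_INHERIT_DEFAULT);
}
```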
753 | extern kern_return_t vm_map_write_user( | |
754 | vm_map_t map, | |
755 | vm_offset_t src_addr, | |
756 | vm_offset_t dst_addr, | |
757 | vm_size_t size); | |
758 | ||
759 | extern kern_return_t vm_map_read_user( | |
760 | vm_map_t map, | |
761 | vm_offset_t src_addr, | |
762 | vm_offset_t dst_addr, | |
763 | vm_size_t size); | |
764 | ||
765 | /* Create a new task map using an existing task map as a template. */ | |
766 | extern vm_map_t vm_map_fork( | |
767 | vm_map_t old_map); | |
768 | ||
9bccf70c | 769 | /* Change inheritance */ |
770 | extern kern_return_t vm_map_inherit( | |
1c79356b | 771 | vm_map_t map, |
772 | vm_offset_t start, | |
773 | vm_offset_t end, | |
9bccf70c | 774 | vm_inherit_t new_inheritance); |
1c79356b | 775 | |
9bccf70c | 776 | /* Add or remove machine-dependent attributes from map regions */ |
777 | extern kern_return_t vm_map_machine_attribute( | |
778 | vm_map_t map, | |
779 | vm_offset_t address, | |
780 | vm_size_t size, | |
781 | vm_machine_attribute_t attribute, | |
782 | vm_machine_attribute_val_t* value); /* IN/OUT */ | |
783 | /* Set paging behavior */ | |
784 | extern kern_return_t vm_map_behavior_set( | |
1c79356b | 785 | vm_map_t map, |
786 | vm_offset_t start, | |
787 | vm_offset_t end, | |
9bccf70c | 788 | vm_behavior_t new_behavior); |
789 | ||
790 | extern kern_return_t vm_map_submap( | |
791 | vm_map_t map, | |
792 | vm_offset_t start, | |
793 | vm_offset_t end, | |
794 | vm_map_t submap, | |
795 | vm_offset_t offset, | |
796 | boolean_t use_pmap); | |
797 | ||
798 | ||
799 | #endif /* MACH_KERNEL_PRIVATE */ | |
800 | ||
801 | /* Create an empty map */ | |
802 | extern vm_map_t vm_map_create( | |
803 | pmap_t pmap, | |
804 | vm_offset_t min, | |
805 | vm_offset_t max, | |
806 | boolean_t pageable); | |
807 | ||
808 | /* Get rid of a map */ | |
809 | extern void vm_map_destroy( | |
810 | vm_map_t map); | |
811 | /* Lose a reference */ | |
812 | extern void vm_map_deallocate( | |
813 | vm_map_t map); | |
814 | ||
815 | /* Change protection */ | |
816 | extern kern_return_t vm_map_protect( | |
817 | vm_map_t map, | |
818 | vm_offset_t start, | |
819 | vm_offset_t end, | |
820 | vm_prot_t new_prot, | |
821 | boolean_t set_max); | |
1c79356b | 822 | |
823 | /* wire down a region */ | |
824 | extern kern_return_t vm_map_wire( | |
825 | vm_map_t map, | |
826 | vm_offset_t start, | |
827 | vm_offset_t end, | |
828 | vm_prot_t access_type, | |
829 | boolean_t user_wire); | |
830 | ||
831 | /* unwire a region */ | |
832 | extern kern_return_t vm_map_unwire( | |
833 | vm_map_t map, | |
834 | vm_offset_t start, | |
835 | vm_offset_t end, | |
836 | boolean_t user_wire); | |
837 | ||
838 | /* Deallocate a region */ | |
839 | extern kern_return_t vm_map_remove( | |
840 | vm_map_t map, | |
841 | vm_offset_t start, | |
842 | vm_offset_t end, | |
843 | boolean_t flags); | |
844 | ||
845 | /* Place a copy into a map */ | |
846 | extern kern_return_t vm_map_copyout( | |
847 | vm_map_t dst_map, | |
848 | vm_offset_t *dst_addr, /* OUT */ | |
849 | vm_map_copy_t copy); | |
850 | ||
1c79356b | 851 | extern kern_return_t vm_map_copyin_common( |
852 | vm_map_t src_map, | |
853 | vm_offset_t src_addr, | |
854 | vm_size_t len, | |
855 | boolean_t src_destroy, | |
856 | boolean_t src_volatile, | |
857 | vm_map_copy_t *copy_result, /* OUT */ | |
858 | boolean_t use_maxprot); | |
859 | ||
0b4e3aa0 | 860 | extern kern_return_t vm_region_clone( |
861 | ipc_port_t src_region, | |
862 | ipc_port_t dst_region); | |
863 | ||
864 | extern kern_return_t vm_map_region_replace( | |
865 | vm_map_t target_map, | |
866 | ipc_port_t old_region, | |
867 | ipc_port_t new_region, | |
868 | vm_offset_t start, | |
869 | vm_offset_t end); | |
870 | ||
55e303ae | 871 | extern boolean_t vm_map_check_protection( |
872 | vm_map_t map, | |
873 | vm_offset_t start, | |
874 | vm_offset_t end, | |
875 | vm_prot_t protection); | |
876 | ||
1c79356b | 877 | /* |
878 | * Macros to invoke vm_map_copyin_common. vm_map_copyin is the | |
879 | * usual form; it performs the copyin based on the current protection | |
880 | * (a region whose current protection is VM_PROT_NONE fails). | |
881 | * vm_map_copyin_maxprot handles a copyin based on maximum possible | |
882 | * access. The difference is that a region with no current access | |
883 | * BUT possible maximum access is rejected by vm_map_copyin(), but | |
884 | * returned by vm_map_copyin_maxprot. | |
885 | */ | |
886 | #define vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \ | |
887 | vm_map_copyin_common(src_map, src_addr, len, src_destroy, \ | |
888 | FALSE, copy_result, FALSE) | |
889 | ||
890 | #define vm_map_copyin_maxprot(src_map, \ | |
891 | src_addr, len, src_destroy, copy_result) \ | |
892 | vm_map_copyin_common(src_map, src_addr, len, src_destroy, \ | |
893 | FALSE, copy_result, TRUE) | |
894 | ||
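A minimal sketch pairing vm_map_copyin() with vm_map_copyout() to move a region between maps, discarding the in-transit copy if the copyout fails:

```c
/* Sketch only: copy [src_addr, src_addr+len) from src_map into dst_map. */
#include <vm/vm_map.h>

static kern_return_t
move_region(vm_map_t src_map, vm_offset_t src_addr, vm_size_t len,
	    vm_map_t dst_map, vm_offset_t *dst_addr)
{
	vm_map_copy_t	copy;
	kern_return_t	kr;

	kr = vm_map_copyin(src_map, src_addr, len,
			   FALSE,		/* do not destroy the source */
			   &copy);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = vm_map_copyout(dst_map, dst_addr, copy);
	if (kr != KERN_SUCCESS)
		vm_map_copy_discard(copy);	/* still ours to release on failure */
	return kr;
}
```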
1c79356b | 895 | /* |
896 | * Flags for vm_map_remove() and vm_map_delete() | |
897 | */ | |
898 | #define VM_MAP_NO_FLAGS 0x0 | |
899 | #define VM_MAP_REMOVE_KUNWIRE 0x1 | |
900 | #define VM_MAP_REMOVE_INTERRUPTIBLE 0x2 | |
901 | #define VM_MAP_REMOVE_WAIT_FOR_KWIRE 0x4 | |
902 | ||
55e303ae | 903 | /* |
904 | * Backing store throttle, applied when backing store is exhausted |
905 | */ | |
906 | extern unsigned int vm_backing_store_low; | |
907 | ||
908 | extern void vm_backing_store_disable( | |
909 | boolean_t suspend); | |
910 | ||
911 | ||
9bccf70c | 912 | #endif /* __APPLE_API_PRIVATE */ |
1c79356b | 913 | |
1c79356b | 914 | #endif /* _VM_VM_MAP_H_ */ |
915 |