]>
Commit | Line | Data |
---|---|---|
1c79356b | 1 | /* |
9bccf70c | 2 | * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. |
1c79356b A |
3 | * |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * The contents of this file constitute Original Code as defined in and | |
7 | * are subject to the Apple Public Source License Version 1.1 (the | |
8 | * "License"). You may not use this file except in compliance with the | |
9 | * License. Please obtain a copy of the License at | |
10 | * http://www.apple.com/publicsource and read it before using this file. | |
11 | * | |
12 | * This Original Code and all software distributed under the License are | |
13 | * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
14 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
15 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the | |
17 | * License for the specific language governing rights and limitations | |
18 | * under the License. | |
19 | * | |
20 | * @APPLE_LICENSE_HEADER_END@ | |
21 | */ | |
22 | /* | |
23 | * @OSF_COPYRIGHT@ | |
24 | */ | |
25 | /* | |
26 | * Mach Operating System | |
27 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University | |
28 | * All Rights Reserved. | |
29 | * | |
30 | * Permission to use, copy, modify and distribute this software and its | |
31 | * documentation is hereby granted, provided that both the copyright | |
32 | * notice and this permission notice appear in all copies of the | |
33 | * software, derivative works or modified versions, and any portions | |
34 | * thereof, and that both notices appear in supporting documentation. | |
35 | * | |
36 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" | |
37 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR | |
38 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | |
39 | * | |
40 | * Carnegie Mellon requests users of this software to return to | |
41 | * | |
42 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU | |
43 | * School of Computer Science | |
44 | * Carnegie Mellon University | |
45 | * Pittsburgh PA 15213-3890 | |
46 | * | |
47 | * any improvements or extensions that they make and grant Carnegie Mellon | |
48 | * the rights to redistribute these changes. | |
49 | */ | |
50 | /* | |
51 | */ | |
52 | ||
53 | /* | |
54 | * File: vm/vm_map.h | |
55 | * Author: Avadis Tevanian, Jr., Michael Wayne Young | |
56 | * Date: 1985 | |
57 | * | |
58 | * Virtual memory map module definitions. | |
59 | * | |
60 | * Contributors: | |
61 | * avie, dlb, mwyoung | |
62 | */ | |
63 | ||
64 | #ifndef _VM_VM_MAP_H_ | |
65 | #define _VM_VM_MAP_H_ | |
66 | ||
67 | #include <mach/mach_types.h> | |
68 | #include <mach/kern_return.h> | |
69 | #include <mach/boolean.h> | |
70 | #include <mach/vm_types.h> | |
71 | #include <mach/vm_prot.h> | |
72 | #include <mach/vm_inherit.h> | |
73 | #include <mach/vm_behavior.h> | |
74 | #include <vm/pmap.h> | |
75 | ||
9bccf70c | 76 | #include <sys/appleapiopts.h> |
1c79356b | 77 | |
9bccf70c | 78 | #ifdef __APPLE_API_PRIVATE |
1c79356b A |
79 | |
80 | #ifndef MACH_KERNEL_PRIVATE | |
81 | ||
9bccf70c A |
82 | #ifdef __APPLE_API_OBSOLETE |
83 | extern void kernel_vm_map_reference(vm_map_t map); | |
84 | #endif /* __APPLE_API_OBSOLETE */ | |
1c79356b A |
85 | |
86 | extern void vm_map_reference(vm_map_t map); | |
87 | extern vm_map_t current_map(void); | |
88 | ||
89 | #else /* MACH_KERNEL_PRIVATE */ | |
90 | ||
91 | #include <cpus.h> | |
92 | #include <task_swapper.h> | |
93 | #include <mach_assert.h> | |
94 | ||
95 | #include <vm/vm_object.h> | |
96 | #include <vm/vm_page.h> | |
97 | #include <kern/lock.h> | |
98 | #include <kern/zalloc.h> | |
99 | #include <kern/macro_help.h> | |
100 | ||
1c79356b A |
101 | #include <kern/thread_act.h> |
102 | ||
103 | #define current_map_fast() (current_act_fast()->map) | |
104 | #define current_map() (current_map_fast()) | |
105 | ||
106 | /* | |
107 | * Types defined: | |
108 | * | |
109 | * vm_map_t the high-level address map data structure. | |
110 | * vm_map_entry_t an entry in an address map. | |
111 | * vm_map_version_t a timestamp of a map, for use with vm_map_lookup | |
112 | * vm_map_copy_t represents memory copied from an address map, | |
113 | * used for inter-map copy operations | |
114 | */ | |
9bccf70c A |
115 | typedef struct vm_map_entry *vm_map_entry_t; |
116 | ||
1c79356b A |
117 | |
118 | /* | |
119 | * Type: vm_map_object_t [internal use only] | |
120 | * | |
121 | * Description: | |
122 | * The target of an address mapping, either a virtual | |
123 | * memory object or a sub map (of the kernel map). | |
124 | */ | |
125 | typedef union vm_map_object { | |
126 | struct vm_object *vm_object; /* object object */ | |
127 | struct vm_map *sub_map; /* belongs to another map */ | |
128 | } vm_map_object_t; | |
129 | ||
130 | #define named_entry_lock_init(object) mutex_init(&(object)->Lock, ETAP_VM_OBJ) | |
131 | #define named_entry_lock(object) mutex_lock(&(object)->Lock) | |
132 | #define named_entry_unlock(object) mutex_unlock(&(object)->Lock) | |
133 | ||
134 | /* | |
135 | * Type: vm_named_entry_t [internal use only] | |
136 | * | |
137 | * Description: | |
138 | * Description of a mapping to a memory cache object. | |
139 | * | |
140 | * Implementation: | |
141 | * While the handle to this object is used as a means to map | |
142 | * and pass around the right to map regions backed by pagers | |
143 | * of all sorts, the named_entry itself is only manipulated | |
144 | * by the kernel. Named entries hold information on the | |
145 | * right to map a region of a cached object. Namely, | |
146 | * the target cache object, the beginning and ending of the | |
147 | * region to be mapped, and the permissions, (read, write) | |
148 | * with which it can be mapped. | |
149 | * | |
150 | */ | |
151 | ||
152 | struct vm_named_entry { | |
153 | decl_mutex_data(, Lock) /* Synchronization */ | |
154 | vm_object_t object; /* object I point to */ | |
155 | vm_object_offset_t offset; /* offset into object */ | |
156 | union { | |
0b4e3aa0 | 157 | memory_object_t pager; /* amo pager port */ |
1c79356b A |
158 | vm_map_t map; /* map backing submap */ |
159 | } backing; | |
160 | unsigned int size; /* size of region */ | |
161 | unsigned int protection; /* access permissions */ | |
162 | int ref_count; /* Number of references */ | |
163 | unsigned int | |
164 | /* boolean_t */ internal:1, /* is an internal object */ | |
165 | /* boolean_t */ is_sub_map:1; /* is "object" a submap? */ | |
166 | }; | |
167 | ||
1c79356b A |
168 | /* |
169 | * Type: vm_map_entry_t [internal use only] | |
170 | * | |
171 | * Description: | |
172 | * A single mapping within an address map. | |
173 | * | |
174 | * Implementation: | |
175 | * Address map entries consist of start and end addresses, | |
176 | * a VM object (or sub map) and offset into that object, | |
177 | * and user-exported inheritance and protection information. | |
178 | * Control information for virtual copy operations is also | |
179 | * stored in the address map entry. | |
180 | */ | |
181 | struct vm_map_links { | |
182 | struct vm_map_entry *prev; /* previous entry */ | |
183 | struct vm_map_entry *next; /* next entry */ | |
184 | vm_offset_t start; /* start address */ | |
185 | vm_offset_t end; /* end address */ | |
186 | }; | |
187 | ||
188 | struct vm_map_entry { | |
189 | struct vm_map_links links; /* links to other entries */ | |
190 | #define vme_prev links.prev | |
191 | #define vme_next links.next | |
192 | #define vme_start links.start | |
193 | #define vme_end links.end | |
194 | union vm_map_object object; /* object I point to */ | |
195 | vm_object_offset_t offset; /* offset into object */ | |
196 | unsigned int | |
197 | /* boolean_t */ is_shared:1, /* region is shared */ | |
198 | /* boolean_t */ is_sub_map:1, /* Is "object" a submap? */ | |
199 | /* boolean_t */ in_transition:1, /* Entry being changed */ | |
200 | /* boolean_t */ needs_wakeup:1, /* Waiters on in_transition */ | |
201 | /* vm_behavior_t */ behavior:2, /* user paging behavior hint */ | |
202 | /* behavior is not defined for submap type */ | |
203 | /* boolean_t */ needs_copy:1, /* object need to be copied? */ | |
204 | /* Only in task maps: */ | |
205 | /* vm_prot_t */ protection:3, /* protection code */ | |
206 | /* vm_prot_t */ max_protection:3,/* maximum protection */ | |
207 | /* vm_inherit_t */ inheritance:2, /* inheritance */ | |
208 | /* nested pmap */ use_pmap:1, /* nested pmaps */ | |
209 | /* user alias */ alias:8; | |
210 | unsigned short wired_count; /* can be paged if = 0 */ | |
211 | unsigned short user_wired_count; /* for vm_wire */ | |
212 | }; | |
213 | ||
214 | /* | |
215 | * wired_counts are unsigned short. This value is used to safeguard | |
216 | * against any mishaps due to runaway user programs. | |
217 | */ | |
218 | #define MAX_WIRE_COUNT 65535 | |
219 | ||
220 | ||
221 | ||
222 | /* | |
223 | * Type: struct vm_map_header | |
224 | * | |
225 | * Description: | |
226 | * Header for a vm_map and a vm_map_copy. | |
227 | */ | |
228 | struct vm_map_header { | |
229 | struct vm_map_links links; /* first, last, min, max */ | |
230 | int nentries; /* Number of entries */ | |
231 | boolean_t entries_pageable; | |
232 | /* are map entries pageable? */ | |
233 | }; | |
234 | ||
235 | /* | |
236 | * Type: vm_map_t [exported; contents invisible] | |
237 | * | |
238 | * Description: | |
239 | * An address map -- a directory relating valid | |
240 | * regions of a task's address space to the corresponding | |
241 | * virtual memory objects. | |
242 | * | |
243 | * Implementation: | |
244 | * Maps are doubly-linked lists of map entries, sorted | |
245 | * by address. One hint is used to start | |
246 | * searches again from the last successful search, | |
247 | * insertion, or removal. Another hint is used to | |
248 | * quickly find free space. | |
249 | */ | |
250 | struct vm_map { | |
251 | lock_t lock; /* uni- and smp-lock */ | |
252 | struct vm_map_header hdr; /* Map entry header */ | |
253 | #define min_offset hdr.links.start /* start of range */ | |
254 | #define max_offset hdr.links.end /* end of range */ | |
255 | pmap_t pmap; /* Physical map */ | |
256 | vm_size_t size; /* virtual size */ | |
257 | int ref_count; /* Reference count */ | |
258 | #if TASK_SWAPPER | |
259 | int res_count; /* Residence count (swap) */ | |
260 | int sw_state; /* Swap state */ | |
261 | #endif /* TASK_SWAPPER */ | |
262 | decl_mutex_data(, s_lock) /* Lock ref, res, hint fields */ | |
263 | vm_map_entry_t hint; /* hint for quick lookups */ | |
264 | vm_map_entry_t first_free; /* First free space hint */ | |
265 | boolean_t wait_for_space; /* Should callers wait | |
266 | for space? */ | |
267 | boolean_t wiring_required;/* All memory wired? */ | |
268 | boolean_t no_zero_fill; /* No zero fill absent pages */ | |
9bccf70c | 269 | boolean_t mapped; /* has this map been mapped */ |
1c79356b A |
270 | unsigned int timestamp; /* Version number */ |
271 | } ; | |
272 | ||
273 | #define vm_map_to_entry(map) ((struct vm_map_entry *) &(map)->hdr.links) | |
274 | #define vm_map_first_entry(map) ((map)->hdr.links.next) | |
275 | #define vm_map_last_entry(map) ((map)->hdr.links.prev) | |
276 | ||
277 | #if TASK_SWAPPER | |
278 | /* | |
279 | * VM map swap states. There are no transition states. | |
280 | */ | |
281 | #define MAP_SW_IN 1 /* map is swapped in; residence count > 0 */ | |
282 | #define MAP_SW_OUT 2 /* map is out (res_count == 0) */ | |
283 | #endif /* TASK_SWAPPER */ | |
284 | ||
285 | /* | |
286 | * Type: vm_map_version_t [exported; contents invisible] | |
287 | * | |
288 | * Description: | |
289 | * Map versions may be used to quickly validate a previous | |
290 | * lookup operation. | |
291 | * | |
292 | * Usage note: | |
293 | * Because they are bulky objects, map versions are usually | |
294 | * passed by reference. | |
295 | * | |
296 | * Implementation: | |
297 | * Just a timestamp for the main map. | |
298 | */ | |
299 | typedef struct vm_map_version { | |
300 | unsigned int main_timestamp; | |
301 | } vm_map_version_t; | |
302 | ||
303 | /* | |
304 | * Type: vm_map_copy_t [exported; contents invisible] | |
305 | * | |
306 | * Description: | |
307 | * A map copy object represents a region of virtual memory | |
308 | * that has been copied from an address map but is still | |
309 | * in transit. | |
310 | * | |
311 | * A map copy object may only be used by a single thread | |
312 | * at a time. | |
313 | * | |
314 | * Implementation: | |
315 | * There are three formats for map copy objects. | |
316 | * The first is very similar to the main | |
317 | * address map in structure, and as a result, some | |
318 | * of the internal maintenance functions/macros can | |
319 | * be used with either address maps or map copy objects. | |
320 | * | |
321 | * The map copy object contains a header links | |
322 | * entry onto which the other entries that represent | |
323 | * the region are chained. | |
324 | * | |
325 | * The second format is a single vm object. This is used | |
326 | * primarily in the pageout path. The third format is a | |
327 | * list of vm pages. An optional continuation provides | |
328 | * a hook to be called to obtain more of the memory, | |
329 | * or perform other operations. The continuation takes 3 | |
330 | * arguments, a saved arg buffer, a pointer to a new vm_map_copy | |
331 | * (returned) and an abort flag (abort if TRUE). | |
332 | */ | |
333 | ||
334 | #define VM_MAP_COPY_PAGE_LIST_MAX 20 | |
335 | #define VM_MAP_COPY_PAGE_LIST_MAX_SIZE (VM_MAP_COPY_PAGE_LIST_MAX * PAGE_SIZE) | |
336 | ||
337 | ||
338 | /* | |
339 | * Options for vm_map_copyin_page_list. | |
340 | */ | |
341 | ||
342 | #define VM_MAP_COPYIN_OPT_VM_PROT 0x7 | |
343 | #define VM_MAP_COPYIN_OPT_SRC_DESTROY 0x8 | |
344 | #define VM_MAP_COPYIN_OPT_STEAL_PAGES 0x10 | |
345 | #define VM_MAP_COPYIN_OPT_PMAP_ENTER 0x20 | |
346 | #define VM_MAP_COPYIN_OPT_NO_ZERO_FILL 0x40 | |
347 | ||
348 | /* | |
349 | * Continuation structures for vm_map_copyin_page_list. | |
350 | */ | |
351 | typedef struct { | |
352 | vm_map_t map; | |
353 | vm_offset_t src_addr; | |
354 | vm_size_t src_len; | |
355 | vm_offset_t destroy_addr; | |
356 | vm_size_t destroy_len; | |
357 | int options; | |
358 | } vm_map_copyin_args_data_t, *vm_map_copyin_args_t; | |
359 | ||
360 | #define VM_MAP_COPYIN_ARGS_NULL ((vm_map_copyin_args_t) 0) | |
361 | ||
362 | ||
363 | /* vm_map_copy_cont_t is a type definition/prototype | |
364 | * for the cont function pointer in vm_map_copy structure. | |
365 | */ | |
366 | typedef kern_return_t (*vm_map_copy_cont_t)( | |
367 | vm_map_copyin_args_t, | |
368 | vm_map_copy_t *); | |
369 | ||
370 | #define VM_MAP_COPY_CONT_NULL ((vm_map_copy_cont_t) 0) | |
371 | ||
372 | struct vm_map_copy { | |
373 | int type; | |
374 | #define VM_MAP_COPY_ENTRY_LIST 1 | |
375 | #define VM_MAP_COPY_OBJECT 2 | |
0b4e3aa0 | 376 | #define VM_MAP_COPY_KERNEL_BUFFER 3 |
1c79356b A |
377 | vm_object_offset_t offset; |
378 | vm_size_t size; | |
379 | union { | |
380 | struct vm_map_header hdr; /* ENTRY_LIST */ | |
381 | struct { /* OBJECT */ | |
382 | vm_object_t object; | |
383 | vm_size_t index; /* record progress as pages | |
384 | * are moved from object to | |
385 | * page list; must be zero | |
386 | * when first invoking | |
387 | * vm_map_object_to_page_list | |
388 | */ | |
389 | } c_o; | |
1c79356b A |
390 | struct { /* KERNEL_BUFFER */ |
391 | vm_offset_t kdata; | |
392 | vm_size_t kalloc_size; /* size of this copy_t */ | |
393 | } c_k; | |
394 | } c_u; | |
395 | }; | |
396 | ||
397 | ||
398 | #define cpy_hdr c_u.hdr | |
399 | ||
400 | #define cpy_object c_u.c_o.object | |
401 | #define cpy_index c_u.c_o.index | |
402 | ||
1c79356b A |
403 | #define cpy_kdata c_u.c_k.kdata |
404 | #define cpy_kalloc_size c_u.c_k.kalloc_size | |
405 | ||
406 | ||
407 | /* | |
408 | * Useful macros for entry list copy objects | |
409 | */ | |
410 | ||
411 | #define vm_map_copy_to_entry(copy) \ | |
412 | ((struct vm_map_entry *) &(copy)->cpy_hdr.links) | |
413 | #define vm_map_copy_first_entry(copy) \ | |
414 | ((copy)->cpy_hdr.links.next) | |
415 | #define vm_map_copy_last_entry(copy) \ | |
416 | ((copy)->cpy_hdr.links.prev) | |
417 | ||
1c79356b A |
418 | /* |
419 | * Macros: vm_map_lock, etc. [internal use only] | |
420 | * Description: | |
421 | * Perform locking on the data portion of a map. | |
422 | * When multiple maps are to be locked, order by map address. | |
423 | * (See vm_map.c::vm_remap()) | |
424 | */ | |
425 | ||
426 | #define vm_map_lock_init(map) \ | |
9bccf70c A |
427 | ((map)->timestamp = 0 , \ |
428 | lock_init(&(map)->lock, TRUE, ETAP_VM_MAP, ETAP_VM_MAP_I)) | |
429 | ||
430 | #define vm_map_lock(map) lock_write(&(map)->lock) | |
431 | #define vm_map_unlock(map) \ | |
432 | ((map)->timestamp++ , lock_write_done(&(map)->lock)) | |
433 | #define vm_map_lock_read(map) lock_read(&(map)->lock) | |
434 | #define vm_map_unlock_read(map) lock_read_done(&(map)->lock) | |
1c79356b | 435 | #define vm_map_lock_write_to_read(map) \ |
9bccf70c A |
436 | ((map)->timestamp++ , lock_write_to_read(&(map)->lock)) |
437 | #define vm_map_lock_read_to_write(map) lock_read_to_write(&(map)->lock) | |
1c79356b A |
438 | |
439 | extern zone_t vm_map_copy_zone; /* zone for vm_map_copy structures */ | |
440 | ||
441 | /* | |
442 | * Exported procedures that operate on vm_map_t. | |
443 | */ | |
444 | ||
445 | /* Initialize the module */ | |
446 | extern void vm_map_init(void); | |
447 | ||
448 | /* Allocate a range in the specified virtual address map and | |
449 | * return the entry allocated for that range. */ | |
450 | extern kern_return_t vm_map_find_space( | |
451 | vm_map_t map, | |
452 | vm_offset_t *address, /* OUT */ | |
453 | vm_size_t size, | |
454 | vm_offset_t mask, | |
455 | vm_map_entry_t *o_entry); /* OUT */ | |
456 | ||
457 | /* Lookup the map entry containing, or immediately preceding, the specified address in the given map */ | |
458 | extern boolean_t vm_map_lookup_entry( | |
459 | vm_map_t map, | |
460 | vm_offset_t address, | |
461 | vm_map_entry_t *entry); /* OUT */ | |
462 | ||
1c79356b A |
463 | /* Find the VM object, offset, and protection for a given virtual address |
464 | * in the specified map, assuming a page fault of the type specified. */ | |
465 | extern kern_return_t vm_map_lookup_locked( | |
466 | vm_map_t *var_map, /* IN/OUT */ | |
467 | vm_offset_t vaddr, | |
468 | vm_prot_t fault_type, | |
469 | vm_map_version_t *out_version, /* OUT */ | |
470 | vm_object_t *object, /* OUT */ | |
471 | vm_object_offset_t *offset, /* OUT */ | |
472 | vm_prot_t *out_prot, /* OUT */ | |
473 | boolean_t *wired, /* OUT */ | |
474 | int *behavior, /* OUT */ | |
475 | vm_object_offset_t *lo_offset, /* OUT */ | |
476 | vm_object_offset_t *hi_offset, /* OUT */ | |
477 | vm_map_t *pmap_map); /* OUT */ | |
478 | ||
479 | /* Verifies that the map has not changed since the given version. */ | |
480 | extern boolean_t vm_map_verify( | |
481 | vm_map_t map, | |
482 | vm_map_version_t *version); /* REF */ | |
483 | ||
484 | /* Split a vm_map_entry into 2 entries */ | |
485 | extern void _vm_map_clip_start( | |
486 | struct vm_map_header *map_header, | |
487 | vm_map_entry_t entry, | |
488 | vm_offset_t start); | |
489 | ||
490 | extern vm_map_entry_t vm_map_entry_insert( | |
491 | vm_map_t map, | |
492 | vm_map_entry_t insp_entry, | |
493 | vm_offset_t start, | |
494 | vm_offset_t end, | |
495 | vm_object_t object, | |
496 | vm_object_offset_t offset, | |
497 | boolean_t needs_copy, | |
498 | boolean_t is_shared, | |
499 | boolean_t in_transition, | |
500 | vm_prot_t cur_protection, | |
501 | vm_prot_t max_protection, | |
502 | vm_behavior_t behavior, | |
503 | vm_inherit_t inheritance, | |
504 | unsigned wired_count); | |
505 | ||
506 | extern kern_return_t vm_remap_extract( | |
507 | vm_map_t map, | |
508 | vm_offset_t addr, | |
509 | vm_size_t size, | |
510 | boolean_t copy, | |
511 | struct vm_map_header *map_header, | |
512 | vm_prot_t *cur_protection, | |
513 | vm_prot_t *max_protection, | |
514 | vm_inherit_t inheritance, | |
515 | boolean_t pageable); | |
516 | ||
517 | extern kern_return_t vm_remap_range_allocate( | |
518 | vm_map_t map, | |
519 | vm_offset_t *address, | |
520 | vm_size_t size, | |
521 | vm_offset_t mask, | |
522 | boolean_t anywhere, | |
523 | vm_map_entry_t *map_entry); | |
524 | ||
525 | extern kern_return_t vm_remap_extract( | |
526 | vm_map_t map, | |
527 | vm_offset_t addr, | |
528 | vm_size_t size, | |
529 | boolean_t copy, | |
530 | struct vm_map_header *map_header, | |
531 | vm_prot_t *cur_protection, | |
532 | vm_prot_t *max_protection, | |
533 | vm_inherit_t inheritance, | |
534 | boolean_t pageable); | |
535 | ||
536 | extern kern_return_t vm_remap_range_allocate( | |
537 | vm_map_t map, | |
538 | vm_offset_t *address, | |
539 | vm_size_t size, | |
540 | vm_offset_t mask, | |
541 | boolean_t anywhere, | |
542 | vm_map_entry_t *map_entry); | |
543 | ||
544 | /* | |
545 | * Functions implemented as macros | |
546 | */ | |
547 | #define vm_map_min(map) ((map)->min_offset) | |
548 | /* Lowest valid address in | |
549 | * a map */ | |
550 | ||
551 | #define vm_map_max(map) ((map)->max_offset) | |
552 | /* Highest valid address */ | |
553 | ||
554 | #define vm_map_pmap(map) ((map)->pmap) | |
555 | /* Physical map associated | |
556 | * with this address map */ | |
557 | ||
558 | #define vm_map_verify_done(map, version) vm_map_unlock_read(map) | |
559 | /* Operation that required | |
560 | * a verified lookup is | |
561 | * now complete */ | |
562 | ||
563 | /* | |
564 | * Macros/functions for map residence counts and swapin/out of vm maps | |
565 | */ | |
566 | #if TASK_SWAPPER | |
567 | ||
568 | #if MACH_ASSERT | |
569 | /* Gain a reference to an existing map */ | |
570 | extern void vm_map_reference( | |
571 | vm_map_t map); | |
572 | /* Lose a residence count */ | |
573 | extern void vm_map_res_deallocate( | |
574 | vm_map_t map); | |
575 | /* Gain a residence count on a map */ | |
576 | extern void vm_map_res_reference( | |
577 | vm_map_t map); | |
578 | /* Gain reference & residence counts to possibly swapped-out map */ | |
579 | extern void vm_map_reference_swap( | |
580 | vm_map_t map); | |
581 | ||
582 | #else /* MACH_ASSERT */ | |
583 | ||
584 | #define vm_map_reference(map) \ | |
585 | MACRO_BEGIN \ | |
586 | vm_map_t Map = (map); \ | |
587 | if (Map) { \ | |
588 | mutex_lock(&Map->s_lock); \ | |
589 | Map->res_count++; \ | |
590 | Map->ref_count++; \ | |
591 | mutex_unlock(&Map->s_lock); \ | |
592 | } \ | |
593 | MACRO_END | |
594 | ||
595 | #define vm_map_res_reference(map) \ | |
596 | MACRO_BEGIN \ | |
597 | vm_map_t Lmap = (map); \ | |
598 | if (Lmap->res_count == 0) { \ | |
599 | mutex_unlock(&Lmap->s_lock); \ | |
600 | vm_map_lock(Lmap); \ | |
601 | vm_map_swapin(Lmap); \ | |
602 | mutex_lock(&Lmap->s_lock); \ | |
603 | ++Lmap->res_count; \ | |
604 | vm_map_unlock(Lmap); \ | |
605 | } else \ | |
606 | ++Lmap->res_count; \ | |
607 | MACRO_END | |
608 | ||
609 | #define vm_map_res_deallocate(map) \ | |
610 | MACRO_BEGIN \ | |
611 | vm_map_t Map = (map); \ | |
612 | if (--Map->res_count == 0) { \ | |
613 | mutex_unlock(&Map->s_lock); \ | |
614 | vm_map_lock(Map); \ | |
615 | vm_map_swapout(Map); \ | |
616 | vm_map_unlock(Map); \ | |
617 | mutex_lock(&Map->s_lock); \ | |
618 | } \ | |
619 | MACRO_END | |
620 | ||
621 | #define vm_map_reference_swap(map) \ | |
622 | MACRO_BEGIN \ | |
623 | vm_map_t Map = (map); \ | |
624 | mutex_lock(&Map->s_lock); \ | |
625 | ++Map->ref_count; \ | |
626 | vm_map_res_reference(Map); \ | |
627 | mutex_unlock(&Map->s_lock); \ | |
628 | MACRO_END | |
629 | #endif /* MACH_ASSERT */ | |
630 | ||
631 | extern void vm_map_swapin( | |
632 | vm_map_t map); | |
633 | ||
634 | extern void vm_map_swapout( | |
635 | vm_map_t map); | |
636 | ||
637 | #else /* TASK_SWAPPER */ | |
638 | ||
639 | #define vm_map_reference(map) \ | |
640 | MACRO_BEGIN \ | |
641 | vm_map_t Map = (map); \ | |
642 | if (Map) { \ | |
643 | mutex_lock(&Map->s_lock); \ | |
644 | Map->ref_count++; \ | |
645 | mutex_unlock(&Map->s_lock); \ | |
646 | } \ | |
647 | MACRO_END | |
648 | ||
649 | #define vm_map_reference_swap(map) vm_map_reference(map) | |
650 | #define vm_map_res_reference(map) | |
651 | #define vm_map_res_deallocate(map) | |
652 | ||
653 | #endif /* TASK_SWAPPER */ | |
654 | ||
655 | /* | |
656 | * Submap object. Must be used to create memory to be put | |
657 | * in a submap by vm_map_submap. | |
658 | */ | |
659 | extern vm_object_t vm_submap_object; | |
660 | ||
661 | /* | |
662 | * Wait and wakeup macros for in_transition map entries. | |
663 | */ | |
664 | #define vm_map_entry_wait(map, interruptible) \ | |
9bccf70c A |
665 | ((map)->timestamp++ , \ |
666 | thread_sleep_lock_write((event_t)&(map)->hdr, \ | |
667 | &(map)->lock, interruptible)) | |
1c79356b | 668 | |
1c79356b | 669 | |
9bccf70c | 670 | #define vm_map_entry_wakeup(map) thread_wakeup((event_t)(&(map)->hdr)) |
1c79356b A |
671 | |
672 | ||
673 | #define vm_map_ref_fast(map) \ | |
674 | MACRO_BEGIN \ | |
675 | mutex_lock(&map->s_lock); \ | |
676 | map->ref_count++; \ | |
677 | vm_map_res_reference(map); \ | |
678 | mutex_unlock(&map->s_lock); \ | |
679 | MACRO_END | |
680 | ||
681 | #define vm_map_dealloc_fast(map) \ | |
682 | MACRO_BEGIN \ | |
683 | register int c; \ | |
684 | \ | |
685 | mutex_lock(&map->s_lock); \ | |
686 | c = --map->ref_count; \ | |
687 | if (c > 0) \ | |
688 | vm_map_res_deallocate(map); \ | |
689 | mutex_unlock(&map->s_lock); \ | |
690 | if (c == 0) \ | |
691 | vm_map_destroy(map); \ | |
692 | MACRO_END | |
693 | ||
694 | ||
695 | /* simplify map entries */ | |
696 | extern void vm_map_simplify( | |
697 | vm_map_t map, | |
698 | vm_offset_t start); | |
699 | ||
700 | /* Steal all the pages from a vm_map_copy page_list */ | |
701 | extern void vm_map_copy_steal_pages( | |
702 | vm_map_copy_t copy); | |
703 | ||
704 | /* Discard a copy without using it */ | |
705 | extern void vm_map_copy_discard( | |
706 | vm_map_copy_t copy); | |
707 | ||
708 | /* Move the information in a map copy object to a new map copy object */ | |
709 | extern vm_map_copy_t vm_map_copy_copy( | |
710 | vm_map_copy_t copy); | |
711 | ||
712 | /* Overwrite existing memory with a copy */ | |
713 | extern kern_return_t vm_map_copy_overwrite( | |
714 | vm_map_t dst_map, | |
715 | vm_offset_t dst_addr, | |
716 | vm_map_copy_t copy, | |
717 | int interruptible); | |
718 | ||
1c79356b A |
719 | /* Create a copy object from an object. */ |
720 | extern kern_return_t vm_map_copyin_object( | |
721 | vm_object_t object, | |
722 | vm_object_offset_t offset, | |
723 | vm_object_size_t size, | |
724 | vm_map_copy_t *copy_result); /* OUT */ | |
725 | ||
1c79356b A |
726 | extern vm_map_t vm_map_switch( |
727 | vm_map_t map); | |
728 | ||
729 | extern int vm_map_copy_cont_is_valid( | |
730 | vm_map_copy_t copy); | |
731 | ||
732 | ||
9bccf70c | 733 | #define VM_MAP_ENTRY_NULL ((vm_map_entry_t) 0) |
1c79356b A |
734 | |
735 | ||
736 | /* Enter a mapping */ | |
737 | extern kern_return_t vm_map_enter( | |
738 | vm_map_t map, | |
739 | vm_offset_t *address, | |
740 | vm_size_t size, | |
741 | vm_offset_t mask, | |
742 | int flags, | |
743 | vm_object_t object, | |
744 | vm_object_offset_t offset, | |
745 | boolean_t needs_copy, | |
746 | vm_prot_t cur_protection, | |
747 | vm_prot_t max_protection, | |
748 | vm_inherit_t inheritance); | |
749 | ||
750 | extern kern_return_t vm_map_write_user( | |
751 | vm_map_t map, | |
752 | vm_offset_t src_addr, | |
753 | vm_offset_t dst_addr, | |
754 | vm_size_t size); | |
755 | ||
756 | extern kern_return_t vm_map_read_user( | |
757 | vm_map_t map, | |
758 | vm_offset_t src_addr, | |
759 | vm_offset_t dst_addr, | |
760 | vm_size_t size); | |
761 | ||
762 | /* Create a new task map using an existing task map as a template. */ | |
763 | extern vm_map_t vm_map_fork( | |
764 | vm_map_t old_map); | |
765 | ||
9bccf70c A |
766 | /* Change inheritance */ |
767 | extern kern_return_t vm_map_inherit( | |
1c79356b A |
768 | vm_map_t map, |
769 | vm_offset_t start, | |
770 | vm_offset_t end, | |
9bccf70c | 771 | vm_inherit_t new_inheritance); |
1c79356b | 772 | |
9bccf70c A |
773 | /* Add or remove machine-dependent attributes from map regions */ |
774 | extern kern_return_t vm_map_machine_attribute( | |
775 | vm_map_t map, | |
776 | vm_offset_t address, | |
777 | vm_size_t size, | |
778 | vm_machine_attribute_t attribute, | |
779 | vm_machine_attribute_val_t* value); /* IN/OUT */ | |
780 | /* Set paging behavior */ | |
781 | extern kern_return_t vm_map_behavior_set( | |
1c79356b A |
782 | vm_map_t map, |
783 | vm_offset_t start, | |
784 | vm_offset_t end, | |
9bccf70c A |
785 | vm_behavior_t new_behavior); |
786 | ||
787 | extern kern_return_t vm_map_submap( | |
788 | vm_map_t map, | |
789 | vm_offset_t start, | |
790 | vm_offset_t end, | |
791 | vm_map_t submap, | |
792 | vm_offset_t offset, | |
793 | boolean_t use_pmap); | |
794 | ||
795 | ||
796 | #endif /* MACH_KERNEL_PRIVATE */ | |
797 | ||
798 | /* Create an empty map */ | |
799 | extern vm_map_t vm_map_create( | |
800 | pmap_t pmap, | |
801 | vm_offset_t min, | |
802 | vm_offset_t max, | |
803 | boolean_t pageable); | |
804 | ||
805 | /* Get rid of a map */ | |
806 | extern void vm_map_destroy( | |
807 | vm_map_t map); | |
808 | /* Lose a reference */ | |
809 | extern void vm_map_deallocate( | |
810 | vm_map_t map); | |
811 | ||
812 | /* Change protection */ | |
813 | extern kern_return_t vm_map_protect( | |
814 | vm_map_t map, | |
815 | vm_offset_t start, | |
816 | vm_offset_t end, | |
817 | vm_prot_t new_prot, | |
818 | boolean_t set_max); | |
1c79356b A |
819 | |
820 | /* wire down a region */ | |
821 | extern kern_return_t vm_map_wire( | |
822 | vm_map_t map, | |
823 | vm_offset_t start, | |
824 | vm_offset_t end, | |
825 | vm_prot_t access_type, | |
826 | boolean_t user_wire); | |
827 | ||
828 | /* unwire a region */ | |
829 | extern kern_return_t vm_map_unwire( | |
830 | vm_map_t map, | |
831 | vm_offset_t start, | |
832 | vm_offset_t end, | |
833 | boolean_t user_wire); | |
834 | ||
835 | /* Deallocate a region */ | |
836 | extern kern_return_t vm_map_remove( | |
837 | vm_map_t map, | |
838 | vm_offset_t start, | |
839 | vm_offset_t end, | |
840 | boolean_t flags); | |
841 | ||
842 | /* Place a copy into a map */ | |
843 | extern kern_return_t vm_map_copyout( | |
844 | vm_map_t dst_map, | |
845 | vm_offset_t *dst_addr, /* OUT */ | |
846 | vm_map_copy_t copy); | |
847 | ||
1c79356b A |
848 | extern kern_return_t vm_map_copyin_common( |
849 | vm_map_t src_map, | |
850 | vm_offset_t src_addr, | |
851 | vm_size_t len, | |
852 | boolean_t src_destroy, | |
853 | boolean_t src_volatile, | |
854 | vm_map_copy_t *copy_result, /* OUT */ | |
855 | boolean_t use_maxprot); | |
856 | ||
0b4e3aa0 A |
857 | extern kern_return_t vm_region_clone( |
858 | ipc_port_t src_region, | |
859 | ipc_port_t dst_region); | |
860 | ||
861 | extern kern_return_t vm_map_region_replace( | |
862 | vm_map_t target_map, | |
863 | ipc_port_t old_region, | |
864 | ipc_port_t new_region, | |
865 | vm_offset_t start, | |
866 | vm_offset_t end); | |
867 | ||
1c79356b A |
868 | /* |
869 | * Macros to invoke vm_map_copyin_common. vm_map_copyin is the | |
870 | * usual form; it handles a copyin based on the current protection | |
871 | * (a current protection of VM_PROT_NONE is a failure). | |
872 | * vm_map_copyin_maxprot handles a copyin based on maximum possible | |
873 | * access. The difference is that a region with no current access | |
874 | * BUT possible maximum access is rejected by vm_map_copyin(), but | |
875 | * returned by vm_map_copyin_maxprot. | |
876 | */ | |
877 | #define vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \ | |
878 | vm_map_copyin_common(src_map, src_addr, len, src_destroy, \ | |
879 | FALSE, copy_result, FALSE) | |
880 | ||
881 | #define vm_map_copyin_maxprot(src_map, \ | |
882 | src_addr, len, src_destroy, copy_result) \ | |
883 | vm_map_copyin_common(src_map, src_addr, len, src_destroy, \ | |
884 | FALSE, copy_result, TRUE) | |
885 | ||
1c79356b A |
886 | /* |
887 | * Flags for vm_map_remove() and vm_map_delete() | |
888 | */ | |
889 | #define VM_MAP_NO_FLAGS 0x0 | |
890 | #define VM_MAP_REMOVE_KUNWIRE 0x1 | |
891 | #define VM_MAP_REMOVE_INTERRUPTIBLE 0x2 | |
892 | #define VM_MAP_REMOVE_WAIT_FOR_KWIRE 0x4 | |
893 | ||
9bccf70c | 894 | #endif /* __APPLE_API_PRIVATE */ |
1c79356b | 895 | |
1c79356b A |
896 | #endif /* _VM_VM_MAP_H_ */ |
897 |