/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	vm/vm_map.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory map module definitions.
 *
 * Contributors:
 *	avie, dlb, mwyoung
 */

#ifndef	_VM_VM_MAP_H_
#define	_VM_VM_MAP_H_

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>
#include <vm/pmap.h>

#ifdef	KERNEL_PRIVATE

#include <sys/cdefs.h>

__BEGIN_DECLS

extern void	vm_map_reference(vm_map_t	map);
extern vm_map_t	current_map(void);

/* Setup reserved areas in a new VM map */
extern kern_return_t	vm_map_exec(
				vm_map_t	new_map,
				task_t		task,
				void		*fsroot,
				cpu_type_t	cpu);

__END_DECLS

#ifdef	MACH_KERNEL_PRIVATE

#include <task_swapper.h>
#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/lock.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>

#define current_map_fast()	(current_thread()->map)
#define current_map()		(current_map_fast())

/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				 used for inter-map copy operations
 */
typedef struct vm_map_entry	*vm_map_entry_t;
#define VM_MAP_ENTRY_NULL	((vm_map_entry_t) 0)


/*
 *	Type:		vm_map_object_t [internal use only]
 *
 *	Description:
 *		The target of an address mapping, either a virtual
 *		memory object or a sub map (of the kernel map).
 */
typedef union vm_map_object {
	vm_object_t		vm_object;	/* object object */
	vm_map_t		sub_map;	/* belongs to another map */
} vm_map_object_t;

#define named_entry_lock_init(object)	lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock(object)	lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)	lck_mtx_unlock(&(object)->Lock)

/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions, (read, write)
 *		with which it can be mapped.
 *
 */

struct vm_named_entry {
	decl_lck_mtx_data(,	Lock)		/* Synchronization */
	union {
		vm_object_t	object;		/* object I point to */
		memory_object_t	pager;		/* amo pager port */
		vm_map_t	map;		/* map backing submap */
	} backing;
	vm_object_offset_t	offset;		/* offset into object */
	vm_object_size_t	size;		/* size of region */
	vm_prot_t		protection;	/* access permissions */
	int			ref_count;	/* Number of references */
	unsigned int				/* Is backing.xxx : */
	/* boolean_t */		internal:1,	/* ... an internal object */
	/* boolean_t */		is_sub_map:1,	/* ... a submap? */
	/* boolean_t */		is_pager:1;	/* ... a pager port */
};

/*
 *	Type:		vm_map_entry_t [internal use only]
 *
 *	Description:
 *		A single mapping within an address map.
 *
 *	Implementation:
 *		Address map entries consist of start and end addresses,
 *		a VM object (or sub map) and offset into that object,
 *		and user-exported inheritance and protection information.
 *		Control information for virtual copy operations is also
 *		stored in the address map entry.
 */
struct vm_map_links {
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vm_map_offset_t		start;		/* start address */
	vm_map_offset_t		end;		/* end address */
};

struct vm_map_entry {
	struct vm_map_links	links;		/* links to other entries */
#define vme_prev		links.prev
#define vme_next		links.next
#define vme_start		links.start
#define vme_end			links.end
	union vm_map_object	object;		/* object I point to */
	vm_object_offset_t	offset;		/* offset into object */
	unsigned int
	/* boolean_t */		is_shared:1,	/* region is shared */
	/* boolean_t */		is_sub_map:1,	/* Is "object" a submap? */
	/* boolean_t */		in_transition:1, /* Entry being changed */
	/* boolean_t */		needs_wakeup:1,	/* Waiters on in_transition */
	/* vm_behavior_t */	behavior:2,	/* user paging behavior hint */
		/* behavior is not defined for submap type */
	/* boolean_t */		needs_copy:1,	/* object need to be copied? */
		/* Only in task maps: */
	/* vm_prot_t */		protection:3,	/* protection code */
	/* vm_prot_t */		max_protection:3, /* maximum protection */
	/* vm_inherit_t */	inheritance:2,	/* inheritance */
	/* boolean_t */		use_pmap:1,	/* nested pmaps */
	/*
	 * IMPORTANT:
	 * The "alias" field can be updated while holding the VM map lock
	 * "shared".  It's OK as long as it's the only field that can be
	 * updated without the VM map "exclusive" lock.
	 */
	/* unsigned char */	alias:8,	/* user alias */
	/* boolean_t */		no_cache:1,	/* should new pages be cached? */
	/* boolean_t */		permanent:1,	/* mapping can not be removed */
	/* boolean_t */		superpage_size:3, /* use superpages of a certain size */
	/* boolean_t */		zero_wired_pages:1, /* zero out the wired pages of this entry if it is being deleted without unwiring them */
	/* unsigned char */	pad:2;		/* available bits */
	unsigned short		wired_count;	/* can be paged if = 0 */
	unsigned short		user_wired_count; /* for vm_wire */
};

/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE		(PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK		(-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a)	(a & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a)	((a + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)

/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT		65535



/*
 *	Type:		struct vm_map_header
 *
 *	Description:
 *		Header for a vm_map and a vm_map_copy.
 */
struct vm_map_header {
	struct vm_map_links	links;		/* first, last, min, max */
	int			nentries;	/* Number of entries */
	boolean_t		entries_pageable;
						/* are map entries pageable? */
};

/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 */
struct _vm_map {
	lock_t			lock;		/* uni- and smp-lock */
	struct vm_map_header	hdr;		/* Map entry header */
#define min_offset		hdr.links.start	/* start of range */
#define max_offset		hdr.links.end	/* end of range */
	pmap_t			pmap;		/* Physical map */
	vm_map_size_t		size;		/* virtual size */
	vm_map_size_t		user_wire_limit; /* rlimit on user locked memory */
	vm_map_size_t		user_wire_size;	/* current size of user locked memory in this map */
	int			ref_count;	/* Reference count */
#if	TASK_SWAPPER
	int			res_count;	/* Residence count (swap) */
	int			sw_state;	/* Swap state */
#endif	/* TASK_SWAPPER */
	decl_lck_mtx_data(,	s_lock)		/* Lock ref, res fields */
	lck_mtx_ext_t		s_lock_ext;
	vm_map_entry_t		hint;		/* hint for quick lookups */
	vm_map_entry_t		first_free;	/* First free space hint */
	boolean_t		wait_for_space;	/* Should callers wait
						   for space? */
	boolean_t		wiring_required; /* All memory wired? */
	boolean_t		no_zero_fill;	/* No zero fill absent pages */
	boolean_t		mapped;		/* has this map been mapped */
	boolean_t		switch_protect;	/* Protect map from write faults while switched */
	unsigned int		timestamp;	/* Version number */
	unsigned int		color_rr;	/* next color (not protected by a lock) */
};

#define vm_map_to_entry(map)	((struct vm_map_entry *) &(map)->hdr.links)
#define vm_map_first_entry(map)	((map)->hdr.links.next)
#define vm_map_last_entry(map)	((map)->hdr.links.prev)

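/*
 * Usage sketch (illustrative only; the variable names are hypothetical):
 * the header's links act as a sentinel, so every entry of a map can be
 * walked with the macros above while the map lock is held.
 *
 *	vm_map_entry_t	entry;
 *
 *	vm_map_lock_read(map);
 *	for (entry = vm_map_first_entry(map);
 *	     entry != vm_map_to_entry(map);
 *	     entry = entry->vme_next) {
 *		// entry covers [entry->vme_start, entry->vme_end)
 *	}
 *	vm_map_unlock_read(map);
 */
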
#if	TASK_SWAPPER
/*
 *	VM map swap states.  There are no transition states.
 */
#define MAP_SW_IN	1	/* map is swapped in; residence count > 0 */
#define MAP_SW_OUT	2	/* map is out (res_count == 0) */
#endif	/* TASK_SWAPPER */

/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage note:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int	main_timestamp;
} vm_map_version_t;

/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 *		There are three formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header links
 *		entry onto which the other entries that represent
 *		the region are chained.
 *
 *		The second format is a single vm object.  This was used
 *		primarily in the pageout path - but is not currently used
 *		except for placeholder copy objects (see vm_map_copy_copy()).
 *
 *		The third format is a kernel buffer copy object - for data
 *		small enough that physical copies were the most efficient
 *		method.
 */

struct vm_map_copy {
	int			type;
#define VM_MAP_COPY_ENTRY_LIST		1
#define VM_MAP_COPY_OBJECT		2
#define VM_MAP_COPY_KERNEL_BUFFER	3
	vm_object_offset_t	offset;
	vm_map_size_t		size;
	union {
		struct vm_map_header	hdr;	/* ENTRY_LIST */
		vm_object_t		object;	/* OBJECT */
		struct {
			void		*kdata;		/* KERNEL_BUFFER */
			vm_size_t	kalloc_size;	/* size of this copy_t */
		} c_k;
	} c_u;
};


#define cpy_hdr			c_u.hdr

#define cpy_object		c_u.object

#define cpy_kdata		c_u.c_k.kdata
#define cpy_kalloc_size		c_u.c_k.kalloc_size


/*
 *	Useful macros for entry list copy objects
 */

#define	vm_map_copy_to_entry(copy)		\
		((struct vm_map_entry *) &(copy)->cpy_hdr.links)
#define	vm_map_copy_first_entry(copy)		\
		((copy)->cpy_hdr.links.next)
#define	vm_map_copy_last_entry(copy)		\
		((copy)->cpy_hdr.links.prev)

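/*
 * Usage sketch (illustrative only): for a copy object of type
 * VM_MAP_COPY_ENTRY_LIST, the entries describing the copied region can be
 * walked the same way as a map, using the copy header as the sentinel:
 *
 *	vm_map_entry_t	entry;
 *
 *	for (entry = vm_map_copy_first_entry(copy);
 *	     entry != vm_map_copy_to_entry(copy);
 *	     entry = entry->vme_next) {
 *		// each entry covers [vme_start, vme_end) of the copied region
 *	}
 */
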
/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *	Description:
 *		Perform locking on the data portion of a map.
 *	When multiple maps are to be locked, order by map address.
 *	(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map)						\
	((map)->timestamp = 0 ,						\
	 lock_init(&(map)->lock, TRUE, 0, 0))

#define vm_map_lock(map)		lock_write(&(map)->lock)
#define vm_map_unlock(map)						\
		((map)->timestamp++ ,	lock_write_done(&(map)->lock))
#define vm_map_lock_read(map)		lock_read(&(map)->lock)
#define vm_map_unlock_read(map)		lock_read_done(&(map)->lock)
#define vm_map_lock_write_to_read(map)					\
		((map)->timestamp++ ,	lock_write_to_read(&(map)->lock))
/* lock_read_to_write() returns FALSE on failure.  Macro evaluates to
 * zero on success and non-zero value on failure.
 */
#define vm_map_lock_read_to_write(map)	(lock_read_to_write(&(map)->lock) != TRUE)

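/*
 * Usage sketch (illustrative only; based on the semantics described above):
 * a reader that needs exclusive access tries to upgrade.  If the upgrade
 * fails, the caller should assume the lock is no longer held at all, re-take
 * it, and revalidate anything it learned under the read lock (the map
 * timestamp can be used for that).
 *
 *	vm_map_lock_read(map);
 *	...
 *	if (vm_map_lock_read_to_write(map)) {
 *		// upgrade failed: not holding the lock any more
 *		vm_map_lock(map);
 *		// revalidate state observed under the read lock here
 *	}
 *	...
 *	vm_map_unlock(map);
 */
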
/*
 *	Exported procedures that operate on vm_map_t.
 */

/* Initialize the module */
extern void		vm_map_init(void) __attribute__((section("__TEXT, initcode")));

/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t vm_map_find_space(
				vm_map_t		map,
				vm_map_address_t	*address,	/* OUT */
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				vm_map_entry_t		*o_entry);	/* OUT */

/* Look up the map entry containing (or immediately preceding) the specified
 * address in the given map */
extern boolean_t	vm_map_lookup_entry(
				vm_map_t		map,
				vm_map_address_t	address,
				vm_map_entry_t		*entry);	/* OUT */

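/*
 * Usage sketch (illustrative only; variable names are hypothetical):
 *
 *	vm_map_entry_t	entry;
 *
 *	vm_map_lock_read(map);
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		// "addr" lies within [entry->vme_start, entry->vme_end)
 *	} else {
 *		// no entry contains "addr"; "entry" is the preceding entry
 *		// (or vm_map_to_entry(map) if there is none)
 *	}
 *	vm_map_unlock_read(map);
 */
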
/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t	vm_map_lookup_locked(
				vm_map_t		*var_map,	/* IN/OUT */
				vm_map_address_t	vaddr,
				vm_prot_t		fault_type,
				int			object_lock_type,
				vm_map_version_t	*out_version,	/* OUT */
				vm_object_t		*object,	/* OUT */
				vm_object_offset_t	*offset,	/* OUT */
				vm_prot_t		*out_prot,	/* OUT */
				boolean_t		*wired,		/* OUT */
				vm_object_fault_info_t	fault_info,	/* OUT */
				vm_map_t		*real_map);	/* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t	vm_map_verify(
				vm_map_t		map,
				vm_map_version_t	*version);	/* REF */

extern vm_map_entry_t	vm_map_entry_insert(
				vm_map_t		map,
				vm_map_entry_t		insp_entry,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_object_t		object,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				boolean_t		is_shared,
				boolean_t		in_transition,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_behavior_t		behavior,
				vm_inherit_t		inheritance,
				unsigned		wired_count,
				boolean_t		no_cache,
				boolean_t		permanent,
				unsigned int		superpage_size);


/*
 *	Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->min_offset)
						/* Lowest valid address in
						 * a map */

#define		vm_map_max(map)		((map)->max_offset)
						/* Highest valid address */

#define		vm_map_pmap(map)	((map)->pmap)
						/* Physical map associated
						 * with this address map */

#define		vm_map_verify_done(map, version)    vm_map_unlock_read(map)
						/* Operation that required
						 * a verified lookup is
						 * now complete */

/*
 * Macros/functions for map residence counts and swapin/out of vm maps
 */
#if	TASK_SWAPPER

#if	MACH_ASSERT
/* Gain a reference to an existing map */
extern void		vm_map_reference(
				vm_map_t	map);
/* Lose a residence count */
extern void		vm_map_res_deallocate(
				vm_map_t	map);
/* Gain a residence count on a map */
extern void		vm_map_res_reference(
				vm_map_t	map);
/* Gain reference & residence counts to possibly swapped-out map */
extern void		vm_map_reference_swap(
				vm_map_t	map);

#else	/* MACH_ASSERT */

#define vm_map_reference(map)			\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	if (Map) {				\
		lck_mtx_lock(&Map->s_lock);	\
		Map->res_count++;		\
		Map->ref_count++;		\
		lck_mtx_unlock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_res_reference(map)		\
MACRO_BEGIN					\
	vm_map_t Lmap = (map);			\
	if (Lmap->res_count == 0) {		\
		lck_mtx_unlock(&Lmap->s_lock);	\
		vm_map_lock(Lmap);		\
		vm_map_swapin(Lmap);		\
		lck_mtx_lock(&Lmap->s_lock);	\
		++Lmap->res_count;		\
		vm_map_unlock(Lmap);		\
	} else					\
		++Lmap->res_count;		\
MACRO_END

#define vm_map_res_deallocate(map)		\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	if (--Map->res_count == 0) {		\
		lck_mtx_unlock(&Map->s_lock);	\
		vm_map_lock(Map);		\
		vm_map_swapout(Map);		\
		vm_map_unlock(Map);		\
		lck_mtx_lock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_reference_swap(map)		\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	lck_mtx_lock(&Map->s_lock);		\
	++Map->ref_count;			\
	vm_map_res_reference(Map);		\
	lck_mtx_unlock(&Map->s_lock);		\
MACRO_END
#endif	/* MACH_ASSERT */

extern void		vm_map_swapin(
				vm_map_t	map);

extern void		vm_map_swapout(
				vm_map_t	map);

#else	/* TASK_SWAPPER */

#define vm_map_reference(map)			\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	if (Map) {				\
		lck_mtx_lock(&Map->s_lock);	\
		Map->ref_count++;		\
		lck_mtx_unlock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_reference_swap(map)	vm_map_reference(map)
#define vm_map_res_reference(map)
#define vm_map_res_deallocate(map)

#endif	/* TASK_SWAPPER */

/*
 *	Submap object.  Must be used to create memory to be put
 *	in a submap by vm_map_submap.
 */
extern vm_object_t	vm_submap_object;

/*
 *	Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible)		\
	((map)->timestamp++ ,				\
	 thread_sleep_lock_write((event_t)&(map)->hdr,	\
			 &(map)->lock, interruptible))


#define vm_map_entry_wakeup(map)	\
	thread_wakeup((event_t)(&(map)->hdr))


#define	vm_map_ref_fast(map)			\
	MACRO_BEGIN				\
	lck_mtx_lock(&map->s_lock);		\
	map->ref_count++;			\
	vm_map_res_reference(map);		\
	lck_mtx_unlock(&map->s_lock);		\
	MACRO_END

#define	vm_map_dealloc_fast(map)		\
	MACRO_BEGIN				\
	register int c;				\
						\
	lck_mtx_lock(&map->s_lock);		\
	c = --map->ref_count;			\
	if (c > 0)				\
		vm_map_res_deallocate(map);	\
	lck_mtx_unlock(&map->s_lock);		\
	if (c == 0)				\
		vm_map_destroy(map);		\
	MACRO_END


/* simplify map entries */
extern void		vm_map_simplify_entry(
				vm_map_t	map,
				vm_map_entry_t	this_entry);
extern void		vm_map_simplify(
				vm_map_t		map,
				vm_map_offset_t		start);

/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t	vm_map_copy_copy(
				vm_map_copy_t		copy);

/* Create a copy object from an object. */
extern kern_return_t	vm_map_copyin_object(
				vm_object_t		object,
				vm_object_offset_t	offset,
				vm_object_size_t	size,
				vm_map_copy_t		*copy_result); /* OUT */

/* Enter a mapping */
extern kern_return_t	vm_map_enter(
				vm_map_t		map,
				vm_map_offset_t		*address,
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				vm_object_t		object,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_inherit_t		inheritance);

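/*
 * Usage sketch (illustrative only): entering one page of zero-fill anonymous
 * memory at an address chosen by the map.  This assumes the usual Mach VM
 * constants (VM_FLAGS_ANYWHERE, VM_OBJECT_NULL, VM_PROT_*, VM_INHERIT_DEFAULT)
 * are visible at the call site.
 *
 *	vm_map_offset_t	addr = 0;
 *	kern_return_t	kr;
 *
 *	kr = vm_map_enter(map, &addr, (vm_map_size_t)PAGE_SIZE,
 *			  (vm_map_offset_t)0,		// no alignment mask
 *			  VM_FLAGS_ANYWHERE,
 *			  VM_OBJECT_NULL,		// anonymous, zero-fill
 *			  (vm_object_offset_t)0,
 *			  FALSE,			// needs_copy
 *			  VM_PROT_DEFAULT, VM_PROT_ALL,
 *			  VM_INHERIT_DEFAULT);
 */
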
/* XXX should go away - replaced with regular enter of contig object */
extern  kern_return_t	vm_map_enter_cpm(
				vm_map_t		map,
				vm_map_address_t	*addr,
				vm_map_size_t		size,
				int			flags);

extern kern_return_t vm_map_remap(
				vm_map_t		target_map,
				vm_map_offset_t		*address,
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				boolean_t		anywhere,
				vm_map_t		src_map,
				vm_map_offset_t		memory_address,
				boolean_t		copy,
				vm_prot_t		*cur_protection,
				vm_prot_t		*max_protection,
				vm_inherit_t		inheritance);


/*
 * Read and write from a kernel buffer to a specified map.
 */
extern	kern_return_t	vm_map_write_user(
				vm_map_t		map,
				void			*src_p,
				vm_map_offset_t		dst_addr,
				vm_size_t		size);

extern	kern_return_t	vm_map_read_user(
				vm_map_t		map,
				vm_map_offset_t		src_addr,
				void			*dst_p,
				vm_size_t		size);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t		vm_map_fork(
				vm_map_t		old_map);

/* Change inheritance */
extern kern_return_t	vm_map_inherit(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_inherit_t		new_inheritance);

/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t	vm_map_machine_attribute(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_machine_attribute_t	attribute,
				vm_machine_attribute_val_t* value); /* IN/OUT */

extern kern_return_t	vm_map_msync(
				vm_map_t		map,
				vm_map_address_t	address,
				vm_map_size_t		size,
				vm_sync_t		sync_flags);

/* Set paging behavior */
extern kern_return_t	vm_map_behavior_set(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_behavior_t		new_behavior);

extern kern_return_t vm_map_purgable_control(
				vm_map_t		map,
				vm_map_offset_t		address,
				vm_purgable_t		control,
				int			*state);

extern kern_return_t vm_map_region(
				vm_map_t		map,
				vm_map_offset_t		*address,
				vm_map_size_t		*size,
				vm_region_flavor_t	flavor,
				vm_region_info_t	info,
				mach_msg_type_number_t	*count,
				mach_port_t		*object_name);

extern kern_return_t vm_map_region_recurse_64(
				vm_map_t		map,
				vm_map_offset_t		*address,
				vm_map_size_t		*size,
				natural_t		*nesting_depth,
				vm_region_submap_info_64_t info,
				mach_msg_type_number_t	*count);

extern kern_return_t vm_map_page_query_internal(
				vm_map_t		map,
				vm_map_offset_t		offset,
				int			*disposition,
				int			*ref_count);


extern kern_return_t	vm_map_submap(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_map_t		submap,
				vm_map_offset_t		offset,
				boolean_t		use_pmap);

extern void vm_map_submap_pmap_clean(
				vm_map_t	map,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_map_t	sub_map,
				vm_map_offset_t	offset);

/* Convert from a map entry port to a map */
extern vm_map_t convert_port_entry_to_map(
				ipc_port_t	port);

/* Convert from a port to a vm_object */
extern vm_object_t convert_port_entry_to_object(
				ipc_port_t	port);


/* definitions related to overriding the NX behavior */

#define VM_ABI_32	0x1
#define VM_ABI_64	0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

/* Create an empty map */
extern vm_map_t		vm_map_create(
				pmap_t			pmap,
				vm_map_offset_t		min_off,
				vm_map_offset_t		max_off,
				boolean_t		pageable);

/* Get rid of a map */
extern void		vm_map_destroy(
				vm_map_t		map,
				int			flags);

/* Lose a reference */
extern void		vm_map_deallocate(
				vm_map_t		map);

extern vm_map_t		vm_map_switch(
				vm_map_t		map);

/* Change protection */
extern kern_return_t	vm_map_protect(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_prot_t		new_prot,
				boolean_t		set_max);

/* Check protection */
extern boolean_t vm_map_check_protection(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_prot_t		protection);

/* wire down a region */
extern kern_return_t	vm_map_wire(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_prot_t		access_type,
				boolean_t		user_wire);

/* unwire a region */
extern kern_return_t	vm_map_unwire(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				boolean_t		user_wire);

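/*
 * Usage sketch (illustrative only; "map", "start", "end" and "kr" are
 * hypothetical): wiring a page-aligned range for kernel access and unwiring
 * it again.  The last argument selects user wiring (tracked in
 * user_wired_count and limited by the map's user_wire_limit) versus kernel
 * wiring.
 *
 *	kr = vm_map_wire(map, start, end,
 *			 VM_PROT_READ | VM_PROT_WRITE,	// access to fault in
 *			 FALSE);			// kernel wire
 *	...
 *	kr = vm_map_unwire(map, start, end, FALSE);
 */
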
/* Enter a mapping of a memory object */
extern kern_return_t	vm_map_enter_mem_object(
				vm_map_t		map,
				vm_map_offset_t		*address,
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				ipc_port_t		port,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_inherit_t		inheritance);

/* Enter a mapping of a memory object */
extern kern_return_t	vm_map_enter_mem_object_control(
				vm_map_t		map,
				vm_map_offset_t		*address,
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				memory_object_control_t	control,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_inherit_t		inheritance);

/* Deallocate a region */
extern kern_return_t	vm_map_remove(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				boolean_t		flags);

/* Discard a copy without using it */
extern void		vm_map_copy_discard(
				vm_map_copy_t		copy);

/* Overwrite existing memory with a copy */
extern kern_return_t	vm_map_copy_overwrite(
				vm_map_t		dst_map,
				vm_map_address_t	dst_addr,
				vm_map_copy_t		copy,
				boolean_t		interruptible);

/* Place a copy into a map */
extern kern_return_t	vm_map_copyout(
				vm_map_t		dst_map,
				vm_map_address_t	*dst_addr,	/* OUT */
				vm_map_copy_t		copy);

extern kern_return_t	vm_map_copyin(
				vm_map_t		src_map,
				vm_map_address_t	src_addr,
				vm_map_size_t		len,
				boolean_t		src_destroy,
				vm_map_copy_t		*copy_result);	/* OUT */

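/*
 * Usage sketch (illustrative only; "src_map", "dst_map", "src_addr" and
 * "len" are hypothetical): moving a region between two maps with a copy
 * object.  On success vm_map_copyout consumes the copy; on failure the
 * caller typically discards it.
 *
 *	vm_map_copy_t		copy;
 *	vm_map_address_t	dst_addr;
 *	kern_return_t		kr;
 *
 *	kr = vm_map_copyin(src_map, src_addr, len,
 *			   FALSE,		// do not destroy the source
 *			   &copy);
 *	if (kr == KERN_SUCCESS) {
 *		kr = vm_map_copyout(dst_map, &dst_addr, copy);
 *		if (kr != KERN_SUCCESS)
 *			vm_map_copy_discard(copy);
 *	}
 */
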
extern kern_return_t	vm_map_copyin_common(
				vm_map_t		src_map,
				vm_map_address_t	src_addr,
				vm_map_size_t		len,
				boolean_t		src_destroy,
				boolean_t		src_volatile,
				vm_map_copy_t		*copy_result,	/* OUT */
				boolean_t		use_maxprot);

extern void		vm_map_disable_NX(
				vm_map_t		map);

extern void		vm_map_set_64bit(
				vm_map_t		map);

extern void		vm_map_set_32bit(
				vm_map_t		map);

extern boolean_t	vm_map_is_64bit(
				vm_map_t		map);

extern boolean_t	vm_map_has_4GB_pagezero(
				vm_map_t		map);

extern void		vm_map_set_4GB_pagezero(
				vm_map_t		map);

extern void		vm_map_clear_4GB_pagezero(
				vm_map_t		map);

extern kern_return_t	vm_map_raise_min_offset(
				vm_map_t		map,
				vm_map_offset_t		new_min_offset);

extern vm_map_offset_t	vm_compute_max_offset(
				unsigned		is64);

extern void		vm_map_set_user_wire_limit(
				vm_map_t		map,
				vm_size_t		limit);

extern void vm_map_switch_protect(
				vm_map_t		map,
				boolean_t		val);

#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_page_info(
				vm_map_t		map,
				vm_map_offset_t		offset,
				vm_page_info_flavor_t	flavor,
				vm_page_info_t		info,
				mach_msg_type_number_t	*count);
#endif /* XNU_KERNEL_PRIVATE */


#ifdef	MACH_KERNEL_PRIVATE

/*
 *	Macros to invoke vm_map_copyin_common.  vm_map_copyin is the
 *	usual form; it handles a copyin based on the current protection
 *	(a region whose current protection is VM_PROT_NONE is a failure).
 *	vm_map_copyin_maxprot handles a copyin based on maximum possible
 *	access.  The difference is that a region with no current access
 *	BUT possible maximum access is rejected by vm_map_copyin(), but
 *	returned by vm_map_copyin_maxprot.
 */
#define	vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \
		vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
					FALSE, copy_result, FALSE)

#define vm_map_copyin_maxprot(src_map, \
			      src_addr, len, src_destroy, copy_result) \
		vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
					FALSE, copy_result, TRUE)

#endif	/* MACH_KERNEL_PRIVATE */

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
#define vm_map_round_page(x) (((vm_map_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_map_trunc_page(x) ((vm_map_offset_t)(x) & ~((signed)PAGE_MASK))

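/*
 * For example, with 4 KB pages (PAGE_MASK == 0xFFF):
 *	vm_map_trunc_page(0x1FFF) == 0x1000
 *	vm_map_round_page(0x1001) == 0x2000
 */
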
/*
 * Flags for vm_map_remove() and vm_map_delete()
 */
#define	VM_MAP_NO_FLAGS			0x0
#define	VM_MAP_REMOVE_KUNWIRE		0x1
#define	VM_MAP_REMOVE_INTERRUPTIBLE	0x2
#define	VM_MAP_REMOVE_WAIT_FOR_KWIRE	0x4
#define VM_MAP_REMOVE_SAVE_ENTRIES	0x8
#define VM_MAP_REMOVE_NO_PMAP_CLEANUP	0x10

/* Support for UPLs from vm_maps */

extern kern_return_t vm_map_get_upl(
				vm_map_t		target_map,
				vm_map_offset_t		map_offset,
				upl_size_t		*size,
				upl_t			*upl,
				upl_page_info_array_t	page_info,
				unsigned int		*page_infoCnt,
				int			*flags,
				int			force_data_sync);

#if CONFIG_DYNAMIC_CODE_SIGNING
extern kern_return_t vm_map_sign(vm_map_t map,
				 vm_map_offset_t start,
				 vm_map_offset_t end);
#endif

__END_DECLS

#endif	/* KERNEL_PRIVATE */

#endif	/* _VM_VM_MAP_H_ */