/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm_object.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory object module definitions.
 */

#ifndef	_VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <debug.h>
#include <mach_assert.h>
#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>

#include <vm/vm_external.h>

#include <vm/vm_options.h>

#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern boolean_t vm_object_tracking_inited;
extern btlog_t *vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_BTDEPTH	7
#define VM_OBJECT_TRACKING_OP_CREATED	1
#define VM_OBJECT_TRACKING_OP_MODIFIED	2
#define VM_OBJECT_TRACKING_OP_TRUESHARE	3
#endif /* VM_OBJECT_TRACKING */

struct vm_page;
struct vm_shared_region_slide_info;

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */

struct vm_object_fault_info {
	int		interruptible;
	uint32_t	user_tag;
	vm_size_t	cluster_size;
	vm_behavior_t	behavior;
	vm_map_offset_t	lo_offset;
	vm_map_offset_t	hi_offset;
	unsigned int
	/* boolean_t */	no_cache:1,
	/* boolean_t */	stealth:1,
	/* boolean_t */	io_sync:1,
	/* boolean_t */	cs_bypass:1,
	/* boolean_t */	mark_zf_absent:1,
	/* boolean_t */	batch_pmap_op:1,
		__vm_object_fault_info_unused_bits:26;
	int		pmap_options;
};


#define	vo_size				vo_un1.vou_size
#define vo_cache_pages_to_scan		vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset		vo_un2.vou_shadow_offset
#define vo_cache_ts			vo_un2.vou_cache_ts
#define vo_purgeable_owner		vo_un2.vou_purgeable_owner
#define vo_slide_info			vo_un2.vou_slide_info

struct vm_object {
	queue_head_t		memq;		/* Resident memory */
	lck_rw_t		Lock;		/* Synchronization */

	union {
		vm_object_size_t  vou_size;	/* Object size (only valid if internal) */
		int		  vou_cache_pages_to_scan;	/* pages yet to be visited in an
								 * external object in cache
								 */
	} vo_un1;

	struct vm_page		*memq_hint;
	int			ref_count;	/* Number of references */
#if	TASK_SWAPPER
	int			res_count;	/* Residency references (swap)*/
#endif	/* TASK_SWAPPER */
	unsigned int		resident_page_count;
						/* number of resident pages */
	unsigned int		wired_page_count; /* number of wired pages */
	unsigned int		reusable_page_count;

	struct vm_object	*copy;		/* Object that should receive
						 * a copy of my changed pages,
						 * for copy_delay, or just the
						 * temporary object that
						 * shadows this object, for
						 * copy_call.
						 */
	struct vm_object	*shadow;	/* My shadow */

	union {
		vm_object_offset_t vou_shadow_offset;	/* Offset into shadow */
		clock_sec_t	vou_cache_ts;	/* age of an external object
						 * present in cache
						 */
		task_t		vou_purgeable_owner;	/* If the purgeable bits below are set
							 * to volatile/empty, this is the task
							 * that owns this purgeable object.
							 */
		struct vm_shared_region_slide_info *vou_slide_info;
	} vo_un2;

	memory_object_t		pager;		/* Where to get data */
	vm_object_offset_t	paging_offset;	/* Offset into memory object */
	memory_object_control_t	pager_control;	/* Where data comes back */

	memory_object_copy_strategy_t
				copy_strategy;	/* How to handle data copy */

#if __LP64__
	/*
	 * Some user processes (mostly VirtualMachine software) take a large
	 * number of UPLs (via IOMemoryDescriptors) to wire pages in large
	 * VM objects and overflow the 16-bit "activity_in_progress" counter.
	 * Since we never enforced any limit there, let's give them 32 bits
	 * for backwards compatibility's sake.
	 */
	unsigned int		paging_in_progress:16,
				__object1_unused_bits:16;
	unsigned int		activity_in_progress;
#else /* __LP64__ */
	/*
	 * On 32-bit platforms, enlarging "activity_in_progress" would increase
	 * the size of "struct vm_object".  Since we don't know of any actual
	 * overflow of these counters on these platforms, let's keep the
	 * counters as 16-bit integers.
	 */
	unsigned short		paging_in_progress;
	unsigned short		activity_in_progress;
#endif /* __LP64__ */
						/* The memory object ports are
						 * being used (e.g., for pagein
						 * or pageout) -- don't change
						 * any of these fields (i.e.,
						 * don't collapse, destroy or
						 * terminate)
						 */

	unsigned int
	/* boolean_t array */ all_wanted:11,	/* Bit array of "want to be
						 * awakened" notations.  See
						 * VM_OBJECT_EVENT_* items
						 * below */
	/* boolean_t */	pager_created:1,	/* Has pager been created? */
	/* boolean_t */	pager_initialized:1,	/* Are fields ready to use? */
	/* boolean_t */	pager_ready:1,		/* Will pager take requests? */

	/* boolean_t */	pager_trusted:1,	/* The pager for this object
						 * is trusted. This is true for
						 * all internal objects (backed
						 * by the default pager)
						 */
	/* boolean_t */	can_persist:1,		/* The kernel may keep the data
						 * for this object (and rights
						 * to the memory object) after
						 * all address map references
						 * are deallocated?
						 */
	/* boolean_t */	internal:1,		/* Created by the kernel (and
						 * therefore, managed by the
						 * default memory manager)
						 */
	/* boolean_t */	temporary:1,		/* Permanent objects may be
						 * changed externally by the
						 * memory manager, and changes
						 * made in memory must be
						 * reflected back to the memory
						 * manager.  Temporary objects
						 * lack both of these
						 * characteristics.
						 */
	/* boolean_t */	private:1,		/* magic device_pager object,
						 * holds private pages only */
	/* boolean_t */	pageout:1,		/* pageout object. contains
						 * private pages that refer to
						 * a real memory object. */
	/* boolean_t */	alive:1,		/* Not yet terminated */

	/* boolean_t */	purgable:2,		/* Purgable state.  See
						 * VM_PURGABLE_*
						 */
	/* boolean_t */	purgeable_when_ripe:1,	/* Purgeable when a token
						 * becomes ripe.
						 */
	/* boolean_t */	shadowed:1,		/* Shadow may exist */
	/* boolean_t */	advisory_pageout:1,
						/* Instead of sending page
						 * via OOL, just notify
						 * pager that the kernel
						 * wants to discard it, page
						 * remains in object */
	/* boolean_t */	true_share:1,
						/* This object is mapped
						 * in more than one place
						 * and hence cannot be
						 * coalesced */
	/* boolean_t */	terminating:1,
						/* Allows vm_object_lookup
						 * and vm_object_deallocate
						 * to special case their
						 * behavior when they are
						 * called as a result of
						 * page cleaning during
						 * object termination
						 */
	/* boolean_t */	named:1,		/* Enforces an internal
						 * naming convention: by
						 * calling the right routines
						 * for allocation and
						 * destruction, UBC references
						 * against the vm_object are
						 * checked.
						 */
	/* boolean_t */	shadow_severed:1,
						/* When a permanent object
						 * backing a COW goes away
						 * unexpectedly.  This bit
						 * allows vm_fault to return
						 * an error rather than a
						 * zero filled page.
						 */
	/* boolean_t */	phys_contiguous:1,
						/* Memory is wired and
						 * guaranteed physically
						 * contiguous.  However
						 * it is not device memory
						 * and obeys normal virtual
						 * memory rules w.r.t pmap
						 * access bits.
						 */
	/* boolean_t */	nophyscache:1;
						/* When mapped at the
						 * pmap level, don't allow
						 * primary caching. (for
						 * I/O)
						 */



	queue_chain_t		cached_list;	/* Attachment point for the
						 * list of objects cached as a
						 * result of their can_persist
						 * value
						 */

	queue_head_t		msr_q;		/* memory object synchronise
						 * request queue */

	/*
	 * the following fields are not protected by any locks
	 * they are updated via atomic compare and swap
	 */
	vm_object_offset_t	last_alloc;	/* last allocation offset */
	int			sequential;	/* sequential access size */

	uint32_t		pages_created;
	uint32_t		pages_used;
#if	MACH_PAGEMAP
	vm_external_map_t	existence_map;	/* bitmap of pages written to
						 * backing storage */
#endif	/* MACH_PAGEMAP */
	vm_offset_t		cow_hint;	/* last page present in */
						/* shadow but not in object */
#if	MACH_ASSERT
	struct vm_object	*paging_object;	/* object which pages to be
						 * swapped out are temporarily
						 * put in current object
						 */
#endif
	/* hold object lock when altering */
	unsigned	int
		wimg_bits:8,		/* cache WIMG bits */
		code_signed:1,		/* pages are signed and should be
					 * validated; the signatures are
					 * stored with the pager */
		hashed:1,		/* object/pager entered in hash */
		transposed:1,		/* object was transposed with another */
		mapping_in_progress:1,	/* pager being mapped/unmapped */
		phantom_isssd:1,
		volatile_empty:1,
		volatile_fault:1,
		all_reusable:1,
		blocked_access:1,
		set_cache_attr:1,
		object_slid:1,
		purgeable_queue_type:2,
		purgeable_queue_group:3,
		io_tracking:1,
		__object2_unused_bits:7;	/* for expansion */

	uint32_t		scan_collisions;
#if CONFIG_PHANTOM_CACHE
	uint32_t		phantom_object_id;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	queue_head_t		uplq;		/* List of outstanding upls */
#endif

#ifdef	VM_PIP_DEBUG
	/*
	 * Keep track of the stack traces for the first holders
	 * of a "paging_in_progress" reference for this VM object.
	 */
#define VM_PIP_DEBUG_STACK_FRAMES	25	/* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS		10	/* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif	/* VM_PIP_DEBUG */

	queue_chain_t		objq;	/* object queue - currently used for purgable queues */

#if DEBUG
	void *purgeable_owner_bt[16];
	task_t vo_purgeable_volatilizer; /* who made it volatile? */
	void *purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};
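
/*
 * Note (illustrative): the vo_un1/vo_un2 union members above are meant to
 * be accessed through the vo_* shorthand macros defined before the struct,
 * e.g. "object->vo_size" expands to "object->vo_un1.vou_size" (only valid
 * if the object is internal) and "object->vo_shadow_offset" expands to
 * "object->vo_un2.vou_shadow_offset" (meaningful only while "shadow" is
 * set); which member of each union is live depends on the object's state.
 */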

#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)				\
	((object)->volatile_fault &&					\
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||			\
	  (object)->purgable == VM_PURGABLE_EMPTY))

#define VM_PAGE_REMOVE(page)						\
	MACRO_BEGIN							\
	vm_page_t __page = (page);					\
	vm_object_t __object = __page->object;				\
	if (__page == __object->memq_hint) {				\
		vm_page_t	__new_hint;				\
		queue_entry_t	__qe;					\
		__qe = queue_next(&__page->listq);			\
		if (queue_end(&__object->memq, __qe)) {			\
			__qe = queue_prev(&__page->listq);		\
			if (queue_end(&__object->memq, __qe)) {		\
				__qe = NULL;				\
			}						\
		}							\
		__new_hint = (vm_page_t) __qe;				\
		__object->memq_hint = __new_hint;			\
	}								\
	queue_remove(&__object->memq, __page, vm_page_t, listq);	\
	MACRO_END

#define VM_PAGE_INSERT(page, object)					\
	MACRO_BEGIN							\
	vm_page_t __page = (page);					\
	vm_object_t __object = (object);				\
	queue_enter(&__object->memq, __page, vm_page_t, listq);	\
	__object->memq_hint = __page;					\
	MACRO_END

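/*
 * Usage sketch (illustrative, not part of this interface): a caller that
 * owns the object lock moves a page on and off the resident-page queue.
 * "m" and "obj" are hypothetical locals, not names from this file.
 *
 *	vm_object_lock(obj);
 *	VM_PAGE_INSERT(m, obj);		-- memq_hint now points at m
 *	...
 *	VM_PAGE_REMOVE(m);		-- hint is repaired before unlinking
 *	vm_object_unlock(obj);
 */
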
extern
vm_object_t	kernel_object;		/* the single kernel object */

extern
vm_object_t	compressor_object;	/* the single compressor object */

extern
unsigned int	vm_object_absent_max;	/* maximum number of absent pages
					 * at a time for each object */

#define VM_MSYNC_INITIALIZED		0
#define VM_MSYNC_SYNCHRONIZING		1
#define VM_MSYNC_DONE			2

struct msync_req {
	queue_chain_t		msr_q;		/* object request queue */
	queue_chain_t		req_q;		/* vm_msync request queue */
	unsigned int		flag;
	vm_object_offset_t	offset;
	vm_object_size_t	length;
	vm_object_t		object;		/* back pointer */
	decl_lck_mtx_data(,	msync_req_lock)	/* Lock for this structure */
};

typedef struct msync_req	*msync_req_t;
#define MSYNC_REQ_NULL		((msync_req_t) 0)


extern lck_grp_t	vm_map_lck_grp;
extern lck_attr_t	vm_map_lck_attr;

/*
 * Macros to allocate and free msync_reqs
 */
#define msync_req_alloc(msr)						\
	MACRO_BEGIN							\
	(msr) = (msync_req_t)kalloc(sizeof(struct msync_req));		\
	lck_mtx_init(&(msr)->msync_req_lock, &vm_map_lck_grp, &vm_map_lck_attr); \
	(msr)->flag = VM_MSYNC_INITIALIZED;				\
	MACRO_END

#define msync_req_free(msr)						\
	MACRO_BEGIN							\
	lck_mtx_destroy(&(msr)->msync_req_lock, &vm_map_lck_grp);	\
	kfree((msr), sizeof(struct msync_req));				\
	MACRO_END

#define msr_lock(msr)	lck_mtx_lock(&(msr)->msync_req_lock)
#define msr_unlock(msr)	lck_mtx_unlock(&(msr)->msync_req_lock)

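/*
 * Usage sketch (illustrative): the expected lifecycle of an msync_req,
 * with "msr" as a hypothetical local.
 *
 *	msync_req_t msr;
 *
 *	msync_req_alloc(msr);		-- kalloc + lock init, flag = INITIALIZED
 *	msr_lock(msr);
 *	msr->flag = VM_MSYNC_SYNCHRONIZING;
 *	...				-- queue on object->msr_q until DONE
 *	msr_unlock(msr);
 *	msync_req_free(msr);		-- lock destroy + kfree
 */
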
/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void		vm_object_bootstrap(void);

__private_extern__ void		vm_object_init(void);

__private_extern__ void		vm_object_init_lck_grp(void);

__private_extern__ void		vm_object_reaper_init(void);

__private_extern__ vm_object_t	vm_object_allocate(
					vm_object_size_t	size);

__private_extern__ void	_vm_object_allocate(vm_object_size_t size,
					    vm_object_t object);

#if	TASK_SWAPPER

__private_extern__ void	vm_object_res_reference(
				vm_object_t		object);
__private_extern__ void	vm_object_res_deallocate(
				vm_object_t		object);
#define	VM_OBJ_RES_INCR(object)	(object)->res_count++
#define	VM_OBJ_RES_DECR(object)	(object)->res_count--

#else	/* TASK_SWAPPER */

#define VM_OBJ_RES_INCR(object)
#define VM_OBJ_RES_DECR(object)
#define vm_object_res_reference(object)
#define vm_object_res_deallocate(object)

#endif	/* TASK_SWAPPER */

#define vm_object_reference_locked(object)		\
	MACRO_BEGIN					\
	vm_object_t RLObject = (object);		\
	vm_object_lock_assert_exclusive(object);	\
	assert((RLObject)->ref_count > 0);		\
	(RLObject)->ref_count++;			\
	assert((RLObject)->ref_count > 1);		\
	vm_object_res_reference(RLObject);		\
	MACRO_END


#define vm_object_reference_shared(object)				\
	MACRO_BEGIN							\
	vm_object_t RLObject = (object);				\
	vm_object_lock_assert_shared(object);				\
	assert((RLObject)->ref_count > 0);				\
	OSAddAtomic(1, &(RLObject)->ref_count);				\
	assert((RLObject)->ref_count > 0);				\
	/* XXX we would need an atomic version of the following ... */	\
	vm_object_res_reference(RLObject);				\
	MACRO_END

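/*
 * Usage sketch (illustrative): taking a reference under each lock mode.
 * "obj" is a hypothetical local.  With the lock held exclusively the
 * plain increment is safe; with only the shared lock held, the atomic
 * variant must be used.
 *
 *	vm_object_lock(obj);			-- exclusive
 *	vm_object_reference_locked(obj);
 *	vm_object_unlock(obj);
 *
 *	vm_object_lock_shared(obj);		-- shared
 *	vm_object_reference_shared(obj);
 *	vm_object_unlock(obj);
 */
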
__private_extern__ void		vm_object_reference(
					vm_object_t	object);

#if	!MACH_ASSERT

#define	vm_object_reference(object)			\
MACRO_BEGIN						\
	vm_object_t RObject = (object);			\
	if (RObject) {					\
		vm_object_lock_shared(RObject);		\
		vm_object_reference_shared(RObject);	\
		vm_object_unlock(RObject);		\
	}						\
MACRO_END

#endif	/* MACH_ASSERT */

__private_extern__ void		vm_object_deallocate(
					vm_object_t	object);

__private_extern__ kern_return_t vm_object_release_name(
					vm_object_t	object,
					int		flags);

__private_extern__ void		vm_object_pmap_protect(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot);

__private_extern__ void		vm_object_pmap_protect_options(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot,
					int			options);

__private_extern__ void		vm_object_page_remove(
					vm_object_t		object,
					vm_object_offset_t	start,
					vm_object_offset_t	end);

__private_extern__ void		vm_object_deactivate_pages(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		kill_page,
					boolean_t		reusable_page);

__private_extern__ void		vm_object_reuse_pages(
					vm_object_t		object,
					vm_object_offset_t	start_offset,
					vm_object_offset_t	end_offset,
					boolean_t		allow_partial_reuse);

__private_extern__ void		vm_object_purge(
					vm_object_t		object,
					int			flags);

__private_extern__ kern_return_t vm_object_purgable_control(
					vm_object_t		object,
					vm_purgable_t		control,
					int			*state);

__private_extern__ kern_return_t vm_object_get_page_counts(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					unsigned int		*resident_page_count,
					unsigned int		*dirty_page_count);

__private_extern__ boolean_t	vm_object_coalesce(
					vm_object_t		prev_object,
					vm_object_t		next_object,
					vm_object_offset_t	prev_offset,
					vm_object_offset_t	next_offset,
					vm_object_size_t	prev_size,
					vm_object_size_t	next_size);

__private_extern__ boolean_t	vm_object_shadow(
					vm_object_t		*object,
					vm_object_offset_t	*offset,
					vm_object_size_t	length);

__private_extern__ void		vm_object_collapse(
					vm_object_t		object,
					vm_object_offset_t	offset,
					boolean_t		can_bypass);

__private_extern__ boolean_t	vm_object_copy_quickly(
					vm_object_t		*_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		*_src_needs_copy,
					boolean_t		*_dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_strategically(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					vm_object_t		*dst_object,
					vm_object_offset_t	*dst_offset,
					boolean_t		*dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_slowly(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		interruptible,
					vm_object_t		*_result_object);

__private_extern__ vm_object_t	vm_object_copy_delayed(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		src_object_shared);



__private_extern__ kern_return_t vm_object_destroy(
					vm_object_t	object,
					kern_return_t	reason);

__private_extern__ void		vm_object_pager_create(
					vm_object_t	object);

__private_extern__ void		vm_object_compressor_pager_create(
					vm_object_t	object);

__private_extern__ void		vm_object_page_map(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	(*map_fn)
						(void *, vm_object_offset_t),
					void			*map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					upl_size_t		size,
					upl_t			*upl,
					upl_page_info_t		*page_info,
					unsigned int		*count,
					int			flags);

__private_extern__ kern_return_t vm_object_transpose(
					vm_object_t		object1,
					vm_object_t		object2,
					vm_object_size_t	transpose_size);

__private_extern__ boolean_t	vm_object_sync(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		should_flush,
					boolean_t		should_return,
					boolean_t		should_iosync);

__private_extern__ kern_return_t vm_object_update(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	*error_offset,
					int			*io_errno,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);

__private_extern__ kern_return_t vm_object_lock_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);



__private_extern__ vm_object_t	vm_object_enter(
					memory_object_t		pager,
					vm_object_size_t	size,
					boolean_t		internal,
					boolean_t		init,
					boolean_t		check_named);


__private_extern__ void		vm_object_cluster_size(
					vm_object_t		object,
					vm_object_offset_t	*start,
					vm_size_t		*length,
					vm_object_fault_info_t	fault_info,
					uint32_t		*io_streaming);

__private_extern__ kern_return_t vm_object_populate_with_private(
					vm_object_t		object,
					vm_object_offset_t	offset,
					ppnum_t			phys_page,
					vm_size_t		size);

__private_extern__ void		vm_object_change_wimg_mode(
					vm_object_t		object,
					unsigned int		wimg_mode);

extern kern_return_t	adjust_vm_object_cache(
				vm_size_t	oval,
				vm_size_t	nval);

extern kern_return_t	vm_object_page_op(
				vm_object_t		object,
				vm_object_offset_t	offset,
				int			ops,
				ppnum_t			*phys_entry,
				int			*flags);

extern kern_return_t	vm_object_range_op(
				vm_object_t		object,
				vm_object_offset_t	offset_beg,
				vm_object_offset_t	offset_end,
				int			ops,
				uint32_t		*range);


__private_extern__ void		vm_object_reap_pages(
					vm_object_t	object,
					int		reap_type);
#define REAP_REAP	0
#define	REAP_TERMINATE	1
#define REAP_PURGEABLE	2
#define REAP_DATA_FLUSH	3

#if CONFIG_FREEZE
struct default_freezer_handle;

__private_extern__ kern_return_t
vm_object_pack(
	unsigned int	*purgeable_count,
	unsigned int	*wired_count,
	unsigned int	*clean_count,
	unsigned int	*dirty_count,
	unsigned int	dirty_budget,
	boolean_t	*shared,
	vm_object_t	src_object,
	struct default_freezer_handle *df_handle);

__private_extern__ void
vm_object_pack_pages(
	unsigned int	*wired_count,
	unsigned int	*clean_count,
	unsigned int	*dirty_count,
	unsigned int	dirty_budget,
	vm_object_t	src_object,
	struct default_freezer_handle *df_handle);

__private_extern__ void
vm_object_pageout(
	vm_object_t	object);

__private_extern__ kern_return_t
vm_object_pagein(
	vm_object_t	object);
#endif /* CONFIG_FREEZE */

#if CONFIG_IOSCHED
struct io_reprioritize_req {
	uint64_t	blkno;
	uint32_t	len;
	int		priority;
	struct vnode	*devvp;
	queue_chain_t	io_reprioritize_list;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif

/*
 *	Event waiting handling
 */

#define	VM_OBJECT_EVENT_INITIALIZED		0
#define	VM_OBJECT_EVENT_PAGER_READY		1
#define	VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
#define	VM_OBJECT_EVENT_MAPPING_IN_PROGRESS	3
#define	VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
#define	VM_OBJECT_EVENT_UNCACHING		5
#define	VM_OBJECT_EVENT_COPY_CALL		6
#define	VM_OBJECT_EVENT_CACHING			7
#define VM_OBJECT_EVENT_UNBLOCKED		8
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS	9

#define	vm_object_assert_wait(object, event, interruptible)		\
	(((object)->all_wanted |= 1 << (event)),			\
	 assert_wait((event_t)((vm_offset_t)(object)+(event)),(interruptible)))

#define	vm_object_wait(object, event, interruptible)			\
	(vm_object_assert_wait((object),(event),(interruptible)),	\
	vm_object_unlock(object),					\
	thread_block(THREAD_CONTINUE_NULL))				\

#define thread_sleep_vm_object(object, event, interruptible)		\
	lck_rw_sleep(&(object)->Lock, LCK_SLEEP_PROMOTED_PRI, (event_t)(event), (interruptible))

#define vm_object_sleep(object, event, interruptible)			\
	(((object)->all_wanted |= 1 << (event)),			\
	 thread_sleep_vm_object((object),				\
		((vm_offset_t)(object)+(event)), (interruptible)))

#define	vm_object_wakeup(object, event)					\
	MACRO_BEGIN							\
	if ((object)->all_wanted & (1 << (event)))			\
		thread_wakeup((event_t)((vm_offset_t)(object) + (event))); \
	(object)->all_wanted &= ~(1 << (event));			\
	MACRO_END

#define	vm_object_set_wanted(object, event)				\
	MACRO_BEGIN							\
	((object)->all_wanted |= (1 << (event)));			\
	MACRO_END

#define	vm_object_wanted(object, event)					\
	((object)->all_wanted & (1 << (event)))

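/*
 * Usage sketch (illustrative): waiting for a per-object event and
 * signalling it.  "obj" is a hypothetical local; vm_object_sleep drops
 * and reacquires the object lock around the block.
 *
 *	Waiter (object locked):
 *		while (!obj->pager_ready)
 *			vm_object_sleep(obj, VM_OBJECT_EVENT_PAGER_READY,
 *					THREAD_UNINT);
 *
 *	Waker (object locked):
 *		obj->pager_ready = TRUE;
 *		vm_object_wakeup(obj, VM_OBJECT_EVENT_PAGER_READY);
 */
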
/*
 *	Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)					\
	MACRO_BEGIN							\
	int pip = ((object)->paging_in_progress +			\
		   (object)->activity_in_progress);			\
	if (pip < VM_PIP_DEBUG_MAX_REFS) {				\
		(void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
				   VM_PIP_DEBUG_STACK_FRAMES);		\
	}								\
	MACRO_END
#else	/* VM_PIP_DEBUG */
#define	VM_PIP_DEBUG_BEGIN(object)
#endif	/* VM_PIP_DEBUG */

#define		vm_object_activity_begin(object)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->activity_in_progress++;				\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_begin(%p): overflow\n", (object));\
	}								\
	MACRO_END

#define		vm_object_activity_end(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_end(%p): underflow\n", (object));\
	}								\
	(object)->activity_in_progress--;				\
	if ((object)->paging_in_progress == 0 &&			\
	    (object)->activity_in_progress == 0)			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_IN_PROGRESS);	\
	MACRO_END

#define		vm_object_paging_begin(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->paging_in_progress++;					\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_begin(%p): overflow\n", (object));\
	}								\
	MACRO_END

#define		vm_object_paging_end(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_end(%p): underflow\n", (object));\
	}								\
	(object)->paging_in_progress--;					\
	if ((object)->paging_in_progress == 0) {			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
		if ((object)->activity_in_progress == 0)		\
			vm_object_wakeup((object),			\
					 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	}								\
	MACRO_END

#define		vm_object_paging_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0 ||			\
	       (object)->activity_in_progress != 0) {			\
		wait_result_t  _wr;					\
									\
		_wr = vm_object_sleep((object),				\
				VM_OBJECT_EVENT_PAGING_IN_PROGRESS,	\
				(interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	MACRO_END

#define vm_object_paging_only_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0) {			\
		wait_result_t  _wr;					\
									\
		_wr = vm_object_sleep((object),				\
				VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\
				(interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	MACRO_END

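/*
 * Usage sketch (illustrative): a pageout-style operation holds the object
 * lock exclusively and brackets its work with paging_begin/paging_end,
 * while a terminator drains the counters first.  "obj" is a hypothetical
 * local.
 *
 *	vm_object_lock(obj);
 *	vm_object_paging_begin(obj);	-- pin against collapse/terminate
 *	...issue the I/O...
 *	vm_object_paging_end(obj);	-- may wake PAGING_IN_PROGRESS waiters
 *	vm_object_unlock(obj);
 *
 *	Terminator: vm_object_paging_wait(obj, THREAD_UNINT);
 */
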

#define vm_object_mapping_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert(! (object)->mapping_in_progress);			\
	(object)->mapping_in_progress = TRUE;				\
	MACRO_END

#define vm_object_mapping_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->mapping_in_progress);				\
	(object)->mapping_in_progress = FALSE;				\
	vm_object_wakeup((object),					\
			 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);		\
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->mapping_in_progress) {				\
		wait_result_t	_wr;					\
									\
		_wr = vm_object_sleep((object),				\
				      VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
				      (interruptible));			\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	assert(!(object)->mapping_in_progress);				\
	MACRO_END



#define OBJECT_LOCK_SHARED	0
#define OBJECT_LOCK_EXCLUSIVE	1

extern lck_grp_t	vm_object_lck_grp;
extern lck_grp_attr_t	vm_object_lck_grp_attr;
extern lck_attr_t	vm_object_lck_attr;
extern lck_attr_t	kernel_object_lck_attr;
extern lck_attr_t	compressor_object_lck_attr;

extern vm_object_t	vm_pageout_scan_wants_object;

extern void		vm_object_lock(vm_object_t);
extern boolean_t	vm_object_lock_try(vm_object_t);
extern boolean_t	_vm_object_lock_try(vm_object_t);
extern boolean_t	vm_object_lock_avoid(vm_object_t);
extern void		vm_object_lock_shared(vm_object_t);
extern boolean_t	vm_object_lock_try_shared(vm_object_t);

/*
 * Object locking macros
 */

#define vm_object_lock_init(object)					\
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,		\
		    (((object) == kernel_object ||			\
		      (object) == vm_submap_object) ?			\
		     &kernel_object_lck_attr :				\
		     (((object) == compressor_object) ?			\
		      &compressor_object_lck_attr :			\
		      &vm_object_lck_attr)))
#define vm_object_lock_destroy(object)	lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_unlock(object)	lck_rw_done(&(object)->Lock)
#define vm_object_lock_upgrade(object)	lck_rw_lock_shared_to_exclusive(&(object)->Lock)
#define vm_object_lock_try_scan(object)	_vm_object_lock_try(object)

/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object)	\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object)	\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object)	\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#else  /* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#endif /* MACH_ASSERT || DEBUG */
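
/*
 * Usage sketch (illustrative): typical lock discipline around a read that
 * may turn into a write.  "obj" is a hypothetical local.  Note that
 * vm_object_lock_upgrade() can fail: lck_rw_lock_shared_to_exclusive()
 * drops the lock entirely on failure, so the caller must relock and
 * revalidate.
 *
 *	vm_object_lock_shared(obj);
 *	...read-only inspection...
 *	if (!vm_object_lock_upgrade(obj)) {
 *		vm_object_lock(obj);	-- retake exclusively; recheck state
 *	}
 *	...mutation, vm_object_lock_assert_exclusive(obj) now satisfied...
 *	vm_object_unlock(obj);
 */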

#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))

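/*
 * Example (illustrative, assuming 4 KB pages, i.e. PAGE_MASK == 0xfff):
 *
 *	vm_object_trunc_page(0x1234) == 0x1000
 *	vm_object_round_page(0x1234) == 0x2000
 *	vm_object_round_page(0x1000) == 0x1000	-- already page-aligned
 */
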
extern void	vm_object_cache_add(vm_object_t);
extern void	vm_object_cache_remove(vm_object_t);
extern int	vm_object_cache_evict(int, int);

#endif	/* _VM_VM_OBJECT_H_ */