/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm_object.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory object module definitions.
 */

#ifndef _VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <debug.h>
#include <mach_assert.h>
#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>

#include <vm/vm_external.h>

#include <vm/vm_options.h>
#include <vm/vm_page.h>

#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern boolean_t vm_object_tracking_inited;
extern btlog_t *vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_NUM_RECORDS	50000
#define VM_OBJECT_TRACKING_BTDEPTH	7
#define VM_OBJECT_TRACKING_OP_CREATED	1
#define VM_OBJECT_TRACKING_OP_MODIFIED	2
#define VM_OBJECT_TRACKING_OP_TRUESHARE	3
#endif /* VM_OBJECT_TRACKING */

struct vm_page;
struct vm_shared_region_slide_info;

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */

struct vm_object_fault_info {
	int		interruptible;
	uint32_t	user_tag;
	vm_size_t	cluster_size;
	vm_behavior_t	behavior;
	vm_map_offset_t	lo_offset;
	vm_map_offset_t	hi_offset;
	unsigned int
	/* boolean_t */	no_cache:1,
	/* boolean_t */	stealth:1,
	/* boolean_t */	io_sync:1,
	/* boolean_t */	cs_bypass:1,
	/* boolean_t */	mark_zf_absent:1,
	/* boolean_t */	batch_pmap_op:1,
			__vm_object_fault_info_unused_bits:26;
	int		pmap_options;
};
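
/*
 * Illustrative sketch only: how a fault path might fill one of these
 * in before asking for a cluster size.  The field values and the
 * "object_size" bound are hypothetical; the real call sites live in
 * the VM fault code, not in this header.
 *
 *	struct vm_object_fault_info fault_info;
 *
 *	bzero(&fault_info, sizeof (fault_info));
 *	fault_info.interruptible = THREAD_UNINT;
 *	fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
 *	fault_info.lo_offset = 0;
 *	fault_info.hi_offset = object_size;	// hypothetical bound
 */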


#define vo_size				vo_un1.vou_size
#define vo_cache_pages_to_scan		vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset		vo_un2.vou_shadow_offset
#define vo_cache_ts			vo_un2.vou_cache_ts
#define vo_purgeable_owner		vo_un2.vou_purgeable_owner
#define vo_slide_info			vo_un2.vou_slide_info

struct vm_object {
	/*
	 * On 64-bit systems we pack the pointers hung off the memq.
	 * Those pointers have to be able to point back to the memq.
	 * The packed pointers are required to be on a 64-byte boundary,
	 * which means two things for the vm_object: (1) the memq
	 * struct has to be the first element of the structure so that
	 * we can control its alignment; (2) the vm_object must be
	 * aligned on a 64-byte boundary.  For static vm_objects
	 * this is accomplished via the 'aligned' attribute; for
	 * vm_objects in the zone pool, it is accomplished by
	 * rounding the size of the vm_object element up to the nearest
	 * 64-byte multiple before creating the zone.
	 */
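	/*
	 * A minimal sketch of that rounding, with hypothetical local
	 * names (the real zone setup lives in vm_object.c):
	 *
	 *	vm_size_t elem_size =
	 *	    (sizeof (struct vm_object) + 63) & ~((vm_size_t) 63);
	 *	vm_object_zone = zinit(elem_size, max_bytes, alloc_bytes,
	 *			       "vm objects");
	 */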
	vm_page_queue_head_t	memq;		/* Resident memory - must be first */
	lck_rw_t		Lock;		/* Synchronization */

#if DEVELOPMENT || DEBUG
	thread_t		Lock_owner;
#endif
	union {
		vm_object_size_t vou_size;	/* Object size (only valid if internal) */
		int		vou_cache_pages_to_scan;	/* pages yet to be visited in an
								 * external object in cache
								 */
	} vo_un1;

	struct vm_page		*memq_hint;
	int			ref_count;	/* Number of references */
	unsigned int		resident_page_count;
						/* number of resident pages */
	unsigned int		wired_page_count;	/* number of wired pages */
	unsigned int		reusable_page_count;

	struct vm_object	*copy;		/* Object that should receive
						 * a copy of my changed pages,
						 * for copy_delay, or just the
						 * temporary object that
						 * shadows this object, for
						 * copy_call.
						 */
	struct vm_object	*shadow;	/* My shadow */

	union {
		vm_object_offset_t vou_shadow_offset;	/* Offset into shadow */
		clock_sec_t	vou_cache_ts;	/* age of an external object
						 * present in cache
						 */
		task_t		vou_purgeable_owner;	/* If the purgable bits below are set
							 * to volatile/empty, this is the task
							 * that owns this purgeable object.
							 */
		struct vm_shared_region_slide_info *vou_slide_info;
	} vo_un2;

	memory_object_t		pager;		/* Where to get data */
	vm_object_offset_t	paging_offset;	/* Offset into memory object */
	memory_object_control_t	pager_control;	/* Where data comes back */

	memory_object_copy_strategy_t
				copy_strategy;	/* How to handle data copy */

#if __LP64__
	/*
	 * Some user processes (mostly VirtualMachine software) take a large
	 * number of UPLs (via IOMemoryDescriptors) to wire pages in large
	 * VM objects and overflow the 16-bit "activity_in_progress" counter.
	 * Since we never enforced any limit there, let's give them 32 bits
	 * for backwards compatibility's sake.
	 */
	unsigned int		paging_in_progress:16,
				__object1_unused_bits:16;
	unsigned int		activity_in_progress;
#else /* __LP64__ */
	/*
	 * On 32-bit platforms, enlarging "activity_in_progress" would increase
	 * the size of "struct vm_object".  Since we don't know of any actual
	 * overflow of these counters on these platforms, let's keep the
	 * counters as 16-bit integers.
	 */
	unsigned short		paging_in_progress;
	unsigned short		activity_in_progress;
#endif /* __LP64__ */
						/* The memory object ports are
						 * being used (e.g., for pagein
						 * or pageout) -- don't change
						 * any of these fields (i.e.,
						 * don't collapse, destroy or
						 * terminate)
						 */

	unsigned int
	/* boolean_t array */ all_wanted:11,	/* Bit array of "want to be
						 * awakened" notations.  See
						 * VM_OBJECT_EVENT_* items
						 * below */
	/* boolean_t */	pager_created:1,	/* Has pager been created? */
	/* boolean_t */	pager_initialized:1,	/* Are fields ready to use? */
	/* boolean_t */	pager_ready:1,		/* Will pager take requests? */

	/* boolean_t */	pager_trusted:1,	/* The pager for this object
						 * is trusted. This is true for
						 * all internal objects (backed
						 * by the default pager)
						 */
	/* boolean_t */	can_persist:1,		/* The kernel may keep the data
						 * for this object (and rights
						 * to the memory object) after
						 * all address map references
						 * are deallocated?
						 */
	/* boolean_t */	internal:1,		/* Created by the kernel (and
						 * therefore, managed by the
						 * default memory manager)
						 */
	/* boolean_t */	temporary:1,		/* Permanent objects may be
						 * changed externally by the
						 * memory manager, and changes
						 * made in memory must be
						 * reflected back to the memory
						 * manager.  Temporary objects
						 * lack both of these
						 * characteristics.
						 */
	/* boolean_t */	private:1,		/* magic device_pager object,
						 * holds private pages only */
	/* boolean_t */	pageout:1,		/* pageout object. contains
						 * private pages that refer to
						 * a real memory object. */
	/* boolean_t */	alive:1,		/* Not yet terminated */

	/* boolean_t */	purgable:2,		/* Purgable state.  See
						 * VM_PURGABLE_*
						 */
	/* boolean_t */	purgeable_when_ripe:1,	/* Purgeable when a token
						 * becomes ripe.
						 */
	/* boolean_t */	shadowed:1,		/* Shadow may exist */
	/* boolean_t */	advisory_pageout:1,
						/* Instead of sending page
						 * via OOL, just notify
						 * pager that the kernel
						 * wants to discard it, page
						 * remains in object */
	/* boolean_t */	true_share:1,
						/* This object is mapped
						 * in more than one place
						 * and hence cannot be
						 * coalesced */
	/* boolean_t */	terminating:1,
						/* Allows vm_object_lookup
						 * and vm_object_deallocate
						 * to special case their
						 * behavior when they are
						 * called as a result of
						 * page cleaning during
						 * object termination
						 */
	/* boolean_t */	named:1,		/* Enforces an internal
						 * naming convention: by
						 * calling the right routines
						 * for allocation and
						 * destruction, UBC references
						 * against the vm_object are
						 * checked.
						 */
	/* boolean_t */	shadow_severed:1,
						/* When a permanent object
						 * backing a COW goes away
						 * unexpectedly.  This bit
						 * allows vm_fault to return
						 * an error rather than a
						 * zero filled page.
						 */
	/* boolean_t */	phys_contiguous:1,
						/* Memory is wired and
						 * guaranteed physically
						 * contiguous.  However
						 * it is not device memory
						 * and obeys normal virtual
						 * memory rules w.r.t pmap
						 * access bits.
						 */
	/* boolean_t */	nophyscache:1;
						/* When mapped at the
						 * pmap level, don't allow
						 * primary caching. (for
						 * I/O)
						 */

	queue_chain_t		cached_list;	/* Attachment point for the
						 * list of objects cached as a
						 * result of their can_persist
						 * value
						 */

	queue_head_t		msr_q;		/* memory object synchronise
						 * request queue */

	/*
	 * the following fields are not protected by any locks
	 * they are updated via atomic compare and swap
	 */
	vm_object_offset_t	last_alloc;	/* last allocation offset */
	int			sequential;	/* sequential access size */

	uint32_t		pages_created;
	uint32_t		pages_used;
	vm_offset_t		cow_hint;	/* last page present in */
						/* shadow but not in object */
#if MACH_ASSERT
	struct vm_object	*paging_object;	/* object whose pages being
						 * swapped out are temporarily
						 * put in the current object
						 */
#endif
	/* hold object lock when altering */
	unsigned int
		wimg_bits:8,		/* cache WIMG bits */
		code_signed:1,		/* pages are signed and should be
					 * validated; the signatures are stored
					 * with the pager */
		hashed:1,		/* object/pager entered in hash */
		transposed:1,		/* object was transposed with another */
		mapping_in_progress:1,	/* pager being mapped/unmapped */
		phantom_isssd:1,
		volatile_empty:1,
		volatile_fault:1,
		all_reusable:1,
		blocked_access:1,
		set_cache_attr:1,
		object_slid:1,
		purgeable_queue_type:2,
		purgeable_queue_group:3,
		io_tracking:1,
#if CONFIG_SECLUDED_MEMORY
		eligible_for_secluded:1,
		can_grab_secluded:1,
#else /* CONFIG_SECLUDED_MEMORY */
		__object3_unused_bits:2,
#endif /* CONFIG_SECLUDED_MEMORY */
		__object2_unused_bits:5;	/* for expansion */

	uint8_t			scan_collisions;
	vm_tag_t		wire_tag;
	uint8_t			__object4_unused_bits[2];

#if CONFIG_PHANTOM_CACHE
	uint32_t		phantom_object_id;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	queue_head_t		uplq;		/* List of outstanding upls */
#endif

#ifdef VM_PIP_DEBUG
	/*
	 * Keep track of the stack traces for the first holders
	 * of a "paging_in_progress" reference for this VM object.
	 */
#define VM_PIP_DEBUG_STACK_FRAMES	25	/* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS		10	/* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif /* VM_PIP_DEBUG */

	queue_chain_t		objq;	/* object queue - currently used for purgable queues */

#if DEBUG
	void			*purgeable_owner_bt[16];
	task_t			vo_purgeable_volatilizer;	/* who made it volatile? */
	void			*purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};

#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)				\
	((object)->volatile_fault &&					\
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||			\
	  (object)->purgable == VM_PURGABLE_EMPTY))

extern
vm_object_t	kernel_object;		/* the single kernel object */

extern
vm_object_t	compressor_object;	/* the single compressor object */

extern
unsigned int	vm_object_absent_max;	/* maximum number of absent pages
					 * at a time for each object */

#define VM_MSYNC_INITIALIZED		0
#define VM_MSYNC_SYNCHRONIZING		1
#define VM_MSYNC_DONE			2

struct msync_req {
	queue_chain_t		msr_q;		/* object request queue */
	queue_chain_t		req_q;		/* vm_msync request queue */
	unsigned int		flag;
	vm_object_offset_t	offset;
	vm_object_size_t	length;
	vm_object_t		object;		/* back pointer */
	decl_lck_mtx_data(,	msync_req_lock)	/* Lock for this structure */
};

typedef struct msync_req	*msync_req_t;
#define MSYNC_REQ_NULL		((msync_req_t) 0)


extern lck_grp_t	vm_map_lck_grp;
extern lck_attr_t	vm_map_lck_attr;

/*
 * Macros to allocate and free msync_reqs
 */
#define msync_req_alloc(msr)						\
	MACRO_BEGIN							\
	(msr) = (msync_req_t)kalloc(sizeof(struct msync_req));		\
	lck_mtx_init(&(msr)->msync_req_lock, &vm_map_lck_grp, &vm_map_lck_attr); \
	(msr)->flag = VM_MSYNC_INITIALIZED;				\
	MACRO_END

#define msync_req_free(msr)						\
	MACRO_BEGIN							\
	lck_mtx_destroy(&(msr)->msync_req_lock, &vm_map_lck_grp);	\
	kfree((msr), sizeof(struct msync_req));				\
	MACRO_END

#define msr_lock(msr)	lck_mtx_lock(&(msr)->msync_req_lock)
#define msr_unlock(msr)	lck_mtx_unlock(&(msr)->msync_req_lock)
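
/*
 * Typical lifecycle, as an illustrative sketch only (the hypothetical
 * local "msr" and the queueing step stand in for the real vm_msync
 * code path):
 *
 *	msync_req_t msr;
 *
 *	msync_req_alloc(msr);		// kalloc + mutex init
 *	msr_lock(msr);
 *	msr->flag = VM_MSYNC_SYNCHRONIZING;
 *	// ... queue it on the object's msr_q and wait ...
 *	msr_unlock(msr);
 *	msync_req_free(msr);		// mutex destroy + kfree
 */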

#define VM_OBJECT_WIRED(object)						\
	MACRO_BEGIN							\
	if ((object)->purgable == VM_PURGABLE_DENY)			\
	{								\
		lck_spin_lock(&vm_objects_wired_lock);			\
		assert(!(object)->objq.next);				\
		assert(!(object)->objq.prev);				\
		queue_enter(&vm_objects_wired, (object), vm_object_t, objq); \
		lck_spin_unlock(&vm_objects_wired_lock);		\
	}								\
	MACRO_END

#define VM_OBJECT_UNWIRED(object)					\
	MACRO_BEGIN							\
	(object)->wire_tag = VM_KERN_MEMORY_NONE;			\
	if (((object)->purgable == VM_PURGABLE_DENY) && (object)->objq.next) \
	{								\
		lck_spin_lock(&vm_objects_wired_lock);			\
		queue_remove(&vm_objects_wired, (object), vm_object_t, objq); \
		lck_spin_unlock(&vm_objects_wired_lock);		\
	}								\
	MACRO_END


#define OBJECT_LOCK_SHARED	0
#define OBJECT_LOCK_EXCLUSIVE	1

extern lck_grp_t	vm_object_lck_grp;
extern lck_grp_attr_t	vm_object_lck_grp_attr;
extern lck_attr_t	vm_object_lck_attr;
extern lck_attr_t	kernel_object_lck_attr;
extern lck_attr_t	compressor_object_lck_attr;

extern vm_object_t	vm_pageout_scan_wants_object;

extern void		vm_object_lock(vm_object_t);
extern boolean_t	vm_object_lock_try(vm_object_t);
extern boolean_t	_vm_object_lock_try(vm_object_t);
extern boolean_t	vm_object_lock_avoid(vm_object_t);
extern void		vm_object_lock_shared(vm_object_t);
extern boolean_t	vm_object_lock_try_shared(vm_object_t);
extern void		vm_object_unlock(vm_object_t);
extern boolean_t	vm_object_lock_upgrade(vm_object_t);

/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)					\
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,		\
		    (((object) == kernel_object ||			\
		      (object) == vm_submap_object) ?			\
		     &kernel_object_lck_attr :				\
		     (((object) == compressor_object) ?			\
		      &compressor_object_lck_attr :			\
		      &vm_object_lck_attr)))
#define vm_object_lock_destroy(object)	lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_lock_try_scan(object)	_vm_object_lock_try(object)
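
/*
 * Illustrative locking patterns only, not lifted from a real call
 * site ("need_exclusive" is a stand-in condition).  Writers take the
 * lock exclusive; readers may take it shared and upgrade, bearing in
 * mind that a failed upgrade drops the lock entirely:
 *
 *	vm_object_lock(object);			// exclusive
 *	// ... modify object fields ...
 *	vm_object_unlock(object);
 *
 *	vm_object_lock_shared(object);
 *	// ... read object fields ...
 *	if (need_exclusive &&
 *	    !vm_object_lock_upgrade(object)) {
 *		// lock was dropped; retake and revalidate
 *		vm_object_lock(object);
 *	}
 *	vm_object_unlock(object);
 */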

/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object)		\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object)		\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object)		\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_object_lock_assert_notheld(object)		\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
#else /* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#define vm_object_lock_assert_notheld(object)
#endif /* MACH_ASSERT || DEBUG */


/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void		vm_object_bootstrap(void);

__private_extern__ void		vm_object_init(void);

__private_extern__ void		vm_object_init_lck_grp(void);

__private_extern__ void		vm_object_reaper_init(void);

__private_extern__ vm_object_t	vm_object_allocate(vm_object_size_t size);

__private_extern__ void		_vm_object_allocate(vm_object_size_t size,
						    vm_object_t object);

#if TASK_SWAPPER

__private_extern__ void		vm_object_res_reference(
					vm_object_t object);
__private_extern__ void		vm_object_res_deallocate(
					vm_object_t object);
#define VM_OBJ_RES_INCR(object)	(object)->res_count++
#define VM_OBJ_RES_DECR(object)	(object)->res_count--

#else /* TASK_SWAPPER */

#define VM_OBJ_RES_INCR(object)
#define VM_OBJ_RES_DECR(object)
#define vm_object_res_reference(object)
#define vm_object_res_deallocate(object)

#endif /* TASK_SWAPPER */

#define vm_object_reference_locked(object)			\
	MACRO_BEGIN						\
	vm_object_t RLObject = (object);			\
	vm_object_lock_assert_exclusive(object);		\
	assert((RLObject)->ref_count > 0);			\
	(RLObject)->ref_count++;				\
	assert((RLObject)->ref_count > 1);			\
	vm_object_res_reference(RLObject);			\
	MACRO_END


#define vm_object_reference_shared(object)			\
	MACRO_BEGIN						\
	vm_object_t RLObject = (object);			\
	vm_object_lock_assert_shared(object);			\
	assert((RLObject)->ref_count > 0);			\
	OSAddAtomic(1, &(RLObject)->ref_count);			\
	assert((RLObject)->ref_count > 0);			\
	/* XXX we would need an atomic version of the following ... */ \
	vm_object_res_reference(RLObject);			\
	MACRO_END
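
/*
 * Sketch of taking and releasing an extra reference (illustrative
 * only; vm_object_deallocate() below is the matching release):
 *
 *	vm_object_lock(object);
 *	vm_object_reference_locked(object);	// ref_count++
 *	vm_object_unlock(object);
 *	// ... use the object ...
 *	vm_object_deallocate(object);		// drops the reference
 */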


__private_extern__ void		vm_object_reference(
					vm_object_t object);

#if !MACH_ASSERT

#define vm_object_reference(object)			\
	MACRO_BEGIN					\
	vm_object_t RObject = (object);			\
	if (RObject) {					\
		vm_object_lock_shared(RObject);		\
		vm_object_reference_shared(RObject);	\
		vm_object_unlock(RObject);		\
	}						\
	MACRO_END

#endif /* MACH_ASSERT */

__private_extern__ void		vm_object_deallocate(
					vm_object_t	object);

__private_extern__ kern_return_t vm_object_release_name(
					vm_object_t	object,
					int		flags);

__private_extern__ void		vm_object_pmap_protect(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot);

__private_extern__ void		vm_object_pmap_protect_options(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot,
					int			options);

__private_extern__ void		vm_object_page_remove(
					vm_object_t		object,
					vm_object_offset_t	start,
					vm_object_offset_t	end);

__private_extern__ void		vm_object_deactivate_pages(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		kill_page,
					boolean_t		reusable_page,
					struct pmap		*pmap,
					vm_map_offset_t		pmap_offset);

__private_extern__ void		vm_object_reuse_pages(
					vm_object_t		object,
					vm_object_offset_t	start_offset,
					vm_object_offset_t	end_offset,
					boolean_t		allow_partial_reuse);

__private_extern__ void		vm_object_purge(
					vm_object_t		object,
					int			flags);

__private_extern__ kern_return_t vm_object_purgable_control(
					vm_object_t	object,
					vm_purgable_t	control,
					int		*state);

__private_extern__ kern_return_t vm_object_get_page_counts(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					unsigned int		*resident_page_count,
					unsigned int		*dirty_page_count);

__private_extern__ boolean_t	vm_object_coalesce(
					vm_object_t		prev_object,
					vm_object_t		next_object,
					vm_object_offset_t	prev_offset,
					vm_object_offset_t	next_offset,
					vm_object_size_t	prev_size,
					vm_object_size_t	next_size);

__private_extern__ boolean_t	vm_object_shadow(
					vm_object_t		*object,
					vm_object_offset_t	*offset,
					vm_object_size_t	length);

__private_extern__ void		vm_object_collapse(
					vm_object_t		object,
					vm_object_offset_t	offset,
					boolean_t		can_bypass);

__private_extern__ boolean_t	vm_object_copy_quickly(
					vm_object_t		*_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		*_src_needs_copy,
					boolean_t		*_dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_strategically(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					vm_object_t		*dst_object,
					vm_object_offset_t	*dst_offset,
					boolean_t		*dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_slowly(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		interruptible,
					vm_object_t		*_result_object);

__private_extern__ vm_object_t	vm_object_copy_delayed(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		src_object_shared);



__private_extern__ kern_return_t vm_object_destroy(
					vm_object_t	object,
					kern_return_t	reason);

__private_extern__ void		vm_object_pager_create(
					vm_object_t	object);

__private_extern__ void		vm_object_compressor_pager_create(
					vm_object_t	object);

__private_extern__ void		vm_object_page_map(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	(*map_fn)
						(void *, vm_object_offset_t),
					void			*map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					upl_size_t		size,
					upl_t			*upl,
					upl_page_info_t		*page_info,
					unsigned int		*count,
					upl_control_flags_t	flags);

__private_extern__ kern_return_t vm_object_transpose(
					vm_object_t		object1,
					vm_object_t		object2,
					vm_object_size_t	transpose_size);

__private_extern__ boolean_t	vm_object_sync(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		should_flush,
					boolean_t		should_return,
					boolean_t		should_iosync);

__private_extern__ kern_return_t vm_object_update(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	*error_offset,
					int			*io_errno,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);

__private_extern__ kern_return_t vm_object_lock_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);



__private_extern__ vm_object_t	vm_object_enter(
					memory_object_t		pager,
					vm_object_size_t	size,
					boolean_t		internal,
					boolean_t		init,
					boolean_t		check_named);


__private_extern__ void		vm_object_cluster_size(
					vm_object_t		object,
					vm_object_offset_t	*start,
					vm_size_t		*length,
					vm_object_fault_info_t	fault_info,
					uint32_t		*io_streaming);

__private_extern__ kern_return_t vm_object_populate_with_private(
					vm_object_t		object,
					vm_object_offset_t	offset,
					ppnum_t			phys_page,
					vm_size_t		size);

__private_extern__ void		vm_object_change_wimg_mode(
					vm_object_t		object,
					unsigned int		wimg_mode);

extern kern_return_t	adjust_vm_object_cache(
				vm_size_t oval,
				vm_size_t nval);

extern kern_return_t	vm_object_page_op(
				vm_object_t		object,
				vm_object_offset_t	offset,
				int			ops,
				ppnum_t			*phys_entry,
				int			*flags);

extern kern_return_t	vm_object_range_op(
				vm_object_t		object,
				vm_object_offset_t	offset_beg,
				vm_object_offset_t	offset_end,
				int			ops,
				uint32_t		*range);


__private_extern__ void		vm_object_reap_pages(
					vm_object_t	object,
					int		reap_type);
#define REAP_REAP	0
#define REAP_TERMINATE	1
#define REAP_PURGEABLE	2
#define REAP_DATA_FLUSH	3

#if CONFIG_FREEZE

__private_extern__ void
vm_object_compressed_freezer_pageout(
	vm_object_t object);

__private_extern__ void
vm_object_compressed_freezer_done(
	void);

#endif /* CONFIG_FREEZE */

__private_extern__ void
vm_object_pageout(
	vm_object_t object);

#if CONFIG_IOSCHED
struct io_reprioritize_req {
	uint64_t	blkno;
	uint32_t	len;
	int		priority;
	struct vnode	*devvp;
	queue_chain_t	io_reprioritize_list;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif

/*
 *	Event waiting handling
 */

#define VM_OBJECT_EVENT_INITIALIZED		0
#define VM_OBJECT_EVENT_PAGER_READY		1
#define VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
#define VM_OBJECT_EVENT_MAPPING_IN_PROGRESS	3
#define VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
#define VM_OBJECT_EVENT_UNCACHING		5
#define VM_OBJECT_EVENT_COPY_CALL		6
#define VM_OBJECT_EVENT_CACHING			7
#define VM_OBJECT_EVENT_UNBLOCKED		8
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS	9

#define VM_OBJECT_EVENT_MAX	10	/* 11 bits in "all_wanted", so 0->10 */
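
/*
 * Hedged sketch of the event protocol built on "all_wanted"; the
 * inlines below are the real interface, this only shows the shape.
 * Both sides hold the object lock exclusive:
 *
 *	// waiter:
 *	while (!object->pager_ready) {
 *		vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *				THREAD_UNINT);
 *	}
 *
 *	// wakeup side:
 *	object->pager_ready = TRUE;
 *	vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
 */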

static __inline__ wait_result_t
vm_object_assert_wait(
	vm_object_t		object,
	int			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = assert_wait((event_t)((vm_offset_t)object + event),
			 interruptible);
	return wr;
}

static __inline__ wait_result_t
vm_object_wait(
	vm_object_t		object,
	int			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

	vm_object_assert_wait(object, event, interruptible);
	vm_object_unlock(object);
	wr = thread_block(THREAD_CONTINUE_NULL);
	return wr;
}

static __inline__ wait_result_t
thread_sleep_vm_object(
	vm_object_t		object,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

#if DEVELOPMENT || DEBUG
	if (object->Lock_owner != current_thread())
		panic("thread_sleep_vm_object: not owner - %p\n", object);
	object->Lock_owner = 0;
#endif
	wr = lck_rw_sleep(&object->Lock,
			  LCK_SLEEP_PROMOTED_PRI,
			  event,
			  interruptible);
#if DEVELOPMENT || DEBUG
	object->Lock_owner = current_thread();
#endif
	return wr;
}

static __inline__ wait_result_t
vm_object_sleep(
	vm_object_t		object,
	int			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = thread_sleep_vm_object(object,
				    (event_t)((vm_offset_t)object + event),
				    interruptible);
	return wr;
}

static __inline__ void
vm_object_wakeup(
	vm_object_t	object,
	int		event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	if (object->all_wanted & (1 << event))
		thread_wakeup((event_t)((vm_offset_t)object + event));
	object->all_wanted &= ~(1 << event);
}

static __inline__ void
vm_object_set_wanted(
	vm_object_t	object,
	int		event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= (1 << event);
}

static __inline__ int
vm_object_wanted(
	vm_object_t	object,
	int		event)
{
	vm_object_lock_assert_held(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	return object->all_wanted & (1 << event);
}

/*
 *	Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)					\
	MACRO_BEGIN							\
	int pip = ((object)->paging_in_progress +			\
		   (object)->activity_in_progress);			\
	if (pip < VM_PIP_DEBUG_MAX_REFS) {				\
		(void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
				   VM_PIP_DEBUG_STACK_FRAMES);		\
	}								\
	MACRO_END
#else /* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif /* VM_PIP_DEBUG */

#define vm_object_activity_begin(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->activity_in_progress++;				\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_begin(%p): overflow\n", (object)); \
	}								\
	MACRO_END

#define vm_object_activity_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_end(%p): underflow\n", (object)); \
	}								\
	(object)->activity_in_progress--;				\
	if ((object)->paging_in_progress == 0 &&			\
	    (object)->activity_in_progress == 0)			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_IN_PROGRESS);	\
	MACRO_END

#define vm_object_paging_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->paging_in_progress++;					\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_begin(%p): overflow\n", (object)); \
	}								\
	MACRO_END

#define vm_object_paging_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_end(%p): underflow\n", (object)); \
	}								\
	(object)->paging_in_progress--;					\
	if ((object)->paging_in_progress == 0) {			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
		if ((object)->activity_in_progress == 0)		\
			vm_object_wakeup((object),			\
					 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	}								\
	MACRO_END
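
/*
 * Illustrative bracket only: a paging operation bumps the counter
 * under the exclusive object lock, drops the lock for the I/O, and
 * ends the operation the same way so waiters get woken:
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	vm_object_unlock(object);
 *	// ... issue the I/O ...
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);		// may wake waiters
 *	vm_object_unlock(object);
 */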

#define vm_object_paging_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0 ||			\
	       (object)->activity_in_progress != 0) {			\
		wait_result_t _wr;					\
									\
		_wr = vm_object_sleep((object),				\
				      VM_OBJECT_EVENT_PAGING_IN_PROGRESS, \
				      (interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
			/*XXX break; */					\
	}								\
	MACRO_END

#define vm_object_paging_only_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0) {			\
		wait_result_t _wr;					\
									\
		_wr = vm_object_sleep((object),				\
				      VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS, \
				      (interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
			/*XXX break; */					\
	}								\
	MACRO_END


#define vm_object_mapping_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert(!(object)->mapping_in_progress);				\
	(object)->mapping_in_progress = TRUE;				\
	MACRO_END

#define vm_object_mapping_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->mapping_in_progress);				\
	(object)->mapping_in_progress = FALSE;				\
	vm_object_wakeup((object),					\
			 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);		\
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->mapping_in_progress) {				\
		wait_result_t _wr;					\
									\
		_wr = vm_object_sleep((object),				\
				      VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
				      (interruptible));			\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
			/*XXX break; */					\
	}								\
	assert(!(object)->mapping_in_progress);				\
	MACRO_END
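
/*
 * Sketch of the mapping_in_progress protocol (illustrative; the real
 * users are the pager mapping and unmapping paths):
 *
 *	vm_object_lock(object);
 *	vm_object_mapping_wait(object, THREAD_UNINT);
 *	vm_object_mapping_begin(object);
 *	vm_object_unlock(object);
 *	// ... map or unmap the pager ...
 *	vm_object_lock(object);
 *	vm_object_mapping_end(object);		// wakes any waiters
 *	vm_object_unlock(object);
 */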



#define vm_object_round_page(x)	(((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x)	((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
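
/*
 * Worked example, assuming 4K pages (PAGE_MASK == 0xFFF):
 *
 *	vm_object_round_page(0x1001) == 0x2000
 *	vm_object_round_page(0x1000) == 0x1000
 *	vm_object_trunc_page(0x1FFF) == 0x1000
 */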

extern void	vm_object_cache_add(vm_object_t);
extern void	vm_object_cache_remove(vm_object_t);
extern int	vm_object_cache_evict(int, int);

#endif /* _VM_VM_OBJECT_H_ */