/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm_object.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory object module definitions.
 */

#ifndef	_VM_VM_OBJECT_H_
#define	_VM_VM_OBJECT_H_

#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/lock.h>
#include <kern/assert.h>
#include <kern/ipc_mig.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>

#if	MACH_PAGEMAP
#include <vm/vm_external.h>
#endif	/* MACH_PAGEMAP */
87 | ||
0b4e3aa0 | 88 | typedef memory_object_control_t pager_request_t; |
1c79356b A |
89 | #define PAGER_REQUEST_NULL ((pager_request_t) 0) |
90 | ||
91 | /* | |
92 | * Types defined: | |
93 | * | |
94 | * vm_object_t Virtual memory object. | |
1c79356b A |
95 | */ |
96 | ||
97 | typedef unsigned long long vm_object_size_t; | |
98 | ||

struct vm_object {
	queue_head_t		memq;		/* Resident memory */
	decl_mutex_data(,	Lock)		/* Synchronization */

	vm_object_size_t	size;		/* Object size (only valid
						 * if internal)
						 */
	vm_object_size_t	frozen_size;	/* How much has been marked
						 * copy-on-write (only
						 * valid if copy_symmetric)
						 */
	int			ref_count;	/* Number of references */
#if	TASK_SWAPPER
	int			res_count;	/* Residency references (swap) */
#endif	/* TASK_SWAPPER */
	unsigned int		resident_page_count;
						/* number of resident pages */

	struct vm_object	*copy;		/* Object that should receive
						 * a copy of my changed pages,
						 * for copy_delay, or just the
						 * temporary object that
						 * shadows this object, for
						 * copy_call.
						 */
	struct vm_object	*shadow;	/* My shadow */
	vm_object_offset_t	shadow_offset;	/* Offset into shadow */

	memory_object_t		pager;		/* Where to get data */
	vm_object_offset_t	paging_offset;	/* Offset into memory object */
	pager_request_t		pager_request;	/* Where data comes back */

	memory_object_copy_strategy_t
				copy_strategy;	/* How to handle data copy */

	unsigned int		absent_count;	/* The number of pages that
						 * have been requested but
						 * not filled.  That is, the
						 * number of pages for which
						 * the "absent" attribute is
						 * asserted.
						 */

	unsigned int		paging_in_progress;
						/* The memory object ports are
						 * being used (e.g., for pagein
						 * or pageout) -- don't change
						 * any of these fields (i.e.,
						 * don't collapse, destroy or
						 * terminate)
						 */
	unsigned int
	/* boolean_t array */	all_wanted:11,	/* Bit array of "want to be
						 * awakened" notations.  See
						 * VM_OBJECT_EVENT_* items
						 * below */
	/* boolean_t */		pager_created:1, /* Has pager been created? */
	/* boolean_t */		pager_initialized:1, /* Are fields ready to use? */
	/* boolean_t */		pager_ready:1,	/* Will pager take requests? */

	/* boolean_t */		pager_trusted:1, /* The pager for this object
						 * is trusted.  This is true
						 * for all internal objects
						 * (backed by the default
						 * pager)
						 */
	/* boolean_t */		can_persist:1,	/* May the kernel keep the
						 * data for this object (and
						 * rights to the memory
						 * object) after all address
						 * map references are
						 * deallocated?
						 */
	/* boolean_t */		internal:1,	/* Created by the kernel (and
						 * therefore managed by the
						 * default memory manager)
						 */
	/* boolean_t */		temporary:1,	/* Permanent objects may be
						 * changed externally by the
						 * memory manager, and changes
						 * made in memory must be
						 * reflected back to the
						 * memory manager.  Temporary
						 * objects lack both of these
						 * characteristics.
						 */
	/* boolean_t */		private:1,	/* magic device_pager object,
						 * holds private pages only */
	/* boolean_t */		pageout:1,	/* pageout object.  contains
						 * private pages that refer to
						 * a real memory object. */
	/* boolean_t */		alive:1,	/* Not yet terminated */

	/* boolean_t */		lock_in_progress:1,
						/* Is a multi-page lock
						 * request in progress?
						 */
	/* boolean_t */		lock_restart:1,
						/* Should a lock request in
						 * progress restart its
						 * search?
						 */
	/* boolean_t */		shadowed:1,	/* Shadow may exist */
	/* boolean_t */		silent_overwrite:1,
						/* Allow full page overwrite
						 * without data_request if
						 * page is absent */
	/* boolean_t */		advisory_pageout:1,
						/* Instead of sending the page
						 * via OOL, just notify the
						 * pager that the kernel
						 * wants to discard it; the
						 * page remains in the object */
	/* boolean_t */		true_share:1,
						/* This object is mapped
						 * in more than one place
						 * and hence cannot be
						 * coalesced */
	/* boolean_t */		terminating:1,
						/* Allows vm_object_lookup
						 * and vm_object_deallocate
						 * to special-case their
						 * behavior when they are
						 * called as a result of
						 * page cleaning during
						 * object termination
						 */
	/* boolean_t */		named:1,	/* Enforces an internal
						 * naming convention: by
						 * calling the right routines
						 * for allocation and
						 * destruction, UBC references
						 * against the vm_object are
						 * checked.
						 */
	/* boolean_t */		shadow_severed:1,
						/* Set when a permanent object
						 * backing a COW goes away
						 * unexpectedly.  This bit
						 * allows vm_fault to return
						 * an error rather than a
						 * zero-filled page.
						 */
	/* boolean_t */		phys_contiguous:1,
						/* Memory is wired and
						 * guaranteed physically
						 * contiguous.  However,
						 * it is not device memory
						 * and obeys normal virtual
						 * memory rules w.r.t. pmap
						 * access bits.
						 */
	/* boolean_t */		nophyscache:1;
						/* When mapped at the
						 * pmap level, don't allow
						 * primary caching. (for
						 * I/O)
						 */

	queue_chain_t		cached_list;	/* Attachment point for the
						 * list of objects cached as a
						 * result of their can_persist
						 * value
						 */

	queue_head_t		msr_q;		/* memory object synchronize
						 * request queue */

	vm_object_offset_t	last_alloc;	/* last allocation offset */
	vm_object_offset_t	sequential;	/* sequential access size */
	vm_size_t		cluster_size;	/* size of paging cluster */
#if	MACH_PAGEMAP
	vm_external_map_t	existence_map;	/* bitmap of pages written to
						 * backing storage */
#endif	/* MACH_PAGEMAP */
	vm_offset_t		cow_hint;	/* last page present in the
						 * shadow but not in the object */
#if	MACH_ASSERT
	struct vm_object	*paging_object;	/* object in which pages being
						 * swapped out are temporarily
						 * placed
						 */
#endif
	/* hold object lock when altering */
	unsigned int				/* cache WIMG bits */
			wimg_bits:8,		/* wimg plus some expansion */
			not_in_use:24;
#ifdef	UBC_DEBUG
	queue_head_t		uplq;		/* List of outstanding upls */
#endif	/* UBC_DEBUG */
};
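
/*
 * Illustrative sketch (not part of this interface): how the shadow
 * fields compose during a copy-on-write lookup.  A fault handler
 * conceptually walks from the faulting object toward its backers,
 * translating the offset by shadow_offset at each hop.  vm_page_lookup
 * and the VM_*_NULL constants are declared elsewhere; the surrounding
 * locking is omitted.
 *
 *	vm_object_t        obj = first_object;
 *	vm_object_offset_t off = fault_offset;
 *
 *	while (obj != VM_OBJECT_NULL) {
 *		if (vm_page_lookup(obj, off) != VM_PAGE_NULL)
 *			break;			// page resident at this level
 *		off += obj->shadow_offset;	// translate into the backer
 *		obj = obj->shadow;		// descend one level
 *	}
 */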

__private_extern__
vm_object_t	kernel_object;		/* the single kernel object */

__private_extern__
int		vm_object_absent_max;	/* maximum number of absent pages
					   at a time for each object */

#define	VM_MSYNC_INITIALIZED		0
#define	VM_MSYNC_SYNCHRONIZING		1
#define	VM_MSYNC_DONE			2

struct msync_req {
	queue_chain_t		msr_q;		/* object request queue */
	queue_chain_t		req_q;		/* vm_msync request queue */
	unsigned int		flag;
	vm_object_offset_t	offset;
	vm_object_size_t	length;
	vm_object_t		object;		/* back pointer */
	decl_mutex_data(,	msync_req_lock)	/* Lock for this structure */
};

typedef struct msync_req	*msync_req_t;
#define	MSYNC_REQ_NULL		((msync_req_t) 0)

/*
 *	Macros to allocate and free msync_reqs
 */
#define	msync_req_alloc(msr)						\
	MACRO_BEGIN							\
	(msr) = (msync_req_t)kalloc(sizeof(struct msync_req));		\
	mutex_init(&(msr)->msync_req_lock, ETAP_VM_MSYNC);		\
	(msr)->flag = VM_MSYNC_INITIALIZED;				\
	MACRO_END

#define	msync_req_free(msr)						\
	(kfree((vm_offset_t)(msr), sizeof(struct msync_req)))

#define	msr_lock(msr)	mutex_lock(&(msr)->msync_req_lock)
#define	msr_unlock(msr)	mutex_unlock(&(msr)->msync_req_lock)

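/*
 * Usage sketch (illustrative only): a plausible lifetime for an
 * msync_req.  Queueing the request onto an object's msr_q and the
 * wakeup protocol are omitted; the field assignments follow the
 * struct above.
 *
 *	msync_req_t msr;
 *
 *	msync_req_alloc(msr);		// kalloc + mutex_init + flag setup
 *	msr_lock(msr);
 *	msr->offset = offset;
 *	msr->length = length;
 *	msr->flag   = VM_MSYNC_SYNCHRONIZING;
 *	msr_unlock(msr);
 *	...				// wait until flag == VM_MSYNC_DONE
 *	msync_req_free(msr);
 */
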
/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void		vm_object_bootstrap(void);

__private_extern__ void		vm_object_init(void);

__private_extern__ vm_object_t	vm_object_allocate(
					vm_object_size_t	size);

#if	TASK_SWAPPER

__private_extern__ void		vm_object_res_reference(
					vm_object_t	object);
__private_extern__ void		vm_object_res_deallocate(
					vm_object_t	object);
#define	VM_OBJ_RES_INCR(object)	(object)->res_count++
#define	VM_OBJ_RES_DECR(object)	(object)->res_count--

#else	/* TASK_SWAPPER */

#define	VM_OBJ_RES_INCR(object)
#define	VM_OBJ_RES_DECR(object)
#define	vm_object_res_reference(object)
#define	vm_object_res_deallocate(object)

#endif	/* TASK_SWAPPER */

#define	vm_object_reference_locked(object)		\
	MACRO_BEGIN					\
	vm_object_t RLObject = (object);		\
	assert((RLObject)->ref_count > 0);		\
	(RLObject)->ref_count++;			\
	vm_object_res_reference(RLObject);		\
	MACRO_END


#if	MACH_ASSERT

__private_extern__ void		vm_object_reference(
					vm_object_t	object);

#else	/* MACH_ASSERT */

#define	vm_object_reference(object)			\
	MACRO_BEGIN					\
	vm_object_t RObject = (object);			\
	if (RObject) {					\
		vm_object_lock(RObject);		\
		vm_object_reference_locked(RObject);	\
		vm_object_unlock(RObject);		\
	}						\
	MACRO_END

#endif	/* MACH_ASSERT */

__private_extern__ void		vm_object_deallocate(
					vm_object_t	object);

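/*
 * Illustrative sketch: the reference-counting discipline implied
 * above.  Callers already holding the object lock use
 * vm_object_reference_locked(); everyone else uses
 * vm_object_reference(), which takes and drops the lock itself.
 * Every reference taken must eventually be paired with a
 * vm_object_deallocate().
 *
 *	vm_object_reference(object);	// tolerates VM_OBJECT_NULL
 *	...				// use the object
 *	vm_object_deallocate(object);	// may cache or terminate it
 */
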
__private_extern__ kern_return_t vm_object_release_name(
					vm_object_t	object,
					int		flags);

__private_extern__ void		vm_object_pmap_protect(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_size_t		size,
					pmap_t			pmap,
					vm_offset_t		pmap_start,
					vm_prot_t		prot);

__private_extern__ void		vm_object_page_remove(
					vm_object_t		object,
					vm_object_offset_t	start,
					vm_object_offset_t	end);

__private_extern__ void		vm_object_deactivate_pages(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		kill_page);

__private_extern__ boolean_t	vm_object_coalesce(
					vm_object_t		prev_object,
					vm_object_t		next_object,
					vm_object_offset_t	prev_offset,
					vm_object_offset_t	next_offset,
					vm_object_size_t	prev_size,
					vm_object_size_t	next_size);

__private_extern__ boolean_t	vm_object_shadow(
					vm_object_t		*object,
					vm_object_offset_t	*offset,
					vm_object_size_t	length);

__private_extern__ void		vm_object_collapse(
					vm_object_t		object,
					vm_object_offset_t	offset);

__private_extern__ boolean_t	vm_object_copy_quickly(
					vm_object_t		*_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		*_src_needs_copy,
					boolean_t		*_dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_strategically(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					vm_object_t		*dst_object,
					vm_object_offset_t	*dst_offset,
					boolean_t		*dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_slowly(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					int			interruptible,
					vm_object_t		*_result_object);

__private_extern__ vm_object_t	vm_object_copy_delayed(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size);


__private_extern__ kern_return_t vm_object_destroy(
					vm_object_t	object,
					kern_return_t	reason);

__private_extern__ void		vm_object_pager_create(
					vm_object_t	object);

__private_extern__ void		vm_object_page_map(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	(*map_fn)
						(void *, vm_object_offset_t),
					void			*map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_size_t		size,
					upl_t			*upl,
					upl_page_info_t		*page_info,
					unsigned int		*count,
					int			flags);

__private_extern__ boolean_t	vm_object_sync(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_size_t		size,
					boolean_t		should_flush,
					boolean_t		should_return);

__private_extern__ kern_return_t vm_object_update(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_size_t		size,	/* should be 64-bit */
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);

__private_extern__ kern_return_t vm_object_lock_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);


__private_extern__ vm_object_t	vm_object_enter(
					memory_object_t		pager,
					vm_object_size_t	size,
					boolean_t		internal,
					boolean_t		init,
					boolean_t		check_named);

/*
 *	Event waiting handling
 */

#define	VM_OBJECT_EVENT_INITIALIZED		0
#define	VM_OBJECT_EVENT_PAGER_READY		1
#define	VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
#define	VM_OBJECT_EVENT_ABSENT_COUNT		3
#define	VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
#define	VM_OBJECT_EVENT_UNCACHING		5
#define	VM_OBJECT_EVENT_COPY_CALL		6
#define	VM_OBJECT_EVENT_CACHING			7

#define	vm_object_assert_wait(object, event, interruptible)		\
	(((object)->all_wanted |= 1 << (event)),			\
	 assert_wait((event_t)((vm_offset_t)(object)+(event)),(interruptible)))

#define	vm_object_wait(object, event, interruptible)			\
	(vm_object_assert_wait((object),(event),(interruptible)),	\
	vm_object_unlock(object),					\
	thread_block(THREAD_CONTINUE_NULL))

#define	thread_sleep_vm_object(object, event, interruptible)		\
	thread_sleep_mutex((event_t)(event), &(object)->Lock, (interruptible))

#define	vm_object_sleep(object, event, interruptible)			\
	(((object)->all_wanted |= 1 << (event)),			\
	 thread_sleep_vm_object((object),				\
		((vm_offset_t)(object)+(event)), (interruptible)))

#define	vm_object_wakeup(object, event)					\
	MACRO_BEGIN							\
	if ((object)->all_wanted & (1 << (event)))			\
		thread_wakeup((event_t)((vm_offset_t)(object) + (event))); \
	(object)->all_wanted &= ~(1 << (event));			\
	MACRO_END

#define	vm_object_set_wanted(object, event)				\
	MACRO_BEGIN							\
	((object)->all_wanted |= (1 << (event)));			\
	MACRO_END

#define	vm_object_wanted(object, event)					\
	((object)->all_wanted & (1 << (event)))
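
/*
 * Illustrative sketch: the event protocol built from the macros above.
 * A waiter records interest in an event bit and blocks; the thread
 * that changes the corresponding state calls vm_object_wakeup(), which
 * only issues a thread_wakeup() if interest was recorded.  Both sides
 * hold the object lock; vm_object_sleep() drops it while blocked, so
 * this sketch assumes it is held again before the condition is
 * re-tested.
 *
 *	vm_object_lock(object);
 *	while (!object->pager_ready) {
 *		vm_object_sleep(object,
 *			VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
 *	}
 *	vm_object_unlock(object);
 *
 *	// ...and on the side that makes the pager ready:
 *	vm_object_lock(object);
 *	object->pager_ready = TRUE;
 *	vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
 *	vm_object_unlock(object);
 */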

/*
 *	Routines implemented as macros
 */

#define	vm_object_paging_begin(object)					\
	MACRO_BEGIN							\
	(object)->paging_in_progress++;					\
	MACRO_END

#define	vm_object_paging_end(object)					\
	MACRO_BEGIN							\
	assert((object)->paging_in_progress != 0);			\
	if (--(object)->paging_in_progress == 0) {			\
		vm_object_wakeup(object,				\
			VM_OBJECT_EVENT_PAGING_IN_PROGRESS);		\
	}								\
	MACRO_END

#define	vm_object_paging_wait(object, interruptible)			\
	MACRO_BEGIN							\
	while ((object)->paging_in_progress != 0) {			\
		wait_result_t	_wr;					\
									\
		_wr = vm_object_sleep((object),				\
			VM_OBJECT_EVENT_PAGING_IN_PROGRESS,		\
			(interruptible));				\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX	break; */					\
	}								\
	MACRO_END

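/*
 * Usage sketch (illustrative): bracketing a paging operation so the
 * object cannot be collapsed, destroyed, or terminated while I/O is
 * pending, per the paging_in_progress comment above.
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	vm_object_unlock(object);
 *	...				// issue the pagein/pageout
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);	// wakes PAGING_IN_PROGRESS waiters
 *	vm_object_unlock(object);
 */
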
#define	vm_object_absent_assert_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_assert_wait(	(object),				\
				VM_OBJECT_EVENT_ABSENT_COUNT,		\
				(interruptible));			\
	MACRO_END


#define	vm_object_absent_release(object)				\
	MACRO_BEGIN							\
	(object)->absent_count--;					\
	vm_object_wakeup((object),					\
			 VM_OBJECT_EVENT_ABSENT_COUNT);			\
	MACRO_END

/*
 *	Object locking macros
 */

#define	vm_object_lock_init(object)	mutex_init(&(object)->Lock, ETAP_VM_OBJ)
#define	vm_object_lock(object)		mutex_lock(&(object)->Lock)
#define	vm_object_unlock(object)	mutex_unlock(&(object)->Lock)
#define	vm_object_lock_try(object)	mutex_try(&(object)->Lock)

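/*
 * Illustrative sketch: vm_object_lock_try() lets a caller that could
 * otherwise deadlock (e.g., one holding other locks in a conflicting
 * order) back off instead of blocking.
 *
 *	if (!vm_object_lock_try(object)) {
 *		// lock is busy: drop other locks and retry later
 *		return;
 *	}
 *	...				// object lock held here
 *	vm_object_unlock(object);
 */
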
#endif	/* _VM_VM_OBJECT_H_ */