1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm_object.h
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 * Date: 1985
62 *
63 * Virtual memory object module definitions.
64 */
65
66 #ifndef _VM_VM_OBJECT_H_
67 #define _VM_VM_OBJECT_H_
68
69 #include <mach_pagemap.h>
70 #include <task_swapper.h>
71
72 #include <mach/kern_return.h>
73 #include <mach/boolean.h>
74 #include <mach/memory_object_types.h>
75 #include <mach/port.h>
76 #include <mach/vm_prot.h>
77 #include <mach/vm_param.h>
78 #include <mach/machine/vm_types.h>
79 #include <kern/queue.h>
80 #include <kern/lock.h>
81 #include <kern/locks.h>
82 #include <kern/assert.h>
83 #include <kern/misc_protos.h>
84 #include <kern/macro_help.h>
85 #include <ipc/ipc_types.h>
86 #include <vm/pmap.h>
87
88 #if MACH_PAGEMAP
89 #include <vm/vm_external.h>
90 #endif /* MACH_PAGEMAP */
91
92 struct vm_page;
93
94 /*
95 * Types defined:
96 *
97 * vm_object_t Virtual memory object.
98 * vm_object_fault_info_t Used to determine cluster size.
99 */
100
101 struct vm_object_fault_info {
102 int interruptible;
103 uint32_t user_tag;
104 vm_size_t cluster_size;
105 vm_behavior_t behavior;
106 vm_map_offset_t lo_offset;
107 vm_map_offset_t hi_offset;
108 boolean_t no_cache;
109 };
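/*
 * Illustrative sketch (not part of this interface): a fault path that
 * wants a read-ahead window typically fills in a fault-info record and
 * hands it to vm_object_cluster_size(), declared further below.  The
 * field values here are hypothetical.
 *
 *	struct vm_object_fault_info fault_info;
 *
 *	fault_info.interruptible = THREAD_UNINT;
 *	fault_info.user_tag = 0;
 *	fault_info.cluster_size = 0;
 *	fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
 *	fault_info.lo_offset = 0;
 *	fault_info.hi_offset = object_size;
 *	fault_info.no_cache = FALSE;
 *
 *	vm_object_cluster_size(object, &start, &length, &fault_info);
 */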
110
111
112
113 struct vm_object {
114 queue_head_t memq; /* Resident memory */
115 lck_rw_t Lock; /* Synchronization */
116
117 vm_object_size_t size; /* Object size (only valid
118 * if internal)
119 */
120 struct vm_page *memq_hint;
121 int ref_count; /* Number of references */
122 #if TASK_SWAPPER
123 int res_count; /* Residency references (swap)*/
124 #endif /* TASK_SWAPPER */
125 unsigned int resident_page_count;
126 /* number of resident pages */
127
128 struct vm_object *copy; /* Object that should receive
129 * a copy of my changed pages,
130 * for copy_delay, or just the
131 * temporary object that
132 * shadows this object, for
133 * copy_call.
134 */
135 struct vm_object *shadow; /* My shadow */
136 vm_object_offset_t shadow_offset; /* Offset into shadow */
137
138 memory_object_t pager; /* Where to get data */
139 vm_object_offset_t paging_offset; /* Offset into memory object */
140 memory_object_control_t pager_control; /* Where data comes back */
141
142 memory_object_copy_strategy_t
143 copy_strategy; /* How to handle data copy */
144
145 int paging_in_progress;
146 /* The memory object ports are
147 * being used (e.g., for pagein
148 * or pageout) -- don't change
149 * any of these fields (i.e.,
150 * don't collapse, destroy or
151 * terminate)
152 */
153 unsigned int
154 /* boolean_t array */ all_wanted:11, /* Bit array of "want to be
155 * awakened" notations. See
156 * VM_OBJECT_EVENT_* items
157 * below */
158 /* boolean_t */ pager_created:1, /* Has pager been created? */
159 /* boolean_t */ pager_initialized:1, /* Are fields ready to use? */
160 /* boolean_t */ pager_ready:1, /* Will pager take requests? */
161
162 /* boolean_t */ pager_trusted:1,/* The pager for this object
163 * is trusted. This is true for
164 * all internal objects (backed
165 * by the default pager)
166 */
167 /* boolean_t */ can_persist:1, /* The kernel may keep the data
168 * for this object (and rights
169 * to the memory object) after
170 * all address map references
171 * are deallocated?
172 */
173 /* boolean_t */ internal:1, /* Created by the kernel (and
174 * therefore, managed by the
175 						 * default memory manager)
176 */
177 /* boolean_t */ temporary:1, /* Permanent objects may be
178 * changed externally by the
179 * memory manager, and changes
180 * made in memory must be
181 * reflected back to the memory
182 * manager. Temporary objects
183 * lack both of these
184 * characteristics.
185 */
186 /* boolean_t */ private:1, /* magic device_pager object,
187 * holds private pages only */
188 /* boolean_t */ pageout:1, /* pageout object. contains
189 * private pages that refer to
190 * a real memory object. */
191 /* boolean_t */ alive:1, /* Not yet terminated */
192
193 /* boolean_t */ purgable:2, /* Purgable state. See
194 * VM_PURGABLE_*
195 */
196 /* boolean_t */ shadowed:1, /* Shadow may exist */
197 /* boolean_t */ silent_overwrite:1,
198 /* Allow full page overwrite
199 * without data_request if
200 * page is absent */
201 /* boolean_t */ advisory_pageout:1,
202 /* Instead of sending page
203 * via OOL, just notify
204 * pager that the kernel
205 * wants to discard it, page
206 * remains in object */
207 /* boolean_t */ true_share:1,
208 /* This object is mapped
209 * in more than one place
210 * and hence cannot be
211 * coalesced */
212 /* boolean_t */ terminating:1,
213 /* Allows vm_object_lookup
214 * and vm_object_deallocate
215 * to special case their
216 * behavior when they are
217 * called as a result of
218 * page cleaning during
219 * object termination
220 */
221 	/* boolean_t */	named:1,	/* The object is named: an
222 						 * internal naming convention
223 						 * is enforced by calling the
224 						 * right routines for allocation
225 						 * and destruction, and UBC
226 						 * references against the
227 						 * vm_object are checked.
228 						 */
229 /* boolean_t */ shadow_severed:1,
230 /* When a permanent object
231 * backing a COW goes away
232 * unexpectedly. This bit
233 * allows vm_fault to return
234 * an error rather than a
235 * zero filled page.
236 */
237 /* boolean_t */ phys_contiguous:1,
238 /* Memory is wired and
239 * guaranteed physically
240 * contiguous. However
241 * it is not device memory
242 * and obeys normal virtual
243 * memory rules w.r.t pmap
244 * access bits.
245 */
246 /* boolean_t */ nophyscache:1;
247 /* When mapped at the
248 * pmap level, don't allow
249 * primary caching. (for
250 * I/O)
251 */
252
253
254
255 queue_chain_t cached_list; /* Attachment point for the
256 * list of objects cached as a
257 * result of their can_persist
258 * value
259 */
260
261 queue_head_t msr_q; /* memory object synchronise
262 request queue */
263
264 /*
265 * the following fields are not protected by any locks
266 * they are updated via atomic compare and swap
267 */
268 vm_object_offset_t last_alloc; /* last allocation offset */
269 int sequential; /* sequential access size */
270
271 uint32_t pages_created;
272 uint32_t pages_used;
273 #if MACH_PAGEMAP
274 vm_external_map_t existence_map; /* bitmap of pages written to
275 * backing storage */
276 #endif /* MACH_PAGEMAP */
277 vm_offset_t cow_hint; /* last page present in */
278 /* shadow but not in object */
279 #if MACH_ASSERT
280 	struct vm_object *paging_object; /* object whose pages being
281 					  * swapped out are temporarily
282 					  * put in the current object
283 					  */
284 #endif
285 /* hold object lock when altering */
286 unsigned int
287 wimg_bits:8, /* cache WIMG bits */
288 code_signed:1, /* pages are signed and should be
289 validated; the signatures are stored
290 with the pager */
291 not_in_use:23; /* for expansion */
292
293 #ifdef UPL_DEBUG
294 queue_head_t uplq; /* List of outstanding upls */
295 #endif /* UPL_DEBUG */
296
297 #ifdef VM_PIP_DEBUG
298 /*
299 * Keep track of the stack traces for the first holders
300 * of a "paging_in_progress" reference for this VM object.
301 */
302 #define VM_PIP_DEBUG_STACK_FRAMES 25 /* depth of each stack trace */
303 #define VM_PIP_DEBUG_MAX_REFS 10 /* track that many references */
304 struct __pip_backtrace {
305 void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
306 } pip_holders[VM_PIP_DEBUG_MAX_REFS];
307 #endif /* VM_PIP_DEBUG */
308
309 queue_chain_t objq; /* object queue - currently used for purgable queues */
310 };
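/*
 * Illustrative sketch (not a routine in this file): the shadow and
 * shadow_offset fields chain objects together for copy-on-write.  A
 * lookup that misses in one object descends the chain, accumulating
 * the shadow offsets; locking and page-state checks are omitted.
 *
 *	vm_object_t		obj = top_object;
 *	vm_object_offset_t	off = top_offset;
 *	vm_page_t		page = VM_PAGE_NULL;
 *
 *	while (obj != VM_OBJECT_NULL) {
 *		page = vm_page_lookup(obj, off);
 *		if (page != VM_PAGE_NULL)
 *			break;
 *		off += obj->shadow_offset;
 *		obj = obj->shadow;
 *	}
 */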
311
312 #define VM_PAGE_REMOVE(page) \
313 MACRO_BEGIN \
314 vm_page_t __page = (page); \
315 vm_object_t __object = __page->object; \
316 if (__page == __object->memq_hint) { \
317 vm_page_t __new_hint; \
318 queue_entry_t __qe; \
319 __qe = queue_next(&__page->listq); \
320 if (queue_end(&__object->memq, __qe)) { \
321 __qe = queue_prev(&__page->listq); \
322 if (queue_end(&__object->memq, __qe)) { \
323 __qe = NULL; \
324 } \
325 } \
326 __new_hint = (vm_page_t) __qe; \
327 __object->memq_hint = __new_hint; \
328 } \
329 queue_remove(&__object->memq, __page, vm_page_t, listq); \
330 MACRO_END
331
332 #define VM_PAGE_INSERT(page, object) \
333 MACRO_BEGIN \
334 vm_page_t __page = (page); \
335 vm_object_t __object = (object); \
336 queue_enter(&__object->memq, __page, vm_page_t, listq); \
337 __object->memq_hint = __page; \
338 MACRO_END
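/*
 * Illustrative usage of the two macros above (assumes the object is
 * locked exclusively and the page is not queued on any other object):
 *
 *	vm_object_lock(object);
 *	VM_PAGE_INSERT(mem, object);	-- queues the page, sets memq_hint
 *	...
 *	VM_PAGE_REMOVE(mem);		-- dequeues it, repairs memq_hint
 *	vm_object_unlock(object);
 */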
339
340 __private_extern__
341 vm_object_t kernel_object; /* the single kernel object */
342
343 __private_extern__
344 unsigned int vm_object_absent_max; /* maximum number of absent pages
345 at a time for each object */
346
347 # define VM_MSYNC_INITIALIZED 0
348 # define VM_MSYNC_SYNCHRONIZING 1
349 # define VM_MSYNC_DONE 2
350
351 struct msync_req {
352 queue_chain_t msr_q; /* object request queue */
353 queue_chain_t req_q; /* vm_msync request queue */
354 unsigned int flag;
355 vm_object_offset_t offset;
356 vm_object_size_t length;
357 vm_object_t object; /* back pointer */
358 decl_mutex_data(, msync_req_lock) /* Lock for this structure */
359 };
360
361 typedef struct msync_req *msync_req_t;
362 #define MSYNC_REQ_NULL ((msync_req_t) 0)
363
364 /*
365 * Macros to allocate and free msync_reqs
366 */
367 #define msync_req_alloc(msr) \
368 MACRO_BEGIN \
369 (msr) = (msync_req_t)kalloc(sizeof(struct msync_req)); \
370 mutex_init(&(msr)->msync_req_lock, 0); \
371 	(msr)->flag = VM_MSYNC_INITIALIZED; \
372 MACRO_END
373
374 #define msync_req_free(msr) \
375 (kfree((msr), sizeof(struct msync_req)))
376
377 #define msr_lock(msr) mutex_lock(&(msr)->msync_req_lock)
378 #define msr_unlock(msr) mutex_unlock(&(msr)->msync_req_lock)
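/*
 * Illustrative usage of the msync_req macros above (queueing the
 * request onto an object's msr_q and error handling are omitted):
 *
 *	msync_req_t	msr;
 *
 *	msync_req_alloc(msr);			-- kalloc + lock/flag init
 *	msr_lock(msr);
 *	msr->offset = offset;
 *	msr->length = length;
 *	msr->flag = VM_MSYNC_SYNCHRONIZING;
 *	msr_unlock(msr);
 *	...
 *	msync_req_free(msr);			-- kfree the request
 */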
379
380 /*
381 * Declare procedures that operate on VM objects.
382 */
383
384 __private_extern__ void vm_object_bootstrap(void) __attribute__((section("__TEXT, initcode")));
385
386 __private_extern__ void vm_object_init(void);
387
388 __private_extern__ void vm_object_init_lck_grp(void);
389
390 __private_extern__ void vm_object_reaper_init(void);
391
392 __private_extern__ vm_object_t vm_object_allocate(
393 vm_object_size_t size);
394
395 __private_extern__ void _vm_object_allocate(vm_object_size_t size,
396 vm_object_t object);
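/*
 * Illustrative pairing (the size is hypothetical): an object comes
 * back from vm_object_allocate() holding one reference and is released
 * with vm_object_deallocate(), declared further below.
 *
 *	vm_object_t	object;
 *
 *	object = vm_object_allocate((vm_object_size_t)(16 * PAGE_SIZE));
 *	...
 *	vm_object_deallocate(object);
 */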
397
398 #if TASK_SWAPPER
399
400 __private_extern__ void vm_object_res_reference(
401 vm_object_t object);
402 __private_extern__ void vm_object_res_deallocate(
403 vm_object_t object);
404 #define VM_OBJ_RES_INCR(object) (object)->res_count++
405 #define VM_OBJ_RES_DECR(object) (object)->res_count--
406
407 #else /* TASK_SWAPPER */
408
409 #define VM_OBJ_RES_INCR(object)
410 #define VM_OBJ_RES_DECR(object)
411 #define vm_object_res_reference(object)
412 #define vm_object_res_deallocate(object)
413
414 #endif /* TASK_SWAPPER */
415
416 #define vm_object_reference_locked(object) \
417 MACRO_BEGIN \
418 vm_object_t RLObject = (object); \
419 vm_object_lock_assert_exclusive(object); \
420 assert((RLObject)->ref_count > 0); \
421 (RLObject)->ref_count++; \
422 assert((RLObject)->ref_count > 1); \
423 vm_object_res_reference(RLObject); \
424 MACRO_END
425
426
427 #define vm_object_reference_shared(object) \
428 MACRO_BEGIN \
429 vm_object_t RLObject = (object); \
430 vm_object_lock_assert_shared(object); \
431 assert((RLObject)->ref_count > 0); \
432 OSAddAtomic(1, (SInt32 *)&(RLObject)->ref_count); \
433 assert((RLObject)->ref_count > 1); \
434 /* XXX we would need an atomic version of the following ... */ \
435 vm_object_res_reference(RLObject); \
436 MACRO_END
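/*
 * Illustrative sketch: taking an extra reference on an object the
 * caller already holds locked.  With an exclusive lock use
 * vm_object_reference_locked(); with only a shared lock,
 * vm_object_reference_shared() bumps ref_count atomically.
 *
 *	vm_object_lock(object);			-- exclusive
 *	vm_object_reference_locked(object);
 *	vm_object_unlock(object);
 *
 *	vm_object_lock_shared(object);
 *	vm_object_reference_shared(object);
 *	vm_object_unlock(object);
 */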
437
438
439 __private_extern__ void vm_object_reference(
440 vm_object_t object);
441
442 #if !MACH_ASSERT
443
444 #define vm_object_reference(object) \
445 MACRO_BEGIN \
446 vm_object_t RObject = (object); \
447 if (RObject) { \
448 vm_object_lock(RObject); \
449 vm_object_reference_locked(RObject); \
450 vm_object_unlock(RObject); \
451 } \
452 MACRO_END
453
454 #endif /* MACH_ASSERT */
455
456 __private_extern__ void vm_object_deallocate(
457 vm_object_t object);
458
459 __private_extern__ kern_return_t vm_object_release_name(
460 vm_object_t object,
461 int flags);
462
463 __private_extern__ void vm_object_pmap_protect(
464 vm_object_t object,
465 vm_object_offset_t offset,
466 vm_object_size_t size,
467 pmap_t pmap,
468 vm_map_offset_t pmap_start,
469 vm_prot_t prot);
470
471 __private_extern__ void vm_object_page_remove(
472 vm_object_t object,
473 vm_object_offset_t start,
474 vm_object_offset_t end);
475
476 __private_extern__ void vm_object_deactivate_pages(
477 vm_object_t object,
478 vm_object_offset_t offset,
479 vm_object_size_t size,
480 boolean_t kill_page);
481
482 __private_extern__ unsigned int vm_object_purge(
483 vm_object_t object);
484
485 __private_extern__ kern_return_t vm_object_purgable_control(
486 vm_object_t object,
487 vm_purgable_t control,
488 int *state);
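/*
 * Illustrative call (the state constants come from <mach/vm_purgable.h>
 * and the object is assumed to be a purgeable object the caller owns):
 *
 *	int state = VM_PURGABLE_VOLATILE;
 *
 *	kr = vm_object_purgable_control(object,
 *					VM_PURGABLE_SET_STATE,
 *					&state);
 *	-- on success, "state" is overwritten with the previous state
 */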
489
490 __private_extern__ boolean_t vm_object_coalesce(
491 vm_object_t prev_object,
492 vm_object_t next_object,
493 vm_object_offset_t prev_offset,
494 vm_object_offset_t next_offset,
495 vm_object_size_t prev_size,
496 vm_object_size_t next_size);
497
498 __private_extern__ boolean_t vm_object_shadow(
499 vm_object_t *object,
500 vm_object_offset_t *offset,
501 vm_object_size_t length);
502
503 __private_extern__ void vm_object_collapse(
504 vm_object_t object,
505 vm_object_offset_t offset,
506 boolean_t can_bypass);
507
508 __private_extern__ boolean_t vm_object_copy_quickly(
509 vm_object_t *_object,
510 vm_object_offset_t src_offset,
511 vm_object_size_t size,
512 boolean_t *_src_needs_copy,
513 boolean_t *_dst_needs_copy);
514
515 __private_extern__ kern_return_t vm_object_copy_strategically(
516 vm_object_t src_object,
517 vm_object_offset_t src_offset,
518 vm_object_size_t size,
519 vm_object_t *dst_object,
520 vm_object_offset_t *dst_offset,
521 boolean_t *dst_needs_copy);
522
523 __private_extern__ kern_return_t vm_object_copy_slowly(
524 vm_object_t src_object,
525 vm_object_offset_t src_offset,
526 vm_object_size_t size,
527 int interruptible,
528 vm_object_t *_result_object);
529
530 __private_extern__ vm_object_t vm_object_copy_delayed(
531 vm_object_t src_object,
532 vm_object_offset_t src_offset,
533 vm_object_size_t size,
534 boolean_t src_object_shared);
535
536
537
538 __private_extern__ kern_return_t vm_object_destroy(
539 vm_object_t object,
540 kern_return_t reason);
541
542 __private_extern__ void vm_object_pager_create(
543 vm_object_t object);
544
545 __private_extern__ void vm_object_page_map(
546 vm_object_t object,
547 vm_object_offset_t offset,
548 vm_object_size_t size,
549 vm_object_offset_t (*map_fn)
550 (void *, vm_object_offset_t),
551 void *map_fn_data);
552
553 __private_extern__ kern_return_t vm_object_upl_request(
554 vm_object_t object,
555 vm_object_offset_t offset,
556 upl_size_t size,
557 upl_t *upl,
558 upl_page_info_t *page_info,
559 unsigned int *count,
560 int flags);
561
562 __private_extern__ kern_return_t vm_object_transpose(
563 vm_object_t object1,
564 vm_object_t object2,
565 vm_object_size_t transpose_size);
566
567 __private_extern__ boolean_t vm_object_sync(
568 vm_object_t object,
569 vm_object_offset_t offset,
570 vm_object_size_t size,
571 boolean_t should_flush,
572 boolean_t should_return,
573 boolean_t should_iosync);
574
575 __private_extern__ kern_return_t vm_object_update(
576 vm_object_t object,
577 vm_object_offset_t offset,
578 vm_object_size_t size,
579 vm_object_offset_t *error_offset,
580 int *io_errno,
581 memory_object_return_t should_return,
582 int flags,
583 vm_prot_t prot);
584
585 __private_extern__ kern_return_t vm_object_lock_request(
586 vm_object_t object,
587 vm_object_offset_t offset,
588 vm_object_size_t size,
589 memory_object_return_t should_return,
590 int flags,
591 vm_prot_t prot);
592
593
594
595 __private_extern__ vm_object_t vm_object_enter(
596 memory_object_t pager,
597 vm_object_size_t size,
598 boolean_t internal,
599 boolean_t init,
600 boolean_t check_named);
601
602
603 __private_extern__ void vm_object_cluster_size(
604 vm_object_t object,
605 vm_object_offset_t *start,
606 vm_size_t *length,
607 vm_object_fault_info_t fault_info);
608
609 __private_extern__ kern_return_t vm_object_populate_with_private(
610 vm_object_t object,
611 vm_object_offset_t offset,
612 ppnum_t phys_page,
613 vm_size_t size);
614
615 extern kern_return_t adjust_vm_object_cache(
616 vm_size_t oval,
617 vm_size_t nval);
618
619 extern kern_return_t vm_object_page_op(
620 vm_object_t object,
621 vm_object_offset_t offset,
622 int ops,
623 ppnum_t *phys_entry,
624 int *flags);
625
626 extern kern_return_t vm_object_range_op(
627 vm_object_t object,
628 vm_object_offset_t offset_beg,
629 vm_object_offset_t offset_end,
630 int ops,
631 int *range);
632
633 /*
634 * Event waiting handling
635 */
636
637 #define VM_OBJECT_EVENT_INITIALIZED 0
638 #define VM_OBJECT_EVENT_PAGER_READY 1
639 #define VM_OBJECT_EVENT_PAGING_IN_PROGRESS 2
640 #define VM_OBJECT_EVENT_LOCK_IN_PROGRESS 4
641 #define VM_OBJECT_EVENT_UNCACHING 5
642 #define VM_OBJECT_EVENT_COPY_CALL 6
643 #define VM_OBJECT_EVENT_CACHING 7
644
645 #define vm_object_assert_wait(object, event, interruptible) \
646 (((object)->all_wanted |= 1 << (event)), \
647 assert_wait((event_t)((vm_offset_t)(object)+(event)),(interruptible)))
648
649 #define vm_object_wait(object, event, interruptible) \
650 (vm_object_assert_wait((object),(event),(interruptible)), \
651 vm_object_unlock(object), \
652 	 thread_block(THREAD_CONTINUE_NULL))
653
654 #define thread_sleep_vm_object(object, event, interruptible) \
655 lck_rw_sleep(&(object)->Lock, LCK_SLEEP_DEFAULT, (event_t)(event), (interruptible))
656
657 #define vm_object_sleep(object, event, interruptible) \
658 (((object)->all_wanted |= 1 << (event)), \
659 thread_sleep_vm_object((object), \
660 ((vm_offset_t)(object)+(event)), (interruptible)))
661
662 #define vm_object_wakeup(object, event) \
663 MACRO_BEGIN \
664 if ((object)->all_wanted & (1 << (event))) \
665 thread_wakeup((event_t)((vm_offset_t)(object) + (event))); \
666 (object)->all_wanted &= ~(1 << (event)); \
667 MACRO_END
668
669 #define vm_object_set_wanted(object, event) \
670 MACRO_BEGIN \
671 ((object)->all_wanted |= (1 << (event))); \
672 MACRO_END
673
674 #define vm_object_wanted(object, event) \
675 ((object)->all_wanted & (1 << (event)))
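/*
 * Illustrative pattern for the event macros above: a waiter sleeps on
 * the object until the pager is ready, and the thread that readies the
 * pager wakes any waiters.  The object lock is held on both sides;
 * vm_object_sleep() drops and retakes it internally.
 *
 *	-- waiter, object locked
 *	while (!object->pager_ready) {
 *		vm_object_sleep(object,
 *				VM_OBJECT_EVENT_PAGER_READY,
 *				THREAD_UNINT);
 *	}
 *
 *	-- initializer, object locked
 *	object->pager_ready = TRUE;
 *	vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
 */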
676
677 /*
678 * Routines implemented as macros
679 */
680 #ifdef VM_PIP_DEBUG
681 #include <libkern/OSDebug.h>
682 #define VM_PIP_DEBUG_BEGIN(object) \
683 MACRO_BEGIN \
684 if ((object)->paging_in_progress < VM_PIP_DEBUG_MAX_REFS) { \
685 int pip = (object)->paging_in_progress; \
686 (void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
687 VM_PIP_DEBUG_STACK_FRAMES); \
688 } \
689 MACRO_END
690 #else /* VM_PIP_DEBUG */
691 #define VM_PIP_DEBUG_BEGIN(object)
692 #endif /* VM_PIP_DEBUG */
693
694 #define vm_object_paging_begin(object) \
695 MACRO_BEGIN \
696 vm_object_lock_assert_exclusive((object)); \
697 assert((object)->paging_in_progress >= 0); \
698 VM_PIP_DEBUG_BEGIN((object)); \
699 (object)->paging_in_progress++; \
700 MACRO_END
701
702 #define vm_object_paging_end(object) \
703 MACRO_BEGIN \
704 vm_object_lock_assert_exclusive((object)); \
705 assert((object)->paging_in_progress > 0); \
706 if (--(object)->paging_in_progress == 0) { \
707 vm_object_wakeup(object, \
708 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
709 } \
710 MACRO_END
711
712 #define vm_object_paging_wait(object, interruptible) \
713 MACRO_BEGIN \
714 vm_object_lock_assert_exclusive((object)); \
715 while ((object)->paging_in_progress != 0) { \
716 wait_result_t _wr; \
717 \
718 _wr = vm_object_sleep((object), \
719 VM_OBJECT_EVENT_PAGING_IN_PROGRESS, \
720 (interruptible)); \
721 \
722 /*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
723 /*XXX break; */ \
724 } \
725 MACRO_END
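/*
 * Illustrative pairing of the paging_in_progress macros above: bracket
 * a pagein/pageout with begin/end, and have code that needs the object
 * quiesced (e.g. before collapse or termination) wait for the count to
 * drain.  The object lock is held in all three cases.
 *
 *	vm_object_paging_begin(object);
 *	... issue the paging operation ...
 *	vm_object_paging_end(object);	-- wakes PAGING_IN_PROGRESS waiters
 *
 *	vm_object_paging_wait(object, THREAD_UNINT);
 */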
726
727
728
729 #define OBJECT_LOCK_SHARED 0
730 #define OBJECT_LOCK_EXCLUSIVE 1
731
732 extern lck_grp_t vm_object_lck_grp;
733 extern lck_grp_attr_t vm_object_lck_grp_attr;
734 extern lck_attr_t vm_object_lck_attr;
735 extern lck_attr_t kernel_object_lck_attr;
736
737 extern vm_object_t vm_pageout_scan_wants_object;
738
739 extern void vm_object_lock(vm_object_t);
740 extern boolean_t vm_object_lock_try(vm_object_t);
741 extern void vm_object_lock_shared(vm_object_t);
742 extern boolean_t vm_object_lock_try_shared(vm_object_t);
743
744 /*
745 * Object locking macros
746 */
747
748 #define vm_object_lock_init(object) \
749 lck_rw_init(&(object)->Lock, &vm_object_lck_grp, \
750 (((object) == kernel_object || \
751 (object) == vm_submap_object) ? \
752 &kernel_object_lck_attr : \
753 &vm_object_lck_attr))
754 #define vm_object_lock_destroy(object) lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)
755
756 #define vm_object_unlock(object) lck_rw_done(&(object)->Lock)
757 #define vm_object_lock_upgrade(object) lck_rw_lock_shared_to_exclusive(&(object)->Lock)
758 #define vm_object_lock_try_scan(object) lck_rw_try_lock_exclusive(&(object)->Lock)
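/*
 * Illustrative locking sketch: take the lock shared for read-mostly
 * inspection and upgrade to exclusive before modifying the object.
 * lck_rw_lock_shared_to_exclusive() can fail; when it does, the shared
 * hold has already been dropped and the lock must be retaken.
 *
 *	vm_object_lock_shared(object);
 *	... read-only inspection ...
 *	if (!vm_object_lock_upgrade(object)) {
 *		vm_object_lock(object);		-- upgrade failed, retake
 *	}
 *	... modify the object ...
 *	vm_object_unlock(object);
 */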
759
760 /*
761 * CAUTION: the following vm_object_lock_assert_held*() macros merely
762 * check if anyone is holding the lock, but the holder may not necessarily
763 * be the caller...
764 */
765 #if DEBUG
766 #define vm_object_lock_assert_held(object) \
767 lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
768 #define vm_object_lock_assert_shared(object) \
769 lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
770 #define vm_object_lock_assert_exclusive(object) \
771 lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
772 #else /* DEBUG */
773 #define vm_object_lock_assert_held(object)
774 #define vm_object_lock_assert_shared(object)
775 #define vm_object_lock_assert_exclusive(object)
776 #endif /* DEBUG */
777
778 #define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
779 #define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
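/*
 * Example with 4K pages (PAGE_MASK == 0xFFF): an offset of 0x1234
 * truncates to 0x1000 and rounds up to 0x2000; an offset already on a
 * page boundary is left unchanged by both macros.
 */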
780
781 #endif /* _VM_VM_OBJECT_H_ */