/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm_object.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory object module definitions.
 */
#ifndef	_VM_VM_OBJECT_H_
#define	_VM_VM_OBJECT_H_

#include <mach_assert.h>
#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/lock.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>

#if	MACH_PAGEMAP
#include <vm/vm_external.h>
#endif	/* MACH_PAGEMAP */

#include <vm/vm_options.h>
struct vm_page;

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */
struct vm_object_fault_info {
	vm_size_t	cluster_size;
	vm_behavior_t	behavior;
	vm_map_offset_t	lo_offset;
	vm_map_offset_t	hi_offset;
	unsigned int
	/* boolean_t */	no_cache:1,
	/* boolean_t */	stealth:1,
	/* boolean_t */	io_sync:1,
	/* boolean_t */	cs_bypass:1,
	/* boolean_t */	mark_zf_absent:1,
		__vm_object_fault_info_unused_bits:27;
};
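
/*
 * Example (illustrative sketch, not part of the original interface):
 * filling in a fault info record before asking the cluster-size
 * machinery for a pagein cluster.  "object_size" is a hypothetical
 * local; the field values below are arbitrary.
 *
 *	struct vm_object_fault_info fault_info;
 *
 *	fault_info.cluster_size = PAGE_SIZE;
 *	fault_info.behavior = VM_BEHAVIOR_DEFAULT;
 *	fault_info.lo_offset = 0;
 *	fault_info.hi_offset = object_size;
 *	fault_info.no_cache = FALSE;
 *	fault_info.stealth = FALSE;
 *	fault_info.io_sync = FALSE;
 *	fault_info.cs_bypass = FALSE;
 *	fault_info.mark_zf_absent = FALSE;
 */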
#define	vo_size			vo_un1.vou_size
#define	vo_cache_pages_to_scan	vo_un1.vou_cache_pages_to_scan
#define	vo_shadow_offset	vo_un2.vou_shadow_offset
#define	vo_cache_ts		vo_un2.vou_cache_ts
struct vm_object {
	queue_head_t		memq;		/* Resident memory */
	lck_rw_t		Lock;		/* Synchronization */

	union {
		vm_object_size_t vou_size;	/* Object size (only valid if internal) */
		int		vou_cache_pages_to_scan;
						/* pages yet to be visited in an
						 * external object in cache
						 */
	} vo_un1;

	struct vm_page		*memq_hint;
	int			ref_count;	/* Number of references */
#if	TASK_SWAPPER
	int			res_count;	/* Residency references (swap)*/
#endif	/* TASK_SWAPPER */
	unsigned int		resident_page_count;
						/* number of resident pages */
	unsigned int		wired_page_count; /* number of wired pages */
	unsigned int		reusable_page_count;
	struct vm_object	*copy;		/* Object that should receive
						 * a copy of my changed pages,
						 * for copy_delay, or just the
						 * temporary object that
						 * shadows this object, for
						 * copy_call.
						 */
	struct vm_object	*shadow;	/* My shadow */

	union {
		vm_object_offset_t vou_shadow_offset;	/* Offset into shadow */
		clock_sec_t	vou_cache_ts;	/* age of an external object
						 * present in cache
						 */
	} vo_un2;

	memory_object_t		pager;		/* Where to get data */
	vm_object_offset_t	paging_offset;	/* Offset into memory object */
	memory_object_control_t	pager_control;	/* Where data comes back */
	memory_object_copy_strategy_t
				copy_strategy;	/* How to handle data copy */

	short			paging_in_progress;
						/* The memory object ports are
						 * being used (e.g., for pagein
						 * or pageout) -- don't change
						 * any of these fields (i.e.,
						 * don't collapse, destroy or
						 * terminate)
						 */
	short			activity_in_progress;

	unsigned int
	/* boolean_t array */	all_wanted:11,	/* Bit array of "want to be
						 * awakened" notations.  See
						 * VM_OBJECT_EVENT_* items
						 * below */
	/* boolean_t */		pager_created:1,	/* Has pager been created? */
	/* boolean_t */		pager_initialized:1,	/* Are fields ready to use? */
	/* boolean_t */		pager_ready:1,		/* Will pager take requests? */
	/* boolean_t */		pager_trusted:1,/* The pager for this object
						 * is trusted.  This is true for
						 * all internal objects (backed
						 * by the default pager)
						 */
	/* boolean_t */		can_persist:1,	/* The kernel may keep the data
						 * for this object (and rights
						 * to the memory object) after
						 * all address map references
						 * are deallocated?
						 */
	/* boolean_t */		internal:1,	/* Created by the kernel (and
						 * therefore, managed by the
						 * default memory manager)
						 */
	/* boolean_t */		temporary:1,	/* Permanent objects may be
						 * changed externally by the
						 * memory manager, and changes
						 * made in memory must be
						 * reflected back to the memory
						 * manager.  Temporary objects
						 * lack both of these
						 * characteristics.
						 */
	/* boolean_t */		private:1,	/* magic device_pager object,
						 * holds private pages only */
	/* boolean_t */		pageout:1,	/* pageout object. contains
						 * private pages that refer to
						 * a real memory object. */
	/* boolean_t */		alive:1,	/* Not yet terminated */
	/* boolean_t */		purgable:2,	/* Purgable state.  See
						 * VM_PURGABLE_*
						 */
	/* boolean_t */		shadowed:1,	/* Shadow may exist */
	/* boolean_t */		silent_overwrite:1,
						/* Allow full page overwrite
						 * without data_request if
						 * page is absent */
	/* boolean_t */		advisory_pageout:1,
						/* Instead of sending page
						 * via OOL, just notify
						 * pager that the kernel
						 * wants to discard it, page
						 * remains in object */
	/* boolean_t */		true_share:1,
						/* This object is mapped
						 * in more than one place
						 * and hence cannot be
						 * coalesced */
	/* boolean_t */		terminating:1,
						/* Allows vm_object_lookup
						 * and vm_object_deallocate
						 * to special case their
						 * behavior when they are
						 * called as a result of
						 * page cleaning during
						 * object termination
						 */
	/* boolean_t */		named:1,	/* Enforces an internal
						 * naming convention, by
						 * calling the right routines
						 * for allocation and
						 * destruction; UBC references
						 * against the vm_object are
						 * checked.
						 */
	/* boolean_t */		shadow_severed:1,
						/* When a permanent object
						 * backing a COW goes away
						 * unexpectedly.  This bit
						 * allows vm_fault to return
						 * an error rather than a
						 * zero filled page.
						 */
	/* boolean_t */		phys_contiguous:1,
						/* Memory is wired and
						 * guaranteed physically
						 * contiguous.  However
						 * it is not device memory
						 * and obeys normal virtual
						 * memory rules w.r.t pmap
						 * access bits.
						 */
	/* boolean_t */		nophyscache:1;
						/* When mapped at the
						 * pmap level, don't allow
						 * primary caching. (for
						 * I/O)
						 */
	queue_chain_t		cached_list;	/* Attachment point for the
						 * list of objects cached as a
						 * result of their can_persist
						 * value
						 */

	queue_head_t		msr_q;		/* memory object synchronise
						 * request queue */

	/*
	 * the following fields are not protected by any locks;
	 * they are updated via atomic compare and swap
	 */
	vm_object_offset_t	last_alloc;	/* last allocation offset */
	int			sequential;	/* sequential access size */

	uint32_t		pages_created;
#if	MACH_PAGEMAP
	vm_external_map_t	existence_map;	/* bitmap of pages written to
						 * backing storage */
#endif	/* MACH_PAGEMAP */
	vm_offset_t		cow_hint;	/* last page present in     */
						/* shadow but not in object */
#if	MACH_ASSERT
	struct vm_object	*paging_object;	/* object into which pages to be
						 * swapped out are temporarily
						 * put
						 */
#endif	/* MACH_ASSERT */

	/* hold object lock when altering */
	unsigned int
		wimg_bits:8,		/* cache WIMG bits */
		code_signed:1,		/* pages are signed and should be
					 * validated; the signatures are
					 * stored with the pager */
		hashed:1,		/* object/pager entered in hash */
		transposed:1,		/* object was transposed with another */
		mapping_in_progress:1,	/* pager being mapped/unmapped */
		volatile_empty:1,	/* volatile object becomes empty after reaching 0 ref count */
		volatile_fault:1,	/* volatile object can fault if empty */
		all_reusable:1,		/* all pages are marked as reusable */
		blocked_access:1,	/* access to object blocked (temporarily) */
		set_cache_attr:1,	/* pages need to be set with the cache attributes */
		__object2_unused_bits:15;	/* for expansion */

	uint32_t		scan_collisions;

#if	UPL_DEBUG
	queue_head_t		uplq;		/* List of outstanding upls */
#endif /* UPL_DEBUG */
#ifdef	VM_PIP_DEBUG
	/*
	 * Keep track of the stack traces for the first holders
	 * of a "paging_in_progress" reference for this VM object.
	 */
#define VM_PIP_DEBUG_STACK_FRAMES	25	/* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS		10	/* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif	/* VM_PIP_DEBUG */

	queue_chain_t		objq;	/* object queue - currently used for purgable queues */
};
#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)				\
	((object)->volatile_fault &&					\
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||			\
	  (object)->purgable == VM_PURGABLE_EMPTY))
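
/*
 * Example (illustrative sketch, not part of the original interface):
 * a fault path could use the predicate above to return an error for
 * a volatile or already-emptied purgeable object instead of handing
 * back a zero-filled page.
 *
 *	if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object))
 *		return KERN_MEMORY_ERROR;
 */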
#define VM_PAGE_REMOVE(page)						\
	MACRO_BEGIN							\
	vm_page_t __page = (page);					\
	vm_object_t __object = __page->object;				\
	if (__page == __object->memq_hint) {				\
		vm_page_t	__new_hint;				\
		queue_entry_t	__qe;					\
		__qe = queue_next(&__page->listq);			\
		if (queue_end(&__object->memq, __qe)) {			\
			__qe = queue_prev(&__page->listq);		\
			if (queue_end(&__object->memq, __qe)) {		\
				__qe = NULL;				\
			}						\
		}							\
		__new_hint = (vm_page_t) __qe;				\
		__object->memq_hint = __new_hint;			\
	}								\
	queue_remove(&__object->memq, __page, vm_page_t, listq);	\
	MACRO_END
#define VM_PAGE_INSERT(page, object)					\
	MACRO_BEGIN							\
	vm_page_t __page = (page);					\
	vm_object_t __object = (object);				\
	queue_enter(&__object->memq, __page, vm_page_t, listq);	\
	__object->memq_hint = __page;					\
	MACRO_END
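
/*
 * Example (illustrative sketch, not part of the original interface):
 * moving a page between two objects while keeping each object's
 * memq_hint consistent.  Both object locks are assumed to be held
 * exclusively, since the queue updates are not atomic.
 *
 *	VM_PAGE_REMOVE(page);
 *	page->object = new_object;
 *	VM_PAGE_INSERT(page, new_object);
 */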
__private_extern__
vm_object_t	kernel_object;		/* the single kernel object */

__private_extern__
unsigned int	vm_object_absent_max;	/* maximum number of absent pages
					 * at a time for each object */
# define	VM_MSYNC_INITIALIZED		0
# define	VM_MSYNC_SYNCHRONIZING		1
# define	VM_MSYNC_DONE			2

struct msync_req {
	queue_chain_t		msr_q;		/* object request queue */
	queue_chain_t		req_q;		/* vm_msync request queue */
	unsigned int		flag;
	vm_object_offset_t	offset;
	vm_object_size_t	length;
	vm_object_t		object;		/* back pointer */
	decl_lck_mtx_data(,	msync_req_lock)	/* Lock for this structure */
};

typedef struct msync_req	*msync_req_t;
#define MSYNC_REQ_NULL		((msync_req_t) 0)
extern lck_grp_t	vm_map_lck_grp;
extern lck_attr_t	vm_map_lck_attr;
/*
 * Macros to allocate and free msync_reqs
 */
#define msync_req_alloc(msr)						\
	MACRO_BEGIN							\
	(msr) = (msync_req_t)kalloc(sizeof(struct msync_req));		\
	lck_mtx_init(&(msr)->msync_req_lock, &vm_map_lck_grp, &vm_map_lck_attr); \
	(msr)->flag = VM_MSYNC_INITIALIZED;				\
	MACRO_END

#define msync_req_free(msr)						\
	(kfree((msr), sizeof(struct msync_req)))

#define msr_lock(msr)	lck_mtx_lock(&(msr)->msync_req_lock)
#define msr_unlock(msr)	lck_mtx_unlock(&(msr)->msync_req_lock)
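
/*
 * Example (illustrative sketch, not part of the original interface):
 * typical life cycle of an msync request.  "offset", "length" and
 * "object" are assumed locals describing the range being synced.
 *
 *	msync_req_t msr;
 *
 *	msync_req_alloc(msr);
 *	msr_lock(msr);
 *	msr->offset = offset;
 *	msr->length = length;
 *	msr->object = object;
 *	msr_unlock(msr);
 *	...
 *	msync_req_free(msr);
 */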
/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void		vm_object_bootstrap(void) __attribute__((section("__TEXT, initcode")));

__private_extern__ void		vm_object_init(void);

__private_extern__ void		vm_object_init_lck_grp(void);

__private_extern__ void		vm_object_reaper_init(void);

__private_extern__ vm_object_t	vm_object_allocate(
					vm_object_size_t	size);
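
/*
 * Example (illustrative sketch, not part of the original interface):
 * allocating an anonymous object large enough for 16 pages and
 * dropping the reference returned by vm_object_allocate() when done.
 *
 *	vm_object_t object;
 *
 *	object = vm_object_allocate((vm_object_size_t)(16 * PAGE_SIZE));
 *	...
 *	vm_object_deallocate(object);
 */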
__private_extern__ void		_vm_object_allocate(
					vm_object_size_t	size,
					vm_object_t		object);

#if	TASK_SWAPPER

__private_extern__ void		vm_object_res_reference(
					vm_object_t		object);
__private_extern__ void		vm_object_res_deallocate(
					vm_object_t		object);
#define	VM_OBJ_RES_INCR(object)	(object)->res_count++
#define	VM_OBJ_RES_DECR(object)	(object)->res_count--

#else	/* TASK_SWAPPER */

#define VM_OBJ_RES_INCR(object)
#define VM_OBJ_RES_DECR(object)
#define vm_object_res_reference(object)
#define vm_object_res_deallocate(object)

#endif	/* TASK_SWAPPER */
#define vm_object_reference_locked(object)		\
	MACRO_BEGIN					\
	vm_object_t RLObject = (object);		\
	vm_object_lock_assert_exclusive(object);	\
	assert((RLObject)->ref_count > 0);		\
	(RLObject)->ref_count++;			\
	assert((RLObject)->ref_count > 1);		\
	vm_object_res_reference(RLObject);		\
	MACRO_END

#define vm_object_reference_shared(object)				\
	MACRO_BEGIN							\
	vm_object_t RLObject = (object);				\
	vm_object_lock_assert_shared(object);				\
	assert((RLObject)->ref_count > 0);				\
	OSAddAtomic(1, &(RLObject)->ref_count);				\
	assert((RLObject)->ref_count > 1);				\
	/* XXX we would need an atomic version of the following ... */	\
	vm_object_res_reference(RLObject);				\
	MACRO_END
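
/*
 * Example (illustrative sketch, not part of the original interface):
 * taking an extra reference while holding the object lock shared;
 * the count itself is bumped atomically, so a shared hold suffices.
 *
 *	vm_object_lock_shared(object);
 *	vm_object_reference_shared(object);
 *	vm_object_unlock(object);
 *	...
 *	vm_object_deallocate(object);
 */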
__private_extern__ void		vm_object_reference(
					vm_object_t	object);

#if	!MACH_ASSERT

#define vm_object_reference(object)			\
	MACRO_BEGIN					\
	vm_object_t RObject = (object);			\
	if (RObject) {					\
		vm_object_lock_shared(RObject);		\
		vm_object_reference_shared(RObject);	\
		vm_object_unlock(RObject);		\
	}						\
	MACRO_END

#endif	/* MACH_ASSERT */
__private_extern__ void		vm_object_deallocate(
					vm_object_t	object);

__private_extern__ kern_return_t vm_object_release_name(
					vm_object_t	object,
					int		flags);

__private_extern__ void		vm_object_pmap_protect(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot);

__private_extern__ void		vm_object_page_remove(
					vm_object_t		object,
					vm_object_offset_t	start,
					vm_object_offset_t	end);

__private_extern__ void		vm_object_deactivate_pages(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		kill_page,
					boolean_t		reusable_page);
__private_extern__ void		vm_object_reuse_pages(
					vm_object_t		object,
					vm_object_offset_t	start_offset,
					vm_object_offset_t	end_offset,
					boolean_t		allow_partial_reuse);
__private_extern__ void		vm_object_purge(
					vm_object_t		object);

__private_extern__ kern_return_t vm_object_purgable_control(
					vm_object_t		object,
					vm_purgable_t		control,
					int			*state);
__private_extern__ boolean_t	vm_object_coalesce(
					vm_object_t		prev_object,
					vm_object_t		next_object,
					vm_object_offset_t	prev_offset,
					vm_object_offset_t	next_offset,
					vm_object_size_t	prev_size,
					vm_object_size_t	next_size);
__private_extern__ boolean_t	vm_object_shadow(
					vm_object_t		*object,
					vm_object_offset_t	*offset,
					vm_object_size_t	length);
__private_extern__ void		vm_object_collapse(
					vm_object_t		object,
					vm_object_offset_t	offset,
					boolean_t		can_bypass);
__private_extern__ boolean_t	vm_object_copy_quickly(
					vm_object_t		*_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		*_src_needs_copy,
					boolean_t		*_dst_needs_copy);
__private_extern__ kern_return_t vm_object_copy_strategically(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					vm_object_t		*dst_object,
					vm_object_offset_t	*dst_offset,
					boolean_t		*dst_needs_copy);
__private_extern__ kern_return_t vm_object_copy_slowly(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		interruptible,
					vm_object_t		*_result_object);
__private_extern__ vm_object_t	vm_object_copy_delayed(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		src_object_shared);
__private_extern__ kern_return_t vm_object_destroy(
					vm_object_t		object,
					kern_return_t		reason);

__private_extern__ void		vm_object_pager_create(
					vm_object_t		object);
__private_extern__ void		vm_object_page_map(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	(*map_fn)
						(void *, vm_object_offset_t),
					void			*map_fn_data);
__private_extern__ kern_return_t vm_object_upl_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					upl_size_t		size,
					upl_t			*upl,
					upl_page_info_t		*page_info,
					unsigned int		*count,
					int			flags);

__private_extern__ kern_return_t vm_object_transpose(
					vm_object_t		object1,
					vm_object_t		object2,
					vm_object_size_t	transpose_size);
__private_extern__ boolean_t	vm_object_sync(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		should_flush,
					boolean_t		should_return,
					boolean_t		should_iosync);
__private_extern__ kern_return_t vm_object_update(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	*error_offset,
					int			*io_errno,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);
__private_extern__ kern_return_t vm_object_lock_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);
__private_extern__ vm_object_t	vm_object_enter(
					memory_object_t		pager,
					vm_object_size_t	size,
					boolean_t		internal,
					boolean_t		init,
					boolean_t		check_named);
__private_extern__ void		vm_object_cluster_size(
					vm_object_t		object,
					vm_object_offset_t	*start,
					vm_size_t		*length,
					vm_object_fault_info_t	fault_info,
					uint32_t		*io_streaming);
__private_extern__ kern_return_t vm_object_populate_with_private(
					vm_object_t		object,
					vm_object_offset_t	offset,
					ppnum_t			phys_page,
					vm_size_t		size);
__private_extern__ void		vm_object_change_wimg_mode(
					vm_object_t		object,
					unsigned int		wimg_mode);
extern kern_return_t	adjust_vm_object_cache(
				vm_size_t	oval,
				vm_size_t	nval);
extern kern_return_t	vm_object_page_op(
				vm_object_t		object,
				vm_object_offset_t	offset,
				int			ops,
				ppnum_t			*phys_entry,
				int			*flags);

extern kern_return_t	vm_object_range_op(
				vm_object_t		object,
				vm_object_offset_t	offset_beg,
				vm_object_offset_t	offset_end,
				int			ops,
				uint32_t		*range);
__private_extern__ void		vm_object_reap_pages(
					vm_object_t	object,
					int		reap_type);
#define REAP_REAP	0
#define	REAP_TERMINATE	1
#define REAP_PURGEABLE	2
#define REAP_DATA_FLUSH	3
#if CONFIG_FREEZE

__private_extern__ kern_return_t
vm_object_pack(
	unsigned int		*purgeable_count,
	unsigned int		*wired_count,
	unsigned int		*clean_count,
	unsigned int		*dirty_count,
	boolean_t		*shared,
	vm_object_t		src_object,
	vm_object_t		dst_object,
	void			**table,
	vm_object_offset_t	*offset);

__private_extern__ void
vm_object_pack_pages(
	unsigned int		*wired_count,
	unsigned int		*clean_count,
	unsigned int		*dirty_count,
	vm_object_t		src_object,
	vm_object_t		dst_object,
	void			*table,
	vm_object_offset_t	*offset);

__private_extern__ void
vm_object_pageout(
	vm_object_t	object);

__private_extern__ kern_return_t
vm_object_pagein(
	vm_object_t	object);

__private_extern__ void
vm_object_unpack(
	vm_object_t	object,
	void		**table);

#endif /* CONFIG_FREEZE */
/*
 *	Event waiting handling
 */

#define	VM_OBJECT_EVENT_INITIALIZED		0
#define	VM_OBJECT_EVENT_PAGER_READY		1
#define	VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
#define	VM_OBJECT_EVENT_MAPPING_IN_PROGRESS	3
#define	VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
#define	VM_OBJECT_EVENT_UNCACHING		5
#define	VM_OBJECT_EVENT_COPY_CALL		6
#define	VM_OBJECT_EVENT_CACHING			7
#define	VM_OBJECT_EVENT_UNBLOCKED		8
#define	VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS	9
#define	vm_object_assert_wait(object, event, interruptible)		\
	(((object)->all_wanted |= 1 << (event)),			\
	 assert_wait((event_t)((vm_offset_t)(object)+(event)),(interruptible)))

#define	vm_object_wait(object, event, interruptible)			\
	(vm_object_assert_wait((object),(event),(interruptible)),	\
	vm_object_unlock(object),					\
	thread_block(THREAD_CONTINUE_NULL))

#define thread_sleep_vm_object(object, event, interruptible)		\
	lck_rw_sleep(&(object)->Lock, LCK_SLEEP_DEFAULT, (event_t)(event), (interruptible))

#define vm_object_sleep(object, event, interruptible)			\
	(((object)->all_wanted |= 1 << (event)),			\
	 thread_sleep_vm_object((object),				\
		((vm_offset_t)(object)+(event)), (interruptible)))

#define	vm_object_wakeup(object, event)					\
	MACRO_BEGIN							\
	if ((object)->all_wanted & (1 << (event)))			\
		thread_wakeup((event_t)((vm_offset_t)(object) + (event))); \
	(object)->all_wanted &= ~(1 << (event));			\
	MACRO_END

#define	vm_object_set_wanted(object, event)				\
	MACRO_BEGIN							\
	((object)->all_wanted |= (1 << (event)));			\
	MACRO_END

#define	vm_object_wanted(object, event)					\
	((object)->all_wanted & (1 << (event)))
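
/*
 * Example (illustrative sketch, not part of the original interface):
 * the event protocol in use.  A waiter, holding the object lock,
 * records its interest in the all_wanted bit array and sleeps; a
 * waker clears the bit and wakes any sleepers.
 *
 * Waiter (object locked; the lock is dropped while asleep and
 * reacquired before vm_object_sleep() returns):
 *
 *	while (!object->pager_ready)
 *		vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *				THREAD_UNINT);
 *
 * Waker (object locked):
 *
 *	object->pager_ready = TRUE;
 *	vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
 */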
/*
 *	Routines implemented as macros
 */
#ifdef	VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)					\
	MACRO_BEGIN							\
	int pip = ((object)->paging_in_progress +			\
		   (object)->activity_in_progress);			\
	if (pip < VM_PIP_DEBUG_MAX_REFS) {				\
		(void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
				   VM_PIP_DEBUG_STACK_FRAMES);		\
	}								\
	MACRO_END
#else	/* VM_PIP_DEBUG */
#define	VM_PIP_DEBUG_BEGIN(object)
#endif	/* VM_PIP_DEBUG */
#define	vm_object_activity_begin(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->paging_in_progress >= 0);			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->activity_in_progress++;				\
	MACRO_END

#define	vm_object_activity_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->activity_in_progress > 0);			\
	(object)->activity_in_progress--;				\
	if ((object)->paging_in_progress == 0 &&			\
	    (object)->activity_in_progress == 0)			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_IN_PROGRESS);	\
	MACRO_END
#define	vm_object_paging_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->paging_in_progress >= 0);			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->paging_in_progress++;					\
	MACRO_END

#define	vm_object_paging_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->paging_in_progress > 0);			\
	(object)->paging_in_progress--;					\
	if ((object)->paging_in_progress == 0) {			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
		if ((object)->activity_in_progress == 0)		\
			vm_object_wakeup((object),			\
					 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	}								\
	MACRO_END
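
/*
 * Example (illustrative sketch, not part of the original interface):
 * holding a paging reference across a pager operation so that the
 * object cannot be collapsed, destroyed or terminated underneath it.
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	vm_object_unlock(object);
 *	...issue the pagein or pageout request...
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);
 *	vm_object_unlock(object);
 */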
#define	vm_object_paging_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0 ||			\
	       (object)->activity_in_progress != 0) {			\
		wait_result_t _wr;					\
		_wr = vm_object_sleep((object),				\
				VM_OBJECT_EVENT_PAGING_IN_PROGRESS,	\
				(interruptible));			\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	MACRO_END

#define	vm_object_paging_only_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0) {			\
		wait_result_t _wr;					\
		_wr = vm_object_sleep((object),				\
				VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\
				(interruptible));			\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	MACRO_END
#define vm_object_mapping_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert(! (object)->mapping_in_progress);			\
	(object)->mapping_in_progress = TRUE;				\
	MACRO_END

#define vm_object_mapping_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->mapping_in_progress);				\
	(object)->mapping_in_progress = FALSE;				\
	vm_object_wakeup((object),					\
			 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);		\
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->mapping_in_progress) {				\
		wait_result_t _wr;					\
		_wr = vm_object_sleep((object),				\
				      VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
				      (interruptible));			\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	assert(!(object)->mapping_in_progress);				\
	MACRO_END
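
/*
 * Example (illustrative sketch, not part of the original interface):
 * serializing a pager map/unmap against other users of the object.
 *
 *	vm_object_lock(object);
 *	vm_object_mapping_wait(object, THREAD_UNINT);
 *	vm_object_mapping_begin(object);
 *	vm_object_unlock(object);
 *	...map or unmap the pager...
 *	vm_object_lock(object);
 *	vm_object_mapping_end(object);
 *	vm_object_unlock(object);
 */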
#define OBJECT_LOCK_SHARED	0
#define OBJECT_LOCK_EXCLUSIVE	1

extern lck_grp_t	vm_object_lck_grp;
extern lck_grp_attr_t	vm_object_lck_grp_attr;
extern lck_attr_t	vm_object_lck_attr;
extern lck_attr_t	kernel_object_lck_attr;

extern vm_object_t	vm_pageout_scan_wants_object;
extern void		vm_object_lock(vm_object_t);
extern boolean_t	vm_object_lock_try(vm_object_t);
extern boolean_t	_vm_object_lock_try(vm_object_t);
extern boolean_t	vm_object_lock_avoid(vm_object_t);
extern void		vm_object_lock_shared(vm_object_t);
extern boolean_t	vm_object_lock_try_shared(vm_object_t);
/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)					\
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,		\
		    (((object) == kernel_object ||			\
		      (object) == vm_submap_object) ?			\
		     &kernel_object_lck_attr :				\
		     &vm_object_lck_attr))
#define vm_object_lock_destroy(object)	lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_unlock(object)	lck_rw_done(&(object)->Lock)
#define vm_object_lock_upgrade(object)	lck_rw_lock_shared_to_exclusive(&(object)->Lock)
#define vm_object_lock_try_scan(object)	_vm_object_lock_try(object)
/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#else  /* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#endif /* MACH_ASSERT || DEBUG */
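
/*
 * Example (illustrative sketch, not part of the original interface):
 * a reader that starts with the lock held shared and upgrades to
 * exclusive before modifying the object.  If the upgrade fails the
 * lock was dropped entirely and must be retaken exclusively.
 *
 *	vm_object_lock_shared(object);
 *	...read-only inspection...
 *	if (!vm_object_lock_upgrade(object))
 *		vm_object_lock(object);
 *	vm_object_lock_assert_exclusive(object);
 *	...modify the object...
 *	vm_object_unlock(object);
 */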
#define vm_object_round_page(x)	(((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x)	((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
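
/*
 * Example: with 4 KB pages (PAGE_MASK == 0xFFF),
 *
 *	vm_object_round_page(0x1001) == 0x2000
 *	vm_object_trunc_page(0x1FFF) == 0x1000
 *
 * i.e. offsets are rounded up, or truncated down, to the nearest
 * page boundary.
 */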
extern void	vm_object_cache_add(vm_object_t);
extern void	vm_object_cache_remove(vm_object_t);
extern int	vm_object_cache_evict(int, int);

#endif	/* _VM_VM_OBJECT_H_ */