/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory object module definitions.
 */
#ifndef	_VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <mach_assert.h>
#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/lock.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/vm_external.h>

#include <vm/vm_options.h>
struct vm_shared_region_slide_info;

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */

struct vm_object_fault_info {
	vm_size_t		cluster_size;
	vm_behavior_t		behavior;
	vm_map_offset_t		lo_offset;
	vm_map_offset_t		hi_offset;
	unsigned int
	/* boolean_t */		no_cache:1,
	/* boolean_t */		stealth:1,
	/* boolean_t */		io_sync:1,
	/* boolean_t */		cs_bypass:1,
	/* boolean_t */		mark_zf_absent:1,
	/* boolean_t */		batch_pmap_op:1,
		__vm_object_fault_info_unused_bits:26;
};
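
/*
 * Editor's sketch (not part of the original header): how a fault path
 * might fill in a vm_object_fault_info before cluster sizing runs.
 * The example_* name and the field values are illustrative assumptions.
 */
#if 0 /* illustrative only */
static void
example_init_fault_info(struct vm_object_fault_info *fi)
{
	fi->cluster_size = 0;			/* let cluster sizing decide */
	fi->behavior = VM_BEHAVIOR_DEFAULT;	/* no access-pattern hint */
	fi->lo_offset = 0;			/* hypothetical mapping bounds */
	fi->hi_offset = (vm_map_offset_t)(64 * 1024);
	fi->no_cache = FALSE;
	fi->stealth = FALSE;
	fi->io_sync = FALSE;
	fi->cs_bypass = FALSE;
	fi->mark_zf_absent = FALSE;
	fi->batch_pmap_op = FALSE;
}
#endif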
#define vo_size				vo_un1.vou_size
#define vo_cache_pages_to_scan		vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset		vo_un2.vou_shadow_offset
#define vo_cache_ts			vo_un2.vou_cache_ts
#define vo_purgeable_owner		vo_un2.vou_purgeable_owner
#define vo_slide_info			vo_un2.vou_slide_info
struct vm_object {
	queue_head_t		memq;		/* Resident memory */
	lck_rw_t		Lock;		/* Synchronization */

	union {
		vm_object_size_t vou_size;	/* Object size (only valid
						 * if internal) */
		int		vou_cache_pages_to_scan;
						/* pages yet to be visited in an
						 * external object in cache */
	} vo_un1;

	struct vm_page		*memq_hint;
	int			ref_count;	/* Number of references */
#if	TASK_SWAPPER
	int			res_count;	/* Residency references (swap) */
#endif	/* TASK_SWAPPER */
	unsigned int		resident_page_count;
						/* number of resident pages */
	unsigned int		wired_page_count;
						/* number of wired pages */
	unsigned int		reusable_page_count;

	struct vm_object	*copy;		/* Object that should receive
						 * a copy of my changed pages,
						 * for copy_delay, or just the
						 * temporary object that
						 * shadows this object, for
						 * copy_call. */
	struct vm_object	*shadow;	/* My shadow */

	union {
		vm_object_offset_t vou_shadow_offset;
						/* Offset into shadow */
		clock_sec_t	vou_cache_ts;	/* age of an external object
						 * present in cache */
		task_t		vou_purgeable_owner;
						/* If the purgeable bits below
						 * are set to volatile/empty,
						 * this is the task that owns
						 * this purgeable object. */
		struct vm_shared_region_slide_info *vou_slide_info;
	} vo_un2;
	memory_object_t		pager;		/* Where to get data */
	vm_object_offset_t	paging_offset;	/* Offset into memory object */
	memory_object_control_t	pager_control;	/* Where data comes back */

	memory_object_copy_strategy_t
				copy_strategy;	/* How to handle data copy */

	short			paging_in_progress;
						/* The memory object ports are
						 * being used (e.g., for pagein
						 * or pageout) -- don't change
						 * any of these fields (i.e.,
						 * don't collapse, destroy or
						 * terminate). */
	short			activity_in_progress;
	unsigned int
	/* boolean_t array */	all_wanted:11,	/* Bit array of "want to be
						 * awakened" notations.  See
						 * VM_OBJECT_EVENT_* items
						 * below. */
	/* boolean_t */		pager_created:1,
						/* Has pager been created? */
	/* boolean_t */		pager_initialized:1,
						/* Are fields ready to use? */
	/* boolean_t */		pager_ready:1,	/* Will pager take requests? */
	/* boolean_t */		pager_trusted:1,
						/* The pager for this object
						 * is trusted.  This is true
						 * for all internal objects
						 * (backed by the default
						 * pager). */
	/* boolean_t */		can_persist:1,	/* The kernel may keep the data
						 * for this object (and rights
						 * to the memory object) after
						 * all address map references
						 * are deallocated. */
	/* boolean_t */		internal:1,	/* Created by the kernel (and
						 * therefore, managed by the
						 * default memory manager). */
	/* boolean_t */		temporary:1,	/* Permanent objects may be
						 * changed externally by the
						 * memory manager, and changes
						 * made in memory must be
						 * reflected back to the memory
						 * manager.  Temporary objects
						 * lack both of these
						 * characteristics. */
	/* boolean_t */		private:1,	/* magic device_pager object,
						 * holds private pages only */
	/* boolean_t */		pageout:1,	/* pageout object.  contains
						 * private pages that refer to
						 * a real memory object. */
	/* boolean_t */		alive:1,	/* Not yet terminated */
	/* boolean_t */		purgable:2,	/* Purgable state.  See
						 * VM_PURGABLE_*. */
	/* boolean_t */		purgeable_when_ripe:1,
						/* Purgeable when a token
						 * becomes ripe. */
	/* boolean_t */		shadowed:1,	/* Shadow may exist */
	/* boolean_t */		advisory_pageout:1,
						/* Instead of sending page
						 * via OOL, just notify
						 * pager that the kernel
						 * wants to discard it; page
						 * remains in object */
	/* boolean_t */		true_share:1,
						/* This object is mapped
						 * in more than one place
						 * and hence cannot be
						 * coalesced. */
	/* boolean_t */		terminating:1,
						/* Allows vm_object_lookup
						 * and vm_object_deallocate
						 * to special case their
						 * behavior when they are
						 * called as a result of
						 * page cleaning during
						 * object termination. */
	/* boolean_t */		named:1,	/* Enforces an internal naming
						 * convention by calling the
						 * right routines; on
						 * destruction, UBC references
						 * against the vm_object are
						 * checked. */
	/* boolean_t */		shadow_severed:1,
						/* When a permanent object
						 * backing a COW goes away
						 * unexpectedly.  This bit
						 * allows vm_fault to return
						 * an error rather than a
						 * zero-filled page. */
	/* boolean_t */		phys_contiguous:1,
						/* Memory is wired and
						 * guaranteed physically
						 * contiguous.  However,
						 * it is not device memory
						 * and obeys normal virtual
						 * memory rules w.r.t. pmap
						 * access bits. */
	/* boolean_t */		nophyscache:1;	/* When mapped at the
						 * pmap level, don't allow
						 * primary caching (for I/O). */
	queue_chain_t		cached_list;	/* Attachment point for the
						 * list of objects cached as a
						 * result of their can_persist
						 * value. */

	queue_head_t		msr_q;		/* memory object synchronise
						 * request queue */

	/*
	 * the following fields are not protected by any locks;
	 * they are updated via atomic compare and swap
	 */
	vm_object_offset_t	last_alloc;	/* last allocation offset */
	int			sequential;	/* sequential access size */

	uint32_t		pages_created;
	uint32_t		pages_used;
#if	MACH_PAGEMAP
	vm_external_map_t	existence_map;	/* bitmap of pages written to
						 * backing storage */
#endif	/* MACH_PAGEMAP */
	vm_offset_t		cow_hint;	/* last page present in     */
						/* shadow but not in object */
#if	MACH_ASSERT
	struct vm_object	*paging_object;	/* object in which pages to be
						 * swapped out are temporarily
						 * put */
#endif
	/* hold object lock when altering */
	unsigned int
		wimg_bits:8,		/* cache WIMG bits */
		code_signed:1,		/* pages are signed and should be
					 * validated; the signatures are
					 * stored with the pager */
		hashed:1,		/* object/pager entered in hash */
		transposed:1,		/* object was transposed with another */
		mapping_in_progress:1,	/* pager being mapped/unmapped */
		volatile_empty:1,
		volatile_fault:1,
		all_reusable:1,
		blocked_access:1,
		set_cache_attr:1,
		object_slid:1,
		purgeable_queue_type:2,
		purgeable_queue_group:3,
		__object2_unused_bits:9;	/* for expansion */

	uint32_t		scan_collisions;

#if	UPL_DEBUG
	queue_head_t		uplq;		/* List of outstanding upls */
#endif /* UPL_DEBUG */

#ifdef	VM_PIP_DEBUG
	/*
	 * Keep track of the stack traces for the first holders
	 * of a "paging_in_progress" reference for this VM object.
	 */
#define VM_PIP_DEBUG_STACK_FRAMES	25	/* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS		10	/* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif	/* VM_PIP_DEBUG */

	queue_chain_t		objq;	/* object queue - currently used for purgable queues */
};

#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)				\
	((object)->volatile_fault &&					\
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||			\
	  (object)->purgable == VM_PURGABLE_EMPTY))
#define VM_PAGE_REMOVE(page)						\
	MACRO_BEGIN							\
	vm_page_t __page = (page);					\
	vm_object_t __object = __page->object;				\
	if (__page == __object->memq_hint) {				\
		vm_page_t	__new_hint;				\
		queue_entry_t	__qe;					\
		__qe = queue_next(&__page->listq);			\
		if (queue_end(&__object->memq, __qe)) {			\
			__qe = queue_prev(&__page->listq);		\
			if (queue_end(&__object->memq, __qe)) {		\
				__qe = NULL;				\
			}						\
		}							\
		__new_hint = (vm_page_t) __qe;				\
		__object->memq_hint = __new_hint;			\
	}								\
	queue_remove(&__object->memq, __page, vm_page_t, listq);	\
	MACRO_END
#define VM_PAGE_INSERT(page, object)					\
	MACRO_BEGIN							\
	vm_page_t __page = (page);					\
	vm_object_t __object = (object);				\
	queue_enter(&__object->memq, __page, vm_page_t, listq);	\
	__object->memq_hint = __page;					\
	MACRO_END
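
/*
 * Editor's sketch (not part of the original header): VM_PAGE_REMOVE
 * keeps memq_hint valid when the hint page itself is removed, and
 * VM_PAGE_INSERT makes the new page the hint.  Both assume the object
 * lock is held exclusively; example_move_page is a hypothetical name
 * and glosses over the extra bookkeeping a real page move needs.
 */
#if 0 /* illustrative only */
static void
example_move_page(vm_page_t m, vm_object_t to)
{
	/* caller holds both object locks exclusively */
	VM_PAGE_REMOVE(m);	/* unlinks m from m->object's memq */
	m->object = to;
	VM_PAGE_INSERT(m, to);	/* links m into to->memq; m becomes hint */
}
#endif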
extern
vm_object_t	kernel_object;		/* the single kernel object */

extern
vm_object_t	compressor_object;	/* the single compressor object */

extern
unsigned int	vm_object_absent_max;	/* maximum number of absent pages
					 * at a time for each object */
# define VM_MSYNC_INITIALIZED		0
# define VM_MSYNC_SYNCHRONIZING	1
# define VM_MSYNC_DONE			2

struct msync_req {
	queue_chain_t		msr_q;		/* object request queue */
	queue_chain_t		req_q;		/* vm_msync request queue */
	unsigned int		flag;
	vm_object_offset_t	offset;
	vm_object_size_t	length;
	vm_object_t		object;		/* back pointer */
	decl_lck_mtx_data(,	msync_req_lock)	/* Lock for this structure */
};

typedef struct msync_req	*msync_req_t;
#define MSYNC_REQ_NULL		((msync_req_t) 0)
extern lck_grp_t	vm_map_lck_grp;
extern lck_attr_t	vm_map_lck_attr;

/*
 * Macros to allocate and free msync_reqs
 */
#define msync_req_alloc(msr)						\
	MACRO_BEGIN							\
	(msr) = (msync_req_t)kalloc(sizeof(struct msync_req));		\
	lck_mtx_init(&(msr)->msync_req_lock, &vm_map_lck_grp, &vm_map_lck_attr); \
	msr->flag = VM_MSYNC_INITIALIZED;				\
	MACRO_END

#define msync_req_free(msr)						\
	MACRO_BEGIN							\
	lck_mtx_destroy(&(msr)->msync_req_lock, &vm_map_lck_grp);	\
	kfree((msr), sizeof(struct msync_req));				\
	MACRO_END

#define msr_lock(msr)	lck_mtx_lock(&(msr)->msync_req_lock)
#define msr_unlock(msr)	lck_mtx_unlock(&(msr)->msync_req_lock)
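
/*
 * Editor's sketch (not part of the original header): typical life cycle
 * of an msync_req -- allocate, fill in under the request lock, retire.
 * example_msr_cycle is a hypothetical name.
 */
#if 0 /* illustrative only */
static void
example_msr_cycle(vm_object_t object, vm_object_offset_t offset,
		  vm_object_size_t length)
{
	msync_req_t msr;

	msync_req_alloc(msr);	/* flag starts as VM_MSYNC_INITIALIZED */
	msr_lock(msr);
	msr->object = object;
	msr->offset = offset;
	msr->length = length;
	msr->flag = VM_MSYNC_SYNCHRONIZING;
	msr_unlock(msr);

	/* ... wait until the request reaches VM_MSYNC_DONE ... */

	msync_req_free(msr);
}
#endif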
/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void		vm_object_bootstrap(void);

__private_extern__ void		vm_object_init(void);

__private_extern__ void		vm_object_init_lck_grp(void);

__private_extern__ void		vm_object_reaper_init(void);

__private_extern__ vm_object_t	vm_object_allocate(
					vm_object_size_t	size);
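
/*
 * Editor's sketch (not part of the original header): vm_object_allocate
 * returns a new object holding one reference; vm_object_deallocate
 * (declared below) drops it.
 */
#if 0 /* illustrative only */
	vm_object_t obj;

	obj = vm_object_allocate((vm_object_size_t)(16 * PAGE_SIZE));
	/* ... use obj ... */
	vm_object_deallocate(obj);
#endif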
__private_extern__ void		_vm_object_allocate(
					vm_object_size_t	size,
					vm_object_t		object);

#if	TASK_SWAPPER

__private_extern__ void		vm_object_res_reference(
					vm_object_t		object);
__private_extern__ void		vm_object_res_deallocate(
					vm_object_t		object);
#define	VM_OBJ_RES_INCR(object)	(object)->res_count++
#define	VM_OBJ_RES_DECR(object)	(object)->res_count--

#else	/* TASK_SWAPPER */

#define VM_OBJ_RES_INCR(object)
#define VM_OBJ_RES_DECR(object)
#define vm_object_res_reference(object)
#define vm_object_res_deallocate(object)

#endif	/* TASK_SWAPPER */
#define vm_object_reference_locked(object)				\
	MACRO_BEGIN							\
	vm_object_t RLObject = (object);				\
	vm_object_lock_assert_exclusive(object);			\
	assert((RLObject)->ref_count > 0);				\
	(RLObject)->ref_count++;					\
	assert((RLObject)->ref_count > 1);				\
	vm_object_res_reference(RLObject);				\
	MACRO_END

#define vm_object_reference_shared(object)				\
	MACRO_BEGIN							\
	vm_object_t RLObject = (object);				\
	vm_object_lock_assert_shared(object);				\
	assert((RLObject)->ref_count > 0);				\
	OSAddAtomic(1, &(RLObject)->ref_count);				\
	assert((RLObject)->ref_count > 0);				\
	/* XXX we would need an atomic version of the following ... */	\
	vm_object_res_reference(RLObject);				\
	MACRO_END
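
/*
 * Editor's sketch (not part of the original header): the _locked
 * variant needs the exclusive object lock and bumps ref_count directly,
 * while the _shared variant only needs the shared lock and uses an
 * atomic increment.
 */
#if 0 /* illustrative only */
	vm_object_lock(object);			/* exclusive */
	vm_object_reference_locked(object);
	vm_object_unlock(object);

	vm_object_lock_shared(object);
	vm_object_reference_shared(object);
	vm_object_unlock(object);
#endif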
__private_extern__ void		vm_object_reference(
					vm_object_t		object);

#if	!MACH_ASSERT

#define vm_object_reference(object)				\
	MACRO_BEGIN						\
	vm_object_t RObject = (object);				\
	if (RObject) {						\
		vm_object_lock_shared(RObject);			\
		vm_object_reference_shared(RObject);		\
		vm_object_unlock(RObject);			\
	}							\
	MACRO_END

#endif	/* MACH_ASSERT */
__private_extern__ void		vm_object_deallocate(
					vm_object_t		object);

__private_extern__ kern_return_t vm_object_release_name(
					vm_object_t		object,
					int			flags);

__private_extern__ void		vm_object_pmap_protect(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot);

__private_extern__ void		vm_object_pmap_protect_options(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot,
					int			options);

__private_extern__ void		vm_object_page_remove(
					vm_object_t		object,
					vm_object_offset_t	start,
					vm_object_offset_t	end);
__private_extern__ void		vm_object_deactivate_pages(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		kill_page,
					boolean_t		reusable_page);

__private_extern__ void		vm_object_reuse_pages(
					vm_object_t		object,
					vm_object_offset_t	start_offset,
					vm_object_offset_t	end_offset,
					boolean_t		allow_partial_reuse);

__private_extern__ void		vm_object_purge(
					vm_object_t		object);
__private_extern__ kern_return_t vm_object_purgable_control(
					vm_object_t		object,
					vm_purgable_t		control,
					int			*state);

__private_extern__ kern_return_t vm_object_get_page_counts(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					unsigned int		*resident_page_count,
					unsigned int		*dirty_page_count);

__private_extern__ boolean_t	vm_object_coalesce(
					vm_object_t		prev_object,
					vm_object_t		next_object,
					vm_object_offset_t	prev_offset,
					vm_object_offset_t	next_offset,
					vm_object_size_t	prev_size,
					vm_object_size_t	next_size);
__private_extern__ boolean_t	vm_object_shadow(
					vm_object_t		*object,
					vm_object_offset_t	*offset,
					vm_object_size_t	length);

__private_extern__ void		vm_object_collapse(
					vm_object_t		object,
					vm_object_offset_t	offset,
					boolean_t		can_bypass);

__private_extern__ boolean_t	vm_object_copy_quickly(
					vm_object_t		*_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		*_src_needs_copy,
					boolean_t		*_dst_needs_copy);
__private_extern__ kern_return_t vm_object_copy_strategically(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					vm_object_t		*dst_object,
					vm_object_offset_t	*dst_offset,
					boolean_t		*dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_slowly(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		interruptible,
					vm_object_t		*_result_object);

__private_extern__ vm_object_t	vm_object_copy_delayed(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		src_object_shared);
__private_extern__ kern_return_t vm_object_destroy(
					vm_object_t		object,
					kern_return_t		reason);

__private_extern__ void		vm_object_pager_create(
					vm_object_t		object);

__private_extern__ void		vm_object_compressor_pager_create(
					vm_object_t		object);

__private_extern__ void		vm_object_page_map(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	(*map_fn)
						(void *, vm_object_offset_t),
					void			*map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					upl_size_t		size,
					upl_t			*upl,
					upl_page_info_t		*page_info,
					unsigned int		*count,
					int			flags);

__private_extern__ kern_return_t vm_object_transpose(
					vm_object_t		object1,
					vm_object_t		object2,
					vm_object_size_t	transpose_size);

__private_extern__ boolean_t	vm_object_sync(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		should_flush,
					boolean_t		should_return,
					boolean_t		should_iosync);
__private_extern__ kern_return_t vm_object_update(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	*error_offset,
					int			*io_errno,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);

__private_extern__ kern_return_t vm_object_lock_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);

__private_extern__ vm_object_t	vm_object_enter(
					memory_object_t		pager,
					vm_object_size_t	size,
					boolean_t		internal,
					boolean_t		init,
					boolean_t		check_named);

__private_extern__ void		vm_object_cluster_size(
					vm_object_t		object,
					vm_object_offset_t	*start,
					vm_size_t		*length,
					vm_object_fault_info_t	fault_info,
					uint32_t		*io_streaming);
__private_extern__ kern_return_t vm_object_populate_with_private(
					vm_object_t		object,
					vm_object_offset_t	offset,
					ppnum_t			phys_page,
					vm_size_t		size);

__private_extern__ void		vm_object_change_wimg_mode(
					vm_object_t		object,
					unsigned int		wimg_mode);

extern kern_return_t		adjust_vm_object_cache(
					vm_size_t		oval,
					vm_size_t		nval);

extern kern_return_t		vm_object_page_op(
					vm_object_t		object,
					vm_object_offset_t	offset,
					int			ops,
					ppnum_t			*phys_entry,
					int			*flags);

extern kern_return_t		vm_object_range_op(
					vm_object_t		object,
					vm_object_offset_t	offset_beg,
					vm_object_offset_t	offset_end,
					int			ops,
					uint32_t		*range);

__private_extern__ void		vm_object_reap_pages(
					vm_object_t		object,
					int			reap_type);

#define REAP_REAP	0
#define REAP_TERMINATE	1
#define REAP_PURGEABLE	2
#define REAP_DATA_FLUSH	3
#if CONFIG_FREEZE
struct default_freezer_handle;

__private_extern__ kern_return_t
vm_object_pack(
	unsigned int		*purgeable_count,
	unsigned int		*wired_count,
	unsigned int		*clean_count,
	unsigned int		*dirty_count,
	unsigned int		dirty_budget,
	boolean_t		*shared,
	vm_object_t		src_object,
	struct default_freezer_handle *df_handle);

__private_extern__ void
vm_object_pack_pages(
	unsigned int		*wired_count,
	unsigned int		*clean_count,
	unsigned int		*dirty_count,
	unsigned int		dirty_budget,
	vm_object_t		src_object,
	struct default_freezer_handle *df_handle);

__private_extern__ void
vm_object_pageout(
	vm_object_t		object);

__private_extern__ kern_return_t
vm_object_pagein(
	vm_object_t		object);
#endif /* CONFIG_FREEZE */
/*
 *	Event waiting handling
 */

#define	VM_OBJECT_EVENT_INITIALIZED		0
#define	VM_OBJECT_EVENT_PAGER_READY		1
#define	VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
#define	VM_OBJECT_EVENT_MAPPING_IN_PROGRESS	3
#define	VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
#define	VM_OBJECT_EVENT_UNCACHING		5
#define	VM_OBJECT_EVENT_COPY_CALL		6
#define	VM_OBJECT_EVENT_CACHING			7
#define VM_OBJECT_EVENT_UNBLOCKED		8
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS	9

#define	vm_object_assert_wait(object, event, interruptible)		\
	(((object)->all_wanted |= 1 << (event)),			\
	 assert_wait((event_t)((vm_offset_t)(object)+(event)),(interruptible)))

#define	vm_object_wait(object, event, interruptible)			\
	(vm_object_assert_wait((object),(event),(interruptible)),	\
	vm_object_unlock(object),					\
	thread_block(THREAD_CONTINUE_NULL))

#define thread_sleep_vm_object(object, event, interruptible)		\
	lck_rw_sleep(&(object)->Lock, LCK_SLEEP_DEFAULT, (event_t)(event), (interruptible))

#define vm_object_sleep(object, event, interruptible)			\
	(((object)->all_wanted |= 1 << (event)),			\
	 thread_sleep_vm_object((object),				\
		((vm_offset_t)(object)+(event)), (interruptible)))

#define	vm_object_wakeup(object, event)					\
	MACRO_BEGIN							\
	if ((object)->all_wanted & (1 << (event)))			\
		thread_wakeup((event_t)((vm_offset_t)(object) + (event))); \
	(object)->all_wanted &= ~(1 << (event));			\
	MACRO_END

#define	vm_object_set_wanted(object, event)				\
	MACRO_BEGIN							\
	((object)->all_wanted |= (1 << (event)));			\
	MACRO_END

#define	vm_object_wanted(object, event)					\
	((object)->all_wanted & (1 << (event)))
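
/*
 * Editor's sketch (not part of the original header): a waiter marks its
 * event in all_wanted and sleeps on the object lock; a waker only calls
 * thread_wakeup() if someone registered interest.  Both sides hold the
 * object lock.
 */
#if 0 /* illustrative only */
	/* waiter: block until the pager can take requests */
	while (!object->pager_ready) {
		vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
				THREAD_UNINT);
	}

	/* waker: the pager just became ready */
	object->pager_ready = TRUE;
	vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
#endif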
/*
 *	Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)					\
	MACRO_BEGIN							\
	int pip = ((object)->paging_in_progress +			\
		   (object)->activity_in_progress);			\
	if (pip < VM_PIP_DEBUG_MAX_REFS) {				\
		(void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
				   VM_PIP_DEBUG_STACK_FRAMES);		\
	}								\
	MACRO_END
#else	/* VM_PIP_DEBUG */
#define	VM_PIP_DEBUG_BEGIN(object)
#endif	/* VM_PIP_DEBUG */
#define	vm_object_activity_begin(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->paging_in_progress >= 0);			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->activity_in_progress++;				\
	MACRO_END

#define	vm_object_activity_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->activity_in_progress > 0);			\
	(object)->activity_in_progress--;				\
	if ((object)->paging_in_progress == 0 &&			\
	    (object)->activity_in_progress == 0)			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_IN_PROGRESS);	\
	MACRO_END

#define	vm_object_paging_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->paging_in_progress >= 0);			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->paging_in_progress++;					\
	MACRO_END

#define	vm_object_paging_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->paging_in_progress > 0);			\
	(object)->paging_in_progress--;					\
	if ((object)->paging_in_progress == 0) {			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
		if ((object)->activity_in_progress == 0)		\
			vm_object_wakeup((object),			\
					 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	}								\
	MACRO_END
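
/*
 * Editor's sketch (not part of the original header): paging operations
 * are bracketed so the object is not collapsed, destroyed or terminated
 * while I/O is in flight; the final vm_object_paging_end() wakes anyone
 * blocked in vm_object_paging_wait().
 */
#if 0 /* illustrative only */
	vm_object_lock(object);
	vm_object_paging_begin(object);	/* pin against termination */
	vm_object_unlock(object);

	/* ... issue the pagein/pageout ... */

	vm_object_lock(object);
	vm_object_paging_end(object);	/* may wake waiters */
	vm_object_unlock(object);
#endif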
#define	vm_object_paging_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0 ||			\
	       (object)->activity_in_progress != 0) {			\
		wait_result_t	_wr;					\
									\
		_wr = vm_object_sleep((object),				\
				VM_OBJECT_EVENT_PAGING_IN_PROGRESS,	\
				(interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	MACRO_END

#define	vm_object_paging_only_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0) {			\
		wait_result_t	_wr;					\
									\
		_wr = vm_object_sleep((object),				\
				VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\
				(interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	MACRO_END

#define vm_object_mapping_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert(! (object)->mapping_in_progress);			\
	(object)->mapping_in_progress = TRUE;				\
	MACRO_END

#define vm_object_mapping_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->mapping_in_progress);				\
	(object)->mapping_in_progress = FALSE;				\
	vm_object_wakeup((object),					\
			 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);		\
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->mapping_in_progress) {				\
		wait_result_t	_wr;					\
									\
		_wr = vm_object_sleep((object),				\
				VM_OBJECT_EVENT_MAPPING_IN_PROGRESS,	\
				(interruptible));			\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	assert(!(object)->mapping_in_progress);				\
	MACRO_END
#define OBJECT_LOCK_SHARED	0
#define OBJECT_LOCK_EXCLUSIVE	1

extern lck_grp_t	vm_object_lck_grp;
extern lck_grp_attr_t	vm_object_lck_grp_attr;
extern lck_attr_t	vm_object_lck_attr;
extern lck_attr_t	kernel_object_lck_attr;
extern lck_attr_t	compressor_object_lck_attr;

extern vm_object_t	vm_pageout_scan_wants_object;

extern void		vm_object_lock(vm_object_t);
extern boolean_t	vm_object_lock_try(vm_object_t);
extern boolean_t	_vm_object_lock_try(vm_object_t);
extern boolean_t	vm_object_lock_avoid(vm_object_t);
extern void		vm_object_lock_shared(vm_object_t);
extern boolean_t	vm_object_lock_try_shared(vm_object_t);
/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)					\
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,		\
		    (((object) == kernel_object ||			\
		      (object) == vm_submap_object) ?			\
		     &kernel_object_lck_attr :				\
		     (((object) == compressor_object) ?			\
		      &compressor_object_lck_attr :			\
		      &vm_object_lck_attr)))
#define vm_object_lock_destroy(object)	lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_unlock(object)	lck_rw_done(&(object)->Lock)
#define vm_object_lock_upgrade(object)	lck_rw_lock_shared_to_exclusive(&(object)->Lock)
#define vm_object_lock_try_scan(object)	_vm_object_lock_try(object)
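
/*
 * Editor's sketch (not part of the original header): a reader may take
 * the shared lock and try to upgrade in place; lck_rw upgrades can
 * fail, and on failure the lock has been dropped and must be retaken
 * exclusively.
 */
#if 0 /* illustrative only */
	vm_object_lock_shared(object);
	/* ... discover the object needs modification ... */
	if (!vm_object_lock_upgrade(object)) {
		/* upgrade failed: the shared hold was released */
		vm_object_lock(object);	/* retake exclusively */
	}
	/* ... modify the object ... */
	vm_object_unlock(object);
#endif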
/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller.
 */
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object)				\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object)				\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object)			\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#else  /* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#endif /* MACH_ASSERT || DEBUG */
#define vm_object_round_page(x)	(((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x)	((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
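
/*
 * Editor's note (not part of the original header): with 4 KB pages
 * (PAGE_MASK == 0xFFF), vm_object_trunc_page(0x1234) yields 0x1000 and
 * vm_object_round_page(0x1234) yields 0x2000; page-aligned values pass
 * through both unchanged.
 */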
extern void	vm_object_cache_add(vm_object_t);
extern void	vm_object_cache_remove(vm_object_t);
extern int	vm_object_cache_evict(int, int);

#endif	/* _VM_VM_OBJECT_H_ */