/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory object module definitions.
 */
#ifndef	_VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <mach_assert.h>
#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>

#include <vm/vm_external.h>

#include <vm/vm_options.h>
#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern boolean_t vm_object_tracking_inited;
extern btlog_t *vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_BTDEPTH	7
#define VM_OBJECT_TRACKING_OP_CREATED	1
#define VM_OBJECT_TRACKING_OP_MODIFIED	2
#define VM_OBJECT_TRACKING_OP_TRUESHARE	3
#endif /* VM_OBJECT_TRACKING */
struct vm_shared_region_slide_info;

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */
struct vm_object_fault_info {
	vm_size_t	cluster_size;
	vm_behavior_t	behavior;
	vm_map_offset_t	lo_offset;
	vm_map_offset_t	hi_offset;
	unsigned int
	/* boolean_t */	no_cache:1,
	/* boolean_t */	stealth:1,
	/* boolean_t */	io_sync:1,
	/* boolean_t */	cs_bypass:1,
	/* boolean_t */	mark_zf_absent:1,
	/* boolean_t */	batch_pmap_op:1,
			__vm_object_fault_info_unused_bits:26;
};
#define	vo_size			vo_un1.vou_size
#define vo_cache_pages_to_scan	vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset	vo_un2.vou_shadow_offset
#define vo_cache_ts		vo_un2.vou_cache_ts
#define vo_purgeable_owner	vo_un2.vou_purgeable_owner
#define vo_slide_info		vo_un2.vou_slide_info
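
/*
 * Editor's note (illustrative, not in the original header): these
 * accessors alias the "vo_un1"/"vo_un2" unions declared in struct
 * vm_object below, so callers can write, e.g.:
 *
 *	object->vo_size			(really object->vo_un1.vou_size)
 *	object->vo_shadow_offset	(really object->vo_un2.vou_shadow_offset)
 *
 * Which member of each union is meaningful depends on the object's
 * state (internal vs. cached, shadowed vs. purgeable, etc.).
 */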
struct vm_object {
	queue_head_t		memq;		/* Resident memory */
	lck_rw_t		Lock;		/* Synchronization */

	union {
		vm_object_size_t vou_size;	/* Object size (only valid if internal) */
		int		vou_cache_pages_to_scan;
						/* pages yet to be visited in an
						 * external object in cache
						 */
	} vo_un1;

	struct vm_page		*memq_hint;
	int			ref_count;	/* Number of references */
#if	TASK_SWAPPER
	int			res_count;	/* Residency references (swap) */
#endif	/* TASK_SWAPPER */
	unsigned int		resident_page_count;
						/* number of resident pages */
	unsigned int		wired_page_count; /* number of wired pages */
	unsigned int		reusable_page_count;
	struct vm_object	*copy;		/* Object that should receive
						 * a copy of my changed pages,
						 * for copy_delay, or just the
						 * temporary object that
						 * shadows this object, for
						 * copy_call.
						 */
	struct vm_object	*shadow;	/* My shadow */

	union {
		vm_object_offset_t vou_shadow_offset;	/* Offset into shadow */
		clock_sec_t	vou_cache_ts;	/* age of an external object
						 * present in cache
						 */
		task_t		vou_purgeable_owner;	/* If the purgeable bits below
							 * are set to volatile/empty,
							 * this is the task that owns
							 * this purgeable object.
							 */
		struct vm_shared_region_slide_info *vou_slide_info;
	} vo_un2;
	memory_object_t		pager;		/* Where to get data */
	vm_object_offset_t	paging_offset;	/* Offset into memory object */
	memory_object_control_t	pager_control;	/* Where data comes back */

	memory_object_copy_strategy_t
				copy_strategy;	/* How to handle data copy */
#if __LP64__
	/*
	 * Some user processes (mostly VirtualMachine software) take a large
	 * number of UPLs (via IOMemoryDescriptors) to wire pages in large
	 * VM objects and overflow the 16-bit "activity_in_progress" counter.
	 * Since we never enforced any limit there, let's give them 32 bits
	 * for backwards compatibility's sake.
	 */
	unsigned int		paging_in_progress:16,
				__object1_unused_bits:16;
	unsigned int		activity_in_progress;
#else	/* __LP64__ */
	/*
	 * On 32-bit platforms, enlarging "activity_in_progress" would increase
	 * the size of "struct vm_object".  Since we don't know of any actual
	 * overflow of these counters on these platforms, let's keep the
	 * counters as 16-bit integers.
	 */
	unsigned short		paging_in_progress;
	unsigned short		activity_in_progress;
#endif	/* __LP64__ */
			/* The memory object ports are
			 * being used (e.g., for pagein
			 * or pageout) -- don't change
			 * any of these fields (i.e.,
			 * don't collapse, destroy or
			 * terminate)
			 */

	unsigned int
	/* boolean_t array */	all_wanted:11,	/* Bit array of "want to be
						 * awakened" notations.  See
						 * VM_OBJECT_EVENT_* items
						 * below
						 */
	/* boolean_t */		pager_created:1,	/* Has pager been created? */
	/* boolean_t */		pager_initialized:1,	/* Are fields ready to use? */
	/* boolean_t */		pager_ready:1,		/* Will pager take requests? */

	/* boolean_t */		pager_trusted:1,/* The pager for this object
						 * is trusted.  This is true for
						 * all internal objects (backed
						 * by the default pager)
						 */
	/* boolean_t */		can_persist:1,	/* The kernel may keep the data
						 * for this object (and rights
						 * to the memory object) after
						 * all address map references
						 * are deallocated
						 */
	/* boolean_t */		internal:1,	/* Created by the kernel (and
						 * therefore, managed by the
						 * default memory manager)
						 */
	/* boolean_t */		temporary:1,	/* Permanent objects may be
						 * changed externally by the
						 * memory manager, and changes
						 * made in memory must be
						 * reflected back to the memory
						 * manager.  Temporary objects
						 * lack both of these
						 * characteristics.
						 */
	/* boolean_t */		private:1,	/* magic device_pager object,
						 * holds private pages only */
	/* boolean_t */		pageout:1,	/* pageout object.  contains
						 * private pages that refer to
						 * a real memory object. */
	/* boolean_t */		alive:1,	/* Not yet terminated */

	/* boolean_t */		purgable:2,	/* Purgable state.  See
						 * VM_PURGABLE_*
						 */
	/* boolean_t */		purgeable_when_ripe:1, /* Purgeable when a token
							* becomes ripe.
							*/
	/* boolean_t */		shadowed:1,	/* Shadow may exist */
	/* boolean_t */		advisory_pageout:1,
						/* Instead of sending page
						 * via OOL, just notify
						 * pager that the kernel
						 * wants to discard it; page
						 * remains in object */
	/* boolean_t */		true_share:1,
						/* This object is mapped
						 * in more than one place
						 * and hence cannot be
						 * coalesced */
	/* boolean_t */		terminating:1,
						/* Allows vm_object_lookup
						 * and vm_object_deallocate
						 * to special case their
						 * behavior when they are
						 * called as a result of
						 * page cleaning during
						 * object termination
						 */
	/* boolean_t */		named:1,	/* Enforces an internal
						 * naming convention by
						 * calling the right routines
						 * for allocation and
						 * destruction; UBC references
						 * against the vm_object are
						 * checked.
						 */
	/* boolean_t */		shadow_severed:1,
						/* When a permanent object
						 * backing a COW goes away
						 * unexpectedly.  This bit
						 * allows vm_fault to return
						 * an error rather than a
						 * zero-filled page.
						 */
	/* boolean_t */		phys_contiguous:1,
						/* Memory is wired and
						 * guaranteed physically
						 * contiguous.  However
						 * it is not device memory
						 * and obeys normal virtual
						 * memory rules w.r.t pmap
						 * access bits.
						 */
	/* boolean_t */		nophyscache:1;	/* When mapped at the
						 * pmap level, don't allow
						 * primary caching. (for
						 * I/O)
						 */
	queue_chain_t		cached_list;	/* Attachment point for the
						 * list of objects cached as a
						 * result of their can_persist
						 * value
						 */

	queue_head_t		msr_q;		/* memory object synchronise
						 * request queue
						 */
	/*
	 * the following fields are not protected by any locks;
	 * they are updated via atomic compare and swap
	 */
	vm_object_offset_t	last_alloc;	/* last allocation offset */
	int			sequential;	/* sequential access size */

	uint32_t		pages_created;
#if	MACH_PAGEMAP
	vm_external_map_t	existence_map;	/* bitmap of pages written to
						 * backing storage */
#endif	/* MACH_PAGEMAP */
	vm_offset_t		cow_hint;	/* last page present in     */
						/* shadow but not in object */
	struct vm_object	*paging_object;	/* object in which pages being
						 * swapped out are temporarily
						 * put
						 */

	/* hold object lock when altering */
	unsigned int
		wimg_bits:8,		/* cache WIMG bits */
		code_signed:1,		/* pages are signed and should be
					 * validated; the signatures are
					 * stored with the pager */
		hashed:1,		/* object/pager entered in hash */
		transposed:1,		/* object was transposed with another */
		mapping_in_progress:1,	/* pager being mapped/unmapped */
		volatile_empty:1,
		volatile_fault:1,
		purgeable_queue_type:2,
		purgeable_queue_group:3,
		__object2_unused_bits:7;	/* for expansion */
	uint32_t		scan_collisions;
#if CONFIG_PHANTOM_CACHE
	uint32_t		phantom_object_id;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	queue_head_t		uplq;		/* List of outstanding upls */
#endif /* CONFIG_IOSCHED || UPL_DEBUG */
#ifdef	VM_PIP_DEBUG
	/*
	 * Keep track of the stack traces for the first holders
	 * of a "paging_in_progress" reference for this VM object.
	 */
#define VM_PIP_DEBUG_STACK_FRAMES	25	/* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS		10	/* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif	/* VM_PIP_DEBUG */
	queue_chain_t		objq;	/* object queue - currently used for purgable queues */

#if DEBUG
	void			*purgeable_owner_bt[16];
	task_t			vo_purgeable_volatilizer; /* who made it volatile? */
	void			*purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};
#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)				\
	((object)->volatile_fault &&					\
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||			\
	  (object)->purgable == VM_PURGABLE_EMPTY))
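
/*
 * Illustrative use (editor's sketch, not from the original source): a
 * fault path would consult this predicate after locking the object and
 * fail the fault instead of zero-filling a volatile or empty purgeable
 * object; the surrounding handler and return code are hypothetical.
 *
 *	vm_object_lock(object);
 *	if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
 *		vm_object_unlock(object);
 *		return KERN_MEMORY_ERROR;
 *	}
 */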
#define VM_PAGE_REMOVE(page)						\
	MACRO_BEGIN							\
	vm_page_t __page = (page);					\
	vm_object_t __object = __page->object;				\
	if (__page == __object->memq_hint) {				\
		vm_page_t	__new_hint;				\
		queue_entry_t	__qe;					\
		__qe = queue_next(&__page->listq);			\
		if (queue_end(&__object->memq, __qe)) {			\
			__qe = queue_prev(&__page->listq);		\
			if (queue_end(&__object->memq, __qe)) {		\
				__qe = NULL;				\
			}						\
		}							\
		__new_hint = (vm_page_t) __qe;				\
		__object->memq_hint = __new_hint;			\
	}								\
	queue_remove(&__object->memq, __page, vm_page_t, listq);	\
	MACRO_END
#define VM_PAGE_INSERT(page, object)					\
	MACRO_BEGIN							\
	vm_page_t __page = (page);					\
	vm_object_t __object = (object);				\
	queue_enter(&__object->memq, __page, vm_page_t, listq);	\
	__object->memq_hint = __page;					\
	MACRO_END
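
/*
 * Editor's sketch (assumed usage, not from the original source): both
 * macros expect the object lock to be held exclusively.  A hypothetical
 * caller re-homing a page would pair them as follows; the re-parenting
 * assignment is illustrative only.
 *
 *	vm_object_lock(object);
 *	VM_PAGE_REMOVE(page);		(unlink from page->object->memq,
 *					 fixing up memq_hint if needed)
 *	page->object = object;		(hypothetical re-parenting step)
 *	VM_PAGE_INSERT(page, object);	(link into object->memq and make
 *					 the page the new memq_hint)
 *	vm_object_unlock(object);
 */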
__private_extern__
vm_object_t	kernel_object;		/* the single kernel object */

__private_extern__
vm_object_t	compressor_object;	/* the single compressor object */

__private_extern__
unsigned int	vm_object_absent_max;	/* maximum number of absent pages
					 * at a time for each object */
# define	VM_MSYNC_INITIALIZED		0
# define	VM_MSYNC_SYNCHRONIZING		1
# define	VM_MSYNC_DONE			2

struct msync_req {
	queue_chain_t		msr_q;		/* object request queue */
	queue_chain_t		req_q;		/* vm_msync request queue */
	unsigned int		flag;
	vm_object_offset_t	offset;
	vm_object_size_t	length;
	vm_object_t		object;		/* back pointer */
	decl_lck_mtx_data(,	msync_req_lock)	/* Lock for this structure */
};

typedef struct msync_req	*msync_req_t;
#define MSYNC_REQ_NULL		((msync_req_t) 0)
extern lck_grp_t	vm_map_lck_grp;
extern lck_attr_t	vm_map_lck_attr;

/*
 * Macros to allocate and free msync_reqs
 */
#define msync_req_alloc(msr)						\
	MACRO_BEGIN							\
	(msr) = (msync_req_t)kalloc(sizeof(struct msync_req));		\
	lck_mtx_init(&(msr)->msync_req_lock, &vm_map_lck_grp, &vm_map_lck_attr); \
	msr->flag = VM_MSYNC_INITIALIZED;				\
	MACRO_END

#define msync_req_free(msr)						\
	MACRO_BEGIN							\
	lck_mtx_destroy(&(msr)->msync_req_lock, &vm_map_lck_grp);	\
	kfree((msr), sizeof(struct msync_req));				\
	MACRO_END

#define msr_lock(msr)	lck_mtx_lock(&(msr)->msync_req_lock)
#define msr_unlock(msr)	lck_mtx_unlock(&(msr)->msync_req_lock)
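
/*
 * Illustrative lifecycle (editor's sketch based on the flag protocol
 * above; not from the original source):
 *
 *	msync_req_t msr;
 *
 *	msync_req_alloc(msr);		(kalloc + mutex init, flag = INITIALIZED)
 *	msr_lock(msr);
 *	msr->flag = VM_MSYNC_SYNCHRONIZING;
 *	...				(issue and wait for the sync)
 *	msr->flag = VM_MSYNC_DONE;
 *	msr_unlock(msr);
 *	msync_req_free(msr);		(mutex destroy + kfree)
 */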
/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void		vm_object_bootstrap(void);

__private_extern__ void		vm_object_init(void);

__private_extern__ void		vm_object_init_lck_grp(void);

__private_extern__ void		vm_object_reaper_init(void);
__private_extern__ vm_object_t	vm_object_allocate(
					vm_object_size_t	size);

__private_extern__ void	_vm_object_allocate(
					vm_object_size_t	size,
					vm_object_t		object);
#if	TASK_SWAPPER

__private_extern__ void	vm_object_res_reference(
					vm_object_t	object);
__private_extern__ void	vm_object_res_deallocate(
					vm_object_t	object);
#define	VM_OBJ_RES_INCR(object)	(object)->res_count++
#define	VM_OBJ_RES_DECR(object)	(object)->res_count--

#else	/* TASK_SWAPPER */

#define VM_OBJ_RES_INCR(object)
#define VM_OBJ_RES_DECR(object)
#define vm_object_res_reference(object)
#define vm_object_res_deallocate(object)

#endif	/* TASK_SWAPPER */
#define vm_object_reference_locked(object)		\
	MACRO_BEGIN					\
	vm_object_t RLObject = (object);		\
	vm_object_lock_assert_exclusive(object);	\
	assert((RLObject)->ref_count > 0);		\
	(RLObject)->ref_count++;			\
	assert((RLObject)->ref_count > 1);		\
	vm_object_res_reference(RLObject);		\
	MACRO_END
#define vm_object_reference_shared(object)				\
	MACRO_BEGIN							\
	vm_object_t RLObject = (object);				\
	vm_object_lock_assert_shared(object);				\
	assert((RLObject)->ref_count > 0);				\
	OSAddAtomic(1, &(RLObject)->ref_count);				\
	assert((RLObject)->ref_count > 0);				\
	/* XXX we would need an atomic version of the following ... */	\
	vm_object_res_reference(RLObject);				\
	MACRO_END
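
/*
 * Editor's sketch (not from the original source): the two macros differ
 * only in the lock mode they require.  Under a shared lock the increment
 * must be atomic (OSAddAtomic) because other shared holders may race;
 * under the exclusive lock a plain increment suffices.
 *
 *	vm_object_lock_shared(object);
 *	vm_object_reference_shared(object);
 *	vm_object_unlock(object);
 *
 *	vm_object_lock(object);
 *	vm_object_reference_locked(object);
 *	vm_object_unlock(object);
 */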
__private_extern__ void		vm_object_reference(
					vm_object_t	object);

#if	!MACH_ASSERT

#define	vm_object_reference(object)			\
	MACRO_BEGIN					\
	vm_object_t RObject = (object);			\
	if (RObject) {					\
		vm_object_lock_shared(RObject);		\
		vm_object_reference_shared(RObject);	\
		vm_object_unlock(RObject);		\
	}						\
	MACRO_END

#endif	/* MACH_ASSERT */
__private_extern__ void		vm_object_deallocate(
					vm_object_t	object);

__private_extern__ kern_return_t vm_object_release_name(
					vm_object_t	object,
					int		flags);
__private_extern__ void		vm_object_pmap_protect(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot);

__private_extern__ void		vm_object_pmap_protect_options(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot,
					int			options);
__private_extern__ void		vm_object_page_remove(
					vm_object_t		object,
					vm_object_offset_t	start,
					vm_object_offset_t	end);
__private_extern__ void		vm_object_deactivate_pages(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		kill_page,
					boolean_t		reusable_page);
__private_extern__ void		vm_object_reuse_pages(
					vm_object_t		object,
					vm_object_offset_t	start_offset,
					vm_object_offset_t	end_offset,
					boolean_t		allow_partial_reuse);
__private_extern__ void		vm_object_purge(
					vm_object_t		object);

__private_extern__ kern_return_t vm_object_purgable_control(
					vm_object_t		object,
					vm_purgable_t		control,
					int			*state);
__private_extern__ kern_return_t vm_object_get_page_counts(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					unsigned int		*resident_page_count,
					unsigned int		*dirty_page_count);
__private_extern__ boolean_t	vm_object_coalesce(
					vm_object_t		prev_object,
					vm_object_t		next_object,
					vm_object_offset_t	prev_offset,
					vm_object_offset_t	next_offset,
					vm_object_size_t	prev_size,
					vm_object_size_t	next_size);
__private_extern__ boolean_t	vm_object_shadow(
					vm_object_t		*object,
					vm_object_offset_t	*offset,
					vm_object_size_t	length);
__private_extern__ void		vm_object_collapse(
					vm_object_t		object,
					vm_object_offset_t	offset,
					boolean_t		can_bypass);
__private_extern__ boolean_t	vm_object_copy_quickly(
					vm_object_t		*_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		*_src_needs_copy,
					boolean_t		*_dst_needs_copy);
__private_extern__ kern_return_t vm_object_copy_strategically(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					vm_object_t		*dst_object,
					vm_object_offset_t	*dst_offset,
					boolean_t		*dst_needs_copy);
__private_extern__ kern_return_t vm_object_copy_slowly(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		interruptible,
					vm_object_t		*_result_object);
__private_extern__ vm_object_t	vm_object_copy_delayed(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		src_object_shared);
__private_extern__ kern_return_t vm_object_destroy(
					vm_object_t		object,
					kern_return_t		reason);

__private_extern__ void		vm_object_pager_create(
					vm_object_t		object);

__private_extern__ void		vm_object_compressor_pager_create(
					vm_object_t		object);
__private_extern__ void		vm_object_page_map(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	(*map_fn)
						(void *, vm_object_offset_t),
					void			*map_fn_data);
__private_extern__ kern_return_t vm_object_upl_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					upl_size_t		size,
					upl_t			*upl,
					upl_page_info_t		*page_info,
					unsigned int		*count,
					int			flags);
__private_extern__ kern_return_t vm_object_transpose(
					vm_object_t		object1,
					vm_object_t		object2,
					vm_object_size_t	transpose_size);
__private_extern__ boolean_t	vm_object_sync(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		should_flush,
					boolean_t		should_return,
					boolean_t		should_iosync);
__private_extern__ kern_return_t vm_object_update(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	*error_offset,
					int			*io_errno,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);
__private_extern__ kern_return_t vm_object_lock_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);
__private_extern__ vm_object_t	vm_object_enter(
					memory_object_t		pager,
					vm_object_size_t	size,
					boolean_t		internal,
					boolean_t		init,
					boolean_t		check_named);
__private_extern__ void		vm_object_cluster_size(
					vm_object_t		object,
					vm_object_offset_t	*start,
					vm_size_t		*length,
					vm_object_fault_info_t	fault_info,
					uint32_t		*io_streaming);
__private_extern__ kern_return_t vm_object_populate_with_private(
					vm_object_t		object,
					vm_object_offset_t	offset,
					ppnum_t			phys_page,
					vm_size_t		size);

__private_extern__ void		vm_object_change_wimg_mode(
					vm_object_t		object,
					unsigned int		wimg_mode);
extern kern_return_t	adjust_vm_object_cache(
				vm_size_t	oval,
				vm_size_t	nval);

extern kern_return_t	vm_object_page_op(
				vm_object_t		object,
				vm_object_offset_t	offset,
				int			ops,
				ppnum_t			*phys_entry,
				int			*flags);

extern kern_return_t	vm_object_range_op(
				vm_object_t		object,
				vm_object_offset_t	offset_beg,
				vm_object_offset_t	offset_end,
				int			ops,
				uint32_t		*range);
__private_extern__ void		vm_object_reap_pages(
					vm_object_t	object,
					int		reap_type);
#define REAP_REAP	0
#define REAP_TERMINATE	1
#define REAP_PURGEABLE	2
#define REAP_DATA_FLUSH	3
#if CONFIG_FREEZE
struct default_freezer_handle;

__private_extern__ kern_return_t
vm_object_pack(
	unsigned int		*purgeable_count,
	unsigned int		*wired_count,
	unsigned int		*clean_count,
	unsigned int		*dirty_count,
	unsigned int		dirty_budget,
	boolean_t		*shared,
	vm_object_t		src_object,
	struct default_freezer_handle *df_handle);

__private_extern__ void
vm_object_pack_pages(
	unsigned int		*wired_count,
	unsigned int		*clean_count,
	unsigned int		*dirty_count,
	unsigned int		dirty_budget,
	vm_object_t		src_object,
	struct default_freezer_handle *df_handle);

__private_extern__ void
vm_object_pageout(
	vm_object_t		object);

__private_extern__ kern_return_t
vm_object_pagein(
	vm_object_t		object);
#endif /* CONFIG_FREEZE */
#if CONFIG_IOSCHED
struct io_reprioritize_req {
	uint64_t	blkno;
	uint32_t	len;
	int		priority;
	struct vnode	*devvp;
	queue_chain_t	io_reprioritize_list;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif /* CONFIG_IOSCHED */
/*
 *	Event waiting handling
 */

#define	VM_OBJECT_EVENT_INITIALIZED		0
#define	VM_OBJECT_EVENT_PAGER_READY		1
#define	VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
#define	VM_OBJECT_EVENT_MAPPING_IN_PROGRESS	3
#define	VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
#define	VM_OBJECT_EVENT_UNCACHING		5
#define	VM_OBJECT_EVENT_COPY_CALL		6
#define	VM_OBJECT_EVENT_CACHING			7
#define	VM_OBJECT_EVENT_UNBLOCKED		8
#define	VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS	9
#define	vm_object_assert_wait(object, event, interruptible)		\
	(((object)->all_wanted |= 1 << (event)),			\
	 assert_wait((event_t)((vm_offset_t)(object)+(event)),(interruptible)))

#define	vm_object_wait(object, event, interruptible)			\
	(vm_object_assert_wait((object),(event),(interruptible)),	\
	 vm_object_unlock(object),					\
	 thread_block(THREAD_CONTINUE_NULL))

#define thread_sleep_vm_object(object, event, interruptible)		\
	lck_rw_sleep(&(object)->Lock, LCK_SLEEP_PROMOTED_PRI, (event_t)(event), (interruptible))

#define vm_object_sleep(object, event, interruptible)			\
	(((object)->all_wanted |= 1 << (event)),			\
	 thread_sleep_vm_object((object),				\
		((vm_offset_t)(object)+(event)), (interruptible)))

#define	vm_object_wakeup(object, event)					\
	MACRO_BEGIN							\
	if ((object)->all_wanted & (1 << (event)))			\
		thread_wakeup((event_t)((vm_offset_t)(object) + (event))); \
	(object)->all_wanted &= ~(1 << (event));			\
	MACRO_END

#define	vm_object_set_wanted(object, event)				\
	MACRO_BEGIN							\
	((object)->all_wanted |= (1 << (event)));			\
	MACRO_END

#define	vm_object_wanted(object, event)					\
	((object)->all_wanted & (1 << (event)))
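
/*
 * Illustrative waiter/waker pairing (editor's sketch, not from the
 * original source): a waiter registers interest in "all_wanted" and
 * sleeps on an event address derived from the object; the waker only
 * calls thread_wakeup() if someone actually registered.
 *
 *	Waiter (object locked; vm_object_sleep sleeps via lck_rw_sleep):
 *		while (!object->pager_ready)
 *			vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *					THREAD_UNINT);
 *
 *	Waker (object locked):
 *		object->pager_ready = TRUE;
 *		vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
 */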
/*
 *	Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)					\
	MACRO_BEGIN							\
	int pip = ((object)->paging_in_progress +			\
		   (object)->activity_in_progress);			\
	if (pip < VM_PIP_DEBUG_MAX_REFS) {				\
		(void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
				   VM_PIP_DEBUG_STACK_FRAMES);		\
	}								\
	MACRO_END
#else	/* VM_PIP_DEBUG */
#define	VM_PIP_DEBUG_BEGIN(object)
#endif	/* VM_PIP_DEBUG */
#define	vm_object_activity_begin(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->activity_in_progress++;				\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_begin(%p): overflow\n", (object));\
	}								\
	MACRO_END

#define	vm_object_activity_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_end(%p): underflow\n", (object));\
	}								\
	(object)->activity_in_progress--;				\
	if ((object)->paging_in_progress == 0 &&			\
	    (object)->activity_in_progress == 0)			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_IN_PROGRESS);	\
	MACRO_END

#define	vm_object_paging_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->paging_in_progress++;					\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_begin(%p): overflow\n", (object));\
	}								\
	MACRO_END

#define	vm_object_paging_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_end(%p): underflow\n", (object));\
	}								\
	(object)->paging_in_progress--;					\
	if ((object)->paging_in_progress == 0) {			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
		if ((object)->activity_in_progress == 0)		\
			vm_object_wakeup((object),			\
					 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	}								\
	MACRO_END
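
/*
 * Illustrative bracketing (editor's sketch, not from the original
 * source): a paging_in_progress reference keeps the object from being
 * collapsed or terminated while a pager operation is in flight.
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	vm_object_unlock(object);
 *	...				(issue the request to the pager)
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);	(may wake *_PAGING_* waiters)
 *	vm_object_unlock(object);
 */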
#define	vm_object_paging_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0 ||			\
	       (object)->activity_in_progress != 0) {			\
		wait_result_t	_wr;					\
									\
		_wr = vm_object_sleep((object),				\
				VM_OBJECT_EVENT_PAGING_IN_PROGRESS,	\
				(interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
			/*XXX break; */					\
	}								\
	MACRO_END

#define	vm_object_paging_only_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0) {			\
		wait_result_t	_wr;					\
									\
		_wr = vm_object_sleep((object),				\
				VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\
				(interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
			/*XXX break; */					\
	}								\
	MACRO_END
#define vm_object_mapping_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert(! (object)->mapping_in_progress);			\
	(object)->mapping_in_progress = TRUE;				\
	MACRO_END

#define vm_object_mapping_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->mapping_in_progress);				\
	(object)->mapping_in_progress = FALSE;				\
	vm_object_wakeup((object),					\
			 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);		\
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->mapping_in_progress) {				\
		wait_result_t	_wr;					\
									\
		_wr = vm_object_sleep((object),				\
				VM_OBJECT_EVENT_MAPPING_IN_PROGRESS,	\
				(interruptible));			\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
			/*XXX break; */					\
	}								\
	assert(!(object)->mapping_in_progress);				\
	MACRO_END
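
/*
 * Illustrative protocol (editor's sketch, not from the original source):
 * "mapping_in_progress" is a single-owner busy flag rather than a
 * counter, so a mapper first waits out any current owner.
 *
 *	vm_object_lock(object);
 *	vm_object_mapping_wait(object, THREAD_UNINT);
 *	vm_object_mapping_begin(object);
 *	vm_object_unlock(object);
 *	...				(map or unmap the pager)
 *	vm_object_lock(object);
 *	vm_object_mapping_end(object);	(wakes MAPPING_IN_PROGRESS waiters)
 *	vm_object_unlock(object);
 */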
#define OBJECT_LOCK_SHARED	0
#define OBJECT_LOCK_EXCLUSIVE	1

extern lck_grp_t	vm_object_lck_grp;
extern lck_grp_attr_t	vm_object_lck_grp_attr;
extern lck_attr_t	vm_object_lck_attr;
extern lck_attr_t	kernel_object_lck_attr;
extern lck_attr_t	compressor_object_lck_attr;

extern vm_object_t	vm_pageout_scan_wants_object;
extern void		vm_object_lock(vm_object_t);
extern boolean_t	vm_object_lock_try(vm_object_t);
extern boolean_t	_vm_object_lock_try(vm_object_t);
extern boolean_t	vm_object_lock_avoid(vm_object_t);
extern void		vm_object_lock_shared(vm_object_t);
extern boolean_t	vm_object_lock_try_shared(vm_object_t);
/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)					\
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,		\
		    (((object) == kernel_object ||			\
		      (object) == vm_submap_object) ?			\
		     &kernel_object_lck_attr :				\
		     (((object) == compressor_object) ?			\
		      &compressor_object_lck_attr :			\
		      &vm_object_lck_attr)))
#define vm_object_lock_destroy(object)	lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_unlock(object)	lck_rw_done(&(object)->Lock)
#define vm_object_lock_upgrade(object)	lck_rw_lock_shared_to_exclusive(&(object)->Lock)
#define vm_object_lock_try_scan(object)	_vm_object_lock_try(object)
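
/*
 * Editor's sketch (not from the original source): a reader that finds it
 * must modify the object can try to upgrade; lck_rw_lock_shared_to_exclusive
 * returns FALSE after *dropping* the shared lock, so the caller must
 * relock exclusively and re-validate.
 *
 *	vm_object_lock_shared(object);
 *	...
 *	if (!vm_object_lock_upgrade(object)) {
 *		vm_object_lock(object);		(shared lock was released)
 *		...				(re-validate object state)
 *	}
 *	...
 *	vm_object_unlock(object);
 */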
/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object)		\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object)		\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object)		\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#else	/* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#endif	/* MACH_ASSERT || DEBUG */
#define vm_object_round_page(x)	(((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x)	((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
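
/*
 * Worked example (editor's addition, assuming 4 KB pages, i.e.
 * PAGE_MASK == 0xFFF):
 *
 *	vm_object_trunc_page(0x1234) == 0x1000
 *	vm_object_round_page(0x1234) == 0x2000
 *	vm_object_round_page(0x1000) == 0x1000	(already page-aligned)
 */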
extern void	vm_object_cache_add(vm_object_t);
extern void	vm_object_cache_remove(vm_object_t);
extern int	vm_object_cache_evict(int, int);

#endif	/* _VM_VM_OBJECT_H_ */