/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Author: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Virtual memory object module definitions.
 */
#ifndef _VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <mach_assert.h>
#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/vm_external.h>
#include <vm/vm_options.h>
#include <vm/vm_page.h>
#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern boolean_t vm_object_tracking_inited;
extern btlog_t *vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_NUM_RECORDS	50000
#define VM_OBJECT_TRACKING_BTDEPTH	7
#define VM_OBJECT_TRACKING_OP_CREATED	1
#define VM_OBJECT_TRACKING_OP_MODIFIED	2
#define VM_OBJECT_TRACKING_OP_TRUESHARE	3
#endif /* VM_OBJECT_TRACKING */
struct vm_shared_region_slide_info;

/*
 * Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */
struct vm_object_fault_info {
	vm_size_t	cluster_size;
	vm_behavior_t	behavior;
	vm_map_offset_t	lo_offset;
	vm_map_offset_t	hi_offset;
	unsigned int
	/* boolean_t */	no_cache:1,
	/* boolean_t */	stealth:1,
	/* boolean_t */	io_sync:1,
	/* boolean_t */	cs_bypass:1,
	/* boolean_t */	mark_zf_absent:1,
	/* boolean_t */	batch_pmap_op:1,
			__vm_object_fault_info_unused_bits:26;
};
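
/*
 * Example (sketch, not part of the original header): filling in a fault
 * info record before a page-in.  The offset bounds and behavior value are
 * illustrative; callers normally derive them from the faulting map entry
 * ("entry_start_offset"/"entry_end_offset" here are hypothetical names).
 *
 *	struct vm_object_fault_info fault_info;
 *
 *	bzero(&fault_info, sizeof (fault_info));
 *	fault_info.cluster_size = PAGE_SIZE;
 *	fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
 *	fault_info.lo_offset = entry_start_offset;
 *	fault_info.hi_offset = entry_end_offset;
 */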
#define vo_size			vo_un1.vou_size
#define vo_cache_pages_to_scan	vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset	vo_un2.vou_shadow_offset
#define vo_cache_ts		vo_un2.vou_cache_ts
#define vo_purgeable_owner	vo_un2.vou_purgeable_owner
#define vo_slide_info		vo_un2.vou_slide_info

struct vm_object {
	/*
	 * On 64-bit systems we pack the pointers hung off the memq.
	 * Those pointers have to be able to point back to the memq.
	 * The packed pointers are required to be on a 64-byte boundary,
	 * which means two things for the vm_object: (1) the memq
	 * struct has to be the first element of the structure so that
	 * we can control its alignment; (2) the vm_object must be
	 * aligned on a 64-byte boundary.  For static vm_objects
	 * this is accomplished via the 'aligned' attribute; for
	 * vm_objects in the zone pool, it is accomplished by
	 * rounding the size of the vm_object element up to the nearest
	 * multiple of 64 bytes before creating the zone.
	 */
	vm_page_queue_head_t	memq;		/* Resident memory - must be first */
	lck_rw_t		Lock;		/* Synchronization */
#if DEVELOPMENT || DEBUG
	thread_t		Lock_owner;
#endif
	union {
		vm_object_size_t vou_size;	/* Object size (only valid if internal) */
		int		vou_cache_pages_to_scan;
						/* pages yet to be visited in an
						 * external object in cache
						 */
	} vo_un1;
	struct vm_page		*memq_hint;
	int			ref_count;	/* Number of references */
	unsigned int		resident_page_count;
						/* number of resident pages */
	const unsigned int	wired_page_count; /* number of wired pages
						   * (use the VM_OBJECT_WIRED_PAGE_UPDATE
						   * macros to update) */
	unsigned int		reusable_page_count;
	struct vm_object	*copy;		/* Object that should receive
						 * a copy of my changed pages,
						 * for copy_delay, or just the
						 * temporary object that
						 * shadows this object, for
						 * copy_call.
						 */
	struct vm_object	*shadow;	/* My shadow */

	union {
		vm_object_offset_t vou_shadow_offset;	/* Offset into shadow */
		clock_sec_t	vou_cache_ts;	/* age of an external object
						 * present in cache
						 */
		task_t		vou_purgeable_owner;	/* If the purgable bits below are set
							 * to volatile/empty, this is the task
							 * that owns this purgeable object.
							 */
		struct vm_shared_region_slide_info *vou_slide_info;
	} vo_un2;
	memory_object_t		pager;		/* Where to get data */
	vm_object_offset_t	paging_offset;	/* Offset into memory object */
	memory_object_control_t	pager_control;	/* Where data comes back */

	memory_object_copy_strategy_t
				copy_strategy;	/* How to handle data copy */
#if __LP64__
	/*
	 * Some user processes (mostly VirtualMachine software) take a large
	 * number of UPLs (via IOMemoryDescriptors) to wire pages in large
	 * VM objects and overflow the 16-bit "activity_in_progress" counter.
	 * Since we never enforced any limit there, let's give them 32 bits
	 * for backwards compatibility's sake.
	 */
	unsigned int		paging_in_progress:16,
				__object1_unused_bits:16;
	unsigned int		activity_in_progress;
#else /* __LP64__ */
	/*
	 * On 32-bit platforms, enlarging "activity_in_progress" would increase
	 * the size of "struct vm_object".  Since we don't know of any actual
	 * overflow of these counters on these platforms, let's keep the
	 * counters as 16-bit integers.
	 */
	unsigned short		paging_in_progress;
	unsigned short		activity_in_progress;
#endif /* __LP64__ */
					/* The memory object ports are
					 * being used (e.g., for pagein
					 * or pageout) -- don't change
					 * any of these fields (i.e.,
					 * don't collapse, destroy or
					 * terminate)
					 */
	unsigned int
	/* boolean_t array */ all_wanted:11,	/* Bit array of "want to be
						 * awakened" notations.  See
						 * VM_OBJECT_EVENT_* items
						 * below */
	/* boolean_t */	pager_created:1,	/* Has pager been created? */
	/* boolean_t */	pager_initialized:1,	/* Are fields ready to use? */
	/* boolean_t */	pager_ready:1,		/* Will pager take requests? */

	/* boolean_t */	pager_trusted:1,	/* The pager for this object
						 * is trusted.  This is true for
						 * all internal objects (backed
						 * by the default pager)
						 */
	/* boolean_t */	can_persist:1,		/* The kernel may keep the data
						 * for this object (and rights
						 * to the memory object) after
						 * all address map references
						 * are deallocated
						 */
	/* boolean_t */	internal:1,		/* Created by the kernel (and
						 * therefore, managed by the
						 * default memory manager)
						 */
	/* boolean_t */	private:1,		/* magic device_pager object,
						 * holds private pages only */
	/* boolean_t */	pageout:1,		/* pageout object.  contains
						 * private pages that refer to
						 * a real memory object. */
	/* boolean_t */	alive:1,		/* Not yet terminated */

	/* boolean_t */	purgable:2,		/* Purgable state.  See the
						 * VM_PURGABLE_* constants.
						 */
	/* boolean_t */	purgeable_only_by_kernel:1,
	/* boolean_t */	purgeable_when_ripe:1,	/* Purgeable when a token
						 * becomes ripe.
						 */
	/* boolean_t */	shadowed:1,		/* Shadow may exist */

	/* boolean_t */	true_share:1,
						/* This object is mapped
						 * in more than one place
						 * and hence cannot be
						 * coalesced */
	/* boolean_t */	terminating:1,
						/* Allows vm_object_lookup
						 * and vm_object_deallocate
						 * to special-case their
						 * behavior when they are
						 * called as a result of
						 * page cleaning during
						 * object termination
						 */
	/* boolean_t */	named:1,		/* Enforces an internal
						 * naming convention, by
						 * calling the right routines
						 * for allocation and
						 * destruction; UBC references
						 * against the vm_object are
						 * checked.
						 */
	/* boolean_t */	shadow_severed:1,
						/* When a permanent object
						 * backing a COW goes away
						 * unexpectedly.  This bit
						 * allows vm_fault to return
						 * an error rather than a
						 * zero-filled page.
						 */
	/* boolean_t */	phys_contiguous:1,
						/* Memory is wired and
						 * guaranteed physically
						 * contiguous.  However,
						 * it is not device memory
						 * and obeys normal virtual
						 * memory rules w.r.t. pmap
						 * access bits.
						 */
	/* boolean_t */	nophyscache:1,
						/* When mapped at the
						 * pmap level, don't allow
						 * primary caching (for I/O).
						 */
	/* boolean_t */	_object5_unused_bits:1;
	queue_chain_t		cached_list;	/* Attachment point for the
						 * list of objects cached as a
						 * result of their can_persist
						 * value
						 */
	/*
	 * The following fields are not protected by any locks;
	 * they are updated via atomic compare-and-swap.
	 */
	vm_object_offset_t	last_alloc;	/* last allocation offset */
	int			sequential;	/* sequential access size */

	uint32_t		pages_created;

	vm_offset_t		cow_hint;	/* last page present in the
						 * shadow but not in the object;
						 * hold the object lock when
						 * altering */
	unsigned int
		wimg_bits:8,		/* cache WIMG bits */
		code_signed:1,		/* pages are signed and should be
					 * validated; the signatures are
					 * stored with the pager */
		transposed:1,		/* object was transposed with another */
		mapping_in_progress:1,	/* pager being mapped/unmapped */
		volatile_fault:1,	/* tested by
					 * VM_OBJECT_PURGEABLE_FAULT_ERROR() */
		purgeable_queue_type:2,
		purgeable_queue_group:3,
		no_tag_update:1,
#if CONFIG_SECLUDED_MEMORY
		eligible_for_secluded:1,
		can_grab_secluded:1,
#else /* CONFIG_SECLUDED_MEMORY */
		__object3_unused_bits:2,
#endif /* CONFIG_SECLUDED_MEMORY */
		__object2_unused_bits:5;	/* for expansion */
	uint8_t			scan_collisions;
	vm_tag_t		wire_tag;	/* used by the VM_OBJECT_WIRED* macros */
	uint8_t			__object4_unused_bits[2];
#if CONFIG_PHANTOM_CACHE
	uint32_t		phantom_object_id;
#endif /* CONFIG_PHANTOM_CACHE */
#if CONFIG_IOSCHED || UPL_DEBUG
	queue_head_t		uplq;		/* List of outstanding upls */
#endif /* CONFIG_IOSCHED || UPL_DEBUG */
#if VM_PIP_DEBUG
	/*
	 * Keep track of the stack traces for the first holders
	 * of a "paging_in_progress" reference for this VM object.
	 */
#define VM_PIP_DEBUG_STACK_FRAMES	25	/* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS		10	/* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif /* VM_PIP_DEBUG */
	queue_chain_t		objq;		/* object queue - currently used for purgable queues */
	queue_chain_t		task_objq;	/* objects owned by task - protected by task lock */

#if DEBUG
	void			*purgeable_owner_bt[16];
	task_t			vo_purgeable_volatilizer; /* who made it volatile? */
	void			*purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};
#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)				\
	((object)->volatile_fault &&					\
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||			\
	  (object)->purgable == VM_PURGABLE_EMPTY))
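
/*
 * Example (sketch, not part of the original header): a fault path can use
 * the macro above to fail accesses to volatilized memory rather than
 * zero-filling it.  The surrounding code and the error choice are
 * illustrative.
 *
 *	vm_object_lock(object);
 *	if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
 *		vm_object_unlock(object);
 *		return KERN_MEMORY_ERROR;
 *	}
 */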
extern
vm_object_t	kernel_object;		/* the single kernel object */

extern
vm_object_t	compressor_object;	/* the single compressor object */

extern
unsigned int	vm_object_absent_max;	/* maximum number of absent pages
					 * at a time for each object */
#define VM_MSYNC_INITIALIZED		0
#define VM_MSYNC_SYNCHRONIZING		1
#define VM_MSYNC_DONE			2
extern lck_grp_t	vm_map_lck_grp;
extern lck_attr_t	vm_map_lck_attr;
#ifndef VM_TAG_ACTIVE_UPDATE
#error VM_TAG_ACTIVE_UPDATE
#endif
#define VM_OBJECT_WIRED(object, tag)					\
    MACRO_BEGIN								\
    assert(VM_KERN_MEMORY_NONE != (tag));				\
    assert(VM_KERN_MEMORY_NONE == (object)->wire_tag);			\
    (object)->wire_tag = (tag);						\
    if (!VM_TAG_ACTIVE_UPDATE						\
	&& ((object)->purgable == VM_PURGABLE_DENY))			\
    {									\
	lck_spin_lock(&vm_objects_wired_lock);				\
	assert(!(object)->objq.next);					\
	assert(!(object)->objq.prev);					\
	queue_enter(&vm_objects_wired, (object), vm_object_t, objq);	\
	lck_spin_unlock(&vm_objects_wired_lock);			\
    }									\
    MACRO_END
#define VM_OBJECT_UNWIRED(object)					\
    MACRO_BEGIN								\
    if (!VM_TAG_ACTIVE_UPDATE						\
	&& ((object)->purgable == VM_PURGABLE_DENY) && (object)->objq.next) \
    {									\
	lck_spin_lock(&vm_objects_wired_lock);				\
	queue_remove(&vm_objects_wired, (object), vm_object_t, objq);	\
	lck_spin_unlock(&vm_objects_wired_lock);			\
    }									\
    if (VM_KERN_MEMORY_NONE != (object)->wire_tag) {			\
	vm_tag_update_size((object)->wire_tag, -ptoa_64((object)->wired_page_count)); \
	(object)->wire_tag = VM_KERN_MEMORY_NONE;			\
    }									\
    MACRO_END
// These two macros start & end a C block
#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object)			\
    MACRO_BEGIN								\
    {									\
	int64_t __wireddelta = 0; vm_tag_t __waswired = (object)->wire_tag;

#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag)			\
	if (__wireddelta) {						\
	    boolean_t __overflow __assert_only =			\
	    os_add_overflow((object)->wired_page_count, __wireddelta,	\
			    (unsigned int *)(uintptr_t)&(object)->wired_page_count); \
	    assert(!__overflow);					\
	    if (!(object)->pageout && !(object)->no_tag_update) {	\
		if (__wireddelta > 0) {					\
		    assert(VM_KERN_MEMORY_NONE != (tag));		\
		    if (VM_KERN_MEMORY_NONE == __waswired) {		\
			VM_OBJECT_WIRED((object), (tag));		\
		    }							\
		    vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \
		} else if (VM_KERN_MEMORY_NONE != __waswired) {		\
		    assert(VM_KERN_MEMORY_NONE != (object)->wire_tag);	\
		    vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \
		    if (!(object)->wired_page_count) {			\
			VM_OBJECT_UNWIRED((object));			\
		    }							\
		}							\
	    }								\
	}								\
    }									\
    MACRO_END

#define VM_OBJECT_WIRED_PAGE_COUNT(object, delta)			\
	__wireddelta += delta;

#define VM_OBJECT_WIRED_PAGE_ADD(object, m)				\
	if (!(m)->private && !(m)->fictitious) __wireddelta++;

#define VM_OBJECT_WIRED_PAGE_REMOVE(object, m)				\
	if (!(m)->private && !(m)->fictitious) __wireddelta--;
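
/*
 * Example (sketch): a typical wiring path brackets its page updates with
 * the START/END pair so the accumulated delta is folded into
 * "wired_page_count" and the vm_tag accounting exactly once.  "object",
 * "mem" and "tag" are illustrative names; the object lock is held
 * exclusive throughout.
 *
 *	VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
 *	VM_OBJECT_WIRED_PAGE_ADD(object, mem);
 *	VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
 */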
#define OBJECT_LOCK_SHARED	0
#define OBJECT_LOCK_EXCLUSIVE	1
extern lck_grp_t	vm_object_lck_grp;
extern lck_grp_attr_t	vm_object_lck_grp_attr;
extern lck_attr_t	vm_object_lck_attr;
extern lck_attr_t	kernel_object_lck_attr;
extern lck_attr_t	compressor_object_lck_attr;

extern vm_object_t	vm_pageout_scan_wants_object;
extern void		vm_object_lock(vm_object_t);
extern boolean_t	vm_object_lock_try(vm_object_t);
extern boolean_t	_vm_object_lock_try(vm_object_t);
extern boolean_t	vm_object_lock_avoid(vm_object_t);
extern void		vm_object_lock_shared(vm_object_t);
extern boolean_t	vm_object_lock_yield_shared(vm_object_t);
extern boolean_t	vm_object_lock_try_shared(vm_object_t);
extern void		vm_object_unlock(vm_object_t);
extern boolean_t	vm_object_lock_upgrade(vm_object_t);
/*
 * Object locking macros
 */

#define vm_object_lock_init(object)					\
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,		\
		    (((object) == kernel_object ||			\
		      (object) == vm_submap_object) ?			\
		     &kernel_object_lck_attr :				\
		     (((object) == compressor_object) ?			\
		      &compressor_object_lck_attr :			\
		      &vm_object_lck_attr)))

#define vm_object_lock_destroy(object)	lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_lock_try_scan(object)	_vm_object_lock_try(object)
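
/*
 * Example (sketch): a read-mostly path can take the lock shared and
 * upgrade only when it must mutate the object.  If the upgrade fails the
 * lock has been dropped, so it must be re-taken exclusive and the
 * object's state re-validated ("needs_update" is a hypothetical flag).
 *
 *	vm_object_lock_shared(object);
 *	if (needs_update && !vm_object_lock_upgrade(object)) {
 *		vm_object_lock(object);		// exclusive; re-validate
 *	}
 *	...
 *	vm_object_unlock(object);
 */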
/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object)	\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object)	\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object)	\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_object_lock_assert_notheld(object)	\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
#else  /* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#define vm_object_lock_assert_notheld(object)
#endif /* MACH_ASSERT || DEBUG */
/*
 * Declare procedures that operate on VM objects.
 */

__private_extern__ void		vm_object_bootstrap(void);

__private_extern__ void		vm_object_init(void);

__private_extern__ void		vm_object_init_lck_grp(void);

__private_extern__ void		vm_object_reaper_init(void);

__private_extern__ vm_object_t	vm_object_allocate(vm_object_size_t size);

__private_extern__ void		_vm_object_allocate(
					vm_object_size_t	size,
					vm_object_t		object);

#if TASK_SWAPPER

__private_extern__ void		vm_object_res_reference(
					vm_object_t	object);
__private_extern__ void		vm_object_res_deallocate(
					vm_object_t	object);
#define VM_OBJ_RES_INCR(object)	(object)->res_count++
#define VM_OBJ_RES_DECR(object)	(object)->res_count--

#else	/* TASK_SWAPPER */

#define VM_OBJ_RES_INCR(object)
#define VM_OBJ_RES_DECR(object)
#define vm_object_res_reference(object)
#define vm_object_res_deallocate(object)

#endif	/* TASK_SWAPPER */
#define vm_object_reference_locked(object)		\
	MACRO_BEGIN					\
	vm_object_t RLObject = (object);		\
	vm_object_lock_assert_exclusive(object);	\
	assert((RLObject)->ref_count > 0);		\
	(RLObject)->ref_count++;			\
	assert((RLObject)->ref_count > 1);		\
	vm_object_res_reference(RLObject);		\
	MACRO_END

#define vm_object_reference_shared(object)				\
	MACRO_BEGIN							\
	vm_object_t RLObject = (object);				\
	vm_object_lock_assert_shared(object);				\
	assert((RLObject)->ref_count > 0);				\
	OSAddAtomic(1, &(RLObject)->ref_count);				\
	assert((RLObject)->ref_count > 0);				\
	/* XXX we would need an atomic version of the following ... */	\
	vm_object_res_reference(RLObject);				\
	MACRO_END

__private_extern__ void		vm_object_reference(
					vm_object_t	object);

#if !MACH_ASSERT

#define vm_object_reference(object)			\
	MACRO_BEGIN					\
	vm_object_t RObject = (object);			\
	if (RObject) {					\
		vm_object_lock_shared(RObject);		\
		vm_object_reference_shared(RObject);	\
		vm_object_unlock(RObject);		\
	}						\
	MACRO_END

#endif	/* MACH_ASSERT */
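
/*
 * Example (sketch): taking an extra reference to keep an object alive
 * across a window where its lock is not held, then dropping it with
 * vm_object_deallocate() (declared below):
 *
 *	vm_object_reference(object);	// may lock the object internally
 *	...use the object without holding its lock...
 *	vm_object_deallocate(object);	// drop the extra reference
 */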
__private_extern__ void		vm_object_deallocate(
					vm_object_t	object);

__private_extern__ kern_return_t vm_object_release_name(
					vm_object_t	object,
					int		flags);

__private_extern__ void		vm_object_pmap_protect(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot);

__private_extern__ void		vm_object_pmap_protect_options(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot,
					int			options);

__private_extern__ void		vm_object_page_remove(
					vm_object_t		object,
					vm_object_offset_t	start,
					vm_object_offset_t	end);
__private_extern__ void		vm_object_deactivate_pages(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		kill_page,
					boolean_t		reusable_page,
					struct pmap		*pmap,
					vm_map_offset_t		pmap_offset);

__private_extern__ void		vm_object_reuse_pages(
					vm_object_t		object,
					vm_object_offset_t	start_offset,
					vm_object_offset_t	end_offset,
					boolean_t		allow_partial_reuse);
__private_extern__ uint64_t	vm_object_purge(
					vm_object_t	object,
					int		flags);

__private_extern__ kern_return_t vm_object_purgable_control(
					vm_object_t	object,
					vm_purgable_t	control,
					int		*state);

__private_extern__ kern_return_t vm_object_get_page_counts(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					unsigned int		*resident_page_count,
					unsigned int		*dirty_page_count);
__private_extern__ boolean_t	vm_object_coalesce(
					vm_object_t		prev_object,
					vm_object_t		next_object,
					vm_object_offset_t	prev_offset,
					vm_object_offset_t	next_offset,
					vm_object_size_t	prev_size,
					vm_object_size_t	next_size);
__private_extern__ boolean_t	vm_object_shadow(
					vm_object_t		*object,
					vm_object_offset_t	*offset,
					vm_object_size_t	length);
__private_extern__ void		vm_object_collapse(
					vm_object_t		object,
					vm_object_offset_t	offset,
					boolean_t		can_bypass);
__private_extern__ boolean_t	vm_object_copy_quickly(
					vm_object_t		*_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		*_src_needs_copy,
					boolean_t		*_dst_needs_copy);
__private_extern__ kern_return_t vm_object_copy_strategically(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					vm_object_t		*dst_object,
					vm_object_offset_t	*dst_offset,
					boolean_t		*dst_needs_copy);
__private_extern__ kern_return_t vm_object_copy_slowly(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		interruptible,
					vm_object_t		*_result_object);
__private_extern__ vm_object_t	vm_object_copy_delayed(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		src_object_shared);
__private_extern__ kern_return_t vm_object_destroy(
					vm_object_t	object,
					kern_return_t	reason);
__private_extern__ void		vm_object_pager_create(
					vm_object_t	object);

__private_extern__ void		vm_object_compressor_pager_create(
					vm_object_t	object);

__private_extern__ void		vm_object_page_map(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	(*map_fn)
						(void *, vm_object_offset_t),
					void			*map_fn_data);
__private_extern__ kern_return_t vm_object_upl_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					upl_size_t		size,
					upl_t			*upl,
					upl_page_info_t		*page_info,
					unsigned int		*count,
					upl_control_flags_t	flags,
					vm_tag_t		tag);
__private_extern__ kern_return_t vm_object_transpose(
					vm_object_t		object1,
					vm_object_t		object2,
					vm_object_size_t	transpose_size);
__private_extern__ boolean_t	vm_object_sync(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		should_flush,
					boolean_t		should_return,
					boolean_t		should_iosync);
__private_extern__ kern_return_t vm_object_update(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	*error_offset,
					int			*io_errno,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);
__private_extern__ kern_return_t vm_object_lock_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);
__private_extern__ vm_object_t	vm_object_memory_object_associate(
					memory_object_t		pager,
					vm_object_t		object,
					vm_object_size_t	size,
					boolean_t		check_named);
__private_extern__ void		vm_object_cluster_size(
					vm_object_t		object,
					vm_object_offset_t	*start,
					vm_size_t		*length,
					vm_object_fault_info_t	fault_info,
					uint32_t		*io_streaming);
__private_extern__ kern_return_t vm_object_populate_with_private(
					vm_object_t		object,
					vm_object_offset_t	offset,
					ppnum_t			phys_page,
					vm_size_t		size);

__private_extern__ void		vm_object_change_wimg_mode(
					vm_object_t	object,
					unsigned int	wimg_mode);
extern kern_return_t	adjust_vm_object_cache(
				vm_size_t	oval,
				vm_size_t	nval);
extern kern_return_t	vm_object_page_op(
				vm_object_t		object,
				vm_object_offset_t	offset,
				int			ops,
				ppnum_t			*phys_entry,
				int			*flags);

extern kern_return_t	vm_object_range_op(
				vm_object_t		object,
				vm_object_offset_t	offset_beg,
				vm_object_offset_t	offset_end,
				int			ops,
				uint32_t		*range);
__private_extern__ void		vm_object_reap_pages(
					vm_object_t	object,
					int		reap_type);
#define REAP_REAP	0
#define REAP_TERMINATE	1
#define REAP_PURGEABLE	2
#define REAP_DATA_FLUSH	3
#if CONFIG_FREEZE

__private_extern__ void
vm_object_compressed_freezer_pageout(
	vm_object_t	object);

__private_extern__ void
vm_object_compressed_freezer_done(
	vm_object_t	object);

#endif /* CONFIG_FREEZE */
__private_extern__ void
vm_object_pageout(
	vm_object_t	object);

#if CONFIG_IOSCHED
struct io_reprioritize_req {
	uint64_t	blkno;
	uint32_t	len;
	int		priority;
	struct vnode	*devvp;
	queue_chain_t	io_reprioritize_list;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif /* CONFIG_IOSCHED */
/*
 * Event waiting handling
 */

#define VM_OBJECT_EVENT_INITIALIZED		0
#define VM_OBJECT_EVENT_PAGER_READY		1
#define VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
#define VM_OBJECT_EVENT_MAPPING_IN_PROGRESS	3
#define VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
#define VM_OBJECT_EVENT_UNCACHING		5
#define VM_OBJECT_EVENT_COPY_CALL		6
#define VM_OBJECT_EVENT_CACHING			7
#define VM_OBJECT_EVENT_UNBLOCKED		8
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS	9

#define VM_OBJECT_EVENT_MAX	10	/* 11 bits in "all_wanted", so 0->10 */
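
/*
 * Example (sketch): waiting for the pager to become ready, mirroring what
 * the inline helpers below do.  The object must be locked exclusive;
 * vm_object_sleep() records the event in "all_wanted" and blocks:
 *
 *	while (!object->pager_ready) {
 *		vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *				THREAD_UNINT);
 *	}
 *
 * The thread that makes the pager ready then calls
 * vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY).
 */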
static __inline__ wait_result_t
vm_object_assert_wait(
	vm_object_t		object,
	int			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = assert_wait((event_t)((vm_offset_t)object + event),
			 interruptible);
	return wr;
}
static __inline__ wait_result_t
vm_object_wait(
	vm_object_t		object,
	int			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

	vm_object_assert_wait(object, event, interruptible);
	vm_object_unlock(object);
	wr = thread_block(THREAD_CONTINUE_NULL);
	return wr;
}
static __inline__ wait_result_t
thread_sleep_vm_object(
	vm_object_t		object,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

#if DEVELOPMENT || DEBUG
	if (object->Lock_owner != current_thread())
		panic("thread_sleep_vm_object: not owner - %p\n", object);
	object->Lock_owner = 0;
#endif
	wr = lck_rw_sleep(&object->Lock,
			  LCK_SLEEP_PROMOTED_PRI,
			  event,
			  interruptible);
#if DEVELOPMENT || DEBUG
	object->Lock_owner = current_thread();
#endif
	return wr;
}
static __inline__ wait_result_t
vm_object_sleep(
	vm_object_t		object,
	int			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = thread_sleep_vm_object(object,
				    (event_t)((vm_offset_t)object + event),
				    interruptible);
	return wr;
}
static __inline__ void
vm_object_wakeup(
	vm_object_t	object,
	int		event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	if (object->all_wanted & (1 << event))
		thread_wakeup((event_t)((vm_offset_t)object + event));
	object->all_wanted &= ~(1 << event);
}
static __inline__ void
vm_object_set_wanted(
	vm_object_t	object,
	int		event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= (1 << event);
}
static __inline__ int
vm_object_wanted(
	vm_object_t	object,
	int		event)
{
	vm_object_lock_assert_held(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	return object->all_wanted & (1 << event);
}
/*
 * Routines implemented as macros
 */
#if VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)					\
	MACRO_BEGIN							\
	int pip = ((object)->paging_in_progress +			\
		   (object)->activity_in_progress);			\
	if (pip < VM_PIP_DEBUG_MAX_REFS) {				\
		(void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
				   VM_PIP_DEBUG_STACK_FRAMES);		\
	}								\
	MACRO_END
#else  /* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif /* VM_PIP_DEBUG */
#define vm_object_activity_begin(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->activity_in_progress++;				\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_begin(%p): overflow\n", (object));\
	}								\
	MACRO_END

#define vm_object_activity_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_end(%p): underflow\n", (object));\
	}								\
	(object)->activity_in_progress--;				\
	if ((object)->paging_in_progress == 0 &&			\
	    (object)->activity_in_progress == 0)			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_IN_PROGRESS);	\
	MACRO_END
#define vm_object_paging_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->paging_in_progress++;					\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_begin(%p): overflow\n", (object));\
	}								\
	MACRO_END

#define vm_object_paging_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_end(%p): underflow\n", (object));\
	}								\
	(object)->paging_in_progress--;					\
	if ((object)->paging_in_progress == 0) {			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
		if ((object)->activity_in_progress == 0)		\
			vm_object_wakeup((object),			\
					 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	}								\
	MACRO_END
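
/*
 * Example (sketch): holding a "paging in progress" reference across an
 * I/O window so the object is not collapsed or terminated while its
 * pager is in use ("object" is locked exclusive at begin/end):
 *
 *	vm_object_paging_begin(object);
 *	vm_object_unlock(object);
 *	...issue pager I/O...
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);
 */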
#define vm_object_paging_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0 ||			\
	       (object)->activity_in_progress != 0) {			\
		wait_result_t _wr;					\
									\
		_wr = vm_object_sleep((object),				\
				      VM_OBJECT_EVENT_PAGING_IN_PROGRESS, \
				      (interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	MACRO_END

#define vm_object_paging_only_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0) {			\
		wait_result_t _wr;					\
									\
		_wr = vm_object_sleep((object),				\
				      VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\
				      (interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	MACRO_END
#define vm_object_mapping_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert(!(object)->mapping_in_progress);				\
	(object)->mapping_in_progress = TRUE;				\
	MACRO_END

#define vm_object_mapping_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->mapping_in_progress);				\
	(object)->mapping_in_progress = FALSE;				\
	vm_object_wakeup((object),					\
			 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);		\
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->mapping_in_progress) {				\
		wait_result_t _wr;					\
									\
		_wr = vm_object_sleep((object),				\
				      VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
				      (interruptible));			\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	assert(!(object)->mapping_in_progress);				\
	MACRO_END
#define vm_object_round_page(x)	(((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x)	((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
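
/*
 * Example: with 4 KB pages (PAGE_MASK == 0xFFF),
 *	vm_object_trunc_page(0x1234) == 0x1000 and
 *	vm_object_round_page(0x1234) == 0x2000;
 * offsets already on a page boundary are unchanged by both macros.
 */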
extern void	vm_object_cache_add(vm_object_t);
extern void	vm_object_cache_remove(vm_object_t);
extern int	vm_object_cache_evict(int, int);

#endif	/* _VM_VM_OBJECT_H_ */