/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory object module definitions.
 */
#ifndef	_VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <mach_assert.h>
#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>

#include <vm/vm_external.h>

#include <vm/vm_options.h>
#include <vm/vm_page.h>
#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern boolean_t vm_object_tracking_inited;
extern btlog_t *vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_NUM_RECORDS	50000
#define VM_OBJECT_TRACKING_BTDEPTH	7
#define VM_OBJECT_TRACKING_OP_CREATED	1
#define VM_OBJECT_TRACKING_OP_MODIFIED	2
#define VM_OBJECT_TRACKING_OP_TRUESHARE	3
#endif /* VM_OBJECT_TRACKING */
struct vm_shared_region_slide_info;

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */
struct vm_object_fault_info {
	int		interruptible;
	uint32_t	user_tag;
	vm_size_t	cluster_size;
	vm_behavior_t	behavior;
	vm_map_offset_t	lo_offset;
	vm_map_offset_t	hi_offset;
	unsigned int
	/* boolean_t */	no_cache:1,
	/* boolean_t */	stealth:1,
	/* boolean_t */	io_sync:1,
	/* boolean_t */	cs_bypass:1,
	/* boolean_t */	mark_zf_absent:1,
	/* boolean_t */	batch_pmap_op:1,
		__vm_object_fault_info_unused_bits:26;
	int		pmap_options;
};
#define	vo_size				vo_un1.vou_size
#define vo_cache_pages_to_scan		vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset		vo_un2.vou_shadow_offset
#define vo_cache_ts			vo_un2.vou_cache_ts
#define vo_purgeable_owner		vo_un2.vou_purgeable_owner
#define vo_slide_info			vo_un2.vou_slide_info
struct vm_object {
	/*
	 * on 64 bit systems we pack the pointers hung off the memq.
	 * those pointers have to be able to point back to the memq.
	 * the packed pointers are required to be on a 64 byte boundary
	 * which means 2 things for the vm_object...  (1) the memq
	 * struct has to be the first element of the structure so that
	 * we can control its alignment... (2) the vm_object must be
	 * aligned on a 64 byte boundary... for static vm_object's
	 * this is accomplished via the 'aligned' attribute... for
	 * vm_object's in the zone pool, this is accomplished by
	 * rounding the size of the vm_object element to the nearest
	 * 64 byte size before creating the zone.
	 */
	vm_page_queue_head_t	memq;		/* Resident memory - must be first */
	lck_rw_t		Lock;		/* Synchronization */

#if DEVELOPMENT || DEBUG
	thread_t		Lock_owner;
#endif

	union {
		vm_object_size_t  vou_size;	/* Object size (only valid if internal) */
		int		  vou_cache_pages_to_scan;	/* pages yet to be visited in an
								 * external object in cache
								 */
	} vo_un1;

	struct vm_page		*memq_hint;
	int			ref_count;	/* Number of references */
	unsigned int		resident_page_count;
						/* number of resident pages */
	const unsigned int	wired_page_count; /* number of wired pages
						     use VM_OBJECT_WIRED_PAGE_UPDATE macros to update */
	unsigned int		reusable_page_count;

	struct vm_object	*copy;		/* Object that should receive
						 * a copy of my changed pages,
						 * for copy_delay, or just the
						 * temporary object that
						 * shadows this object, for
						 * copy_call.
						 */
	struct vm_object	*shadow;	/* My shadow */

	union {
		vm_object_offset_t vou_shadow_offset;	/* Offset into shadow */
		clock_sec_t	vou_cache_ts;	/* age of an external object
						 * present in cache
						 */
		task_t		vou_purgeable_owner;	/* If the purgeable bits below are set
							 * to volatile/empty, this is the task
							 * that owns this purgeable object.
							 */
		struct vm_shared_region_slide_info *vou_slide_info;
	} vo_un2;

	memory_object_t		pager;		/* Where to get data */
	vm_object_offset_t	paging_offset;	/* Offset into memory object */
	memory_object_control_t	pager_control;	/* Where data comes back */

	memory_object_copy_strategy_t
				copy_strategy;	/* How to handle data copy */

#if __LP64__
	/*
	 * Some user processes (mostly VirtualMachine software) take a large
	 * number of UPLs (via IOMemoryDescriptors) to wire pages in large
	 * VM objects and overflow the 16-bit "activity_in_progress" counter.
	 * Since we never enforced any limit there, let's give them 32 bits
	 * for backwards compatibility's sake.
	 */
	unsigned int		paging_in_progress:16,
				__object1_unused_bits:16;
	unsigned int		activity_in_progress;
#else /* __LP64__ */
	/*
	 * On 32-bit platforms, enlarging "activity_in_progress" would increase
	 * the size of "struct vm_object".  Since we don't know of any actual
	 * overflow of these counters on these platforms, let's keep the
	 * counters as 16-bit integers.
	 */
	unsigned short		paging_in_progress;
	unsigned short		activity_in_progress;
#endif /* __LP64__ */
						/* The memory object ports are
						 * being used (e.g., for pagein
						 * or pageout) -- don't change
						 * any of these fields (i.e.,
						 * don't collapse, destroy or
						 * terminate)
						 */

	unsigned int
	/* boolean_t array */	all_wanted:11,	/* Bit array of "want to be
						 * awakened" notations.  See
						 * VM_OBJECT_EVENT_* items
						 * below */
	/* boolean_t */	pager_created:1,	/* Has pager been created? */
	/* boolean_t */	pager_initialized:1,	/* Are fields ready to use? */
	/* boolean_t */	pager_ready:1,		/* Will pager take requests? */

	/* boolean_t */	pager_trusted:1,	/* The pager for this object
						 * is trusted. This is true for
						 * all internal objects (backed
						 * by the default pager)
						 */
	/* boolean_t */	can_persist:1,		/* The kernel may keep the data
						 * for this object (and rights
						 * to the memory object) after
						 * all address map references
						 * are deallocated?
						 */
	/* boolean_t */	internal:1,		/* Created by the kernel (and
						 * therefore, managed by the
						 * default memory manager)
						 */
	/* boolean_t */	private:1,		/* magic device_pager object,
						 * holds private pages only */
	/* boolean_t */	pageout:1,		/* pageout object. contains
						 * private pages that refer to
						 * a real memory object. */
	/* boolean_t */	alive:1,		/* Not yet terminated */

	/* boolean_t */	purgable:2,		/* Purgable state.  See
						 * VM_PURGABLE_*
						 */
	/* boolean_t */	purgeable_only_by_kernel:1,
	/* boolean_t */	purgeable_when_ripe:1,	/* Purgeable when a token
						 * becomes ripe.
						 */
	/* boolean_t */	shadowed:1,		/* Shadow may exist */
	/* boolean_t */	true_share:1,
						/* This object is mapped
						 * in more than one place
						 * and hence cannot be
						 * coalesced */
	/* boolean_t */	terminating:1,
						/* Allows vm_object_lookup
						 * and vm_object_deallocate
						 * to special case their
						 * behavior when they are
						 * called as a result of
						 * page cleaning during
						 * object termination
						 */
	/* boolean_t */	named:1,		/* Enforces an internal
						 * naming convention, by
						 * calling the right routines
						 * for allocation and
						 * destruction; UBC references
						 * against the vm_object are
						 * checked.
						 */
	/* boolean_t */	shadow_severed:1,
						/* When a permanent object
						 * backing a COW goes away
						 * unexpectedly.  This bit
						 * allows vm_fault to return
						 * an error rather than a
						 * zero filled page.
						 */
	/* boolean_t */	phys_contiguous:1,
						/* Memory is wired and
						 * guaranteed physically
						 * contiguous.  However
						 * it is not device memory
						 * and obeys normal virtual
						 * memory rules w.r.t pmap
						 * access bits.
						 */
	/* boolean_t */	nophyscache:1,
						/* When mapped at the
						 * pmap level, don't allow
						 * primary caching. (for
						 * I/O)
						 */
	/* boolean_t */	_object5_unused_bits:1;
	queue_chain_t		cached_list;	/* Attachment point for the
						 * list of objects cached as a
						 * result of their can_persist
						 * value
						 */
	/*
	 * the following fields are not protected by any locks
	 * they are updated via atomic compare and swap
	 */
	vm_object_offset_t	last_alloc;	/* last allocation offset */
	int			sequential;	/* sequential access size */

	uint32_t		pages_created;
	uint32_t		pages_used;
	vm_offset_t		cow_hint;	/* last page present in     */
						/* shadow but not in object */
	/* hold object lock when altering */
	unsigned	int
		wimg_bits:8,		/* cache WIMG bits         */
		code_signed:1,		/* pages are signed and should be
					   validated; the signatures are stored
					   with the pager */
		transposed:1,		/* object was transposed with another */
		mapping_in_progress:1,	/* pager being mapped/unmapped */
		phantom_isssd:1,
		volatile_empty:1,
		volatile_fault:1,
		all_reusable:1,
		blocked_access:1,
		set_cache_attr:1,
		object_slid:1,
		purgeable_queue_type:2,
		purgeable_queue_group:3,
		io_tracking:1,
		no_tag_update:1,	/*  */
#if CONFIG_SECLUDED_MEMORY
		eligible_for_secluded:1,
		can_grab_secluded:1,
#else /* CONFIG_SECLUDED_MEMORY */
		__object3_unused_bits:2,
#endif /* CONFIG_SECLUDED_MEMORY */
		__object2_unused_bits:5;	/* for expansion */

	uint8_t			scan_collisions;
	vm_tag_t		wire_tag;
	uint8_t			__object4_unused_bits[2];
#if CONFIG_PHANTOM_CACHE
	uint32_t		phantom_object_id;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	queue_head_t		uplq;		/* List of outstanding upls */
#endif /* CONFIG_IOSCHED || UPL_DEBUG */

#ifdef	VM_PIP_DEBUG
	/*
	 * Keep track of the stack traces for the first holders
	 * of a "paging_in_progress" reference for this VM object.
	 */
#define VM_PIP_DEBUG_STACK_FRAMES	25	/* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS		10	/* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif	/* VM_PIP_DEBUG */

	queue_chain_t		objq;	/* object queue - currently used for purgable queues */

#if DEBUG
	void *purgeable_owner_bt[16];
	task_t vo_purgeable_volatilizer; /* who made it volatile? */
	void *purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};
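/*
 * Illustrative sketch (not part of this header): the 64-byte alignment
 * requirement described at the top of struct vm_object.  A static object
 * can be forced onto a 64 byte boundary with the compiler's 'aligned'
 * attribute; the name "example_object_store" is hypothetical.
 */
#if 0
static struct vm_object example_object_store __attribute__((aligned(64)));
#endif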
#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)				\
	((object)->volatile_fault &&					\
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||			\
	  (object)->purgable == VM_PURGABLE_EMPTY))
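/*
 * Illustrative sketch (not part of this header): a fault path can use
 * the predicate above to fail faults against volatile/empty purgeable
 * memory rather than zero-filling it.  "example_check_fault" is
 * hypothetical.
 */
#if 0
static kern_return_t
example_check_fault(vm_object_t object)
{
	if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object))
		return KERN_MEMORY_ERROR;	/* don't hand back zero-filled volatile data */
	return KERN_SUCCESS;
}
#endif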
extern
vm_object_t	kernel_object;		/* the single kernel object */

extern
vm_object_t	compressor_object;	/* the single compressor object */

extern
unsigned int	vm_object_absent_max;	/* maximum number of absent pages
					   at a time for each object */
#define VM_MSYNC_INITIALIZED		0
#define VM_MSYNC_SYNCHRONIZING		1
#define VM_MSYNC_DONE			2

extern lck_grp_t	vm_map_lck_grp;
extern lck_attr_t	vm_map_lck_attr;
#ifndef VM_TAG_ACTIVE_UPDATE
#error VM_TAG_ACTIVE_UPDATE
#endif /* VM_TAG_ACTIVE_UPDATE */
#define VM_OBJECT_WIRED(object, tag)					\
    MACRO_BEGIN								\
    assert(VM_KERN_MEMORY_NONE != (tag));				\
    assert(VM_KERN_MEMORY_NONE == (object)->wire_tag);			\
    (object)->wire_tag = (tag);						\
    if (!VM_TAG_ACTIVE_UPDATE						\
	&& ((object)->purgable == VM_PURGABLE_DENY))			\
    {									\
	lck_spin_lock(&vm_objects_wired_lock);				\
	assert(!(object)->objq.next);					\
	assert(!(object)->objq.prev);					\
	queue_enter(&vm_objects_wired, (object), vm_object_t, objq);	\
	lck_spin_unlock(&vm_objects_wired_lock);			\
    }									\
    MACRO_END
#define VM_OBJECT_UNWIRED(object)					\
    MACRO_BEGIN								\
    if (!VM_TAG_ACTIVE_UPDATE						\
	&& ((object)->purgable == VM_PURGABLE_DENY) && (object)->objq.next) \
    {									\
	lck_spin_lock(&vm_objects_wired_lock);				\
	queue_remove(&vm_objects_wired, (object), vm_object_t, objq);	\
	lck_spin_unlock(&vm_objects_wired_lock);			\
    }									\
    if (VM_KERN_MEMORY_NONE != (object)->wire_tag) {			\
	vm_tag_update_size((object)->wire_tag, -ptoa_64((object)->wired_page_count)); \
	(object)->wire_tag = VM_KERN_MEMORY_NONE;			\
    }									\
    MACRO_END
// These two macros start & end a C block
#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object)			\
    MACRO_BEGIN								\
    {									\
	int64_t __wireddelta = 0; vm_tag_t __waswired = (object)->wire_tag;

#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag)			\
	if (__wireddelta) {						\
	    boolean_t __overflow __assert_only =			\
	    os_add_overflow((object)->wired_page_count, __wireddelta,	\
			    (unsigned int *)(uintptr_t)&(object)->wired_page_count); \
	    assert(!__overflow);					\
	    if (!(object)->pageout && !(object)->no_tag_update) {	\
		if (__wireddelta > 0) {					\
		    assert (VM_KERN_MEMORY_NONE != (tag));		\
		    if (VM_KERN_MEMORY_NONE == __waswired) {		\
			VM_OBJECT_WIRED((object), (tag));		\
		    }							\
		    vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \
		} else if (VM_KERN_MEMORY_NONE != __waswired) {		\
		    assert (VM_KERN_MEMORY_NONE != (object)->wire_tag);	\
		    vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \
		    if (!(object)->wired_page_count) {			\
			VM_OBJECT_UNWIRED((object));			\
		    }							\
		}							\
	    }								\
	}								\
    }									\
    MACRO_END
#define VM_OBJECT_WIRED_PAGE_COUNT(object, delta)	\
    __wireddelta += delta; \

#define VM_OBJECT_WIRED_PAGE_ADD(object, m)		\
    if (!m->private && !m->fictitious) __wireddelta++;

#define VM_OBJECT_WIRED_PAGE_REMOVE(object, m)		\
    if (!m->private && !m->fictitious) __wireddelta--;
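/*
 * Illustrative sketch (not part of this header): the START/END macros
 * above open and close a C block, so COUNT/ADD/REMOVE may only appear
 * between a matched pair, with the object locked.  "example_wire_page"
 * and its arguments are hypothetical.
 */
#if 0
static void
example_wire_page(vm_object_t object, vm_page_t m, vm_tag_t tag)
{
	vm_object_lock(object);
	VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
	VM_OBJECT_WIRED_PAGE_ADD(object, m);	/* __wireddelta++ unless private/fictitious */
	VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
	vm_object_unlock(object);
}
#endif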
#define OBJECT_LOCK_SHARED	0
#define OBJECT_LOCK_EXCLUSIVE	1

extern lck_grp_t	vm_object_lck_grp;
extern lck_grp_attr_t	vm_object_lck_grp_attr;
extern lck_attr_t	vm_object_lck_attr;
extern lck_attr_t	kernel_object_lck_attr;
extern lck_attr_t	compressor_object_lck_attr;

extern vm_object_t	vm_pageout_scan_wants_object;
extern void		vm_object_lock(vm_object_t);
extern boolean_t	vm_object_lock_try(vm_object_t);
extern boolean_t	_vm_object_lock_try(vm_object_t);
extern boolean_t	vm_object_lock_avoid(vm_object_t);
extern void		vm_object_lock_shared(vm_object_t);
extern boolean_t	vm_object_lock_yield_shared(vm_object_t);
extern boolean_t	vm_object_lock_try_shared(vm_object_t);
extern void		vm_object_unlock(vm_object_t);
extern boolean_t	vm_object_lock_upgrade(vm_object_t);
/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)					\
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,		\
		    (((object) == kernel_object ||			\
		      (object) == vm_submap_object) ?			\
		     &kernel_object_lck_attr :				\
		     (((object) == compressor_object) ?			\
		      &compressor_object_lck_attr :			\
		      &vm_object_lck_attr)))

#define vm_object_lock_destroy(object)	lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_lock_try_scan(object)	_vm_object_lock_try(object)
/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_object_lock_assert_notheld(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
#else  /* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#define vm_object_lock_assert_notheld(object)
#endif /* MACH_ASSERT || DEBUG */
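/*
 * Illustrative sketch (not part of this header): the usual locking
 * discipline -- take the lock shared for read-only walks and upgrade to
 * exclusive before mutating.  vm_object_lock_upgrade() returns FALSE if
 * the shared hold was dropped during the attempt, in which case the
 * caller must re-take the lock and re-validate its state.
 * "example_touch_object" is hypothetical.
 */
#if 0
static void
example_touch_object(vm_object_t object)
{
	vm_object_lock_shared(object);
	vm_object_lock_assert_shared(object);
	if (!vm_object_lock_upgrade(object)) {
		/* shared hold was lost; re-take exclusive and re-validate */
		vm_object_lock(object);
	}
	vm_object_lock_assert_exclusive(object);
	/* ... mutate the object ... */
	vm_object_unlock(object);
}
#endif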
/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void		vm_object_bootstrap(void);

__private_extern__ void		vm_object_init(void);

__private_extern__ void		vm_object_init_lck_grp(void);

__private_extern__ void		vm_object_reaper_init(void);

__private_extern__ vm_object_t	vm_object_allocate(vm_object_size_t size);

__private_extern__ void		_vm_object_allocate(
					vm_object_size_t	size,
					vm_object_t		object);

#if	TASK_SWAPPER

__private_extern__ void		vm_object_res_reference(vm_object_t object);
__private_extern__ void		vm_object_res_deallocate(vm_object_t object);
#define	VM_OBJ_RES_INCR(object)	(object)->res_count++
#define	VM_OBJ_RES_DECR(object)	(object)->res_count--

#else	/* TASK_SWAPPER */

#define VM_OBJ_RES_INCR(object)
#define VM_OBJ_RES_DECR(object)
#define vm_object_res_reference(object)
#define vm_object_res_deallocate(object)

#endif	/* TASK_SWAPPER */
#define vm_object_reference_locked(object)		\
	MACRO_BEGIN					\
	vm_object_t RLObject = (object);		\
	vm_object_lock_assert_exclusive(object);	\
	assert((RLObject)->ref_count > 0);		\
	(RLObject)->ref_count++;			\
	assert((RLObject)->ref_count > 1);		\
	vm_object_res_reference(RLObject);		\
	MACRO_END

#define vm_object_reference_shared(object)				\
	MACRO_BEGIN							\
	vm_object_t RLObject = (object);				\
	vm_object_lock_assert_shared(object);				\
	assert((RLObject)->ref_count > 0);				\
	OSAddAtomic(1, &(RLObject)->ref_count);				\
	assert((RLObject)->ref_count > 0);				\
	/* XXX we would need an atomic version of the following ... */	\
	vm_object_res_reference(RLObject);				\
	MACRO_END

__private_extern__ void		vm_object_reference(
					vm_object_t	object);

#if	!MACH_ASSERT

#define vm_object_reference(object)			\
MACRO_BEGIN						\
	vm_object_t RObject = (object);			\
	if (RObject) {					\
		vm_object_lock_shared(RObject);		\
		vm_object_reference_shared(RObject);	\
		vm_object_unlock(RObject);		\
	}						\
MACRO_END

#endif	/* MACH_ASSERT */
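/*
 * Illustrative sketch (not part of this header): every reference taken
 * with vm_object_reference() must eventually be paired with a
 * vm_object_deallocate(), which drops ref_count and reaps the object
 * when it reaches zero.  "example_retain_briefly" is hypothetical.
 */
#if 0
static void
example_retain_briefly(vm_object_t object)
{
	vm_object_reference(object);	/* ref_count++ under the object lock */
	/* ... use the object ... */
	vm_object_deallocate(object);	/* may terminate the object */
}
#endif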
__private_extern__ void		vm_object_deallocate(
					vm_object_t	object);

__private_extern__ kern_return_t vm_object_release_name(
					vm_object_t	object,
					int		flags);

__private_extern__ void		vm_object_pmap_protect(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot);

__private_extern__ void		vm_object_pmap_protect_options(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot,
					int			options);

__private_extern__ void		vm_object_page_remove(
					vm_object_t		object,
					vm_object_offset_t	start,
					vm_object_offset_t	end);
__private_extern__ void		vm_object_deactivate_pages(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		kill_page,
					boolean_t		reusable_page,
					struct pmap		*pmap,
					vm_map_offset_t		pmap_offset);

__private_extern__ void		vm_object_reuse_pages(
					vm_object_t		object,
					vm_object_offset_t	start_offset,
					vm_object_offset_t	end_offset,
					boolean_t		allow_partial_reuse);
__private_extern__ void		vm_object_purge(
					vm_object_t		object,
					int			flags);

__private_extern__ kern_return_t vm_object_purgable_control(
					vm_object_t		object,
					vm_purgable_t		control,
					int			*state);

__private_extern__ kern_return_t vm_object_get_page_counts(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					unsigned int		*resident_page_count,
					unsigned int		*dirty_page_count);

__private_extern__ boolean_t	vm_object_coalesce(
					vm_object_t		prev_object,
					vm_object_t		next_object,
					vm_object_offset_t	prev_offset,
					vm_object_offset_t	next_offset,
					vm_object_size_t	prev_size,
					vm_object_size_t	next_size);
__private_extern__ boolean_t	vm_object_shadow(
					vm_object_t		*object,
					vm_object_offset_t	*offset,
					vm_object_size_t	length);

__private_extern__ void		vm_object_collapse(
					vm_object_t		object,
					vm_object_offset_t	offset,
					boolean_t		can_bypass);

__private_extern__ boolean_t	vm_object_copy_quickly(
					vm_object_t		*_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		*_src_needs_copy,
					boolean_t		*_dst_needs_copy);
__private_extern__ kern_return_t vm_object_copy_strategically(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					vm_object_t		*dst_object,
					vm_object_offset_t	*dst_offset,
					boolean_t		*dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_slowly(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		interruptible,
					vm_object_t		*_result_object);

__private_extern__ vm_object_t	vm_object_copy_delayed(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		src_object_shared);
__private_extern__ kern_return_t vm_object_destroy(
					vm_object_t	object,
					kern_return_t	reason);

__private_extern__ void		vm_object_pager_create(
					vm_object_t	object);

__private_extern__ void		vm_object_compressor_pager_create(
					vm_object_t	object);

__private_extern__ void		vm_object_page_map(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	(*map_fn)
						(void *, vm_object_offset_t),
					void			*map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					upl_size_t		size,
					upl_t			*upl,
					upl_page_info_t		*page_info,
					unsigned int		*count,
					upl_control_flags_t	flags,
					vm_tag_t		tag);

__private_extern__ kern_return_t vm_object_transpose(
					vm_object_t		object1,
					vm_object_t		object2,
					vm_object_size_t	transpose_size);
__private_extern__ boolean_t	vm_object_sync(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		should_flush,
					boolean_t		should_return,
					boolean_t		should_iosync);

__private_extern__ kern_return_t vm_object_update(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	*error_offset,
					int			*io_errno,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);

__private_extern__ kern_return_t vm_object_lock_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);

__private_extern__ vm_object_t	vm_object_memory_object_associate(
					memory_object_t		pager,
					vm_object_t		object,
					vm_object_size_t	size,
					boolean_t		check_named);

__private_extern__ void		vm_object_cluster_size(
					vm_object_t		object,
					vm_object_offset_t	*start,
					vm_size_t		*length,
					vm_object_fault_info_t	fault_info,
					uint32_t		*io_streaming);
__private_extern__ kern_return_t vm_object_populate_with_private(
					vm_object_t		object,
					vm_object_offset_t	offset,
					ppnum_t			phys_page,
					vm_size_t		size);

__private_extern__ void		vm_object_change_wimg_mode(
					vm_object_t		object,
					unsigned int		wimg_mode);

extern kern_return_t		adjust_vm_object_cache(
					vm_size_t	oval,
					vm_size_t	nval);

extern kern_return_t		vm_object_page_op(
					vm_object_t		object,
					vm_object_offset_t	offset,
					int			ops,
					ppnum_t			*phys_entry,
					int			*flags);

extern kern_return_t		vm_object_range_op(
					vm_object_t		object,
					vm_object_offset_t	offset_beg,
					vm_object_offset_t	offset_end,
					int			ops,
					uint32_t		*range);

__private_extern__ void		vm_object_reap_pages(
					vm_object_t	object,
					int		reap_type);

#define REAP_REAP	0
#define REAP_TERMINATE	1
#define REAP_PURGEABLE	2
#define REAP_DATA_FLUSH	3
#if CONFIG_FREEZE

__private_extern__ void
vm_object_compressed_freezer_pageout(
	vm_object_t	object);

__private_extern__ void
vm_object_compressed_freezer_done(
	vm_object_t	object);

#endif /* CONFIG_FREEZE */

__private_extern__ void
vm_object_pageout(
	vm_object_t	object);

#if CONFIG_IOSCHED
struct io_reprioritize_req {
	uint64_t	blkno;
	uint32_t	len;
	int		priority;
	struct vnode	*devvp;
	queue_chain_t	io_reprioritize_list;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif
/*
 *	Event waiting handling
 */

#define VM_OBJECT_EVENT_INITIALIZED		0
#define VM_OBJECT_EVENT_PAGER_READY		1
#define VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
#define VM_OBJECT_EVENT_MAPPING_IN_PROGRESS	3
#define VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
#define VM_OBJECT_EVENT_UNCACHING		5
#define VM_OBJECT_EVENT_COPY_CALL		6
#define VM_OBJECT_EVENT_CACHING			7
#define VM_OBJECT_EVENT_UNBLOCKED		8
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS	9

#define VM_OBJECT_EVENT_MAX		10 /* 11 bits in "all_wanted", so 0->10 */
static __inline__ wait_result_t
vm_object_assert_wait(
	vm_object_t		object,
	int			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = assert_wait((event_t)((vm_offset_t)object + event),
			 interruptible);
	return wr;
}

static __inline__ wait_result_t
vm_object_wait(
	vm_object_t		object,
	int			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

	vm_object_assert_wait(object, event, interruptible);
	vm_object_unlock(object);
	wr = thread_block(THREAD_CONTINUE_NULL);
	return wr;
}

static __inline__ wait_result_t
thread_sleep_vm_object(
	vm_object_t		object,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

#if DEVELOPMENT || DEBUG
	if (object->Lock_owner != current_thread())
		panic("thread_sleep_vm_object: not owner - %p\n", object);
	object->Lock_owner = 0;
#endif
	wr = lck_rw_sleep(&object->Lock,
			  LCK_SLEEP_PROMOTED_PRI,
			  event,
			  interruptible);
#if DEVELOPMENT || DEBUG
	object->Lock_owner = current_thread();
#endif
	return wr;
}

static __inline__ wait_result_t
vm_object_sleep(
	vm_object_t		object,
	int			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = thread_sleep_vm_object(object,
				    (event_t)((vm_offset_t)object + event),
				    interruptible);
	return wr;
}

static __inline__ void
vm_object_wakeup(
	vm_object_t	object,
	int		event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	if (object->all_wanted & (1 << event))
		thread_wakeup((event_t)((vm_offset_t)object + event));
	object->all_wanted &= ~(1 << event);
}

static __inline__ void
vm_object_set_wanted(
	vm_object_t	object,
	int		event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= (1 << event);
}

static __inline__ int
vm_object_wanted(
	vm_object_t	object,
	int		event)
{
	vm_object_lock_assert_held(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	return object->all_wanted & (1 << event);
}
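/*
 * Illustrative sketch (not part of this header): the scheme above
 * encodes a wait channel as (object address + event number) and records
 * sleepers in the "all_wanted" bit array.  A typical consumer loops on
 * the condition, e.g. waiting for the pager to come up:
 */
#if 0
	vm_object_lock(object);			/* exclusive */
	while (!object->pager_ready) {
		vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
				THREAD_UNINT);
		/* lock is held again on return; re-check the condition */
	}
	vm_object_unlock(object);
#endif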
/*
 *	Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)					\
	MACRO_BEGIN							\
	int pip = ((object)->paging_in_progress +			\
		   (object)->activity_in_progress);			\
	if (pip < VM_PIP_DEBUG_MAX_REFS) {				\
		(void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
				   VM_PIP_DEBUG_STACK_FRAMES);		\
	}								\
	MACRO_END
#else	/* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif	/* VM_PIP_DEBUG */
#define		vm_object_activity_begin(object)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->activity_in_progress++;				\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_begin(%p): overflow\n", (object));\
	}								\
	MACRO_END

#define		vm_object_activity_end(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_end(%p): underflow\n", (object));\
	}								\
	(object)->activity_in_progress--;				\
	if ((object)->paging_in_progress == 0 &&			\
	    (object)->activity_in_progress == 0)			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_IN_PROGRESS);	\
	MACRO_END

#define		vm_object_paging_begin(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->paging_in_progress++;					\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_begin(%p): overflow\n", (object));\
	}								\
	MACRO_END

#define		vm_object_paging_end(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_end(%p): underflow\n", (object));\
	}								\
	(object)->paging_in_progress--;					\
	if ((object)->paging_in_progress == 0) {			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
		if ((object)->activity_in_progress == 0)		\
			vm_object_wakeup((object),			\
					 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	}								\
	MACRO_END
#define		vm_object_paging_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0 ||			\
	       (object)->activity_in_progress != 0) {			\
		wait_result_t  _wr;					\
									\
		_wr = vm_object_sleep((object),				\
				      VM_OBJECT_EVENT_PAGING_IN_PROGRESS, \
				      (interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	MACRO_END

#define	vm_object_paging_only_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0) {			\
		wait_result_t  _wr;					\
									\
		_wr = vm_object_sleep((object),				\
				      VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\
				      (interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	MACRO_END
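/*
 * Illustrative sketch (not part of this header): paging/activity
 * references bracket in-flight pagein/pageout work, and waiters such as
 * vm_object_paging_wait() block until both counters drain.
 * "example_start_io" is hypothetical.
 */
#if 0
static void
example_start_io(vm_object_t object)
{
	vm_object_lock(object);
	vm_object_paging_begin(object);	/* pin: object must not terminate */
	/* ... issue the pagein/pageout, possibly dropping the lock ... */
	vm_object_paging_end(object);	/* may wake PAGING_IN_PROGRESS waiters */
	vm_object_unlock(object);
}
#endif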
#define vm_object_mapping_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert(! (object)->mapping_in_progress);			\
	(object)->mapping_in_progress = TRUE;				\
	MACRO_END

#define vm_object_mapping_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->mapping_in_progress);				\
	(object)->mapping_in_progress = FALSE;				\
	vm_object_wakeup((object),					\
			 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);		\
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->mapping_in_progress) {				\
		wait_result_t	_wr;					\
									\
		_wr = vm_object_sleep((object),				\
				      VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
				      (interruptible));			\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	assert(!(object)->mapping_in_progress);				\
	MACRO_END
#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
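/*
 * Worked example (not part of this header): with 4 KB pages
 * (PAGE_MASK == 0xFFF), vm_object_trunc_page(0x1234) == 0x1000 and
 * vm_object_round_page(0x1234) == 0x2000; offsets already on a page
 * boundary are unchanged by both.
 */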
extern void	vm_object_cache_add(vm_object_t);
extern void	vm_object_cache_remove(vm_object_t);
extern int	vm_object_cache_evict(int, int);

#endif	/* _VM_VM_OBJECT_H_ */