/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory object module definitions.
 */
#ifndef	_VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <mach_assert.h>
#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/vm_external.h>

#include <vm/vm_options.h>
#include <vm/vm_page.h>
#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern boolean_t vm_object_tracking_inited;
extern btlog_t *vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_NUM_RECORDS	50000
#define VM_OBJECT_TRACKING_BTDEPTH	7
#define VM_OBJECT_TRACKING_OP_CREATED	1
#define VM_OBJECT_TRACKING_OP_MODIFIED	2
#define VM_OBJECT_TRACKING_OP_TRUESHARE	3
#endif /* VM_OBJECT_TRACKING */
/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */

struct vm_object_fault_info {
	vm_size_t	cluster_size;
	vm_behavior_t	behavior;
	vm_map_offset_t	lo_offset;
	vm_map_offset_t	hi_offset;
	unsigned int
	/* boolean_t */	no_cache:1,
	/* boolean_t */	stealth:1,
	/* boolean_t */	io_sync:1,
	/* boolean_t */	cs_bypass:1,
	/* boolean_t */	pmap_cs_associated:1,
	/* boolean_t */	mark_zf_absent:1,
	/* boolean_t */	batch_pmap_op:1,
		__vm_object_fault_info_unused_bits:25;
};

typedef struct vm_object_fault_info *vm_object_fault_info_t;
#define vo_size				vo_un1.vou_size
#define vo_cache_pages_to_scan		vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset		vo_un2.vou_shadow_offset
#define vo_cache_ts			vo_un2.vou_cache_ts
#define vo_owner			vo_un2.vou_owner
/*
 * on 64 bit systems we pack the pointers hung off the memq.
 * those pointers have to be able to point back to the memq.
 * the packed pointers are required to be on a 64 byte boundary
 * which means 2 things for the vm_object...  (1) the memq
 * struct has to be the first element of the structure so that
 * we can control its alignment... (2) the vm_object must be
 * aligned on a 64 byte boundary... for static vm_object's
 * this is accomplished via the 'aligned' attribute... for
 * vm_object's in the zone pool, this is accomplished by
 * rounding the size of the vm_object element to the nearest
 * 64 byte size before creating the zone.
 */
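/*
 * A minimal sketch of the two requirements above (illustrative names and
 * calls, not definitions from this file):
 *
 *	// static case: force 64-byte alignment with the 'aligned' attribute
 *	static struct vm_object my_object_store __attribute__((aligned(64)));
 *
 *	// zone case: round the element size to a 64-byte multiple up front
 *	vm_size_t elem = (sizeof(struct vm_object) + 63) & ~((vm_size_t)63);
 *	zone = zinit(elem, max_bytes, alloc_bytes, "vm objects");
 */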
struct vm_object {
	vm_page_queue_head_t	memq;		/* Resident memory - must be first */
	lck_rw_t		Lock;		/* Synchronization */
#if DEVELOPMENT || DEBUG
	thread_t		Lock_owner;
#endif
	union {
		vm_object_size_t vou_size;	/* Object size (only valid if internal) */
		int		vou_cache_pages_to_scan;
						/* pages yet to be visited in an
						 * external object in cache
						 */
	} vo_un1;

	struct vm_page		*memq_hint;
	int			ref_count;	/* Number of references */
	unsigned int		resident_page_count;
						/* number of resident pages */
	unsigned int		wired_page_count;
						/* number of wired pages;
						 * use VM_OBJECT_WIRED_PAGE_UPDATE
						 * macros to update */
	unsigned int		reusable_page_count;
	struct vm_object	*copy;		/* Object that should receive
						 * a copy of my changed pages,
						 * for copy_delay, or just the
						 * temporary object that
						 * shadows this object, for
						 * copy_call.
						 */
	struct vm_object	*shadow;	/* My shadow */

	union {
		vm_object_offset_t vou_shadow_offset;	/* Offset into shadow */
		clock_sec_t	vou_cache_ts;	/* age of an external object
						 * present in cache
						 */
		task_t		vou_owner;	/* If the object is purgeable
						 * or has a "ledger_tag", this
						 * is the task that owns it.
						 */
	} vo_un2;

	memory_object_t		pager;		/* Where to get data */
	vm_object_offset_t	paging_offset;	/* Offset into memory object */
	memory_object_control_t	pager_control;	/* Where data comes back */

	memory_object_copy_strategy_t
				copy_strategy;	/* How to handle data copy */
#if __LP64__
	/*
	 * Some user processes (mostly VirtualMachine software) take a large
	 * number of UPLs (via IOMemoryDescriptors) to wire pages in large
	 * VM objects and overflow the 16-bit "activity_in_progress" counter.
	 * Since we never enforced any limit there, let's give them 32 bits
	 * for backwards compatibility's sake.
	 */
	unsigned int		paging_in_progress:16,
				__object1_unused_bits:16;
	unsigned int		activity_in_progress;
#else /* __LP64__ */
	/*
	 * On 32-bit platforms, enlarging "activity_in_progress" would increase
	 * the size of "struct vm_object".  Since we don't know of any actual
	 * overflow of these counters on these platforms, let's keep the
	 * counters as 16-bit integers.
	 */
	unsigned short		paging_in_progress;
	unsigned short		activity_in_progress;
#endif /* __LP64__ */
				/* The memory object ports are
				 * being used (e.g., for pagein
				 * or pageout) -- don't change
				 * any of these fields (i.e.,
				 * don't collapse, destroy or
				 * terminate)
				 */

	unsigned int
	/* boolean_t array */	all_wanted:11,	/* Bit array of "want to be
						 * awakened" notations.  See
						 * VM_OBJECT_EVENT_* items
						 * below */
	/* boolean_t */		pager_created:1,	/* Has pager been created? */
	/* boolean_t */		pager_initialized:1,	/* Are fields ready to use? */
	/* boolean_t */		pager_ready:1,		/* Will pager take requests? */

	/* boolean_t */		pager_trusted:1,/* The pager for this object
						 * is trusted.  This is true for
						 * all internal objects (backed
						 * by the default pager)
						 */
	/* boolean_t */		can_persist:1,	/* The kernel may keep the data
						 * for this object (and rights
						 * to the memory object) after
						 * all address map references
						 * are deallocated
						 */
	/* boolean_t */		internal:1,	/* Created by the kernel (and
						 * therefore, managed by the
						 * default memory manager)
						 */
	/* boolean_t */		private:1,	/* magic device_pager object,
						 * holds private pages only */
	/* boolean_t */		pageout:1,	/* pageout object.  contains
						 * private pages that refer to
						 * a real memory object. */
	/* boolean_t */		alive:1,	/* Not yet terminated */

	/* boolean_t */		purgable:2,	/* Purgable state.  See
						 * VM_PURGABLE_* items */
	/* boolean_t */		purgeable_only_by_kernel:1,
	/* boolean_t */		purgeable_when_ripe:1,
						/* Purgeable when a token
						 * has been reached. */
	/* boolean_t */		shadowed:1,	/* Shadow may exist */

	/* boolean_t */		true_share:1,
						/* This object is mapped
						 * in more than one place
						 * and hence cannot be
						 * coalesced */
	/* boolean_t */		terminating:1,
						/* Allows vm_object_lookup
						 * and vm_object_deallocate
						 * to special case their
						 * behavior when they are
						 * called as a result of
						 * page cleaning during
						 * object termination
						 */
	/* boolean_t */		named:1,	/* Enforces an internal
						 * naming convention, by
						 * calling the right routines
						 * for allocation and
						 * destruction; UBC references
						 * against the vm_object are
						 * checked.
						 */
	/* boolean_t */		shadow_severed:1,
						/* When a permanent object
						 * backing a COW goes away
						 * unexpectedly.  This bit
						 * allows vm_fault to return
						 * an error rather than a
						 * zero-filled page.
						 */
	/* boolean_t */		phys_contiguous:1,
						/* Memory is wired and
						 * guaranteed physically
						 * contiguous.  However
						 * it is not device memory
						 * and obeys normal virtual
						 * memory rules w.r.t pmap
						 * access bits.
						 */
	/* boolean_t */		nophyscache:1,
						/* When mapped at the
						 * pmap level, don't allow
						 * primary caching. (for
						 * I/O)
						 */
	/* boolean_t */		_object5_unused_bits:1;
	queue_chain_t		cached_list;	/* Attachment point for the
						 * list of objects cached as a
						 * result of their can_persist
						 * value
						 */
	/*
	 * the following fields are not protected by any locks
	 * they are updated via atomic compare and swap
	 */
	vm_object_offset_t	last_alloc;	/* last allocation offset */
	int			sequential;	/* sequential access size */

	uint32_t		pages_created;

	vm_offset_t		cow_hint;	/* last page present in     */
						/* shadow but not in object */
						/* hold object lock when altering */
	unsigned int
		wimg_bits:8,		/* cache WIMG bits */
		code_signed:1,		/* pages are signed and should be
					 * validated; the signatures are
					 * stored with the pager */
		transposed:1,		/* object was transposed with another */
		mapping_in_progress:1,	/* pager being mapped/unmapped */
		volatile_fault:1,	/* used by VM_OBJECT_PURGEABLE_FAULT_ERROR() below */
		object_is_shared_cache:1,
		purgeable_queue_type:2,
		purgeable_queue_group:3,
		no_tag_update:1,	/*  */
#if CONFIG_SECLUDED_MEMORY
		eligible_for_secluded:1,
		can_grab_secluded:1,
#else /* CONFIG_SECLUDED_MEMORY */
		__object3_unused_bits:2,
#endif /* CONFIG_SECLUDED_MEMORY */
#if VM_OBJECT_ACCESS_TRACKING
		access_tracking:1,
#else /* VM_OBJECT_ACCESS_TRACKING */
		__unused_access_tracking:1,
#endif /* VM_OBJECT_ACCESS_TRACKING */
		vo_ledger_tag:2,
		__object2_unused_bits:2;	/* for expansion */
#if VM_OBJECT_ACCESS_TRACKING
	uint32_t		access_tracking_reads;
	uint32_t		access_tracking_writes;
#endif /* VM_OBJECT_ACCESS_TRACKING */

	uint8_t			scan_collisions;
	vm_tag_t		wire_tag;
	uint8_t			__object4_unused_bits[2];

#if CONFIG_PHANTOM_CACHE
	uint32_t		phantom_object_id;
#endif /* CONFIG_PHANTOM_CACHE */
#if CONFIG_IOSCHED || UPL_DEBUG
	queue_head_t		uplq;		/* List of outstanding upls */
#endif /* CONFIG_IOSCHED || UPL_DEBUG */
#if VM_PIP_DEBUG
	/*
	 * Keep track of the stack traces for the first holders
	 * of a "paging_in_progress" reference for this VM object.
	 */
#define VM_PIP_DEBUG_STACK_FRAMES	25	/* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS		10	/* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif /* VM_PIP_DEBUG */
	queue_chain_t		objq;		/* object queue - currently used for purgable queues */
	queue_chain_t		task_objq;	/* objects owned by task - protected by task lock */

#if !VM_TAG_ACTIVE_UPDATE
	queue_chain_t		wired_objq;
#endif /* !VM_TAG_ACTIVE_UPDATE */

#if DEBUG
	void			*purgeable_owner_bt[16];
	task_t			vo_purgeable_volatilizer;	/* who made it volatile? */
	void			*purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};
/* values for object->vo_ledger_tag */
#define VM_OBJECT_LEDGER_TAG_NONE	0
#define VM_OBJECT_LEDGER_TAG_NETWORK	1
#define VM_OBJECT_LEDGER_TAG_MEDIA	2
#define VM_OBJECT_LEDGER_TAG_RESERVED	3

#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)				\
	((object)->volatile_fault &&					\
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||			\
	  (object)->purgable == VM_PURGABLE_EMPTY))
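/*
 * Illustrative use of the check above (a sketch, not code from this file):
 * a fault handler can refuse to materialize pages for a volatile or
 * already-emptied purgeable object.
 *
 *	if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
 *		return KERN_MEMORY_ERROR;	// hypothetical error choice
 *	}
 */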
#if VM_OBJECT_ACCESS_TRACKING
extern uint64_t vm_object_access_tracking_reads;
extern uint64_t vm_object_access_tracking_writes;
extern void vm_object_access_tracking(vm_object_t object,
				      int *access_tracking,
				      uint32_t *access_tracking_reads,
				      uint32_t *access_tracking_writes);
#endif /* VM_OBJECT_ACCESS_TRACKING */
extern
vm_object_t	kernel_object;		/* the single kernel object */

extern
vm_object_t	compressor_object;	/* the single compressor object */

extern
unsigned int	vm_object_absent_max;	/* maximum number of absent pages
					 * at a time for each object */

# define	VM_MSYNC_INITIALIZED		0
# define	VM_MSYNC_SYNCHRONIZING		1
# define	VM_MSYNC_DONE			2

extern lck_grp_t	vm_map_lck_grp;
extern lck_attr_t	vm_map_lck_attr;
#ifndef VM_TAG_ACTIVE_UPDATE
#error VM_TAG_ACTIVE_UPDATE
#endif

#if VM_TAG_ACTIVE_UPDATE
#define VM_OBJECT_WIRED_ENQUEUE(object) panic("VM_OBJECT_WIRED_ENQUEUE")
#define VM_OBJECT_WIRED_DEQUEUE(object) panic("VM_OBJECT_WIRED_DEQUEUE")
#else /* VM_TAG_ACTIVE_UPDATE */
#define VM_OBJECT_WIRED_ENQUEUE(object)					\
	MACRO_BEGIN							\
	lck_spin_lock(&vm_objects_wired_lock);				\
	assert(!(object)->wired_objq.next);				\
	assert(!(object)->wired_objq.prev);				\
	queue_enter(&vm_objects_wired, (object),			\
		    vm_object_t, wired_objq);				\
	lck_spin_unlock(&vm_objects_wired_lock);			\
	MACRO_END
#define VM_OBJECT_WIRED_DEQUEUE(object)					\
	MACRO_BEGIN							\
	if ((object)->wired_objq.next) {				\
		lck_spin_lock(&vm_objects_wired_lock);			\
		queue_remove(&vm_objects_wired, (object),		\
			     vm_object_t, wired_objq);			\
		lck_spin_unlock(&vm_objects_wired_lock);		\
	}								\
	MACRO_END
#endif /* VM_TAG_ACTIVE_UPDATE */
#define VM_OBJECT_WIRED(object, tag)					\
	MACRO_BEGIN							\
	assert(VM_KERN_MEMORY_NONE != (tag));				\
	assert(VM_KERN_MEMORY_NONE == (object)->wire_tag);		\
	(object)->wire_tag = (tag);					\
	if (!VM_TAG_ACTIVE_UPDATE) {					\
		VM_OBJECT_WIRED_ENQUEUE((object));			\
	}								\
	MACRO_END

#define VM_OBJECT_UNWIRED(object)					\
	MACRO_BEGIN							\
	if (!VM_TAG_ACTIVE_UPDATE) {					\
		VM_OBJECT_WIRED_DEQUEUE((object));			\
	}								\
	if (VM_KERN_MEMORY_NONE != (object)->wire_tag) {		\
		vm_tag_update_size((object)->wire_tag,			\
				   -ptoa_64((object)->wired_page_count)); \
		(object)->wire_tag = VM_KERN_MEMORY_NONE;		\
	}								\
	MACRO_END
// These two macros start & end a C block
#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object)			\
	MACRO_BEGIN							\
	{								\
		int64_t __wireddelta = 0; vm_tag_t __waswired = (object)->wire_tag;

#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag)			\
		if (__wireddelta) {					\
			boolean_t __overflow __assert_only =		\
			    os_add_overflow((object)->wired_page_count, __wireddelta, \
					    &(object)->wired_page_count); \
			assert(!__overflow);				\
			if (!(object)->pageout && !(object)->no_tag_update) { \
				if (__wireddelta > 0) {			\
					assert(VM_KERN_MEMORY_NONE != (tag)); \
					if (VM_KERN_MEMORY_NONE == __waswired) { \
						VM_OBJECT_WIRED((object), (tag)); \
					}				\
					vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \
				} else if (VM_KERN_MEMORY_NONE != __waswired) { \
					assert(VM_KERN_MEMORY_NONE != (object)->wire_tag); \
					vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \
					if (!(object)->wired_page_count) { \
						VM_OBJECT_UNWIRED((object)); \
					}				\
				}					\
			}						\
		}							\
	}								\
	MACRO_END

#define VM_OBJECT_WIRED_PAGE_COUNT(object, delta)			\
	__wireddelta += delta;

#define VM_OBJECT_WIRED_PAGE_ADD(object, m)				\
	if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta++;

#define VM_OBJECT_WIRED_PAGE_REMOVE(object, m)				\
	if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta--;
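/*
 * Hedged usage sketch (not from this file): START/END bracket a C block,
 * and ADD/REMOVE/COUNT accumulate into the block-local __wireddelta, which
 * END folds into wired_page_count and the wire-tag accounting.
 *
 *	VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
 *	VM_OBJECT_WIRED_PAGE_ADD(object, mem);		// wire one page
 *	VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
 */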
#define OBJECT_LOCK_SHARED	0
#define OBJECT_LOCK_EXCLUSIVE	1

extern lck_grp_t	vm_object_lck_grp;
extern lck_grp_attr_t	vm_object_lck_grp_attr;
extern lck_attr_t	vm_object_lck_attr;
extern lck_attr_t	kernel_object_lck_attr;
extern lck_attr_t	compressor_object_lck_attr;

extern vm_object_t	vm_pageout_scan_wants_object;

extern void		vm_object_lock(vm_object_t);
extern boolean_t	vm_object_lock_try(vm_object_t);
extern boolean_t	_vm_object_lock_try(vm_object_t);
extern boolean_t	vm_object_lock_avoid(vm_object_t);
extern void		vm_object_lock_shared(vm_object_t);
extern boolean_t	vm_object_lock_yield_shared(vm_object_t);
extern boolean_t	vm_object_lock_try_shared(vm_object_t);
extern void		vm_object_unlock(vm_object_t);
extern boolean_t	vm_object_lock_upgrade(vm_object_t);
/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)					\
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,		\
		    (((object) == kernel_object ||			\
		      (object) == vm_submap_object) ?			\
		     &kernel_object_lck_attr :				\
		     (((object) == compressor_object) ?			\
		      &compressor_object_lck_attr :			\
		      &vm_object_lck_attr)))
#define vm_object_lock_destroy(object)	lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_lock_try_scan(object)	_vm_object_lock_try(object)
/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the calling thread.
 */
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object)				\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object)				\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object)				\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_object_lock_assert_notheld(object)				\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
#else  /* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#define vm_object_lock_assert_notheld(object)
#endif /* MACH_ASSERT || DEBUG */
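/*
 * A minimal locking sketch under the caveat above (illustrative only):
 * read under the shared lock, upgrade before mutating; a failed upgrade
 * drops the lock, so exclusive must be retaken and state revalidated.
 *
 *	vm_object_lock_shared(object);
 *	// ... read-only inspection ...
 *	if (!vm_object_lock_upgrade(object)) {
 *		vm_object_lock(object);	// upgrade failed: lock was dropped
 *		// ... revalidate what was observed under the shared lock ...
 *	}
 *	vm_object_lock_assert_exclusive(object);
 *	// ... mutate ...
 *	vm_object_unlock(object);
 */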
/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void		vm_object_bootstrap(void);

__private_extern__ void		vm_object_init(void);

__private_extern__ void		vm_object_init_lck_grp(void);

__private_extern__ void		vm_object_reaper_init(void);

__private_extern__ vm_object_t	vm_object_allocate(vm_object_size_t size);

__private_extern__ void		_vm_object_allocate(vm_object_size_t size,
						    vm_object_t object);
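/*
 * Illustrative allocate/release pairing (a sketch, not from this file);
 * "size" is assumed to already be expressed in bytes:
 *
 *	vm_object_t object;
 *
 *	object = vm_object_allocate(vm_object_round_page(size));
 *	// ... use the object ...
 *	vm_object_deallocate(object);	// drop the reference from allocation
 */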
#if	TASK_SWAPPER

__private_extern__ void		vm_object_res_reference(
					vm_object_t object);
__private_extern__ void		vm_object_res_deallocate(
					vm_object_t object);
#define	VM_OBJ_RES_INCR(object)	(object)->res_count++
#define	VM_OBJ_RES_DECR(object)	(object)->res_count--

#else	/* TASK_SWAPPER */

#define VM_OBJ_RES_INCR(object)
#define VM_OBJ_RES_DECR(object)
#define vm_object_res_reference(object)
#define vm_object_res_deallocate(object)

#endif	/* TASK_SWAPPER */
#define vm_object_reference_locked(object)				\
	MACRO_BEGIN							\
	vm_object_t RLObject = (object);				\
	vm_object_lock_assert_exclusive(object);			\
	assert((RLObject)->ref_count > 0);				\
	(RLObject)->ref_count++;					\
	assert((RLObject)->ref_count > 1);				\
	vm_object_res_reference(RLObject);				\
	MACRO_END

#define vm_object_reference_shared(object)				\
	MACRO_BEGIN							\
	vm_object_t RLObject = (object);				\
	vm_object_lock_assert_shared(object);				\
	assert((RLObject)->ref_count > 0);				\
	OSAddAtomic(1, &(RLObject)->ref_count);				\
	assert((RLObject)->ref_count > 0);				\
	/* XXX we would need an atomic version of the following ... */	\
	vm_object_res_reference(RLObject);				\
	MACRO_END

__private_extern__ void		vm_object_reference(
					vm_object_t object);

#if	!MACH_ASSERT

#define vm_object_reference(object)				\
	MACRO_BEGIN						\
	vm_object_t RObject = (object);				\
	if (RObject) {						\
		vm_object_lock_shared(RObject);			\
		vm_object_reference_shared(RObject);		\
		vm_object_unlock(RObject);			\
	}							\
	MACRO_END

#endif	/* MACH_ASSERT */
__private_extern__ void		vm_object_deallocate(
					vm_object_t object);

__private_extern__ kern_return_t vm_object_release_name(
					vm_object_t object,
					int flags);

__private_extern__ void		vm_object_pmap_protect(
					vm_object_t object,
					vm_object_offset_t offset,
					vm_object_size_t size,
					pmap_t pmap,
					vm_map_offset_t pmap_start,
					vm_prot_t prot);

__private_extern__ void		vm_object_pmap_protect_options(
					vm_object_t object,
					vm_object_offset_t offset,
					vm_object_size_t size,
					pmap_t pmap,
					vm_map_offset_t pmap_start,
					vm_prot_t prot,
					int options);

__private_extern__ void		vm_object_page_remove(
					vm_object_t object,
					vm_object_offset_t start,
					vm_object_offset_t end);
__private_extern__ void		vm_object_deactivate_pages(
					vm_object_t object,
					vm_object_offset_t offset,
					vm_object_size_t size,
					boolean_t kill_page,
					boolean_t reusable_page,
					struct pmap *pmap,
					vm_map_offset_t pmap_offset);

__private_extern__ void		vm_object_reuse_pages(
					vm_object_t object,
					vm_object_offset_t start_offset,
					vm_object_offset_t end_offset,
					boolean_t allow_partial_reuse);
__private_extern__ uint64_t	vm_object_purge(
					vm_object_t object,
					int flags);

__private_extern__ kern_return_t vm_object_purgable_control(
					vm_object_t object,
					vm_purgable_t control,
					int *state);

__private_extern__ kern_return_t vm_object_get_page_counts(
					vm_object_t object,
					vm_object_offset_t offset,
					vm_object_size_t size,
					unsigned int *resident_page_count,
					unsigned int *dirty_page_count);

__private_extern__ boolean_t	vm_object_coalesce(
					vm_object_t prev_object,
					vm_object_t next_object,
					vm_object_offset_t prev_offset,
					vm_object_offset_t next_offset,
					vm_object_size_t prev_size,
					vm_object_size_t next_size);
__private_extern__ boolean_t	vm_object_shadow(
					vm_object_t *object,
					vm_object_offset_t *offset,
					vm_object_size_t length);

__private_extern__ void		vm_object_collapse(
					vm_object_t object,
					vm_object_offset_t offset,
					boolean_t can_bypass);
__private_extern__ boolean_t	vm_object_copy_quickly(
					vm_object_t *_object,
					vm_object_offset_t src_offset,
					vm_object_size_t size,
					boolean_t *_src_needs_copy,
					boolean_t *_dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_strategically(
					vm_object_t src_object,
					vm_object_offset_t src_offset,
					vm_object_size_t size,
					vm_object_t *dst_object,
					vm_object_offset_t *dst_offset,
					boolean_t *dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_slowly(
					vm_object_t src_object,
					vm_object_offset_t src_offset,
					vm_object_size_t size,
					boolean_t interruptible,
					vm_object_t *_result_object);

__private_extern__ vm_object_t	vm_object_copy_delayed(
					vm_object_t src_object,
					vm_object_offset_t src_offset,
					vm_object_size_t size,
					boolean_t src_object_shared);
__private_extern__ kern_return_t vm_object_destroy(
					vm_object_t object,
					kern_return_t reason);

__private_extern__ void		vm_object_pager_create(
					vm_object_t object);

__private_extern__ void		vm_object_compressor_pager_create(
					vm_object_t object);

__private_extern__ void		vm_object_page_map(
					vm_object_t object,
					vm_object_offset_t offset,
					vm_object_size_t size,
					vm_object_offset_t (*map_fn)
						(void *, vm_object_offset_t),
					void *map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
					vm_object_t object,
					vm_object_offset_t offset,
					upl_size_t size,
					upl_t *upl,
					upl_page_info_t *page_info,
					unsigned int *count,
					upl_control_flags_t flags,
					vm_tag_t tag);

__private_extern__ kern_return_t vm_object_transpose(
					vm_object_t object1,
					vm_object_t object2,
					vm_object_size_t transpose_size);

__private_extern__ boolean_t	vm_object_sync(
					vm_object_t object,
					vm_object_offset_t offset,
					vm_object_size_t size,
					boolean_t should_flush,
					boolean_t should_return,
					boolean_t should_iosync);
__private_extern__ kern_return_t vm_object_update(
					vm_object_t object,
					vm_object_offset_t offset,
					vm_object_size_t size,
					vm_object_offset_t *error_offset,
					int *io_errno,
					memory_object_return_t should_return,
					int flags,
					vm_prot_t prot);

__private_extern__ kern_return_t vm_object_lock_request(
					vm_object_t object,
					vm_object_offset_t offset,
					vm_object_size_t size,
					memory_object_return_t should_return,
					int flags,
					vm_prot_t prot);

__private_extern__ vm_object_t	vm_object_memory_object_associate(
					memory_object_t pager,
					vm_object_t object,
					vm_object_size_t size,
					boolean_t check_named);
__private_extern__ void		vm_object_cluster_size(
					vm_object_t object,
					vm_object_offset_t *start,
					vm_size_t *length,
					vm_object_fault_info_t fault_info,
					uint32_t *io_streaming);

__private_extern__ kern_return_t vm_object_populate_with_private(
					vm_object_t object,
					vm_object_offset_t offset,
					ppnum_t phys_page,
					vm_size_t size);

__private_extern__ void		vm_object_change_wimg_mode(
					vm_object_t object,
					unsigned int wimg_mode);
extern kern_return_t	adjust_vm_object_cache(
				vm_size_t oval,
				vm_size_t nval);

extern kern_return_t	vm_object_page_op(
				vm_object_t object,
				vm_object_offset_t offset,
				int ops,
				ppnum_t *phys_entry,
				int *flags);

extern kern_return_t	vm_object_range_op(
				vm_object_t object,
				vm_object_offset_t offset_beg,
				vm_object_offset_t offset_end,
				int ops,
				uint32_t *range);

__private_extern__ void	vm_object_reap_pages(
				vm_object_t object,
				int reap_type);
#define REAP_TERMINATE		1
#define REAP_PURGEABLE		2
#define REAP_DATA_FLUSH		3
#if CONFIG_FREEZE
__private_extern__ void
vm_object_compressed_freezer_pageout(
	vm_object_t object);

__private_extern__ void
vm_object_compressed_freezer_done(
	vm_object_t object);

#endif /* CONFIG_FREEZE */

__private_extern__ void
vm_object_pageout(
	vm_object_t object);

#if CONFIG_IOSCHED
struct io_reprioritize_req {
	uint64_t	blkno;
	uint32_t	len;
	int		priority;
	struct vnode	*devvp;
	queue_chain_t	io_reprioritize_list;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif /* CONFIG_IOSCHED */
/*
 *	Event waiting handling
 */

#define	VM_OBJECT_EVENT_INITIALIZED		0
#define	VM_OBJECT_EVENT_PAGER_READY		1
#define	VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
#define	VM_OBJECT_EVENT_MAPPING_IN_PROGRESS	3
#define	VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
#define	VM_OBJECT_EVENT_UNCACHING		5
#define	VM_OBJECT_EVENT_COPY_CALL		6
#define	VM_OBJECT_EVENT_CACHING			7
#define	VM_OBJECT_EVENT_UNBLOCKED		8
#define	VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS	9

#define	VM_OBJECT_EVENT_MAX	10	/* 11 bits in "all_wanted", so 0->10 */
static __inline__ wait_result_t
vm_object_assert_wait(
	vm_object_t		object,
	int			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = assert_wait((event_t)((vm_offset_t)object + event),
			 interruptible);
	return wr;
}
static __inline__ wait_result_t
vm_object_wait(
	vm_object_t		object,
	int			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

	vm_object_assert_wait(object, event, interruptible);
	vm_object_unlock(object);
	wr = thread_block(THREAD_CONTINUE_NULL);
	return wr;
}
971 thread_sleep_vm_object(
974 wait_interrupt_t interruptible
)
978 #if DEVELOPMENT || DEBUG
979 if (object
->Lock_owner
!= current_thread())
980 panic("thread_sleep_vm_object: now owner - %p\n", object
);
981 object
->Lock_owner
= 0;
983 wr
= lck_rw_sleep(&object
->Lock
,
984 LCK_SLEEP_PROMOTED_PRI
,
987 #if DEVELOPMENT || DEBUG
988 object
->Lock_owner
= current_thread();
static __inline__ wait_result_t
vm_object_sleep(
	vm_object_t		object,
	int			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = thread_sleep_vm_object(object,
				    (event_t)((vm_offset_t)object + event),
				    interruptible);
	return wr;
}
static __inline__ void
vm_object_wakeup(
	vm_object_t		object,
	int			event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	if (object->all_wanted & (1 << event))
		thread_wakeup((event_t)((vm_offset_t)object + event));
	object->all_wanted &= ~(1 << event);
}
static __inline__ void
vm_object_set_wanted(
	vm_object_t		object,
	int			event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= (1 << event);
}
static __inline__ int
vm_object_wanted(
	vm_object_t		object,
	int			event)
{
	vm_object_lock_assert_held(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	return object->all_wanted & (1 << event);
}
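/*
 * Illustrative waiter/waker pairing (a sketch, not from this file): the
 * waiter records interest and sleeps, the thread that clears the condition
 * issues the wakeup; both run with the object locked exclusive.
 *
 *	// waiter:
 *	while (!object->pager_ready) {
 *		vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *				THREAD_UNINT);
 *	}
 *
 *	// waker:
 *	object->pager_ready = TRUE;
 *	vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
 */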
/*
 *	Routines implemented as macros
 */
#if VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)					\
	MACRO_BEGIN							\
	int pip = ((object)->paging_in_progress +			\
		   (object)->activity_in_progress);			\
	if (pip < VM_PIP_DEBUG_MAX_REFS) {				\
		(void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
				   VM_PIP_DEBUG_STACK_FRAMES);		\
	}								\
	MACRO_END
#else	/* VM_PIP_DEBUG */
#define	VM_PIP_DEBUG_BEGIN(object)
#endif	/* VM_PIP_DEBUG */
#define vm_object_activity_begin(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->activity_in_progress++;				\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_begin(%p): overflow\n", (object));\
	}								\
	MACRO_END

#define vm_object_activity_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_end(%p): underflow\n", (object));\
	}								\
	(object)->activity_in_progress--;				\
	if ((object)->paging_in_progress == 0 &&			\
	    (object)->activity_in_progress == 0)			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_IN_PROGRESS);	\
	MACRO_END
#define vm_object_paging_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->paging_in_progress++;					\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_begin(%p): overflow\n", (object));\
	}								\
	MACRO_END

#define vm_object_paging_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_end(%p): underflow\n", (object));\
	}								\
	(object)->paging_in_progress--;					\
	if ((object)->paging_in_progress == 0) {			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
		if ((object)->activity_in_progress == 0)		\
			vm_object_wakeup((object),			\
					 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	}								\
	MACRO_END
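/*
 * Illustrative pairing (a sketch, not from this file): hold a paging
 * reference across an operation that may drop the object lock, so the
 * object cannot be collapsed or terminated underneath it.
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	vm_object_unlock(object);
 *	// ... issue the pagein/pageout ...
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);
 *	vm_object_unlock(object);
 */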
#define vm_object_paging_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0 ||			\
	       (object)->activity_in_progress != 0) {			\
		wait_result_t	_wr;					\
									\
		_wr = vm_object_sleep((object),				\
				      VM_OBJECT_EVENT_PAGING_IN_PROGRESS, \
				      (interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	MACRO_END

#define vm_object_paging_only_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0) {			\
		wait_result_t	_wr;					\
									\
		_wr = vm_object_sleep((object),				\
				      VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\
				      (interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	MACRO_END
#define vm_object_mapping_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert(!(object)->mapping_in_progress);				\
	(object)->mapping_in_progress = TRUE;				\
	MACRO_END

#define vm_object_mapping_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->mapping_in_progress);				\
	(object)->mapping_in_progress = FALSE;				\
	vm_object_wakeup((object),					\
			 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);		\
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->mapping_in_progress) {				\
		wait_result_t	_wr;					\
									\
		_wr = vm_object_sleep((object),				\
				      VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
				      (interruptible));			\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	assert(!(object)->mapping_in_progress);				\
	MACRO_END
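/*
 * Illustrative handshake (a sketch, not from this file): only one thread
 * maps or unmaps the pager at a time; others wait for the transition to
 * finish.
 *
 *	vm_object_lock(object);
 *	vm_object_mapping_wait(object, THREAD_UNINT);
 *	vm_object_mapping_begin(object);
 *	vm_object_unlock(object);
 *	// ... map or unmap the pager ...
 *	vm_object_lock(object);
 *	vm_object_mapping_end(object);
 *	vm_object_unlock(object);
 */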
#define vm_object_round_page(x)	(((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x)	((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
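/*
 * Worked example, assuming 4 KB pages (PAGE_MASK == 0xFFF):
 *
 *	vm_object_trunc_page(0x1234) == 0x1000
 *	vm_object_round_page(0x1234) == 0x2000
 */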
extern void	vm_object_cache_add(vm_object_t);
extern void	vm_object_cache_remove(vm_object_t);
extern int	vm_object_cache_evict(int, int);
#define VM_OBJECT_OWNER_DISOWNED ((task_t) -1)
#define VM_OBJECT_OWNER(object)						\
	((((object)->purgable == VM_PURGABLE_DENY &&			\
	   (object)->vo_ledger_tag == 0) ||				\
	  (object)->vo_owner == TASK_NULL)				\
	 ? TASK_NULL	/* not owned */					\
	 : (((object)->vo_owner == VM_OBJECT_OWNER_DISOWNED)		\
	    ? kernel_task	/* disowned -> kernel */		\
	    : (object)->vo_owner))	/* explicit owner */
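/*
 * Illustrative use (a sketch, not from this file): resolve the task to
 * charge for this object, treating disowned objects as kernel-owned.
 *
 *	task_t owner = VM_OBJECT_OWNER(object);
 *	if (owner != TASK_NULL) {
 *		// ... charge owner's ledgers for this object ...
 *	}
 */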
extern void	vm_object_ledger_tag_ledgers(
			vm_object_t object,
			int *ledger_idx_volatile,
			int *ledger_idx_nonvolatile,
			int *ledger_idx_volatile_compressed,
			int *ledger_idx_nonvolatile_compressed,
			boolean_t *do_footprint);
extern kern_return_t	vm_object_ownership_change(
			vm_object_t object,
			int new_ledger_tag,
			task_t new_owner,
			boolean_t task_objq_locked);
#endif	/* _VM_VM_OBJECT_H_ */