/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Author: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Virtual memory object module definitions.
 */
#ifndef _VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <mach_assert.h>
#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/vm_external.h>

#include <vm/vm_options.h>
#include <vm/vm_page.h>
#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern boolean_t vm_object_tracking_inited;
extern btlog_t *vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_NUM_RECORDS  50000
#define VM_OBJECT_TRACKING_BTDEPTH      7
#define VM_OBJECT_TRACKING_OP_CREATED   1
#define VM_OBJECT_TRACKING_OP_MODIFIED  2
#define VM_OBJECT_TRACKING_OP_TRUESHARE 3
#endif /* VM_OBJECT_TRACKING */
/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */
struct vm_object_fault_info {
	vm_size_t           cluster_size;
	vm_behavior_t       behavior;
	vm_object_offset_t  lo_offset;
	vm_object_offset_t  hi_offset;
	unsigned int
	/* boolean_t */ no_cache:1,
	/* boolean_t */ stealth:1,
	/* boolean_t */ io_sync:1,
	/* boolean_t */ cs_bypass:1,
	/* boolean_t */ pmap_cs_associated:1,
	/* boolean_t */ mark_zf_absent:1,
	/* boolean_t */ batch_pmap_op:1,
	/* boolean_t */ resilient_media:1,
	/* boolean_t */ no_copy_on_read:1,
	    __vm_object_fault_info_unused_bits:23;
};
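/*
 * A minimal usage sketch (illustrative, not part of the original header):
 * fault-handling code typically zero-fills a vm_object_fault_info and sets
 * only the fields it cares about. The values below are assumptions chosen
 * for illustration.
 */
#if 0
	struct vm_object_fault_info fault_info = {
		.cluster_size = 0,               /* let the pager pick */
		.behavior = VM_BEHAVIOR_DEFAULT, /* no read-ahead hint */
		.lo_offset = 0,
		.hi_offset = (vm_object_offset_t)(16 * PAGE_SIZE),
		.no_cache = 1,                   /* bypass the page cache */
	};
#endif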
#define vo_size                 vo_un1.vou_size
#define vo_cache_pages_to_scan  vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset        vo_un2.vou_shadow_offset
#define vo_cache_ts             vo_un2.vou_cache_ts
#define vo_owner                vo_un2.vou_owner
struct vm_object {
	/*
	 * on 64 bit systems we pack the pointers hung off the memq.
	 * those pointers have to be able to point back to the memq.
	 * the packed pointers are required to be on a 64 byte boundary
	 * which means 2 things for the vm_object...  (1) the memq
	 * struct has to be the first element of the structure so that
	 * we can control its alignment... (2) the vm_object must be
	 * aligned on a 64 byte boundary... for static vm_objects
	 * this is accomplished via the 'aligned' attribute... for
	 * vm_objects in the zone pool, this is accomplished by
	 * rounding the size of the vm_object element to the nearest
	 * 64 byte size before creating the zone.
	 */
	vm_page_queue_head_t    memq;   /* Resident memory - must be first */
	lck_rw_t                Lock;   /* Synchronization */
#if DEVELOPMENT || DEBUG
	thread_t                Lock_owner;
#endif
	union {
		vm_object_size_t  vou_size;     /* Object size (only valid if internal) */
		int               vou_cache_pages_to_scan;      /* pages yet to be visited in an
		                                                 * external object in cache
		                                                 */
	} vo_un1;
	struct vm_page          *memq_hint;
	int                     ref_count;      /* Number of references */
	unsigned int            resident_page_count;
	/* number of resident pages */
	unsigned int            wired_page_count;       /* number of wired pages
	                                                 * use VM_OBJECT_WIRED_PAGE_UPDATE macros to update */
	unsigned int            reusable_page_count;
	struct vm_object        *copy;          /* Object that should receive
	                                         * a copy of my changed pages,
	                                         * for copy_delay, or just the
	                                         * temporary object that
	                                         * shadows this object, for
	                                         * copy_call.
	                                         */
	struct vm_object        *shadow;        /* My shadow */
	memory_object_t         pager;          /* Where to get data */
	union {
		vm_object_offset_t vou_shadow_offset;   /* Offset into shadow */
		clock_sec_t     vou_cache_ts;   /* age of an external object
		                                 * present in cache
		                                 */
		task_t          vou_owner;      /* If the object is purgeable
		                                 * or has a "ledger_tag", this
		                                 * is the task that owns it.
		                                 */
	} vo_un2;
	vm_object_offset_t      paging_offset;  /* Offset into memory object */
	memory_object_control_t pager_control;  /* Where data comes back */

	memory_object_copy_strategy_t
	                        copy_strategy;  /* How to handle data copy */
#if __LP64__
	/*
	 * Some user processes (mostly VirtualMachine software) take a large
	 * number of UPLs (via IOMemoryDescriptors) to wire pages in large
	 * VM objects and overflow the 16-bit "activity_in_progress" counter.
	 * Since we never enforced any limit there, let's give them 32 bits
	 * for backwards compatibility's sake.
	 */
	unsigned int            paging_in_progress:16,
	                        __object1_unused_bits:16;
	unsigned int            activity_in_progress;
#else /* __LP64__ */
	/*
	 * On 32-bit platforms, enlarging "activity_in_progress" would increase
	 * the size of "struct vm_object".  Since we don't know of any actual
	 * overflow of these counters on these platforms, let's keep the
	 * counters as 16-bit integers.
	 */
	unsigned short          paging_in_progress;
	unsigned short          activity_in_progress;
#endif /* __LP64__ */
	/* The memory object ports are
	 * being used (e.g., for pagein
	 * or pageout) -- don't change
	 * any of these fields (i.e.,
	 * don't collapse, destroy or
	 * terminate)
	 */

	unsigned int
	/* boolean_t array */ all_wanted:11,    /* Bit array of "want to be
	                                         * awakened" notations.  See
	                                         * VM_OBJECT_EVENT_* items
	                                         * below */
	/* boolean_t */ pager_created:1,        /* Has pager been created? */
	/* boolean_t */ pager_initialized:1,    /* Are fields ready to use? */
	/* boolean_t */ pager_ready:1,          /* Will pager take requests? */

	/* boolean_t */ pager_trusted:1,        /* The pager for this object
	                                         * is trusted. This is true for
	                                         * all internal objects (backed
	                                         * by the default pager)
	                                         */
	/* boolean_t */ can_persist:1,          /* The kernel may keep the data
	                                         * for this object (and rights
	                                         * to the memory object) after
	                                         * all address map references
	                                         * are deallocated?
	                                         */
	/* boolean_t */ internal:1,             /* Created by the kernel (and
	                                         * therefore, managed by the
	                                         * default memory manager)
	                                         */
	/* boolean_t */ private:1,              /* magic device_pager object,
	                                         * holds private pages only */
	/* boolean_t */ pageout:1,              /* pageout object. contains
	                                         * private pages that refer to
	                                         * a real memory object. */
	/* boolean_t */ alive:1,                /* Not yet terminated */
	/* boolean_t */ purgable:2,             /* Purgable state.  See
	                                         * VM_PURGABLE_*
	                                         */
	/* boolean_t */ purgeable_only_by_kernel:1,
	/* boolean_t */ purgeable_when_ripe:1,  /* Purgeable when a token
	                                         * has expired.
	                                         */
	/* boolean_t */ shadowed:1,             /* Shadow may exist */
	/* boolean_t */ true_share:1,
	                                        /* This object is mapped
	                                         * in more than one place
	                                         * and hence cannot be
	                                         * coalesced */
	/* boolean_t */ terminating:1,
	                                        /* Allows vm_object_lookup
	                                         * and vm_object_deallocate
	                                         * to special case their
	                                         * behavior when they are
	                                         * called as a result of
	                                         * page cleaning during
	                                         * object termination
	                                         */
	/* boolean_t */ named:1,                /* Enforces an internal
	                                         * naming convention, by
	                                         * calling the right routines
	                                         * for allocation and
	                                         * destruction, UBC references
	                                         * against the vm_object are
	                                         * checked.
	                                         */
	/* boolean_t */ shadow_severed:1,
	                                        /* When a permanent object
	                                         * backing a COW goes away
	                                         * unexpectedly.  This bit
	                                         * allows vm_fault to return
	                                         * an error rather than a
	                                         * zero filled page.
	                                         */
	/* boolean_t */ phys_contiguous:1,
	                                        /* Memory is wired and
	                                         * guaranteed physically
	                                         * contiguous.  However
	                                         * it is not device memory
	                                         * and obeys normal virtual
	                                         * memory rules w.r.t pmap
	                                         * access bits.
	                                         */
	/* boolean_t */ nophyscache:1,
	                                        /* When mapped at the
	                                         * pmap level, don't allow
	                                         * primary caching. (for
	                                         * I/O)
	                                         */
	/* boolean_t */ _object5_unused_bits:1;

	queue_chain_t           cached_list;    /* Attachment point for the
	                                         * list of objects cached as a
	                                         * result of their can_persist
	                                         * value
	                                         */
	/*
	 * the following fields are not protected by any locks
	 * they are updated via atomic compare and swap
	 */
	vm_object_offset_t      last_alloc;     /* last allocation offset */
	vm_offset_t             cow_hint;       /* last page present in */
	                                        /* shadow but not in object */
	int                     sequential;     /* sequential access size */

	uint32_t                pages_created;
	/* hold object lock when altering */
	unsigned int
	    wimg_bits:8,                /* cache WIMG bits */
	    code_signed:1,              /* pages are signed and should be
	                                 * validated; the signatures are stored
	                                 * with the pager */
	    transposed:1,               /* object was transposed with another */
	    mapping_in_progress:1,      /* pager being mapped/unmapped */
	    volatile_fault:1,
	    object_is_shared_cache:1,
	    purgeable_queue_type:2,
	    purgeable_queue_group:3,
	    no_tag_update:1,            /* */
#if CONFIG_SECLUDED_MEMORY
	    eligible_for_secluded:1,
	    can_grab_secluded:1,
#else /* CONFIG_SECLUDED_MEMORY */
	    __object3_unused_bits:2,
#endif /* CONFIG_SECLUDED_MEMORY */
#if VM_OBJECT_ACCESS_TRACKING
	    access_tracking:1,
#else /* VM_OBJECT_ACCESS_TRACKING */
	    __unused_access_tracking:1,
#endif /* VM_OBJECT_ACCESS_TRACKING */
	    vo_ledger_tag:3,
	    vo_no_footprint:1;
#if VM_OBJECT_ACCESS_TRACKING
	uint32_t        access_tracking_reads;
	uint32_t        access_tracking_writes;
#endif /* VM_OBJECT_ACCESS_TRACKING */

	uint8_t         scan_collisions;
	uint8_t         __object4_unused_bits[1];
	vm_tag_t        wire_tag;
#if CONFIG_PHANTOM_CACHE
	uint32_t        phantom_object_id;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	queue_head_t    uplq;           /* List of outstanding upls */
#endif
#ifdef VM_PIP_DEBUG
	/*
	 * Keep track of the stack traces for the first holders
	 * of a "paging_in_progress" reference for this VM object.
	 */
#define VM_PIP_DEBUG_STACK_FRAMES 25    /* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS     10    /* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif /* VM_PIP_DEBUG */
	queue_chain_t   objq;      /* object queue - currently used for purgable queues */
	queue_chain_t   task_objq; /* objects owned by task - protected by task lock */

#if !VM_TAG_ACTIVE_UPDATE
	queue_chain_t   wired_objq;
#endif /* !VM_TAG_ACTIVE_UPDATE */
#if DEBUG
	void            *purgeable_owner_bt[16];
	task_t          vo_purgeable_volatilizer;       /* who made it volatile? */
	void            *purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};
#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)				\
	((object)->volatile_fault &&					\
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||			\
	  (object)->purgable == VM_PURGABLE_EMPTY))
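/*
 * Illustrative use (a sketch, not from the original header): fault handling
 * can check this predicate and fail the fault instead of supplying a page
 * when a volatile or already-emptied purgeable object is touched.
 */
#if 0
	if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
		return KERN_MEMORY_ERROR;       /* hypothetical error choice */
	}
#endif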
#if VM_OBJECT_ACCESS_TRACKING
extern uint64_t vm_object_access_tracking_reads;
extern uint64_t vm_object_access_tracking_writes;
extern void vm_object_access_tracking(vm_object_t object,
    int *access_tracking,
    uint32_t *access_tracking_reads,
    uint32_t *access_tracking_writes);
#endif /* VM_OBJECT_ACCESS_TRACKING */
extern vm_object_t      kernel_object;          /* the single kernel object */

extern vm_object_t      compressor_object;      /* the single compressor object */

extern unsigned int     vm_object_absent_max;   /* maximum number of absent pages
                                                 * at a time for each object */
# define VM_MSYNC_INITIALIZED           0
# define VM_MSYNC_SYNCHRONIZING         1
# define VM_MSYNC_DONE                  2
extern lck_grp_t        vm_map_lck_grp;
extern lck_attr_t       vm_map_lck_attr;
#ifndef VM_TAG_ACTIVE_UPDATE
#error VM_TAG_ACTIVE_UPDATE
#endif
#if VM_TAG_ACTIVE_UPDATE
#define VM_OBJECT_WIRED_ENQUEUE(object)  panic("VM_OBJECT_WIRED_ENQUEUE")
#define VM_OBJECT_WIRED_DEQUEUE(object)  panic("VM_OBJECT_WIRED_DEQUEUE")
#else /* VM_TAG_ACTIVE_UPDATE */
#define VM_OBJECT_WIRED_ENQUEUE(object)					\
	MACRO_BEGIN							\
	lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	assert(!(object)->wired_objq.next);				\
	assert(!(object)->wired_objq.prev);				\
	queue_enter(&vm_objects_wired, (object),			\
	    vm_object_t, wired_objq);					\
	lck_spin_unlock(&vm_objects_wired_lock);			\
	MACRO_END
#define VM_OBJECT_WIRED_DEQUEUE(object)					\
	MACRO_BEGIN							\
	if ((object)->wired_objq.next) {				\
		lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
		queue_remove(&vm_objects_wired, (object),		\
		    vm_object_t, wired_objq);				\
		lck_spin_unlock(&vm_objects_wired_lock);		\
	}								\
	MACRO_END
#endif /* VM_TAG_ACTIVE_UPDATE */
#define VM_OBJECT_WIRED(object, tag)					\
	MACRO_BEGIN							\
	assert(VM_KERN_MEMORY_NONE != (tag));				\
	assert(VM_KERN_MEMORY_NONE == (object)->wire_tag);		\
	(object)->wire_tag = (tag);					\
	if (!VM_TAG_ACTIVE_UPDATE) {					\
		VM_OBJECT_WIRED_ENQUEUE((object));			\
	}								\
	MACRO_END

#define VM_OBJECT_UNWIRED(object)					\
	MACRO_BEGIN							\
	if (!VM_TAG_ACTIVE_UPDATE) {					\
		VM_OBJECT_WIRED_DEQUEUE((object));			\
	}								\
	if (VM_KERN_MEMORY_NONE != (object)->wire_tag) {		\
		vm_tag_update_size((object)->wire_tag, -ptoa_64((object)->wired_page_count)); \
		(object)->wire_tag = VM_KERN_MEMORY_NONE;		\
	}								\
	MACRO_END
// These two macros start & end a C block
#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object)			\
	{								\
	int64_t __wireddelta = 0; vm_tag_t __waswired = (object)->wire_tag;

#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag)			\
	if (__wireddelta) {						\
		boolean_t __overflow __assert_only =			\
		    os_add_overflow((object)->wired_page_count, __wireddelta, \
		    &(object)->wired_page_count);			\
		assert(!__overflow);					\
		if (!(object)->pageout && !(object)->no_tag_update) {	\
			if (__wireddelta > 0) {				\
				assert(VM_KERN_MEMORY_NONE != (tag));	\
				if (VM_KERN_MEMORY_NONE == __waswired) { \
					VM_OBJECT_WIRED((object), (tag)); \
				}					\
				vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \
			} else if (VM_KERN_MEMORY_NONE != __waswired) {	\
				assert(VM_KERN_MEMORY_NONE != (object)->wire_tag); \
				vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \
				if (!(object)->wired_page_count) {	\
					VM_OBJECT_UNWIRED((object));	\
				}					\
			}						\
		}							\
	}								\
	}

#define VM_OBJECT_WIRED_PAGE_COUNT(object, delta)			\
	__wireddelta += delta;

#define VM_OBJECT_WIRED_PAGE_ADD(object, m)				\
	if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta++;

#define VM_OBJECT_WIRED_PAGE_REMOVE(object, m)				\
	if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta--;
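/*
 * Usage sketch (illustrative, not from the original header): because
 * VM_OBJECT_WIRED_PAGE_UPDATE_START opens a C block that only
 * VM_OBJECT_WIRED_PAGE_UPDATE_END closes, callers must bracket their
 * page-by-page updates between the two, e.g. when wiring a page list.
 * "first_page" and the traversal are hypothetical.
 */
#if 0
	vm_page_t m, next;                        /* hypothetical locals */
	vm_object_lock(object);
	VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
	for (m = first_page; m != VM_PAGE_NULL; m = next) {
		next = m->vmp_snext;              /* hypothetical traversal */
		VM_OBJECT_WIRED_PAGE_ADD(object, m);
	}
	VM_OBJECT_WIRED_PAGE_UPDATE_END(object, VM_KERN_MEMORY_OSFMK);
	vm_object_unlock(object);
#endif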
#define OBJECT_LOCK_SHARED      0
#define OBJECT_LOCK_EXCLUSIVE   1

extern lck_grp_t        vm_object_lck_grp;
extern lck_attr_t       vm_object_lck_attr;
extern lck_attr_t       kernel_object_lck_attr;
extern lck_attr_t       compressor_object_lck_attr;

extern vm_object_t      vm_pageout_scan_wants_object;
extern void             vm_object_lock(vm_object_t);
extern bool             vm_object_lock_check_contended(vm_object_t);
extern boolean_t        vm_object_lock_try(vm_object_t);
extern boolean_t        _vm_object_lock_try(vm_object_t);
extern boolean_t        vm_object_lock_avoid(vm_object_t);
extern void             vm_object_lock_shared(vm_object_t);
extern boolean_t        vm_object_lock_yield_shared(vm_object_t);
extern boolean_t        vm_object_lock_try_shared(vm_object_t);
extern void             vm_object_unlock(vm_object_t);
extern boolean_t        vm_object_lock_upgrade(vm_object_t);
/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)					\
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,		\
	    (((object) == kernel_object ||				\
	      (object) == vm_submap_object) ?				\
	     &kernel_object_lck_attr :					\
	     (((object) == compressor_object) ?				\
	      &compressor_object_lck_attr :				\
	      &vm_object_lck_attr)))

#define vm_object_lock_destroy(object)  lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_lock_try_scan(object) _vm_object_lock_try(object)
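/*
 * A common pattern with this lock API (a sketch, not from the original
 * header): take the lock shared for read-mostly walks and upgrade to
 * exclusive only when a modification turns out to be needed. Note that
 * vm_object_lock_upgrade() can fail; on failure the lock has been dropped
 * and must be re-taken exclusive. "needs_update" is a hypothetical
 * predicate used only for illustration.
 */
#if 0
	vm_object_lock_shared(object);
	if (needs_update(object)) {             /* hypothetical predicate */
		if (!vm_object_lock_upgrade(object)) {
			/* lock was released on failure; retry exclusive */
			vm_object_lock(object);
		}
		/* ... modify the object ... */
	}
	vm_object_unlock(object);
#endif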
/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object)				\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object)				\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object)			\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_object_lock_assert_notheld(object)				\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
#else  /* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#define vm_object_lock_assert_notheld(object)
#endif /* MACH_ASSERT || DEBUG */
/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void         vm_object_bootstrap(void);

__private_extern__ void         vm_object_reaper_init(void);

__private_extern__ vm_object_t  vm_object_allocate(vm_object_size_t size);

__private_extern__ void         _vm_object_allocate(vm_object_size_t size,
    vm_object_t object);
#if TASK_SWAPPER

__private_extern__ void vm_object_res_reference(
	vm_object_t object);
__private_extern__ void vm_object_res_deallocate(
	vm_object_t object);
#define VM_OBJ_RES_INCR(object) (object)->res_count++
#define VM_OBJ_RES_DECR(object) (object)->res_count--

#else /* TASK_SWAPPER */

#define VM_OBJ_RES_INCR(object)
#define VM_OBJ_RES_DECR(object)
#define vm_object_res_reference(object)
#define vm_object_res_deallocate(object)

#endif /* TASK_SWAPPER */
#define vm_object_reference_locked(object)				\
	MACRO_BEGIN							\
	vm_object_t RLObject = (object);				\
	vm_object_lock_assert_exclusive(object);			\
	assert((RLObject)->ref_count > 0);				\
	(RLObject)->ref_count++;					\
	assert((RLObject)->ref_count > 1);				\
	vm_object_res_reference(RLObject);				\
	MACRO_END

#define vm_object_reference_shared(object)				\
	MACRO_BEGIN							\
	vm_object_t RLObject = (object);				\
	vm_object_lock_assert_shared(object);				\
	assert((RLObject)->ref_count > 0);				\
	OSAddAtomic(1, &(RLObject)->ref_count);				\
	assert((RLObject)->ref_count > 0);				\
	/* XXX we would need an atomic version of the following ... */	\
	vm_object_res_reference(RLObject);				\
	MACRO_END
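/*
 * The split between these two macros mirrors the lock mode (a sketch of
 * the intended use, not from the original header): an exclusive holder may
 * bump ref_count with a plain increment, while a shared holder must use the
 * atomic OSAddAtomic variant because other shared holders can race on it.
 */
#if 0
	vm_object_lock_shared(object);
	vm_object_reference_shared(object);   /* atomic add under shared lock */
	vm_object_unlock(object);
#endif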
#if MACH_ASSERT

__private_extern__ void vm_object_reference(
	vm_object_t object);

#else /* MACH_ASSERT */

#define vm_object_reference(object)					\
	MACRO_BEGIN							\
	vm_object_t RObject = (object);					\
	if (RObject) {							\
		vm_object_lock_shared(RObject);				\
		vm_object_reference_shared(RObject);			\
		vm_object_unlock(RObject);				\
	}								\
	MACRO_END

#endif /* MACH_ASSERT */
__private_extern__ void vm_object_deallocate(
	vm_object_t object);

__private_extern__ kern_return_t vm_object_release_name(
	vm_object_t object,
	int flags);

__private_extern__ void vm_object_pmap_protect(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	pmap_t                  pmap,
	vm_map_size_t           pmap_page_size,
	vm_map_offset_t         pmap_start,
	vm_prot_t               prot);
__private_extern__ void vm_object_pmap_protect_options(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	pmap_t                  pmap,
	vm_map_size_t           pmap_page_size,
	vm_map_offset_t         pmap_start,
	vm_prot_t               prot,
	int                     options);
__private_extern__ void vm_object_page_remove(
	vm_object_t             object,
	vm_object_offset_t      start,
	vm_object_offset_t      end);
__private_extern__ void vm_object_deactivate_pages(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	boolean_t               kill_page,
	boolean_t               reusable_page,
	struct pmap             *pmap,
/* XXX TODO4K: need pmap_page_size here too? */
	vm_map_offset_t         pmap_offset);
__private_extern__ void vm_object_reuse_pages(
	vm_object_t             object,
	vm_object_offset_t      start_offset,
	vm_object_offset_t      end_offset,
	boolean_t               allow_partial_reuse);
__private_extern__ uint64_t vm_object_purge(
	vm_object_t             object,
	int                     flags);
__private_extern__ kern_return_t vm_object_purgable_control(
	vm_object_t             object,
	vm_purgable_t           control,
	int                     *state);
__private_extern__ kern_return_t vm_object_get_page_counts(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	unsigned int            *resident_page_count,
	unsigned int            *dirty_page_count);
__private_extern__ boolean_t vm_object_coalesce(
	vm_object_t             prev_object,
	vm_object_t             next_object,
	vm_object_offset_t      prev_offset,
	vm_object_offset_t      next_offset,
	vm_object_size_t        prev_size,
	vm_object_size_t        next_size);
__private_extern__ boolean_t vm_object_shadow(
	vm_object_t             *object,
	vm_object_offset_t      *offset,
	vm_object_size_t        length);
__private_extern__ void vm_object_collapse(
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               can_bypass);
__private_extern__ boolean_t vm_object_copy_quickly(
	vm_object_t             *_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               *_src_needs_copy,
	boolean_t               *_dst_needs_copy);
__private_extern__ kern_return_t vm_object_copy_strategically(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	vm_object_t             *dst_object,
	vm_object_offset_t      *dst_offset,
	boolean_t               *dst_needs_copy);
__private_extern__ kern_return_t vm_object_copy_slowly(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               interruptible,
	vm_object_t             *_result_object);
__private_extern__ vm_object_t vm_object_copy_delayed(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               src_object_shared);
__private_extern__ kern_return_t vm_object_destroy(
	vm_object_t             object,
	kern_return_t           reason);
__private_extern__ void vm_object_pager_create(
	vm_object_t             object);

__private_extern__ void vm_object_compressor_pager_create(
	vm_object_t             object);
__private_extern__ void vm_object_page_map(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_object_offset_t      (*map_fn)
	(void *, vm_object_offset_t),
	void                    *map_fn_data);
__private_extern__ kern_return_t vm_object_upl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_t                   *upl,
	upl_page_info_t         *page_info,
	unsigned int            *count,
	upl_control_flags_t     flags,
	vm_tag_t                tag);
__private_extern__ kern_return_t vm_object_transpose(
	vm_object_t             object1,
	vm_object_t             object2,
	vm_object_size_t        transpose_size);
__private_extern__ boolean_t vm_object_sync(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	boolean_t               should_flush,
	boolean_t               should_return,
	boolean_t               should_iosync);
__private_extern__ kern_return_t vm_object_update(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_object_offset_t      *error_offset,
	int                     *io_errno,
	memory_object_return_t  should_return,
	int                     flags,
	vm_prot_t               prot);
__private_extern__ kern_return_t vm_object_lock_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	memory_object_return_t  should_return,
	int                     flags,
	vm_prot_t               prot);
__private_extern__ vm_object_t vm_object_memory_object_associate(
	memory_object_t         pager,
	vm_object_t             object,
	vm_object_size_t        size,
	boolean_t               check_named);
__private_extern__ void vm_object_cluster_size(
	vm_object_t             object,
	vm_object_offset_t      *start,
	vm_size_t               *length,
	vm_object_fault_info_t  fault_info,
	uint32_t                *io_streaming);
__private_extern__ kern_return_t vm_object_populate_with_private(
	vm_object_t             object,
	vm_object_offset_t      offset,
	ppnum_t                 phys_page,
	vm_size_t               size);
__private_extern__ void vm_object_change_wimg_mode(
	vm_object_t             object,
	unsigned int            wimg_mode);
extern kern_return_t adjust_vm_object_cache(
	vm_size_t oval,
	vm_size_t nval);
extern kern_return_t vm_object_page_op(
	vm_object_t             object,
	vm_object_offset_t      offset,
	int                     ops,
	ppnum_t                 *phys_entry,
	int                     *flags);
extern kern_return_t vm_object_range_op(
	vm_object_t             object,
	vm_object_offset_t      offset_beg,
	vm_object_offset_t      offset_end,
	int                     ops,
	uint32_t                *range);
__private_extern__ void vm_object_reap_pages(
	vm_object_t             object,
	int                     reap_type);
#define REAP_REAP       0
#define REAP_TERMINATE  1
#define REAP_PURGEABLE  2
#define REAP_DATA_FLUSH 3
#if CONFIG_FREEZE

__private_extern__ uint32_t
vm_object_compressed_freezer_pageout(
	vm_object_t object, uint32_t dirty_budget);

__private_extern__ void
vm_object_compressed_freezer_done(
	vm_object_t object);

#endif /* CONFIG_FREEZE */
__private_extern__ void
vm_object_pageout(
	vm_object_t object);
#if CONFIG_IOSCHED
struct io_reprioritize_req {
	uint64_t        blkno;
	uint32_t        len;
	int             priority;
	struct vnode    *devvp;
	queue_chain_t   io_reprioritize_list;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif /* CONFIG_IOSCHED */
/*
 *	Event waiting handling
 */

#define VM_OBJECT_EVENT_INITIALIZED             0
#define VM_OBJECT_EVENT_PAGER_READY             1
#define VM_OBJECT_EVENT_PAGING_IN_PROGRESS      2
#define VM_OBJECT_EVENT_MAPPING_IN_PROGRESS     3
#define VM_OBJECT_EVENT_LOCK_IN_PROGRESS        4
#define VM_OBJECT_EVENT_UNCACHING               5
#define VM_OBJECT_EVENT_COPY_CALL               6
#define VM_OBJECT_EVENT_CACHING                 7
#define VM_OBJECT_EVENT_UNBLOCKED               8
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS 9

#define VM_OBJECT_EVENT_MAX 10  /* 11 bits in "all_wanted", so 0->10 */
static __inline__ wait_result_t
vm_object_assert_wait(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = assert_wait((event_t)((vm_offset_t)object + (vm_offset_t)event),
	    interruptible);
	return wr;
}
static __inline__ wait_result_t
vm_object_wait(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_assert_wait(object, event, interruptible);
	vm_object_unlock(object);
	wr = thread_block(THREAD_CONTINUE_NULL);
	return wr;
}
static __inline__ wait_result_t
thread_sleep_vm_object(
	vm_object_t             object,
	event_t                 event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

#if DEVELOPMENT || DEBUG
	if (object->Lock_owner != current_thread()) {
		panic("thread_sleep_vm_object: not owner - %p\n", object);
	}
	object->Lock_owner = 0;
#endif
	wr = lck_rw_sleep(&object->Lock,
	    LCK_SLEEP_PROMOTED_PRI,
	    event,
	    interruptible);
#if DEVELOPMENT || DEBUG
	object->Lock_owner = current_thread();
#endif

	return wr;
}
static __inline__ wait_result_t
vm_object_sleep(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = thread_sleep_vm_object(object,
	    (event_t)((vm_offset_t)object + (vm_offset_t)event),
	    interruptible);
	return wr;
}
static __inline__ void
vm_object_wakeup(
	vm_object_t             object,
	int                     event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	if (object->all_wanted & (1 << event)) {
		thread_wakeup((event_t)((vm_offset_t)object + (vm_offset_t)event));
	}
	object->all_wanted &= ~(1 << event);
}
static __inline__ void
vm_object_set_wanted(
	vm_object_t             object,
	int                     event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= (1 << event);
}
static __inline__ int
vm_object_wanted(
	vm_object_t             object,
	int                     event)
{
	vm_object_lock_assert_held(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	return object->all_wanted & (1 << event);
}
/*
 *	Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)					\
	MACRO_BEGIN							\
	int pip = ((object)->paging_in_progress +			\
	    (object)->activity_in_progress);				\
	if (pip < VM_PIP_DEBUG_MAX_REFS) {				\
		(void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
		    VM_PIP_DEBUG_STACK_FRAMES);				\
	}								\
	MACRO_END
#else  /* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif /* VM_PIP_DEBUG */
#define vm_object_activity_begin(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->activity_in_progress++;				\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_begin(%p): overflow\n", (object));\
	}								\
	MACRO_END

#define vm_object_activity_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_end(%p): underflow\n", (object));\
	}								\
	(object)->activity_in_progress--;				\
	if ((object)->paging_in_progress == 0 &&			\
	    (object)->activity_in_progress == 0)			\
		vm_object_wakeup((object),				\
		    VM_OBJECT_EVENT_PAGING_IN_PROGRESS);		\
	MACRO_END

#define vm_object_paging_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->paging_in_progress++;					\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_begin(%p): overflow\n", (object));\
	}								\
	MACRO_END

#define vm_object_paging_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_end(%p): underflow\n", (object));\
	}								\
	(object)->paging_in_progress--;					\
	if ((object)->paging_in_progress == 0) {			\
		vm_object_wakeup((object),				\
		    VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS);		\
		if ((object)->activity_in_progress == 0)		\
			vm_object_wakeup((object),			\
			    VM_OBJECT_EVENT_PAGING_IN_PROGRESS);	\
	}								\
	MACRO_END
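/*
 * Bracketing sketch (illustrative, not from the original header): code that
 * starts a paging operation takes a paging reference under the exclusive
 * object lock and must drop it on every exit path, so that waiters in
 * vm_object_paging_wait() make progress.
 */
#if 0
	vm_object_lock(object);
	vm_object_paging_begin(object);
	vm_object_unlock(object);

	/* ... issue the pagein/pageout I/O ... */

	vm_object_lock(object);
	vm_object_paging_end(object);   /* wakes VM_OBJECT_EVENT_PAGING_* waiters */
	vm_object_unlock(object);
#endif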
#define vm_object_paging_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0 ||			\
	    (object)->activity_in_progress != 0) {			\
		wait_result_t _wr;					\
									\
		_wr = vm_object_sleep((object),				\
		    VM_OBJECT_EVENT_PAGING_IN_PROGRESS,			\
		    (interruptible));					\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
		/*XXX break; */						\
	}								\
	MACRO_END

#define vm_object_paging_only_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0) {			\
		wait_result_t _wr;					\
									\
		_wr = vm_object_sleep((object),				\
		    VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,		\
		    (interruptible));					\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
		/*XXX break; */						\
	}								\
	MACRO_END
#define vm_object_mapping_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert(!(object)->mapping_in_progress);				\
	(object)->mapping_in_progress = TRUE;				\
	MACRO_END

#define vm_object_mapping_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->mapping_in_progress);				\
	(object)->mapping_in_progress = FALSE;				\
	vm_object_wakeup((object),					\
	    VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);			\
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->mapping_in_progress) {				\
		wait_result_t _wr;					\
									\
		_wr = vm_object_sleep((object),				\
		    VM_OBJECT_EVENT_MAPPING_IN_PROGRESS,		\
		    (interruptible));					\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
		/*XXX break; */						\
	}								\
	assert(!(object)->mapping_in_progress);				\
	MACRO_END
#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
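/*
 * Worked example (illustrative): with 4 KB pages, PAGE_MASK is 0xFFF, so
 * vm_object_trunc_page(0x1234) yields 0x1000 and
 * vm_object_round_page(0x1234) yields 0x2000; offsets already on a page
 * boundary are unchanged by both.
 */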
extern void     vm_object_cache_add(vm_object_t);
extern void     vm_object_cache_remove(vm_object_t);
extern int      vm_object_cache_evict(int, int);
#define VM_OBJECT_OWNER_DISOWNED ((task_t) -1)
#define VM_OBJECT_OWNER(object)						\
	((((object)->purgable == VM_PURGABLE_DENY &&			\
	   (object)->vo_ledger_tag == 0) ||				\
	  (object)->vo_owner == TASK_NULL)				\
	 ? TASK_NULL /* not owned */					\
	 : (((object)->vo_owner == VM_OBJECT_OWNER_DISOWNED)		\
	    ? kernel_task /* disowned -> kernel */			\
	    : (object)->vo_owner)) /* explicit owner */
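/*
 * Resolution order, restated as a sketch (not part of the original header):
 * an object that is neither purgeable nor ledger-tagged, or whose owner
 * field is TASK_NULL, has no owner; a disowned object is charged to the
 * kernel; otherwise the recorded owner task is charged.
 */
#if 0
	task_t owner = VM_OBJECT_OWNER(object);
	if (owner != TASK_NULL) {
		/* charge this object's pages to "owner"'s ledgers */
	}
#endif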
extern void vm_object_ledger_tag_ledgers(
	vm_object_t object,
	int *ledger_idx_volatile,
	int *ledger_idx_nonvolatile,
	int *ledger_idx_volatile_compressed,
	int *ledger_idx_nonvolatile_compressed,
	boolean_t *do_footprint);
extern kern_return_t vm_object_ownership_change(
	vm_object_t object,
	int new_ledger_tag,
	task_t new_owner,
	int new_ledger_flags,
	boolean_t task_objq_locked);
// LP64todo: all the current tools are 32-bit, so this obviously never worked
// for 64-bit; it should probably be a real 32-bit ID instead of a pointer.
// Current users just check for equality.
#define VM_OBJECT_ID(o) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((o)))

#endif  /* _VM_VM_OBJECT_H_ */