/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/vm_pageout.h
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Declarations for the pageout daemon interface.
 */
#ifndef _VM_VM_PAGEOUT_H_
#define _VM_VM_PAGEOUT_H_

#ifdef  KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>

#include <kern/kern_types.h>
#include <kern/locks.h>

#include <libkern/OSAtomic.h>

#include <vm/vm_options.h>

#ifdef  MACH_KERNEL_PRIVATE
#include <vm/vm_page.h>
#endif /* MACH_KERNEL_PRIVATE */

#include <sys/kdebug.h>
#define VM_PAGE_AVAILABLE_COUNT()       ((unsigned int)(vm_page_cleaned_count))

/* externally manipulated counters */
extern unsigned int vm_pageout_cleaned_fault_reactivated;

#if CONFIG_FREEZE
extern boolean_t memorystatus_freeze_enabled;
struct freezer_context {
	/*
	 * All these counters & variables track the task
	 * being frozen.
	 * Currently we only freeze one task at a time. Should that
	 * change, we'll need to add support for multiple freezer contexts.
	 */

	task_t          freezer_ctx_task;               /* Task being frozen. */

	void            *freezer_ctx_chead;             /* The chead used to track c_segs allocated */
	                                                /* to freeze the task. */

	uint64_t        freezer_ctx_swapped_bytes;      /* Tracks # of compressed bytes. */

	int             freezer_ctx_uncompressed_pages; /* Tracks # of uncompressed pages frozen. */

	char            *freezer_ctx_compressor_scratch_buf; /* Scratch buffer for the compressor algorithm. */
};

#endif /* CONFIG_FREEZE */
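/*
 * Illustrative sketch only (not part of this interface): the freeze path
 * can thread a single freezer_context through a task's compressions so the
 * per-task totals accumulate in one place. The function below and the
 * specific field updates are hypothetical.
 */
#if 0
static void
example_freeze_task(task_t task)
{
	struct freezer_context ctx = {
		.freezer_ctx_task = task,
	};
	/* ... compress the task's pages, charging this context ... */
	ctx.freezer_ctx_swapped_bytes += 4096;  /* e.g. one compressed page's worth */
	ctx.freezer_ctx_uncompressed_pages += 1;
}
#endif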
#define VM_DYNAMIC_PAGING_ENABLED() (VM_CONFIG_COMPRESSOR_IS_ACTIVE)

#if VM_PRESSURE_EVENTS
extern boolean_t vm_pressure_events_enabled;
#endif /* VM_PRESSURE_EVENTS */
/*
 * the following codes are used in the DBG_MACH_WORKINGSET subclass
 * of the DBG_MACH class
 */
#define VM_DISCONNECT_ALL_PAGE_MAPPINGS         0x00
#define VM_DISCONNECT_TASK_PAGE_MAPPINGS        0x01
#define VM_REAL_FAULT_ADDR_INTERNAL             0x02
#define VM_REAL_FAULT_ADDR_PURGABLE             0x03
#define VM_REAL_FAULT_ADDR_EXTERNAL             0x04
#define VM_REAL_FAULT_ADDR_SHAREDCACHE          0x05
#define VM_REAL_FAULT_FAST                      0x06
#define VM_REAL_FAULT_SLOW                      0x07
#define VM_MAP_LOOKUP_OBJECT                    0x08

extern int      vm_debug_events;
#define VMF_CHECK_ZFDELAY               0x100
#define VMF_COWDELAY                    0x101
#define VMF_ZFDELAY                     0x102
#define VMF_COMPRESSORDELAY             0x103

#define VM_PAGEOUT_SCAN                 0x104
#define VM_PAGEOUT_BALANCE              0x105
#define VM_PAGEOUT_FREELIST             0x106
#define VM_PAGEOUT_PURGEONE             0x107
#define VM_PAGEOUT_CACHE_EVICT          0x108
#define VM_PAGEOUT_THREAD_BLOCK         0x109
#define VM_PAGEOUT_JETSAM               0x10A
#define VM_INFO1                        0x10B
#define VM_INFO2                        0x10C
#define VM_INFO3                        0x10D
#define VM_INFO4                        0x10E
#define VM_INFO5                        0x10F
#define VM_INFO6                        0x110
#define VM_INFO7                        0x111
#define VM_INFO8                        0x112
#define VM_INFO9                        0x113

#define VM_UPL_PAGE_WAIT                0x120
#define VM_IOPL_PAGE_WAIT               0x121
#define VM_PAGE_WAIT_BLOCK              0x122

#define VM_PAGE_SLEEP                   0x123
#define VM_PAGE_EXPEDITE                0x124
#define VM_PAGE_EXPEDITE_NO_MEMORY      0x125

#define VM_PAGE_GRAB                    0x126
#define VM_PAGE_RELEASE                 0x127
#define VM_COMPRESSOR_COMPACT_AND_SWAP  0x128
#define VM_COMPRESSOR_DO_DELAYED_COMPACTIONS 0x129

#define VM_PRESSURE_EVENT               0x130
#define VM_EXECVE                       0x131
#define VM_WAKEUP_COMPACTOR_SWAPPER     0x132
#define VM_UPL_REQUEST                  0x133
#define VM_IOPL_REQUEST                 0x134
#define VM_KERN_REQUEST                 0x135

#define VM_DATA_WRITE                   0x140

#define VM_PRESSURE_LEVEL_CHANGE        0x141

#define VM_PHYS_WRITE_ACCT              0x142
#define VM_DEBUG_EVENT(name, event, control, arg1, arg2, arg3, arg4)   \
	MACRO_BEGIN                                                     \
	if (__improbable(vm_debug_events)) {                            \
	        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	}                                                               \
	MACRO_END

#define VM_DEBUG_CONSTANT_EVENT(name, event, control, arg1, arg2, arg3, arg4)  \
	MACRO_BEGIN                                                     \
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	MACRO_END
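/*
 * Usage sketch: both macros emit a kdebug tracepoint tagged with one of the
 * VM_* codes above. VM_DEBUG_EVENT fires only when the vm_debug_events
 * global is set, while VM_DEBUG_CONSTANT_EVENT always fires. The argument
 * values below are hypothetical.
 */
#if 0
VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
    vm_page_free_count, 0, 0, 0);
/* ... do one scan pass ... */
VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
    vm_page_free_count, 0, 0, 0);
#endif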
extern void memoryshot(unsigned int event, unsigned int control);

extern void update_vm_info(void);

#if CONFIG_IOSCHED
extern int upl_get_cached_tier(upl_t upl);
#endif

extern void upl_set_iodone(upl_t, void *);
extern void upl_set_iodone_error(upl_t, int);
extern void upl_callout_iodone(upl_t);

extern ppnum_t upl_get_highest_page(upl_t upl);

extern upl_size_t upl_get_size(upl_t upl);

extern upl_t upl_associated_upl(upl_t upl);
extern void upl_set_associated_upl(upl_t upl, upl_t associated_upl);
#ifndef MACH_KERNEL_PRIVATE
typedef struct vm_page  *vm_page_t;
#endif

#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_kern.h>
extern upl_size_t upl_adjusted_size(
	upl_t                   upl,
	vm_map_offset_t         page_mask);
extern vm_object_offset_t upl_adjusted_offset(
	upl_t                   upl,
	vm_map_offset_t         page_mask);
extern vm_object_offset_t upl_get_data_offset(
	upl_t                   upl);

extern kern_return_t vm_map_create_upl(
	vm_map_t                map,
	vm_map_address_t        offset,
	upl_size_t              *upl_size,
	upl_t                   *upl,
	upl_page_info_array_t   page_list,
	unsigned int            *count,
	upl_control_flags_t     *flags,
	vm_tag_t                tag);

extern void iopl_valid_data(
	upl_t                   upl_ptr,
	vm_tag_t                tag);

extern void vm_page_free_list(
	vm_page_t               mem,
	boolean_t               prepare_object);

extern kern_return_t vm_page_alloc_list(
	int                     page_count,
	int                     flags,
	vm_page_t               *list);

#endif /* XNU_KERNEL_PRIVATE */
extern struct vnode *upl_lookup_vnode(upl_t upl);

extern void vm_page_set_offset(vm_page_t page, vm_object_offset_t offset);
extern vm_object_offset_t vm_page_get_offset(vm_page_t page);
extern ppnum_t vm_page_get_phys_page(vm_page_t page);
extern vm_page_t vm_page_get_next(vm_page_t page);

extern kern_return_t mach_vm_pressure_level_monitor(boolean_t wait_for_pressure, unsigned int *pressure_level);

#if XNU_TARGET_OS_OSX
extern kern_return_t vm_pageout_wait(uint64_t deadline);
#endif /* XNU_TARGET_OS_OSX */
#ifdef  MACH_KERNEL_PRIVATE

#include <vm/vm_page.h>

extern unsigned int vm_pageout_scan_event_counter;
extern unsigned int vm_page_anonymous_count;
/*
 * must hold the page queues lock to
 * manipulate this structure
 */
struct vm_pageout_queue {
	vm_page_queue_head_t pgo_pending;       /* laundry pages to be processed by pager's iothread */
	uint64_t        pgo_tid;                /* thread ID of I/O thread that services this queue */
	unsigned int    pgo_laundry;            /* current count of laundry pages on queue or in flight */
	unsigned int    pgo_maxlaundry;

	unsigned int    pgo_idle:1,             /* iothread is blocked waiting for work to do */
	    pgo_busy:1,                         /* iothread is currently processing request from pgo_pending */
	    pgo_throttled:1,                    /* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */
	    pgo_lowpriority:1;                  /* iothread is set to use low priority I/O */
};

#define VM_PAGE_Q_THROTTLED(q)          \
	((q)->pgo_laundry >= (q)->pgo_maxlaundry)

extern struct vm_pageout_queue vm_pageout_queue_internal;
extern struct vm_pageout_queue vm_pageout_queue_external;
/*
 * Routines exported to Mach.
 */
extern void vm_pageout(void);

extern kern_return_t vm_pageout_internal_start(void);

extern void vm_pageout_object_terminate(
	vm_object_t     object);

extern void vm_pageout_cluster(
	vm_page_t       m);

extern void vm_pageout_initialize_page(
	vm_page_t       m);
/* UPL exported routines and structures */

#define upl_lock_init(object)    lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define upl_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define upl_lock(object)         lck_mtx_lock(&(object)->Lock)
#define upl_unlock(object)       lck_mtx_unlock(&(object)->Lock)
#define upl_try_lock(object)     lck_mtx_try_lock(&(object)->Lock)
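/*
 * Usage sketch: these wrap the lck_mtx_* calls on the upl's embedded Lock.
 * upl_try_lock() returns nonzero on success, so a failed try must not be
 * paired with upl_unlock(). Illustrative only.
 */
#if 0
if (upl_try_lock(upl)) {
	/* ... operate on the upl ... */
	upl_unlock(upl);
} else {
	/* contended: either block with upl_lock(upl) or retry later */
}
#endif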
#define MAX_VECTOR_UPL_ELEMENTS 8

struct _vector_upl_iostates {
	upl_offset_t    offset;
	upl_size_t      size;
};

typedef struct _vector_upl_iostates vector_upl_iostates_t;

struct _vector_upl {
	upl_size_t              size;
	uint32_t                num_upls;
	uint32_t                invalid_upls;
	uint32_t                _reserved;
	vm_map_t                submap;
	vm_offset_t             submap_dst_addr;
	vm_object_offset_t      offset;
	upl_t                   upl_elems[MAX_VECTOR_UPL_ELEMENTS];
	upl_page_info_array_t   pagelist;
	vector_upl_iostates_t   upl_iostates[MAX_VECTOR_UPL_ELEMENTS];
};

typedef struct _vector_upl *vector_upl_t;
/* universal page list structure */

#if UPL_DEBUG
#define UPL_DEBUG_STACK_FRAMES  16
#define UPL_DEBUG_COMMIT_RECORDS 4

struct ucd {
	upl_offset_t    c_beg;
	upl_offset_t    c_end;
	int             c_aborted;
	void            *c_retaddr[UPL_DEBUG_STACK_FRAMES];
};
#endif /* UPL_DEBUG */

struct upl_io_completion {
	void    *io_context;
	void    (*io_done)(void *, int);

	int     io_error;
};

struct upl {
	decl_lck_mtx_data(, Lock);      /* Synchronization */
	int             ref_count;
	int             ext_ref_count;
	int             flags;
	/*
	 * XXX CAUTION: to accommodate devices with "mixed page sizes",
	 * u_offset and u_size are now byte-aligned and no longer
	 * page-aligned, on all devices.
	 */
	vm_object_offset_t u_offset;
	upl_size_t      u_size;         /* size in bytes of the address space */
	vm_offset_t     kaddr;          /* secondary mapping in kernel */
	vm_object_t     map_object;
	ppnum_t         highest_page;
	void            *vector_upl;
	upl_t           associated_upl;
	struct upl_io_completion *upl_iodone;
#if CONFIG_IOSCHED
	int             upl_priority;
	uint64_t        *upl_reprio_info;
	void            *decmp_io_upl;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	thread_t        upl_creator;
	queue_chain_t   uplq;           /* List of outstanding upls on an obj */
#endif
#if UPL_DEBUG
	uintptr_t       ubc_alias1;
	uintptr_t       ubc_alias2;

	uint32_t        upl_commit_index;
	void            *upl_create_retaddr[UPL_DEBUG_STACK_FRAMES];

	struct ucd      upl_commit_records[UPL_DEBUG_COMMIT_RECORDS];
#endif /* UPL_DEBUG */
};
/* upl struct flags */
#define UPL_PAGE_LIST_MAPPED    0x1
#define UPL_KERNEL_MAPPED       0x2
#define UPL_CLEAR_DIRTY         0x4
#define UPL_COMPOSITE_LIST      0x8
#define UPL_INTERNAL            0x10
#define UPL_PAGE_SYNC_DONE      0x20
#define UPL_DEVICE_MEMORY       0x40
#define UPL_PAGEOUT             0x80
#define UPL_LITE                0x100
#define UPL_IO_WIRE             0x200
#define UPL_ACCESS_BLOCKED      0x400
#define UPL_SHADOWED            0x1000
#define UPL_KERNEL_OBJECT       0x2000
#define UPL_VECTOR              0x4000
#define UPL_SET_DIRTY           0x8000
#define UPL_HAS_BUSY            0x10000
#define UPL_TRACKED_BY_OBJECT   0x20000
#define UPL_EXPEDITE_SUPPORTED  0x40000
#define UPL_DECMP_REQ           0x80000
#define UPL_DECMP_REAL_IO       0x100000

/* flags for upl_create flags parameter */
#define UPL_CREATE_EXTERNAL     0
#define UPL_CREATE_INTERNAL     0x1
#define UPL_CREATE_LITE         0x2
#define UPL_CREATE_IO_TRACKING  0x4
#define UPL_CREATE_EXPEDITE_SUP 0x8
extern upl_t vector_upl_create(vm_offset_t);
extern void vector_upl_deallocate(upl_t);
extern boolean_t vector_upl_is_valid(upl_t);
extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t);
extern void vector_upl_set_pagelist(upl_t);
extern void vector_upl_set_submap(upl_t, vm_map_t, vm_offset_t);
extern void vector_upl_get_submap(upl_t, vm_map_t *, vm_offset_t *);
extern void vector_upl_set_iostate(upl_t, upl_t, upl_offset_t, upl_size_t);
extern void vector_upl_get_iostate(upl_t, upl_t, upl_offset_t *, upl_size_t *);
extern void vector_upl_get_iostate_byindex(upl_t, uint32_t, upl_offset_t *, upl_size_t *);
extern upl_t vector_upl_subupl_byindex(upl_t, uint32_t);
extern upl_t vector_upl_subupl_byoffset(upl_t, upl_offset_t *, upl_size_t *);
extern void vm_object_set_pmap_cache_attr(
	vm_object_t             object,
	upl_page_info_array_t   user_page_list,
	unsigned int            num_pages,
	boolean_t               batch_pmap_op);

extern kern_return_t vm_object_iopl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_t                   *upl_ptr,
	upl_page_info_array_t   user_page_list,
	unsigned int            *page_list_count,
	upl_control_flags_t     cntrl_flags,
	vm_tag_t                tag);

extern kern_return_t vm_object_super_upl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_size_t              super_cluster,
	upl_t                   *upl,
	upl_page_info_t         *user_page_list,
	unsigned int            *page_list_count,
	upl_control_flags_t     cntrl_flags,
	vm_tag_t                tag);
/* should be just a regular vm_map_enter() */
extern kern_return_t vm_map_enter_upl(
	vm_map_t                map,
	upl_t                   upl,
	vm_map_offset_t         *dst_addr);

/* should be just a regular vm_map_remove() */
extern kern_return_t vm_map_remove_upl(
	vm_map_t                map,
	upl_t                   upl);
/* wired page list structure */
typedef uint32_t *wpl_array_t;

extern struct vm_page_delayed_work *vm_page_delayed_work_get_ctx(void);
extern void vm_page_delayed_work_finish_ctx(struct vm_page_delayed_work *dwp);

extern void vm_page_free_reserve(int pages);

extern void vm_pageout_throttle_down(vm_page_t page);
extern void vm_pageout_throttle_up(vm_page_t page);
extern kern_return_t vm_paging_map_object(
	vm_page_t               page,
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_prot_t               protection,
	boolean_t               can_unlock_object,
	vm_map_size_t           *size,          /* IN/OUT */
	vm_map_offset_t         *address,       /* OUT */
	boolean_t               *need_unmap);   /* OUT */
extern void vm_paging_unmap_object(
	vm_object_t             object,
	vm_map_offset_t         start,
	vm_map_offset_t         end);
decl_simple_lock_data(extern, vm_paging_lock);

/*
 * Backing store throttle when BS is exhausted
 */
extern unsigned int vm_backing_store_low;

extern void vm_pageout_steal_laundry(
	vm_page_t       page,
	boolean_t       queues_locked);
#endif /* MACH_KERNEL_PRIVATE */

#if UPL_DEBUG
extern kern_return_t upl_ubc_alias_set(
	upl_t           upl,
	uintptr_t       alias1,
	uintptr_t       alias2);
extern int upl_ubc_alias_get(
	upl_t           upl,
	uintptr_t       *al,
	uintptr_t       *al2);
#endif /* UPL_DEBUG */
extern void vm_countdirtypages(void);

extern void vm_backing_store_disable(
	boolean_t       suspend);

extern kern_return_t upl_transpose(
	upl_t           upl1,
	upl_t           upl2);

extern kern_return_t mach_vm_pressure_monitor(
	boolean_t       wait_for_pressure,
	unsigned int    nsecs_monitored,
	unsigned int    *pages_reclaimed_p,
	unsigned int    *pages_wanted_p);
extern kern_return_t vm_set_buffer_cleanup_callout(
	boolean_t       (*func)(int));
struct vm_page_stats_reusable {
	SInt32          reusable_count;
	uint64_t        reusable;
	uint64_t        reused;
	uint64_t        reused_wire;
	uint64_t        reused_remove;
	uint64_t        all_reusable_calls;
	uint64_t        partial_reusable_calls;
	uint64_t        all_reuse_calls;
	uint64_t        partial_reuse_calls;
	uint64_t        reusable_pages_success;
	uint64_t        reusable_pages_failure;
	uint64_t        reusable_pages_shared;
	uint64_t        reuse_pages_success;
	uint64_t        reuse_pages_failure;
	uint64_t        can_reuse_success;
	uint64_t        can_reuse_failure;
	uint64_t        reusable_reclaimed;
	uint64_t        reusable_nonwritable;
	uint64_t        reusable_shared;
	uint64_t        free_shared;
};
extern struct vm_page_stats_reusable vm_page_stats_reusable;
extern int hibernate_flush_memory(void);
extern void hibernate_reset_stats(void);
extern void hibernate_create_paddr_map(void);

extern void vm_set_restrictions(unsigned int num_cpus);

extern int vm_compressor_mode;
extern kern_return_t vm_pageout_compress_page(void **, char *, vm_page_t);
extern void vm_pageout_anonymous_pages(void);
extern void vm_pageout_disconnect_all_pages(void);
struct vm_config {
	boolean_t       compressor_is_present;  /* compressor is initialized and can be used by the freezer, the sweep or the pager */
	boolean_t       compressor_is_active;   /* pager can actively compress pages... 'compressor_is_present' must be set */
	boolean_t       swap_is_present;        /* swap is initialized and can be used by the freezer, the sweep or the pager */
	boolean_t       swap_is_active;         /* pager can actively swap out compressed segments... 'swap_is_present' must be set */
	boolean_t       freezer_swap_is_active; /* freezer can swap out frozen tasks... "compressor_is_present + swap_is_present" must be set */
};

extern struct vm_config vm_config;

#define VM_PAGER_NOT_CONFIGURED                 0x0     /* no compressor or swap configured */
#define VM_PAGER_DEFAULT                        0x1     /* Use default pager... DEPRECATED */
#define VM_PAGER_COMPRESSOR_NO_SWAP             0x2     /* Active in-core compressor only. */
#define VM_PAGER_COMPRESSOR_WITH_SWAP           0x4     /* Active in-core compressor + swap backend. */
#define VM_PAGER_FREEZER_DEFAULT                0x8     /* Freezer backed by default pager... DEPRECATED */
#define VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP     0x10    /* Freezer backed by in-core compressor only, i.e. frozen data remain in-core compressed. */
#define VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP  0x20    /* Active in-core compressor + Freezer backed by in-core compressor with swap support too. */

#define VM_PAGER_MAX_MODES      6       /* Total number of vm compressor modes supported */

#define VM_CONFIG_COMPRESSOR_IS_PRESENT         (vm_config.compressor_is_present == TRUE)
#define VM_CONFIG_COMPRESSOR_IS_ACTIVE          (vm_config.compressor_is_active == TRUE)
#define VM_CONFIG_SWAP_IS_PRESENT               (vm_config.swap_is_present == TRUE)
#define VM_CONFIG_SWAP_IS_ACTIVE                (vm_config.swap_is_active == TRUE)
#define VM_CONFIG_FREEZER_SWAP_IS_ACTIVE        (vm_config.freezer_swap_is_active == TRUE)
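/*
 * Usage sketch: code gates compressor/swap behavior on vm_config through
 * the VM_CONFIG_* macros rather than on raw VM_PAGER_* mode bits; e.g.
 * VM_DYNAMIC_PAGING_ENABLED() above is just VM_CONFIG_COMPRESSOR_IS_ACTIVE.
 * Illustrative only.
 */
#if 0
if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
	/* pages can be handed to the compressor pager */
	if (VM_CONFIG_SWAP_IS_ACTIVE) {
		/* compressed segments may additionally be swapped out */
	}
}
#endif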
#endif /* KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE

struct vm_pageout_state {
	boolean_t vm_pressure_thread_running;
	boolean_t vm_pressure_changed;
	boolean_t vm_restricted_to_single_processor;
	int vm_compressor_thread_count;

	unsigned int vm_page_speculative_q_age_ms;
	unsigned int vm_page_speculative_percentage;
	unsigned int vm_page_speculative_target;

	unsigned int vm_pageout_swap_wait;
	unsigned int vm_pageout_idle_wait;      /* milliseconds */
	unsigned int vm_pageout_empty_wait;     /* milliseconds */
	unsigned int vm_pageout_burst_wait;     /* milliseconds */
	unsigned int vm_pageout_deadlock_wait;  /* milliseconds */
	unsigned int vm_pageout_deadlock_relief;
	unsigned int vm_pageout_burst_inactive_throttle;

	unsigned int vm_pageout_inactive;
	unsigned int vm_pageout_inactive_used;  /* debugging */
	unsigned int vm_pageout_inactive_clean; /* debugging */

	uint32_t vm_page_filecache_min;
	uint32_t vm_page_filecache_min_divisor;
	uint32_t vm_page_xpmapped_min;
	uint32_t vm_page_xpmapped_min_divisor;
	uint64_t vm_pageout_considered_page_last;

	int vm_page_free_count_init;

	unsigned int vm_memory_pressure;

	int memorystatus_purge_on_critical;
	int memorystatus_purge_on_warning;
	int memorystatus_purge_on_urgent;

	thread_t vm_pageout_external_iothread;
	thread_t vm_pageout_internal_iothread;
};

extern struct vm_pageout_state vm_pageout_state;
/*
 * This structure is used to track the VM_INFO instrumentation
 */
struct vm_pageout_vminfo {
	unsigned long vm_pageout_considered_page;
	unsigned long vm_pageout_considered_bq_internal;
	unsigned long vm_pageout_considered_bq_external;
	unsigned long vm_pageout_skipped_external;

	unsigned long vm_pageout_pages_evicted;
	unsigned long vm_pageout_pages_purged;
	unsigned long vm_pageout_freed_cleaned;
	unsigned long vm_pageout_freed_speculative;
	unsigned long vm_pageout_freed_external;
	unsigned long vm_pageout_freed_internal;
	unsigned long vm_pageout_inactive_dirty_internal;
	unsigned long vm_pageout_inactive_dirty_external;
	unsigned long vm_pageout_inactive_referenced;
	unsigned long vm_pageout_reactivation_limit_exceeded;
	unsigned long vm_pageout_inactive_force_reclaim;
	unsigned long vm_pageout_inactive_nolock;
	unsigned long vm_pageout_filecache_min_reactivated;
	unsigned long vm_pageout_scan_inactive_throttled_internal;
	unsigned long vm_pageout_scan_inactive_throttled_external;

	uint64_t      vm_pageout_compressions;
	uint64_t      vm_compressor_pages_grabbed;
	unsigned long vm_compressor_failed;

	unsigned long vm_page_pages_freed;

	unsigned long vm_phantom_cache_found_ghost;
	unsigned long vm_phantom_cache_added_ghost;
};

extern struct vm_pageout_vminfo vm_pageout_vminfo;
#if DEVELOPMENT || DEBUG

/*
 * This structure records the pageout daemon's actions:
 * how many pages it looks at and what happens to those pages.
 * No locking needed because only one thread modifies the fields.
 */
struct vm_pageout_debug {
	uint32_t vm_pageout_balanced;
	uint32_t vm_pageout_scan_event_counter;
	uint32_t vm_pageout_speculative_dirty;

	uint32_t vm_pageout_inactive_busy;
	uint32_t vm_pageout_inactive_absent;
	uint32_t vm_pageout_inactive_notalive;
	uint32_t vm_pageout_inactive_error;
	uint32_t vm_pageout_inactive_deactivated;

	uint32_t vm_pageout_enqueued_cleaned;

	uint32_t vm_pageout_cleaned_busy;
	uint32_t vm_pageout_cleaned_nolock;
	uint32_t vm_pageout_cleaned_reference_reactivated;
	uint32_t vm_pageout_cleaned_volatile_reactivated;
	uint32_t vm_pageout_cleaned_reactivated;        /* debugging; how many cleaned pages are found to be referenced on pageout (and are therefore reactivated) */
	uint32_t vm_pageout_cleaned_fault_reactivated;

	uint32_t vm_pageout_dirty_no_pager;
	uint32_t vm_pageout_purged_objects;

	uint32_t vm_pageout_scan_throttle;
	uint32_t vm_pageout_scan_reclaimed_throttled;
	uint32_t vm_pageout_scan_burst_throttle;
	uint32_t vm_pageout_scan_empty_throttle;
	uint32_t vm_pageout_scan_swap_throttle;
	uint32_t vm_pageout_scan_deadlock_detected;
	uint32_t vm_pageout_scan_inactive_throttle_success;
	uint32_t vm_pageout_scan_throttle_deferred;

	uint32_t vm_pageout_inactive_external_forced_jetsam_count;

	uint32_t vm_grab_anon_overrides;
	uint32_t vm_grab_anon_nops;

	uint32_t vm_pageout_no_victim;
	unsigned long vm_pageout_throttle_up_count;
	uint32_t vm_page_steal_pageout_page;

	uint32_t vm_cs_validated_resets;
	uint32_t vm_object_iopl_request_sleep_for_cleaning;
	uint32_t vm_page_slide_counter;
	uint32_t vm_page_slide_errors;
	uint32_t vm_page_throttle_count;
	/*
	 * Statistics about UPL enforcement of copy-on-write obligations.
	 */
	unsigned long upl_cow;
	unsigned long upl_cow_again;
	unsigned long upl_cow_pages;
	unsigned long upl_cow_again_pages;
	unsigned long iopl_cow;
	unsigned long iopl_cow_pages;
};

extern struct vm_pageout_debug vm_pageout_debug;

#define VM_PAGEOUT_DEBUG(member, value)                 \
	MACRO_BEGIN                                     \
	vm_pageout_debug.member += value;               \
	MACRO_END
#else
#define VM_PAGEOUT_DEBUG(member, value)
#endif
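/*
 * Usage sketch: callers bump the debug counters unconditionally; on
 * non-DEVELOPMENT/DEBUG kernels the macro expands to nothing and the
 * stores compile away. Illustrative only.
 */
#if 0
VM_PAGEOUT_DEBUG(vm_pageout_inactive_busy, 1);
#endif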
#define MAX_COMPRESSOR_THREAD_COUNT     8

#if DEVELOPMENT || DEBUG
typedef struct vmct_stats_s {
	uint64_t vmct_runtimes[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t vmct_pages[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t vmct_iterations[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t vmct_cthreads_total;
	int32_t vmct_minpages[MAX_COMPRESSOR_THREAD_COUNT];
	int32_t vmct_maxpages[MAX_COMPRESSOR_THREAD_COUNT];
} vmct_stats_t;
#endif /* DEVELOPMENT || DEBUG */

#endif /* XNU_KERNEL_PRIVATE */

#endif /* _VM_VM_PAGEOUT_H_ */