/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_pageout.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Declarations for the pageout daemon interface.
 */

#ifndef _VM_VM_PAGEOUT_H_
#define _VM_VM_PAGEOUT_H_

#ifdef KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>

#include <kern/kern_types.h>
#include <kern/locks.h>

#include <libkern/OSAtomic.h>


#include <vm/vm_options.h>

#ifdef MACH_KERNEL_PRIVATE
#include <vm/vm_page.h>
#endif

#include <sys/kdebug.h>

#define VM_PAGE_AVAILABLE_COUNT()	((unsigned int)(vm_page_cleaned_count))

/* externally manipulated counters */
extern unsigned int vm_pageout_cleaned_fault_reactivated;

#if CONFIG_FREEZE
extern boolean_t memorystatus_freeze_enabled;

struct freezer_context {
	/*
	 * All these counters & variables track the task
	 * being frozen.
	 * Currently we only freeze one task at a time. Should that
	 * change, we'll need to add support for multiple freezer contexts.
	 */

	task_t		freezer_ctx_task;		/* Task being frozen. */

	void		*freezer_ctx_chead;		/* The chead used to track c_segs allocated to freeze the task. */

	uint64_t	freezer_ctx_swapped_bytes;	/* Tracks # of compressed bytes. */

	int		freezer_ctx_uncompressed_pages;	/* Tracks # of uncompressed pages frozen. */

	char		*freezer_ctx_compressor_scratch_buf;	/* Scratch buffer for the compressor algorithm. */
};
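
/*
 * Illustrative sketch only (not a declared interface of this header):
 * a freezer implementation might prime a context like this before
 * freezing a task.  "task" is a hypothetical task_t; the chead and
 * scratch buffer are supplied by the compressor layer.
 *
 *	struct freezer_context ctx = { };
 *
 *	ctx.freezer_ctx_task = task;
 *	// swapped-bytes / uncompressed-page counters start at zero and
 *	// accumulate as the task's pages are frozen
 */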

#endif /* CONFIG_FREEZE */

#define VM_DYNAMIC_PAGING_ENABLED() (VM_CONFIG_COMPRESSOR_IS_ACTIVE)

#if VM_PRESSURE_EVENTS
extern boolean_t vm_pressure_events_enabled;
#endif /* VM_PRESSURE_EVENTS */


/*
 * the following codes are used in the DBG_MACH_WORKINGSET subclass
 * of the DBG_MACH class
 */
#define VM_DISCONNECT_ALL_PAGE_MAPPINGS		0x00
#define VM_DISCONNECT_TASK_PAGE_MAPPINGS	0x01
#define VM_REAL_FAULT_ADDR_INTERNAL		0x02
#define VM_REAL_FAULT_ADDR_PURGABLE		0x03
#define VM_REAL_FAULT_ADDR_EXTERNAL		0x04
#define VM_REAL_FAULT_ADDR_SHAREDCACHE		0x05
#define VM_REAL_FAULT_FAST			0x06
#define VM_REAL_FAULT_SLOW			0x07
#define VM_MAP_LOOKUP_OBJECT			0x08



extern int vm_debug_events;

#define VMF_CHECK_ZFDELAY		0x100
#define VMF_COWDELAY			0x101
#define VMF_ZFDELAY			0x102
#define VMF_COMPRESSORDELAY		0x103

#define VM_PAGEOUT_SCAN			0x104
#define VM_PAGEOUT_BALANCE		0x105
#define VM_PAGEOUT_FREELIST		0x106
#define VM_PAGEOUT_PURGEONE		0x107
#define VM_PAGEOUT_CACHE_EVICT		0x108
#define VM_PAGEOUT_THREAD_BLOCK		0x109
#define VM_PAGEOUT_JETSAM		0x10A
#define VM_INFO1			0x10B
#define VM_INFO2			0x10C
#define VM_INFO3			0x10D
#define VM_INFO4			0x10E
#define VM_INFO5			0x10F
#define VM_INFO6			0x110
#define VM_INFO7			0x111
#define VM_INFO8			0x112
#define VM_INFO9			0x113

#define VM_UPL_PAGE_WAIT		0x120
#define VM_IOPL_PAGE_WAIT		0x121
#define VM_PAGE_WAIT_BLOCK		0x122

#if CONFIG_IOSCHED
#define VM_PAGE_SLEEP			0x123
#define VM_PAGE_EXPEDITE		0x124
#define VM_PAGE_EXPEDITE_NO_MEMORY	0x125
#endif

#define VM_PAGE_GRAB			0x126
#define VM_PAGE_RELEASE			0x127
#define VM_COMPRESSOR_COMPACT_AND_SWAP	0x128
#define VM_COMPRESSOR_DO_DELAYED_COMPACTIONS	0x129


#define VM_PRESSURE_EVENT		0x130
#define VM_EXECVE			0x131
#define VM_WAKEUP_COMPACTOR_SWAPPER	0x132
#define VM_UPL_REQUEST			0x133
#define VM_IOPL_REQUEST			0x134
#define VM_KERN_REQUEST			0x135

#define VM_DATA_WRITE			0x140

#define VM_PRESSURE_LEVEL_CHANGE	0x141

#define VM_PHYS_WRITE_ACCT		0x142

#define VM_DEBUG_EVENT(name, event, control, arg1, arg2, arg3, arg4)	\
	MACRO_BEGIN							\
	if (__improbable(vm_debug_events)) {				\
		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	}								\
	MACRO_END

#define VM_DEBUG_CONSTANT_EVENT(name, event, control, arg1, arg2, arg3, arg4)	\
	MACRO_BEGIN							\
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	MACRO_END
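
/*
 * Example usage (illustrative): bracketing a freelist refill with a
 * VM debug tracepoint.  DBG_FUNC_START/DBG_FUNC_END come from
 * <sys/kdebug.h>; the argument values here are placeholders.
 *
 *	VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST,
 *	    DBG_FUNC_START, vm_page_free_count, 0, 0, 0);
 *	...refill the free list...
 *	VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST,
 *	    DBG_FUNC_END, vm_page_free_count, 0, 0, 0);
 */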

extern void memoryshot(unsigned int event, unsigned int control);

extern void update_vm_info(void);

#if CONFIG_IOSCHED
extern int upl_get_cached_tier(
	upl_t	upl);
#endif

extern void upl_set_iodone(upl_t, void *);
extern void upl_set_iodone_error(upl_t, int);
extern void upl_callout_iodone(upl_t);

extern ppnum_t upl_get_highest_page(
	upl_t	upl);

extern upl_size_t upl_get_size(
	upl_t	upl);

extern upl_t upl_associated_upl(upl_t upl);
extern void upl_set_associated_upl(upl_t upl, upl_t associated_upl);

#ifndef MACH_KERNEL_PRIVATE
typedef struct vm_page  *vm_page_t;
#endif
#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_kern.h>

extern upl_size_t upl_adjusted_size(
	upl_t upl,
	vm_map_offset_t page_mask);
extern vm_object_offset_t upl_adjusted_offset(
	upl_t upl,
	vm_map_offset_t page_mask);
extern vm_object_offset_t upl_get_data_offset(
	upl_t upl);

extern kern_return_t vm_map_create_upl(
	vm_map_t		map,
	vm_map_address_t	offset,
	upl_size_t		*upl_size,
	upl_t			*upl,
	upl_page_info_array_t	page_list,
	unsigned int		*count,
	upl_control_flags_t	*flags,
	vm_tag_t		tag);
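
/*
 * Illustrative sketch (error handling elided): creating a UPL that
 * covers one page of a user map.  "map" and "uaddr" are hypothetical,
 * and the control flag shown is just one plausible choice from
 * <mach/memory_object_types.h>.
 *
 *	upl_t			upl = NULL;
 *	upl_size_t		size = PAGE_SIZE;
 *	upl_page_info_t		pl[1];
 *	unsigned int		count = 1;
 *	upl_control_flags_t	flags = UPL_SET_LITE;
 *
 *	kern_return_t kr = vm_map_create_upl(map, uaddr, &size, &upl,
 *	    pl, &count, &flags, VM_KERN_MEMORY_NONE);
 */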

extern void iopl_valid_data(
	upl_t	upl_ptr,
	vm_tag_t tag);

extern void vm_page_free_list(
	vm_page_t	mem,
	boolean_t	prepare_object);

extern kern_return_t vm_page_alloc_list(
	int		page_count,
	kma_flags_t	flags,
	vm_page_t	*list);
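
/*
 * Illustrative pairing of the two routines above (the flag choice and
 * the prepare_object argument are assumptions; both depend on the
 * caller's context):
 *
 *	vm_page_t list = VM_PAGE_NULL;
 *
 *	if (vm_page_alloc_list(4, KMA_NOPAGEWAIT, &list) == KERN_SUCCESS) {
 *		...use the chain, walking it with vm_page_get_next()...
 *		vm_page_free_list(list, FALSE);
 *	}
 */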

#endif /* XNU_KERNEL_PRIVATE */

extern struct vnode * upl_lookup_vnode(upl_t upl);

extern void vm_page_set_offset(vm_page_t page, vm_object_offset_t offset);
extern vm_object_offset_t vm_page_get_offset(vm_page_t page);
extern ppnum_t vm_page_get_phys_page(vm_page_t page);
extern vm_page_t vm_page_get_next(vm_page_t page);

extern kern_return_t mach_vm_pressure_level_monitor(boolean_t wait_for_pressure, unsigned int *pressure_level);
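
/*
 * Example (illustrative): poll the current pressure level without
 * blocking; passing wait_for_pressure == TRUE instead blocks until
 * the level changes.
 *
 *	unsigned int level = 0;
 *
 *	if (mach_vm_pressure_level_monitor(FALSE, &level) == KERN_SUCCESS) {
 *		...react to the reported level...
 *	}
 */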

#if XNU_TARGET_OS_OSX
extern kern_return_t vm_pageout_wait(uint64_t deadline);
#endif /* XNU_TARGET_OS_OSX */

#ifdef MACH_KERNEL_PRIVATE

#include <vm/vm_page.h>

extern unsigned int	vm_pageout_scan_event_counter;
extern unsigned int	vm_page_anonymous_count;


/*
 * must hold the page queues lock to
 * manipulate this structure
 */
struct vm_pageout_queue {
	vm_page_queue_head_t	pgo_pending;	/* laundry pages to be processed by pager's iothread */
	uint64_t		pgo_tid;	/* thread ID of I/O thread that services this queue */
	unsigned int		pgo_laundry;	/* current count of laundry pages on queue or in flight */
	unsigned int		pgo_maxlaundry;	/* throttle threshold; see VM_PAGE_Q_THROTTLED() below */

	uint32_t
		pgo_idle:1,		/* iothread is blocked waiting for work to do */
		pgo_busy:1,		/* iothread is currently processing request from pgo_pending */
		pgo_throttled:1,	/* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */
		pgo_lowpriority:1,	/* iothread is set to use low priority I/O */
		pgo_draining:1,		/* queue is being drained; waiter is woken when the laundry clears */
		pgo_inited:1,		/* queue has been initialized */
		pgo_unused_bits:26;
};

#define VM_PAGE_Q_THROTTLED(q)		\
	((q)->pgo_laundry >= (q)->pgo_maxlaundry)

extern struct vm_pageout_queue vm_pageout_queue_internal;
extern struct vm_pageout_queue vm_pageout_queue_external;
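
/*
 * Example (illustrative): vm_pageout_scan-style check of whether the
 * internal (compressor) queue can accept more laundry.
 *
 *	if (VM_PAGE_Q_THROTTLED(&vm_pageout_queue_internal)) {
 *		...back off: too many laundry pages queued or in flight...
 *	}
 */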


/*
 * Routines exported to Mach.
 */
extern void		vm_pageout(void);

extern kern_return_t	vm_pageout_internal_start(void);

extern void		vm_pageout_object_terminate(
	vm_object_t	object);

extern void		vm_pageout_cluster(
	vm_page_t	m);

extern void		vm_pageout_initialize_page(
	vm_page_t	m);

/* UPL exported routines and structures */

#define upl_lock_init(object)		lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define upl_lock_destroy(object)	lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define upl_lock(object)		lck_mtx_lock(&(object)->Lock)
#define upl_unlock(object)		lck_mtx_unlock(&(object)->Lock)
#define upl_try_lock(object)		lck_mtx_try_lock(&(object)->Lock)
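
/*
 * Typical usage of the lock macros above (illustrative); "upl" is a
 * upl_t.  upl_try_lock() returns TRUE on success, per
 * lck_mtx_try_lock().
 *
 *	upl_lock(upl);
 *	...mutate UPL state...
 *	upl_unlock(upl);
 */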

#define MAX_VECTOR_UPL_ELEMENTS	8

struct _vector_upl_iostates {
	upl_offset_t	offset;
	upl_size_t	size;
};

typedef struct _vector_upl_iostates vector_upl_iostates_t;

struct _vector_upl {
	upl_size_t		size;
	uint32_t		num_upls;
	uint32_t		invalid_upls;
	uint32_t		_reserved;
	vm_map_t		submap;
	vm_offset_t		submap_dst_addr;
	vm_object_offset_t	offset;
	upl_t			upl_elems[MAX_VECTOR_UPL_ELEMENTS];
	upl_page_info_array_t	pagelist;
	vector_upl_iostates_t	upl_iostates[MAX_VECTOR_UPL_ELEMENTS];
};

typedef struct _vector_upl* vector_upl_t;

/* universal page list structure */

#if UPL_DEBUG
#define UPL_DEBUG_STACK_FRAMES	16
#define UPL_DEBUG_COMMIT_RECORDS 4

struct ucd {
	upl_offset_t	c_beg;
	upl_offset_t	c_end;
	int		c_aborted;
	void		*c_retaddr[UPL_DEBUG_STACK_FRAMES];
};
#endif

struct upl_io_completion {
	void	*io_context;
	void	(*io_done)(void *, int);

	int	io_error;
};


struct upl {
	decl_lck_mtx_data(, Lock);	/* Synchronization */
	int		ref_count;
	int		ext_ref_count;
	int		flags;
	/*
	 * XXX CAUTION: to accommodate devices with "mixed page sizes",
	 * u_offset and u_size are now byte-aligned and no longer
	 * page-aligned, on all devices.
	 */
	vm_object_offset_t u_offset;
	upl_size_t	u_size;		/* size in bytes of the address space */
	vm_offset_t	kaddr;		/* secondary mapping in kernel */
	vm_object_t	map_object;
	ppnum_t		highest_page;
	void		*vector_upl;
	upl_t		associated_upl;
	struct upl_io_completion *upl_iodone;
#if CONFIG_IOSCHED
	int		upl_priority;
	uint64_t	*upl_reprio_info;
	void		*decmp_io_upl;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	thread_t	upl_creator;
	queue_chain_t	uplq;		/* List of outstanding upls on an obj */
#endif
#if UPL_DEBUG
	uintptr_t	ubc_alias1;
	uintptr_t	ubc_alias2;

	uint32_t	upl_state;
	uint32_t	upl_commit_index;
	void		*upl_create_retaddr[UPL_DEBUG_STACK_FRAMES];

	struct ucd	upl_commit_records[UPL_DEBUG_COMMIT_RECORDS];
#endif /* UPL_DEBUG */
};

/* upl struct flags */
#define UPL_PAGE_LIST_MAPPED	0x1
#define UPL_KERNEL_MAPPED	0x2
#define UPL_CLEAR_DIRTY		0x4
#define UPL_COMPOSITE_LIST	0x8
#define UPL_INTERNAL		0x10
#define UPL_PAGE_SYNC_DONE	0x20
#define UPL_DEVICE_MEMORY	0x40
#define UPL_PAGEOUT		0x80
#define UPL_LITE		0x100
#define UPL_IO_WIRE		0x200
#define UPL_ACCESS_BLOCKED	0x400
#define UPL_SHADOWED		0x1000
#define UPL_KERNEL_OBJECT	0x2000
#define UPL_VECTOR		0x4000
#define UPL_SET_DIRTY		0x8000
#define UPL_HAS_BUSY		0x10000
#define UPL_TRACKED_BY_OBJECT	0x20000
#define UPL_EXPEDITE_SUPPORTED	0x40000
#define UPL_DECMP_REQ		0x80000
#define UPL_DECMP_REAL_IO	0x100000

/* flags for upl_create flags parameter */
#define UPL_CREATE_EXTERNAL	0
#define UPL_CREATE_INTERNAL	0x1
#define UPL_CREATE_LITE		0x2
#define UPL_CREATE_IO_TRACKING	0x4
#define UPL_CREATE_EXPEDITE_SUP	0x8

extern upl_t vector_upl_create(vm_offset_t);
extern void vector_upl_deallocate(upl_t);
extern boolean_t vector_upl_is_valid(upl_t);
extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t);
extern void vector_upl_set_pagelist(upl_t);
extern void vector_upl_set_submap(upl_t, vm_map_t, vm_offset_t);
extern void vector_upl_get_submap(upl_t, vm_map_t*, vm_offset_t*);
extern void vector_upl_set_iostate(upl_t, upl_t, upl_offset_t, upl_size_t);
extern void vector_upl_get_iostate(upl_t, upl_t, upl_offset_t*, upl_size_t*);
extern void vector_upl_get_iostate_byindex(upl_t, uint32_t, upl_offset_t*, upl_size_t*);
extern upl_t vector_upl_subupl_byindex(upl_t, uint32_t);
extern upl_t vector_upl_subupl_byoffset(upl_t, upl_offset_t*, upl_size_t*);

extern void vm_object_set_pmap_cache_attr(
	vm_object_t		object,
	upl_page_info_array_t	user_page_list,
	unsigned int		num_pages,
	boolean_t		batch_pmap_op);

extern kern_return_t vm_object_iopl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	upl_control_flags_t	cntrl_flags,
	vm_tag_t		tag);

extern kern_return_t vm_object_super_upl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_size_t		super_cluster,
	upl_t			*upl,
	upl_page_info_t		*user_page_list,
	unsigned int		*page_list_count,
	upl_control_flags_t	cntrl_flags,
	vm_tag_t		tag);

/* should be just a regular vm_map_enter() */
extern kern_return_t vm_map_enter_upl(
	vm_map_t		map,
	upl_t			upl,
	vm_map_offset_t		*dst_addr);

/* should be just a regular vm_map_remove() */
extern kern_return_t vm_map_remove_upl(
	vm_map_t		map,
	upl_t			upl);
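
/*
 * Illustrative pairing of the two routines above (error handling
 * elided; use of kernel_map is an assumption): map a UPL, access its
 * pages, then unmap.
 *
 *	vm_map_offset_t dst_addr;
 *
 *	if (vm_map_enter_upl(kernel_map, upl, &dst_addr) == KERN_SUCCESS) {
 *		...access [dst_addr, dst_addr + upl_get_size(upl))...
 *		(void) vm_map_remove_upl(kernel_map, upl);
 *	}
 */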

/* wired page list structure */
typedef uint32_t *wpl_array_t;

extern struct vm_page_delayed_work*
vm_page_delayed_work_get_ctx(void);

extern void
vm_page_delayed_work_finish_ctx(struct vm_page_delayed_work* dwp);

extern void vm_page_free_reserve(int pages);

extern void vm_pageout_throttle_down(vm_page_t page);
extern void vm_pageout_throttle_up(vm_page_t page);

extern kern_return_t vm_paging_map_object(
	vm_page_t		page,
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_prot_t		protection,
	boolean_t		can_unlock_object,
	vm_map_size_t		*size,		/* IN/OUT */
	vm_map_offset_t		*address,	/* OUT */
	boolean_t		*need_unmap);	/* OUT */
extern void vm_paging_unmap_object(
	vm_object_t		object,
	vm_map_offset_t		start,
	vm_map_offset_t		end);
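
/*
 * Illustrative sketch of the map/unmap pairing above: map one page
 * for kernel access, then tear the mapping down only if the call
 * reports that one was created.  Object locking prerequisites are the
 * caller's responsibility.
 *
 *	vm_map_size_t	size = PAGE_SIZE;
 *	vm_map_offset_t	addr;
 *	boolean_t	need_unmap;
 *
 *	if (vm_paging_map_object(page, object, offset, VM_PROT_READ,
 *	        FALSE, &size, &addr, &need_unmap) == KERN_SUCCESS) {
 *		...read the page through addr...
 *		if (need_unmap) {
 *			vm_paging_unmap_object(object, addr, addr + size);
 *		}
 *	}
 */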
decl_simple_lock_data(extern, vm_paging_lock);

/*
 * Backing store throttle when BS is exhausted
 */
extern unsigned int	vm_backing_store_low;

extern void vm_pageout_steal_laundry(
	vm_page_t page,
	boolean_t queues_locked);

#endif /* MACH_KERNEL_PRIVATE */

#if UPL_DEBUG
extern kern_return_t	upl_ubc_alias_set(
	upl_t upl,
	uintptr_t alias1,
	uintptr_t alias2);
extern int	upl_ubc_alias_get(
	upl_t upl,
	uintptr_t * al,
	uintptr_t * al2);
#endif /* UPL_DEBUG */

extern void vm_countdirtypages(void);

extern void vm_backing_store_disable(
	boolean_t	suspend);

extern kern_return_t upl_transpose(
	upl_t	upl1,
	upl_t	upl2);

extern kern_return_t mach_vm_pressure_monitor(
	boolean_t	wait_for_pressure,
	unsigned int	nsecs_monitored,
	unsigned int	*pages_reclaimed_p,
	unsigned int	*pages_wanted_p);
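
/*
 * Example (illustrative; the monitoring-window value of 1 is an
 * arbitrary placeholder): poll reclaim statistics without waiting for
 * pressure.  A nonzero *pages_wanted_p suggests the pageout daemon
 * still wants pages.
 *
 *	unsigned int reclaimed = 0, wanted = 0;
 *
 *	if (mach_vm_pressure_monitor(FALSE, 1, &reclaimed, &wanted) ==
 *	    KERN_SUCCESS && wanted > 0) {
 *		...system is under memory pressure...
 *	}
 */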

extern kern_return_t
vm_set_buffer_cleanup_callout(
	boolean_t (*func)(int));

struct vm_page_stats_reusable {
	SInt32		reusable_count;
	uint64_t	reusable;
	uint64_t	reused;
	uint64_t	reused_wire;
	uint64_t	reused_remove;
	uint64_t	all_reusable_calls;
	uint64_t	partial_reusable_calls;
	uint64_t	all_reuse_calls;
	uint64_t	partial_reuse_calls;
	uint64_t	reusable_pages_success;
	uint64_t	reusable_pages_failure;
	uint64_t	reusable_pages_shared;
	uint64_t	reuse_pages_success;
	uint64_t	reuse_pages_failure;
	uint64_t	can_reuse_success;
	uint64_t	can_reuse_failure;
	uint64_t	reusable_reclaimed;
	uint64_t	reusable_nonwritable;
	uint64_t	reusable_shared;
	uint64_t	free_shared;
};
extern struct vm_page_stats_reusable vm_page_stats_reusable;

extern int hibernate_flush_memory(void);
extern void hibernate_reset_stats(void);
extern void hibernate_create_paddr_map(void);

extern void vm_set_restrictions(unsigned int num_cpus);

extern int vm_compressor_mode;
extern kern_return_t vm_pageout_compress_page(void **, char *, vm_page_t);
extern void vm_pageout_anonymous_pages(void);
extern void vm_pageout_disconnect_all_pages(void);


struct vm_config {
	boolean_t	compressor_is_present;	/* compressor is initialized and can be used by the freezer, the sweep or the pager */
	boolean_t	compressor_is_active;	/* pager can actively compress pages... 'compressor_is_present' must be set */
	boolean_t	swap_is_present;	/* swap is initialized and can be used by the freezer, the sweep or the pager */
	boolean_t	swap_is_active;		/* pager can actively swap out compressed segments... 'swap_is_present' must be set */
	boolean_t	freezer_swap_is_active;	/* freezer can swap out frozen tasks... "compressor_is_present + swap_is_present" must be set */
};

extern struct vm_config vm_config;


#define VM_PAGER_NOT_CONFIGURED			0x0	/* no compressor or swap configured */
#define VM_PAGER_DEFAULT			0x1	/* Use default pager... DEPRECATED */
#define VM_PAGER_COMPRESSOR_NO_SWAP		0x2	/* Active in-core compressor only. */
#define VM_PAGER_COMPRESSOR_WITH_SWAP		0x4	/* Active in-core compressor + swap backend. */
#define VM_PAGER_FREEZER_DEFAULT		0x8	/* Freezer backed by default pager... DEPRECATED */
#define VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP	0x10	/* Freezer backed by in-core compressor only i.e. frozen data remain in-core compressed.*/
#define VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP	0x20	/* Active in-core compressor + Freezer backed by in-core compressor with swap support too.*/

#define VM_PAGER_MAX_MODES			6	/* Total number of vm compressor modes supported */


#define VM_CONFIG_COMPRESSOR_IS_PRESENT		(vm_config.compressor_is_present == TRUE)
#define VM_CONFIG_COMPRESSOR_IS_ACTIVE		(vm_config.compressor_is_active == TRUE)
#define VM_CONFIG_SWAP_IS_PRESENT		(vm_config.swap_is_present == TRUE)
#define VM_CONFIG_SWAP_IS_ACTIVE		(vm_config.swap_is_active == TRUE)
#define VM_CONFIG_FREEZER_SWAP_IS_ACTIVE	(vm_config.freezer_swap_is_active == TRUE)
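
/*
 * Example (illustrative): guarding paging paths on the current
 * configuration, in the same spirit as VM_DYNAMIC_PAGING_ENABLED()
 * above.
 *
 *	if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
 *		...pages may be compressed...
 *		if (VM_CONFIG_SWAP_IS_ACTIVE) {
 *			...compressed segments may also be swapped out...
 *		}
 *	}
 */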

#endif /* KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE

struct vm_pageout_state {
	boolean_t vm_pressure_thread_running;
	boolean_t vm_pressure_changed;
	boolean_t vm_restricted_to_single_processor;
	int vm_compressor_thread_count;

	unsigned int vm_page_speculative_q_age_ms;
	unsigned int vm_page_speculative_percentage;
	unsigned int vm_page_speculative_target;

	unsigned int vm_pageout_swap_wait;
	unsigned int vm_pageout_idle_wait;	/* milliseconds */
	unsigned int vm_pageout_empty_wait;	/* milliseconds */
	unsigned int vm_pageout_burst_wait;	/* milliseconds */
	unsigned int vm_pageout_deadlock_wait;	/* milliseconds */
	unsigned int vm_pageout_deadlock_relief;
	unsigned int vm_pageout_burst_inactive_throttle;

	unsigned int vm_pageout_inactive;
	unsigned int vm_pageout_inactive_used;	/* debugging */
	unsigned int vm_pageout_inactive_clean;	/* debugging */

	uint32_t vm_page_filecache_min;
	uint32_t vm_page_filecache_min_divisor;
	uint32_t vm_page_xpmapped_min;
	uint32_t vm_page_xpmapped_min_divisor;
	uint64_t vm_pageout_considered_page_last;

	int vm_page_free_count_init;

	unsigned int vm_memory_pressure;

	int memorystatus_purge_on_critical;
	int memorystatus_purge_on_warning;
	int memorystatus_purge_on_urgent;

	thread_t vm_pageout_external_iothread;
	thread_t vm_pageout_internal_iothread;
};

extern struct vm_pageout_state vm_pageout_state;

/*
 * This structure is used to track the VM_INFO instrumentation
 */
struct vm_pageout_vminfo {
	unsigned long vm_pageout_considered_page;
	unsigned long vm_pageout_considered_bq_internal;
	unsigned long vm_pageout_considered_bq_external;
	unsigned long vm_pageout_skipped_external;

	unsigned long vm_pageout_pages_evicted;
	unsigned long vm_pageout_pages_purged;
	unsigned long vm_pageout_freed_cleaned;
	unsigned long vm_pageout_freed_speculative;
	unsigned long vm_pageout_freed_external;
	unsigned long vm_pageout_freed_internal;
	unsigned long vm_pageout_inactive_dirty_internal;
	unsigned long vm_pageout_inactive_dirty_external;
	unsigned long vm_pageout_inactive_referenced;
	unsigned long vm_pageout_reactivation_limit_exceeded;
	unsigned long vm_pageout_inactive_force_reclaim;
	unsigned long vm_pageout_inactive_nolock;
	unsigned long vm_pageout_filecache_min_reactivated;
	unsigned long vm_pageout_scan_inactive_throttled_internal;
	unsigned long vm_pageout_scan_inactive_throttled_external;

	uint64_t vm_pageout_compressions;
	uint64_t vm_compressor_pages_grabbed;
	unsigned long vm_compressor_failed;

	unsigned long vm_page_pages_freed;

	unsigned long vm_phantom_cache_found_ghost;
	unsigned long vm_phantom_cache_added_ghost;
};

extern struct vm_pageout_vminfo vm_pageout_vminfo;


#if DEVELOPMENT || DEBUG

/*
 * This structure records the pageout daemon's actions:
 * how many pages it looks at and what happens to those pages.
 * No locking needed because only one thread modifies the fields.
 */
struct vm_pageout_debug {
	uint32_t vm_pageout_balanced;
	uint32_t vm_pageout_scan_event_counter;
	uint32_t vm_pageout_speculative_dirty;

	uint32_t vm_pageout_inactive_busy;
	uint32_t vm_pageout_inactive_absent;
	uint32_t vm_pageout_inactive_notalive;
	uint32_t vm_pageout_inactive_error;
	uint32_t vm_pageout_inactive_deactivated;

	uint32_t vm_pageout_enqueued_cleaned;

	uint32_t vm_pageout_cleaned_busy;
	uint32_t vm_pageout_cleaned_nolock;
	uint32_t vm_pageout_cleaned_reference_reactivated;
	uint32_t vm_pageout_cleaned_volatile_reactivated;
	uint32_t vm_pageout_cleaned_reactivated;	/* debugging; how many cleaned pages are found to be referenced on pageout (and are therefore reactivated) */
	uint32_t vm_pageout_cleaned_fault_reactivated;

	uint32_t vm_pageout_dirty_no_pager;
	uint32_t vm_pageout_purged_objects;

	uint32_t vm_pageout_scan_throttle;
	uint32_t vm_pageout_scan_reclaimed_throttled;
	uint32_t vm_pageout_scan_burst_throttle;
	uint32_t vm_pageout_scan_empty_throttle;
	uint32_t vm_pageout_scan_swap_throttle;
	uint32_t vm_pageout_scan_deadlock_detected;
	uint32_t vm_pageout_scan_inactive_throttle_success;
	uint32_t vm_pageout_scan_throttle_deferred;

	uint32_t vm_pageout_inactive_external_forced_jetsam_count;

	uint32_t vm_grab_anon_overrides;
	uint32_t vm_grab_anon_nops;

	uint32_t vm_pageout_no_victim;
	unsigned long vm_pageout_throttle_up_count;
	uint32_t vm_page_steal_pageout_page;

	uint32_t vm_cs_validated_resets;
	uint32_t vm_object_iopl_request_sleep_for_cleaning;
	uint32_t vm_page_slide_counter;
	uint32_t vm_page_slide_errors;
	uint32_t vm_page_throttle_count;
	/*
	 * Statistics about UPL enforcement of copy-on-write obligations.
	 */
	unsigned long upl_cow;
	unsigned long upl_cow_again;
	unsigned long upl_cow_pages;
	unsigned long upl_cow_again_pages;
	unsigned long iopl_cow;
	unsigned long iopl_cow_pages;
};

extern struct vm_pageout_debug vm_pageout_debug;

#define VM_PAGEOUT_DEBUG(member, value)			\
	MACRO_BEGIN					\
	vm_pageout_debug.member += value;		\
	MACRO_END
#else
#define VM_PAGEOUT_DEBUG(member, value)
#endif /* DEVELOPMENT || DEBUG */
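
/*
 * Example usage (illustrative): bump a debug counter; on RELEASE
 * kernels this compiles to nothing because of the empty definition
 * above.
 *
 *	VM_PAGEOUT_DEBUG(vm_pageout_inactive_busy, 1);
 */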

#define MAX_COMPRESSOR_THREAD_COUNT	8

#if DEVELOPMENT || DEBUG
typedef struct vmct_stats_s {
	uint64_t vmct_runtimes[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t vmct_pages[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t vmct_iterations[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t vmct_cthreads_total;
	int32_t vmct_minpages[MAX_COMPRESSOR_THREAD_COUNT];
	int32_t vmct_maxpages[MAX_COMPRESSOR_THREAD_COUNT];
} vmct_stats_t;
#endif /* DEVELOPMENT || DEBUG */
#endif /* XNU_KERNEL_PRIVATE */
#endif /* _VM_VM_PAGEOUT_H_ */