/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_pageout.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Declarations for the pageout daemon interface.
 */

#ifndef	_VM_VM_PAGEOUT_H_
#define	_VM_VM_PAGEOUT_H_

#ifdef	KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>

#include <kern/kern_types.h>
#include <kern/lock.h>

#include <libkern/OSAtomic.h>


#include <vm/vm_options.h>

#ifdef	MACH_KERNEL_PRIVATE
#include <vm/vm_page.h>
#endif

#include <sys/kdebug.h>

#define VM_PAGE_AVAILABLE_COUNT()	((unsigned int)(vm_page_cleaned_count))

/* externally manipulated counters */
extern unsigned int vm_pageout_cleaned_reactivated, vm_pageout_cleaned_fault_reactivated, vm_pageout_cleaned_commit_reactivated;

#if CONFIG_JETSAM
#define LATENCY_JETSAM	FALSE
#if LATENCY_JETSAM
#define JETSAM_LATENCY_TOKEN_AGE	3000	/* 3ms */
#define NUM_OF_JETSAM_LATENCY_TOKENS	1000

#define JETSAM_AGE_NOTIFY_CRITICAL	1500000	/* 1.5 secs */

extern boolean_t	jlp_init;
extern uint64_t		jlp_time, jlp_current;
extern unsigned int	latency_jetsam_wakeup;
#endif /* LATENCY_JETSAM */
#endif /* CONFIG_JETSAM */

#if CONFIG_FREEZE
extern boolean_t memorystatus_freeze_enabled;
#define VM_DYNAMIC_PAGING_ENABLED(port)	((COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) || (memorystatus_freeze_enabled == FALSE && IP_VALID(port)))
#else
#define VM_DYNAMIC_PAGING_ENABLED(port)	(COMPRESSED_PAGER_IS_ACTIVE || IP_VALID(port))
#endif


extern int	vm_debug_events;

#define VMF_CHECK_ZFDELAY		0x100
#define VMF_COWDELAY			0x101
#define VMF_ZFDELAY			0x102
#define VMF_COMPRESSORDELAY		0x103

#define VM_PAGEOUT_SCAN			0x104
#define VM_PAGEOUT_BALANCE		0x105
#define VM_PAGEOUT_FREELIST		0x106
#define VM_PAGEOUT_PURGEONE		0x107
#define VM_PAGEOUT_CACHE_EVICT		0x108
#define VM_PAGEOUT_THREAD_BLOCK		0x109
#define VM_PAGEOUT_JETSAM		0x10A
#define VM_PAGEOUT_PAGE_TOKEN		0x10B

#define VM_UPL_PAGE_WAIT		0x120
#define VM_IOPL_PAGE_WAIT		0x121
#define VM_PAGE_WAIT_BLOCK		0x122

#define VM_PRESSURE_EVENT		0x130
#define VM_EXECVE			0x131
#define VM_WAKEUP_COMPACTOR_SWAPPER	0x132

#define VM_DEBUG_EVENT(name, event, control, arg1, arg2, arg3, arg4)	\
	MACRO_BEGIN							\
	if (vm_debug_events) {						\
		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	}								\
	MACRO_END

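/*
 * Illustrative sketch only: how one of the event codes above might be
 * emitted through VM_DEBUG_EVENT.  The counters shown are assumed to be
 * the usual globals from <vm/vm_page.h>; real call sites (in vm_pageout.c)
 * may pass different arguments.  Nothing is logged unless the
 * vm_debug_events global is non-zero.
 *
 *	VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
 *		       vm_page_free_count, vm_page_inactive_count, 0, 0);
 */
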
extern void inline memoryshot(unsigned int event, unsigned int control);

extern kern_return_t vm_map_create_upl(
	vm_map_t		map,
	vm_map_address_t	offset,
	upl_size_t		*upl_size,
	upl_t			*upl,
	upl_page_info_array_t	page_list,
	unsigned int		*count,
	int			*flags);

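/*
 * Hedged usage sketch for vm_map_create_upl(): building a UPL that covers
 * one page of the current task's map.  "user_addr" is a hypothetical caller
 * variable; UPL_NULL, UPL_COPYOUT_FROM, upl_abort() and
 * UPL_ABORT_FREE_ON_EMPTY are assumed to come from
 * <mach/memory_object_types.h>; error handling is elided.
 *
 *	upl_size_t		size  = PAGE_SIZE;
 *	upl_t			upl   = UPL_NULL;
 *	upl_page_info_t		pl[1];
 *	unsigned int		count = 1;
 *	int			flags = UPL_COPYOUT_FROM;
 *
 *	if (vm_map_create_upl(current_map(), user_addr, &size, &upl,
 *			      pl, &count, &flags) == KERN_SUCCESS) {
 *		(void) upl_abort(upl, UPL_ABORT_FREE_ON_EMPTY);
 *	}
 */
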
extern ppnum_t upl_get_highest_page(
	upl_t			upl);

extern upl_size_t upl_get_size(
	upl_t			upl);


#ifndef	MACH_KERNEL_PRIVATE
typedef struct vm_page	*vm_page_t;
#endif

extern void		vm_page_free_list(
				vm_page_t	mem,
				boolean_t	prepare_object);

extern kern_return_t	vm_page_alloc_list(
				int		page_count,
				int		flags,
				vm_page_t	*list);

extern void		vm_page_set_offset(vm_page_t page, vm_object_offset_t offset);
extern vm_object_offset_t vm_page_get_offset(vm_page_t page);
extern ppnum_t		vm_page_get_phys_page(vm_page_t page);
extern vm_page_t	vm_page_get_next(vm_page_t page);

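/*
 * Hedged sketch of the opaque page-list interface above: allocate a short
 * list of pages, walk it, then free it.  The flags value of 0 is an
 * assumption made for illustration; real callers (e.g. in IOKit) may pass
 * allocation flags.
 *
 *	vm_page_t list = VM_PAGE_NULL;
 *	if (vm_page_alloc_list(16, 0, &list) == KERN_SUCCESS) {
 *		vm_page_t p;
 *		for (p = list; p != VM_PAGE_NULL; p = vm_page_get_next(p)) {
 *			ppnum_t pnum = vm_page_get_phys_page(p);
 *			(void) pnum;
 *		}
 *		vm_page_free_list(list, FALSE);
 *	}
 */
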
extern kern_return_t	mach_vm_pressure_level_monitor(boolean_t wait_for_pressure, unsigned int *pressure_level);

#ifdef	MACH_KERNEL_PRIVATE

#include <vm/vm_page.h>

extern unsigned int	vm_pageout_scan_event_counter;
extern unsigned int	vm_page_anonymous_count;


/*
 * must hold the page queues lock to
 * manipulate this structure
 */
struct vm_pageout_queue {
	queue_head_t	pgo_pending;	/* laundry pages to be processed by pager's iothread */
	unsigned int	pgo_laundry;	/* current count of laundry pages on queue or in flight */
	unsigned int	pgo_maxlaundry;
	uint64_t	pgo_tid;	/* thread ID of I/O thread that services this queue */
	uint8_t		pgo_lowpriority; /* iothread is set to use low priority I/O */

	unsigned int	pgo_idle:1,	/* iothread is blocked waiting for work to do */
			pgo_busy:1,	/* iothread is currently processing request from pgo_pending */
			pgo_throttled:1,/* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */
			pgo_draining:1,
			pgo_inited:1,
			:0;
};

#define VM_PAGE_Q_THROTTLED(q)		\
	((q)->pgo_laundry >= (q)->pgo_maxlaundry)

extern struct vm_pageout_queue	vm_pageout_queue_internal;
extern struct vm_pageout_queue	vm_pageout_queue_external;

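/*
 * Hedged sketch: checking for laundry back-pressure on the external pageout
 * queue with VM_PAGE_Q_THROTTLED().  Per the comment on struct
 * vm_pageout_queue, the page queues lock must be held;
 * vm_page_lock_queues()/vm_page_unlock_queues() are assumed to be the
 * <vm/vm_page.h> helpers for that lock.
 *
 *	vm_page_lock_queues();
 *	if (VM_PAGE_Q_THROTTLED(&vm_pageout_queue_external)) {
 *		... hold off queuing more laundry until pgo_laundry drops ...
 *	}
 *	vm_page_unlock_queues();
 */
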
/*
 *	Routines exported to Mach.
 */
extern void		vm_pageout(void);

extern kern_return_t	vm_pageout_internal_start(void);

extern void		vm_pageout_object_terminate(
				vm_object_t	object);

extern void		vm_pageout_cluster(
				vm_page_t	m,
				boolean_t	pageout);

extern void		vm_pageout_initialize_page(
				vm_page_t	m);

extern void		vm_pageclean_setup(
				vm_page_t		m,
				vm_page_t		new_m,
				vm_object_t		new_object,
				vm_object_offset_t	new_offset);

/* UPL exported routines and structures */

#define upl_lock_init(object)		lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define upl_lock_destroy(object)	lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define upl_lock(object)		lck_mtx_lock(&(object)->Lock)
#define upl_unlock(object)		lck_mtx_unlock(&(object)->Lock)

#define MAX_VECTOR_UPL_ELEMENTS	8

struct _vector_upl_iostates {
	upl_offset_t	offset;
	upl_size_t	size;
};

typedef struct _vector_upl_iostates vector_upl_iostates_t;

struct _vector_upl {
	upl_size_t		size;
	uint32_t		num_upls;
	uint32_t		invalid_upls;
	uint32_t		_reserved;
	vm_map_t		submap;
	vm_offset_t		submap_dst_addr;
	vm_object_offset_t	offset;
	upl_t			upl_elems[MAX_VECTOR_UPL_ELEMENTS];
	upl_page_info_array_t	pagelist;
	vector_upl_iostates_t	upl_iostates[MAX_VECTOR_UPL_ELEMENTS];
};

typedef struct _vector_upl *vector_upl_t;

/* universal page list structure */

#if UPL_DEBUG
#define UPL_DEBUG_STACK_FRAMES	16
#define UPL_DEBUG_COMMIT_RECORDS 4

struct ucd {
	upl_offset_t	c_beg;
	upl_offset_t	c_end;
	int		c_aborted;
	void		*c_retaddr[UPL_DEBUG_STACK_FRAMES];
};
#endif


struct upl {
	decl_lck_mtx_data(,	Lock)	/* Synchronization */
	int		ref_count;
	int		ext_ref_count;
	int		flags;
	vm_object_t	src_object;	/* object derived from */
	vm_object_offset_t offset;
	upl_size_t	size;		/* size in bytes of the address space */
	vm_offset_t	kaddr;		/* secondary mapping in kernel */
	vm_object_t	map_object;
	ppnum_t		highest_page;
	void		*vector_upl;
#if UPL_DEBUG
	uintptr_t	ubc_alias1;
	uintptr_t	ubc_alias2;
	queue_chain_t	uplq;		/* List of outstanding upls on an obj */

	thread_t	upl_creator;
	uint32_t	upl_state;
	uint32_t	upl_commit_index;
	void		*upl_create_retaddr[UPL_DEBUG_STACK_FRAMES];

	struct ucd	upl_commit_records[UPL_DEBUG_COMMIT_RECORDS];
#endif	/* UPL_DEBUG */
};

/* upl struct flags */
#define UPL_PAGE_LIST_MAPPED	0x1
#define UPL_KERNEL_MAPPED	0x2
#define UPL_CLEAR_DIRTY		0x4
#define UPL_COMPOSITE_LIST	0x8
#define UPL_INTERNAL		0x10
#define UPL_PAGE_SYNC_DONE	0x20
#define UPL_DEVICE_MEMORY	0x40
#define UPL_PAGEOUT		0x80
#define UPL_LITE		0x100
#define UPL_IO_WIRE		0x200
#define UPL_ACCESS_BLOCKED	0x400
#define UPL_ENCRYPTED		0x800
#define UPL_SHADOWED		0x1000
#define UPL_KERNEL_OBJECT	0x2000
#define UPL_VECTOR		0x4000
#define UPL_SET_DIRTY		0x8000
#define UPL_HAS_BUSY		0x10000

/* flags for upl_create flags parameter */
#define UPL_CREATE_EXTERNAL	0
#define UPL_CREATE_INTERNAL	0x1
#define UPL_CREATE_LITE		0x2

extern upl_t vector_upl_create(vm_offset_t);
extern void vector_upl_deallocate(upl_t);
extern boolean_t vector_upl_is_valid(upl_t);
extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t);
extern void vector_upl_set_pagelist(upl_t);
extern void vector_upl_set_submap(upl_t, vm_map_t, vm_offset_t);
extern void vector_upl_get_submap(upl_t, vm_map_t *, vm_offset_t *);
extern void vector_upl_set_iostate(upl_t, upl_t, upl_offset_t, upl_size_t);
extern void vector_upl_get_iostate(upl_t, upl_t, upl_offset_t *, upl_size_t *);
extern void vector_upl_get_iostate_byindex(upl_t, uint32_t, upl_offset_t *, upl_size_t *);
extern upl_t vector_upl_subupl_byindex(upl_t, uint32_t);
extern upl_t vector_upl_subupl_byoffset(upl_t, upl_offset_t *, upl_size_t *);

extern void vm_object_set_pmap_cache_attr(
	vm_object_t		object,
	upl_page_info_array_t	user_page_list,
	unsigned int		num_pages,
	boolean_t		batch_pmap_op);

extern kern_return_t vm_object_iopl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags);

extern kern_return_t vm_object_super_upl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_size_t		super_cluster,
	upl_t			*upl,
	upl_page_info_t		*user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags);

/* should be just a regular vm_map_enter() */
extern kern_return_t vm_map_enter_upl(
	vm_map_t		map,
	upl_t			upl,
	vm_map_offset_t		*dst_addr);

/* should be just a regular vm_map_remove() */
extern kern_return_t vm_map_remove_upl(
	vm_map_t		map,
	upl_t			upl);

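/*
 * Hedged sketch of the enter/remove pair above: mapping an existing UPL
 * into the kernel map and tearing the mapping down again.  The global
 * kernel_map is assumed to be visible here, and error paths are elided.
 *
 *	vm_map_offset_t kaddr;
 *	if (vm_map_enter_upl(kernel_map, upl, &kaddr) == KERN_SUCCESS) {
 *		... access the UPL's pages through kaddr ...
 *		(void) vm_map_remove_upl(kernel_map, upl);
 *	}
 */
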
/* wired page list structure */
typedef uint32_t *wpl_array_t;

extern void vm_page_free_reserve(int pages);

extern void vm_pageout_throttle_down(vm_page_t page);
extern void vm_pageout_throttle_up(vm_page_t page);

/*
 * ENCRYPTED SWAP:
 */
extern void upl_encrypt(
	upl_t			upl,
	upl_offset_t		crypt_offset,
	upl_size_t		crypt_size);
extern void vm_page_encrypt(
	vm_page_t		page,
	vm_map_offset_t		kernel_map_offset);
extern boolean_t vm_pages_encrypted;	/* are there encrypted pages ? */
extern void vm_page_decrypt(
	vm_page_t		page,
	vm_map_offset_t		kernel_map_offset);
extern kern_return_t vm_paging_map_object(
	vm_page_t		page,
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_prot_t		protection,
	boolean_t		can_unlock_object,
	vm_map_size_t		*size,		/* IN/OUT */
	vm_map_offset_t		*address,	/* OUT */
	boolean_t		*need_unmap);	/* OUT */
extern void vm_paging_unmap_object(
	vm_object_t		object,
	vm_map_offset_t		start,
	vm_map_offset_t		end);
decl_simple_lock_data(extern, vm_paging_lock)

/*
 * Backing store throttle when BS is exhausted
 */
extern unsigned int	vm_backing_store_low;

extern void vm_pageout_steal_laundry(
	vm_page_t	page,
	boolean_t	queues_locked);

extern boolean_t vm_page_is_slideable(vm_page_t m);

extern kern_return_t vm_page_slide(vm_page_t page, vm_map_offset_t kernel_mapping_offset);
#endif	/* MACH_KERNEL_PRIVATE */

#if UPL_DEBUG
extern kern_return_t upl_ubc_alias_set(
	upl_t		upl,
	uintptr_t	alias1,
	uintptr_t	alias2);
extern int upl_ubc_alias_get(
	upl_t		upl,
	uintptr_t	*al,
	uintptr_t	*al2);
#endif	/* UPL_DEBUG */

extern void vm_countdirtypages(void);

extern void vm_backing_store_disable(
	boolean_t	suspend);

extern kern_return_t upl_transpose(
	upl_t	upl1,
	upl_t	upl2);

extern kern_return_t mach_vm_pressure_monitor(
	boolean_t	wait_for_pressure,
	unsigned int	nsecs_monitored,
	unsigned int	*pages_reclaimed_p,
	unsigned int	*pages_wanted_p);

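/*
 * Hedged usage sketch for mach_vm_pressure_monitor(): sample reclaim
 * activity without blocking and ask how many more pages the pageout daemon
 * currently wants.  NSEC_PER_SEC is assumed to be available to the caller,
 * and the monitoring-window semantics are taken from the parameter names
 * only.
 *
 *	unsigned int reclaimed = 0, wanted = 0;
 *	if (mach_vm_pressure_monitor(FALSE, NSEC_PER_SEC,
 *				     &reclaimed, &wanted) == KERN_SUCCESS &&
 *	    wanted > 0) {
 *		... the pageout daemon is still short of pages ...
 *	}
 */
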
extern kern_return_t
vm_set_buffer_cleanup_callout(
	boolean_t	(*func)(int));

struct vm_page_stats_reusable {
	SInt32		reusable_count;
	uint64_t	reusable;
	uint64_t	reused;
	uint64_t	reused_wire;
	uint64_t	reused_remove;
	uint64_t	all_reusable_calls;
	uint64_t	partial_reusable_calls;
	uint64_t	all_reuse_calls;
	uint64_t	partial_reuse_calls;
	uint64_t	reusable_pages_success;
	uint64_t	reusable_pages_failure;
	uint64_t	reusable_pages_shared;
	uint64_t	reuse_pages_success;
	uint64_t	reuse_pages_failure;
	uint64_t	can_reuse_success;
	uint64_t	can_reuse_failure;
	uint64_t	reusable_reclaimed;
};
extern struct vm_page_stats_reusable vm_page_stats_reusable;

extern int hibernate_flush_memory(void);
extern void hibernate_create_paddr_map(void);

extern int vm_compressor_mode;
extern int vm_compressor_thread_count;

#define VM_PAGER_DEFAULT			0x1	/* Use default pager. */
#define VM_PAGER_COMPRESSOR_NO_SWAP		0x2	/* In-core compressor only. */
#define VM_PAGER_COMPRESSOR_WITH_SWAP		0x4	/* In-core compressor + swap backend. */
#define VM_PAGER_FREEZER_DEFAULT		0x8	/* Freezer backed by default pager. */
#define VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP	0x10	/* Freezer backed by in-core compressor only, i.e. frozen data remain compressed in core. */
#define VM_PAGER_FREEZER_COMPRESSOR_WITH_SWAP	0x20	/* Freezer backed by in-core compressor with swap support too. */

#define VM_PAGER_MAX_MODES			6	/* Total number of vm compressor modes supported */

#define DEFAULT_PAGER_IS_ACTIVE		((vm_compressor_mode & VM_PAGER_DEFAULT) == VM_PAGER_DEFAULT)

#define COMPRESSED_PAGER_IS_ACTIVE	(vm_compressor_mode & (VM_PAGER_COMPRESSOR_NO_SWAP | VM_PAGER_COMPRESSOR_WITH_SWAP))

#define DEFAULT_FREEZER_IS_ACTIVE	((vm_compressor_mode & VM_PAGER_FREEZER_DEFAULT) == VM_PAGER_FREEZER_DEFAULT)

#define DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE	(vm_compressor_mode & (VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP | VM_PAGER_FREEZER_COMPRESSOR_WITH_SWAP))

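/*
 * Hedged worked example of the mode predicates above: they are plain bit
 * tests against vm_compressor_mode.  For instance, with
 * vm_compressor_mode == VM_PAGER_COMPRESSOR_WITH_SWAP (0x4),
 * COMPRESSED_PAGER_IS_ACTIVE evaluates non-zero while
 * DEFAULT_PAGER_IS_ACTIVE evaluates to 0, so a caller might write:
 *
 *	if (COMPRESSED_PAGER_IS_ACTIVE) {
 *		... pages are compressed in core rather than handed
 *		    to the default pager ...
 *	}
 */
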

#endif	/* KERNEL_PRIVATE */

#endif	/* _VM_VM_PAGEOUT_H_ */