/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_pageout.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Declarations for the pageout daemon interface.
 */

#ifndef	_VM_VM_PAGEOUT_H_
#define	_VM_VM_PAGEOUT_H_

#ifdef	KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>

#include <kern/kern_types.h>
#include <kern/locks.h>

#include <libkern/OSAtomic.h>

#include <vm/vm_options.h>

#ifdef	MACH_KERNEL_PRIVATE
#include <vm/vm_page.h>
#endif

#include <sys/kdebug.h>

#define VM_PAGE_AVAILABLE_COUNT()	((unsigned int)(vm_page_cleaned_count))

/* externally manipulated counters */
extern unsigned int vm_pageout_cleaned_reactivated, vm_pageout_cleaned_fault_reactivated, vm_pageout_cleaned_commit_reactivated;

#if CONFIG_FREEZE
extern boolean_t memorystatus_freeze_enabled;
#endif

#define VM_DYNAMIC_PAGING_ENABLED()	(VM_CONFIG_COMPRESSOR_IS_ACTIVE)

#if VM_PRESSURE_EVENTS
extern boolean_t vm_pressure_events_enabled;
#endif /* VM_PRESSURE_EVENTS */

/*
 * the following codes are used in the DBG_MACH_WORKINGSET subclass
 * of the DBG_MACH class
 */
#define VM_DISCONNECT_ALL_PAGE_MAPPINGS		0x00
#define VM_DISCONNECT_TASK_PAGE_MAPPINGS	0x01
#define VM_REAL_FAULT_ADDR_INTERNAL		0x02
#define VM_REAL_FAULT_ADDR_PURGABLE		0x03
#define VM_REAL_FAULT_ADDR_EXTERNAL		0x04
#define VM_REAL_FAULT_ADDR_SHAREDCACHE		0x05

extern int	vm_debug_events;

#define VMF_CHECK_ZFDELAY		0x100
#define VMF_COWDELAY			0x101
#define VMF_ZFDELAY			0x102
#define VMF_COMPRESSORDELAY		0x103

#define VM_PAGEOUT_SCAN			0x104
#define VM_PAGEOUT_BALANCE		0x105
#define VM_PAGEOUT_FREELIST		0x106
#define VM_PAGEOUT_PURGEONE		0x107
#define VM_PAGEOUT_CACHE_EVICT		0x108
#define VM_PAGEOUT_THREAD_BLOCK		0x109
#define VM_PAGEOUT_JETSAM		0x10A

#define VM_UPL_PAGE_WAIT		0x120
#define VM_IOPL_PAGE_WAIT		0x121
#define VM_PAGE_WAIT_BLOCK		0x122

#if CONFIG_IOSCHED
#define VM_PAGE_SLEEP			0x123
#define VM_PAGE_EXPEDITE		0x124
#endif

#define VM_PRESSURE_EVENT		0x130
#define VM_EXECVE			0x131
#define VM_WAKEUP_COMPACTOR_SWAPPER	0x132

#define VM_DATA_WRITE			0x140

#define VM_DEBUG_EVENT(name, event, control, arg1, arg2, arg3, arg4)	\
	MACRO_BEGIN							\
	if (vm_debug_events) {						\
		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	}								\
	MACRO_END

#define VM_DEBUG_CONSTANT_EVENT(name, event, control, arg1, arg2, arg3, arg4)	\
	MACRO_BEGIN							\
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	MACRO_END

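/*
 * Illustrative sketch (not part of the original header, compiled out):
 * VM_DEBUG_EVENT is typically invoked with one of the event codes above and
 * a kdebug function qualifier; it emits a DBG_MACH_VM tracepoint only when
 * vm_debug_events is enabled.  The function and its arguments below are
 * hypothetical.
 */
#if 0	/* example only */
static void
example_trace_pageout_scan(unsigned int pages_considered, unsigned int pages_freed)
{
	VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE,
		       pages_considered, pages_freed, 0, 0);
}
#endif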
extern void memoryshot(unsigned int event, unsigned int control);

extern kern_return_t vm_map_create_upl(
	vm_map_t		map,
	vm_map_address_t	offset,
	upl_size_t		*upl_size,
	upl_t			*upl,
	upl_page_info_array_t	page_list,
	unsigned int		*count,
	upl_control_flags_t	*flags);

extern ppnum_t upl_get_highest_page(
	upl_t			upl);

extern upl_size_t upl_get_size(
	upl_t			upl);

extern upl_t upl_associated_upl(upl_t upl);
extern void upl_set_associated_upl(upl_t upl, upl_t associated_upl);

extern void iopl_valid_data(
	upl_t			upl_ptr);

#ifdef	XNU_KERNEL_PRIVATE

extern vm_tag_t iopl_set_tag(
	upl_t			upl_ptr,
	vm_tag_t		tag);

#endif /* XNU_KERNEL_PRIVATE */

extern struct vnode * upl_lookup_vnode(upl_t upl);

#ifndef	MACH_KERNEL_PRIVATE
typedef struct vm_page	*vm_page_t;
#endif

extern void vm_page_free_list(
	vm_page_t	mem,
	boolean_t	prepare_object);

extern kern_return_t vm_page_alloc_list(
	int		page_count,
	int		flags,
	vm_page_t	*list);

extern void vm_page_set_offset(vm_page_t page, vm_object_offset_t offset);
extern vm_object_offset_t vm_page_get_offset(vm_page_t page);
extern ppnum_t vm_page_get_phys_page(vm_page_t page);
extern vm_page_t vm_page_get_next(vm_page_t page);

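/*
 * Illustrative sketch (not part of the original header, compiled out):
 * allocating a raw page list, walking it with vm_page_get_next(), and
 * returning it with vm_page_free_list().  The flags value and the use made
 * of the physical page number are placeholders; the assumption that the
 * allocated pages are chained for vm_page_get_next() is illustrative only.
 */
#if 0	/* example only */
static kern_return_t
example_page_list_walk(void)
{
	vm_page_t	list = NULL;
	vm_page_t	p;
	kern_return_t	kr;

	kr = vm_page_alloc_list(8, 0 /* flags: placeholder */, &list);
	if (kr != KERN_SUCCESS)
		return kr;

	for (p = list; p != NULL; p = vm_page_get_next(p))
		(void) vm_page_get_phys_page(p);	/* inspect each page */

	vm_page_free_list(list, FALSE);			/* prepare_object = FALSE */
	return KERN_SUCCESS;
}
#endif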
extern kern_return_t mach_vm_pressure_level_monitor(boolean_t wait_for_pressure, unsigned int *pressure_level);

extern kern_return_t vm_pageout_wait(uint64_t deadline);

#ifdef	MACH_KERNEL_PRIVATE

#include <vm/vm_page.h>

extern unsigned int vm_pageout_scan_event_counter;
extern unsigned int vm_page_anonymous_count;

/*
 * must hold the page queues lock to
 * manipulate this structure
 */
struct vm_pageout_queue {
	vm_page_queue_head_t	pgo_pending;	/* laundry pages to be processed by pager's iothread */
	unsigned int		pgo_laundry;	/* current count of laundry pages on queue or in flight */
	unsigned int		pgo_maxlaundry;
	uint64_t		pgo_tid;	/* thread ID of I/O thread that services this queue */
	uint8_t			pgo_lowpriority; /* iothread is set to use low priority I/O */

	unsigned int		pgo_idle:1,	/* iothread is blocked waiting for work to do */
				pgo_busy:1,	/* iothread is currently processing request from pgo_pending */
				pgo_throttled:1, /* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */
				pgo_draining:1,
				pgo_inited:1,
				:0;
};

#define VM_PAGE_Q_THROTTLED(q)		\
	((q)->pgo_laundry >= (q)->pgo_maxlaundry)

extern struct vm_pageout_queue vm_pageout_queue_internal;
extern struct vm_pageout_queue vm_pageout_queue_external;

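/*
 * Illustrative sketch (not part of the original header, compiled out):
 * checking whether the internal pageout queue is throttled.  The queue may
 * only be examined with the page queues lock held; the lock helpers are
 * assumed to come from vm_page.h.
 */
#if 0	/* example only */
static boolean_t
example_internal_queue_is_throttled(void)
{
	boolean_t	throttled;

	vm_page_lock_queues();
	throttled = VM_PAGE_Q_THROTTLED(&vm_pageout_queue_internal);
	vm_page_unlock_queues();

	return throttled;
}
#endif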
/*
 *	Routines exported to Mach.
 */
extern void		vm_pageout(void);

extern kern_return_t	vm_pageout_internal_start(void);

extern void		vm_pageout_object_terminate(
				vm_object_t	object);

extern int		vm_pageout_cluster(
				vm_page_t	m,
				boolean_t	immediate_ok,
				boolean_t	keep_object_locked);

extern void		vm_pageout_initialize_page(
				vm_page_t	m);

/* UPL exported routines and structures */

#define upl_lock_init(object)		lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define upl_lock_destroy(object)	lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define upl_lock(object)		lck_mtx_lock(&(object)->Lock)
#define upl_unlock(object)		lck_mtx_unlock(&(object)->Lock)
#define upl_try_lock(object)		lck_mtx_try_lock(&(object)->Lock)

#define MAX_VECTOR_UPL_ELEMENTS	8

struct _vector_upl_iostates {
	upl_offset_t	offset;
	upl_size_t	size;
};

typedef struct _vector_upl_iostates vector_upl_iostates_t;

struct _vector_upl {
	upl_size_t		size;
	uint32_t		num_upls;
	uint32_t		invalid_upls;
	uint32_t		_reserved;
	vm_map_t		submap;
	vm_offset_t		submap_dst_addr;
	vm_object_offset_t	offset;
	upl_t			upl_elems[MAX_VECTOR_UPL_ELEMENTS];
	upl_page_info_array_t	pagelist;
	vector_upl_iostates_t	upl_iostates[MAX_VECTOR_UPL_ELEMENTS];
};

typedef struct _vector_upl *vector_upl_t;

/* universal page list structure */

#if UPL_DEBUG
#define UPL_DEBUG_STACK_FRAMES	16
#define UPL_DEBUG_COMMIT_RECORDS 4

struct ucd {
	upl_offset_t	c_beg;
	upl_offset_t	c_end;
	int		c_aborted;
	void		*c_retaddr[UPL_DEBUG_STACK_FRAMES];
};
#endif

struct upl {
	decl_lck_mtx_data(,	Lock)	/* Synchronization */
	int		ref_count;
	int		ext_ref_count;
	int		flags;
	vm_object_offset_t offset;
	upl_size_t	size;	    /* size in bytes of the address space */
	vm_offset_t	kaddr;	    /* secondary mapping in kernel */
	vm_object_t	map_object;
	ppnum_t		highest_page;
	void		*vector_upl;
	upl_t		associated_upl;
#if CONFIG_IOSCHED
	int		upl_priority;
	uint64_t	*upl_reprio_info;
	void		*decmp_io_upl;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	thread_t	upl_creator;
	queue_chain_t	uplq;	    /* List of outstanding upls on an obj */
#endif
#if UPL_DEBUG
	uintptr_t	ubc_alias1;
	uintptr_t	ubc_alias2;

	uint32_t	upl_state;
	uint32_t	upl_commit_index;
	void		*upl_create_retaddr[UPL_DEBUG_STACK_FRAMES];

	struct ucd	upl_commit_records[UPL_DEBUG_COMMIT_RECORDS];
#endif	/* UPL_DEBUG */
};

/* upl struct flags */
#define UPL_PAGE_LIST_MAPPED	0x1
#define UPL_KERNEL_MAPPED	0x2
#define UPL_CLEAR_DIRTY		0x4
#define UPL_COMPOSITE_LIST	0x8
#define UPL_INTERNAL		0x10
#define UPL_PAGE_SYNC_DONE	0x20
#define UPL_DEVICE_MEMORY	0x40
#define UPL_PAGEOUT		0x80
#define UPL_LITE		0x100
#define UPL_IO_WIRE		0x200
#define UPL_ACCESS_BLOCKED	0x400
#define UPL_ENCRYPTED		0x800
#define UPL_SHADOWED		0x1000
#define UPL_KERNEL_OBJECT	0x2000
#define UPL_VECTOR		0x4000
#define UPL_SET_DIRTY		0x8000
#define UPL_HAS_BUSY		0x10000
#define UPL_TRACKED_BY_OBJECT	0x20000
#define UPL_EXPEDITE_SUPPORTED	0x40000
#define UPL_DECMP_REQ		0x80000
#define UPL_DECMP_REAL_IO	0x100000

/* flags for upl_create flags parameter */
#define UPL_CREATE_EXTERNAL	0
#define UPL_CREATE_INTERNAL	0x1
#define UPL_CREATE_LITE		0x2
#define UPL_CREATE_IO_TRACKING	0x4
#define UPL_CREATE_EXPEDITE_SUP	0x8

extern upl_t vector_upl_create(vm_offset_t);
extern void vector_upl_deallocate(upl_t);
extern boolean_t vector_upl_is_valid(upl_t);
extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t);
extern void vector_upl_set_pagelist(upl_t);
extern void vector_upl_set_submap(upl_t, vm_map_t, vm_offset_t);
extern void vector_upl_get_submap(upl_t, vm_map_t*, vm_offset_t*);
extern void vector_upl_set_iostate(upl_t, upl_t, upl_offset_t, upl_size_t);
extern void vector_upl_get_iostate(upl_t, upl_t, upl_offset_t*, upl_size_t*);
extern void vector_upl_get_iostate_byindex(upl_t, uint32_t, upl_offset_t*, upl_size_t*);
extern upl_t vector_upl_subupl_byindex(upl_t, uint32_t);
extern upl_t vector_upl_subupl_byoffset(upl_t, upl_offset_t*, upl_size_t*);

extern void vm_object_set_pmap_cache_attr(
	vm_object_t		object,
	upl_page_info_array_t	user_page_list,
	unsigned int		num_pages,
	boolean_t		batch_pmap_op);

extern kern_return_t vm_object_iopl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	upl_control_flags_t	cntrl_flags);

extern kern_return_t vm_object_super_upl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_size_t		super_cluster,
	upl_t			*upl,
	upl_page_info_t		*user_page_list,
	unsigned int		*page_list_count,
	upl_control_flags_t	cntrl_flags);

/* should be just a regular vm_map_enter() */
extern kern_return_t vm_map_enter_upl(
	vm_map_t		map,
	upl_t			upl,
	vm_map_offset_t		*dst_addr);

/* should be just a regular vm_map_remove() */
extern kern_return_t vm_map_remove_upl(
	vm_map_t		map,
	upl_t			upl);

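/*
 * Illustrative sketch (not part of the original header, compiled out):
 * the usual UPL life cycle over a range of a map -- create the UPL, map it
 * into a destination map, then tear the mapping down.  The flag choices,
 * sizes, and the destination map parameter are placeholders for illustration;
 * committing or aborting the UPL itself is omitted.
 */
#if 0	/* example only */
static kern_return_t
example_upl_map_cycle(vm_map_t src_map, vm_map_address_t offset, vm_map_t dst_map)
{
	upl_t			upl = NULL;
	upl_size_t		size = PAGE_SIZE;
	unsigned int		count = 0;			/* page list count: placeholder */
	upl_control_flags_t	flags = UPL_SET_INTERNAL;	/* flags: placeholder */
	vm_map_offset_t		dst_addr;
	kern_return_t		kr;

	kr = vm_map_create_upl(src_map, offset, &size, &upl, NULL, &count, &flags);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = vm_map_enter_upl(dst_map, upl, &dst_addr);
	if (kr == KERN_SUCCESS) {
		/* ... access the pages through dst_addr ... */
		(void) vm_map_remove_upl(dst_map, upl);
	}
	return kr;
}
#endif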
/* wired page list structure */
typedef uint32_t *wpl_array_t;

extern void vm_page_free_reserve(int pages);

extern void vm_pageout_throttle_down(vm_page_t page);
extern void vm_pageout_throttle_up(vm_page_t page);

/*
 * ENCRYPTED SWAP:
 */
extern void upl_encrypt(
	upl_t			upl,
	upl_offset_t		crypt_offset,
	upl_size_t		crypt_size);
extern void vm_page_encrypt(
	vm_page_t		page,
	vm_map_offset_t		kernel_map_offset);
extern boolean_t vm_pages_encrypted; /* are there encrypted pages? */
extern void vm_page_decrypt(
	vm_page_t		page,
	vm_map_offset_t		kernel_map_offset);
extern kern_return_t vm_paging_map_object(
	vm_page_t		page,
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_prot_t		protection,
	boolean_t		can_unlock_object,
	vm_map_size_t		*size,		/* IN/OUT */
	vm_map_offset_t		*address,	/* OUT */
	boolean_t		*need_unmap);	/* OUT */
extern void vm_paging_unmap_object(
	vm_object_t		object,
	vm_map_offset_t		start,
	vm_map_offset_t		end);
decl_simple_lock_data(extern, vm_paging_lock)

/*
 * Backing store throttle when BS is exhausted
 */
extern unsigned int	vm_backing_store_low;

extern void vm_pageout_steal_laundry(
	vm_page_t	page,
	boolean_t	queues_locked);

extern boolean_t vm_page_is_slideable(vm_page_t m);

extern kern_return_t vm_page_slide(vm_page_t page, vm_map_offset_t kernel_mapping_offset);
#endif /* MACH_KERNEL_PRIVATE */

#if UPL_DEBUG
extern kern_return_t upl_ubc_alias_set(
	upl_t		upl,
	uintptr_t	alias1,
	uintptr_t	alias2);
extern int upl_ubc_alias_get(
	upl_t		upl,
	uintptr_t	*al,
	uintptr_t	*al2);
#endif /* UPL_DEBUG */

extern void vm_countdirtypages(void);

extern void vm_backing_store_disable(
	boolean_t	suspend);

extern kern_return_t upl_transpose(
	upl_t	upl1,
	upl_t	upl2);

extern kern_return_t mach_vm_pressure_monitor(
	boolean_t	wait_for_pressure,
	unsigned int	nsecs_monitored,
	unsigned int	*pages_reclaimed_p,
	unsigned int	*pages_wanted_p);

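/*
 * Illustrative sketch (not part of the original header, compiled out):
 * a non-blocking memory-pressure query.  Parameter choices and the
 * interpretation comment are placeholders for illustration only.
 */
#if 0	/* example only */
static void
example_sample_pressure(void)
{
	unsigned int	pages_reclaimed = 0;
	unsigned int	pages_wanted = 0;

	/* FALSE: return immediately rather than waiting for a pressure event */
	if (mach_vm_pressure_monitor(FALSE, 0 /* nsecs_monitored: placeholder */,
				     &pages_reclaimed, &pages_wanted) == KERN_SUCCESS) {
		/* pages_wanted indicates how many pages the pageout daemon is after */
	}
}
#endif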
extern kern_return_t
vm_set_buffer_cleanup_callout(
	boolean_t	(*func)(int));

struct vm_page_stats_reusable {
	SInt32		reusable_count;
	uint64_t	reusable;
	uint64_t	reused;
	uint64_t	reused_wire;
	uint64_t	reused_remove;
	uint64_t	all_reusable_calls;
	uint64_t	partial_reusable_calls;
	uint64_t	all_reuse_calls;
	uint64_t	partial_reuse_calls;
	uint64_t	reusable_pages_success;
	uint64_t	reusable_pages_failure;
	uint64_t	reusable_pages_shared;
	uint64_t	reuse_pages_success;
	uint64_t	reuse_pages_failure;
	uint64_t	can_reuse_success;
	uint64_t	can_reuse_failure;
	uint64_t	reusable_reclaimed;
	uint64_t	reusable_nonwritable;
	uint64_t	reusable_shared;
	uint64_t	free_shared;
};
extern struct vm_page_stats_reusable vm_page_stats_reusable;

extern int hibernate_flush_memory(void);
extern void hibernate_reset_stats(void);
extern void hibernate_create_paddr_map(void);

extern void vm_set_restrictions(void);

extern int vm_compressor_mode;
extern int vm_compressor_thread_count;
extern boolean_t vm_restricted_to_single_processor;
extern boolean_t vm_compressor_immediate_preferred;
extern boolean_t vm_compressor_immediate_preferred_override;
extern kern_return_t vm_pageout_compress_page(void **, char *, vm_page_t, boolean_t);
extern void vm_pageout_anonymous_pages(void);
extern void vm_pageout_disconnect_all_pages(void);

struct vm_config {
	boolean_t	compressor_is_present;	/* compressor is initialized and can be used by the freezer, the sweep or the pager */
	boolean_t	compressor_is_active;	/* pager can actively compress pages... 'compressor_is_present' must be set */
	boolean_t	swap_is_present;	/* swap is initialized and can be used by the freezer, the sweep or the pager */
	boolean_t	swap_is_active;		/* pager can actively swap out compressed segments... 'swap_is_present' must be set */
	boolean_t	freezer_swap_is_active;	/* freezer can swap out frozen tasks... "compressor_is_present + swap_is_present" must be set */
};

extern struct vm_config	vm_config;

#define VM_PAGER_NOT_CONFIGURED				0x0	/* no compressor or swap configured */
#define VM_PAGER_DEFAULT				0x1	/* Use default pager... DEPRECATED */
#define VM_PAGER_COMPRESSOR_NO_SWAP			0x2	/* Active in-core compressor only. */
#define VM_PAGER_COMPRESSOR_WITH_SWAP			0x4	/* Active in-core compressor + swap backend. */
#define VM_PAGER_FREEZER_DEFAULT			0x8	/* Freezer backed by default pager... DEPRECATED */
#define VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP		0x10	/* Freezer backed by in-core compressor only, i.e. frozen data remain in-core compressed. */
#define VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP	0x20	/* Active in-core compressor + freezer backed by in-core compressor with swap support too. */

#define VM_PAGER_MAX_MODES	6	/* Total number of vm compressor modes supported */

#define VM_CONFIG_COMPRESSOR_IS_PRESENT		(vm_config.compressor_is_present == TRUE)
#define VM_CONFIG_COMPRESSOR_IS_ACTIVE		(vm_config.compressor_is_active == TRUE)
#define VM_CONFIG_SWAP_IS_PRESENT		(vm_config.swap_is_present == TRUE)
#define VM_CONFIG_SWAP_IS_ACTIVE		(vm_config.swap_is_active == TRUE)
#define VM_CONFIG_FREEZER_SWAP_IS_ACTIVE	(vm_config.freezer_swap_is_active == TRUE)
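/*
 * Illustrative sketch (not part of the original header, compiled out):
 * code that needs to know whether compressed paging and swap are available
 * typically tests the VM_CONFIG_* predicates above; note that
 * VM_DYNAMIC_PAGING_ENABLED() earlier in this header is simply an alias for
 * VM_CONFIG_COMPRESSOR_IS_ACTIVE.  The function below is hypothetical.
 */
#if 0	/* example only */
static int
example_paging_backend_mode(void)
{
	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE)
		return VM_PAGER_NOT_CONFIGURED;

	return VM_CONFIG_SWAP_IS_ACTIVE ? VM_PAGER_COMPRESSOR_WITH_SWAP
					: VM_PAGER_COMPRESSOR_NO_SWAP;
}
#endif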

#endif	/* KERNEL_PRIVATE */

#endif	/* _VM_VM_PAGEOUT_H_ */