/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_pageout.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Declarations for the pageout daemon interface.
 */

#ifndef _VM_VM_PAGEOUT_H_
#define _VM_VM_PAGEOUT_H_

#ifdef KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>

#include <kern/kern_types.h>
#include <kern/locks.h>

#include <libkern/OSAtomic.h>


#include <vm/vm_options.h>

#ifdef MACH_KERNEL_PRIVATE
#include <vm/vm_page.h>
#endif

#include <sys/kdebug.h>

#define VM_PAGE_AVAILABLE_COUNT()       ((unsigned int)(vm_page_cleaned_count))

/* externally manipulated counters */
extern unsigned int vm_pageout_cleaned_reactivated, vm_pageout_cleaned_fault_reactivated, vm_pageout_cleaned_commit_reactivated;

#if CONFIG_FREEZE
extern boolean_t memorystatus_freeze_enabled;
#define VM_DYNAMIC_PAGING_ENABLED(port) (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_SWAPBACKED || (memorystatus_freeze_enabled == FALSE && IP_VALID(port)))
#else
#define VM_DYNAMIC_PAGING_ENABLED(port) (COMPRESSED_PAGER_IS_ACTIVE || IP_VALID(port))
#endif

#if VM_PRESSURE_EVENTS
extern boolean_t vm_pressure_events_enabled;
#endif /* VM_PRESSURE_EVENTS */

extern int vm_debug_events;

#define VMF_CHECK_ZFDELAY       0x100
#define VMF_COWDELAY            0x101
#define VMF_ZFDELAY             0x102
#define VMF_COMPRESSORDELAY     0x103

#define VM_PAGEOUT_SCAN         0x104
#define VM_PAGEOUT_BALANCE      0x105
#define VM_PAGEOUT_FREELIST     0x106
#define VM_PAGEOUT_PURGEONE     0x107
#define VM_PAGEOUT_CACHE_EVICT  0x108
#define VM_PAGEOUT_THREAD_BLOCK 0x109
#define VM_PAGEOUT_JETSAM       0x10A

#define VM_UPL_PAGE_WAIT        0x120
#define VM_IOPL_PAGE_WAIT       0x121
#define VM_PAGE_WAIT_BLOCK      0x122

#if CONFIG_IOSCHED
#define VM_PAGE_SLEEP           0x123
#define VM_PAGE_EXPEDITE        0x124
#endif

#define VM_PRESSURE_EVENT       0x130
#define VM_EXECVE               0x131
#define VM_WAKEUP_COMPACTOR_SWAPPER     0x132

#define VM_DATA_WRITE           0x140

#define VM_DEBUG_EVENT(name, event, control, arg1, arg2, arg3, arg4)   \
        MACRO_BEGIN                                                     \
        if (vm_debug_events) {                                          \
                KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
        }                                                               \
        MACRO_END

#define VM_DEBUG_CONSTANT_EVENT(name, event, control, arg1, arg2, arg3, arg4)  \
        MACRO_BEGIN                                                     \
        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
        MACRO_END
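
/*
 * A minimal usage sketch: callers typically bracket a phase with a
 * start/end pair of events.  DBG_FUNC_START/DBG_FUNC_END come from
 * <sys/kdebug.h>; the argument values here are placeholders.
 *
 *      VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
 *                     vm_page_free_count, 0, 0, 0);
 *      ... replenish the free list ...
 *      VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
 *                     vm_page_free_count, 0, 0, 0);
 */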

extern void memoryshot(unsigned int event, unsigned int control);

extern kern_return_t vm_map_create_upl(
        vm_map_t                map,
        vm_map_address_t        offset,
        upl_size_t              *upl_size,
        upl_t                   *upl,
        upl_page_info_array_t   page_list,
        unsigned int            *count,
        upl_control_flags_t     *flags);
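
/*
 * A minimal usage sketch: wrap one page of a map in a UPL.  On input
 * *upl_size holds the requested span and may be trimmed by the call,
 * so it is re-read on return.  The flag choice and the NULL page list
 * (count 0) are assumptions made for the example.
 *
 *      upl_t                   upl = NULL;
 *      upl_size_t              upl_size = PAGE_SIZE;
 *      unsigned int            count = 0;
 *      upl_control_flags_t     flags = UPL_SET_INTERNAL | UPL_SET_LITE;
 *      kern_return_t           kr;
 *
 *      kr = vm_map_create_upl(map, offset, &upl_size, &upl,
 *                             NULL, &count, &flags);
 *      if (kr == KERN_SUCCESS) {
 *              ... operate on the pages, then commit or abort the UPL ...
 *      }
 */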

extern ppnum_t upl_get_highest_page(
        upl_t                   upl);

extern upl_size_t upl_get_size(
        upl_t                   upl);

extern upl_t upl_associated_upl(upl_t upl);
extern void upl_set_associated_upl(upl_t upl, upl_t associated_upl);

extern void iopl_valid_data(
        upl_t                   upl_ptr);

#ifndef MACH_KERNEL_PRIVATE
typedef struct vm_page *vm_page_t;
#endif

extern void vm_page_free_list(
        vm_page_t       mem,
        boolean_t       prepare_object);

extern kern_return_t vm_page_alloc_list(
        int             page_count,
        int             flags,
        vm_page_t       *list);

extern void vm_page_set_offset(vm_page_t page, vm_object_offset_t offset);
extern vm_object_offset_t vm_page_get_offset(vm_page_t page);
extern ppnum_t vm_page_get_phys_page(vm_page_t page);
extern vm_page_t vm_page_get_next(vm_page_t page);

extern kern_return_t mach_vm_pressure_level_monitor(boolean_t wait_for_pressure, unsigned int *pressure_level);

extern kern_return_t vm_pageout_wait(uint64_t deadline);

#ifdef MACH_KERNEL_PRIVATE

#include <vm/vm_page.h>

extern unsigned int vm_pageout_scan_event_counter;
extern unsigned int vm_page_anonymous_count;


/*
 * The page queues lock must be held to
 * manipulate this structure.
 */
struct vm_pageout_queue {
        queue_head_t    pgo_pending;    /* laundry pages to be processed by pager's iothread */
        unsigned int    pgo_laundry;    /* current count of laundry pages on queue or in flight */
        unsigned int    pgo_maxlaundry;
        uint64_t        pgo_tid;        /* thread ID of I/O thread that services this queue */
        uint8_t         pgo_lowpriority; /* iothread is set to use low priority I/O */

        unsigned int    pgo_idle:1,     /* iothread is blocked waiting for work to do */
                        pgo_busy:1,     /* iothread is currently processing request from pgo_pending */
                        pgo_throttled:1, /* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */
                        pgo_draining:1, /* queue is being drained; a waiter is woken once pgo_laundry reaches 0 */
                        pgo_inited:1,   /* queue has been initialized */
                        :0;
};

#define VM_PAGE_Q_THROTTLED(q)          \
        ((q)->pgo_laundry >= (q)->pgo_maxlaundry)

extern struct vm_pageout_queue vm_pageout_queue_internal;
extern struct vm_pageout_queue vm_pageout_queue_external;
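
/*
 * A minimal sketch of how the throttle check is used: when the
 * internal (compressor) queue is at its laundry limit, the scan
 * thread marks itself throttled and blocks until the iothread lowers
 * pgo_laundry.  Per the comment on struct vm_pageout_queue, the page
 * queues lock is held around the update.
 *
 *      struct vm_pageout_queue *iq = &vm_pageout_queue_internal;
 *
 *      if (VM_PAGE_Q_THROTTLED(iq)) {
 *              iq->pgo_throttled = TRUE;
 *              ... assert a wait on the queue and block ...
 *      }
 */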


/*
 * Routines exported to Mach.
 */
extern void vm_pageout(void);

extern kern_return_t vm_pageout_internal_start(void);

extern void vm_pageout_object_terminate(
        vm_object_t     object);

extern int vm_pageout_cluster(
        vm_page_t       m,
        boolean_t       pageout,
        boolean_t       immediate_ok,
        boolean_t       keep_object_locked);

extern void vm_pageout_initialize_page(
        vm_page_t       m);

/* UPL exported routines and structures */

#define upl_lock_init(object)   lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define upl_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define upl_lock(object)        lck_mtx_lock(&(object)->Lock)
#define upl_unlock(object)      lck_mtx_unlock(&(object)->Lock)
#define upl_try_lock(object)    lck_mtx_try_lock(&(object)->Lock)
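
/*
 * These macros wrap the mutex embedded in struct upl, so a typical
 * critical section is simply:
 *
 *      upl_lock(upl);
 *      ... examine or update the upl ...
 *      upl_unlock(upl);
 *
 * upl_try_lock() returns nonzero on success, letting lock-avoidance
 * paths back off instead of blocking.
 */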

#define MAX_VECTOR_UPL_ELEMENTS 8

struct _vector_upl_iostates {
        upl_offset_t    offset;
        upl_size_t      size;
};

typedef struct _vector_upl_iostates vector_upl_iostates_t;

struct _vector_upl {
        upl_size_t              size;
        uint32_t                num_upls;
        uint32_t                invalid_upls;
        uint32_t                _reserved;
        vm_map_t                submap;
        vm_offset_t             submap_dst_addr;
        vm_object_offset_t      offset;
        upl_t                   upl_elems[MAX_VECTOR_UPL_ELEMENTS];
        upl_page_info_array_t   pagelist;
        vector_upl_iostates_t   upl_iostates[MAX_VECTOR_UPL_ELEMENTS];
};

typedef struct _vector_upl *vector_upl_t;

/* universal page list structure */

#if UPL_DEBUG
#define UPL_DEBUG_STACK_FRAMES  16
#define UPL_DEBUG_COMMIT_RECORDS 4

struct ucd {
        upl_offset_t    c_beg;
        upl_offset_t    c_end;
        int             c_aborted;
        void            *c_retaddr[UPL_DEBUG_STACK_FRAMES];
};
#endif


struct upl {
        decl_lck_mtx_data(, Lock)       /* Synchronization */
        int             ref_count;
        int             ext_ref_count;
        int             flags;
        vm_object_t     src_object;     /* object derived from */
        vm_object_offset_t offset;
        upl_size_t      size;           /* size in bytes of the address space */
        vm_offset_t     kaddr;          /* secondary mapping in kernel */
        vm_object_t     map_object;
        ppnum_t         highest_page;
        void            *vector_upl;
        upl_t           associated_upl;
#if CONFIG_IOSCHED
        int             upl_priority;
        uint64_t        *upl_reprio_info;
        void            *decmp_io_upl;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
        thread_t        upl_creator;
        queue_chain_t   uplq;           /* List of outstanding upls on an obj */
#endif
#if UPL_DEBUG
        uintptr_t       ubc_alias1;
        uintptr_t       ubc_alias2;

        uint32_t        upl_state;
        uint32_t        upl_commit_index;
        void            *upl_create_retaddr[UPL_DEBUG_STACK_FRAMES];

        struct ucd      upl_commit_records[UPL_DEBUG_COMMIT_RECORDS];
#endif /* UPL_DEBUG */
};

/* upl struct flags */
#define UPL_PAGE_LIST_MAPPED    0x1
#define UPL_KERNEL_MAPPED       0x2
#define UPL_CLEAR_DIRTY         0x4
#define UPL_COMPOSITE_LIST      0x8
#define UPL_INTERNAL            0x10
#define UPL_PAGE_SYNC_DONE      0x20
#define UPL_DEVICE_MEMORY       0x40
#define UPL_PAGEOUT             0x80
#define UPL_LITE                0x100
#define UPL_IO_WIRE             0x200
#define UPL_ACCESS_BLOCKED      0x400
#define UPL_ENCRYPTED           0x800
#define UPL_SHADOWED            0x1000
#define UPL_KERNEL_OBJECT       0x2000
#define UPL_VECTOR              0x4000
#define UPL_SET_DIRTY           0x8000
#define UPL_HAS_BUSY            0x10000
#define UPL_TRACKED_BY_OBJECT   0x20000
#define UPL_EXPEDITE_SUPPORTED  0x40000
#define UPL_DECMP_REQ           0x80000
#define UPL_DECMP_REAL_IO       0x100000

/* flags for upl_create flags parameter */
#define UPL_CREATE_EXTERNAL     0
#define UPL_CREATE_INTERNAL     0x1
#define UPL_CREATE_LITE         0x2
#define UPL_CREATE_IO_TRACKING  0x4
#define UPL_CREATE_EXPEDITE_SUP 0x8

extern upl_t vector_upl_create(vm_offset_t);
extern void vector_upl_deallocate(upl_t);
extern boolean_t vector_upl_is_valid(upl_t);
extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t);
extern void vector_upl_set_pagelist(upl_t);
extern void vector_upl_set_submap(upl_t, vm_map_t, vm_offset_t);
extern void vector_upl_get_submap(upl_t, vm_map_t *, vm_offset_t *);
extern void vector_upl_set_iostate(upl_t, upl_t, upl_offset_t, upl_size_t);
extern void vector_upl_get_iostate(upl_t, upl_t, upl_offset_t *, upl_size_t *);
extern void vector_upl_get_iostate_byindex(upl_t, uint32_t, upl_offset_t *, upl_size_t *);
extern upl_t vector_upl_subupl_byindex(upl_t, uint32_t);
extern upl_t vector_upl_subupl_byoffset(upl_t, upl_offset_t *, upl_size_t *);
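
/*
 * A sketch of iterating a vector UPL, assuming (as the sketch does)
 * that the by-index lookup returns NULL once the index passes the
 * populated sub-UPLs:
 *
 *      upl_offset_t    offset;
 *      upl_size_t      size;
 *      upl_t           subupl;
 *      uint32_t        i;
 *
 *      for (i = 0; (subupl = vector_upl_subupl_byindex(vupl, i)) != NULL; i++) {
 *              vector_upl_get_iostate_byindex(vupl, i, &offset, &size);
 *              ... issue or complete I/O covering [offset, offset + size) ...
 *      }
 */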

extern void vm_object_set_pmap_cache_attr(
        vm_object_t             object,
        upl_page_info_array_t   user_page_list,
        unsigned int            num_pages,
        boolean_t               batch_pmap_op);

extern kern_return_t vm_object_iopl_request(
        vm_object_t             object,
        vm_object_offset_t      offset,
        upl_size_t              size,
        upl_t                   *upl_ptr,
        upl_page_info_array_t   user_page_list,
        unsigned int            *page_list_count,
        upl_control_flags_t     cntrl_flags);

extern kern_return_t vm_object_super_upl_request(
        vm_object_t             object,
        vm_object_offset_t      offset,
        upl_size_t              size,
        upl_size_t              super_cluster,
        upl_t                   *upl,
        upl_page_info_t         *user_page_list,
        unsigned int            *page_list_count,
        upl_control_flags_t     cntrl_flags);

/* should be just a regular vm_map_enter() */
extern kern_return_t vm_map_enter_upl(
        vm_map_t                map,
        upl_t                   upl,
        vm_map_offset_t         *dst_addr);

/* should be just a regular vm_map_remove() */
extern kern_return_t vm_map_remove_upl(
        vm_map_t                map,
        upl_t                   upl);
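
/*
 * The usual pairing, sketched: map a UPL's pages into a kernel-visible
 * range, touch them, then tear the mapping down.
 *
 *      vm_map_offset_t dst_addr;
 *      kern_return_t   kr;
 *
 *      kr = vm_map_enter_upl(kernel_map, upl, &dst_addr);
 *      if (kr == KERN_SUCCESS) {
 *              ... access the pages through dst_addr ...
 *              (void) vm_map_remove_upl(kernel_map, upl);
 *      }
 */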

/* wired page list structure */
typedef uint32_t *wpl_array_t;

extern void vm_page_free_reserve(int pages);

extern void vm_pageout_throttle_down(vm_page_t page);
extern void vm_pageout_throttle_up(vm_page_t page);

/*
 * ENCRYPTED SWAP:
 */
extern void upl_encrypt(
        upl_t                   upl,
        upl_offset_t            crypt_offset,
        upl_size_t              crypt_size);
extern void vm_page_encrypt(
        vm_page_t               page,
        vm_map_offset_t         kernel_map_offset);
extern boolean_t vm_pages_encrypted;    /* are there encrypted pages? */
extern void vm_page_decrypt(
        vm_page_t               page,
        vm_map_offset_t         kernel_map_offset);
extern kern_return_t vm_paging_map_object(
        vm_page_t               page,
        vm_object_t             object,
        vm_object_offset_t      offset,
        vm_prot_t               protection,
        boolean_t               can_unlock_object,
        vm_map_size_t           *size,          /* IN/OUT */
        vm_map_offset_t         *address,       /* OUT */
        boolean_t               *need_unmap);   /* OUT */
extern void vm_paging_unmap_object(
        vm_object_t             object,
        vm_map_offset_t         start,
        vm_map_offset_t         end);
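
/*
 * A sketch of the map/unmap pairing.  need_unmap reports whether the
 * call had to create a fresh kernel mapping (the page may already be
 * reachable some other way), so the unmap is conditional.
 *
 *      vm_map_size_t   size = PAGE_SIZE;
 *      vm_map_offset_t addr;
 *      boolean_t       need_unmap;
 *      kern_return_t   kr;
 *
 *      kr = vm_paging_map_object(page, object, offset,
 *                                VM_PROT_READ | VM_PROT_WRITE,
 *                                FALSE, &size, &addr, &need_unmap);
 *      if (kr == KERN_SUCCESS) {
 *              ... touch the page through addr ...
 *              if (need_unmap)
 *                      vm_paging_unmap_object(object, addr, addr + size);
 *      }
 */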
decl_simple_lock_data(extern, vm_paging_lock)

/*
 * Backing store throttle, engaged when the backing store is exhausted
 */
extern unsigned int vm_backing_store_low;

extern void vm_pageout_steal_laundry(
        vm_page_t       page,
        boolean_t       queues_locked);

extern boolean_t vm_page_is_slideable(vm_page_t m);

extern kern_return_t vm_page_slide(vm_page_t page, vm_map_offset_t kernel_mapping_offset);
#endif /* MACH_KERNEL_PRIVATE */

#if UPL_DEBUG
extern kern_return_t upl_ubc_alias_set(
        upl_t           upl,
        uintptr_t       alias1,
        uintptr_t       alias2);
extern int upl_ubc_alias_get(
        upl_t           upl,
        uintptr_t       *al,
        uintptr_t       *al2);
#endif /* UPL_DEBUG */

extern void vm_countdirtypages(void);

extern void vm_backing_store_disable(
        boolean_t       suspend);

extern kern_return_t upl_transpose(
        upl_t           upl1,
        upl_t           upl2);

extern kern_return_t mach_vm_pressure_monitor(
        boolean_t       wait_for_pressure,
        unsigned int    nsecs_monitored,
        unsigned int    *pages_reclaimed_p,
        unsigned int    *pages_wanted_p);
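
/*
 * A polling sketch, with semantics read off the declaration: pass
 * wait_for_pressure == FALSE for an immediate snapshot covering the
 * last nsecs_monitored sampling intervals (assumed here to be about
 * one second each), and scale any cache shedding by *pages_wanted_p.
 *
 *      unsigned int    pages_reclaimed, pages_wanted;
 *      kern_return_t   kr;
 *
 *      kr = mach_vm_pressure_monitor(FALSE, 10,
 *                                    &pages_reclaimed, &pages_wanted);
 *      if (kr == KERN_SUCCESS && pages_wanted > 0) {
 *              ... release roughly pages_wanted cached pages ...
 *      }
 */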

extern kern_return_t
vm_set_buffer_cleanup_callout(
        boolean_t (*func)(int));

struct vm_page_stats_reusable {
        SInt32          reusable_count;
        uint64_t        reusable;
        uint64_t        reused;
        uint64_t        reused_wire;
        uint64_t        reused_remove;
        uint64_t        all_reusable_calls;
        uint64_t        partial_reusable_calls;
        uint64_t        all_reuse_calls;
        uint64_t        partial_reuse_calls;
        uint64_t        reusable_pages_success;
        uint64_t        reusable_pages_failure;
        uint64_t        reusable_pages_shared;
        uint64_t        reuse_pages_success;
        uint64_t        reuse_pages_failure;
        uint64_t        can_reuse_success;
        uint64_t        can_reuse_failure;
        uint64_t        reusable_reclaimed;
};
extern struct vm_page_stats_reusable vm_page_stats_reusable;

extern int hibernate_flush_memory(void);
extern void hibernate_reset_stats(void);
extern void hibernate_create_paddr_map(void);

extern void vm_set_restrictions(void);

extern int vm_compressor_mode;
extern int vm_compressor_thread_count;
extern boolean_t vm_restricted_to_single_processor;
extern boolean_t vm_compressor_immediate_preferred;
extern boolean_t vm_compressor_immediate_preferred_override;
extern kern_return_t vm_pageout_compress_page(void **, char *, vm_page_t, boolean_t);
extern void vm_pageout_anonymous_pages(void);


#define VM_PAGER_DEFAULT                        0x1     /* Use default pager. */
#define VM_PAGER_COMPRESSOR_NO_SWAP             0x2     /* In-core compressor only. */
#define VM_PAGER_COMPRESSOR_WITH_SWAP           0x4     /* In-core compressor + swap backend. */
#define VM_PAGER_FREEZER_DEFAULT                0x8     /* Freezer backed by default pager. */
#define VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP     0x10    /* Freezer backed by in-core compressor only, i.e. frozen data remain in-core compressed. */
#define VM_PAGER_FREEZER_COMPRESSOR_WITH_SWAP   0x20    /* Freezer backed by in-core compressor with swap support too. */

#define VM_PAGER_MAX_MODES                      6       /* Total number of vm compressor modes supported */

#define DEFAULT_PAGER_IS_ACTIVE         ((vm_compressor_mode & VM_PAGER_DEFAULT) == VM_PAGER_DEFAULT)

#define COMPRESSED_PAGER_IS_ACTIVE      (vm_compressor_mode & (VM_PAGER_COMPRESSOR_NO_SWAP | VM_PAGER_COMPRESSOR_WITH_SWAP))
#define COMPRESSED_PAGER_IS_SWAPLESS    ((vm_compressor_mode & VM_PAGER_COMPRESSOR_NO_SWAP) == VM_PAGER_COMPRESSOR_NO_SWAP)
#define COMPRESSED_PAGER_IS_SWAPBACKED  ((vm_compressor_mode & VM_PAGER_COMPRESSOR_WITH_SWAP) == VM_PAGER_COMPRESSOR_WITH_SWAP)

#define DEFAULT_FREEZER_IS_ACTIVE       ((vm_compressor_mode & VM_PAGER_FREEZER_DEFAULT) == VM_PAGER_FREEZER_DEFAULT)

#define DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE      (vm_compressor_mode & (VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP | VM_PAGER_FREEZER_COMPRESSOR_WITH_SWAP))
#define DEFAULT_FREEZER_COMPRESSED_PAGER_IS_SWAPLESS    ((vm_compressor_mode & VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP) == VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP)
#define DEFAULT_FREEZER_COMPRESSED_PAGER_IS_SWAPBACKED  ((vm_compressor_mode & VM_PAGER_FREEZER_COMPRESSOR_WITH_SWAP) == VM_PAGER_FREEZER_COMPRESSOR_WITH_SWAP)
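
/*
 * Illustrative arithmetic: the pager modes are one-hot bits, so a
 * configuration running the swap-backed compressor plus the
 * swap-backed freezer compressor would set
 * vm_compressor_mode = 0x4 | 0x20 = 0x24.  Under that value,
 * COMPRESSED_PAGER_IS_ACTIVE and COMPRESSED_PAGER_IS_SWAPBACKED are
 * nonzero, DEFAULT_PAGER_IS_ACTIVE and COMPRESSED_PAGER_IS_SWAPLESS
 * are zero, and VM_DYNAMIC_PAGING_ENABLED() is therefore true
 * regardless of its port argument.
 */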


#endif /* KERNEL_PRIVATE */

#endif /* _VM_VM_PAGEOUT_H_ */