/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_pageout.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Declarations for the pageout daemon interface.
 */

#ifndef	_VM_VM_PAGEOUT_H_
#define	_VM_VM_PAGEOUT_H_

#ifdef	KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>

#include <kern/kern_types.h>
#include <kern/locks.h>

#include <libkern/OSAtomic.h>


#include <vm/vm_options.h>

#ifdef	MACH_KERNEL_PRIVATE
#include <vm/vm_page.h>
#endif

#include <sys/kdebug.h>

#define VM_PAGE_AVAILABLE_COUNT()	((unsigned int)(vm_page_cleaned_count))

/* externally manipulated counters */
extern unsigned int vm_pageout_cleaned_reactivated, vm_pageout_cleaned_fault_reactivated, vm_pageout_cleaned_commit_reactivated;

#if CONFIG_FREEZE
extern boolean_t memorystatus_freeze_enabled;
#define VM_DYNAMIC_PAGING_ENABLED(port) (COMPRESSED_PAGER_IS_ACTIVE || (memorystatus_freeze_enabled == FALSE && IP_VALID(port)))
#else
#define VM_DYNAMIC_PAGING_ENABLED(port) (COMPRESSED_PAGER_IS_ACTIVE || IP_VALID(port))
#endif

#if VM_PRESSURE_EVENTS
extern boolean_t vm_pressure_events_enabled;
#endif /* VM_PRESSURE_EVENTS */

extern int	vm_debug_events;

#define VMF_CHECK_ZFDELAY		0x100
#define VMF_COWDELAY			0x101
#define VMF_ZFDELAY			0x102
#define VMF_COMPRESSORDELAY		0x103

#define VM_PAGEOUT_SCAN			0x104
#define VM_PAGEOUT_BALANCE		0x105
#define VM_PAGEOUT_FREELIST		0x106
#define VM_PAGEOUT_PURGEONE		0x107
#define VM_PAGEOUT_CACHE_EVICT		0x108
#define VM_PAGEOUT_THREAD_BLOCK		0x109
#define VM_PAGEOUT_JETSAM		0x10A

#define VM_UPL_PAGE_WAIT		0x120
#define VM_IOPL_PAGE_WAIT		0x121
#define VM_PAGE_WAIT_BLOCK		0x122

#if CONFIG_IOSCHED
#define VM_PAGE_SLEEP			0x123
#define VM_PAGE_EXPEDITE		0x124
#endif

#define VM_PRESSURE_EVENT		0x130
#define VM_EXECVE			0x131
#define VM_WAKEUP_COMPACTOR_SWAPPER	0x132

#define VM_DEBUG_EVENT(name, event, control, arg1, arg2, arg3, arg4)	\
	MACRO_BEGIN							\
	if (vm_debug_events) {						\
		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	}								\
	MACRO_END

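/*
 * Illustrative sketch, not part of this interface: VM_DEBUG_EVENT() emits a
 * KERNEL_DEBUG_CONSTANT trace point (under the DBG_MACH_VM class) only when
 * vm_debug_events is non-zero, so a caller can bracket a phase of work with
 * one of the event codes defined above.  The counter arguments below are
 * hypothetical placeholders.
 *
 *	VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
 *		       pages_considered, pages_freed, 0, 0);
 *	...
 *	VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
 *		       pages_considered, pages_freed, 0, 0);
 */
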
extern void memoryshot(unsigned int event, unsigned int control);

extern kern_return_t vm_map_create_upl(
	vm_map_t		map,
	vm_map_address_t	offset,
	upl_size_t		*upl_size,
	upl_t			*upl,
	upl_page_info_array_t	page_list,
	unsigned int		*count,
	int			*flags);

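/*
 * Illustrative sketch, not part of this interface: vm_map_create_upl() is
 * typically asked for a UPL describing a range of a map, and the resulting
 * UPL is committed (or aborted) once the I/O that needed the pages is done.
 * The local names, the UPL_SET_LITE/UPL_SET_INTERNAL control flags and the
 * upl_commit()/upl_deallocate() calls shown here are assumptions made for
 * the example, not requirements of this routine.
 *
 *	upl_size_t	upl_size = PAGE_SIZE;
 *	upl_t		upl = NULL;
 *	unsigned int	count = 0;
 *	int		flags = UPL_SET_LITE | UPL_SET_INTERNAL;
 *	kern_return_t	kr;
 *
 *	kr = vm_map_create_upl(map, offset, &upl_size, &upl, NULL, &count, &flags);
 *	if (kr == KERN_SUCCESS) {
 *		... issue the I/O against the pages the UPL describes ...
 *		upl_commit(upl, NULL, 0);
 *		upl_deallocate(upl);
 *	}
 */
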
extern ppnum_t upl_get_highest_page(
	upl_t			upl);

extern upl_size_t upl_get_size(
	upl_t			upl);

extern void iopl_valid_data(
	upl_t			upl_ptr);

#ifndef	MACH_KERNEL_PRIVATE
typedef struct vm_page	*vm_page_t;
#endif

extern void		vm_page_free_list(
				vm_page_t	mem,
				boolean_t	prepare_object);

extern kern_return_t	vm_page_alloc_list(
				int		page_count,
				int		flags,
				vm_page_t	*list);

extern void		vm_page_set_offset(vm_page_t page, vm_object_offset_t offset);
extern vm_object_offset_t vm_page_get_offset(vm_page_t page);
extern ppnum_t		vm_page_get_phys_page(vm_page_t page);
extern vm_page_t	vm_page_get_next(vm_page_t page);

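/*
 * Illustrative sketch, not part of this interface: vm_page_alloc_list()
 * returns a chain of pages that can be walked with vm_page_get_next() and
 * released in one shot with vm_page_free_list().  The flags value and the
 * local names below are hypothetical.
 *
 *	vm_page_t	page_list = NULL;
 *	vm_page_t	p;
 *
 *	if (vm_page_alloc_list(npages, 0, &page_list) == KERN_SUCCESS) {
 *		for (p = page_list; p != NULL; p = vm_page_get_next(p)) {
 *			... use vm_page_get_phys_page(p) for the physical address ...
 *		}
 *		vm_page_free_list(page_list, FALSE);
 *	}
 */
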
extern kern_return_t	mach_vm_pressure_level_monitor(boolean_t wait_for_pressure, unsigned int *pressure_level);

#ifdef	MACH_KERNEL_PRIVATE

#include <vm/vm_page.h>

extern unsigned int	vm_pageout_scan_event_counter;
extern unsigned int	vm_page_anonymous_count;


/*
 * must hold the page queues lock to
 * manipulate this structure
 */
struct vm_pageout_queue {
	queue_head_t	pgo_pending;	/* laundry pages to be processed by pager's iothread */
	unsigned int	pgo_laundry;	/* current count of laundry pages on queue or in flight */
	unsigned int	pgo_maxlaundry;
	uint64_t	pgo_tid;	/* thread ID of I/O thread that services this queue */
	uint8_t		pgo_lowpriority; /* iothread is set to use low priority I/O */

	unsigned int	pgo_idle:1,	/* iothread is blocked waiting for work to do */
			pgo_busy:1,	/* iothread is currently processing request from pgo_pending */
			pgo_throttled:1,/* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */
			pgo_draining:1,
			pgo_inited:1,
			:0;
};

#define VM_PAGE_Q_THROTTLED(q)		\
	((q)->pgo_laundry >= (q)->pgo_maxlaundry)

extern struct	vm_pageout_queue	vm_pageout_queue_internal;
extern struct	vm_pageout_queue	vm_pageout_queue_external;

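/*
 * Illustrative sketch, not part of this interface: VM_PAGE_Q_THROTTLED() is
 * the check the pageout path uses to decide whether a queue already has its
 * maximum amount of laundry outstanding; when it is true the caller backs
 * off (relying on pgo_throttled/pgo_draining wakeups) instead of queueing
 * more work, along these lines:
 *
 *	if (VM_PAGE_Q_THROTTLED(&vm_pageout_queue_internal)) {
 *		... wait for pgo_laundry to drop before sending more pages ...
 *	}
 */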

/*
 *	Routines exported to Mach.
 */
extern void		vm_pageout(void);

extern kern_return_t	vm_pageout_internal_start(void);

extern void		vm_pageout_object_terminate(
					vm_object_t	object);

extern void		vm_pageout_cluster(
					vm_page_t	m,
					boolean_t	pageout);

extern void		vm_pageout_initialize_page(
					vm_page_t	m);

extern void		vm_pageclean_setup(
					vm_page_t		m,
					vm_page_t		new_m,
					vm_object_t		new_object,
					vm_object_offset_t	new_offset);

/* UPL exported routines and structures */

#define upl_lock_init(object)		lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define upl_lock_destroy(object)	lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define upl_lock(object)		lck_mtx_lock(&(object)->Lock)
#define upl_unlock(object)		lck_mtx_unlock(&(object)->Lock)

#define MAX_VECTOR_UPL_ELEMENTS	8

struct _vector_upl_iostates {
	upl_offset_t	offset;
	upl_size_t	size;
};

typedef struct _vector_upl_iostates vector_upl_iostates_t;

struct _vector_upl {
	upl_size_t		size;
	uint32_t		num_upls;
	uint32_t		invalid_upls;
	uint32_t		_reserved;
	vm_map_t		submap;
	vm_offset_t		submap_dst_addr;
	vm_object_offset_t	offset;
	upl_t			upl_elems[MAX_VECTOR_UPL_ELEMENTS];
	upl_page_info_array_t	pagelist;
	vector_upl_iostates_t	upl_iostates[MAX_VECTOR_UPL_ELEMENTS];
};

typedef struct _vector_upl *vector_upl_t;

/* universal page list structure */

#if UPL_DEBUG
#define UPL_DEBUG_STACK_FRAMES	16
#define UPL_DEBUG_COMMIT_RECORDS 4

struct ucd {
	upl_offset_t	c_beg;
	upl_offset_t	c_end;
	int		c_aborted;
	void		*c_retaddr[UPL_DEBUG_STACK_FRAMES];
};
#endif


struct upl {
	decl_lck_mtx_data(,	Lock)	/* Synchronization */
	int		ref_count;
	int		ext_ref_count;
	int		flags;
	vm_object_t	src_object;	/* object derived from */
	vm_object_offset_t offset;
	upl_size_t	size;		/* size in bytes of the address space */
	vm_offset_t	kaddr;		/* secondary mapping in kernel */
	vm_object_t	map_object;
	ppnum_t		highest_page;
	void		*vector_upl;
#if CONFIG_IOSCHED
	int		upl_priority;
	uint64_t	*upl_reprio_info;
	void		*decmp_io_upl;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	thread_t	upl_creator;
	queue_chain_t	uplq;		/* List of outstanding upls on an obj */
#endif
#if UPL_DEBUG
	uintptr_t	ubc_alias1;
	uintptr_t	ubc_alias2;

	uint32_t	upl_state;
	uint32_t	upl_commit_index;
	void		*upl_create_retaddr[UPL_DEBUG_STACK_FRAMES];

	struct ucd	upl_commit_records[UPL_DEBUG_COMMIT_RECORDS];
#endif	/* UPL_DEBUG */
};

/* upl struct flags */
#define UPL_PAGE_LIST_MAPPED	0x1
#define UPL_KERNEL_MAPPED	0x2
#define UPL_CLEAR_DIRTY		0x4
#define UPL_COMPOSITE_LIST	0x8
#define UPL_INTERNAL		0x10
#define UPL_PAGE_SYNC_DONE	0x20
#define UPL_DEVICE_MEMORY	0x40
#define UPL_PAGEOUT		0x80
#define UPL_LITE		0x100
#define UPL_IO_WIRE		0x200
#define UPL_ACCESS_BLOCKED	0x400
#define UPL_ENCRYPTED		0x800
#define UPL_SHADOWED		0x1000
#define UPL_KERNEL_OBJECT	0x2000
#define UPL_VECTOR		0x4000
#define UPL_SET_DIRTY		0x8000
#define UPL_HAS_BUSY		0x10000
#define UPL_TRACKED_BY_OBJECT	0x20000
#define UPL_EXPEDITE_SUPPORTED	0x40000
#define UPL_DECMP_REQ		0x80000
#define UPL_DECMP_REAL_IO	0x100000

/* flags for upl_create flags parameter */
#define UPL_CREATE_EXTERNAL	0
#define UPL_CREATE_INTERNAL	0x1
#define UPL_CREATE_LITE		0x2
#define UPL_CREATE_IO_TRACKING	0x4
#define UPL_CREATE_EXPEDITE_SUP	0x8

extern upl_t vector_upl_create(vm_offset_t);
extern void vector_upl_deallocate(upl_t);
extern boolean_t vector_upl_is_valid(upl_t);
extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t);
extern void vector_upl_set_pagelist(upl_t);
extern void vector_upl_set_submap(upl_t, vm_map_t, vm_offset_t);
extern void vector_upl_get_submap(upl_t, vm_map_t*, vm_offset_t*);
extern void vector_upl_set_iostate(upl_t, upl_t, upl_offset_t, upl_size_t);
extern void vector_upl_get_iostate(upl_t, upl_t, upl_offset_t*, upl_size_t*);
extern void vector_upl_get_iostate_byindex(upl_t, uint32_t, upl_offset_t*, upl_size_t*);
extern upl_t vector_upl_subupl_byindex(upl_t, uint32_t);
extern upl_t vector_upl_subupl_byoffset(upl_t, upl_offset_t*, upl_size_t*);

extern void vm_object_set_pmap_cache_attr(
		vm_object_t		object,
		upl_page_info_array_t	user_page_list,
		unsigned int		num_pages,
		boolean_t		batch_pmap_op);

extern kern_return_t vm_object_iopl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags);

extern kern_return_t vm_object_super_upl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_size_t		super_cluster,
	upl_t			*upl,
	upl_page_info_t		*user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags);

/* should be just a regular vm_map_enter() */
extern kern_return_t vm_map_enter_upl(
	vm_map_t		map,
	upl_t			upl,
	vm_map_offset_t		*dst_addr);

/* should be just a regular vm_map_remove() */
extern kern_return_t vm_map_remove_upl(
	vm_map_t		map,
	upl_t			upl);

/* wired page list structure */
typedef uint32_t *wpl_array_t;

extern void vm_page_free_reserve(int pages);

extern void vm_pageout_throttle_down(vm_page_t page);
extern void vm_pageout_throttle_up(vm_page_t page);

/*
 * ENCRYPTED SWAP:
 */
extern void upl_encrypt(
	upl_t			upl,
	upl_offset_t		crypt_offset,
	upl_size_t		crypt_size);
extern void vm_page_encrypt(
	vm_page_t		page,
	vm_map_offset_t		kernel_map_offset);
extern boolean_t vm_pages_encrypted;	/* are there encrypted pages ? */
extern void vm_page_decrypt(
	vm_page_t		page,
	vm_map_offset_t		kernel_map_offset);
extern kern_return_t vm_paging_map_object(
	vm_page_t		page,
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_prot_t		protection,
	boolean_t		can_unlock_object,
	vm_map_size_t		*size,		/* IN/OUT */
	vm_map_offset_t		*address,	/* OUT */
	boolean_t		*need_unmap);	/* OUT */
extern void vm_paging_unmap_object(
	vm_object_t		object,
	vm_map_offset_t		start,
	vm_map_offset_t		end);
decl_simple_lock_data(extern, vm_paging_lock)

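/*
 * Illustrative sketch, not part of this interface: vm_paging_map_object()
 * sets up a temporary kernel mapping for a page (or a range of an object)
 * and reports, via need_unmap, whether the caller must tear that mapping
 * down again with vm_paging_unmap_object().  Local names are hypothetical.
 *
 *	vm_map_size_t	map_size = PAGE_SIZE;
 *	vm_map_offset_t	kernel_addr;
 *	boolean_t	need_unmap;
 *	kern_return_t	kr;
 *
 *	kr = vm_paging_map_object(page, object, offset, VM_PROT_READ,
 *				  FALSE, &map_size, &kernel_addr, &need_unmap);
 *	if (kr == KERN_SUCCESS) {
 *		... operate on the data at kernel_addr ...
 *		if (need_unmap)
 *			vm_paging_unmap_object(object, kernel_addr,
 *					       kernel_addr + map_size);
 *	}
 */
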
/*
 * Backing store throttle when BS is exhausted
 */
extern unsigned int	vm_backing_store_low;

extern void vm_pageout_steal_laundry(
	vm_page_t	page,
	boolean_t	queues_locked);

extern boolean_t vm_page_is_slideable(vm_page_t m);

extern kern_return_t vm_page_slide(vm_page_t page, vm_map_offset_t kernel_mapping_offset);

#endif  /* MACH_KERNEL_PRIVATE */

#if UPL_DEBUG
extern kern_return_t upl_ubc_alias_set(
	upl_t		upl,
	uintptr_t	alias1,
	uintptr_t	alias2);
extern int upl_ubc_alias_get(
	upl_t		upl,
	uintptr_t	*al,
	uintptr_t	*al2);
#endif /* UPL_DEBUG */

extern void vm_countdirtypages(void);

extern void vm_backing_store_disable(
	boolean_t	suspend);

extern kern_return_t upl_transpose(
	upl_t	upl1,
	upl_t	upl2);

extern kern_return_t mach_vm_pressure_monitor(
	boolean_t	wait_for_pressure,
	unsigned int	nsecs_monitored,
	unsigned int	*pages_reclaimed_p,
	unsigned int	*pages_wanted_p);

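/*
 * Illustrative sketch, not part of this interface: mach_vm_pressure_monitor()
 * can either sample recent pageout activity or block until there is memory
 * pressure, and it reports how many pages were recently reclaimed and how
 * many more the pageout daemon still wants.  The monitoring-window value and
 * the caller's reaction to pages_wanted below are hypothetical.
 *
 *	unsigned int	reclaimed = 0;
 *	unsigned int	wanted = 0;
 *	kern_return_t	kr;
 *
 *	kr = mach_vm_pressure_monitor(TRUE, 30, &reclaimed, &wanted);
 *	if (kr == KERN_SUCCESS && wanted > 0) {
 *		... trim roughly "wanted" pages worth of caller-owned cache ...
 *	}
 */
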
extern kern_return_t
vm_set_buffer_cleanup_callout(
	boolean_t	(*func)(int));

struct vm_page_stats_reusable {
	SInt32		reusable_count;
	uint64_t	reusable;
	uint64_t	reused;
	uint64_t	reused_wire;
	uint64_t	reused_remove;
	uint64_t	all_reusable_calls;
	uint64_t	partial_reusable_calls;
	uint64_t	all_reuse_calls;
	uint64_t	partial_reuse_calls;
	uint64_t	reusable_pages_success;
	uint64_t	reusable_pages_failure;
	uint64_t	reusable_pages_shared;
	uint64_t	reuse_pages_success;
	uint64_t	reuse_pages_failure;
	uint64_t	can_reuse_success;
	uint64_t	can_reuse_failure;
	uint64_t	reusable_reclaimed;
};
extern struct vm_page_stats_reusable vm_page_stats_reusable;

extern int hibernate_flush_memory(void);
extern void hibernate_reset_stats(void);
extern void hibernate_create_paddr_map(void);

extern int vm_compressor_mode;
extern int vm_compressor_thread_count;

#define VM_PAGER_DEFAULT			0x1	/* Use default pager. */
#define VM_PAGER_COMPRESSOR_NO_SWAP		0x2	/* In-core compressor only. */
#define VM_PAGER_COMPRESSOR_WITH_SWAP		0x4	/* In-core compressor + swap backend. */
#define VM_PAGER_FREEZER_DEFAULT		0x8	/* Freezer backed by default pager.*/
#define VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP	0x10	/* Freezer backed by in-core compressor only i.e. frozen data remain in-core compressed.*/
#define VM_PAGER_FREEZER_COMPRESSOR_WITH_SWAP	0x20	/* Freezer backed by in-core compressor with swap support too.*/

#define VM_PAGER_MAX_MODES			6	/* Total number of vm compressor modes supported */

#define DEFAULT_PAGER_IS_ACTIVE		((vm_compressor_mode & VM_PAGER_DEFAULT) == VM_PAGER_DEFAULT)

#define COMPRESSED_PAGER_IS_ACTIVE	(vm_compressor_mode & (VM_PAGER_COMPRESSOR_NO_SWAP | VM_PAGER_COMPRESSOR_WITH_SWAP))

#define DEFAULT_FREEZER_IS_ACTIVE	((vm_compressor_mode & VM_PAGER_FREEZER_DEFAULT) == VM_PAGER_FREEZER_DEFAULT)

#define DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE	(vm_compressor_mode & (VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP | VM_PAGER_FREEZER_COMPRESSOR_WITH_SWAP))
#define DEFAULT_FREEZER_COMPRESSED_PAGER_IS_SWAPLESS	((vm_compressor_mode & VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP) == VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP)
#define DEFAULT_FREEZER_COMPRESSED_PAGER_IS_SWAPBACKED	((vm_compressor_mode & VM_PAGER_FREEZER_COMPRESSOR_WITH_SWAP) == VM_PAGER_FREEZER_COMPRESSOR_WITH_SWAP)
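
/*
 * Illustrative sketch, not part of this interface: vm_compressor_mode is a
 * bit mask assembled from the VM_PAGER_* values above, so policy decisions
 * reduce to the mask tests wrapped by these macros.  A hypothetical caller
 * choosing where anonymous pages go might write:
 *
 *	if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
 *		... send the page to the compressor ...
 *	} else if (DEFAULT_PAGER_IS_ACTIVE) {
 *		... send the page to the default pager ...
 *	}
 */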


#endif	/* KERNEL_PRIVATE */

#endif	/* _VM_VM_PAGEOUT_H_ */