/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_pageout.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Declarations for the pageout daemon interface.
 */

#ifndef _VM_VM_PAGEOUT_H_
#define _VM_VM_PAGEOUT_H_

#ifdef KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>

#include <kern/kern_types.h>
#include <kern/lock.h>

#include <libkern/OSAtomic.h>


#include <vm/vm_options.h>

#ifdef MACH_KERNEL_PRIVATE
#include <vm/vm_page.h>
#endif

#include <sys/kdebug.h>

#if CONFIG_FREEZE
extern boolean_t vm_freeze_enabled;
#define VM_DYNAMIC_PAGING_ENABLED(port) ((vm_freeze_enabled == FALSE) && IP_VALID(port))
#else
#define VM_DYNAMIC_PAGING_ENABLED(port) IP_VALID(port)
#endif
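
/*
 * Illustrative sketch, not part of the original header: callers are expected
 * to gate swap-backed ("dynamic") paging on this macro, passing whatever
 * default-pager port they hold.  The port name below is a placeholder.
 *
 *	if (VM_DYNAMIC_PAGING_ENABLED(default_pager_port)) {
 *		... pages may be laundered to the default pager ...
 *	}
 */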


extern int	vm_debug_events;

#define VMF_CHECK_ZFDELAY	0x100
#define VMF_COWDELAY		0x101
#define VMF_ZFDELAY		0x102

#define VM_PAGEOUT_SCAN		0x104
#define VM_PAGEOUT_BALANCE	0x105
#define VM_PAGEOUT_FREELIST	0x106
#define VM_PAGEOUT_PURGEONE	0x107
#define VM_PAGEOUT_CACHE_EVICT	0x108
#define VM_PAGEOUT_THREAD_BLOCK	0x109

#define VM_UPL_PAGE_WAIT	0x120
#define VM_IOPL_PAGE_WAIT	0x121

#define VM_DEBUG_EVENT(name, event, control, arg1, arg2, arg3, arg4)	\
	MACRO_BEGIN							\
	if (vm_debug_events) {						\
		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	}								\
	MACRO_END
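
/*
 * Illustrative sketch, not part of the original header: the pageout code
 * brackets interesting phases with these trace points roughly as below.
 * The `name` argument is only a readability tag (the macro body ignores it),
 * and the numeric arguments shown are placeholders.
 *
 *	VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START, 0, 0, 0, 0);
 *	... run one pass of the pageout scan ...
 *	VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END, 0, 0, 0, 0);
 */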



extern kern_return_t vm_map_create_upl(
	vm_map_t		map,
	vm_map_address_t	offset,
	upl_size_t		*upl_size,
	upl_t			*upl,
	upl_page_info_array_t	page_list,
	unsigned int		*count,
	int			*flags);
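
/*
 * Usage sketch, not part of the original header: a file-I/O path can ask for
 * a UPL describing a run of a task's address space roughly as below.  The
 * buffer sizes, flag combination, and error handling are placeholders.
 *
 *	upl_t			upl = NULL;
 *	upl_page_info_t		pl[MAX_UPL_SIZE];
 *	unsigned int		count = MAX_UPL_SIZE;
 *	upl_size_t		size = PAGE_SIZE;
 *	int			flags = UPL_FILE_IO | UPL_SET_LITE;
 *
 *	if (vm_map_create_upl(current_map(), user_addr, &size,
 *			      &upl, pl, &count, &flags) == KERN_SUCCESS) {
 *		... operate on the pages described by pl[0..count-1] ...
 *		upl_commit(upl, pl, count);
 *		upl_deallocate(upl);
 *	}
 */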

extern ppnum_t upl_get_highest_page(
	upl_t			upl);

extern upl_size_t upl_get_size(
	upl_t			upl);


#ifndef	MACH_KERNEL_PRIVATE
typedef struct vm_page	*vm_page_t;
#endif

extern void vm_page_free_list(
	vm_page_t	mem,
	boolean_t	prepare_object);

extern kern_return_t vm_page_alloc_list(
	int		page_count,
	int		flags,
	vm_page_t	*list);

extern void vm_page_set_offset(vm_page_t page, vm_object_offset_t offset);
extern vm_object_offset_t vm_page_get_offset(vm_page_t page);
extern ppnum_t vm_page_get_phys_page(vm_page_t page);
extern vm_page_t vm_page_get_next(vm_page_t page);

#ifdef	MACH_KERNEL_PRIVATE

#include <vm/vm_page.h>

extern unsigned int	vm_pageout_scan_event_counter;
extern unsigned int	vm_zf_queue_count;


extern uint64_t	vm_zf_count;

#define VM_ZF_COUNT_INCR()				\
	MACRO_BEGIN					\
	OSAddAtomic64(1, (SInt64 *) &vm_zf_count);	\
	MACRO_END					\

#define VM_ZF_COUNT_DECR()				\
	MACRO_BEGIN					\
	OSAddAtomic64(-1, (SInt64 *) &vm_zf_count);	\
	MACRO_END					\

/*
 * must hold the page queues lock to
 * manipulate this structure
 */
struct vm_pageout_queue {
	queue_head_t	pgo_pending;	/* laundry pages to be processed by pager's iothread */
	unsigned int	pgo_laundry;	/* current count of laundry pages on queue or in flight */
	unsigned int	pgo_maxlaundry;	/* throttle limit: maximum laundry pages allowed outstanding */

	unsigned int	pgo_idle:1,	/* iothread is blocked waiting for work to do */
			pgo_busy:1,     /* iothread is currently processing request from pgo_pending */
			pgo_throttled:1,/* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */
			pgo_draining:1,	/* a waiter needs a wakeup when this queue's laundry fully drains */
			:0;
};

#define VM_PAGE_Q_THROTTLED(q)		\
	((q)->pgo_laundry >= (q)->pgo_maxlaundry)

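/*
 * Illustrative sketch, not part of the original header: pageout-scan style
 * code checks the throttle before adding more laundry to a queue; the
 * wait/wakeup details are simplified placeholders.
 *
 *	struct vm_pageout_queue *q = &vm_pageout_queue_internal;
 *
 *	if (VM_PAGE_Q_THROTTLED(q)) {
 *		q->pgo_throttled = TRUE;
 *		... block until the iothread drains pgo_laundry and wakes us ...
 *	}
 */
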
extern struct vm_pageout_queue vm_pageout_queue_internal;
extern struct vm_pageout_queue vm_pageout_queue_external;


/*
 * Routines exported to Mach.
 */
extern void		vm_pageout(void);

extern kern_return_t	vm_pageout_internal_start(void);

extern void		vm_pageout_object_terminate(
					vm_object_t	object);

extern void		vm_pageout_cluster(
					vm_page_t	m);

extern void		vm_pageout_initialize_page(
					vm_page_t	m);

extern void		vm_pageclean_setup(
					vm_page_t		m,
					vm_page_t		new_m,
					vm_object_t		new_object,
					vm_object_offset_t	new_offset);

/* UPL exported routines and structures */

#define upl_lock_init(object)	lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define upl_lock_destroy(object)	lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define upl_lock(object)	lck_mtx_lock(&(object)->Lock)
#define upl_unlock(object)	lck_mtx_unlock(&(object)->Lock)

#define MAX_VECTOR_UPL_ELEMENTS	8

struct _vector_upl_iostates {
	upl_offset_t	offset;
	upl_size_t	size;
};

typedef struct _vector_upl_iostates vector_upl_iostates_t;

struct _vector_upl {
	upl_size_t		size;
	uint32_t		num_upls;	/* number of sub-UPLs in upl_elems[] */
	uint32_t		invalid_upls;	/* count of sub-UPLs that have been invalidated */
	uint32_t		_reserved;
	vm_map_t		submap;
	vm_offset_t		submap_dst_addr;
	vm_object_offset_t	offset;
	upl_t			upl_elems[MAX_VECTOR_UPL_ELEMENTS];
	upl_page_info_array_t	pagelist;
	vector_upl_iostates_t	upl_iostates[MAX_VECTOR_UPL_ELEMENTS];	/* per-sub-UPL I/O offset/size */
};

typedef struct _vector_upl* vector_upl_t;

/* universal page list structure */

#if UPL_DEBUG
#define	UPL_DEBUG_STACK_FRAMES	16
#define UPL_DEBUG_COMMIT_RECORDS 4

struct ucd {
	upl_offset_t	c_beg;
	upl_offset_t	c_end;
	int		c_aborted;
	void *		c_retaddr[UPL_DEBUG_STACK_FRAMES];
};
#endif


struct upl {
	decl_lck_mtx_data(,	Lock)	/* Synchronization */
	int		ref_count;
	int		ext_ref_count;
	int		flags;
	vm_object_t	src_object;	/* object derived from */
	vm_object_offset_t offset;
	upl_size_t	size;		/* size in bytes of the address space */
	vm_offset_t	kaddr;		/* secondary mapping in kernel */
	vm_object_t	map_object;
	ppnum_t		highest_page;
	void		*vector_upl;
#if UPL_DEBUG
	uintptr_t	ubc_alias1;
	uintptr_t	ubc_alias2;
	queue_chain_t	uplq;		/* List of outstanding upls on an obj */

	thread_t	upl_creator;
	uint32_t	upl_state;
	uint32_t	upl_commit_index;
	void		*upl_create_retaddr[UPL_DEBUG_STACK_FRAMES];

	struct ucd	upl_commit_records[UPL_DEBUG_COMMIT_RECORDS];
#endif	/* UPL_DEBUG */
};

/* upl struct flags */
#define UPL_PAGE_LIST_MAPPED	0x1
#define UPL_KERNEL_MAPPED	0x2
#define	UPL_CLEAR_DIRTY		0x4
#define UPL_COMPOSITE_LIST	0x8
#define UPL_INTERNAL		0x10
#define UPL_PAGE_SYNC_DONE	0x20
#define UPL_DEVICE_MEMORY	0x40
#define UPL_PAGEOUT		0x80
#define UPL_LITE		0x100
#define UPL_IO_WIRE		0x200
#define UPL_ACCESS_BLOCKED	0x400
#define UPL_ENCRYPTED		0x800
#define UPL_SHADOWED		0x1000
#define UPL_KERNEL_OBJECT	0x2000
#define UPL_VECTOR		0x4000
#define UPL_SET_DIRTY		0x8000
#define UPL_HAS_BUSY		0x10000

/* flags for upl_create flags parameter */
#define UPL_CREATE_EXTERNAL	0
#define UPL_CREATE_INTERNAL	0x1
#define UPL_CREATE_LITE		0x2

extern upl_t vector_upl_create(vm_offset_t);
extern void vector_upl_deallocate(upl_t);
extern boolean_t vector_upl_is_valid(upl_t);
extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t);
extern void vector_upl_set_pagelist(upl_t);
extern void vector_upl_set_submap(upl_t, vm_map_t, vm_offset_t);
extern void vector_upl_get_submap(upl_t, vm_map_t*, vm_offset_t*);
extern void vector_upl_set_iostate(upl_t, upl_t, upl_offset_t, upl_size_t);
extern void vector_upl_get_iostate(upl_t, upl_t, upl_offset_t*, upl_size_t*);
extern void vector_upl_get_iostate_byindex(upl_t, uint32_t, upl_offset_t*, upl_size_t*);
extern upl_t vector_upl_subupl_byindex(upl_t, uint32_t);
extern upl_t vector_upl_subupl_byoffset(upl_t, upl_offset_t*, upl_size_t*);
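
/*
 * Illustrative sketch, not part of the original header: a cluster-I/O style
 * caller can gang several sub-UPLs into one vector UPL and issue them as a
 * single upl_t.  The sub-UPL source and size variables are placeholders.
 *
 *	upl_t vupl = vector_upl_create(upl_offset);
 *
 *	while (... more sub-UPLs to include in this I/O ...) {
 *		upl_t subupl = ...;	// e.g. from vm_object_upl_request()
 *		vector_upl_set_subupl(vupl, subupl, subupl_size);
 *		vector_upl_set_iostate(vupl, subupl, io_offset, io_size);
 *	}
 *	vector_upl_set_pagelist(vupl);
 *	... pass vupl down the I/O path as an ordinary upl_t ...
 */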

extern kern_return_t vm_object_iopl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags);

extern kern_return_t vm_object_super_upl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_size_t		super_cluster,
	upl_t			*upl,
	upl_page_info_t		*user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags);

/* should be just a regular vm_map_enter() */
extern kern_return_t vm_map_enter_upl(
	vm_map_t		map,
	upl_t			upl,
	vm_map_offset_t		*dst_addr);

/* should be just a regular vm_map_remove() */
extern kern_return_t vm_map_remove_upl(
	vm_map_t		map,
	upl_t			upl);

/* wired page list structure */
typedef uint32_t *wpl_array_t;

extern void vm_page_free_reserve(int pages);

extern void vm_pageout_throttle_down(vm_page_t page);
extern void vm_pageout_throttle_up(vm_page_t page);

/*
 * ENCRYPTED SWAP:
 */
extern void upl_encrypt(
	upl_t			upl,
	upl_offset_t		crypt_offset,
	upl_size_t		crypt_size);
extern void vm_page_encrypt(
	vm_page_t		page,
	vm_map_offset_t		kernel_map_offset);
extern boolean_t vm_pages_encrypted;	/* are there encrypted pages? */
extern void vm_page_decrypt(
	vm_page_t		page,
	vm_map_offset_t		kernel_map_offset);
extern kern_return_t vm_paging_map_object(
	vm_map_offset_t		*address,
	vm_page_t		page,
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_map_size_t		*size,
	vm_prot_t		protection,
	boolean_t		can_unlock_object);
extern void vm_paging_unmap_object(
	vm_object_t		object,
	vm_map_offset_t		start,
	vm_map_offset_t		end);
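
/*
 * Illustrative sketch, not part of the original header: encrypt/decrypt-style
 * code maps a single page into the kernel, touches its contents, then unmaps
 * it.  Error handling is elided and the protection choice is a placeholder.
 *
 *	vm_map_offset_t	kaddr = 0;
 *	vm_map_size_t	ksize = PAGE_SIZE;
 *
 *	if (vm_paging_map_object(&kaddr, page, page->object, page->offset,
 *				 &ksize, VM_PROT_READ | VM_PROT_WRITE,
 *				 FALSE) == KERN_SUCCESS) {
 *		... operate on the page contents at kaddr ...
 *		vm_paging_unmap_object(page->object, kaddr, kaddr + ksize);
 *	}
 */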
decl_simple_lock_data(extern, vm_paging_lock)

/*
 * Backing store throttle when BS is exhausted
 */
extern unsigned int	vm_backing_store_low;

extern void vm_pageout_queue_steal(
	vm_page_t		page,
	boolean_t		queues_locked);

extern boolean_t vm_page_is_slideable(vm_page_t m);

extern kern_return_t vm_page_slide(vm_page_t page, vm_map_offset_t kernel_mapping_offset);
#endif  /* MACH_KERNEL_PRIVATE */

#if UPL_DEBUG
extern kern_return_t  upl_ubc_alias_set(
	upl_t		upl,
	uintptr_t	alias1,
	uintptr_t	alias2);
extern int  upl_ubc_alias_get(
	upl_t		upl,
	uintptr_t	*al,
	uintptr_t	*al2);
#endif /* UPL_DEBUG */

extern void vm_countdirtypages(void);

extern void vm_backing_store_disable(
	boolean_t	suspend);

extern kern_return_t upl_transpose(
	upl_t	upl1,
	upl_t	upl2);

extern kern_return_t mach_vm_pressure_monitor(
	boolean_t	wait_for_pressure,
	unsigned int	nsecs_monitored,
	unsigned int	*pages_reclaimed_p,
	unsigned int	*pages_wanted_p);
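
/*
 * Illustrative sketch, not part of the original header: a memory-pressure
 * observer can poll the pageout daemon's counters roughly as below.  The
 * one-second monitoring window is an arbitrary example value.
 *
 *	unsigned int	reclaimed = 0;
 *	unsigned int	wanted = 0;
 *
 *	if (mach_vm_pressure_monitor(TRUE, 1, &reclaimed, &wanted) == KERN_SUCCESS &&
 *	    wanted > 0) {
 *		... the pageout daemon still wants `wanted` more pages ...
 *	}
 */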

extern kern_return_t
vm_set_buffer_cleanup_callout(
	boolean_t (*func)(int));

struct vm_page_stats_reusable {
	SInt32		reusable_count;
	uint64_t	reusable;
	uint64_t	reused;
	uint64_t	reused_wire;
	uint64_t	reused_remove;
	uint64_t	all_reusable_calls;
	uint64_t	partial_reusable_calls;
	uint64_t	all_reuse_calls;
	uint64_t	partial_reuse_calls;
	uint64_t	reusable_pages_success;
	uint64_t	reusable_pages_failure;
	uint64_t	reusable_pages_shared;
	uint64_t	reuse_pages_success;
	uint64_t	reuse_pages_failure;
	uint64_t	can_reuse_success;
	uint64_t	can_reuse_failure;
};
extern struct vm_page_stats_reusable vm_page_stats_reusable;

extern int hibernate_flush_memory(void);

#endif	/* KERNEL_PRIVATE */

#endif	/* _VM_VM_PAGEOUT_H_ */