/*
 * osfmk/vm/vm_pageout.h
 * Source: Apple xnu-1456.1.26 distribution (apple/xnu.git).
 */
/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_pageout.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Declarations for the pageout daemon interface.
 */
65
66#ifndef _VM_VM_PAGEOUT_H_
67#define _VM_VM_PAGEOUT_H_
68
69#ifdef KERNEL_PRIVATE
70
71#include <mach/mach_types.h>
72#include <mach/boolean.h>
73#include <mach/machine/vm_types.h>
74#include <mach/memory_object_types.h>
75
76#include <kern/kern_types.h>
77#include <kern/lock.h>
78
79#include <libkern/OSAtomic.h>
80
81
82#include <vm/vm_options.h>
83
84extern kern_return_t vm_map_create_upl(
85 vm_map_t map,
86 vm_map_address_t offset,
87 upl_size_t *upl_size,
88 upl_t *upl,
89 upl_page_info_array_t page_list,
90 unsigned int *count,
91 int *flags);
92
93extern ppnum_t upl_get_highest_page(
94 upl_t upl);
95
96extern upl_size_t upl_get_size(
97 upl_t upl);
98
99#ifdef MACH_KERNEL_PRIVATE
100
101#include <vm/vm_page.h>
102
103extern unsigned int vm_pageout_scan_event_counter;
104extern unsigned int vm_zf_queue_count;
105
106
107#if defined(__ppc__) /* On ppc, vm statistics are still 32-bit */
108
109extern unsigned int vm_zf_count;
110
111#define VM_ZF_COUNT_INCR() \
112 MACRO_BEGIN \
113 OSAddAtomic(1, (SInt32 *) &vm_zf_count); \
114 MACRO_END \
115
116#define VM_ZF_COUNT_DECR() \
117 MACRO_BEGIN \
118 OSAddAtomic(-1, (SInt32 *) &vm_zf_count); \
119 MACRO_END \
120
121#else /* !(defined(__ppc__)) */
122
123extern uint64_t vm_zf_count;
124
125#define VM_ZF_COUNT_INCR() \
126 MACRO_BEGIN \
127 OSAddAtomic64(1, (SInt64 *) &vm_zf_count); \
128 MACRO_END \
129
130#define VM_ZF_COUNT_DECR() \
131 MACRO_BEGIN \
132 OSAddAtomic64(-1, (SInt64 *) &vm_zf_count); \
133 MACRO_END \
134
135#endif /* !(defined(__ppc__)) */
136
137/*
138 * Routines exported to Mach.
139 */
140extern void vm_pageout(void);
141
142extern kern_return_t vm_pageout_internal_start(void);
143
144extern void vm_pageout_object_terminate(
145 vm_object_t object);
146
147extern void vm_pageout_cluster(
148 vm_page_t m);
149
150extern void vm_pageout_initialize_page(
151 vm_page_t m);
152
153extern void vm_pageclean_setup(
154 vm_page_t m,
155 vm_page_t new_m,
156 vm_object_t new_object,
157 vm_object_offset_t new_offset);
158
159/* UPL exported routines and structures */
160
161#define upl_lock_init(object) lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
162#define upl_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
163#define upl_lock(object) lck_mtx_lock(&(object)->Lock)
164#define upl_unlock(object) lck_mtx_unlock(&(object)->Lock)
165
166#define MAX_VECTOR_UPL_ELEMENTS 8
167
168struct _vector_upl_iostates{
169 upl_offset_t offset;
170 upl_size_t size;
171};
172
173typedef struct _vector_upl_iostates vector_upl_iostates_t;
174
175struct _vector_upl {
176 upl_size_t size;
177 uint32_t num_upls;
178 uint32_t invalid_upls;
179 uint32_t _reserved;
180 vm_map_t submap;
181 vm_offset_t submap_dst_addr;
182 vm_object_offset_t offset;
183 upl_t upl_elems[MAX_VECTOR_UPL_ELEMENTS];
184 upl_page_info_array_t pagelist;
185 vector_upl_iostates_t upl_iostates[MAX_VECTOR_UPL_ELEMENTS];
186};
187
188typedef struct _vector_upl* vector_upl_t;
189
190/* universal page list structure */
191
192#if UPL_DEBUG
193#define UPL_DEBUG_STACK_FRAMES 16
194#define UPL_DEBUG_COMMIT_RECORDS 4
195
196struct ucd {
197 upl_offset_t c_beg;
198 upl_offset_t c_end;
199 int c_aborted;
200 void * c_retaddr[UPL_DEBUG_STACK_FRAMES];
201};
202#endif
203
204
205struct upl {
206 decl_lck_mtx_data(, Lock) /* Synchronization */
207 int ref_count;
208 int flags;
209 vm_object_t src_object; /* object derived from */
210 vm_object_offset_t offset;
211 upl_size_t size; /* size in bytes of the address space */
212 vm_offset_t kaddr; /* secondary mapping in kernel */
213 vm_object_t map_object;
214 ppnum_t highest_page;
215 void* vector_upl;
216#if UPL_DEBUG
217 uintptr_t ubc_alias1;
218 uintptr_t ubc_alias2;
219 queue_chain_t uplq; /* List of outstanding upls on an obj */
220
221 thread_t upl_creator;
222 uint32_t upl_state;
223 uint32_t upl_commit_index;
224 void *upl_create_retaddr[UPL_DEBUG_STACK_FRAMES];
225
226 struct ucd upl_commit_records[UPL_DEBUG_COMMIT_RECORDS];
227#endif /* UPL_DEBUG */
228};
229
/* upl struct flags */
#define UPL_PAGE_LIST_MAPPED	0x1
#define UPL_KERNEL_MAPPED	0x2
#define	UPL_CLEAR_DIRTY		0x4
#define UPL_COMPOSITE_LIST	0x8
#define UPL_INTERNAL		0x10
#define UPL_PAGE_SYNC_DONE	0x20
#define UPL_DEVICE_MEMORY	0x40
#define UPL_PAGEOUT		0x80
#define UPL_LITE		0x100
#define UPL_IO_WIRE		0x200
#define UPL_ACCESS_BLOCKED	0x400
#define UPL_ENCRYPTED		0x800
#define UPL_SHADOWED		0x1000
#define UPL_KERNEL_OBJECT	0x2000
#define UPL_VECTOR		0x4000

/* flags for upl_create flags parameter */
#define UPL_CREATE_EXTERNAL	0
#define UPL_CREATE_INTERNAL	0x1
#define UPL_CREATE_LITE		0x2
252extern upl_t vector_upl_create(vm_offset_t);
253extern void vector_upl_deallocate(upl_t);
254extern boolean_t vector_upl_is_valid(upl_t);
255extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t);
256extern void vector_upl_set_pagelist(upl_t);
257extern void vector_upl_set_submap(upl_t, vm_map_t, vm_offset_t);
258extern void vector_upl_get_submap(upl_t, vm_map_t*, vm_offset_t*);
259extern void vector_upl_set_iostate(upl_t, upl_t, upl_offset_t, upl_size_t);
260extern void vector_upl_get_iostate(upl_t, upl_t, upl_offset_t*, upl_size_t*);
261extern void vector_upl_get_iostate_byindex(upl_t, uint32_t, upl_offset_t*, upl_size_t*);
262extern upl_t vector_upl_subupl_byindex(upl_t , uint32_t);
263extern upl_t vector_upl_subupl_byoffset(upl_t , upl_offset_t*, upl_size_t*);
264
265extern kern_return_t vm_object_iopl_request(
266 vm_object_t object,
267 vm_object_offset_t offset,
268 upl_size_t size,
269 upl_t *upl_ptr,
270 upl_page_info_array_t user_page_list,
271 unsigned int *page_list_count,
272 int cntrl_flags);
273
274extern kern_return_t vm_object_super_upl_request(
275 vm_object_t object,
276 vm_object_offset_t offset,
277 upl_size_t size,
278 upl_size_t super_cluster,
279 upl_t *upl,
280 upl_page_info_t *user_page_list,
281 unsigned int *page_list_count,
282 int cntrl_flags);
283
284/* should be just a regular vm_map_enter() */
285extern kern_return_t vm_map_enter_upl(
286 vm_map_t map,
287 upl_t upl,
288 vm_map_offset_t *dst_addr);
289
290/* should be just a regular vm_map_remove() */
291extern kern_return_t vm_map_remove_upl(
292 vm_map_t map,
293 upl_t upl);
294
295/* wired page list structure */
296typedef uint32_t *wpl_array_t;
297
298extern void vm_page_free_list(
299 vm_page_t mem,
300 boolean_t prepare_object);
301
302extern void vm_page_free_reserve(int pages);
303
304extern void vm_pageout_throttle_down(vm_page_t page);
305extern void vm_pageout_throttle_up(vm_page_t page);
306
307/*
308 * ENCRYPTED SWAP:
309 */
310extern void upl_encrypt(
311 upl_t upl,
312 upl_offset_t crypt_offset,
313 upl_size_t crypt_size);
314extern void vm_page_encrypt(
315 vm_page_t page,
316 vm_map_offset_t kernel_map_offset);
317extern boolean_t vm_pages_encrypted; /* are there encrypted pages ? */
318extern void vm_page_decrypt(
319 vm_page_t page,
320 vm_map_offset_t kernel_map_offset);
321extern kern_return_t vm_paging_map_object(
322 vm_map_offset_t *address,
323 vm_page_t page,
324 vm_object_t object,
325 vm_object_offset_t offset,
326 vm_map_size_t *size,
327 vm_prot_t protection,
328 boolean_t can_unlock_object);
329extern void vm_paging_unmap_object(
330 vm_object_t object,
331 vm_map_offset_t start,
332 vm_map_offset_t end);
333decl_simple_lock_data(extern, vm_paging_lock)
334
335/*
336 * Backing store throttle when BS is exhausted
337 */
338extern unsigned int vm_backing_store_low;
339
340extern void vm_pageout_queue_steal(
341 vm_page_t page,
342 boolean_t queues_locked);
343
344#endif /* MACH_KERNEL_PRIVATE */
345
346#if UPL_DEBUG
347extern kern_return_t upl_ubc_alias_set(
348 upl_t upl,
349 uintptr_t alias1,
350 uintptr_t alias2);
351extern int upl_ubc_alias_get(
352 upl_t upl,
353 uintptr_t * al,
354 uintptr_t * al2);
355#endif /* UPL_DEBUG */
356
357extern void vm_countdirtypages(void);
358
359extern void vm_backing_store_disable(
360 boolean_t suspend);
361
362extern kern_return_t upl_transpose(
363 upl_t upl1,
364 upl_t upl2);
365
366extern kern_return_t mach_vm_pressure_monitor(
367 boolean_t wait_for_pressure,
368 unsigned int nsecs_monitored,
369 unsigned int *pages_reclaimed_p,
370 unsigned int *pages_wanted_p);
371
372extern kern_return_t
373vm_set_buffer_cleanup_callout(
374 boolean_t (*func)(void));
375
376struct vm_page_stats_reusable {
377 SInt32 reusable_count;
378 uint64_t reusable;
379 uint64_t reused;
380 uint64_t reused_wire;
381 uint64_t reused_remove;
382 uint64_t all_reusable_calls;
383 uint64_t partial_reusable_calls;
384 uint64_t all_reuse_calls;
385 uint64_t partial_reuse_calls;
386 uint64_t reusable_pages_success;
387 uint64_t reusable_pages_failure;
388 uint64_t reusable_pages_shared;
389 uint64_t reuse_pages_success;
390 uint64_t reuse_pages_failure;
391 uint64_t can_reuse_success;
392 uint64_t can_reuse_failure;
393};
394extern struct vm_page_stats_reusable vm_page_stats_reusable;
395
396#endif /* KERNEL_PRIVATE */
397
398#endif /* _VM_VM_PAGEOUT_H_ */