/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:	vm/vm_page.h
 * Author:	Avadis Tevanian, Jr., Michael Wayne Young
 * Date:	1985
 *
 * Resident memory system definitions.
 */

#ifndef	_VM_VM_PAGE_H_
#define _VM_VM_PAGE_H_

#include <debug.h>
#include <vm/vm_options.h>

#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <vm/vm_object.h>
#include <kern/queue.h>
#include <kern/locks.h>

#include <kern/macro_help.h>
#include <libkern/OSAtomic.h>

/*
 * VM_PAGE_MIN_SPECULATIVE_AGE_Q through VM_PAGE_MAX_SPECULATIVE_AGE_Q
 * represents a set of aging bins that are 'protected'...
 *
 * VM_PAGE_SPECULATIVE_AGED_Q is a list of the speculative pages that have
 * not yet been 'claimed' but have been aged out of the protective bins.
 * This occurs in vm_page_speculate when it advances to the next bin
 * and discovers that it is still occupied... at that point, all of the
 * pages in that bin are moved to the VM_PAGE_SPECULATIVE_AGED_Q.  The pages
 * in that bin are all guaranteed to have reached at least the maximum age
 * we allow for a protected page... they can be older if there is no
 * memory pressure to pull them from the bin, or there are no new
 * speculative pages being generated to push them out.
 * This list is the one that vm_pageout_scan will prefer when looking
 * for pages to move to the underweight free list.
 *
 * VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS
 * defines the amount of time a speculative page is normally
 * allowed to live in the 'protected' state (i.e. not available
 * to be stolen if vm_pageout_scan is running and looking for
 * pages)... however, if the total number of speculative pages
 * in the protected state exceeds our limit (defined in vm_pageout.c)
 * and there are none available in VM_PAGE_SPECULATIVE_AGED_Q, then
 * vm_pageout_scan is allowed to steal pages from the protected
 * bucket even if they are underage.
 *
 * vm_pageout_scan is also allowed to pull pages from a protected
 * bin if the bin has reached the "age of consent" we've set.
 */
#define VM_PAGE_MAX_SPECULATIVE_AGE_Q	10
#define VM_PAGE_MIN_SPECULATIVE_AGE_Q	1
#define VM_PAGE_SPECULATIVE_AGED_Q	0

#define VM_PAGE_SPECULATIVE_Q_AGE_MS	500

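/*
 * Illustrative sketch, not part of the original header: the nominal
 * 'protected' lifetime of a speculative page described above follows
 * directly from the two tunables, e.g. 10 bins * 500 ms per bin =
 * 5 seconds by default.  The function name is hypothetical.
 */
static inline uint32_t
example_speculative_protected_window_ms(void)
{
	return VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS;
}
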
struct vm_speculative_age_q {
	/*
	 * memory queue for speculative pages via clustered pageins
	 */
	queue_head_t	age_q;
	mach_timespec_t	age_ts;
};


extern
struct vm_speculative_age_q	vm_page_queue_speculative[];

extern int			speculative_steal_index;
extern int			speculative_age_index;
extern unsigned int		vm_page_speculative_q_age_ms;


#define VM_PAGE_COMPRESSOR_COUNT	(compressor_object->resident_page_count)

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A hash table bucket used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).  [Some fields require that both locks be held to
 *	change that field; holding either lock is sufficient to read.]
 */


#if defined(__LP64__)

/*
 * In order to make the size of a vm_page_t 64 bytes (cache line size for
 * both arm64 and x86_64), we'll keep the next_m pointer packed... as long
 * as the kernel virtual space where we allocate vm_page_t's from doesn't
 * span more than 256 Gbytes, we're safe.  There are live tests in the
 * vm_page_t array allocation and the zone init code to determine if we
 * can safely pack and unpack pointers from the 2 ends of these spaces.
 */
typedef uint32_t	vm_page_packed_t;

#define VM_PAGE_PACK_PTR(m)	(!(m) ? (vm_page_packed_t)0 : ((vm_page_packed_t)((uintptr_t)(((uintptr_t)(m) - (uintptr_t)VM_MIN_KERNEL_AND_KEXT_ADDRESS)) >> 6)))
#define VM_PAGE_UNPACK_PTR(p)	(!(p) ? VM_PAGE_NULL : ((vm_page_t)((((uintptr_t)(p)) << 6) + (uintptr_t)VM_MIN_KERNEL_AND_KEXT_ADDRESS)))

#else

/*
 * We can't do the packing trick on 32 bit architectures, so
 * just turn the macros into noops.
 */
typedef struct vm_page	*vm_page_packed_t;

#define VM_PAGE_PACK_PTR(m)	((vm_page_packed_t)(m))
#define VM_PAGE_UNPACK_PTR(p)	((vm_page_t)(p))

#endif


struct vm_page {
	queue_chain_t	pageq;		/* queue info for FIFO */
					/* queue or free list (P) */

	queue_chain_t	listq;		/* all pages in same object (O) */

	vm_object_offset_t offset;	/* offset into that object (O,P) */
	vm_object_t	object;		/* which object am I in (O&P) */

	vm_page_packed_t next_m;	/* VP bucket link (O) */
	/*
	 * The following word of flags is protected
	 * by the "page queues" lock.
	 *
	 * We use the 'wire_count' field to store the local
	 * queue id if local queues are enabled...
	 * see the comments at 'vm_page_queues_remove' as to
	 * why this is safe to do.
	 */
#define local_id wire_count
	unsigned int	wire_count:16,	/* how many wired down maps use me? (O&P) */
	/* boolean_t */	active:1,	/* page is in active list (P) */
			inactive:1,	/* page is in inactive list (P) */
			clean_queue:1,	/* page is in pre-cleaned list (P) */
			local:1,	/* page is in one of the local queues (P) */
			speculative:1,	/* page is in speculative list (P) */
			throttled:1,	/* pager is not responding or doesn't exist (P) */
			free:1,		/* page is on free list (P) */
			pageout_queue:1,/* page is on queue for pageout (P) */
			laundry:1,	/* page is being cleaned now (P) */
			reference:1,	/* page has been used (P) */
			gobbled:1,	/* page used internally (P) */
			private:1,	/* Page should not be returned to
					 * the free list (P) */
			no_cache:1,	/* page is not to be cached and should
					 * be reused ahead of other pages (P) */

			__unused_pageq_bits:3;	/* 3 bits available here */

	ppnum_t		phys_page;	/* Physical address of page, passed
					 * to pmap_enter (read-only) */

	/*
	 * The following word of flags is protected
	 * by the "VM object" lock.
	 */
	unsigned int
	/* boolean_t */	busy:1,		/* page is in transit (O) */
			wanted:1,	/* someone is waiting for page (O) */
			tabled:1,	/* page is in VP table (O) */
			hashed:1,	/* page is in vm_page_buckets[]
					 * (O) + the bucket lock */
			fictitious:1,	/* Physical page doesn't exist (O) */
	/*
	 * IMPORTANT: the "pmapped", "xpmapped" and "clustered" bits can be modified while holding the
	 * VM object "shared" lock + the page lock provided through the pmap_lock_phys_page function.
	 * This is done in vm_fault_enter and the CONSUME_CLUSTERED macro.
	 * It's also ok to modify them behind just the VM object "exclusive" lock.
	 */
			clustered:1,	/* page is not the faulted page (O) or (O-shared AND pmap_page) */
			pmapped:1,	/* page has been entered at some
					 * point into a pmap (O) or (O-shared AND pmap_page) */
			xpmapped:1,	/* page has been entered with execute permission (O)
					 * or (O-shared AND pmap_page) */

			wpmapped:1,	/* page has been entered at some
					 * point into a pmap for write (O) */
			pageout:1,	/* page wired & busy for pageout (O) */
			absent:1,	/* Data has been requested, but is
					 * not yet available (O) */
			error:1,	/* Data manager was unable to provide
					 * data due to error (O) */
			dirty:1,	/* Page must be cleaned (O) */
			cleaning:1,	/* Page clean has begun (O) */
			precious:1,	/* Page is precious; data must be
					 * returned even if clean (O) */
			overwriting:1,	/* Request to unlock has been made
					 * without having data. (O)
					 * [See vm_fault_page_overwrite] */
			restart:1,	/* Page was pushed higher in shadow
					 * chain by copy_call-related pagers;
					 * start again at top of chain */
			unusual:1,	/* Page is absent, error, restart or
					 * page locked */
			encrypted:1,	/* encrypted for secure swap (O) */
			encrypted_cleaning:1,	/* encrypting page */
			cs_validated:1,	/* code-signing: page was checked */
			cs_tainted:1,	/* code-signing: page is tainted */
			cs_nx:1,	/* code-signing: page is nx */
			reusable:1,
			lopage:1,
			slid:1,
			compressor:1,	/* page owned by compressor pool */
			written_by_kernel:1,	/* page was written by kernel (i.e. decompressed) */
			__unused_object_bits:4;	/* 4 bits available here */
};

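/*
 * Illustrative sketch, not from the original header: the packed-pointer
 * comment above assumes a vm_page_t occupies a single 64-byte cache line
 * on LP64 (two queue_chain_t's + offset + object = 48 bytes, then next_m,
 * two flag words and phys_page = 16 bytes).  That invariant could be
 * checked at compile time like this.
 */
#if defined(__LP64__)
_Static_assert(sizeof(struct vm_page) == 64,
	       "struct vm_page expected to fit one 64-byte cache line");
#endif
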
#define DEBUG_ENCRYPTED_SWAP	1
#if DEBUG_ENCRYPTED_SWAP
#define ASSERT_PAGE_DECRYPTED(page) 					\
	MACRO_BEGIN							\
	if ((page)->encrypted) {					\
		panic("VM page %p should not be encrypted here\n",	\
		      (page));						\
	}								\
	MACRO_END
#else	/* DEBUG_ENCRYPTED_SWAP */
#define ASSERT_PAGE_DECRYPTED(page)	assert(!(page)->encrypted)
#endif	/* DEBUG_ENCRYPTED_SWAP */

typedef struct vm_page	*vm_page_t;


typedef struct vm_locks_array {
	char		pad  __attribute__ ((aligned (64)));
	lck_mtx_t	vm_page_queue_lock2 __attribute__ ((aligned (64)));
	lck_mtx_t	vm_page_queue_free_lock2 __attribute__ ((aligned (64)));
	char		pad2  __attribute__ ((aligned (64)));
} vm_locks_array_t;


#define VM_PAGE_WIRED(m)	((!(m)->local && (m)->wire_count))
#define VM_PAGE_NULL		((vm_page_t) 0)
#define NEXT_PAGE(m)		((vm_page_t) (m)->pageq.next)
#define NEXT_PAGE_PTR(m)	((vm_page_t *) &(m)->pageq.next)
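
/*
 * Illustrative sketch, not part of the original header: packing works
 * because vm_page structures are 64-byte aligned and allocated within
 * 256 GB of VM_MIN_KERNEL_AND_KEXT_ADDRESS, so the shifted 32-bit form
 * produced by VM_PAGE_PACK_PTR round-trips losslessly through
 * VM_PAGE_UNPACK_PTR.  The function name is hypothetical.
 */
static inline boolean_t
example_vm_page_pack_roundtrip(vm_page_t m)
{
	vm_page_packed_t packed = VM_PAGE_PACK_PTR(m);

	/* unpacking the packed form must recover the original pointer */
	return (VM_PAGE_UNPACK_PTR(packed) == m) ? TRUE : FALSE;
}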

/*
 * XXX	The unusual bit should not be necessary.  Most of the bit
 * XXX	fields above really want to be masks.
 */

/*
 * For debugging, this macro can be defined to perform
 * some useful check on a page structure.
 */

#define VM_PAGE_CHECK(mem)			\
	MACRO_BEGIN				\
	VM_PAGE_QUEUES_ASSERT(mem, 1);		\
	MACRO_END

/*	Page coloring:
 *
 *	The free page list is actually n lists, one per color,
 *	where the number of colors is a function of the machine's
 *	cache geometry set at system initialization.  To disable
 *	coloring, set vm_colors to 1 and vm_color_mask to 0.
 *	The boot-arg "colors" may be used to override vm_colors.
 *	Note that there is little harm in having more colors than needed.
 */

#define MAX_COLORS	128
#define DEFAULT_COLORS	32

extern
unsigned int	vm_colors;		/* must be in range 1..MAX_COLORS */
extern
unsigned int	vm_color_mask;		/* must be (vm_colors-1) */
extern
unsigned int	vm_cache_geometry_colors; /* optimal #colors based on cache geometry */

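/*
 * Illustrative sketch, not part of the original header: with vm_colors a
 * power of two and vm_color_mask == vm_colors - 1, a page's color can be
 * derived from its physical page number by masking; the matching free
 * queue would then be vm_page_queue_free[color].  The function name is
 * hypothetical.
 */
static inline unsigned int
example_vm_page_color(ppnum_t phys_page)
{
	return phys_page & vm_color_mask;
}
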
/*
 * Wired memory is a very limited resource and we can't let users exhaust it
 * and deadlock the entire system.  We enforce the following limits:
 *
 * vm_user_wire_limit (default: all memory minus vm_global_no_user_wire_amount)
 *	how much memory can be user-wired in one user task
 *
 * vm_global_user_wire_limit (default: same as vm_user_wire_limit)
 *	how much memory can be user-wired in all user tasks
 *
 * vm_global_no_user_wire_amount (default: VM_NOT_USER_WIREABLE)
 *	how much memory must remain user-unwired at any time
 */
#define VM_NOT_USER_WIREABLE (64*1024*1024)	/* 64MB */
extern
vm_map_size_t	vm_user_wire_limit;
extern
vm_map_size_t	vm_global_user_wire_limit;
extern
vm_map_size_t	vm_global_no_user_wire_amount;

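/*
 * Illustrative sketch, not the kernel's actual enforcement code: a user
 * wire request would be denied once it pushes the per-task total past
 * vm_user_wire_limit or the system-wide total past
 * vm_global_user_wire_limit.  The function name and parameters are
 * hypothetical.
 */
static inline boolean_t
example_user_wire_allowed(vm_map_size_t task_wired_size,
			  vm_map_size_t global_wired_size,
			  vm_map_size_t request_size)
{
	if (task_wired_size + request_size > vm_user_wire_limit)
		return FALSE;		/* per-task limit exceeded */
	if (global_wired_size + request_size > vm_global_user_wire_limit)
		return FALSE;		/* system-wide limit exceeded */
	return TRUE;
}
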
/*
 *	Each pageable resident page falls into one of three lists:
 *
 *	free
 *		Available for allocation now.  The free list is
 *		actually an array of lists, one per color.
 *	inactive
 *		Not referenced in any map, but still has an
 *		object/offset-page mapping, and may be dirty.
 *		This is the list of pages that should be
 *		paged out next.  There are actually two
 *		inactive lists, one for pages brought in from
 *		disk or other backing store, and another
 *		for "zero-filled" pages.  See vm_pageout_scan()
 *		for the distinction and usage.
 *	active
 *		A list of pages which have been placed in
 *		at least one physical map.  This list is
 *		ordered, in LRU-like fashion.
 */


#define VPL_LOCK_SPIN 1

struct vpl {
	unsigned int	vpl_count;
	unsigned int	vpl_internal_count;
	unsigned int	vpl_external_count;
	queue_head_t	vpl_queue;
#ifdef	VPL_LOCK_SPIN
	lck_spin_t	vpl_lock;
#else
	lck_mtx_t	vpl_lock;
	lck_mtx_ext_t	vpl_lock_ext;
#endif
};

struct vplq {
	union {
		char		cache_line_pad[128];
		struct vpl	vpl;
	} vpl_un;
};
extern
unsigned int	vm_page_local_q_count;
extern
struct vplq	*vm_page_local_q;
extern
unsigned int	vm_page_local_q_soft_limit;
extern
unsigned int	vm_page_local_q_hard_limit;
extern
vm_locks_array_t vm_page_locks;

extern
queue_head_t	vm_page_queue_free[MAX_COLORS];	/* memory free queue */
extern
queue_head_t	vm_lopage_queue_free;		/* low memory free queue */
extern
queue_head_t	vm_page_queue_active;		/* active memory queue */
extern
queue_head_t	vm_page_queue_inactive;		/* inactive memory queue for normal pages */
extern
queue_head_t	vm_page_queue_cleaned;		/* clean-queue inactive memory */
extern
queue_head_t	vm_page_queue_anonymous;	/* inactive memory queue for anonymous pages */
extern
queue_head_t	vm_page_queue_throttled;	/* memory queue for throttled pageout pages */

extern
queue_head_t	vm_objects_wired;
extern
lck_spin_t	vm_objects_wired_lock;

extern
vm_offset_t	first_phys_addr;	/* physical address for first_page */
extern
vm_offset_t	last_phys_addr;		/* physical address for last_page */

extern
unsigned int	vm_page_free_count;	/* How many pages are free? (sum of all colors) */
extern
unsigned int	vm_page_fictitious_count;/* How many fictitious pages are free? */
extern
unsigned int	vm_page_active_count;	/* How many pages are active? */
extern
unsigned int	vm_page_inactive_count;	/* How many pages are inactive? */
extern
unsigned int	vm_page_cleaned_count;	/* How many pages are in the clean queue? */
extern
unsigned int	vm_page_throttled_count;/* How many inactives are throttled */
extern
unsigned int	vm_page_speculative_count;	/* How many speculative pages are unclaimed? */
extern unsigned int	vm_page_pageable_internal_count;
extern unsigned int	vm_page_pageable_external_count;
extern
unsigned int	vm_page_xpmapped_external_count;	/* How many pages are mapped executable? */
extern
unsigned int	vm_page_external_count;	/* How many pages are file-backed? */
extern
unsigned int	vm_page_internal_count;	/* How many pages are anonymous? */
extern
unsigned int	vm_page_wire_count;		/* How many pages are wired? */
extern
unsigned int	vm_page_wire_count_initial;	/* How many pages wired at startup */
extern
unsigned int	vm_page_free_target;	/* How many do we want free? */
extern
unsigned int	vm_page_free_min;	/* When to wakeup pageout */
extern
unsigned int	vm_page_throttle_limit;	/* When to throttle new page creation */
extern
uint32_t	vm_page_creation_throttle;	/* When to throttle new page creation */
extern
unsigned int	vm_page_inactive_target;/* How many do we want inactive? */
extern
unsigned int	vm_page_anonymous_min;	/* When it's ok to pre-clean */
extern
unsigned int	vm_page_inactive_min;	/* When to wakeup pageout */
extern
unsigned int	vm_page_free_reserved;	/* How many pages reserved to do pageout */
extern
unsigned int	vm_page_throttle_count;	/* Count of page allocations throttled */
extern
unsigned int	vm_page_gobble_count;
extern
unsigned int	vm_page_stolen_count;	/* Count of stolen pages not accounted in zones */


#if DEVELOPMENT || DEBUG
extern
unsigned int	vm_page_speculative_used;
#endif

extern
unsigned int	vm_page_purgeable_count;/* How many pages are purgeable now ? */
extern
unsigned int	vm_page_purgeable_wired_count;/* How many purgeable pages are wired now ? */
extern
uint64_t	vm_page_purged_count;	/* How many pages got purged so far ? */

extern unsigned int	vm_page_free_wanted;
				/* how many threads are waiting for memory */

extern unsigned int	vm_page_free_wanted_privileged;
				/* how many VM privileged threads are waiting for memory */

extern ppnum_t	vm_page_fictitious_addr;
				/* (fake) phys_addr of fictitious pages */

extern ppnum_t	vm_page_guard_addr;
				/* (fake) phys_addr of guard pages */


extern boolean_t	vm_page_deactivate_hint;

extern int		vm_compressor_mode;

/*
 * 0 = all pages avail (default)
 * 1 = disable high mem (cap max pages to 4G)
 * 2 = prefer himem
 */
extern int		vm_himemory_mode;

extern boolean_t	vm_lopage_needed;
extern uint32_t		vm_lopage_free_count;
extern uint32_t		vm_lopage_free_limit;
extern uint32_t		vm_lopage_lowater;
extern boolean_t	vm_lopage_refill;
extern uint64_t		max_valid_dma_address;
extern ppnum_t		max_valid_low_ppnum;

/*
 *	Prototypes for functions exported by this module.
 */
extern void		vm_page_bootstrap(
					vm_offset_t	*startp,
					vm_offset_t	*endp);

extern void		vm_page_module_init(void);

extern void		vm_page_init_local_q(void);

extern void		vm_page_create(
					ppnum_t		start,
					ppnum_t		end);

extern vm_page_t	kdp_vm_page_lookup(
					vm_object_t		object,
					vm_object_offset_t	offset);

extern vm_page_t	vm_page_lookup(
					vm_object_t		object,
					vm_object_offset_t	offset);

extern vm_page_t	vm_page_grab_fictitious(void);

extern vm_page_t	vm_page_grab_guard(void);

extern void		vm_page_release_fictitious(
					vm_page_t page);

extern void		vm_page_more_fictitious(void);

extern int		vm_pool_low(void);

extern vm_page_t	vm_page_grab(void);
584 | ||
0c530ab8 A |
585 | extern vm_page_t vm_page_grablo(void); |
586 | ||
1c79356b A |
587 | extern void vm_page_release( |
588 | vm_page_t page); | |
589 | ||
1c79356b A |
590 | extern boolean_t vm_page_wait( |
591 | int interruptible ); | |
592 | ||
593 | extern vm_page_t vm_page_alloc( | |
594 | vm_object_t object, | |
595 | vm_object_offset_t offset); | |
596 | ||
2d21ac55 A |
597 | extern vm_page_t vm_page_alloc_guard( |
598 | vm_object_t object, | |
599 | vm_object_offset_t offset); | |
600 | ||
1c79356b A |
601 | extern void vm_page_init( |
602 | vm_page_t page, | |
0b4c1975 | 603 | ppnum_t phys_page, |
6d2010ae | 604 | boolean_t lopage); |
1c79356b A |
605 | |
606 | extern void vm_page_free( | |
b0d623f7 | 607 | vm_page_t page); |
1c79356b | 608 | |
b0d623f7 A |
609 | extern void vm_page_free_unlocked( |
610 | vm_page_t page, | |
611 | boolean_t remove_from_hash); | |
2d21ac55 | 612 | |
1c79356b A |
613 | extern void vm_page_activate( |
614 | vm_page_t page); | |
615 | ||
616 | extern void vm_page_deactivate( | |
617 | vm_page_t page); | |
618 | ||
b0d623f7 A |
619 | extern void vm_page_deactivate_internal( |
620 | vm_page_t page, | |
621 | boolean_t clear_hw_reference); | |
622 | ||
316670eb A |
623 | extern void vm_page_enqueue_cleaned(vm_page_t page); |
624 | ||
2d21ac55 A |
625 | extern void vm_page_lru( |
626 | vm_page_t page); | |
627 | ||
628 | extern void vm_page_speculate( | |
629 | vm_page_t page, | |
630 | boolean_t new); | |
631 | ||
632 | extern void vm_page_speculate_ageit( | |
633 | struct vm_speculative_age_q *aq); | |
634 | ||
b0d623f7 A |
635 | extern void vm_page_reactivate_all_throttled(void); |
636 | ||
637 | extern void vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks); | |
638 | ||
1c79356b A |
639 | extern void vm_page_rename( |
640 | vm_page_t page, | |
641 | vm_object_t new_object, | |
2d21ac55 A |
642 | vm_object_offset_t new_offset, |
643 | boolean_t encrypted_ok); | |
1c79356b A |
644 | |
645 | extern void vm_page_insert( | |
646 | vm_page_t page, | |
647 | vm_object_t object, | |
648 | vm_object_offset_t offset); | |
649 | ||
3e170ce0 A |
650 | extern void vm_page_insert_wired( |
651 | vm_page_t page, | |
652 | vm_object_t object, | |
653 | vm_object_offset_t offset, | |
654 | vm_tag_t tag); | |
655 | ||
4a3eedf9 | 656 | extern void vm_page_insert_internal( |
b0d623f7 | 657 | vm_page_t page, |
4a3eedf9 A |
658 | vm_object_t object, |
659 | vm_object_offset_t offset, | |
3e170ce0 | 660 | vm_tag_t tag, |
b0d623f7 | 661 | boolean_t queues_lock_held, |
316670eb | 662 | boolean_t insert_in_hash, |
3e170ce0 A |
663 | boolean_t batch_pmap_op, |
664 | boolean_t delayed_accounting, | |
665 | uint64_t *delayed_ledger_update); | |
4a3eedf9 | 666 | |
extern void		vm_page_replace(
					vm_page_t		mem,
					vm_object_t		object,
					vm_object_offset_t	offset);

extern void		vm_page_remove(
					vm_page_t	page,
					boolean_t	remove_from_hash);

extern void		vm_page_zero_fill(
					vm_page_t	page);

extern void		vm_page_part_zero_fill(
					vm_page_t	m,
					vm_offset_t	m_pa,
					vm_size_t	len);

extern void		vm_page_copy(
					vm_page_t	src_page,
					vm_page_t	dest_page);

extern void		vm_page_part_copy(
					vm_page_t	src_m,
					vm_offset_t	src_pa,
					vm_page_t	dst_m,
					vm_offset_t	dst_pa,
					vm_size_t	len);

extern void		vm_page_wire(
					vm_page_t	page,
					vm_tag_t	tag,
					boolean_t	check_memorystatus);

extern void		vm_page_unwire(
					vm_page_t	page,
					boolean_t	queueit);

extern void		vm_set_page_size(void);

extern void		vm_page_gobble(
					vm_page_t	page);

extern void		vm_page_validate_cs(vm_page_t	page);
extern void		vm_page_validate_cs_mapped(
					vm_page_t	page,
					const void	*kaddr);
extern void		vm_page_validate_cs_mapped_chunk(
					vm_page_t	page,
					const void	*kaddr,
					vm_offset_t	chunk_offset,
					boolean_t	*validated,
					unsigned	*tainted);

extern void		vm_page_free_prepare_queues(
					vm_page_t	page);

extern void		vm_page_free_prepare_object(
					vm_page_t	page,
					boolean_t	remove_from_hash);

#if CONFIG_IOSCHED
extern wait_result_t	vm_page_sleep(
					vm_object_t	object,
					vm_page_t	m,
					int		interruptible);
#endif

extern void vm_pressure_response(void);

#if CONFIG_JETSAM
extern void memorystatus_pages_update(unsigned int pages_avail);

#define VM_CHECK_MEMORYSTATUS do { \
	memorystatus_pages_update( \
		vm_page_pageable_external_count + \
		vm_page_free_count + \
		(VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) ? 0 : vm_page_purgeable_count) \
		); \
	} while(0)

#else /* CONFIG_JETSAM */


#define VM_CHECK_MEMORYSTATUS	vm_pressure_response()


#endif /* CONFIG_JETSAM */

/*
 *	Functions implemented as macros.  m->wanted and m->busy are
 *	protected by the object lock.
 */

#define SET_PAGE_DIRTY(m, set_pmap_modified)			\
		MACRO_BEGIN					\
		vm_page_t __page__ = (m);			\
		__page__->dirty = TRUE;				\
		MACRO_END

#define PAGE_ASSERT_WAIT(m, interruptible)			\
		(((m)->wanted = TRUE),				\
		 assert_wait((event_t) (m), (interruptible)))

#if CONFIG_IOSCHED
#define PAGE_SLEEP(o, m, interruptible)				\
		vm_page_sleep(o, m, interruptible)
#else
#define PAGE_SLEEP(o, m, interruptible)				\
	(((m)->wanted = TRUE),					\
	 thread_sleep_vm_object((o), (m), (interruptible)))
#endif

#define PAGE_WAKEUP_DONE(m)					\
		MACRO_BEGIN					\
		(m)->busy = FALSE;				\
		if ((m)->wanted) {				\
			(m)->wanted = FALSE;			\
			thread_wakeup((event_t) (m));		\
		}						\
		MACRO_END

#define PAGE_WAKEUP(m)						\
		MACRO_BEGIN					\
		if ((m)->wanted) {				\
			(m)->wanted = FALSE;			\
			thread_wakeup((event_t) (m));		\
		}						\
		MACRO_END

#define VM_PAGE_FREE(p)				\
		MACRO_BEGIN			\
		vm_page_free_unlocked(p, TRUE);	\
		MACRO_END

#define VM_PAGE_GRAB_FICTITIOUS(M)					\
		MACRO_BEGIN						\
		while ((M = vm_page_grab_fictitious()) == VM_PAGE_NULL)	\
			vm_page_more_fictitious();			\
		MACRO_END

#define VM_PAGE_WAIT()		((void)vm_page_wait(THREAD_UNINT))

#define vm_page_queue_lock		(vm_page_locks.vm_page_queue_lock2)
#define vm_page_queue_free_lock		(vm_page_locks.vm_page_queue_free_lock2)

#define vm_page_lock_queues()	lck_mtx_lock(&vm_page_queue_lock)
#define vm_page_unlock_queues()	lck_mtx_unlock(&vm_page_queue_lock)

#define vm_page_lockspin_queues()	lck_mtx_lock_spin(&vm_page_queue_lock)
#define vm_page_trylockspin_queues()	lck_mtx_try_lock_spin(&vm_page_queue_lock)
#define vm_page_lockconvert_queues()	lck_mtx_convert_spin(&vm_page_queue_lock)

#ifdef	VPL_LOCK_SPIN
#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_spin_init(&vlq->vpl_lock, vpl_grp, vpl_attr)
#define VPL_LOCK(vpl) lck_spin_lock(vpl)
#define VPL_UNLOCK(vpl) lck_spin_unlock(vpl)
#else
#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_mtx_init_ext(&vlq->vpl_lock, &vlq->vpl_lock_ext, vpl_grp, vpl_attr)
#define VPL_LOCK(vpl) lck_mtx_lock_spin(vpl)
#define VPL_UNLOCK(vpl) lck_mtx_unlock(vpl)
#endif

#if MACH_ASSERT
extern void vm_page_queues_assert(vm_page_t mem, int val);
#define VM_PAGE_QUEUES_ASSERT(mem, val)	vm_page_queues_assert((mem), (val))
#else
#define VM_PAGE_QUEUES_ASSERT(mem, val)
#endif

#if DEVELOPMENT || DEBUG
#define VM_PAGE_SPECULATIVE_USED_ADD()				\
	MACRO_BEGIN						\
	OSAddAtomic(1, &vm_page_speculative_used);		\
	MACRO_END
#else
#define VM_PAGE_SPECULATIVE_USED_ADD()
#endif


#define VM_PAGE_CONSUME_CLUSTERED(mem)				\
	MACRO_BEGIN						\
	pmap_lock_phys_page(mem->phys_page);			\
	if (mem->clustered) {					\
		assert(mem->object);				\
		mem->object->pages_used++;			\
		mem->clustered = FALSE;				\
		VM_PAGE_SPECULATIVE_USED_ADD();			\
	}							\
	pmap_unlock_phys_page(mem->phys_page);			\
	MACRO_END


#define VM_PAGE_COUNT_AS_PAGEIN(mem)				\
	MACRO_BEGIN						\
	DTRACE_VM2(pgin, int, 1, (uint64_t *), NULL);		\
	current_task()->pageins++;				\
	if (mem->object->internal) {				\
		DTRACE_VM2(anonpgin, int, 1, (uint64_t *), NULL); \
	} else {						\
		DTRACE_VM2(fspgin, int, 1, (uint64_t *), NULL);	\
	}							\
	MACRO_END

/* adjust for stolen pages accounted elsewhere */
#define VM_PAGE_MOVE_STOLEN(page_count)				\
	MACRO_BEGIN						\
	vm_page_stolen_count -=	(page_count);			\
	vm_page_wire_count_initial -= (page_count);		\
	MACRO_END

#define DW_vm_page_unwire		0x01
#define DW_vm_page_wire			0x02
#define DW_vm_page_free			0x04
#define DW_vm_page_activate		0x08
#define DW_vm_page_deactivate_internal	0x10
#define DW_vm_page_speculate		0x20
#define DW_vm_page_lru			0x40
#define DW_vm_pageout_throttle_up	0x80
#define DW_PAGE_WAKEUP			0x100
#define DW_clear_busy			0x200
#define DW_clear_reference		0x400
#define DW_set_reference		0x800
#define DW_move_page			0x1000
#define DW_VM_PAGE_QUEUES_REMOVE	0x2000
#define DW_enqueue_cleaned		0x4000
#define DW_vm_phantom_cache_update	0x8000

struct vm_page_delayed_work {
	vm_page_t	dw_m;
	int		dw_mask;
};

void vm_page_do_delayed_work(vm_object_t object, vm_tag_t tag, struct vm_page_delayed_work *dwp, int dw_count);

extern unsigned int vm_max_delayed_work_limit;

#define DEFAULT_DELAYED_WORK_LIMIT	32

#define DELAYED_WORK_LIMIT(max)	((vm_max_delayed_work_limit >= max ? max : vm_max_delayed_work_limit))

/*
 * vm_page_do_delayed_work may need to drop the object lock...
 * if it does, we need the pages it's looking at to be held stable
 * via the busy bit, so if busy isn't already set, we need to set it
 * and ask vm_page_do_delayed_work to clear it and wakeup anyone that
 * might have blocked on it once we're done processing the page.
 */

#define VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_cnt)		\
	MACRO_BEGIN						\
	if (mem->busy == FALSE) {				\
		mem->busy = TRUE;				\
		if ( !(dwp->dw_mask & DW_vm_page_free))		\
			dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); \
	}							\
	dwp->dw_m = mem;					\
	dwp++;							\
	dw_cnt++;						\
	MACRO_END
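
/*
 * Illustrative sketch, not code from this header: the intended pattern
 * is to batch per-page operations into a delayed-work array while the
 * object lock is held, flushing the batch through vm_page_do_delayed_work
 * whenever it reaches the limit.  The function name below is hypothetical.
 */
static inline void
example_batch_page_activate(vm_object_t object, vm_tag_t tag,
			    vm_page_t *pages, int npages)
{
	struct vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work	*dwp = &dw_array[0];
	int				dw_count = 0;
	int				dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
	int				i;

	for (i = 0; i < npages; i++) {
		vm_page_t	mem = pages[i];

		dwp->dw_mask = DW_vm_page_activate;
		VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_count);

		if (dw_count >= dw_limit) {
			/* flush the batch; this may temporarily drop the object lock */
			vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
			dwp = &dw_array[0];
			dw_count = 0;
		}
	}
	if (dw_count)
		vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
}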

extern vm_page_t vm_object_page_grab(vm_object_t);

#if VM_PAGE_BUCKETS_CHECK
extern void vm_page_buckets_check(void);
#endif /* VM_PAGE_BUCKETS_CHECK */

extern void vm_page_queues_remove(vm_page_t mem);
extern void vm_page_remove_internal(vm_page_t page);
extern void vm_page_enqueue_inactive(vm_page_t mem, boolean_t first);
extern void vm_page_check_pageable_safe(vm_page_t page);


#endif	/* _VM_VM_PAGE_H_ */