/*
 * NOTE(review): removed scrape residue ("]>" and a git-blame table header
 * "Commit | Line | Data |") left over from an annotated-listing export.
 */
1c79356b | 1 | /* |
2d21ac55 | 2 | * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. |
1c79356b | 3 | * |
2d21ac55 | 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
1c79356b | 5 | * |
2d21ac55 A |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
8f6c56a5 | 14 | * |
2d21ac55 A |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
8f6c56a5 A |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
2d21ac55 A |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
8f6c56a5 | 25 | * |
2d21ac55 | 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
1c79356b A |
27 | */ |
28 | /* | |
29 | * @OSF_COPYRIGHT@ | |
30 | */ | |
31 | /* | |
32 | * Mach Operating System | |
33 | * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University | |
34 | * All Rights Reserved. | |
35 | * | |
36 | * Permission to use, copy, modify and distribute this software and its | |
37 | * documentation is hereby granted, provided that both the copyright | |
38 | * notice and this permission notice appear in all copies of the | |
39 | * software, derivative works or modified versions, and any portions | |
40 | * thereof, and that both notices appear in supporting documentation. | |
41 | * | |
42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" | |
43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR | |
44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | |
45 | * | |
46 | * Carnegie Mellon requests users of this software to return to | |
47 | * | |
48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU | |
49 | * School of Computer Science | |
50 | * Carnegie Mellon University | |
51 | * Pittsburgh PA 15213-3890 | |
52 | * | |
53 | * any improvements or extensions that they make and grant Carnegie Mellon | |
54 | * the rights to redistribute these changes. | |
55 | */ | |
56 | /* | |
57 | */ | |
58 | /* | |
59 | * File: vm/vm_page.h | |
60 | * Author: Avadis Tevanian, Jr., Michael Wayne Young | |
61 | * Date: 1985 | |
62 | * | |
63 | * Resident memory system definitions. | |
64 | */ | |
65 | ||
66 | #ifndef _VM_VM_PAGE_H_ | |
67 | #define _VM_VM_PAGE_H_ | |
68 | ||
91447636 A |
69 | #include <debug.h> |
70 | ||
1c79356b A |
71 | #include <mach/boolean.h> |
72 | #include <mach/vm_prot.h> | |
73 | #include <mach/vm_param.h> | |
74 | #include <vm/vm_object.h> | |
75 | #include <kern/queue.h> | |
76 | #include <kern/lock.h> | |
77 | ||
78 | #include <kern/macro_help.h> | |
2d21ac55 A |
79 | #include <libkern/OSAtomic.h> |
80 | ||
1c79356b | 81 | |
0b4e3aa0 | 82 | /* |
2d21ac55 A |
83 | * VM_PAGE_MIN_SPECULATIVE_AGE_Q through VM_PAGE_MAX_SPECULATIVE_AGE_Q |
84 | * represents a set of aging bins that are 'protected'... | |
85 | * | |
86 | * VM_PAGE_SPECULATIVE_AGED_Q is a list of the speculative pages that have | |
87 | * not yet been 'claimed' but have been aged out of the protective bins | |
88 | * this occurs in vm_page_speculate when it advances to the next bin | |
89 | * and discovers that it is still occupied... at that point, all of the | |
90 | * pages in that bin are moved to the VM_PAGE_SPECULATIVE_AGED_Q. the pages | |
91 | * in that bin are all guaranteed to have reached at least the maximum age | |
92 | * we allow for a protected page... they can be older if there is no | |
93 | * memory pressure to pull them from the bin, or there are no new speculative pages | |
94 | * being generated to push them out. | |
95 | * this list is the one that vm_pageout_scan will prefer when looking | |
96 | * for pages to move to the underweight free list | |
97 | * | |
98 | * VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS | |
99 | * defines the amount of time a speculative page is normally | |
100 | * allowed to live in the 'protected' state (i.e. not available | |
101 | * to be stolen if vm_pageout_scan is running and looking for | |
102 | * pages)... however, if the total number of speculative pages | |
103 | * in the protected state exceeds our limit (defined in vm_pageout.c) | |
104 | * and there are none available in VM_PAGE_SPECULATIVE_AGED_Q, then | |
105 | * vm_pageout_scan is allowed to steal pages from the protected | |
106 | * bucket even if they are underage. | |
107 | * | |
108 | * vm_pageout_scan is also allowed to pull pages from a protected | |
109 | * bin if the bin has reached the "age of consent" we've set | |
0b4e3aa0 | 110 | */ |
2d21ac55 A |
/*
 * Speculative-page aging bin constants (see the block comment above):
 * bins MIN..MAX are "protected"; bin 0 collects aged-out pages that
 * vm_pageout_scan prefers to steal first.
 */
#define VM_PAGE_MAX_SPECULATIVE_AGE_Q	10
#define VM_PAGE_MIN_SPECULATIVE_AGE_Q	1
#define VM_PAGE_SPECULATIVE_AGED_Q	0

/* nominal lifetime of one protected bin, in milliseconds */
#define VM_PAGE_SPECULATIVE_Q_AGE_MS	500
117 | ||
118 | struct vm_speculative_age_q { | |
119 | /* | |
120 | * memory queue for speculative pages via clustered pageins | |
121 | */ | |
122 | queue_head_t age_q; | |
123 | mach_timespec_t age_ts; | |
124 | }; | |
0b4e3aa0 A |
125 | |
126 | ||
2d21ac55 A |
127 | extern |
128 | struct vm_speculative_age_q vm_page_queue_speculative[]; | |
0b4e3aa0 | 129 | |
2d21ac55 A |
130 | extern int speculative_steal_index; |
131 | extern int speculative_age_index; | |
9bccf70c | 132 | |
0b4e3aa0 | 133 | |
1c79356b A |
134 | /* |
135 | * Management of resident (logical) pages. | |
136 | * | |
137 | * A small structure is kept for each resident | |
138 | * page, indexed by page number. Each structure | |
139 | * is an element of several lists: | |
140 | * | |
141 | * A hash table bucket used to quickly | |
142 | * perform object/offset lookups | |
143 | * | |
144 | * A list of all pages for a given object, | |
145 | * so they can be quickly deactivated at | |
146 | * time of deallocation. | |
147 | * | |
148 | * An ordered list of pages due for pageout. | |
149 | * | |
150 | * In addition, the structure contains the object | |
151 | * and offset to which this page belongs (for pageout), | |
152 | * and sundry status bits. | |
153 | * | |
154 | * Fields in this structure are locked either by the lock on the | |
155 | * object that the page belongs to (O) or by the lock on the page | |
156 | * queues (P). [Some fields require that both locks be held to | |
157 | * change that field; holding either lock is sufficient to read.] | |
158 | */ | |
159 | ||
160 | struct vm_page { | |
161 | queue_chain_t pageq; /* queue info for FIFO | |
162 | * queue or free list (P) */ | |
163 | queue_chain_t listq; /* all pages in same object (O) */ | |
164 | struct vm_page *next; /* VP bucket link (O) */ | |
165 | ||
166 | vm_object_t object; /* which object am I in (O&P) */ | |
167 | vm_object_offset_t offset; /* offset into that object (O,P) */ | |
168 | ||
91447636 A |
169 | /* |
170 | * The following word of flags is protected | |
171 | * by the "page queues" lock. | |
172 | */ | |
1c79356b | 173 | unsigned int wire_count:16, /* how many wired down maps use me? (O&P) */ |
1c79356b A |
174 | /* boolean_t */ inactive:1, /* page is in inactive list (P) */ |
175 | active:1, /* page is in active list (P) */ | |
91447636 | 176 | pageout_queue:1,/* page is on queue for pageout (P) */ |
2d21ac55 | 177 | speculative:1, /* page is on speculative list (P) */ |
1c79356b A |
178 | laundry:1, /* page is being cleaned now (P)*/ |
179 | free:1, /* page is on free list (P) */ | |
180 | reference:1, /* page has been used (P) */ | |
1c79356b | 181 | pageout:1, /* page wired & busy for pageout (P) */ |
0b4e3aa0 A |
182 | gobbled:1, /* page used internally (P) */ |
183 | private:1, /* Page should not be returned to | |
91447636 | 184 | * the free list (P) */ |
2d21ac55 A |
185 | throttled:1, /* pager is not responding (P) */ |
186 | __unused_pageq_bits:5; /* 5 bits available here */ | |
1c79356b | 187 | |
91447636 A |
188 | /* |
189 | * The following word of flags is protected | |
190 | * by the "VM object" lock. | |
191 | */ | |
1c79356b A |
192 | unsigned int |
193 | /* boolean_t */ busy:1, /* page is in transit (O) */ | |
194 | wanted:1, /* someone is waiting for page (O) */ | |
195 | tabled:1, /* page is in VP table (O) */ | |
196 | fictitious:1, /* Physical page doesn't exist (O) */ | |
2d21ac55 A |
197 | pmapped:1, /* page has been entered at some |
198 | * point into a pmap (O) */ | |
1c79356b A |
199 | absent:1, /* Data has been requested, but is |
200 | * not yet available (O) */ | |
201 | error:1, /* Data manager was unable to provide | |
202 | * data due to error (O) */ | |
203 | dirty:1, /* Page must be cleaned (O) */ | |
204 | cleaning:1, /* Page clean has begun (O) */ | |
205 | precious:1, /* Page is precious; data must be | |
206 | * returned even if clean (O) */ | |
207 | clustered:1, /* page is not the faulted page (O) */ | |
208 | overwriting:1, /* Request to unlock has been made | |
209 | * without having data. (O) | |
210 | * [See vm_fault_page_overwrite] */ | |
211 | restart:1, /* Page was pushed higher in shadow | |
212 | chain by copy_call-related pagers; | |
213 | start again at top of chain */ | |
1c79356b A |
214 | unusual:1, /* Page is absent, error, restart or |
215 | page locked */ | |
91447636 | 216 | encrypted:1, /* encrypted for secure swap (O) */ |
2d21ac55 | 217 | encrypted_cleaning:1, /* encrypting page */ |
1c79356b A |
218 | list_req_pending:1, /* pagein/pageout alt mechanism */ |
219 | /* allows creation of list */ | |
220 | /* requests on pages that are */ | |
221 | /* actively being paged. */ | |
2d21ac55 | 222 | dump_cleaning:1, /* set by the pageout daemon when */ |
0b4e3aa0 A |
223 | /* a page being cleaned is */ |
224 | /* encountered and targeted as */ | |
225 | /* a pageout candidate */ | |
2d21ac55 A |
226 | cs_validated:1, /* code-signing: page was checked */ |
227 | cs_tainted:1, /* code-signing: page is tainted */ | |
228 | no_cache:1, /* page is not to be cached and */ | |
229 | /* should be reused ahead of */ | |
230 | /* other pages */ | |
231 | deactivated:1, | |
232 | zero_fill:1, | |
233 | __unused_object_bits:9; /* 9 bits available here */ | |
1c79356b | 234 | |
91447636 | 235 | ppnum_t phys_page; /* Physical address of page, passed |
1c79356b | 236 | * to pmap_enter (read-only) */ |
1c79356b A |
237 | }; |
238 | ||
91447636 A |
/*
 * ASSERT_PAGE_DECRYPTED(page): sanity check that a page reaching a
 * point where it must be plaintext is not still encrypted.  With
 * DEBUG_ENCRYPTED_SWAP non-zero we panic with the page pointer for
 * easier diagnosis; otherwise we fall back to a plain assert.
 * Guarded with #ifndef so the build configuration can override the
 * default instead of it being hard-coded.
 */
#ifndef DEBUG_ENCRYPTED_SWAP
#define DEBUG_ENCRYPTED_SWAP	1
#endif
#if DEBUG_ENCRYPTED_SWAP
#define ASSERT_PAGE_DECRYPTED(page) 		\
	MACRO_BEGIN				\
	if ((page)->encrypted) {		\
		panic("VM page %p should not be encrypted here\n", \
		      (page));			\
	}					\
	MACRO_END
#else	/* DEBUG_ENCRYPTED_SWAP */
#define ASSERT_PAGE_DECRYPTED(page) 	assert(!(page)->encrypted)
#endif	/* DEBUG_ENCRYPTED_SWAP */
251 | ||
1c79356b A |
/* Pages are passed by pointer; VM_PAGE_NULL is the canonical null page. */
typedef struct vm_page	*vm_page_t;

#define VM_PAGE_NULL		((vm_page_t) 0)
/* free-list linkage reuses pageq.next — only valid for pages on a free list */
#define NEXT_PAGE(m)		((vm_page_t) (m)->pageq.next)
#define NEXT_PAGE_PTR(m)	((vm_page_t *) &(m)->pageq.next)
1c79356b A |
257 | |
258 | /* | |
259 | * XXX The unusual bit should not be necessary. Most of the bit | |
260 | * XXX fields above really want to be masks. | |
261 | */ | |
262 | ||
263 | /* | |
264 | * For debugging, this macro can be defined to perform | |
265 | * some useful check on a page structure. | |
266 | */ | |
267 | ||
2d21ac55 A |
/* Debug hook: define to perform a useful check on a page; no-op here. */
#define VM_PAGE_CHECK(mem)	do {} while (0)

/* Page coloring:
 *
 * The free page list is actually n lists, one per color,
 * where the number of colors is a function of the machine's
 * cache geometry set at system initialization.  To disable
 * coloring, set vm_colors to 1 and vm_color_mask to 0.
 * The boot-arg "colors" may be used to override vm_colors.
 * Note that there is little harm in having more colors than needed.
 */

#define MAX_COLORS	128
#define	DEFAULT_COLORS	32

extern
unsigned int	vm_colors;		/* must be in range 1..MAX_COLORS */
extern
unsigned int	vm_color_mask;		/* must be (vm_colors-1) */
extern
unsigned int	vm_cache_geometry_colors; /* optimal #colors based on cache geometry */
1c79356b A |
289 | |
290 | /* | |
291 | * Each pageable resident page falls into one of three lists: | |
292 | * | |
293 | * free | |
2d21ac55 A |
294 | * Available for allocation now. The free list is |
295 | * actually an array of lists, one per color. | |
1c79356b A |
296 | * inactive |
297 | * Not referenced in any map, but still has an | |
298 | * object/offset-page mapping, and may be dirty. | |
299 | * This is the list of pages that should be | |
2d21ac55 A |
300 | * paged out next. There are actually two |
301 | * inactive lists, one for pages brought in from | |
302 | * disk or other backing store, and another | |
303 | * for "zero-filled" pages. See vm_pageout_scan() | |
304 | * for the distinction and usage. | |
1c79356b A |
305 | * active |
306 | * A list of pages which have been placed in | |
307 | * at least one physical map. This list is | |
308 | * ordered, in LRU-like fashion. | |
309 | */ | |
310 | ||
311 | extern | |
2d21ac55 A |
312 | queue_head_t vm_page_queue_free[MAX_COLORS]; /* memory free queue */ |
313 | extern | |
314 | queue_head_t vm_lopage_queue_free; /* low memory free queue */ | |
1c79356b A |
315 | extern |
316 | vm_page_t vm_page_queue_fictitious; /* fictitious free queue */ | |
317 | extern | |
318 | queue_head_t vm_page_queue_active; /* active memory queue */ | |
319 | extern | |
2d21ac55 A |
320 | queue_head_t vm_page_queue_inactive; /* inactive memory queue for normal pages */ |
321 | extern | |
9bccf70c | 322 | queue_head_t vm_page_queue_zf; /* inactive memory queue for zero fill */ |
2d21ac55 | 323 | queue_head_t vm_page_queue_throttled; /* memory queue for throttled pageout pages */ |
1c79356b A |
324 | |
325 | extern | |
326 | vm_offset_t first_phys_addr; /* physical address for first_page */ | |
327 | extern | |
328 | vm_offset_t last_phys_addr; /* physical address for last_page */ | |
329 | ||
330 | extern | |
2d21ac55 | 331 | unsigned int vm_page_free_count; /* How many pages are free? (sum of all colors) */ |
1c79356b | 332 | extern |
91447636 | 333 | unsigned int vm_page_fictitious_count;/* How many fictitious pages are free? */ |
1c79356b | 334 | extern |
91447636 | 335 | unsigned int vm_page_active_count; /* How many pages are active? */ |
1c79356b | 336 | extern |
91447636 | 337 | unsigned int vm_page_inactive_count; /* How many pages are inactive? */ |
1c79356b | 338 | extern |
2d21ac55 A |
339 | unsigned int vm_page_throttled_count;/* How many inactives are throttled */ |
340 | extern | |
341 | unsigned int vm_page_speculative_count; /* How many speculative pages are unclaimed? */ | |
342 | extern | |
91447636 | 343 | unsigned int vm_page_wire_count; /* How many pages are wired? */ |
1c79356b | 344 | extern |
2d21ac55 A |
345 | vm_map_size_t vm_user_wire_limit; /* How much memory can be locked by a user? */ |
346 | extern | |
347 | vm_map_size_t vm_global_user_wire_limit; /* How much memory can be locked system wide by users? */ | |
348 | extern | |
91447636 | 349 | unsigned int vm_page_free_target; /* How many do we want free? */ |
1c79356b | 350 | extern |
91447636 | 351 | unsigned int vm_page_free_min; /* When to wakeup pageout */ |
1c79356b | 352 | extern |
91447636 | 353 | unsigned int vm_page_inactive_target;/* How many do we want inactive? */ |
1c79356b | 354 | extern |
2d21ac55 A |
355 | unsigned int vm_page_inactive_min; /* When do wakeup pageout */ |
356 | extern | |
91447636 | 357 | unsigned int vm_page_free_reserved; /* How many pages reserved to do pageout */ |
1c79356b | 358 | extern |
2d21ac55 | 359 | unsigned int vm_page_zfill_throttle_count;/* Count of zero-fill allocations throttled */ |
91447636 A |
360 | extern |
361 | unsigned int vm_page_gobble_count; | |
362 | ||
2d21ac55 A |
363 | extern |
364 | unsigned int vm_page_speculative_unused; | |
365 | extern | |
366 | unsigned int vm_page_speculative_used; | |
55e303ae | 367 | extern |
91447636 | 368 | unsigned int vm_page_purgeable_count;/* How many pages are purgeable now ? */ |
55e303ae | 369 | extern |
91447636 | 370 | uint64_t vm_page_purged_count; /* How many pages got purged so far ? */ |
1c79356b A |
371 | |
372 | decl_mutex_data(,vm_page_queue_lock) | |
373 | /* lock on active and inactive page queues */ | |
374 | decl_mutex_data(,vm_page_queue_free_lock) | |
2d21ac55 | 375 | /* lock on free page queue array (ie, all colors) */ |
1c79356b A |
376 | |
377 | extern unsigned int vm_page_free_wanted; | |
378 | /* how many threads are waiting for memory */ | |
379 | ||
2d21ac55 A |
380 | extern unsigned int vm_page_free_wanted_privileged; |
381 | /* how many VM privileged threads are waiting for memory */ | |
382 | ||
1c79356b A |
383 | extern vm_offset_t vm_page_fictitious_addr; |
384 | /* (fake) phys_addr of fictitious pages */ | |
385 | ||
2d21ac55 A |
386 | extern vm_offset_t vm_page_guard_addr; |
387 | /* (fake) phys_addr of guard pages */ | |
388 | ||
389 | ||
91447636 A |
390 | extern boolean_t vm_page_deactivate_hint; |
391 | ||
0c530ab8 A |
392 | // 0 = all pages avail, 1 = disable high mem, 2 = prefer himem |
393 | extern int vm_himemory_mode; | |
394 | ||
395 | extern ppnum_t vm_lopage_poolend; | |
396 | extern int vm_lopage_poolsize; | |
397 | extern uint64_t max_valid_dma_address; | |
398 | ||
399 | ||
1c79356b A |
400 | /* |
401 | * Prototypes for functions exported by this module. | |
402 | */ | |
403 | extern void vm_page_bootstrap( | |
404 | vm_offset_t *startp, | |
2d21ac55 | 405 | vm_offset_t *endp) __attribute__((section("__TEXT, initcode"))); |
1c79356b | 406 | |
2d21ac55 A |
407 | extern void vm_page_module_init(void) __attribute__((section("__TEXT, initcode"))); |
408 | ||
1c79356b | 409 | extern void vm_page_create( |
55e303ae A |
410 | ppnum_t start, |
411 | ppnum_t end); | |
1c79356b A |
412 | |
413 | extern vm_page_t vm_page_lookup( | |
414 | vm_object_t object, | |
415 | vm_object_offset_t offset); | |
416 | ||
417 | extern vm_page_t vm_page_grab_fictitious(void); | |
418 | ||
2d21ac55 A |
419 | extern vm_page_t vm_page_grab_guard(void); |
420 | ||
1c79356b A |
421 | extern void vm_page_release_fictitious( |
422 | vm_page_t page); | |
423 | ||
1c79356b A |
424 | extern void vm_page_more_fictitious(void); |
425 | ||
426 | extern int vm_pool_low(void); | |
427 | ||
428 | extern vm_page_t vm_page_grab(void); | |
429 | ||
0c530ab8 A |
430 | extern vm_page_t vm_page_grablo(void); |
431 | ||
1c79356b A |
432 | extern void vm_page_release( |
433 | vm_page_t page); | |
434 | ||
1c79356b A |
435 | extern boolean_t vm_page_wait( |
436 | int interruptible ); | |
437 | ||
438 | extern vm_page_t vm_page_alloc( | |
439 | vm_object_t object, | |
440 | vm_object_offset_t offset); | |
441 | ||
0c530ab8 A |
442 | extern vm_page_t vm_page_alloclo( |
443 | vm_object_t object, | |
444 | vm_object_offset_t offset); | |
445 | ||
2d21ac55 A |
446 | extern vm_page_t vm_page_alloc_guard( |
447 | vm_object_t object, | |
448 | vm_object_offset_t offset); | |
449 | ||
1c79356b A |
450 | extern void vm_page_init( |
451 | vm_page_t page, | |
55e303ae | 452 | ppnum_t phys_page); |
1c79356b A |
453 | |
454 | extern void vm_page_free( | |
455 | vm_page_t page); | |
456 | ||
2d21ac55 A |
457 | extern void vm_page_free_prepare( |
458 | vm_page_t page); | |
459 | ||
1c79356b A |
460 | extern void vm_page_activate( |
461 | vm_page_t page); | |
462 | ||
463 | extern void vm_page_deactivate( | |
464 | vm_page_t page); | |
465 | ||
2d21ac55 A |
466 | extern void vm_page_lru( |
467 | vm_page_t page); | |
468 | ||
469 | extern void vm_page_speculate( | |
470 | vm_page_t page, | |
471 | boolean_t new); | |
472 | ||
473 | extern void vm_page_speculate_ageit( | |
474 | struct vm_speculative_age_q *aq); | |
475 | ||
1c79356b A |
476 | extern void vm_page_rename( |
477 | vm_page_t page, | |
478 | vm_object_t new_object, | |
2d21ac55 A |
479 | vm_object_offset_t new_offset, |
480 | boolean_t encrypted_ok); | |
1c79356b A |
481 | |
482 | extern void vm_page_insert( | |
483 | vm_page_t page, | |
484 | vm_object_t object, | |
485 | vm_object_offset_t offset); | |
486 | ||
487 | extern void vm_page_replace( | |
488 | vm_page_t mem, | |
489 | vm_object_t object, | |
490 | vm_object_offset_t offset); | |
491 | ||
492 | extern void vm_page_remove( | |
493 | vm_page_t page); | |
494 | ||
495 | extern void vm_page_zero_fill( | |
496 | vm_page_t page); | |
497 | ||
498 | extern void vm_page_part_zero_fill( | |
499 | vm_page_t m, | |
500 | vm_offset_t m_pa, | |
501 | vm_size_t len); | |
502 | ||
503 | extern void vm_page_copy( | |
504 | vm_page_t src_page, | |
505 | vm_page_t dest_page); | |
506 | ||
507 | extern void vm_page_part_copy( | |
508 | vm_page_t src_m, | |
509 | vm_offset_t src_pa, | |
510 | vm_page_t dst_m, | |
511 | vm_offset_t dst_pa, | |
512 | vm_size_t len); | |
513 | ||
514 | extern void vm_page_wire( | |
515 | vm_page_t page); | |
516 | ||
517 | extern void vm_page_unwire( | |
518 | vm_page_t page); | |
519 | ||
520 | extern void vm_set_page_size(void); | |
521 | ||
522 | extern void vm_page_gobble( | |
523 | vm_page_t page); | |
524 | ||
2d21ac55 A |
525 | extern void vm_page_validate_cs(vm_page_t page); |
526 | ||
1c79356b A |
/*
 * Functions implemented as macros. m->wanted and m->busy are
 * protected by the object lock.
 */

#define PAGE_ASSERT_WAIT(m, interruptible)	\
		(((m)->wanted = TRUE),		\
		 assert_wait((event_t) (m), (interruptible)))

#define PAGE_SLEEP(o, m, interruptible)		\
		(((m)->wanted = TRUE),		\
		 thread_sleep_vm_object((o), (m), (interruptible)))

#define PAGE_WAKEUP_DONE(m)			\
		MACRO_BEGIN			\
		(m)->busy = FALSE;		\
		if ((m)->wanted) {		\
			(m)->wanted = FALSE;	\
			thread_wakeup((event_t) (m));	\
		}				\
		MACRO_END

#define PAGE_WAKEUP(m)				\
		MACRO_BEGIN			\
		if ((m)->wanted) {		\
			(m)->wanted = FALSE;	\
			thread_wakeup((event_t) (m));	\
		}				\
		MACRO_END

#define VM_PAGE_FREE(p)			\
		MACRO_BEGIN		\
		vm_page_lock_queues();	\
		vm_page_free(p);	\
		vm_page_unlock_queues(); \
		MACRO_END

#define VM_PAGE_GRAB_FICTITIOUS(M)					\
		MACRO_BEGIN						\
		while ((M = vm_page_grab_fictitious()) == VM_PAGE_NULL)	\
			vm_page_more_fictitious();			\
		MACRO_END

/* true when zero-fill allocations should be throttled; also counts the event */
#define	VM_PAGE_ZFILL_THROTTLED()				\
		(vm_page_free_count < vm_page_free_min &&	\
		 !(current_thread()->options & TH_OPT_VMPRIV) &&\
		 ++vm_page_zfill_throttle_count)

#define	VM_PAGE_WAIT()		((void)vm_page_wait(THREAD_UNINT))

#define vm_page_lock_queues()	mutex_lock(&vm_page_queue_lock)
#define vm_page_unlock_queues()	mutex_unlock(&vm_page_queue_lock)

#define vm_page_lockspin_queues()	mutex_lock_spin(&vm_page_queue_lock)
1c79356b A |
/*
 * Remove a page from whichever pageable queue it is on (active,
 * inactive/zero-fill, throttled, or speculative), keeping the
 * corresponding counters in sync.  Caller holds the page-queues lock.
 */
#define VM_PAGE_QUEUES_REMOVE(mem)				\
	MACRO_BEGIN						\
	assert(!mem->laundry);					\
	if (mem->active) {					\
		assert(mem->object != kernel_object);		\
		assert(!mem->inactive && !mem->speculative);	\
		assert(!mem->throttled);			\
		queue_remove(&vm_page_queue_active,		\
			mem, vm_page_t, pageq);			\
		mem->active = FALSE;				\
		if (!mem->fictitious) {				\
			vm_page_active_count--;			\
		} else {					\
			assert(mem->phys_page ==		\
			       vm_page_fictitious_addr);	\
		}						\
	}							\
								\
	else if (mem->inactive) {				\
		assert(mem->object != kernel_object);		\
		assert(!mem->active && !mem->speculative);	\
		assert(!mem->throttled);			\
		if (mem->zero_fill) {				\
			queue_remove(&vm_page_queue_zf,		\
				mem, vm_page_t, pageq);		\
			vm_zf_queue_count--;			\
		} else {					\
			queue_remove(&vm_page_queue_inactive,	\
				mem, vm_page_t, pageq);		\
		}						\
		mem->inactive = FALSE;				\
		if (!mem->fictitious) {				\
			vm_page_inactive_count--;		\
			vm_purgeable_q_advance_all(1);		\
		} else {					\
			assert(mem->phys_page ==		\
			       vm_page_fictitious_addr);	\
		}						\
	}							\
								\
	else if (mem->throttled) {				\
		assert(!mem->active && !mem->inactive);		\
		assert(!mem->speculative);			\
		queue_remove(&vm_page_queue_throttled,		\
			mem, vm_page_t, pageq);			\
		mem->throttled = FALSE;				\
		if (!mem->fictitious)				\
			vm_page_throttled_count--;		\
	}							\
								\
	else if (mem->speculative) {				\
		assert(!mem->active && !mem->inactive);		\
		assert(!mem->throttled);			\
		assert(!mem->fictitious);			\
		remque(&mem->pageq);				\
		mem->speculative = FALSE;			\
		vm_page_speculative_count--;			\
	}							\
	mem->pageq.next = NULL;					\
	mem->pageq.prev = NULL;					\
	MACRO_END
643 | ||
644 | ||
/*
 * Mark a speculatively-brought-in (clustered) page as actually used:
 * credit the owning object and bump the global used counter atomically.
 */
#define VM_PAGE_CONSUME_CLUSTERED(mem)				\
	MACRO_BEGIN						\
	if (mem->clustered) {					\
		assert(mem->object);				\
		mem->object->pages_used++;			\
		mem->clustered = FALSE;				\
		OSAddAtomic(1, (SInt32 *)&vm_page_speculative_used); \
	}							\
	MACRO_END
654 | ||
655 | #endif /* _VM_VM_PAGE_H_ */ |