/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_page.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Resident memory system definitions.
 */

#ifndef	_VM_VM_PAGE_H_
#define _VM_VM_PAGE_H_

#include <debug.h>

#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <vm/vm_object.h>
#include <kern/queue.h>
#include <kern/lock.h>

#include <kern/macro_help.h>

/*
 * Each page entered on the inactive queue obtains a ticket from a
 * particular ticket roll.  Pages granted tickets from the same roll
 * generally flow through the queue as a group, so when a page is
 * pulled from the top of the queue it is extremely likely that the
 * pages near the top hold tickets from the same or adjacent rolls.
 * A page's proximity to the top of the queue can therefore be loosely
 * estimated from the identity of the roll its ticket came from.
 */


extern unsigned int	vm_page_ticket_roll;
extern unsigned int	vm_page_ticket;


#define VM_PAGE_TICKETS_IN_ROLL	512
#define VM_PAGE_TICKET_ROLL_IDS	16
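
/*
 * Illustrative sketch (not part of this interface): how the pageout
 * path is expected to hand out tickets as pages are placed on the
 * inactive queue.  The exact caller is an assumption (in practice the
 * resident page code, e.g. vm_page_deactivate()); only vm_page_ticket,
 * vm_page_ticket_roll and the two constants above come from this file.
 *
 *	m->page_ticket = vm_page_ticket;
 *	if (++vm_page_ticket_roll == VM_PAGE_TICKETS_IN_ROLL) {
 *		vm_page_ticket_roll = 0;
 *		if (++vm_page_ticket == VM_PAGE_TICKET_ROLL_IDS)
 *			vm_page_ticket = 0;
 *	}
 */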

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A hash table bucket used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).  [Some fields require that both locks be held to
 *	change that field; holding either lock is sufficient to read.]
 */

struct vm_page {
	queue_chain_t	pageq;		/* queue info for FIFO
					 * queue or free list (P) */
	queue_chain_t	listq;		/* all pages in same object (O) */
	struct vm_page	*next;		/* VP bucket link (O) */

	vm_object_t	object;		/* which object am I in (O&P) */
	vm_object_offset_t offset;	/* offset into that object (O,P) */

	/*
	 * The following word of flags is protected
	 * by the "page queues" lock.
	 */
	unsigned int	wire_count:16,	/* how many wired down maps use me? (O&P) */
			page_ticket:4,	/* age of the page on the
					 * inactive queue. */
	/* boolean_t */	inactive:1,	/* page is in inactive list (P) */
			active:1,	/* page is in active list (P) */
			pageout_queue:1,/* page is on queue for pageout (P) */
			laundry:1,	/* page is being cleaned now (P)*/
			free:1,		/* page is on free list (P) */
			reference:1,	/* page has been used (P) */
			pageout:1,	/* page wired & busy for pageout (P) */
			gobbled:1,	/* page used internally (P) */
			private:1,	/* Page should not be returned to
					 * the free list (P) */
			zero_fill:1,
			:0;

	/*
	 * The following word of flags is protected
	 * by the "VM object" lock.
	 */
	unsigned int
			page_error:8,	/* error from I/O operations */
	/* boolean_t */	busy:1,		/* page is in transit (O) */
			wanted:1,	/* someone is waiting for page (O) */
			tabled:1,	/* page is in VP table (O) */
			fictitious:1,	/* Physical page doesn't exist (O) */
			no_isync:1,	/* page has not been instruction synced */
			absent:1,	/* Data has been requested, but is
					 * not yet available (O) */
			error:1,	/* Data manager was unable to provide
					 * data due to error (O) */
			dirty:1,	/* Page must be cleaned (O) */
			cleaning:1,	/* Page clean has begun (O) */
			precious:1,	/* Page is precious; data must be
					 * returned even if clean (O) */
			clustered:1,	/* page is not the faulted page (O) */
			overwriting:1,	/* Request to unlock has been made
					 * without having data. (O)
					 * [See vm_fault_page_overwrite] */
			restart:1,	/* Page was pushed higher in shadow
					 * chain by copy_call-related pagers;
					 * start again at top of chain */
			lock_supplied:1,/* protection supplied by pager (O) */
	/* vm_prot_t */	page_lock:3,	/* Uses prohibited by pager (O) */
	/* vm_prot_t */	unlock_request:3,/* Outstanding unlock request (O) */
			unusual:1,	/* Page is absent, error, restart or
					 * page locked */
			encrypted:1,	/* encrypted for secure swap (O) */
			list_req_pending:1, /* pagein/pageout alt mechanism */
					    /* allows creation of list */
					    /* requests on pages that are */
					    /* actively being paged. */
			dump_cleaning:1;    /* set by the pageout daemon when */
					    /* a page being cleaned is */
					    /* encountered and targeted as */
					    /* a pageout candidate */
	/* we've used up all 32 bits */

	ppnum_t		phys_page;	/* Physical address of page, passed
					 *  to pmap_enter (read-only) */
};
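
/*
 * Illustrative sketch (an assumption, not additional API): how the (O)
 * and (P) annotations above translate into locking at a call site.
 * Setting an (O) bit such as "dirty" requires the object lock
 * (vm_object_lock()/vm_object_unlock() from vm/vm_object.h), while
 * moving the page between queues requires vm_page_lock_queues(),
 * defined later in this file.
 *
 *	vm_object_lock(object);
 *	m = vm_page_lookup(object, offset);
 *	if (m != VM_PAGE_NULL && !m->busy) {
 *		m->dirty = TRUE;
 *		vm_page_lock_queues();
 *		vm_page_deactivate(m);
 *		vm_page_unlock_queues();
 *	}
 *	vm_object_unlock(object);
 */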

#define DEBUG_ENCRYPTED_SWAP	1
#if DEBUG_ENCRYPTED_SWAP
#define ASSERT_PAGE_DECRYPTED(page) 					\
	MACRO_BEGIN							\
	if ((page)->encrypted) {					\
		panic("VM page %p should not be encrypted here\n",	\
		      (page));						\
	}								\
	MACRO_END
#else	/* DEBUG_ENCRYPTED_SWAP */
#define ASSERT_PAGE_DECRYPTED(page)	assert(!(page)->encrypted)
#endif	/* DEBUG_ENCRYPTED_SWAP */

typedef struct vm_page	*vm_page_t;

#define VM_PAGE_NULL		((vm_page_t) 0)
#define NEXT_PAGE(m)		((vm_page_t) (m)->pageq.next)
#define NEXT_PAGE_PTR(m)	((vm_page_t *) &(m)->pageq.next)
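
/*
 * Usage sketch (an assumption about the callers, not part of this file):
 * NEXT_PAGE/NEXT_PAGE_PTR treat pageq.next as a singly linked chain, so
 * a run of pages handed around as a list can be walked as:
 *
 *	vm_page_t m;
 *
 *	for (m = page_list; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
 *		... operate on m ...
 *	}
 *
 * where "page_list" is whatever vm_page_t chain the caller was given.
 */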

/*
 * XXX	The unusual bit should not be necessary.  Most of the bit
 * XXX	fields above really want to be masks.
 */

/*
 *	For debugging, this macro can be defined to perform
 *	some useful check on a page structure.
 */

#define VM_PAGE_CHECK(mem)

/*
 *	Each pageable resident page falls into one of three lists:
 *
 *	free
 *		Available for allocation now.
 *	inactive
 *		Not referenced in any map, but still has an
 *		object/offset-page mapping, and may be dirty.
 *		This is the list of pages that should be
 *		paged out next.
 *	active
 *		A list of pages which have been placed in
 *		at least one physical map.  This list is
 *		ordered, in LRU-like fashion.
 */
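
/*
 * Rough lifecycle implied by the lists above (a summary drawn from the
 * routines declared below, not a fixed sequence every page follows):
 *
 *	m = vm_page_grab();			pulled off the free list
 *	vm_page_insert(m, object, offset);	enters the object/offset table
 *	vm_page_activate(m);			placed on the active queue
 *	vm_page_deactivate(m);			aged onto the inactive queue
 *	VM_PAGE_FREE(m);			returned to the free list
 */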

extern
vm_page_t	vm_page_queue_free;	/* memory free queue */
extern
vm_page_t	vm_page_queue_fictitious;	/* fictitious free queue */
extern
queue_head_t	vm_page_queue_active;	/* active memory queue */
extern
queue_head_t	vm_page_queue_inactive;	/* inactive memory queue */
extern
queue_head_t	vm_page_queue_zf;	/* inactive memory queue for zero fill */

extern
vm_offset_t	first_phys_addr;	/* physical address for first_page */
extern
vm_offset_t	last_phys_addr;		/* physical address for last_page */

extern
unsigned int	vm_page_free_count;	/* How many pages are free? */
extern
unsigned int	vm_page_fictitious_count;/* How many fictitious pages are free? */
extern
unsigned int	vm_page_active_count;	/* How many pages are active? */
extern
unsigned int	vm_page_inactive_count;	/* How many pages are inactive? */
extern
unsigned int	vm_page_wire_count;	/* How many pages are wired? */
extern
unsigned int	vm_page_free_target;	/* How many do we want free? */
extern
unsigned int	vm_page_free_min;	/* When to wakeup pageout */
extern
unsigned int	vm_page_inactive_target;/* How many do we want inactive? */
extern
unsigned int	vm_page_free_reserved;	/* How many pages reserved to do pageout */
extern
unsigned int	vm_page_throttled_count;/* Count of zero-fill allocations throttled */
extern
unsigned int	vm_page_gobble_count;

extern
unsigned int	vm_page_purgeable_count;/* How many pages are purgeable now ? */
extern
uint64_t	vm_page_purged_count;	/* How many pages got purged so far ? */

decl_mutex_data(,vm_page_queue_lock)
				/* lock on active and inactive page queues */
decl_mutex_data(,vm_page_queue_free_lock)
				/* lock on free page queue */

extern unsigned int	vm_page_free_wanted;
				/* how many threads are waiting for memory */

extern vm_offset_t	vm_page_fictitious_addr;
				/* (fake) phys_addr of fictitious pages */

extern boolean_t	vm_page_deactivate_hint;

/*
 * Prototypes for functions exported by this module.
 */
extern void		vm_page_bootstrap(
					vm_offset_t	*startp,
					vm_offset_t	*endp);

extern void		vm_page_module_init(void);

extern void		vm_page_create(
					ppnum_t		start,
					ppnum_t		end);

extern vm_page_t	vm_page_lookup(
					vm_object_t	object,
					vm_object_offset_t	offset);

extern vm_page_t	vm_page_grab_fictitious(void);

extern void		vm_page_release_fictitious(
					vm_page_t	page);

extern boolean_t	vm_page_convert(
					vm_page_t	page);

extern void		vm_page_more_fictitious(void);

extern int		vm_pool_low(void);

extern vm_page_t	vm_page_grab(void);

extern void		vm_page_release(
					vm_page_t	page);

extern boolean_t	vm_page_wait(
					int		interruptible );

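/*
 * Typical allocation pattern (a sketch of expected use, not a contract):
 * vm_page_grab() returns VM_PAGE_NULL when the free list is depleted, in
 * which case a caller that may block waits for the pageout daemon via
 * VM_PAGE_WAIT() (defined below) and retries:
 *
 *	vm_page_t m;
 *
 *	while ((m = vm_page_grab()) == VM_PAGE_NULL)
 *		VM_PAGE_WAIT();
 */
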
extern vm_page_t	vm_page_alloc(
					vm_object_t	object,
					vm_object_offset_t	offset);

extern void		vm_page_init(
					vm_page_t	page,
					ppnum_t		phys_page);

extern void		vm_page_free(
					vm_page_t	page);

extern void		vm_page_activate(
					vm_page_t	page);

extern void		vm_page_deactivate(
					vm_page_t	page);

extern void		vm_page_rename(
					vm_page_t	page,
					vm_object_t	new_object,
					vm_object_offset_t	new_offset);

extern void		vm_page_insert(
					vm_page_t	page,
					vm_object_t	object,
					vm_object_offset_t	offset);

extern void		vm_page_replace(
					vm_page_t	mem,
					vm_object_t	object,
					vm_object_offset_t	offset);

extern void		vm_page_remove(
					vm_page_t	page);

extern void		vm_page_zero_fill(
					vm_page_t	page);

extern void		vm_page_part_zero_fill(
					vm_page_t	m,
					vm_offset_t	m_pa,
					vm_size_t	len);

extern void		vm_page_copy(
					vm_page_t	src_page,
					vm_page_t	dest_page);

extern void		vm_page_part_copy(
					vm_page_t	src_m,
					vm_offset_t	src_pa,
					vm_page_t	dst_m,
					vm_offset_t	dst_pa,
					vm_size_t	len);

extern void		vm_page_wire(
					vm_page_t	page);

extern void		vm_page_unwire(
					vm_page_t	page);

extern void		vm_set_page_size(void);

extern void		vm_page_gobble(
					vm_page_t	page);

/*
 *	Functions implemented as macros. m->wanted and m->busy are
 *	protected by the object lock.
 */

#define PAGE_ASSERT_WAIT(m, interruptible)			\
		(((m)->wanted = TRUE),				\
		 assert_wait((event_t) (m), (interruptible)))

#define PAGE_SLEEP(o, m, interruptible)				\
		(((m)->wanted = TRUE),				\
		 thread_sleep_vm_object((o), (m), (interruptible)))

#define PAGE_WAKEUP_DONE(m)					\
		MACRO_BEGIN					\
		(m)->busy = FALSE;				\
		if ((m)->wanted) {				\
			(m)->wanted = FALSE;			\
			thread_wakeup((event_t) (m));		\
		}						\
		MACRO_END

#define PAGE_WAKEUP(m)						\
		MACRO_BEGIN					\
		if ((m)->wanted) {				\
			(m)->wanted = FALSE;			\
			thread_wakeup((event_t) (m));		\
		}						\
		MACRO_END
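
/*
 * Sketch of the busy/wanted handshake these macros implement (an
 * illustration of expected use; the surrounding fault/pageout code is
 * assumed).  With the object locked, a thread that finds a page busy
 * records its interest and sleeps on the page:
 *
 *	if (m->busy) {
 *		PAGE_SLEEP(object, m, THREAD_UNINT);
 *		... recheck the page state after waking ...
 *	}
 *
 * and the thread that marked the page busy, when finished with it and
 * still holding the object lock, clears busy and wakes any waiters:
 *
 *	PAGE_WAKEUP_DONE(m);
 */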

#define VM_PAGE_FREE(p) 			\
		MACRO_BEGIN			\
		vm_page_lock_queues();		\
		vm_page_free(p);		\
		vm_page_unlock_queues();	\
		MACRO_END

#define VM_PAGE_GRAB_FICTITIOUS(M)					\
		MACRO_BEGIN						\
		while ((M = vm_page_grab_fictitious()) == VM_PAGE_NULL)	\
			vm_page_more_fictitious();			\
		MACRO_END

#define VM_PAGE_THROTTLED()						\
		(vm_page_free_count < vm_page_free_min &&		\
		 !(current_thread()->options & TH_OPT_VMPRIV) &&	\
		 ++vm_page_throttled_count)
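
/*
 * Sketch of the intended use (an assumption based on the definition
 * above): zero-fill allocation paths can consult VM_PAGE_THROTTLED()
 * and back off rather than dig below vm_page_free_min while the
 * pageout daemon catches up; privileged (TH_OPT_VMPRIV) threads are
 * never throttled.
 *
 *	if (VM_PAGE_THROTTLED()) {
 *		... back off, e.g. wait for free pages and retry ...
 *	}
 */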

#define VM_PAGE_WAIT()		((void)vm_page_wait(THREAD_UNINT))

#define vm_page_lock_queues()	mutex_lock(&vm_page_queue_lock)
#define vm_page_unlock_queues()	mutex_unlock(&vm_page_queue_lock)

#define VM_PAGE_QUEUES_REMOVE(mem)				\
	MACRO_BEGIN						\
	assert(!mem->laundry);					\
	if (mem->active) {					\
		assert(mem->object != kernel_object);		\
		assert(!mem->inactive);				\
		queue_remove(&vm_page_queue_active,		\
			mem, vm_page_t, pageq);			\
		mem->pageq.next = NULL;				\
		mem->pageq.prev = NULL;				\
		mem->active = FALSE;				\
		if (!mem->fictitious)				\
			vm_page_active_count--;			\
	}							\
								\
	if (mem->inactive) {					\
		assert(mem->object != kernel_object);		\
		assert(!mem->active);				\
		if (mem->zero_fill) {				\
			queue_remove(&vm_page_queue_zf,		\
			mem, vm_page_t, pageq);			\
		} else {					\
			queue_remove(&vm_page_queue_inactive,	\
			mem, vm_page_t, pageq);			\
		}						\
		mem->pageq.next = NULL;				\
		mem->pageq.prev = NULL;				\
		mem->inactive = FALSE;				\
		if (!mem->fictitious)				\
			vm_page_inactive_count--;		\
	}							\
	MACRO_END
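
/*
 * Typical use (a sketch; callers vary): the page queue lock must be held,
 * and the page is usually requeued or freed immediately after removal.
 *
 *	vm_page_lock_queues();
 *	VM_PAGE_QUEUES_REMOVE(m);
 *	... put m on another queue, wire it, or free it ...
 *	vm_page_unlock_queues();
 */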

#endif	/* _VM_VM_PAGE_H_ */