/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_page.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Resident memory system definitions.
 */

#ifndef	_VM_VM_PAGE_H_
#define _VM_VM_PAGE_H_

#include <debug.h>
#include <vm/vm_options.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>

#if defined(__LP64__)

/*
 * In order to make the size of a vm_page_t 64 bytes (the cache line size for both arm64 and x86_64),
 * we keep the next_m pointer packed... as long as the kernel virtual space where we allocate
 * vm_page_t's from doesn't span more than 256 Gbytes, we're safe. There are live tests in the
 * vm_page_t array allocation and the zone init code to determine if we can safely pack and unpack
 * pointers from the 2 ends of these spaces.
 */
typedef uint32_t	vm_page_packed_t;

struct vm_page_packed_queue_entry {
	vm_page_packed_t	next;		/* next element */
	vm_page_packed_t	prev;		/* previous element */
};

typedef struct vm_page_packed_queue_entry	*vm_page_queue_t;
typedef struct vm_page_packed_queue_entry	vm_page_queue_head_t;
typedef struct vm_page_packed_queue_entry	vm_page_queue_chain_t;
typedef struct vm_page_packed_queue_entry	*vm_page_queue_entry_t;

typedef vm_page_packed_t			vm_page_object_t;

#else

/*
 * we can't do the packing trick on 32 bit architectures, so
 * just turn the macros into noops.
 */
typedef struct vm_page	*vm_page_packed_t;

#define	vm_page_queue_t		queue_t
#define	vm_page_queue_head_t	queue_head_t
#define	vm_page_queue_chain_t	queue_chain_t
#define	vm_page_queue_entry_t	queue_entry_t

#define	vm_page_object_t	vm_object_t
#endif


#include <vm/vm_object.h>
#include <kern/queue.h>
#include <kern/locks.h>

#include <kern/macro_help.h>
#include <libkern/OSAtomic.h>


#define VM_PAGE_COMPRESSOR_COUNT	(compressor_object->resident_page_count)

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A hash table bucket used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).  [Some fields require that both locks be held to
 *	change that field; holding either lock is sufficient to read.]
 */

#define VM_PAGE_NULL		((vm_page_t) 0)

extern	char	vm_page_inactive_states[];
extern	char	vm_page_pageable_states[];
extern	char	vm_page_non_speculative_pageable_states[];
extern	char	vm_page_active_or_inactive_states[];


#define	VM_PAGE_INACTIVE(m)			(vm_page_inactive_states[m->vm_page_q_state])
#define	VM_PAGE_PAGEABLE(m)			(vm_page_pageable_states[m->vm_page_q_state])
#define	VM_PAGE_NON_SPECULATIVE_PAGEABLE(m)	(vm_page_non_speculative_pageable_states[m->vm_page_q_state])
#define	VM_PAGE_ACTIVE_OR_INACTIVE(m)		(vm_page_active_or_inactive_states[m->vm_page_q_state])

#define	VM_PAGE_NOT_ON_Q		0	/* page is not present on any queue, nor is it wired... mainly a transient state */
#define	VM_PAGE_IS_WIRED		1	/* page is currently wired */
#define	VM_PAGE_USED_BY_COMPRESSOR	2	/* page is in use by the compressor to hold compressed data */
#define	VM_PAGE_ON_FREE_Q		3	/* page is on the main free queue */
#define	VM_PAGE_ON_FREE_LOCAL_Q		4	/* page is on one of the per-CPU free queues */
#define	VM_PAGE_ON_FREE_LOPAGE_Q	5	/* page is on the lopage pool free list */
#define	VM_PAGE_ON_THROTTLED_Q		6	/* page is on the throttled queue... we stash anonymous pages here when not paging */
#define	VM_PAGE_ON_PAGEOUT_Q		7	/* page is on one of the pageout queues (internal/external) awaiting processing */
#define	VM_PAGE_ON_SPECULATIVE_Q	8	/* page is on one of the speculative queues */
#define	VM_PAGE_ON_ACTIVE_LOCAL_Q	9	/* page has recently been created and is being held in one of the per-CPU local queues */
#define	VM_PAGE_ON_ACTIVE_Q		10	/* page is in the global active queue */
#define	VM_PAGE_ON_INACTIVE_INTERNAL_Q	11	/* page is on the inactive internal queue a.k.a. anonymous queue */
#define	VM_PAGE_ON_INACTIVE_EXTERNAL_Q	12	/* page is on the inactive external queue a.k.a. file-backed queue */
#define	VM_PAGE_ON_INACTIVE_CLEANED_Q	13	/* page has been cleaned to a backing file and is ready to be stolen */
#define	VM_PAGE_ON_SECLUDED_Q		14	/* page is on the secluded queue */
#define	VM_PAGE_Q_STATE_LAST_VALID_VALUE	14	/* we currently use 4 bits for the state... don't let this go beyond 15 */

#define	VM_PAGE_Q_STATE_ARRAY_SIZE	(VM_PAGE_Q_STATE_LAST_VALID_VALUE+1)

#define	pageq	pageq_un.vm_page_pageq
#define	snext	pageq_un.vm_page_snext

struct vm_page {
	union {
		vm_page_queue_chain_t	vm_page_pageq;	/* queue info for FIFO queue or free list (P) */
		struct vm_page		*vm_page_snext;
	} pageq_un;

	vm_page_queue_chain_t	listq;		/* all pages in same object (O) */

#if CONFIG_BACKGROUND_QUEUE
	vm_page_queue_chain_t	vm_page_backgroundq;	/* anonymous pages in the background pool (P) */
#endif

	vm_object_offset_t	offset;		/* offset into that object (O,P) */
	vm_page_object_t	vm_page_object;	/* which object am I in (O&P) */

	/*
	 * The following word of flags is protected
	 * by the "page queues" lock.
	 *
	 * we use the 'wire_count' field to store the local
	 * queue id if local queues are enabled...
	 * see the comments at 'vm_page_queues_remove' as to
	 * why this is safe to do
	 */
#define local_id	wire_count
	unsigned int	wire_count:16,		/* how many wired down maps use me? (O&P) */
			vm_page_q_state:4,	/* which q is the page on (P) */

			vm_page_in_background:1,
			vm_page_on_backgroundq:1,
			/* boolean_t */
			gobbled:1,	/* page used internally (P) */
			laundry:1,	/* page is being cleaned now (P) */
			no_cache:1,	/* page is not to be cached and should
					 * be reused ahead of other pages (P) */
			private:1,	/* Page should not be returned to
					 * the free list (P) */
			reference:1,	/* page has been used (P) */

			__unused_pageq_bits:5;	/* 5 bits available here */

	/*
	 * MUST keep the two 32-bit words used as bit fields
	 * separated, since the compiler has a nasty habit
	 * of using 64-bit loads and stores on them as
	 * if they were a single 64-bit field... since
	 * they are protected by 2 different locks, this
	 * is a real problem
	 */
	vm_page_packed_t	next_m;		/* VP bucket link (O) */

	/*
	 * The following word of flags is protected
	 * by the "VM object" lock.
	 */
	unsigned int
		/* boolean_t */	busy:1,	/* page is in transit (O) */
			wanted:1,	/* someone is waiting for page (O) */
			tabled:1,	/* page is in VP table (O) */
			hashed:1,	/* page is in vm_page_buckets[]
					   (O) + the bucket lock */
			fictitious:1,	/* Physical page doesn't exist (O) */
	/*
	 * IMPORTANT: the "pmapped", "xpmapped" and "clustered" bits can be modified while holding the
	 * VM object "shared" lock + the page lock provided through the pmap_lock_phys_page function.
	 * This is done in vm_fault_enter and the CONSUME_CLUSTERED macro.
	 * It's also ok to modify them behind just the VM object "exclusive" lock.
	 */
			clustered:1,	/* page is not the faulted page (O) or (O-shared AND pmap_page) */
			pmapped:1,	/* page has been entered at some
					 * point into a pmap (O) or (O-shared AND pmap_page) */
			xpmapped:1,	/* page has been entered with execute permission (O)
					 * or (O-shared AND pmap_page) */

			wpmapped:1,	/* page has been entered at some
					 * point into a pmap for write (O) */
			free_when_done:1, /* page is to be freed once cleaning is completed (O) */
			absent:1,	/* Data has been requested, but is
					 * not yet available (O) */
			error:1,	/* Data manager was unable to provide
					 * data due to error (O) */
			dirty:1,	/* Page must be cleaned (O) */
			cleaning:1,	/* Page clean has begun (O) */
			precious:1,	/* Page is precious; data must be
					 * returned even if clean (O) */
			overwriting:1,	/* Request to unlock has been made
					 * without having data. (O)
					 * [See vm_fault_page_overwrite] */
			restart:1,	/* Page was pushed higher in shadow
					   chain by copy_call-related pagers;
					   start again at top of chain */
			unusual:1,	/* Page is absent, error, restart or
					   page locked */
			encrypted:1,	/* encrypted for secure swap (O) */
			encrypted_cleaning:1,	/* encrypting page */
			cs_validated:1,	/* code-signing: page was checked */
			cs_tainted:1,	/* code-signing: page is tainted */
			cs_nx:1,	/* code-signing: page is nx */
			reusable:1,
			lopage:1,
			slid:1,
			written_by_kernel:1,	/* page was written by kernel (i.e. decompressed) */
			__unused_object_bits:5;	/* 5 bits available here */

	ppnum_t		phys_page;	/* Physical address of page, passed
					 * to pmap_enter (read-only) */
};

typedef struct vm_page	*vm_page_t;
extern vm_page_t	vm_pages;
extern vm_page_t	vm_page_array_beginning_addr;
extern vm_page_t	vm_page_array_ending_addr;


struct vm_page_with_ppnum {
	struct	vm_page	vm_page_with_ppnum;
};
typedef struct vm_page_with_ppnum	*vm_page_with_ppnum_t;


#define VM_PAGE_GET_PHYS_PAGE(page)	(page)->phys_page
#define VM_PAGE_SET_PHYS_PAGE(page, ppnum)	\
	MACRO_BEGIN				\
	(page)->phys_page = ppnum;		\
	MACRO_END


#define	DEBUG_ENCRYPTED_SWAP	1
#if DEBUG_ENCRYPTED_SWAP
#define ASSERT_PAGE_DECRYPTED(page)					\
	MACRO_BEGIN							\
	if ((page)->encrypted) {					\
		panic("VM page %p should not be encrypted here\n",	\
		      (page));						\
	}								\
	MACRO_END
#else	/* DEBUG_ENCRYPTED_SWAP */
#define ASSERT_PAGE_DECRYPTED(page)	assert(!(page)->encrypted)
#endif	/* DEBUG_ENCRYPTED_SWAP */


#if defined(__LP64__)

#define VM_VPLQ_ALIGNMENT		128
#define VM_PACKED_POINTER_ALIGNMENT	64	/* must be a power of 2 */
#define VM_PACKED_POINTER_SHIFT		6

#define VM_PACKED_FROM_VM_PAGES_ARRAY	0x80000000

static inline vm_page_packed_t vm_page_pack_ptr(uintptr_t p)
{
	vm_page_packed_t	packed_ptr;

	if (!p)
		return ((vm_page_packed_t)0);

	if (p >= (uintptr_t)(vm_page_array_beginning_addr) && p < (uintptr_t)(vm_page_array_ending_addr)) {
		packed_ptr = ((vm_page_packed_t)(((vm_page_t)p - vm_page_array_beginning_addr)));
		assert(! (packed_ptr & VM_PACKED_FROM_VM_PAGES_ARRAY));
		packed_ptr |= VM_PACKED_FROM_VM_PAGES_ARRAY;
		return packed_ptr;
	}

	assert((p & (VM_PACKED_POINTER_ALIGNMENT - 1)) == 0);

	packed_ptr = ((vm_page_packed_t)(((uintptr_t)(p - (uintptr_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS)) >> VM_PACKED_POINTER_SHIFT));
	assert(packed_ptr != 0);
	assert(! (packed_ptr & VM_PACKED_FROM_VM_PAGES_ARRAY));
	return packed_ptr;
}


static inline uintptr_t vm_page_unpack_ptr(uintptr_t p)
{
	if (!p)
		return ((uintptr_t)0);

	if (p & VM_PACKED_FROM_VM_PAGES_ARRAY)
		return ((uintptr_t)(&vm_pages[(uint32_t)(p & ~VM_PACKED_FROM_VM_PAGES_ARRAY)]));
	return (((p << VM_PACKED_POINTER_SHIFT) + (uintptr_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS));
}


#define VM_PAGE_PACK_PTR(p)	vm_page_pack_ptr((uintptr_t)(p))
#define VM_PAGE_UNPACK_PTR(p)	vm_page_unpack_ptr((uintptr_t)(p))
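
/*
 * Illustrative sketch (not part of the original header): round-tripping a
 * vm_page_t through the 32-bit packed form. Pages inside the vm_pages
 * array are encoded as an array index tagged with
 * VM_PACKED_FROM_VM_PAGES_ARRAY; any other 64-byte-aligned kernel pointer
 * is encoded as its offset from VM_MIN_KERNEL_AND_KEXT_ADDRESS, shifted
 * right by VM_PACKED_POINTER_SHIFT. Unpacking must always recover the
 * original pointer, which is what the live tests mentioned at the top of
 * this file verify.
 */
#if 0	/* example only */
static inline boolean_t
vm_page_pack_roundtrip_ok(vm_page_t m)
{
	vm_page_packed_t packed = VM_PAGE_PACK_PTR(m);

	/* unpacking must reproduce the original pointer */
	return ((vm_page_t)VM_PAGE_UNPACK_PTR(packed) == m);
}
#endif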

#define	VM_PAGE_OBJECT(p)	((vm_object_t)(VM_PAGE_UNPACK_PTR(p->vm_page_object)))
#define	VM_PAGE_PACK_OBJECT(o)	((vm_page_object_t)(VM_PAGE_PACK_PTR(o)))


#define	VM_PAGE_ZERO_PAGEQ_ENTRY(p)	\
MACRO_BEGIN				\
	(p)->snext = 0;			\
MACRO_END


#define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p)	VM_PAGE_PACK_PTR(p)


static __inline__ void
vm_page_enqueue_tail(
	vm_page_queue_t		que,
	vm_page_queue_entry_t	elt)
{
	vm_page_queue_entry_t	old_tail;

	old_tail = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(que->prev);
	elt->next = VM_PAGE_PACK_PTR(que);
	elt->prev = que->prev;
	old_tail->next = VM_PAGE_PACK_PTR(elt);
	que->prev = VM_PAGE_PACK_PTR(elt);
}


static __inline__ void
vm_page_remque(
	vm_page_queue_entry_t	elt)
{
	vm_page_queue_entry_t	next_elt, prev_elt;

	next_elt = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(elt->next);

	/* next_elt may equal prev_elt (and the queue head) if elt was the only element */
	prev_elt = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(elt->prev);

	next_elt->prev = VM_PAGE_PACK_PTR(prev_elt);
	prev_elt->next = VM_PAGE_PACK_PTR(next_elt);

	elt->next = 0;
	elt->prev = 0;
}


/*
 *	Macro:		vm_page_queue_init
 *	Function:
 *		Initialize the given queue.
 *	Header:
 *		void vm_page_queue_init(q)
 *			vm_page_queue_t	q;	\* MODIFIED *\
 */
#define	vm_page_queue_init(q)		\
MACRO_BEGIN				\
	assert((((uintptr_t)q) & (VM_PACKED_POINTER_ALIGNMENT-1)) == 0);	\
	assert((VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR((uintptr_t)q))) == (uintptr_t)q);	\
	(q)->next = VM_PAGE_PACK_PTR(q);	\
	(q)->prev = VM_PAGE_PACK_PTR(q);	\
MACRO_END


/*
 *	Macro:		vm_page_queue_enter
 *	Function:
 *		Insert a new element at the tail of the queue.
 *	Header:
 *		void vm_page_queue_enter(q, elt, type, field)
 *			vm_page_queue_t	q;
 *			<type>		elt;
 *			<type> is what's in our queue
 *			<field> is the chain field in (*<type>)
 *	Note:
 *		This should only be used with Method 2 queue iteration (element chains)
 */
#define vm_page_queue_enter(head, elt, type, field)			\
MACRO_BEGIN								\
	vm_page_queue_entry_t __prev;					\
									\
	__prev = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((head)->prev)); \
	if ((head) == __prev) {						\
		(head)->next = VM_PAGE_PACK_PTR(elt);			\
	}								\
	else {								\
		((type)(void *)__prev)->field.next = VM_PAGE_PACK_PTR(elt); \
	}								\
	(elt)->field.prev = VM_PAGE_PACK_PTR(__prev);			\
	(elt)->field.next = VM_PAGE_PACK_PTR(head);			\
	(head)->prev = VM_PAGE_PACK_PTR(elt);				\
MACRO_END


/*
 *	Macro:		vm_page_queue_enter_first
 *	Function:
 *		Insert a new element at the head of the queue.
 *	Header:
 *		void vm_page_queue_enter_first(q, elt, type, field)
 *			vm_page_queue_t	q;
 *			<type>		elt;
 *			<type> is what's in our queue
 *			<field> is the chain field in (*<type>)
 *	Note:
 *		This should only be used with Method 2 queue iteration (element chains)
 */
#define vm_page_queue_enter_first(head, elt, type, field)		\
MACRO_BEGIN								\
	vm_page_queue_entry_t __next;					\
									\
	__next = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((head)->next)); \
	if ((head) == __next) {						\
		(head)->prev = VM_PAGE_PACK_PTR(elt);			\
	}								\
	else {								\
		((type)(void *)__next)->field.prev = VM_PAGE_PACK_PTR(elt); \
	}								\
	(elt)->field.next = VM_PAGE_PACK_PTR(__next);			\
	(elt)->field.prev = VM_PAGE_PACK_PTR(head);			\
	(head)->next = VM_PAGE_PACK_PTR(elt);				\
MACRO_END


/*
 *	Macro:		vm_page_queue_remove
 *	Function:
 *		Remove an arbitrary item from the queue.
 *	Header:
 *		void vm_page_queue_remove(q, qe, type, field)
 *			arguments as in vm_page_queue_enter
 *	Note:
 *		This should only be used with Method 2 queue iteration (element chains)
 */
#define vm_page_queue_remove(head, elt, type, field)			\
MACRO_BEGIN								\
	vm_page_queue_entry_t __next, __prev;				\
									\
	__next = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((elt)->field.next)); \
	__prev = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((elt)->field.prev)); \
									\
	if ((head) == __next)						\
		(head)->prev = VM_PAGE_PACK_PTR(__prev);		\
	else								\
		((type)(void *)__next)->field.prev = VM_PAGE_PACK_PTR(__prev); \
									\
	if ((head) == __prev)						\
		(head)->next = VM_PAGE_PACK_PTR(__next);		\
	else								\
		((type)(void *)__prev)->field.next = VM_PAGE_PACK_PTR(__next); \
									\
	(elt)->field.next = 0;						\
	(elt)->field.prev = 0;						\
MACRO_END


/*
 *	Macro:		vm_page_queue_remove_first
 *	Function:
 *		Remove and return the entry at the head of
 *		the queue.
 *	Header:
 *		vm_page_queue_remove_first(head, entry, type, field)
 *		entry is returned by reference
 *	Note:
 *		This should only be used with Method 2 queue iteration (element chains)
 */
#define vm_page_queue_remove_first(head, entry, type, field)		\
MACRO_BEGIN								\
	vm_page_queue_entry_t __next;					\
									\
	(entry) = (type)(void *) VM_PAGE_UNPACK_PTR(((head)->next));	\
	__next = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((entry)->field.next)); \
									\
	if ((head) == __next)						\
		(head)->prev = VM_PAGE_PACK_PTR(head);			\
	else								\
		((type)(void *)(__next))->field.prev = VM_PAGE_PACK_PTR(head); \
	(head)->next = VM_PAGE_PACK_PTR(__next);			\
									\
	(entry)->field.next = 0;					\
	(entry)->field.prev = 0;					\
MACRO_END


/*
 *	Macro:		vm_page_queue_end
 *	Function:
 *		Tests whether an entry is the end of the queue.
 *	Header:
 *		boolean_t vm_page_queue_end(q, qe)
 *			vm_page_queue_t q;
 *			vm_page_queue_entry_t qe;
 */
#define vm_page_queue_end(q, qe)	((q) == (qe))


/*
 *	Macro:		vm_page_queue_empty
 *	Function:
 *		Tests whether a queue is empty.
 *	Header:
 *		boolean_t vm_page_queue_empty(q)
 *			vm_page_queue_t q;
 */
#define vm_page_queue_empty(q)		vm_page_queue_end((q), ((vm_page_queue_entry_t)vm_page_queue_first(q)))


/*
 *	Macro:		vm_page_queue_first
 *	Function:
 *		Returns the first entry in the queue.
 *	Header:
 *		uintptr_t vm_page_queue_first(q)
 *			vm_page_queue_t	q;	\* IN *\
 */
#define vm_page_queue_first(q)		(VM_PAGE_UNPACK_PTR((q)->next))


/*
 *	Macro:		vm_page_queue_last
 *	Function:
 *		Returns the last entry in the queue.
 *	Header:
 *		uintptr_t vm_page_queue_last(q)
 *			vm_page_queue_t	q;	\* IN *\
 */
#define vm_page_queue_last(q)		(VM_PAGE_UNPACK_PTR((q)->prev))


/*
 *	Macro:		vm_page_queue_next
 *	Function:
 *		Returns the entry after an item in the queue.
 *	Header:
 *		uintptr_t vm_page_queue_next(qc)
 *			vm_page_queue_t qc;
 */
#define vm_page_queue_next(qc)		(VM_PAGE_UNPACK_PTR((qc)->next))


/*
 *	Macro:		vm_page_queue_prev
 *	Function:
 *		Returns the entry before an item in the queue.
 *	Header:
 *		uintptr_t vm_page_queue_prev(qc)
 *			vm_page_queue_t qc;
 */
#define vm_page_queue_prev(qc)		(VM_PAGE_UNPACK_PTR((qc)->prev))


/*
 *	Macro:		vm_page_queue_iterate
 *	Function:
 *		iterate over each item in the queue.
 *		Generates a 'for' loop, setting elt to
 *		each item in turn (by reference).
 *	Header:
 *		vm_page_queue_iterate(q, elt, type, field)
 *			vm_page_queue_t q;
 *			<type>		elt;
 *			<type> is what's in our queue
 *			<field> is the chain field in (*<type>)
 *	Note:
 *		This should only be used with Method 2 queue iteration (element chains)
 */
#define vm_page_queue_iterate(head, elt, type, field)			\
	for ((elt) = (type)(void *) vm_page_queue_first(head);		\
	     !vm_page_queue_end((head), (vm_page_queue_entry_t)(elt));	\
	     (elt) = (type)(void *) vm_page_queue_next(&(elt)->field))
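
/*
 * Illustrative sketch (not part of the original header): typical use of the
 * packed-queue macros above, mirroring queue.h's "Method 2" element-chain
 * iteration. The queue head must satisfy the alignment assertions in
 * vm_page_queue_init; "pageq" is the chain field in struct vm_page.
 */
#if 0	/* example only */
static void
vm_page_queue_example(vm_page_t m1, vm_page_t m2)
{
	vm_page_queue_head_t	q __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
	vm_page_t		elt;

	vm_page_queue_init(&q);
	vm_page_queue_enter(&q, m1, vm_page_t, pageq);		/* append at the tail */
	vm_page_queue_enter_first(&q, m2, vm_page_t, pageq);	/* push at the head */

	vm_page_queue_iterate(&q, elt, vm_page_t, pageq) {
		/* visits m2, then m1 */
	}

	vm_page_queue_remove(&q, m1, vm_page_t, pageq);
	vm_page_queue_remove_first(&q, elt, vm_page_t, pageq);	/* elt == m2 */
	assert(vm_page_queue_empty(&q));
}
#endif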

#else

#define VM_VPLQ_ALIGNMENT		128
#define VM_PACKED_POINTER_ALIGNMENT	4
#define VM_PACKED_POINTER_SHIFT		0

#define VM_PACKED_FROM_VM_PAGES_ARRAY	0

#define VM_PAGE_PACK_PTR(p)	(p)
#define VM_PAGE_UNPACK_PTR(p)	((uintptr_t)(p))

#define VM_PAGE_OBJECT(p)	(vm_object_t)(p->vm_page_object)
#define	VM_PAGE_PACK_OBJECT(o)	((vm_page_object_t)(VM_PAGE_PACK_PTR(o)))


#define	VM_PAGE_ZERO_PAGEQ_ENTRY(p)	\
MACRO_BEGIN				\
	(p)->pageq.next = 0;		\
	(p)->pageq.prev = 0;		\
MACRO_END

#define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p)	((queue_entry_t)(p))

#define	vm_page_remque			remque
#define	vm_page_enqueue_tail		enqueue_tail
#define	vm_page_queue_init		queue_init
#define	vm_page_queue_enter		queue_enter
#define	vm_page_queue_enter_first	queue_enter_first
#define	vm_page_queue_remove		queue_remove
#define	vm_page_queue_remove_first	queue_remove_first
#define	vm_page_queue_end		queue_end
#define	vm_page_queue_empty		queue_empty
#define	vm_page_queue_first		queue_first
#define	vm_page_queue_last		queue_last
#define	vm_page_queue_next		queue_next
#define	vm_page_queue_prev		queue_prev
#define	vm_page_queue_iterate		queue_iterate

#endif

/*
 * VM_PAGE_MIN_SPECULATIVE_AGE_Q through VM_PAGE_MAX_SPECULATIVE_AGE_Q
 * represents a set of aging bins that are 'protected'...
 *
 * VM_PAGE_SPECULATIVE_AGED_Q is a list of the speculative pages that have
 * not yet been 'claimed' but have been aged out of the protective bins.
 * This occurs in vm_page_speculate when it advances to the next bin
 * and discovers that it is still occupied... at that point, all of the
 * pages in that bin are moved to the VM_PAGE_SPECULATIVE_AGED_Q.  The pages
 * in that bin are all guaranteed to have reached at least the maximum age
 * we allow for a protected page... they can be older if there is no
 * memory pressure to pull them from the bin, or there are no new speculative pages
 * being generated to push them out.
 * This list is the one that vm_pageout_scan will prefer when looking
 * for pages to move to the underweight free list.
 *
 * VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS
 * defines the amount of time a speculative page is normally
 * allowed to live in the 'protected' state (i.e. not available
 * to be stolen if vm_pageout_scan is running and looking for
 * pages)... however, if the total number of speculative pages
 * in the protected state exceeds our limit (defined in vm_pageout.c)
 * and there are none available in VM_PAGE_SPECULATIVE_AGED_Q, then
 * vm_pageout_scan is allowed to steal pages from the protected
 * bucket even if they are underage.
 *
 * vm_pageout_scan is also allowed to pull pages from a protected
 * bin if the bin has reached the "age of consent" we've set.
 */
#define VM_PAGE_MAX_SPECULATIVE_AGE_Q	10
#define VM_PAGE_MIN_SPECULATIVE_AGE_Q	1
#define VM_PAGE_SPECULATIVE_AGED_Q	0

#define VM_PAGE_SPECULATIVE_Q_AGE_MS	500
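
/*
 * Worked example (not in the original header): with the defaults above, a
 * speculative page can stay in the 'protected' state for up to
 *
 *	VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS
 *	    = 10 * 500 ms = 5 seconds
 *
 * before its bin is aged out onto VM_PAGE_SPECULATIVE_AGED_Q (absent the
 * early-steal conditions described above).
 */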

struct vm_speculative_age_q {
	/*
	 * memory queue for speculative pages via clustered pageins
	 */
	vm_page_queue_head_t	age_q;
	mach_timespec_t		age_ts;
} __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));


extern
struct vm_speculative_age_q	vm_page_queue_speculative[];

extern int			speculative_steal_index;
extern int			speculative_age_index;
extern unsigned int		vm_page_speculative_q_age_ms;


typedef struct vm_locks_array {
	char		pad __attribute__ ((aligned (64)));
	lck_mtx_t	vm_page_queue_lock2 __attribute__ ((aligned (64)));
	lck_mtx_t	vm_page_queue_free_lock2 __attribute__ ((aligned (64)));
	char		pad2 __attribute__ ((aligned (64)));
} vm_locks_array_t;


#if CONFIG_BACKGROUND_QUEUE
extern void	vm_page_assign_background_state(vm_page_t mem);
extern void	vm_page_update_background_state(vm_page_t mem);
extern void	vm_page_add_to_backgroundq(vm_page_t mem, boolean_t first);
extern void	vm_page_remove_from_backgroundq(vm_page_t mem);
#endif

#define VM_PAGE_WIRED(m)	((m)->vm_page_q_state == VM_PAGE_IS_WIRED)
#define NEXT_PAGE(m)		((m)->snext)
#define NEXT_PAGE_PTR(m)	(&(m)->snext)

/*
 * XXX	The unusual bit should not be necessary.  Most of the bit
 * XXX	fields above really want to be masks.
 */

/*
 *	For debugging, this macro can be defined to perform
 *	some useful check on a page structure.
 *	INTENTIONALLY left as a no-op so that the
 *	current call-sites can be left intact for future uses.
 */

#define VM_PAGE_CHECK(mem)			\
	MACRO_BEGIN				\
	MACRO_END

/*	Page coloring:
 *
 *	The free page list is actually n lists, one per color,
 *	where the number of colors is a function of the machine's
 *	cache geometry set at system initialization.  To disable
 *	coloring, set vm_colors to 1 and vm_color_mask to 0.
 *	The boot-arg "colors" may be used to override vm_colors.
 *	Note that there is little harm in having more colors than needed.
 */

#define MAX_COLORS	128
#define	DEFAULT_COLORS	32

extern
unsigned int	vm_colors;		/* must be in range 1..MAX_COLORS */
extern
unsigned int	vm_color_mask;		/* must be (vm_colors-1) */
extern
unsigned int	vm_cache_geometry_colors;	/* optimal #colors based on cache geometry */
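
/*
 * Illustrative sketch (an assumption, not part of the original header): a
 * page's color is conventionally derived from the low bits of its physical
 * page number, selecting one of the per-color free lists. With coloring
 * disabled (vm_colors == 1, vm_color_mask == 0) this always yields color 0,
 * i.e. a single free list.
 */
#if 0	/* example only */
static inline unsigned int
vm_page_example_color(vm_page_t m)
{
	return (VM_PAGE_GET_PHYS_PAGE(m) & vm_color_mask);
}
#endif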

/*
 * Wired memory is a very limited resource and we can't let users exhaust it
 * and deadlock the entire system.  We enforce the following limits:
 *
 * vm_user_wire_limit (default: all memory minus vm_global_no_user_wire_amount)
 *	how much memory can be user-wired in one user task
 *
 * vm_global_user_wire_limit (default: same as vm_user_wire_limit)
 *	how much memory can be user-wired in all user tasks
 *
 * vm_global_no_user_wire_amount (default: VM_NOT_USER_WIREABLE)
 *	how much memory must remain user-unwired at any time
 */
#define VM_NOT_USER_WIREABLE (64*1024*1024)	/* 64MB */
extern
vm_map_size_t	vm_user_wire_limit;
extern
vm_map_size_t	vm_global_user_wire_limit;
extern
vm_map_size_t	vm_global_no_user_wire_amount;
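
/*
 * Worked example (not in the original header): on an 8 GB machine with the
 * default VM_NOT_USER_WIREABLE of 64 MB,
 *
 *	vm_user_wire_limit = 8192 MB - 64 MB = 8128 MB
 *
 * and vm_global_user_wire_limit defaults to the same value, so no single
 * task, and no combination of tasks, can wire past 8128 MB, leaving at
 * least 64 MB user-unwired at all times.
 */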

/*
 *	Each pageable resident page falls into one of three lists:
 *
 *	free
 *		Available for allocation now.  The free list is
 *		actually an array of lists, one per color.
 *	inactive
 *		Not referenced in any map, but still has an
 *		object/offset-page mapping, and may be dirty.
 *		This is the list of pages that should be
 *		paged out next.  There are actually two
 *		inactive lists, one for pages brought in from
 *		disk or other backing store, and another
 *		for "zero-filled" pages.  See vm_pageout_scan()
 *		for the distinction and usage.
 *	active
 *		A list of pages which have been placed in
 *		at least one physical map.  This list is
 *		ordered, in LRU-like fashion.
 */


#define VPL_LOCK_SPIN 1

struct vpl {
	vm_page_queue_head_t	vpl_queue;
	unsigned int	vpl_count;
	unsigned int	vpl_internal_count;
	unsigned int	vpl_external_count;
#ifdef	VPL_LOCK_SPIN
	lck_spin_t	vpl_lock;
#else
	lck_mtx_t	vpl_lock;
	lck_mtx_ext_t	vpl_lock_ext;
#endif
};

struct	vplq {
	union {
		char		cache_line_pad[VM_VPLQ_ALIGNMENT];
		struct vpl	vpl;
	} vpl_un;
};
extern
unsigned int	vm_page_local_q_count;
extern
struct vplq	*vm_page_local_q;
extern
unsigned int	vm_page_local_q_soft_limit;
extern
unsigned int	vm_page_local_q_hard_limit;
extern
vm_locks_array_t vm_page_locks;

extern
vm_page_queue_head_t	vm_lopage_queue_free;		/* low memory free queue */
extern
vm_page_queue_head_t	vm_page_queue_active;		/* active memory queue */
extern
vm_page_queue_head_t	vm_page_queue_inactive;		/* inactive memory queue for normal pages */
#if CONFIG_SECLUDED_MEMORY
extern
vm_page_queue_head_t	vm_page_queue_secluded;		/* reclaimable pages secluded for Camera */
#endif /* CONFIG_SECLUDED_MEMORY */
extern
vm_page_queue_head_t	vm_page_queue_cleaned;		/* clean-queue inactive memory */
extern
vm_page_queue_head_t	vm_page_queue_anonymous;	/* inactive memory queue for anonymous pages */
extern
vm_page_queue_head_t	vm_page_queue_throttled;	/* memory queue for throttled pageout pages */

extern
queue_head_t	vm_objects_wired;
extern
lck_spin_t	vm_objects_wired_lock;

#if CONFIG_BACKGROUND_QUEUE

#define VM_PAGE_BACKGROUND_TARGET_MAX	50000

#define	VM_PAGE_BG_DISABLED	0
#define	VM_PAGE_BG_LEVEL_1	1
#define	VM_PAGE_BG_LEVEL_2	2
#define	VM_PAGE_BG_LEVEL_3	3

extern
vm_page_queue_head_t	vm_page_queue_background;
extern
uint64_t	vm_page_background_promoted_count;
extern
uint32_t	vm_page_background_count;
extern
uint32_t	vm_page_background_limit;
extern
uint32_t	vm_page_background_target;
extern
uint32_t	vm_page_background_internal_count;
extern
uint32_t	vm_page_background_external_count;
extern
uint32_t	vm_page_background_mode;
extern
uint32_t	vm_page_background_exclude_external;

#endif

extern
vm_offset_t	first_phys_addr;	/* physical address for first_page */
extern
vm_offset_t	last_phys_addr;		/* physical address for last_page */

extern
unsigned int	vm_page_free_count;	/* How many pages are free? (sum of all colors) */
extern
unsigned int	vm_page_active_count;	/* How many pages are active? */
extern
unsigned int	vm_page_inactive_count;	/* How many pages are inactive? */
#if CONFIG_SECLUDED_MEMORY
extern
unsigned int	vm_page_secluded_count;	/* How many pages are secluded? */
extern
unsigned int	vm_page_secluded_count_free;
extern
unsigned int	vm_page_secluded_count_inuse;
#endif /* CONFIG_SECLUDED_MEMORY */
extern
unsigned int	vm_page_cleaned_count;	/* How many pages are in the clean queue? */
extern
unsigned int	vm_page_throttled_count;/* How many inactives are throttled */
extern
unsigned int	vm_page_speculative_count;	/* How many speculative pages are unclaimed? */
extern unsigned int	vm_page_pageable_internal_count;
extern unsigned int	vm_page_pageable_external_count;
extern
unsigned int	vm_page_xpmapped_external_count;	/* How many pages are mapped executable? */
extern
unsigned int	vm_page_external_count;	/* How many pages are file-backed? */
extern
unsigned int	vm_page_internal_count;	/* How many pages are anonymous? */
extern
unsigned int	vm_page_wire_count;		/* How many pages are wired? */
extern
unsigned int	vm_page_wire_count_initial;	/* How many pages wired at startup */
extern
unsigned int	vm_page_free_target;	/* How many do we want free? */
extern
unsigned int	vm_page_free_min;	/* When to wakeup pageout */
extern
unsigned int	vm_page_throttle_limit;	/* When to throttle new page creation */
extern
uint32_t	vm_page_creation_throttle;	/* When to throttle new page creation */
extern
unsigned int	vm_page_inactive_target;/* How many do we want inactive? */
#if CONFIG_SECLUDED_MEMORY
extern
unsigned int	vm_page_secluded_target;/* How many do we want secluded? */
#endif /* CONFIG_SECLUDED_MEMORY */
extern
unsigned int	vm_page_anonymous_min;	/* When it's ok to pre-clean */
extern
unsigned int	vm_page_inactive_min;	/* When to wakeup pageout */
extern
unsigned int	vm_page_free_reserved;	/* How many pages reserved to do pageout */
extern
unsigned int	vm_page_throttle_count;	/* Count of page allocations throttled */
extern
unsigned int	vm_page_gobble_count;
extern
unsigned int	vm_page_stolen_count;	/* Count of stolen pages not accounted in zones */


#if DEVELOPMENT || DEBUG
extern
unsigned int	vm_page_speculative_used;
#endif

extern
unsigned int	vm_page_purgeable_count;/* How many pages are purgeable now ? */
extern
unsigned int	vm_page_purgeable_wired_count;/* How many purgeable pages are wired now ? */
extern
uint64_t	vm_page_purged_count;	/* How many pages got purged so far ? */

extern unsigned int	vm_page_free_wanted;
				/* how many threads are waiting for memory */

extern unsigned int	vm_page_free_wanted_privileged;
				/* how many VM privileged threads are waiting for memory */
#if CONFIG_SECLUDED_MEMORY
extern unsigned int	vm_page_free_wanted_secluded;
				/* how many threads are waiting for secluded memory */
#endif /* CONFIG_SECLUDED_MEMORY */

extern ppnum_t	vm_page_fictitious_addr;
				/* (fake) phys_addr of fictitious pages */

extern ppnum_t	vm_page_guard_addr;
				/* (fake) phys_addr of guard pages */


extern boolean_t	vm_page_deactivate_hint;

extern int		vm_compressor_mode;

/*
 * 0 = all pages available (default)
 * 1 = disable high memory (cap max pages to 4G)
 * 2 = prefer high memory
 */
extern int		vm_himemory_mode;

extern boolean_t	vm_lopage_needed;
extern uint32_t		vm_lopage_free_count;
extern uint32_t		vm_lopage_free_limit;
extern uint32_t		vm_lopage_lowater;
extern boolean_t	vm_lopage_refill;
extern uint64_t		max_valid_dma_address;
extern ppnum_t		max_valid_low_ppnum;

/*
 * Prototypes for functions exported by this module.
 */
extern void		vm_page_bootstrap(
					vm_offset_t	*startp,
					vm_offset_t	*endp);

extern void		vm_page_module_init(void);

extern void		vm_page_init_local_q(void);

extern void		vm_page_create(
					ppnum_t		start,
					ppnum_t		end);

extern vm_page_t	kdp_vm_page_lookup(
					vm_object_t		object,
					vm_object_offset_t	offset);

extern vm_page_t	vm_page_lookup(
					vm_object_t		object,
					vm_object_offset_t	offset);

extern vm_page_t	vm_page_grab_fictitious(void);

extern vm_page_t	vm_page_grab_guard(void);

extern void		vm_page_release_fictitious(
					vm_page_t page);

extern void		vm_page_more_fictitious(void);

extern int		vm_pool_low(void);

extern vm_page_t	vm_page_grab(void);
extern vm_page_t	vm_page_grab_options(int flags);
#if CONFIG_SECLUDED_MEMORY
#define VM_PAGE_GRAB_SECLUDED	0x00000001
#endif /* CONFIG_SECLUDED_MEMORY */

extern vm_page_t	vm_page_grablo(void);

extern void		vm_page_release(
					vm_page_t	page,
					boolean_t	page_queues_locked);

extern boolean_t	vm_page_wait(
					int		interruptible);

extern vm_page_t	vm_page_alloc(
					vm_object_t		object,
					vm_object_offset_t	offset);

extern vm_page_t	vm_page_alloc_guard(
					vm_object_t		object,
					vm_object_offset_t	offset);

extern void		vm_page_init(
					vm_page_t	page,
					ppnum_t		phys_page,
					boolean_t	lopage);

extern void		vm_page_free(
					vm_page_t	page);

extern void		vm_page_free_unlocked(
					vm_page_t	page,
					boolean_t	remove_from_hash);

extern void		vm_page_activate(
					vm_page_t	page);

extern void		vm_page_deactivate(
					vm_page_t	page);

extern void		vm_page_deactivate_internal(
					vm_page_t	page,
					boolean_t	clear_hw_reference);

extern void		vm_page_enqueue_cleaned(vm_page_t page);

extern void		vm_page_lru(
					vm_page_t	page);

extern void		vm_page_speculate(
					vm_page_t	page,
					boolean_t	new);

extern void		vm_page_speculate_ageit(
					struct vm_speculative_age_q *aq);

extern void		vm_page_reactivate_all_throttled(void);

extern void		vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks);

extern void		vm_page_rename(
					vm_page_t		page,
					vm_object_t		new_object,
					vm_object_offset_t	new_offset,
					boolean_t		encrypted_ok);

extern void		vm_page_insert(
					vm_page_t		page,
					vm_object_t		object,
					vm_object_offset_t	offset);

extern void		vm_page_insert_wired(
					vm_page_t		page,
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_tag_t		tag);

extern void		vm_page_insert_internal(
					vm_page_t		page,
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_tag_t		tag,
					boolean_t		queues_lock_held,
					boolean_t		insert_in_hash,
					boolean_t		batch_pmap_op,
					boolean_t		delayed_accounting,
					uint64_t		*delayed_ledger_update);

extern void		vm_page_replace(
					vm_page_t		mem,
					vm_object_t		object,
					vm_object_offset_t	offset);

extern void		vm_page_remove(
					vm_page_t	page,
					boolean_t	remove_from_hash);

extern void		vm_page_zero_fill(
					vm_page_t	page);

extern void		vm_page_part_zero_fill(
					vm_page_t	m,
					vm_offset_t	m_pa,
					vm_size_t	len);

extern void		vm_page_copy(
					vm_page_t	src_page,
					vm_page_t	dest_page);

extern void		vm_page_part_copy(
					vm_page_t	src_m,
					vm_offset_t	src_pa,
					vm_page_t	dst_m,
					vm_offset_t	dst_pa,
					vm_size_t	len);

extern void		vm_page_wire(
					vm_page_t	page,
					vm_tag_t	tag,
					boolean_t	check_memorystatus);

extern void		vm_page_unwire(
					vm_page_t	page,
					boolean_t	queueit);

extern void		vm_set_page_size(void);

extern void		vm_page_gobble(
					vm_page_t	page);

extern void		vm_page_validate_cs(vm_page_t	page);
extern void		vm_page_validate_cs_mapped(
					vm_page_t	page,
					const void	*kaddr);
extern void		vm_page_validate_cs_mapped_chunk(
					vm_page_t	page,
					const void	*kaddr,
					vm_offset_t	chunk_offset,
					vm_size_t	chunk_size,
					boolean_t	*validated,
					unsigned	*tainted);

extern void		vm_page_free_prepare_queues(
					vm_page_t	page);

extern void		vm_page_free_prepare_object(
					vm_page_t	page,
					boolean_t	remove_from_hash);

#if CONFIG_IOSCHED
extern wait_result_t	vm_page_sleep(
					vm_object_t	object,
					vm_page_t	m,
					int		interruptible);
#endif

extern void vm_pressure_response(void);

#if CONFIG_JETSAM
extern void memorystatus_pages_update(unsigned int pages_avail);

#define VM_CHECK_MEMORYSTATUS do { \
	memorystatus_pages_update( \
		vm_page_pageable_external_count + \
		vm_page_free_count + \
		(VM_DYNAMIC_PAGING_ENABLED() ? 0 : vm_page_purgeable_count) \
		); \
	} while(0)

#else /* CONFIG_JETSAM */


#define VM_CHECK_MEMORYSTATUS	vm_pressure_response()


#endif /* CONFIG_JETSAM */

/*
 * Functions implemented as macros. m->wanted and m->busy are
 * protected by the object lock.
 */

#define SET_PAGE_DIRTY(m, set_pmap_modified)			\
		MACRO_BEGIN					\
		vm_page_t __page__ = (m);			\
		__page__->dirty = TRUE;				\
		MACRO_END

#define PAGE_ASSERT_WAIT(m, interruptible)			\
		(((m)->wanted = TRUE),				\
		 assert_wait((event_t) (m), (interruptible)))

#if CONFIG_IOSCHED
#define PAGE_SLEEP(o, m, interruptible)				\
		vm_page_sleep(o, m, interruptible)
#else
#define PAGE_SLEEP(o, m, interruptible)				\
	(((m)->wanted = TRUE),					\
	 thread_sleep_vm_object((o), (m), (interruptible)))
#endif

#define PAGE_WAKEUP_DONE(m)					\
		MACRO_BEGIN					\
		(m)->busy = FALSE;				\
		if ((m)->wanted) {				\
			(m)->wanted = FALSE;			\
			thread_wakeup((event_t) (m));		\
		}						\
		MACRO_END

#define PAGE_WAKEUP(m)						\
		MACRO_BEGIN					\
		if ((m)->wanted) {				\
			(m)->wanted = FALSE;			\
			thread_wakeup((event_t) (m));		\
		}						\
		MACRO_END

#define VM_PAGE_FREE(p)					\
		MACRO_BEGIN				\
		vm_page_free_unlocked(p, TRUE);		\
		MACRO_END

#define VM_PAGE_GRAB_FICTITIOUS(M)					\
		MACRO_BEGIN						\
		while ((M = vm_page_grab_fictitious()) == VM_PAGE_NULL)	\
			vm_page_more_fictitious();			\
		MACRO_END

#define	VM_PAGE_WAIT()		((void)vm_page_wait(THREAD_UNINT))

#define vm_page_queue_lock		(vm_page_locks.vm_page_queue_lock2)
#define vm_page_queue_free_lock		(vm_page_locks.vm_page_queue_free_lock2)

#define vm_page_lock_queues()		lck_mtx_lock(&vm_page_queue_lock)
#define vm_page_trylock_queues()	lck_mtx_try_lock(&vm_page_queue_lock)
#define vm_page_unlock_queues()		lck_mtx_unlock(&vm_page_queue_lock)

#define vm_page_lockspin_queues()	lck_mtx_lock_spin(&vm_page_queue_lock)
#define vm_page_trylockspin_queues()	lck_mtx_try_lock_spin(&vm_page_queue_lock)
#define vm_page_lockconvert_queues()	lck_mtx_convert_spin(&vm_page_queue_lock)
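
/*
 * Illustrative sketch (not part of the original header): fields marked (P)
 * in struct vm_page are only manipulated with the page-queues lock held,
 * e.g. moving a pageable page to the end of its LRU ordering.
 */
#if 0	/* example only */
static void
vm_page_example_requeue(vm_page_t m)
{
	vm_page_lockspin_queues();	/* take the (P) lock in spin mode */
	if (VM_PAGE_PAGEABLE(m))
		vm_page_lru(m);
	vm_page_unlock_queues();
}
#endif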

#ifdef	VPL_LOCK_SPIN
#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_spin_init(&vlq->vpl_lock, vpl_grp, vpl_attr)
#define VPL_LOCK(vpl) lck_spin_lock(vpl)
#define VPL_UNLOCK(vpl) lck_spin_unlock(vpl)
#else
#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_mtx_init_ext(&vlq->vpl_lock, &vlq->vpl_lock_ext, vpl_grp, vpl_attr)
#define VPL_LOCK(vpl) lck_mtx_lock_spin(vpl)
#define VPL_UNLOCK(vpl) lck_mtx_unlock(vpl)
#endif


#if DEVELOPMENT || DEBUG
#define VM_PAGE_SPECULATIVE_USED_ADD()				\
	MACRO_BEGIN						\
	OSAddAtomic(1, &vm_page_speculative_used);		\
	MACRO_END
#else
#define	VM_PAGE_SPECULATIVE_USED_ADD()
#endif


#define VM_PAGE_CONSUME_CLUSTERED(mem)				\
	MACRO_BEGIN						\
	ppnum_t	__phys_page;					\
	__phys_page = VM_PAGE_GET_PHYS_PAGE(mem);		\
	pmap_lock_phys_page(__phys_page);			\
	if (mem->clustered) {					\
		vm_object_t o;					\
		o = VM_PAGE_OBJECT(mem);			\
		assert(o);					\
		o->pages_used++;				\
		mem->clustered = FALSE;				\
		VM_PAGE_SPECULATIVE_USED_ADD();			\
	}							\
	pmap_unlock_phys_page(__phys_page);			\
	MACRO_END


#define VM_PAGE_COUNT_AS_PAGEIN(mem)				\
	MACRO_BEGIN						\
	{							\
	vm_object_t o;						\
	o = VM_PAGE_OBJECT(mem);				\
	DTRACE_VM2(pgin, int, 1, (uint64_t *), NULL);		\
	current_task()->pageins++;				\
	if (o->internal) {					\
		DTRACE_VM2(anonpgin, int, 1, (uint64_t *), NULL); \
	} else {						\
		DTRACE_VM2(fspgin, int, 1, (uint64_t *), NULL);	\
	}							\
	}							\
	MACRO_END

/* adjust for stolen pages accounted elsewhere */
#define VM_PAGE_MOVE_STOLEN(page_count)				\
	MACRO_BEGIN						\
	vm_page_stolen_count -= (page_count);			\
	vm_page_wire_count_initial -= (page_count);		\
	MACRO_END

#define DW_vm_page_unwire		0x01
#define DW_vm_page_wire			0x02
#define DW_vm_page_free			0x04
#define DW_vm_page_activate		0x08
#define DW_vm_page_deactivate_internal	0x10
#define DW_vm_page_speculate		0x20
#define DW_vm_page_lru			0x40
#define DW_vm_pageout_throttle_up	0x80
#define DW_PAGE_WAKEUP			0x100
#define DW_clear_busy			0x200
#define DW_clear_reference		0x400
#define DW_set_reference		0x800
#define DW_move_page			0x1000
#define DW_VM_PAGE_QUEUES_REMOVE	0x2000
#define DW_enqueue_cleaned		0x4000
#define DW_vm_phantom_cache_update	0x8000

struct vm_page_delayed_work {
	vm_page_t	dw_m;
	int		dw_mask;
};

void vm_page_do_delayed_work(vm_object_t object, vm_tag_t tag, struct vm_page_delayed_work *dwp, int dw_count);

extern unsigned int vm_max_delayed_work_limit;

#define DEFAULT_DELAYED_WORK_LIMIT	32

#define DELAYED_WORK_LIMIT(max)	((vm_max_delayed_work_limit >= max ? max : vm_max_delayed_work_limit))

/*
 * vm_page_do_delayed_work may need to drop the object lock...
 * if it does, we need the pages it's looking at to
 * be held stable via the busy bit, so if busy isn't already
 * set, we need to set it and ask vm_page_do_delayed_work
 * to clear it and wakeup anyone that might have blocked on
 * it once we're done processing the page.
 */

#define VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_cnt)		\
	MACRO_BEGIN						\
	if (mem->busy == FALSE) {				\
		mem->busy = TRUE;				\
		if ( !(dwp->dw_mask & DW_vm_page_free))		\
			dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); \
	}							\
	dwp->dw_m = mem;					\
	dwp++;							\
	dw_cnt++;						\
	MACRO_END
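
/*
 * Illustrative sketch (an assumption about usage, not part of the original
 * header): callers typically batch operations into a small on-stack array
 * and flush it through vm_page_do_delayed_work when the array fills. The
 * VM object lock is held across the batch; VM_PAGE_ADD_DELAYED_WORK marks
 * each page busy so it stays stable if vm_page_do_delayed_work has to drop
 * that lock. VM_KERN_MEMORY_NONE stands in for whatever vm_tag_t the
 * caller would actually use.
 */
#if 0	/* example only */
static void
vm_page_example_batch(vm_object_t object, vm_page_t *pages, int npages)
{
	struct vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work	*dwp = &dw_array[0];
	int				dw_count = 0;
	int				dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
	int				i;

	for (i = 0; i < npages; i++) {
		dwp->dw_mask = DW_vm_page_deactivate_internal;
		VM_PAGE_ADD_DELAYED_WORK(dwp, pages[i], dw_count);

		if (dw_count >= dw_limit) {
			vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
			dwp = &dw_array[0];
			dw_count = 0;
		}
	}
	if (dw_count)
		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
}
#endif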

extern vm_page_t vm_object_page_grab(vm_object_t);

#if VM_PAGE_BUCKETS_CHECK
extern void vm_page_buckets_check(void);
#endif /* VM_PAGE_BUCKETS_CHECK */

extern void vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_backgroundq);
extern void vm_page_remove_internal(vm_page_t page);
extern void vm_page_enqueue_inactive(vm_page_t mem, boolean_t first);
extern void vm_page_enqueue_active(vm_page_t mem, boolean_t first);
extern void vm_page_check_pageable_safe(vm_page_t page);


#endif	/* _VM_VM_PAGE_H_ */