/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_page.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Resident memory system definitions.
 */

#ifndef _VM_VM_PAGE_H_
#define _VM_VM_PAGE_H_

#include <debug.h>
#include <vm/vm_options.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>


#if defined(__LP64__)

/*
 * in order to make the size of a vm_page_t 64 bytes (cache line size for both arm64 and x86_64)
 * we'll keep the next_m pointer packed... as long as the kernel virtual space where we allocate
 * vm_page_t's from doesn't span more than 256 Gbytes, we're safe. There are live tests in the
 * vm_page_t array allocation and the zone init code to determine if we can safely pack and unpack
 * pointers from the 2 ends of these spaces
 */
typedef uint32_t	vm_page_packed_t;

struct vm_page_packed_queue_entry {
	vm_page_packed_t	next;	/* next element */
	vm_page_packed_t	prev;	/* previous element */
};

typedef struct vm_page_packed_queue_entry	*vm_page_queue_t;
typedef struct vm_page_packed_queue_entry	vm_page_queue_head_t;
typedef struct vm_page_packed_queue_entry	vm_page_queue_chain_t;
typedef struct vm_page_packed_queue_entry	*vm_page_queue_entry_t;

typedef vm_page_packed_t	vm_page_object_t;

#else

/*
 * we can't do the packing trick on 32 bit architectures, so
 * just turn the macros into noops.
 */
typedef struct vm_page	*vm_page_packed_t;

#define vm_page_queue_t		queue_t
#define vm_page_queue_head_t	queue_head_t
#define vm_page_queue_chain_t	queue_chain_t
#define vm_page_queue_entry_t	queue_entry_t

#define vm_page_object_t	vm_object_t
#endif


#include <vm/vm_object.h>
#include <kern/queue.h>
#include <kern/locks.h>

#include <kern/macro_help.h>
#include <libkern/OSAtomic.h>



#define VM_PAGE_COMPRESSOR_COUNT	(compressor_object->resident_page_count)

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A hash table bucket used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).  [Some fields require that both locks be held to
 *	change that field; holding either lock is sufficient to read.]
 */
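
/*
 * Illustrative sketch (not part of the original header) of the (O)/(P)
 * convention above: a writer of a field marked (O&P) takes both locks,
 * while a reader may hold either one. Assuming 'm' is a vm_page_t whose
 * owning object has already been looked up:
 *
 *	vm_object_lock(object);		// (O): the owning object's lock
 *	vm_page_lockspin_queues();	// (P): the global page-queues lock
 *	m->vmp_wire_count++;		// marked (O&P): writes need both
 *	vm_page_unlock_queues();
 *	vm_object_unlock(object);
 *
 * A reader of the same field may hold just the object lock or just the
 * page-queues lock.
 */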

#define VM_PAGE_NULL	((vm_page_t) 0)

extern char	vm_page_inactive_states[];
extern char	vm_page_pageable_states[];
extern char	vm_page_non_speculative_pageable_states[];
extern char	vm_page_active_or_inactive_states[];


#define VM_PAGE_INACTIVE(m)			(vm_page_inactive_states[m->vmp_q_state])
#define VM_PAGE_PAGEABLE(m)			(vm_page_pageable_states[m->vmp_q_state])
#define VM_PAGE_NON_SPECULATIVE_PAGEABLE(m)	(vm_page_non_speculative_pageable_states[m->vmp_q_state])
#define VM_PAGE_ACTIVE_OR_INACTIVE(m)		(vm_page_active_or_inactive_states[m->vmp_q_state])


#define VM_PAGE_NOT_ON_Q		0	/* page is not present on any queue, nor is it wired... mainly a transient state */
#define VM_PAGE_IS_WIRED		1	/* page is currently wired */
#define VM_PAGE_USED_BY_COMPRESSOR	2	/* page is in use by the compressor to hold compressed data */
#define VM_PAGE_ON_FREE_Q		3	/* page is on the main free queue */
#define VM_PAGE_ON_FREE_LOCAL_Q		4	/* page is on one of the per-CPU free queues */
#define VM_PAGE_ON_FREE_LOPAGE_Q	5	/* page is on the lopage pool free list */
#define VM_PAGE_ON_THROTTLED_Q		6	/* page is on the throttled queue... we stash anonymous pages here when not paging */
#define VM_PAGE_ON_PAGEOUT_Q		7	/* page is on one of the pageout queues (internal/external) awaiting processing */
#define VM_PAGE_ON_SPECULATIVE_Q	8	/* page is on one of the speculative queues */
#define VM_PAGE_ON_ACTIVE_LOCAL_Q	9	/* page has recently been created and is being held in one of the per-CPU local queues */
#define VM_PAGE_ON_ACTIVE_Q		10	/* page is in the global active queue */
#define VM_PAGE_ON_INACTIVE_INTERNAL_Q	11	/* page is on the inactive internal queue a.k.a. anonymous queue */
#define VM_PAGE_ON_INACTIVE_EXTERNAL_Q	12	/* page is on the inactive external queue a.k.a. file-backed queue */
#define VM_PAGE_ON_INACTIVE_CLEANED_Q	13	/* page has been cleaned to a backing file and is ready to be stolen */
#define VM_PAGE_ON_SECLUDED_Q		14	/* page is on the secluded queue */
#define VM_PAGE_Q_STATE_LAST_VALID_VALUE	14	/* we currently use 4 bits for the state... don't let this go beyond 15 */

#define VM_PAGE_Q_STATE_ARRAY_SIZE	(VM_PAGE_Q_STATE_LAST_VALID_VALUE+1)


/*
 * The structure itself. See the block comment above for what (O) and (P) mean.
 */
#define vmp_pageq	vmp_q_un.vmp_q_pageq
#define vmp_snext	vmp_q_un.vmp_q_snext

struct vm_page {
	union {
		vm_page_queue_chain_t	vmp_q_pageq;	/* queue info for FIFO queue or free list (P) */
		struct vm_page		*vmp_q_snext;
	} vmp_q_un;

	vm_page_queue_chain_t	vmp_listq;	/* all pages in same object (O) */

#if CONFIG_BACKGROUND_QUEUE
	vm_page_queue_chain_t	vmp_backgroundq;	/* anonymous pages in the background pool (P) */
#endif

	vm_object_offset_t	vmp_offset;	/* offset into that object (O,P) */
	vm_page_object_t	vmp_object;	/* which object am I in (O&P) */

	/*
	 * The following word of flags is always protected by the "page queues" lock.
	 *
	 * We use 'vmp_wire_count' to store the local queue id if local queues are enabled.
	 * See the comments at 'vm_page_queues_remove' as to why this is safe to do.
	 */
#define vmp_local_id	vmp_wire_count
	unsigned int	vmp_wire_count:16,	/* how many wired down maps use me? (O&P) */
			vmp_q_state:4,		/* which q is the page on (P) */
			vmp_in_background:1,
			vmp_on_backgroundq:1,
			vmp_gobbled:1,		/* page used internally (P) */
			vmp_laundry:1,		/* page is being cleaned now (P) */
			vmp_no_cache:1,		/* page is not to be cached and should */
						/* be reused ahead of other pages (P) */
			vmp_private:1,		/* Page should not be returned to the free list (P) */
			vmp_reference:1,	/* page has been used (P) */
			vmp_unused_page_bits:5;

	/*
	 * MUST keep the 2 32 bit words used as bit fields
	 * separated since the compiler has a nasty habit
	 * of using 64 bit loads and stores on them as
	 * if they were a single 64 bit field... since
	 * they are protected by 2 different locks, this
	 * is a real problem
	 */
	vm_page_packed_t	vmp_next_m;	/* VP bucket link (O) */

	/*
	 * The following word of flags is protected by the "VM object" lock.
	 *
	 * IMPORTANT: the "vmp_pmapped", "vmp_xpmapped" and "vmp_clustered" bits can be modified while holding the
	 * VM object "shared" lock + the page lock provided through the pmap_lock_phys_page function.
	 * This is done in vm_fault_enter() and the CONSUME_CLUSTERED macro.
	 * It's also ok to modify them behind just the VM object "exclusive" lock.
	 */
	unsigned int	vmp_busy:1,		/* page is in transit (O) */
			vmp_wanted:1,		/* someone is waiting for page (O) */
			vmp_tabled:1,		/* page is in VP table (O) */
			vmp_hashed:1,		/* page is in vm_page_buckets[] (O) + the bucket lock */
			vmp_fictitious:1,	/* Physical page doesn't exist (O) */
			vmp_clustered:1,	/* page is not the faulted page (O) or (O-shared AND pmap_page) */
			vmp_pmapped:1,		/* page has at some time been entered into a pmap (O) or */
						/* (O-shared AND pmap_page) */
			vmp_xpmapped:1,		/* page has been entered with execute permission (O) or */
						/* (O-shared AND pmap_page) */
			vmp_wpmapped:1,		/* page has been entered at some point into a pmap for write (O) */
			vmp_free_when_done:1,	/* page is to be freed once cleaning is completed (O) */
			vmp_absent:1,		/* Data has been requested, but is not yet available (O) */
			vmp_error:1,		/* Data manager was unable to provide data due to error (O) */
			vmp_dirty:1,		/* Page must be cleaned (O) */
			vmp_cleaning:1,		/* Page clean has begun (O) */
			vmp_precious:1,		/* Page is precious; data must be returned even if clean (O) */
			vmp_overwriting:1,	/* Request to unlock has been made without having data. (O) */
						/* [See vm_fault_page_overwrite] */
			vmp_restart:1,		/* Page was pushed higher in shadow chain by copy_call-related pagers */
						/* start again at top of chain */
			vmp_unusual:1,		/* Page is absent, error, restart or page locked */
			vmp_cs_validated:1,	/* code-signing: page was checked */
			vmp_cs_tainted:1,	/* code-signing: page is tainted */
			vmp_cs_nx:1,		/* code-signing: page is nx */
			vmp_reusable:1,
			vmp_lopage:1,
			vmp_written_by_kernel:1,	/* page was written by kernel (i.e. decompressed) */
			vmp_unused_object_bits:8;

#if !defined(__arm__) && !defined(__arm64__)
	ppnum_t		vmp_phys_page;	/* Physical page number of the page */
#endif
};


typedef struct vm_page	*vm_page_t;
extern vm_page_t	vm_pages;
extern vm_page_t	vm_page_array_beginning_addr;
extern vm_page_t	vm_page_array_ending_addr;


#if defined(__arm__) || defined(__arm64__)

extern unsigned int	vm_first_phys_ppnum;

struct vm_page_with_ppnum {
	struct vm_page	vm_page_wo_ppnum;

	ppnum_t		vmp_phys_page;
};
typedef struct vm_page_with_ppnum	*vm_page_with_ppnum_t;


static inline ppnum_t
VM_PAGE_GET_PHYS_PAGE(vm_page_t m)
{
	if (m >= vm_page_array_beginning_addr && m < vm_page_array_ending_addr) {
		return (ppnum_t)((uintptr_t)(m - vm_page_array_beginning_addr) + vm_first_phys_ppnum);
	} else {
		return ((vm_page_with_ppnum_t)m)->vmp_phys_page;
	}
}

#define VM_PAGE_SET_PHYS_PAGE(m, ppnum)					\
	MACRO_BEGIN							\
	if ((m) < vm_page_array_beginning_addr || (m) >= vm_page_array_ending_addr)	\
		((vm_page_with_ppnum_t)(m))->vmp_phys_page = ppnum;	\
	assert(ppnum == VM_PAGE_GET_PHYS_PAGE(m));			\
	MACRO_END

#define VM_PAGE_GET_COLOR(m)	(VM_PAGE_GET_PHYS_PAGE(m) & vm_color_mask)

#else	/* defined(__arm__) || defined(__arm64__) */


struct vm_page_with_ppnum {
	struct vm_page	vm_page_with_ppnum;
};
typedef struct vm_page_with_ppnum	*vm_page_with_ppnum_t;


#define VM_PAGE_GET_PHYS_PAGE(page)	(page)->vmp_phys_page
#define VM_PAGE_SET_PHYS_PAGE(page, ppnum)	\
	MACRO_BEGIN				\
	(page)->vmp_phys_page = ppnum;		\
	MACRO_END

#define VM_PAGE_GET_CLUMP(m)	((VM_PAGE_GET_PHYS_PAGE(m)) >> vm_clump_shift)
#define VM_PAGE_GET_COLOR(m)	((VM_PAGE_GET_CLUMP(m)) & vm_color_mask)

#endif	/* defined(__arm__) || defined(__arm64__) */



#if defined(__LP64__)

#define VM_VPLQ_ALIGNMENT		128
#define VM_PACKED_POINTER_ALIGNMENT	64	/* must be a power of 2 */
#define VM_PACKED_POINTER_SHIFT		6

#define VM_PACKED_FROM_VM_PAGES_ARRAY	0x80000000

static inline vm_page_packed_t
vm_page_pack_ptr(uintptr_t p)
{
	vm_page_packed_t	packed_ptr;

	if (!p) {
		return (vm_page_packed_t)0;
	}

	if (p >= (uintptr_t)(vm_page_array_beginning_addr) && p < (uintptr_t)(vm_page_array_ending_addr)) {
		packed_ptr = ((vm_page_packed_t)(((vm_page_t)p - vm_page_array_beginning_addr)));
		assert(!(packed_ptr & VM_PACKED_FROM_VM_PAGES_ARRAY));
		packed_ptr |= VM_PACKED_FROM_VM_PAGES_ARRAY;
		return packed_ptr;
	}

	assert((p & (VM_PACKED_POINTER_ALIGNMENT - 1)) == 0);

	packed_ptr = ((vm_page_packed_t)(((uintptr_t)(p - (uintptr_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS)) >> VM_PACKED_POINTER_SHIFT));
	assert(packed_ptr != 0);
	assert(!(packed_ptr & VM_PACKED_FROM_VM_PAGES_ARRAY));
	return packed_ptr;
}


static inline uintptr_t
vm_page_unpack_ptr(uintptr_t p)
{
	if (!p) {
		return (uintptr_t)0;
	}

	if (p & VM_PACKED_FROM_VM_PAGES_ARRAY) {
		return (uintptr_t)(&vm_pages[(uint32_t)(p & ~VM_PACKED_FROM_VM_PAGES_ARRAY)]);
	}
	return (p << VM_PACKED_POINTER_SHIFT) + (uintptr_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS;
}
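
/*
 * Worked example (illustrative, not part of the original header): with
 * VM_PACKED_POINTER_ALIGNMENT == 64 and VM_PACKED_POINTER_SHIFT == 6, a
 * packed pointer stores (p - VM_MIN_KERNEL_AND_KEXT_ADDRESS) >> 6 in 32
 * bits, so the reachable span is 2^32 * 64 bytes == 256 GB -- the limit
 * cited in the packing comment near the top of this file. Assuming an
 * address that is not inside the vm_pages[] array:
 *
 *	uintptr_t p = (uintptr_t)VM_MIN_KERNEL_AND_KEXT_ADDRESS + (123 << 6);
 *	vm_page_packed_t pp = vm_page_pack_ptr(p);	// pp == 123
 *	assert(vm_page_unpack_ptr(pp) == p);
 *
 * Pointers into vm_pages[] are instead packed as an array index with
 * VM_PACKED_FROM_VM_PAGES_ARRAY set, which is why the shift path asserts
 * that it never produces that bit.
 */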


#define VM_PAGE_PACK_PTR(p)	vm_page_pack_ptr((uintptr_t)(p))
#define VM_PAGE_UNPACK_PTR(p)	vm_page_unpack_ptr((uintptr_t)(p))

#define VM_PAGE_OBJECT(p)	((vm_object_t)(VM_PAGE_UNPACK_PTR(p->vmp_object)))
#define VM_PAGE_PACK_OBJECT(o)	((vm_page_object_t)(VM_PAGE_PACK_PTR(o)))


#define VM_PAGE_ZERO_PAGEQ_ENTRY(p)	\
MACRO_BEGIN				\
	(p)->vmp_snext = 0;		\
MACRO_END


#define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p)	VM_PAGE_PACK_PTR(p)


static __inline__ void
vm_page_enqueue_tail(
	vm_page_queue_t		que,
	vm_page_queue_entry_t	elt)
{
	vm_page_queue_entry_t	old_tail;

	old_tail = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(que->prev);
	elt->next = VM_PAGE_PACK_PTR(que);
	elt->prev = que->prev;
	que->prev = old_tail->next = VM_PAGE_PACK_PTR(elt);
}


static __inline__ void
vm_page_remque(
	vm_page_queue_entry_t elt)
{
	vm_page_queue_entry_t	next;
	vm_page_queue_entry_t	prev;
	vm_page_packed_t	next_pck = elt->next;
	vm_page_packed_t	prev_pck = elt->prev;

	next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(next_pck);

	/* next may equal prev (and the queue head) if elt was the only element */
	prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev_pck);

	next->prev = prev_pck;
	prev->next = next_pck;

	elt->next = 0;
	elt->prev = 0;
}


/*
 *	Macro:	vm_page_queue_init
 *	Function:
 *		Initialize the given queue.
 *	Header:
 *		void vm_page_queue_init(q)
 *			vm_page_queue_t	q;	\* MODIFIED *\
 */
#define vm_page_queue_init(q)						\
MACRO_BEGIN								\
	assert((((uintptr_t)q) & (VM_PACKED_POINTER_ALIGNMENT-1)) == 0); \
	assert((VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR((uintptr_t)q))) == (uintptr_t)q); \
	(q)->next = VM_PAGE_PACK_PTR(q);				\
	(q)->prev = VM_PAGE_PACK_PTR(q);				\
MACRO_END


/*
 *	Macro:	vm_page_queue_enter
 *	Function:
 *		Insert a new element at the tail of the vm_page queue.
 *	Header:
 *		void vm_page_queue_enter(q, elt, field)
 *			queue_t q;
 *			vm_page_t elt;
 *			<field> is the list field in vm_page_t
 *
 *	This macro's arguments have to match the generic "queue_enter()" macro which is
 *	what is used for this on 32 bit kernels.
 */
#define vm_page_queue_enter(head, elt, field)				\
MACRO_BEGIN								\
	vm_page_packed_t __pck_elt = VM_PAGE_PACK_PTR(elt);		\
	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);		\
	vm_page_packed_t __pck_prev = (head)->prev;			\
									\
	if (__pck_head == __pck_prev) {					\
		(head)->next = __pck_elt;				\
	} else {							\
		vm_page_t __prev;					\
		__prev = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_prev);	\
		__prev->field.next = __pck_elt;				\
	}								\
	(elt)->field.prev = __pck_prev;					\
	(elt)->field.next = __pck_head;					\
	(head)->prev = __pck_elt;					\
MACRO_END


#if defined(__x86_64__)
/*
 * These are helper macros for vm_page_queue_enter_clump to assist
 * with conditional compilation (release / debug / development)
 */
#if DEVELOPMENT || DEBUG

#define __DEBUG_CHECK_BUDDIES(__prev, __p, field)				\
MACRO_BEGIN									\
	if (__prev != NULL) {							\
		assert(__p == (vm_page_t)VM_PAGE_UNPACK_PTR(__prev->next));	\
		assert(__prev == (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(__p->field.prev)); \
	}									\
MACRO_END

#define __DEBUG_VERIFY_LINKS(__first, __n_free, __last_next)			\
MACRO_BEGIN									\
	unsigned int __i;							\
	vm_page_queue_entry_t __tmp;						\
	for (__i = 0, __tmp = __first; __i < __n_free; __i++) {			\
		__tmp = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(__tmp->next); \
	}									\
	assert(__tmp == __last_next);						\
MACRO_END

#define __DEBUG_STAT_INCREMENT_INRANGE			vm_clump_inrange++
#define __DEBUG_STAT_INCREMENT_INSERTS			vm_clump_inserts++
#define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free)	vm_clump_promotes+=__n_free

#else

#define __DEBUG_CHECK_BUDDIES(__prev, __p, field)
#define __DEBUG_VERIFY_LINKS(__first, __n_free, __last_next)
#define __DEBUG_STAT_INCREMENT_INRANGE
#define __DEBUG_STAT_INCREMENT_INSERTS
#define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free)

#endif	/* if DEVELOPMENT || DEBUG */

/*
 * Insert a new page into a free queue and clump pages within the same 16K boundary together
 */
static inline void
vm_page_queue_enter_clump(
	vm_page_queue_t	head,
	vm_page_t	elt)
{
	vm_page_queue_entry_t	first;		/* first page in the clump */
	vm_page_queue_entry_t	last;		/* last page in the clump */
	vm_page_queue_entry_t	prev = NULL;
	vm_page_queue_entry_t	next;
	uint_t			n_free = 1;
	extern unsigned int	vm_pages_count;
	extern unsigned int	vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;
	extern unsigned long	vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;

	/*
	 * If elt is part of the vm_pages[] array, find its neighboring buddies in the array.
	 */
	if (vm_page_array_beginning_addr <= elt && elt < &vm_pages[vm_pages_count]) {
		vm_page_t p;
		uint_t    i;
		uint_t    n;
		ppnum_t   clump_num;

		first = last = (vm_page_queue_entry_t)elt;
		clump_num = VM_PAGE_GET_CLUMP(elt);
		n = VM_PAGE_GET_PHYS_PAGE(elt) & vm_clump_mask;

		/*
		 * Check for preceding vm_pages[] entries in the same chunk
		 */
		for (i = 0, p = elt - 1; i < n && vm_page_array_beginning_addr <= p; i++, p--) {
			if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) {
				if (prev == NULL) {
					prev = (vm_page_queue_entry_t)p;
				}
				first = (vm_page_queue_entry_t)p;
				n_free++;
			}
		}

		/*
		 * Check the following vm_pages[] entries in the same chunk
		 */
		for (i = n + 1, p = elt + 1; i < vm_clump_size && p < &vm_pages[vm_pages_count]; i++, p++) {
			if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) {
				if (last == (vm_page_queue_entry_t)elt) {	/* first one only */
					__DEBUG_CHECK_BUDDIES(prev, p, vmp_pageq);
				}

				if (prev == NULL) {
					prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev);
				}
				last = (vm_page_queue_entry_t)p;
				n_free++;
			}
		}
		__DEBUG_STAT_INCREMENT_INRANGE;
	}

	/* if elt is not part of vm_pages or if 1st page in clump, insert at tail */
	if (prev == NULL) {
		prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->prev);
	}

	/* insert the element */
	next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev->next);
	elt->vmp_pageq.next = prev->next;
	elt->vmp_pageq.prev = next->prev;
	prev->next = next->prev = VM_PAGE_PACK_PTR(elt);
	__DEBUG_STAT_INCREMENT_INSERTS;

	/*
	 * Check if clump needs to be promoted to head.
	 */
	if (n_free >= vm_clump_promote_threshold && n_free > 1) {
		vm_page_queue_entry_t first_prev;

		first_prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(first->prev);

		/* If not at head already */
		if (first_prev != head) {
			vm_page_queue_entry_t last_next;
			vm_page_queue_entry_t head_next;

			last_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(last->next);

			/* verify that the links within the clump are consistent */
			__DEBUG_VERIFY_LINKS(first, n_free, last_next);

			/* promote clump to head */
			first_prev->next = last->next;
			last_next->prev = first->prev;
			first->prev = VM_PAGE_PACK_PTR(head);
			last->next = head->next;

			head_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->next);
			head_next->prev = VM_PAGE_PACK_PTR(last);
			head->next = VM_PAGE_PACK_PTR(first);
			__DEBUG_STAT_INCREMENT_PROMOTES(n_free);
		}
	}
}
#endif

/*
 *	Macro:	vm_page_queue_enter_first
 *	Function:
 *		Insert a new element at the head of the vm_page queue.
 *	Header:
 *		void queue_enter_first(q, elt, field)
 *			queue_t q;
 *			vm_page_t elt;
 *			<field> is the linkage field in vm_page
 *
 *	This macro's arguments have to match the generic "queue_enter_first()" macro which is
 *	what is used for this on 32 bit kernels.
 */
#define vm_page_queue_enter_first(head, elt, field)			\
MACRO_BEGIN								\
	vm_page_packed_t __pck_next = (head)->next;			\
	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);		\
	vm_page_packed_t __pck_elt = VM_PAGE_PACK_PTR(elt);		\
									\
	if (__pck_head == __pck_next) {					\
		(head)->prev = __pck_elt;				\
	} else {							\
		vm_page_t __next;					\
		__next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next);	\
		__next->field.prev = __pck_elt;				\
	}								\
									\
	(elt)->field.next = __pck_next;					\
	(elt)->field.prev = __pck_head;					\
	(head)->next = __pck_elt;					\
MACRO_END


/*
 *	Macro:	vm_page_queue_remove
 *	Function:
 *		Remove an arbitrary page from a vm_page queue.
 *	Header:
 *		void vm_page_queue_remove(q, qe, field)
 *			arguments as in vm_page_queue_enter
 *
 *	This macro's arguments have to match the generic "queue_enter()" macro which is
 *	what is used for this on 32 bit kernels.
 */
#define vm_page_queue_remove(head, elt, field)				\
MACRO_BEGIN								\
	vm_page_packed_t __pck_next = (elt)->field.next;		\
	vm_page_packed_t __pck_prev = (elt)->field.prev;		\
	vm_page_t __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next);	\
	vm_page_t __prev = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_prev);	\
									\
	if ((void *)(head) == (void *)__next) {				\
		(head)->prev = __pck_prev;				\
	} else {							\
		__next->field.prev = __pck_prev;			\
	}								\
									\
	if ((void *)(head) == (void *)__prev) {				\
		(head)->next = __pck_next;				\
	} else {							\
		__prev->field.next = __pck_next;			\
	}								\
									\
	(elt)->field.next = 0;						\
	(elt)->field.prev = 0;						\
MACRO_END


/*
 *	Macro:	vm_page_queue_remove_first
 *
 *	Function:
 *		Remove and return the entry at the head of a vm_page queue.
 *
 *	Header:
 *		vm_page_queue_remove_first(head, entry, field)
 *		N.B. entry is returned by reference
 *
 *	This macro's arguments have to match the generic "queue_remove_first()" macro which is
 *	what is used for this on 32 bit kernels.
 */
#define vm_page_queue_remove_first(head, entry, field)			\
MACRO_BEGIN								\
	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);		\
	vm_page_packed_t __pck_next;					\
	vm_page_t __next;						\
									\
	(entry) = (vm_page_t)VM_PAGE_UNPACK_PTR((head)->next);		\
	__pck_next = (entry)->field.next;				\
	__next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next);		\
									\
	if (__pck_head == __pck_next) {					\
		(head)->prev = __pck_head;				\
	} else {							\
		__next->field.prev = __pck_head;			\
	}								\
									\
	(head)->next = __pck_next;					\
	(entry)->field.next = 0;					\
	(entry)->field.prev = 0;					\
MACRO_END


#if defined(__x86_64__)
/*
 *	Macro:	vm_page_queue_remove_first_with_clump
 *	Function:
 *		Remove and return the entry at the head of the free queue;
 *		end is set to 1 to indicate that we just returned the last page in a clump.
 *
 *	Header:
 *		vm_page_queue_remove_first_with_clump(head, entry, end)
 *		entry is returned by reference
 *		end is returned by reference
 */
#define vm_page_queue_remove_first_with_clump(head, entry, end)	\
MACRO_BEGIN								\
	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);		\
	vm_page_packed_t __pck_next;					\
	vm_page_t __next;						\
									\
	(entry) = (vm_page_t)VM_PAGE_UNPACK_PTR((head)->next);		\
	__pck_next = (entry)->vmp_pageq.next;				\
	__next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next);		\
									\
	(end) = 0;							\
	if (__pck_head == __pck_next) {					\
		(head)->prev = __pck_head;				\
		(end) = 1;						\
	} else {							\
		__next->vmp_pageq.prev = __pck_head;			\
		if (VM_PAGE_GET_CLUMP(entry) != VM_PAGE_GET_CLUMP(__next)) { \
			(end) = 1;					\
		}							\
	}								\
									\
	(head)->next = __pck_next;					\
	(entry)->vmp_pageq.next = 0;					\
	(entry)->vmp_pageq.prev = 0;					\
MACRO_END
#endif

/*
 *	Macro:	vm_page_queue_end
 *	Function:
 *		Tests whether a new entry is really the end of
 *		the queue.
 *	Header:
 *		boolean_t vm_page_queue_end(q, qe)
 *			vm_page_queue_t q;
 *			vm_page_queue_entry_t qe;
 */
#define vm_page_queue_end(q, qe)	((q) == (qe))


/*
 *	Macro:	vm_page_queue_empty
 *	Function:
 *		Tests whether a queue is empty.
 *	Header:
 *		boolean_t vm_page_queue_empty(q)
 *			vm_page_queue_t q;
 */
#define vm_page_queue_empty(q)	vm_page_queue_end((q), ((vm_page_queue_entry_t)vm_page_queue_first(q)))



/*
 *	Macro:	vm_page_queue_first
 *	Function:
 *		Returns the first entry in the queue.
 *	Header:
 *		uintptr_t vm_page_queue_first(q)
 *			vm_page_queue_t q;	\* IN *\
 */
#define vm_page_queue_first(q)	(VM_PAGE_UNPACK_PTR((q)->next))



/*
 *	Macro:	vm_page_queue_last
 *	Function:
 *		Returns the last entry in the queue.
 *	Header:
 *		vm_page_queue_entry_t queue_last(q)
 *			queue_t q;	\* IN *\
 */
#define vm_page_queue_last(q)	(VM_PAGE_UNPACK_PTR((q)->prev))



/*
 *	Macro:	vm_page_queue_next
 *	Function:
 *		Returns the entry after an item in the queue.
 *	Header:
 *		uintptr_t vm_page_queue_next(qc)
 *			vm_page_queue_t qc;
 */
#define vm_page_queue_next(qc)	(VM_PAGE_UNPACK_PTR((qc)->next))



/*
 *	Macro:	vm_page_queue_prev
 *	Function:
 *		Returns the entry before an item in the queue.
 *	Header:
 *		uintptr_t vm_page_queue_prev(qc)
 *			vm_page_queue_t qc;
 */
#define vm_page_queue_prev(qc)	(VM_PAGE_UNPACK_PTR((qc)->prev))



/*
 *	Macro:	vm_page_queue_iterate
 *	Function:
 *		iterate over each item in a vm_page queue.
 *		Generates a 'for' loop, setting elt to
 *		each item in turn (by reference).
 *	Header:
 *		vm_page_queue_iterate(q, elt, field)
 *			queue_t q;
 *			vm_page_t elt;
 *			<field> is the chain field in vm_page_t
 */
#define vm_page_queue_iterate(head, elt, field)				\
	for ((elt) = (vm_page_t)vm_page_queue_first(head);		\
	    !vm_page_queue_end((head), (vm_page_queue_entry_t)(elt));	\
	    (elt) = (vm_page_t)vm_page_queue_next(&(elt)->field))
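
/*
 * Usage sketch (illustrative, not part of the original header): walking
 * the pages of an object under the object lock, using the vmp_listq
 * linkage that chains all pages of one object (the object's 'memq' head
 * is assumed from vm_object.h):
 *
 *	vm_page_t m;
 *
 *	vm_page_queue_iterate(&object->memq, m, vmp_listq) {
 *		// ... examine m; do not unlink it inside this loop ...
 *	}
 */
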
#else

#define VM_VPLQ_ALIGNMENT		128
#define VM_PACKED_POINTER_ALIGNMENT	4
#define VM_PACKED_POINTER_SHIFT		0

#define VM_PACKED_FROM_VM_PAGES_ARRAY	0

#define VM_PAGE_PACK_PTR(p)	(p)
#define VM_PAGE_UNPACK_PTR(p)	((uintptr_t)(p))

#define VM_PAGE_OBJECT(p)	(vm_object_t)(p->vmp_object)
#define VM_PAGE_PACK_OBJECT(o)	((vm_page_object_t)(VM_PAGE_PACK_PTR(o)))


#define VM_PAGE_ZERO_PAGEQ_ENTRY(p)	\
MACRO_BEGIN				\
	(p)->vmp_pageq.next = 0;	\
	(p)->vmp_pageq.prev = 0;	\
MACRO_END

#define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p)	((queue_entry_t)(p))

#define vm_page_remque				remque
#define vm_page_enqueue_tail			enqueue_tail
#define vm_page_queue_init			queue_init
#define vm_page_queue_enter(h, e, f)		queue_enter(h, e, vm_page_t, f)
#define vm_page_queue_enter_first(h, e, f)	queue_enter_first(h, e, vm_page_t, f)
#define vm_page_queue_remove(h, e, f)		queue_remove(h, e, vm_page_t, f)
#define vm_page_queue_remove_first(h, e, f)	queue_remove_first(h, e, vm_page_t, f)
#define vm_page_queue_end			queue_end
#define vm_page_queue_empty			queue_empty
#define vm_page_queue_first			queue_first
#define vm_page_queue_last			queue_last
#define vm_page_queue_next			queue_next
#define vm_page_queue_prev			queue_prev
#define vm_page_queue_iterate(h, e, f)		queue_iterate(h, e, vm_page_t, f)

#endif


/*
 * VM_PAGE_MIN_SPECULATIVE_AGE_Q through VM_PAGE_MAX_SPECULATIVE_AGE_Q
 * represents a set of aging bins that are 'protected'...
 *
 * VM_PAGE_SPECULATIVE_AGED_Q is a list of the speculative pages that have
 * not yet been 'claimed' but have been aged out of the protective bins
 * this occurs in vm_page_speculate when it advances to the next bin
 * and discovers that it is still occupied... at that point, all of the
 * pages in that bin are moved to the VM_PAGE_SPECULATIVE_AGED_Q.  the pages
 * in that bin are all guaranteed to have reached at least the maximum age
 * we allow for a protected page... they can be older if there is no
 * memory pressure to pull them from the bin, or there are no new speculative pages
 * being generated to push them out.
 * this list is the one that vm_pageout_scan will prefer when looking
 * for pages to move to the underweight free list
 *
 * VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS
 * defines the amount of time a speculative page is normally
 * allowed to live in the 'protected' state (i.e. not available
 * to be stolen if vm_pageout_scan is running and looking for
 * pages)... however, if the total number of speculative pages
 * in the protected state exceeds our limit (defined in vm_pageout.c)
 * and there are none available in VM_PAGE_SPECULATIVE_AGED_Q, then
 * vm_pageout_scan is allowed to steal pages from the protected
 * bucket even if they are underage.
 *
 * vm_pageout_scan is also allowed to pull pages from a protected
 * bin if the bin has reached the "age of consent" we've set
 */
#define VM_PAGE_MAX_SPECULATIVE_AGE_Q	10
#define VM_PAGE_MIN_SPECULATIVE_AGE_Q	1
#define VM_PAGE_SPECULATIVE_AGED_Q	0

#define VM_PAGE_SPECULATIVE_Q_AGE_MS	500
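
/*
 * Worked example (illustrative): with the defaults above, a speculative
 * page normally stays 'protected' for
 *	VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS
 *	= 10 * 500 ms = 5 seconds
 * before its bin ages out into VM_PAGE_SPECULATIVE_AGED_Q, assuming
 * vm_page_speculative_q_age_ms is left at its default.
 */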

struct vm_speculative_age_q {
	/*
	 * memory queue for speculative pages via clustered pageins
	 */
	vm_page_queue_head_t	age_q;
	mach_timespec_t		age_ts;
} __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));



extern
struct vm_speculative_age_q	vm_page_queue_speculative[];

extern int			speculative_steal_index;
extern int			speculative_age_index;
extern unsigned int		vm_page_speculative_q_age_ms;


typedef struct vm_locks_array {
	char		pad __attribute__ ((aligned (64)));
	lck_mtx_t	vm_page_queue_lock2 __attribute__ ((aligned (64)));
	lck_mtx_t	vm_page_queue_free_lock2 __attribute__ ((aligned (64)));
	char		pad2 __attribute__ ((aligned (64)));
} vm_locks_array_t;


#if CONFIG_BACKGROUND_QUEUE
extern void	vm_page_assign_background_state(vm_page_t mem);
extern void	vm_page_update_background_state(vm_page_t mem);
extern void	vm_page_add_to_backgroundq(vm_page_t mem, boolean_t first);
extern void	vm_page_remove_from_backgroundq(vm_page_t mem);
#endif

#define VM_PAGE_WIRED(m)	((m)->vmp_q_state == VM_PAGE_IS_WIRED)
#define NEXT_PAGE(m)		((m)->vmp_snext)
#define NEXT_PAGE_PTR(m)	(&(m)->vmp_snext)

/*
 * XXX	The unusual bit should not be necessary.  Most of the bit
 * XXX	fields above really want to be masks.
 */

/*
 *	For debugging, this macro can be defined to perform
 *	some useful check on a page structure.
 *	INTENTIONALLY left as a no-op so that the
 *	current call-sites can be left intact for future uses.
 */

#define VM_PAGE_CHECK(mem)	\
	MACRO_BEGIN		\
	MACRO_END

/* Page coloring:
 *
 * The free page list is actually n lists, one per color,
 * where the number of colors is a function of the machine's
 * cache geometry set at system initialization.  To disable
 * coloring, set vm_colors to 1 and vm_color_mask to 0.
 * The boot-arg "colors" may be used to override vm_colors.
 * Note that there is little harm in having more colors than needed.
 */

#define MAX_COLORS	128
#define DEFAULT_COLORS	32

extern
unsigned int	vm_colors;		/* must be in range 1..MAX_COLORS */
extern
unsigned int	vm_color_mask;		/* must be (vm_colors-1) */
extern
unsigned int	vm_cache_geometry_colors;	/* optimal #colors based on cache geometry */
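
/*
 * Example (illustrative): with vm_colors == 32 and vm_color_mask == 31,
 * a page whose physical page number is 0x12345 lands on free-list color
 * 0x12345 & 31 == 5; VM_PAGE_GET_COLOR() simply buckets pages by the low
 * bits of their physical page number (or of their clump on x86_64).
 */
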
/*
 * Wired memory is a very limited resource and we can't let users exhaust it
 * and deadlock the entire system.  We enforce the following limits:
 *
 * vm_user_wire_limit (default: all memory minus vm_global_no_user_wire_amount)
 *	how much memory can be user-wired in one user task
 *
 * vm_global_user_wire_limit (default: same as vm_user_wire_limit)
 *	how much memory can be user-wired in all user tasks
 *
 * vm_global_no_user_wire_amount (default: VM_NOT_USER_WIREABLE)
 *	how much memory must remain user-unwired at any time
 */
#define VM_NOT_USER_WIREABLE (64*1024*1024)	/* 64MB */
extern
vm_map_size_t	vm_user_wire_limit;
extern
vm_map_size_t	vm_global_user_wire_limit;
extern
vm_map_size_t	vm_global_no_user_wire_amount;
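
/*
 * Worked example (illustrative): on a machine with 8 GB of physical
 * memory and the default vm_global_no_user_wire_amount of 64 MB, both
 * vm_user_wire_limit and vm_global_user_wire_limit default to
 * 8 GB - 64 MB, so no combination of user wirings can leave less than
 * 64 MB unwired.
 */
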
/*
 *	Each pageable resident page falls into one of three lists:
 *
 *	free
 *		Available for allocation now.  The free list is
 *		actually an array of lists, one per color.
 *	inactive
 *		Not referenced in any map, but still has an
 *		object/offset-page mapping, and may be dirty.
 *		This is the list of pages that should be
 *		paged out next.  There are actually two
 *		inactive lists, one for pages brought in from
 *		disk or other backing store, and another
 *		for "zero-filled" pages.  See vm_pageout_scan()
 *		for the distinction and usage.
 *	active
 *		A list of pages which have been placed in
 *		at least one physical map.  This list is
 *		ordered, in LRU-like fashion.
 */


#define VPL_LOCK_SPIN 1

struct vpl {
	vm_page_queue_head_t	vpl_queue;
	unsigned int		vpl_count;
	unsigned int		vpl_internal_count;
	unsigned int		vpl_external_count;
#ifdef VPL_LOCK_SPIN
	lck_spin_t		vpl_lock;
#else
	lck_mtx_t		vpl_lock;
	lck_mtx_ext_t		vpl_lock_ext;
#endif
};

struct vplq {
	union {
		char       cache_line_pad[VM_VPLQ_ALIGNMENT];
		struct vpl vpl;
	} vpl_un;
};
extern
unsigned int	vm_page_local_q_count;
extern
struct vplq	*vm_page_local_q;
extern
unsigned int	vm_page_local_q_soft_limit;
extern
unsigned int	vm_page_local_q_hard_limit;
extern
vm_locks_array_t vm_page_locks;

extern
vm_page_queue_head_t	vm_lopage_queue_free;		/* low memory free queue */
extern
vm_page_queue_head_t	vm_page_queue_active;		/* active memory queue */
extern
vm_page_queue_head_t	vm_page_queue_inactive;		/* inactive memory queue for normal pages */
#if CONFIG_SECLUDED_MEMORY
extern
vm_page_queue_head_t	vm_page_queue_secluded;		/* reclaimable pages secluded for Camera */
#endif /* CONFIG_SECLUDED_MEMORY */
extern
vm_page_queue_head_t	vm_page_queue_cleaned;		/* clean-queue inactive memory */
extern
vm_page_queue_head_t	vm_page_queue_anonymous;	/* inactive memory queue for anonymous pages */
extern
vm_page_queue_head_t	vm_page_queue_throttled;	/* memory queue for throttled pageout pages */

extern
queue_head_t	vm_objects_wired;
extern
lck_spin_t	vm_objects_wired_lock;

#if CONFIG_BACKGROUND_QUEUE

#define VM_PAGE_BACKGROUND_TARGET_MAX	50000

#define VM_PAGE_BG_DISABLED	0
#define VM_PAGE_BG_LEVEL_1	1

extern
vm_page_queue_head_t	vm_page_queue_background;
extern
uint64_t	vm_page_background_promoted_count;
extern
uint32_t	vm_page_background_count;
extern
uint32_t	vm_page_background_target;
extern
uint32_t	vm_page_background_internal_count;
extern
uint32_t	vm_page_background_external_count;
extern
uint32_t	vm_page_background_mode;
extern
uint32_t	vm_page_background_exclude_external;

#endif

extern
vm_offset_t	first_phys_addr;	/* physical address for first_page */
extern
vm_offset_t	last_phys_addr;		/* physical address for last_page */

extern
unsigned int	vm_page_free_count;	/* How many pages are free? (sum of all colors) */
extern
unsigned int	vm_page_active_count;	/* How many pages are active? */
extern
unsigned int	vm_page_inactive_count;	/* How many pages are inactive? */
#if CONFIG_SECLUDED_MEMORY
extern
unsigned int	vm_page_secluded_count;	/* How many pages are secluded? */
extern
unsigned int	vm_page_secluded_count_free;
extern
unsigned int	vm_page_secluded_count_inuse;
#endif /* CONFIG_SECLUDED_MEMORY */
extern
unsigned int	vm_page_cleaned_count;	/* How many pages are in the clean queue? */
extern
unsigned int	vm_page_throttled_count;/* How many inactives are throttled */
extern
unsigned int	vm_page_speculative_count;	/* How many speculative pages are unclaimed? */
extern unsigned int	vm_page_pageable_internal_count;
extern unsigned int	vm_page_pageable_external_count;
extern
unsigned int	vm_page_xpmapped_external_count;	/* How many pages are mapped executable? */
extern
unsigned int	vm_page_external_count;	/* How many pages are file-backed? */
extern
unsigned int	vm_page_internal_count;	/* How many pages are anonymous? */
extern
unsigned int	vm_page_wire_count;		/* How many pages are wired? */
extern
unsigned int	vm_page_wire_count_initial;	/* How many pages wired at startup */
extern
unsigned int	vm_page_wire_count_on_boot;	/* even earlier than _initial */
extern
unsigned int	vm_page_free_target;	/* How many do we want free? */
extern
unsigned int	vm_page_free_min;	/* When to wakeup pageout */
extern
unsigned int	vm_page_throttle_limit;	/* When to throttle new page creation */
extern
unsigned int	vm_page_inactive_target;/* How many do we want inactive? */
#if CONFIG_SECLUDED_MEMORY
extern
unsigned int	vm_page_secluded_target;/* How many do we want secluded? */
#endif /* CONFIG_SECLUDED_MEMORY */
extern
unsigned int	vm_page_anonymous_min;	/* When it's ok to pre-clean */
extern
unsigned int	vm_page_free_reserved;	/* How many pages reserved to do pageout */
extern
unsigned int	vm_page_gobble_count;
extern
unsigned int	vm_page_stolen_count;	/* Count of stolen pages not accounted in zones */


#if DEVELOPMENT || DEBUG
extern
unsigned int	vm_page_speculative_used;
#endif

extern
unsigned int	vm_page_purgeable_count;/* How many pages are purgeable now ? */
extern
unsigned int	vm_page_purgeable_wired_count;/* How many purgeable pages are wired now ? */
extern
uint64_t	vm_page_purged_count;	/* How many pages got purged so far ? */

extern unsigned int	vm_page_free_wanted;
				/* how many threads are waiting for memory */

extern unsigned int	vm_page_free_wanted_privileged;
				/* how many VM privileged threads are waiting for memory */
#if CONFIG_SECLUDED_MEMORY
extern unsigned int	vm_page_free_wanted_secluded;
				/* how many threads are waiting for secluded memory */
#endif /* CONFIG_SECLUDED_MEMORY */

extern const ppnum_t	vm_page_fictitious_addr;
				/* (fake) phys_addr of fictitious pages */

extern const ppnum_t	vm_page_guard_addr;
				/* (fake) phys_addr of guard pages */


extern boolean_t	vm_page_deactivate_hint;

extern int		vm_compressor_mode;

/*
 * Defaults to true, so highest memory is used first.
 */
extern boolean_t	vm_himemory_mode;

extern boolean_t	vm_lopage_needed;
extern uint32_t		vm_lopage_free_count;
extern uint32_t		vm_lopage_free_limit;
extern uint32_t		vm_lopage_lowater;
extern boolean_t	vm_lopage_refill;
extern uint64_t		max_valid_dma_address;
extern ppnum_t		max_valid_low_ppnum;

/*
 * Prototypes for functions exported by this module.
 */
extern void		vm_page_bootstrap(
	vm_offset_t	*startp,
	vm_offset_t	*endp);

extern void		vm_page_module_init(void);

extern void		vm_page_init_local_q(void);

extern void		vm_page_create(
	ppnum_t		start,
	ppnum_t		end);

extern vm_page_t	kdp_vm_page_lookup(
	vm_object_t		object,
	vm_object_offset_t	offset);

extern vm_page_t	vm_page_lookup(
	vm_object_t		object,
	vm_object_offset_t	offset);

extern vm_page_t	vm_page_grab_fictitious(void);

extern vm_page_t	vm_page_grab_guard(void);

extern void		vm_page_release_fictitious(
	vm_page_t page);

extern void		vm_free_delayed_pages(void);

extern void		vm_page_more_fictitious(void);

extern int		vm_pool_low(void);

extern vm_page_t	vm_page_grab(void);
extern vm_page_t	vm_page_grab_options(int flags);

#define VM_PAGE_GRAB_OPTIONS_NONE	0x00000000
#if CONFIG_SECLUDED_MEMORY
#define VM_PAGE_GRAB_SECLUDED		0x00000001
#endif /* CONFIG_SECLUDED_MEMORY */
#define VM_PAGE_GRAB_Q_LOCK_HELD	0x00000002

extern vm_page_t	vm_page_grablo(void);

extern void		vm_page_release(
	vm_page_t	page,
	boolean_t	page_queues_locked);

extern boolean_t	vm_page_wait(
	int		interruptible);

extern vm_page_t	vm_page_alloc(
	vm_object_t		object,
	vm_object_offset_t	offset);

extern vm_page_t	vm_page_alloc_guard(
	vm_object_t		object,
	vm_object_offset_t	offset);

extern void		vm_page_init(
	vm_page_t	page,
	ppnum_t		phys_page,
	boolean_t	lopage);

extern void		vm_page_free(
	vm_page_t	page);

extern void		vm_page_free_unlocked(
	vm_page_t	page,
	boolean_t	remove_from_hash);

extern void		vm_page_balance_inactive(
	int		max_to_move);

extern void		vm_page_activate(
	vm_page_t	page);

extern void		vm_page_deactivate(
	vm_page_t	page);

extern void		vm_page_deactivate_internal(
	vm_page_t	page,
	boolean_t	clear_hw_reference);

extern void		vm_page_enqueue_cleaned(vm_page_t page);

extern void		vm_page_lru(
	vm_page_t	page);

extern void		vm_page_speculate(
	vm_page_t	page,
	boolean_t	new);

extern void		vm_page_speculate_ageit(
	struct vm_speculative_age_q *aq);

extern void		vm_page_reactivate_all_throttled(void);

extern void		vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks);

extern void		vm_page_rename(
	vm_page_t		page,
	vm_object_t		new_object,
	vm_object_offset_t	new_offset);

extern void		vm_page_insert(
	vm_page_t		page,
	vm_object_t		object,
	vm_object_offset_t	offset);

extern void		vm_page_insert_wired(
	vm_page_t		page,
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_tag_t		tag);

extern void		vm_page_insert_internal(
	vm_page_t		page,
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_tag_t		tag,
	boolean_t		queues_lock_held,
	boolean_t		insert_in_hash,
	boolean_t		batch_pmap_op,
	boolean_t		delayed_accounting,
	uint64_t		*delayed_ledger_update);

extern void		vm_page_replace(
	vm_page_t		mem,
	vm_object_t		object,
	vm_object_offset_t	offset);

extern void		vm_page_remove(
	vm_page_t	page,
	boolean_t	remove_from_hash);

extern void		vm_page_zero_fill(
	vm_page_t	page);

extern void		vm_page_part_zero_fill(
	vm_page_t	m,
	vm_offset_t	m_pa,
	vm_size_t	len);

extern void		vm_page_copy(
	vm_page_t	src_page,
	vm_page_t	dest_page);

extern void		vm_page_part_copy(
	vm_page_t	src_m,
	vm_offset_t	src_pa,
	vm_page_t	dst_m,
	vm_offset_t	dst_pa,
	vm_size_t	len);

extern void		vm_page_wire(
	vm_page_t	page,
	vm_tag_t	tag,
	boolean_t	check_memorystatus);

extern void		vm_page_unwire(
	vm_page_t	page,
	boolean_t	queueit);

extern void		vm_set_page_size(void);

extern void		vm_page_gobble(
	vm_page_t	page);

extern void		vm_page_validate_cs(vm_page_t page);
extern void		vm_page_validate_cs_mapped(
	vm_page_t	page,
	const void	*kaddr);
extern void		vm_page_validate_cs_mapped_slow(
	vm_page_t	page,
	const void	*kaddr);
extern void		vm_page_validate_cs_mapped_chunk(
	vm_page_t	page,
	const void	*kaddr,
	vm_offset_t	chunk_offset,
	vm_size_t	chunk_size,
	boolean_t	*validated,
	unsigned	*tainted);

extern void		vm_page_free_prepare_queues(
	vm_page_t	page);

extern void		vm_page_free_prepare_object(
	vm_page_t	page,
	boolean_t	remove_from_hash);

#if CONFIG_IOSCHED
extern wait_result_t	vm_page_sleep(
	vm_object_t	object,
	vm_page_t	m,
	int		interruptible);
#endif

extern void vm_pressure_response(void);

#if CONFIG_JETSAM
extern void memorystatus_pages_update(unsigned int pages_avail);

#define VM_CHECK_MEMORYSTATUS do { \
	memorystatus_pages_update(		\
		vm_page_pageable_external_count + \
		vm_page_free_count +		\
		(VM_DYNAMIC_PAGING_ENABLED() ? 0 : vm_page_purgeable_count) \
		); \
	} while(0)

#else /* CONFIG_JETSAM */

#if CONFIG_EMBEDDED

#define VM_CHECK_MEMORYSTATUS do {} while(0)

#else /* CONFIG_EMBEDDED */

#define VM_CHECK_MEMORYSTATUS	vm_pressure_response()

#endif /* CONFIG_EMBEDDED */

#endif /* CONFIG_JETSAM */

/*
 * Functions implemented as macros. m->vmp_wanted and m->vmp_busy are
 * protected by the object lock.
 */

#if CONFIG_EMBEDDED
#define SET_PAGE_DIRTY(m, set_pmap_modified)				\
		MACRO_BEGIN						\
		vm_page_t __page__ = (m);				\
		if (__page__->vmp_pmapped == TRUE &&			\
		    __page__->vmp_wpmapped == TRUE &&			\
		    __page__->vmp_dirty == FALSE &&			\
		    (set_pmap_modified)) {				\
			pmap_set_modify(VM_PAGE_GET_PHYS_PAGE(__page__)); \
		}							\
		__page__->vmp_dirty = TRUE;				\
		MACRO_END
#else /* CONFIG_EMBEDDED */
#define SET_PAGE_DIRTY(m, set_pmap_modified)				\
		MACRO_BEGIN						\
		vm_page_t __page__ = (m);				\
		__page__->vmp_dirty = TRUE;				\
		MACRO_END
#endif /* CONFIG_EMBEDDED */

#define PAGE_ASSERT_WAIT(m, interruptible)			\
		(((m)->vmp_wanted = TRUE),			\
		 assert_wait((event_t) (m), (interruptible)))

#if CONFIG_IOSCHED
#define PAGE_SLEEP(o, m, interruptible)				\
		vm_page_sleep(o, m, interruptible)
#else
#define PAGE_SLEEP(o, m, interruptible)				\
	(((m)->vmp_wanted = TRUE),				\
	 thread_sleep_vm_object((o), (m), (interruptible)))
#endif

#define PAGE_WAKEUP_DONE(m)					\
		MACRO_BEGIN					\
		(m)->vmp_busy = FALSE;				\
		if ((m)->vmp_wanted) {				\
			(m)->vmp_wanted = FALSE;		\
			thread_wakeup((event_t) (m));		\
		}						\
		MACRO_END

#define PAGE_WAKEUP(m)						\
		MACRO_BEGIN					\
		if ((m)->vmp_wanted) {				\
			(m)->vmp_wanted = FALSE;		\
			thread_wakeup((event_t) (m));		\
		}						\
		MACRO_END

#define VM_PAGE_FREE(p)				\
		MACRO_BEGIN			\
		vm_page_free_unlocked(p, TRUE);	\
		MACRO_END

#define VM_PAGE_GRAB_FICTITIOUS(M)					\
		MACRO_BEGIN						\
		while ((M = vm_page_grab_fictitious()) == VM_PAGE_NULL)	\
			vm_page_more_fictitious();			\
		MACRO_END

#define VM_PAGE_WAIT()		((void)vm_page_wait(THREAD_UNINT))
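
/*
 * Usage sketch (illustrative, not part of the original header): the
 * vmp_busy/vmp_wanted handshake under the object lock. A thread finding
 * a page busy records its interest and sleeps (the sleep primitives
 * reacquire the object lock before returning, so the flag is re-checked
 * in a loop):
 *
 *	vm_object_lock(object);
 *	while (m->vmp_busy) {
 *		PAGE_SLEEP(object, m, THREAD_UNINT);	// sets vmp_wanted
 *	}
 *	m->vmp_busy = TRUE;	// take ownership of the page
 *	// ... operate on the page ...
 *	PAGE_WAKEUP_DONE(m);	// clear busy, wake any waiters
 *	vm_object_unlock(object);
 */
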
#define vm_page_queue_lock		(vm_page_locks.vm_page_queue_lock2)
#define vm_page_queue_free_lock		(vm_page_locks.vm_page_queue_free_lock2)

#define vm_page_lock_queues()		lck_mtx_lock(&vm_page_queue_lock)
#define vm_page_trylock_queues()	lck_mtx_try_lock(&vm_page_queue_lock)
#define vm_page_unlock_queues()		lck_mtx_unlock(&vm_page_queue_lock)

#define vm_page_lockspin_queues()	lck_mtx_lock_spin(&vm_page_queue_lock)
#define vm_page_trylockspin_queues()	lck_mtx_try_lock_spin(&vm_page_queue_lock)
#define vm_page_lockconvert_queues()	lck_mtx_convert_spin(&vm_page_queue_lock)

#ifdef VPL_LOCK_SPIN
extern lck_grp_t vm_page_lck_grp_local;

#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_spin_init(&vlq->vpl_lock, vpl_grp, vpl_attr)
#define VPL_LOCK(vpl) lck_spin_lock_grp(vpl, &vm_page_lck_grp_local)
#define VPL_UNLOCK(vpl) lck_spin_unlock(vpl)
#else
#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_mtx_init_ext(&vlq->vpl_lock, &vlq->vpl_lock_ext, vpl_grp, vpl_attr)
#define VPL_LOCK(vpl) lck_mtx_lock_spin(vpl)
#define VPL_UNLOCK(vpl) lck_mtx_unlock(vpl)
#endif


#if DEVELOPMENT || DEBUG
#define VM_PAGE_SPECULATIVE_USED_ADD()				\
	MACRO_BEGIN						\
	OSAddAtomic(1, &vm_page_speculative_used);		\
	MACRO_END
#else
#define VM_PAGE_SPECULATIVE_USED_ADD()
#endif


#define VM_PAGE_CONSUME_CLUSTERED(mem)				\
	MACRO_BEGIN						\
	ppnum_t	__phys_page;					\
	__phys_page = VM_PAGE_GET_PHYS_PAGE(mem);		\
	pmap_lock_phys_page(__phys_page);			\
	if (mem->vmp_clustered) {				\
		vm_object_t o;					\
		o = VM_PAGE_OBJECT(mem);			\
		assert(o);					\
		o->pages_used++;				\
		mem->vmp_clustered = FALSE;			\
		VM_PAGE_SPECULATIVE_USED_ADD();			\
	}							\
	pmap_unlock_phys_page(__phys_page);			\
	MACRO_END


#define VM_PAGE_COUNT_AS_PAGEIN(mem)				\
	MACRO_BEGIN						\
	{							\
		vm_object_t o;					\
		o = VM_PAGE_OBJECT(mem);			\
		DTRACE_VM2(pgin, int, 1, (uint64_t *), NULL);	\
		current_task()->pageins++;			\
		if (o->internal) {				\
			DTRACE_VM2(anonpgin, int, 1, (uint64_t *), NULL); \
		} else {					\
			DTRACE_VM2(fspgin, int, 1, (uint64_t *), NULL);	\
		}						\
	}							\
	MACRO_END

/* adjust for stolen pages accounted elsewhere */
#define VM_PAGE_MOVE_STOLEN(page_count)				\
	MACRO_BEGIN						\
	vm_page_stolen_count -=	(page_count);			\
	vm_page_wire_count_initial -= (page_count);		\
	MACRO_END

#define DW_vm_page_unwire		0x01
#define DW_vm_page_wire			0x02
#define DW_vm_page_free			0x04
#define DW_vm_page_activate		0x08
#define DW_vm_page_deactivate_internal	0x10
#define DW_vm_page_speculate		0x20
#define DW_vm_page_lru			0x40
#define DW_vm_pageout_throttle_up	0x80
#define DW_PAGE_WAKEUP			0x100
#define DW_clear_busy			0x200
#define DW_clear_reference		0x400
#define DW_set_reference		0x800
#define DW_move_page			0x1000
#define DW_VM_PAGE_QUEUES_REMOVE	0x2000
#define DW_enqueue_cleaned		0x4000
#define DW_vm_phantom_cache_update	0x8000

struct vm_page_delayed_work {
	vm_page_t	dw_m;
	int		dw_mask;
};

void vm_page_do_delayed_work(vm_object_t object, vm_tag_t tag, struct vm_page_delayed_work *dwp, int dw_count);

extern unsigned int vm_max_delayed_work_limit;

#define DEFAULT_DELAYED_WORK_LIMIT	32

#define DELAYED_WORK_LIMIT(max)	((vm_max_delayed_work_limit >= max ? max : vm_max_delayed_work_limit))

/*
 * vm_page_do_delayed_work may need to drop the object lock...
 * if it does, we need the pages it's looking at to
 * be held stable via the busy bit, so if busy isn't already
 * set, we need to set it and ask vm_page_do_delayed_work
 * to clear it and wakeup anyone that might have blocked on
 * it once we're done processing the page.
 */

#define VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_cnt)		\
	MACRO_BEGIN						\
	if (mem->vmp_busy == FALSE) {				\
		mem->vmp_busy = TRUE;				\
		if ( !(dwp->dw_mask & DW_vm_page_free))		\
			dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); \
	}							\
	dwp->dw_m = mem;					\
	dwp++;							\
	dw_cnt++;						\
	MACRO_END
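
/*
 * Usage sketch (illustrative): the batching pattern this macro supports.
 * The local names (dw_array, dwp, dw_count, tag) are assumptions for the
 * sketch, not part of this header's API. Under the object lock:
 *
 *	struct vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
 *	struct vm_page_delayed_work	*dwp = &dw_array[0];
 *	int				dw_count = 0;
 *	int				dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
 *
 *	// for each page 'mem' being processed:
 *	dwp->dw_mask = DW_vm_page_free;
 *	VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_count);
 *	if (dw_count >= dw_limit) {
 *		vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
 *		dwp = &dw_array[0];
 *		dw_count = 0;
 *	}
 *	// after the loop, flush any remaining entries the same way
 */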
1665
1666extern vm_page_t vm_object_page_grab(vm_object_t);
1667
15129b1c
A
1668#if VM_PAGE_BUCKETS_CHECK
1669extern void vm_page_buckets_check(void);
1670#endif /* VM_PAGE_BUCKETS_CHECK */
6d2010ae 1671
39037602 1672extern void vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_backgroundq);
3e170ce0
A
1673extern void vm_page_remove_internal(vm_page_t page);
1674extern void vm_page_enqueue_inactive(vm_page_t mem, boolean_t first);
39037602 1675extern void vm_page_enqueue_active(vm_page_t mem, boolean_t first);
3e170ce0
A
1676extern void vm_page_check_pageable_safe(vm_page_t page);
1677
d9a64523
A
1678#if CONFIG_SECLUDED_MEMORY
1679extern uint64_t secluded_shutoff_trigger;
1680extern void start_secluded_suppression(task_t);
1681extern void stop_secluded_suppression(task_t);
1682#endif /* CONFIG_SECLUDED_MEMORY */
1683
3e170ce0 1684
0a7de745 1685#endif /* _VM_VM_PAGE_H_ */