/*
 * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_page.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Resident memory system definitions.
 */

#ifndef _VM_VM_PAGE_H_
#define _VM_VM_PAGE_H_

#include <debug.h>
#include <vm/vm_options.h>
#include <vm/vm_protos.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/memory_object_types.h> /* for VMP_CS_BITS... */


#if defined(__LP64__)

/*
 * in order to make the size of a vm_page_t 64 bytes (cache line size for both arm64 and x86_64)
 * we'll keep the next_m pointer packed... as long as the kernel virtual space where we allocate
 * vm_page_t's from doesn't span more than 256 Gbytes, we're safe. There are live tests in the
 * vm_page_t array allocation and the zone init code to determine if we can safely pack and unpack
 * pointers from the 2 ends of these spaces
 */
typedef uint32_t        vm_page_packed_t;

struct vm_page_packed_queue_entry {
    vm_page_packed_t next;          /* next element */
    vm_page_packed_t prev;          /* previous element */
};

typedef struct vm_page_packed_queue_entry       *vm_page_queue_t;
typedef struct vm_page_packed_queue_entry       vm_page_queue_head_t;
typedef struct vm_page_packed_queue_entry       vm_page_queue_chain_t;
typedef struct vm_page_packed_queue_entry       *vm_page_queue_entry_t;

typedef vm_page_packed_t        vm_page_object_t;

#else // __LP64__

/*
 * we can't do the packing trick on 32 bit architectures
 * so just turn the macros into noops.
 */
typedef struct vm_page  *vm_page_packed_t;

#define vm_page_queue_t         queue_t
#define vm_page_queue_head_t    queue_head_t
#define vm_page_queue_chain_t   queue_chain_t
#define vm_page_queue_entry_t   queue_entry_t

#define vm_page_object_t        vm_object_t
#endif // __LP64__


#include <vm/vm_object.h>
#include <kern/queue.h>
#include <kern/locks.h>

#include <kern/macro_help.h>
#include <libkern/OSAtomic.h>



#define VM_PAGE_COMPRESSOR_COUNT        (compressor_object->resident_page_count)

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A hash table bucket used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).  [Some fields require that both locks be held to
 *	change that field; holding either lock is sufficient to read.]
 */

#define VM_PAGE_NULL            ((vm_page_t) 0)

extern  char    vm_page_inactive_states[];
extern  char    vm_page_pageable_states[];
extern  char    vm_page_non_speculative_pageable_states[];
extern  char    vm_page_active_or_inactive_states[];


#define VM_PAGE_INACTIVE(m)                     (vm_page_inactive_states[m->vmp_q_state])
#define VM_PAGE_PAGEABLE(m)                     (vm_page_pageable_states[m->vmp_q_state])
#define VM_PAGE_NON_SPECULATIVE_PAGEABLE(m)     (vm_page_non_speculative_pageable_states[m->vmp_q_state])
#define VM_PAGE_ACTIVE_OR_INACTIVE(m)           (vm_page_active_or_inactive_states[m->vmp_q_state])


#define VM_PAGE_NOT_ON_Q                0       /* page is not present on any queue, nor is it wired... mainly a transient state */
#define VM_PAGE_IS_WIRED                1       /* page is currently wired */
#define VM_PAGE_USED_BY_COMPRESSOR      2       /* page is in use by the compressor to hold compressed data */
#define VM_PAGE_ON_FREE_Q               3       /* page is on the main free queue */
#define VM_PAGE_ON_FREE_LOCAL_Q         4       /* page is on one of the per-CPU free queues */
#define VM_PAGE_ON_FREE_LOPAGE_Q        5       /* page is on the lopage pool free list */
#define VM_PAGE_ON_THROTTLED_Q          6       /* page is on the throttled queue... we stash anonymous pages here when not paging */
#define VM_PAGE_ON_PAGEOUT_Q            7       /* page is on one of the pageout queues (internal/external) awaiting processing */
#define VM_PAGE_ON_SPECULATIVE_Q        8       /* page is on one of the speculative queues */
#define VM_PAGE_ON_ACTIVE_LOCAL_Q       9       /* page has recently been created and is being held in one of the per-CPU local queues */
#define VM_PAGE_ON_ACTIVE_Q             10      /* page is in global active queue */
#define VM_PAGE_ON_INACTIVE_INTERNAL_Q  11      /* page is on the inactive internal queue a.k.a. anonymous queue */
#define VM_PAGE_ON_INACTIVE_EXTERNAL_Q  12      /* page is on the inactive external queue a.k.a. file backed queue */
#define VM_PAGE_ON_INACTIVE_CLEANED_Q   13      /* page has been cleaned to a backing file and is ready to be stolen */
#define VM_PAGE_ON_SECLUDED_Q           14      /* page is on secluded queue */
#define VM_PAGE_Q_STATE_LAST_VALID_VALUE        14      /* we currently use 4 bits for the state... don't let this go beyond 15 */

#define VM_PAGE_Q_STATE_ARRAY_SIZE      (VM_PAGE_Q_STATE_LAST_VALID_VALUE+1)
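
/*
 * Illustrative sketch (added for clarity, not part of the original header):
 * the vm_page_*_states arrays declared above are lookup tables indexed by
 * vmp_q_state, populated by the VM startup code roughly as follows so that
 * the VM_PAGE_* test macros reduce to a single array load:
 *
 *	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
 *	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
 *	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q]  = 1;
 *	...
 *	if (VM_PAGE_INACTIVE(m)) {      // one load, no per-state branching
 *	        ...
 *	}
 */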


/*
 * The structure itself. See the block comment above for what (O) and (P) mean.
 */
#define vmp_pageq vmp_q_un.vmp_q_pageq
#define vmp_snext vmp_q_un.vmp_q_snext

struct vm_page {
    union {
        vm_page_queue_chain_t   vmp_q_pageq;    /* queue info for FIFO queue or free list (P) */
        struct vm_page          *vmp_q_snext;
    } vmp_q_un;

    vm_page_queue_chain_t       vmp_listq;      /* all pages in same object (O) */

#if CONFIG_BACKGROUND_QUEUE
    vm_page_queue_chain_t       vmp_backgroundq;        /* anonymous pages in the background pool (P) */
#endif

    vm_object_offset_t          vmp_offset;     /* offset into that object (O,P) */
    vm_page_object_t            vmp_object;     /* which object am I in (O&P) */

    /*
     * The following word of flags is always protected by the "page queues" lock.
     *
     * We use 'vmp_wire_count' to store the local queue id if local queues are enabled.
     * See the comments at 'vm_page_queues_remove' as to why this is safe to do.
     */
#define vmp_local_id vmp_wire_count
    unsigned int vmp_wire_count:16,     /* how many wired down maps use me? (O&P) */
        vmp_q_state:4,                  /* which q is the page on (P) */
        vmp_in_background:1,
        vmp_on_backgroundq:1,
        vmp_gobbled:1,                  /* page used internally (P) */
        vmp_laundry:1,                  /* page is being cleaned now (P) */
        vmp_no_cache:1,                 /* page is not to be cached and should */
                                        /* be reused ahead of other pages (P) */
        vmp_private:1,                  /* Page should not be returned to the free list (P) */
        vmp_reference:1,                /* page has been used (P) */
        vmp_lopage:1,
        vmp_unused_page_bits:4;

    /*
     * MUST keep the 2 32 bit words used as bit fields
     * separated since the compiler has a nasty habit
     * of using 64 bit loads and stores on them as
     * if they were a single 64 bit field... since
     * they are protected by 2 different locks, this
     * is a real problem
     */
    vm_page_packed_t vmp_next_m;        /* VP bucket link (O) */

    /*
     * The following word of flags is protected by the "VM object" lock.
     *
     * IMPORTANT: the "vmp_pmapped", "vmp_xpmapped" and "vmp_clustered" bits can be modified while holding the
     * VM object "shared" lock + the page lock provided through the pmap_lock_phys_page function.
     * This is done in vm_fault_enter() and the CONSUME_CLUSTERED macro.
     * It's also ok to modify them behind just the VM object "exclusive" lock.
     */
    unsigned int vmp_busy:1,            /* page is in transit (O) */
        vmp_wanted:1,                   /* someone is waiting for page (O) */
        vmp_tabled:1,                   /* page is in VP table (O) */
        vmp_hashed:1,                   /* page is in vm_page_buckets[] (O) + the bucket lock */
        vmp_fictitious:1,               /* Physical page doesn't exist (O) */
        vmp_clustered:1,                /* page is not the faulted page (O) or (O-shared AND pmap_page) */
        vmp_pmapped:1,                  /* page has at some time been entered into a pmap (O) or */
                                        /* (O-shared AND pmap_page) */
        vmp_xpmapped:1,                 /* page has been entered with execute permission (O) or */
                                        /* (O-shared AND pmap_page) */
        vmp_wpmapped:1,                 /* page has been entered at some point into a pmap for write (O) */
        vmp_free_when_done:1,           /* page is to be freed once cleaning is completed (O) */
        vmp_absent:1,                   /* Data has been requested, but is not yet available (O) */
        vmp_error:1,                    /* Data manager was unable to provide data due to error (O) */
        vmp_dirty:1,                    /* Page must be cleaned (O) */
        vmp_cleaning:1,                 /* Page clean has begun (O) */
        vmp_precious:1,                 /* Page is precious; data must be returned even if clean (O) */
        vmp_overwriting:1,              /* Request to unlock has been made without having data. (O) */
                                        /* [See vm_fault_page_overwrite] */
        vmp_restart:1,                  /* Page was pushed higher in shadow chain by copy_call-related pagers */
                                        /* start again at top of chain */
        vmp_unusual:1,                  /* Page is absent, error, restart or page locked */
        vmp_cs_validated:VMP_CS_BITS,   /* code-signing: page was checked */
        vmp_cs_tainted:VMP_CS_BITS,     /* code-signing: page is tainted */
        vmp_cs_nx:VMP_CS_BITS,          /* code-signing: page is nx */
        vmp_reusable:1,
        vmp_written_by_kernel:1;        /* page was written by kernel (i.e. decompressed) */

#if !defined(__arm__) && !defined(__arm64__)
    ppnum_t     vmp_phys_page;          /* Physical page number of the page */
#endif
};

typedef struct vm_page  *vm_page_t;
extern vm_page_t        vm_pages;
extern vm_page_t        vm_page_array_beginning_addr;
extern vm_page_t        vm_page_array_ending_addr;

static inline int
VMP_CS_FOR_OFFSET(
    vm_map_offset_t fault_phys_offset)
{
    assertf(fault_phys_offset < PAGE_SIZE &&
        !(fault_phys_offset & FOURK_PAGE_MASK),
        "offset 0x%llx\n", (uint64_t)fault_phys_offset);
    return 1 << (fault_phys_offset >> FOURK_PAGE_SHIFT);
}
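/*
 * Illustrative example (added for clarity, not part of the original header):
 * each VMP_CS_BITS-wide flag above keeps one bit per 4K code-signing
 * sub-page of a kernel page, and VMP_CS_FOR_OFFSET() selects the bit for
 * the sub-page being faulted.  With FOURK_PAGE_SHIFT == 12:
 *
 *	VMP_CS_FOR_OFFSET(0x0000) == 0x1    // first 4K sub-page
 *	VMP_CS_FOR_OFFSET(0x2000) == 0x4    // third 4K sub-page
 */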
static inline bool
VMP_CS_VALIDATED(
    vm_page_t p,
    vm_map_size_t fault_page_size,
    vm_map_offset_t fault_phys_offset)
{
    assertf(fault_page_size <= PAGE_SIZE,
        "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
        (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
    if (fault_page_size == PAGE_SIZE) {
        return p->vmp_cs_validated == VMP_CS_ALL_TRUE;
    }
    return p->vmp_cs_validated & VMP_CS_FOR_OFFSET(fault_phys_offset);
}
static inline bool
VMP_CS_TAINTED(
    vm_page_t p,
    vm_map_size_t fault_page_size,
    vm_map_offset_t fault_phys_offset)
{
    assertf(fault_page_size <= PAGE_SIZE,
        "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
        (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
    if (fault_page_size == PAGE_SIZE) {
        return p->vmp_cs_tainted != VMP_CS_ALL_FALSE;
    }
    return p->vmp_cs_tainted & VMP_CS_FOR_OFFSET(fault_phys_offset);
}
static inline bool
VMP_CS_NX(
    vm_page_t p,
    vm_map_size_t fault_page_size,
    vm_map_offset_t fault_phys_offset)
{
    assertf(fault_page_size <= PAGE_SIZE,
        "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
        (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
    if (fault_page_size == PAGE_SIZE) {
        return p->vmp_cs_nx != VMP_CS_ALL_FALSE;
    }
    return p->vmp_cs_nx & VMP_CS_FOR_OFFSET(fault_phys_offset);
}
static inline void
VMP_CS_SET_VALIDATED(
    vm_page_t p,
    vm_map_size_t fault_page_size,
    vm_map_offset_t fault_phys_offset,
    boolean_t value)
{
    assertf(fault_page_size <= PAGE_SIZE,
        "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
        (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
    if (value) {
        if (fault_page_size == PAGE_SIZE) {
            p->vmp_cs_validated = VMP_CS_ALL_TRUE;
        }
        p->vmp_cs_validated |= VMP_CS_FOR_OFFSET(fault_phys_offset);
    } else {
        if (fault_page_size == PAGE_SIZE) {
            p->vmp_cs_validated = VMP_CS_ALL_FALSE;
        }
        p->vmp_cs_validated &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
    }
}
static inline void
VMP_CS_SET_TAINTED(
    vm_page_t p,
    vm_map_size_t fault_page_size,
    vm_map_offset_t fault_phys_offset,
    boolean_t value)
{
    assertf(fault_page_size <= PAGE_SIZE,
        "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
        (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
    if (value) {
        if (fault_page_size == PAGE_SIZE) {
            p->vmp_cs_tainted = VMP_CS_ALL_TRUE;
        }
        p->vmp_cs_tainted |= VMP_CS_FOR_OFFSET(fault_phys_offset);
    } else {
        if (fault_page_size == PAGE_SIZE) {
            p->vmp_cs_tainted = VMP_CS_ALL_FALSE;
        }
        p->vmp_cs_tainted &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
    }
}
static inline void
VMP_CS_SET_NX(
    vm_page_t p,
    vm_map_size_t fault_page_size,
    vm_map_offset_t fault_phys_offset,
    boolean_t value)
{
    assertf(fault_page_size <= PAGE_SIZE,
        "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
        (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
    if (value) {
        if (fault_page_size == PAGE_SIZE) {
            p->vmp_cs_nx = VMP_CS_ALL_TRUE;
        }
        p->vmp_cs_nx |= VMP_CS_FOR_OFFSET(fault_phys_offset);
    } else {
        if (fault_page_size == PAGE_SIZE) {
            p->vmp_cs_nx = VMP_CS_ALL_FALSE;
        }
        p->vmp_cs_nx &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
    }
}

#if defined(__arm__) || defined(__arm64__)

extern  unsigned int vm_first_phys_ppnum;

struct vm_page_with_ppnum {
    struct vm_page      vm_page_wo_ppnum;

    ppnum_t             vmp_phys_page;
};
typedef struct vm_page_with_ppnum *vm_page_with_ppnum_t;


static inline ppnum_t
VM_PAGE_GET_PHYS_PAGE(vm_page_t m)
{
    if (m >= vm_page_array_beginning_addr && m < vm_page_array_ending_addr) {
        return (ppnum_t)((uintptr_t)(m - vm_page_array_beginning_addr) + vm_first_phys_ppnum);
    } else {
        return ((vm_page_with_ppnum_t)m)->vmp_phys_page;
    }
}

#define VM_PAGE_SET_PHYS_PAGE(m, ppnum)         \
	MACRO_BEGIN                             \
	if ((m) < vm_page_array_beginning_addr || (m) >= vm_page_array_ending_addr)    \
	        ((vm_page_with_ppnum_t)(m))->vmp_phys_page = ppnum;     \
	assert(ppnum == VM_PAGE_GET_PHYS_PAGE(m));      \
	MACRO_END

#define VM_PAGE_GET_COLOR(m)    (VM_PAGE_GET_PHYS_PAGE(m) & vm_color_mask)
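
/*
 * Illustrative note (added for clarity, not part of the original header):
 * on arm/arm64 the pages described by the main vm_pages[] array carry no
 * explicit physical page number; because that array covers one contiguous
 * physical range, the ppnum is recovered from the array index:
 *
 *	VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) == vm_first_phys_ppnum + i
 *
 * Only pages allocated outside the array (struct vm_page_with_ppnum) store
 * vmp_phys_page explicitly.
 */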

#else   /* defined(__arm__) || defined(__arm64__) */


struct vm_page_with_ppnum {
    struct vm_page      vm_page_with_ppnum;
};
typedef struct vm_page_with_ppnum *vm_page_with_ppnum_t;


#define VM_PAGE_GET_PHYS_PAGE(page)     (page)->vmp_phys_page
#define VM_PAGE_SET_PHYS_PAGE(page, ppnum)      \
	MACRO_BEGIN                             \
	(page)->vmp_phys_page = ppnum;          \
	MACRO_END

#define VM_PAGE_GET_CLUMP(m)    ((VM_PAGE_GET_PHYS_PAGE(m)) >> vm_clump_shift)
#define VM_PAGE_GET_COLOR(m)    ((VM_PAGE_GET_CLUMP(m)) & vm_color_mask)

#endif  /* defined(__arm__) || defined(__arm64__) */



#if defined(__LP64__)
/*
 * Parameters for pointer packing
 *
 *
 * VM page pointers might point to:
 *
 * 1. VM_PAGE_PACKED_ALIGNED aligned kernel globals,
 *
 * 2. VM_PAGE_PACKED_ALIGNED aligned heap allocated vm pages
 *
 * 3. entries in the vm_pages array (whose entries aren't VM_PAGE_PACKED_ALIGNED
 *    aligned).
 *
 *
 * The current scheme uses 31 bits of storage and 6 bits of shift using the
 * VM_PACK_POINTER() scheme for (1-2), and packs (3) as an index within the
 * vm_pages array, setting the top bit (VM_PAGE_PACKED_FROM_ARRAY).
 *
 * This scheme gives us a reach of 128G from VM_MIN_KERNEL_AND_KEXT_ADDRESS.
 */
#define VM_VPLQ_ALIGNMENT               128
#define VM_PAGE_PACKED_PTR_ALIGNMENT    64      /* must be a power of 2 */
#define VM_PAGE_PACKED_ALIGNED          __attribute__((aligned(VM_PAGE_PACKED_PTR_ALIGNMENT)))
#define VM_PAGE_PACKED_PTR_BITS         31
#define VM_PAGE_PACKED_PTR_SHIFT        6
#define VM_PAGE_PACKED_PTR_BASE         ((uintptr_t)VM_MIN_KERNEL_AND_KEXT_ADDRESS)

#define VM_PAGE_PACKED_FROM_ARRAY       0x80000000
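
/*
 * Worked example (added for clarity, not part of the original header): a
 * packed pointer stores (addr - VM_PAGE_PACKED_PTR_BASE) >> VM_PAGE_PACKED_PTR_SHIFT
 * in VM_PAGE_PACKED_PTR_BITS bits, so the addressable window is
 *
 *	2^31 values * 2^6 bytes of granularity = 2^37 bytes = 128 GB
 *
 * above VM_MIN_KERNEL_AND_KEXT_ADDRESS.  This is why vm_page_t allocations
 * that live outside the vm_pages[] array must be aligned to
 * VM_PAGE_PACKED_PTR_ALIGNMENT (64 bytes).
 */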

static inline vm_page_packed_t
vm_page_pack_ptr(uintptr_t p)
{
    if (p >= (uintptr_t)vm_page_array_beginning_addr &&
        p < (uintptr_t)vm_page_array_ending_addr) {
        ptrdiff_t diff = (vm_page_t)p - vm_page_array_beginning_addr;
        assert((vm_page_t)p == &vm_pages[diff]);
        return (vm_page_packed_t)(diff | VM_PAGE_PACKED_FROM_ARRAY);
    }

    VM_ASSERT_POINTER_PACKABLE(p, VM_PAGE_PACKED_PTR);
    vm_offset_t packed = VM_PACK_POINTER(p, VM_PAGE_PACKED_PTR);
    return CAST_DOWN_EXPLICIT(vm_page_packed_t, packed);
}


static inline uintptr_t
vm_page_unpack_ptr(uintptr_t p)
{
    extern unsigned int vm_pages_count;

    if (p >= VM_PAGE_PACKED_FROM_ARRAY) {
        p &= ~VM_PAGE_PACKED_FROM_ARRAY;
        assert(p < (uintptr_t)vm_pages_count);
        return (uintptr_t)&vm_pages[p];
    }

    return VM_UNPACK_POINTER(p, VM_PAGE_PACKED_PTR);
}


#define VM_PAGE_PACK_PTR(p)     vm_page_pack_ptr((uintptr_t)(p))
#define VM_PAGE_UNPACK_PTR(p)   vm_page_unpack_ptr((uintptr_t)(p))

#define VM_PAGE_OBJECT(p)       ((vm_object_t)(VM_PAGE_UNPACK_PTR(p->vmp_object)))
#define VM_PAGE_PACK_OBJECT(o)  ((vm_page_object_t)(VM_PAGE_PACK_PTR(o)))
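
/*
 * Usage sketch (illustrative, not from the original header): packing is
 * lossless for any pointer the VM is allowed to store, so for a resident
 * page and its owning object:
 *
 *	vm_page_t   m;
 *	vm_object_t obj = VM_PAGE_OBJECT(m);                    // unpack vmp_object
 *	assert(m == (vm_page_t)VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(m)));
 *	m->vmp_object = VM_PAGE_PACK_OBJECT(obj);               // re-pack on insert
 */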


#define VM_PAGE_ZERO_PAGEQ_ENTRY(p)     \
MACRO_BEGIN                             \
	(p)->vmp_snext = 0;             \
MACRO_END


#define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p)       VM_PAGE_PACK_PTR(p)


static __inline__ void
vm_page_enqueue_tail(
    vm_page_queue_t             que,
    vm_page_queue_entry_t       elt)
{
    vm_page_queue_entry_t       old_tail;

    old_tail = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(que->prev);
    elt->next = VM_PAGE_PACK_PTR(que);
    elt->prev = que->prev;
    que->prev = old_tail->next = VM_PAGE_PACK_PTR(elt);
}


static __inline__ void
vm_page_remque(
    vm_page_queue_entry_t elt)
{
    vm_page_queue_entry_t next;
    vm_page_queue_entry_t prev;
    vm_page_packed_t      next_pck = elt->next;
    vm_page_packed_t      prev_pck = elt->prev;

    next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(next_pck);

    /* next may equal prev (and the queue head) if elt was the only element */
    prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev_pck);

    next->prev = prev_pck;
    prev->next = next_pck;

    elt->next = 0;
    elt->prev = 0;
}


/*
 *	Macro:		vm_page_queue_init
 *	Function:
 *		Initialize the given queue.
 *	Header:
 *		void vm_page_queue_init(q)
 *			vm_page_queue_t	q;	\* MODIFIED *\
 */
#define vm_page_queue_init(q)           \
MACRO_BEGIN                             \
	VM_ASSERT_POINTER_PACKABLE((vm_offset_t)(q), VM_PAGE_PACKED_PTR); \
	(q)->next = VM_PAGE_PACK_PTR(q);        \
	(q)->prev = VM_PAGE_PACK_PTR(q);        \
MACRO_END


/*
 *	Macro:		vm_page_queue_enter
 *	Function:
 *		Insert a new element at the tail of the vm_page queue.
 *	Header:
 *		void vm_page_queue_enter(q, elt, field)
 *			queue_t q;
 *			vm_page_t elt;
 *			<field> is the list field in vm_page_t
 *
 *	This macro's arguments have to match the generic "queue_enter()" macro which is
 *	what is used for this on 32 bit kernels.
 */
#define vm_page_queue_enter(head, elt, field)                           \
MACRO_BEGIN                                                             \
	vm_page_packed_t __pck_elt = VM_PAGE_PACK_PTR(elt);            \
	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);          \
	vm_page_packed_t __pck_prev = (head)->prev;                    \
                                                                        \
	if (__pck_head == __pck_prev) {                                 \
	        (head)->next = __pck_elt;                               \
	} else {                                                        \
	        vm_page_t __prev;                                       \
	        __prev = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_prev);     \
	        __prev->field.next = __pck_elt;                         \
	}                                                               \
	(elt)->field.prev = __pck_prev;                                 \
	(elt)->field.next = __pck_head;                                 \
	(head)->prev = __pck_elt;                                       \
MACRO_END
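
/*
 * Usage sketch (illustrative, not from the original header): with the page
 * queues lock held, a page is appended to the global active queue via its
 * vmp_pageq linkage field, mirroring how the pageout code uses this macro:
 *
 *	vm_page_queue_enter(&vm_page_queue_active, mem, vmp_pageq);
 *	mem->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
 *	vm_page_active_count++;
 */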


#if defined(__x86_64__)
/*
 * These are helper macros for vm_page_queue_enter_clump to assist
 * with conditional compilation (release / debug / development)
 */
#if DEVELOPMENT || DEBUG

#define __DEBUG_CHECK_BUDDIES(__prev, __p, field)                                               \
MACRO_BEGIN                                                                                     \
	if (__prev != NULL) {                                                                   \
	        assert(__p == (vm_page_t)VM_PAGE_UNPACK_PTR(__prev->next));                    \
	        assert(__prev == (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(__p->field.prev));  \
	}                                                                                       \
MACRO_END

#define __DEBUG_VERIFY_LINKS(__first, __n_free, __last_next)                   \
MACRO_BEGIN                                                                    \
	unsigned int __i;                                                      \
	vm_page_queue_entry_t __tmp;                                           \
	for (__i = 0, __tmp = __first; __i < __n_free; __i++) {                \
	        __tmp = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(__tmp->next); \
	}                                                                      \
	assert(__tmp == __last_next);                                          \
MACRO_END

#define __DEBUG_STAT_INCREMENT_INRANGE          vm_clump_inrange++
#define __DEBUG_STAT_INCREMENT_INSERTS          vm_clump_inserts++
#define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free)       vm_clump_promotes+=__n_free

#else

#define __DEBUG_CHECK_BUDDIES(__prev, __p, field)
#define __DEBUG_VERIFY_LINKS(__first, __n_free, __last_next)
#define __DEBUG_STAT_INCREMENT_INRANGE
#define __DEBUG_STAT_INCREMENT_INSERTS
#define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free)

#endif  /* if DEVELOPMENT || DEBUG */

/*
 * Insert a new page into a free queue and clump pages within the same 16K boundary together
 */
static inline void
vm_page_queue_enter_clump(
    vm_page_queue_t     head,
    vm_page_t           elt)
{
    vm_page_queue_entry_t first = NULL;         /* first page in the clump */
    vm_page_queue_entry_t last = NULL;          /* last page in the clump */
    vm_page_queue_entry_t prev = NULL;
    vm_page_queue_entry_t next;
    uint_t                n_free = 1;
    extern unsigned int vm_pages_count;
    extern unsigned int vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;
    extern unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;

    /*
     * If elt is part of the vm_pages[] array, find its neighboring buddies in the array.
     */
    if (vm_page_array_beginning_addr <= elt && elt < &vm_pages[vm_pages_count]) {
        vm_page_t p;
        uint_t    i;
        uint_t    n;
        ppnum_t   clump_num;

        first = last = (vm_page_queue_entry_t)elt;
        clump_num = VM_PAGE_GET_CLUMP(elt);
        n = VM_PAGE_GET_PHYS_PAGE(elt) & vm_clump_mask;

        /*
         * Check for preceding vm_pages[] entries in the same chunk
         */
        for (i = 0, p = elt - 1; i < n && vm_page_array_beginning_addr <= p; i++, p--) {
            if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) {
                if (prev == NULL) {
                    prev = (vm_page_queue_entry_t)p;
                }
                first = (vm_page_queue_entry_t)p;
                n_free++;
            }
        }

        /*
         * Check the following vm_pages[] entries in the same chunk
         */
        for (i = n + 1, p = elt + 1; i < vm_clump_size && p < &vm_pages[vm_pages_count]; i++, p++) {
            if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) {
                if (last == (vm_page_queue_entry_t)elt) {       /* first one only */
                    __DEBUG_CHECK_BUDDIES(prev, p, vmp_pageq);
                }

                if (prev == NULL) {
                    prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev);
                }
                last = (vm_page_queue_entry_t)p;
                n_free++;
            }
        }
        __DEBUG_STAT_INCREMENT_INRANGE;
    }

    /* if elt is not part of vm_pages or if 1st page in clump, insert at tail */
    if (prev == NULL) {
        prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->prev);
    }

    /* insert the element */
    next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev->next);
    elt->vmp_pageq.next = prev->next;
    elt->vmp_pageq.prev = next->prev;
    prev->next = next->prev = VM_PAGE_PACK_PTR(elt);
    __DEBUG_STAT_INCREMENT_INSERTS;

    /*
     * Check if clump needs to be promoted to head.
     */
    if (n_free >= vm_clump_promote_threshold && n_free > 1) {
        vm_page_queue_entry_t first_prev;

        first_prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(first->prev);

        /* If not at head already */
        if (first_prev != head) {
            vm_page_queue_entry_t last_next;
            vm_page_queue_entry_t head_next;

            last_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(last->next);

            /* verify that the links within the clump are consistent */
            __DEBUG_VERIFY_LINKS(first, n_free, last_next);

            /* promote clump to head */
            first_prev->next = last->next;
            last_next->prev = first->prev;
            first->prev = VM_PAGE_PACK_PTR(head);
            last->next = head->next;

            head_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->next);
            head_next->prev = VM_PAGE_PACK_PTR(last);
            head->next = VM_PAGE_PACK_PTR(first);
            __DEBUG_STAT_INCREMENT_PROMOTES(n_free);
        }
    }
}
#endif

/*
 *	Macro:		vm_page_queue_enter_first
 *	Function:
 *		Insert a new element at the head of the vm_page queue.
 *	Header:
 *		void vm_page_queue_enter_first(q, elt, field)
 *			queue_t q;
 *			vm_page_t elt;
 *			<field> is the linkage field in vm_page
 *
 *	This macro's arguments have to match the generic "queue_enter_first()" macro which is
 *	what is used for this on 32 bit kernels.
 */
#define vm_page_queue_enter_first(head, elt, field)                     \
MACRO_BEGIN                                                             \
	vm_page_packed_t __pck_next = (head)->next;                     \
	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);           \
	vm_page_packed_t __pck_elt = VM_PAGE_PACK_PTR(elt);             \
                                                                        \
	if (__pck_head == __pck_next) {                                 \
	        (head)->prev = __pck_elt;                               \
	} else {                                                        \
	        vm_page_t __next;                                       \
	        __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next);     \
	        __next->field.prev = __pck_elt;                         \
	}                                                               \
                                                                        \
	(elt)->field.next = __pck_next;                                 \
	(elt)->field.prev = __pck_head;                                 \
	(head)->next = __pck_elt;                                       \
MACRO_END


/*
 *	Macro:		vm_page_queue_remove
 *	Function:
 *		Remove an arbitrary page from a vm_page queue.
 *	Header:
 *		void vm_page_queue_remove(q, qe, field)
 *			arguments as in vm_page_queue_enter
 *
 *	This macro's arguments have to match the generic "queue_enter()" macro which is
 *	what is used for this on 32 bit kernels.
 */
#define vm_page_queue_remove(head, elt, field)                          \
MACRO_BEGIN                                                             \
	vm_page_packed_t __pck_next = (elt)->field.next;                \
	vm_page_packed_t __pck_prev = (elt)->field.prev;                \
	vm_page_t __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next);   \
	vm_page_t __prev = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_prev);   \
                                                                        \
	if ((void *)(head) == (void *)__next) {                         \
	        (head)->prev = __pck_prev;                              \
	} else {                                                        \
	        __next->field.prev = __pck_prev;                        \
	}                                                               \
                                                                        \
	if ((void *)(head) == (void *)__prev) {                         \
	        (head)->next = __pck_next;                              \
	} else {                                                        \
	        __prev->field.next = __pck_next;                        \
	}                                                               \
                                                                        \
	(elt)->field.next = 0;                                          \
	(elt)->field.prev = 0;                                          \
MACRO_END
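
/*
 * Usage sketch (illustrative, not from the original header): the complement
 * of the insertion example above, pulling a page off the active queue before
 * it is moved elsewhere (page queues lock held):
 *
 *	vm_page_queue_remove(&vm_page_queue_active, mem, vmp_pageq);
 *	mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
 *	vm_page_active_count--;
 */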


/*
 *	Macro:		vm_page_queue_remove_first
 *
 *	Function:
 *		Remove and return the entry at the head of a vm_page queue.
 *
 *	Header:
 *		vm_page_queue_remove_first(head, entry, field)
 *		N.B. entry is returned by reference
 *
 *	This macro's arguments have to match the generic "queue_remove_first()" macro which is
 *	what is used for this on 32 bit kernels.
 */
#define vm_page_queue_remove_first(head, entry, field)                  \
MACRO_BEGIN                                                             \
	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);           \
	vm_page_packed_t __pck_next;                                     \
	vm_page_t __next;                                                \
                                                                        \
	(entry) = (vm_page_t)VM_PAGE_UNPACK_PTR((head)->next);          \
	__pck_next = (entry)->field.next;                                \
	__next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next);             \
                                                                        \
	if (__pck_head == __pck_next) {                                  \
	        (head)->prev = __pck_head;                               \
	} else {                                                         \
	        __next->field.prev = __pck_head;                         \
	}                                                                \
                                                                        \
	(head)->next = __pck_next;                                       \
	(entry)->field.next = 0;                                         \
	(entry)->field.prev = 0;                                         \
MACRO_END


#if defined(__x86_64__)
/*
 *	Macro:		vm_page_queue_remove_first_with_clump
 *	Function:
 *		Remove and return the entry at the head of the free queue
 *		end is set to 1 to indicate that we just returned the last page in a clump
 *
 *	Header:
 *		vm_page_queue_remove_first_with_clump(head, entry, end)
 *		entry is returned by reference
 *		end is returned by reference
 */
#define vm_page_queue_remove_first_with_clump(head, entry, end)          \
MACRO_BEGIN                                                               \
	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);            \
	vm_page_packed_t __pck_next;                                      \
	vm_page_t __next;                                                 \
                                                                          \
	(entry) = (vm_page_t)VM_PAGE_UNPACK_PTR((head)->next);           \
	__pck_next = (entry)->vmp_pageq.next;                             \
	__next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next);              \
                                                                          \
	(end) = 0;                                                        \
	if (__pck_head == __pck_next) {                                   \
	        (head)->prev = __pck_head;                                \
	        (end) = 1;                                                \
	} else {                                                          \
	        __next->vmp_pageq.prev = __pck_head;                      \
	        if (VM_PAGE_GET_CLUMP(entry) != VM_PAGE_GET_CLUMP(__next)) { \
	                (end) = 1;                                        \
	        }                                                         \
	}                                                                 \
                                                                          \
	(head)->next = __pck_next;                                        \
	(entry)->vmp_pageq.next = 0;                                      \
	(entry)->vmp_pageq.prev = 0;                                      \
MACRO_END
#endif

/*
 *	Macro:		vm_page_queue_end
 *	Function:
 *		Tests whether a new entry is really the end of
 *		the queue.
 *	Header:
 *		boolean_t vm_page_queue_end(q, qe)
 *			vm_page_queue_t q;
 *			vm_page_queue_entry_t qe;
 */
#define vm_page_queue_end(q, qe)        ((q) == (qe))


/*
 *	Macro:		vm_page_queue_empty
 *	Function:
 *		Tests whether a queue is empty.
 *	Header:
 *		boolean_t vm_page_queue_empty(q)
 *			vm_page_queue_t q;
 */
#define vm_page_queue_empty(q)          vm_page_queue_end((q), ((vm_page_queue_entry_t)vm_page_queue_first(q)))



/*
 *	Macro:		vm_page_queue_first
 *	Function:
 *		Returns the first entry in the queue.
 *	Header:
 *		uintptr_t vm_page_queue_first(q)
 *			vm_page_queue_t q;	\* IN *\
 */
#define vm_page_queue_first(q)          (VM_PAGE_UNPACK_PTR((q)->next))



/*
 *	Macro:		vm_page_queue_last
 *	Function:
 *		Returns the last entry in the queue.
 *	Header:
 *		vm_page_queue_entry_t queue_last(q)
 *			queue_t q;	\* IN *\
 */
#define vm_page_queue_last(q)           (VM_PAGE_UNPACK_PTR((q)->prev))



/*
 *	Macro:		vm_page_queue_next
 *	Function:
 *		Returns the entry after an item in the queue.
 *	Header:
 *		uintptr_t vm_page_queue_next(qc)
 *			vm_page_queue_t qc;
 */
#define vm_page_queue_next(qc)          (VM_PAGE_UNPACK_PTR((qc)->next))



/*
 *	Macro:		vm_page_queue_prev
 *	Function:
 *		Returns the entry before an item in the queue.
 *	Header:
 *		uintptr_t vm_page_queue_prev(qc)
 *			vm_page_queue_t qc;
 */
#define vm_page_queue_prev(qc)          (VM_PAGE_UNPACK_PTR((qc)->prev))



/*
 *	Macro:		vm_page_queue_iterate
 *	Function:
 *		iterate over each item in a vm_page queue.
 *		Generates a 'for' loop, setting elt to
 *		each item in turn (by reference).
 *	Header:
 *		vm_page_queue_iterate(q, elt, field)
 *			queue_t q;
 *			vm_page_t elt;
 *			<field> is the chain field in vm_page_t
 */
#define vm_page_queue_iterate(head, elt, field)                           \
	for ((elt) = (vm_page_t)vm_page_queue_first(head);                \
	    !vm_page_queue_end((head), (vm_page_queue_entry_t)(elt));     \
	    (elt) = (vm_page_t)vm_page_queue_next(&(elt)->field))

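/*
 * Usage sketch (illustrative, not from the original header): walking every
 * page on the global inactive queue under the page queues lock:
 *
 *	vm_page_t m;
 *
 *	vm_page_queue_iterate(&vm_page_queue_inactive, m, vmp_pageq) {
 *		if (m->vmp_dirty) {
 *			...
 *		}
 *	}
 *
 * The loop body must not unlink "m" from the queue, since the next pointer
 * is read from m's own linkage field at the end of each iteration.
 */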
#else // LP64

#define VM_VPLQ_ALIGNMENT               128
#define VM_PAGE_PACKED_PTR_ALIGNMENT    sizeof(vm_offset_t)
#define VM_PAGE_PACKED_ALIGNED
#define VM_PAGE_PACKED_PTR_BITS         32
#define VM_PAGE_PACKED_PTR_SHIFT        0
#define VM_PAGE_PACKED_PTR_BASE         0

#define VM_PAGE_PACKED_FROM_ARRAY       0

#define VM_PAGE_PACK_PTR(p)     (p)
#define VM_PAGE_UNPACK_PTR(p)   ((uintptr_t)(p))

#define VM_PAGE_OBJECT(p)       ((vm_object_t)((p)->vmp_object))
#define VM_PAGE_PACK_OBJECT(o)  ((vm_page_object_t)(VM_PAGE_PACK_PTR(o)))


#define VM_PAGE_ZERO_PAGEQ_ENTRY(p)     \
MACRO_BEGIN                             \
	(p)->vmp_pageq.next = 0;        \
	(p)->vmp_pageq.prev = 0;        \
MACRO_END

#define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p)       ((queue_entry_t)(p))

#define vm_page_remque                          remque
#define vm_page_enqueue_tail                    enqueue_tail
#define vm_page_queue_init                      queue_init
#define vm_page_queue_enter(h, e, f)            queue_enter(h, e, vm_page_t, f)
#define vm_page_queue_enter_first(h, e, f)      queue_enter_first(h, e, vm_page_t, f)
#define vm_page_queue_remove(h, e, f)           queue_remove(h, e, vm_page_t, f)
#define vm_page_queue_remove_first(h, e, f)     queue_remove_first(h, e, vm_page_t, f)
#define vm_page_queue_end                       queue_end
#define vm_page_queue_empty                     queue_empty
#define vm_page_queue_first                     queue_first
#define vm_page_queue_last                      queue_last
#define vm_page_queue_next                      queue_next
#define vm_page_queue_prev                      queue_prev
#define vm_page_queue_iterate(h, e, f)          queue_iterate(h, e, vm_page_t, f)

#endif // __LP64__



/*
 * VM_PAGE_MIN_SPECULATIVE_AGE_Q through VM_PAGE_MAX_SPECULATIVE_AGE_Q
 * represent a set of aging bins that are 'protected'...
 *
 * VM_PAGE_SPECULATIVE_AGED_Q is a list of the speculative pages that have
 * not yet been 'claimed' but have been aged out of the protective bins.
 * this occurs in vm_page_speculate when it advances to the next bin
 * and discovers that it is still occupied... at that point, all of the
 * pages in that bin are moved to the VM_PAGE_SPECULATIVE_AGED_Q. the pages
 * in that bin are all guaranteed to have reached at least the maximum age
 * we allow for a protected page... they can be older if there is no
 * memory pressure to pull them from the bin, or there are no new speculative pages
 * being generated to push them out.
 * this list is the one that vm_pageout_scan will prefer when looking
 * for pages to move to the underweight free list
 *
 * VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS
 * defines the amount of time a speculative page is normally
 * allowed to live in the 'protected' state (i.e. not available
 * to be stolen if vm_pageout_scan is running and looking for
 * pages)... however, if the total number of speculative pages
 * in the protected state exceeds our limit (defined in vm_pageout.c)
 * and there are none available in VM_PAGE_SPECULATIVE_AGED_Q, then
 * vm_pageout_scan is allowed to steal pages from the protected
 * bucket even if they are underage.
 *
 * vm_pageout_scan is also allowed to pull pages from a protected
 * bin if the bin has reached the "age of consent" we've set
 */
#define VM_PAGE_MAX_SPECULATIVE_AGE_Q   10
#define VM_PAGE_MIN_SPECULATIVE_AGE_Q   1
#define VM_PAGE_SPECULATIVE_AGED_Q      0

#define VM_PAGE_SPECULATIVE_Q_AGE_MS    500
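
/*
 * Worked example (added for clarity, not part of the original header): with
 * the defaults above, a speculative page ages through up to
 * VM_PAGE_MAX_SPECULATIVE_AGE_Q bins of VM_PAGE_SPECULATIVE_Q_AGE_MS each,
 * i.e. 10 * 500 ms = roughly 5 seconds of protection before it normally
 * becomes reclaimable via VM_PAGE_SPECULATIVE_AGED_Q, barring the memory
 * pressure overrides described above.
 */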

struct vm_speculative_age_q {
    /*
     * memory queue for speculative pages via clustered pageins
     */
    vm_page_queue_head_t        age_q;
    mach_timespec_t             age_ts;
} VM_PAGE_PACKED_ALIGNED;



extern
struct vm_speculative_age_q     vm_page_queue_speculative[];

extern int                      speculative_steal_index;
extern int                      speculative_age_index;
extern unsigned int             vm_page_speculative_q_age_ms;


typedef struct vm_locks_array {
    char        pad  __attribute__ ((aligned(64)));
    lck_mtx_t   vm_page_queue_lock2 __attribute__ ((aligned(64)));
    lck_mtx_t   vm_page_queue_free_lock2 __attribute__ ((aligned(64)));
    char        pad2  __attribute__ ((aligned(64)));
} vm_locks_array_t;


#if CONFIG_BACKGROUND_QUEUE
extern void     vm_page_assign_background_state(vm_page_t mem);
extern void     vm_page_update_background_state(vm_page_t mem);
extern void     vm_page_add_to_backgroundq(vm_page_t mem, boolean_t first);
extern void     vm_page_remove_from_backgroundq(vm_page_t mem);
#endif

#define VM_PAGE_WIRED(m)        ((m)->vmp_q_state == VM_PAGE_IS_WIRED)
#define NEXT_PAGE(m)            ((m)->vmp_snext)
#define NEXT_PAGE_PTR(m)        (&(m)->vmp_snext)

/*
 * XXX	The unusual bit should not be necessary.  Most of the bit
 * XXX	fields above really want to be masks.
 */

/*
 *	For debugging, this macro can be defined to perform
 *	some useful check on a page structure.
 *	INTENTIONALLY left as a no-op so that the
 *	current call-sites can be left intact for future uses.
 */

#define VM_PAGE_CHECK(mem)                      \
	MACRO_BEGIN                             \
	MACRO_END

/*	Page coloring:
 *
 *	The free page list is actually n lists, one per color,
 *	where the number of colors is a function of the machine's
 *	cache geometry set at system initialization.  To disable
 *	coloring, set vm_colors to 1 and vm_color_mask to 0.
 *	The boot-arg "colors" may be used to override vm_colors.
 *	Note that there is little harm in having more colors than needed.
 */

#define MAX_COLORS      128
#define DEFAULT_COLORS  32

extern
unsigned int    vm_colors;              /* must be in range 1..MAX_COLORS */
extern
unsigned int    vm_color_mask;          /* must be (vm_colors-1) */
extern
unsigned int    vm_cache_geometry_colors;       /* optimal #colors based on cache geometry */
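
/*
 * Illustrative example (added for clarity, not part of the original header):
 * with vm_colors == 32 and vm_color_mask == 31, a page whose physical page
 * number is 0x12345 lands on free-list color (0x12345 & 31) == 5, so pages
 * that would collide in a physically indexed cache are spread across
 * different free lists.
 */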

/*
 * Wired memory is a very limited resource and we can't let users exhaust it
 * and deadlock the entire system.  We enforce the following limits:
 *
 * vm_per_task_user_wire_limit
 *	how much memory can be user-wired in one user task
 *
 * vm_global_user_wire_limit (default: same as vm_per_task_user_wire_limit)
 *	how much memory can be user-wired in all user tasks
 *
 * These values are set to defaults based on the number of pages managed
 * by the VM system. They can be overridden via sysctls.
 * See kmem_set_user_wire_limits for details on the default values.
 *
 * Regardless of the amount of memory in the system, we never reserve
 * more than VM_NOT_USER_WIREABLE_MAX bytes as unlockable.
 */
#if defined(__LP64__)
#define VM_NOT_USER_WIREABLE_MAX (32ULL*1024*1024*1024)     /* 32GB */
#else
#define VM_NOT_USER_WIREABLE_MAX (1UL*1024*1024*1024)       /* 1GB */
#endif /* __LP64__ */
extern
vm_map_size_t   vm_per_task_user_wire_limit;
extern
vm_map_size_t   vm_global_user_wire_limit;
extern
uint64_t        vm_add_wire_count_over_global_limit;
extern
uint64_t        vm_add_wire_count_over_user_limit;

/*
 *	Each pageable resident page falls into one of three lists:
 *
 *	free
 *		Available for allocation now.  The free list is
 *		actually an array of lists, one per color.
 *	inactive
 *		Not referenced in any map, but still has an
 *		object/offset-page mapping, and may be dirty.
 *		This is the list of pages that should be
 *		paged out next.  There are actually two
 *		inactive lists, one for pages brought in from
 *		disk or other backing store, and another
 *		for "zero-filled" pages.  See vm_pageout_scan()
 *		for the distinction and usage.
 *	active
 *		A list of pages which have been placed in
 *		at least one physical map.  This list is
 *		ordered, in LRU-like fashion.
 */


#define VPL_LOCK_SPIN 1

struct vpl {
    vm_page_queue_head_t        vpl_queue;
    unsigned int        vpl_count;
    unsigned int        vpl_internal_count;
    unsigned int        vpl_external_count;
#ifdef  VPL_LOCK_SPIN
    lck_spin_t          vpl_lock;
#else
    lck_mtx_t           vpl_lock;
    lck_mtx_ext_t       vpl_lock_ext;
#endif
};

extern
struct vpl     * /* __zpercpu */ vm_page_local_q;
extern
unsigned int    vm_page_local_q_soft_limit;
extern
unsigned int    vm_page_local_q_hard_limit;
extern
vm_locks_array_t vm_page_locks;

extern
vm_page_queue_head_t    vm_lopage_queue_free;           /* low memory free queue */
extern
vm_page_queue_head_t    vm_page_queue_active;           /* active memory queue */
extern
vm_page_queue_head_t    vm_page_queue_inactive;         /* inactive memory queue for normal pages */
#if CONFIG_SECLUDED_MEMORY
extern
vm_page_queue_head_t    vm_page_queue_secluded;         /* reclaimable pages secluded for Camera */
#endif /* CONFIG_SECLUDED_MEMORY */
extern
vm_page_queue_head_t    vm_page_queue_cleaned;          /* clean-queue inactive memory */
extern
vm_page_queue_head_t    vm_page_queue_anonymous;        /* inactive memory queue for anonymous pages */
extern
vm_page_queue_head_t    vm_page_queue_throttled;        /* memory queue for throttled pageout pages */

extern
queue_head_t    vm_objects_wired;
extern
lck_spin_t      vm_objects_wired_lock;

#if CONFIG_BACKGROUND_QUEUE

#define VM_PAGE_BACKGROUND_TARGET_MAX   50000

#define VM_PAGE_BG_DISABLED     0
#define VM_PAGE_BG_LEVEL_1      1

extern
vm_page_queue_head_t    vm_page_queue_background;
extern
uint64_t        vm_page_background_promoted_count;
extern
uint32_t        vm_page_background_count;
extern
uint32_t        vm_page_background_target;
extern
uint32_t        vm_page_background_internal_count;
extern
uint32_t        vm_page_background_external_count;
extern
uint32_t        vm_page_background_mode;
extern
uint32_t        vm_page_background_exclude_external;

#endif

extern
vm_offset_t     first_phys_addr;        /* physical address for first_page */
extern
vm_offset_t     last_phys_addr;         /* physical address for last_page */

extern
unsigned int    vm_page_free_count;     /* How many pages are free? (sum of all colors) */
extern
unsigned int    vm_page_active_count;   /* How many pages are active? */
extern
unsigned int    vm_page_inactive_count; /* How many pages are inactive? */
extern
unsigned int    vm_page_kernelcache_count; /* How many pages are used for the kernelcache? */
#if CONFIG_SECLUDED_MEMORY
extern
unsigned int    vm_page_secluded_count; /* How many pages are secluded? */
extern
unsigned int    vm_page_secluded_count_free; /* how many of them are free? */
extern
unsigned int    vm_page_secluded_count_inuse; /* how many of them are in use? */
/*
 * We keep filling the secluded pool with new eligible pages and
 * we can overshoot our target by a lot.
 * When there's memory pressure, vm_pageout_scan() will re-balance the queues,
 * pushing the extra secluded pages to the active or free queue.
 * Since these "over target" secluded pages are actually "available", jetsam
 * should consider them as such, so make them visible to jetsam via the
 * "vm_page_secluded_count_over_target" counter and update it whenever we
 * update vm_page_secluded_count or vm_page_secluded_target.
 */
extern
unsigned int    vm_page_secluded_count_over_target;
#define VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE()                     \
	MACRO_BEGIN                                                     \
	if (vm_page_secluded_count > vm_page_secluded_target) {        \
	        vm_page_secluded_count_over_target =                    \
	                (vm_page_secluded_count - vm_page_secluded_target); \
	} else {                                                        \
	        vm_page_secluded_count_over_target = 0;                 \
	}                                                               \
	MACRO_END
#define VM_PAGE_SECLUDED_COUNT_OVER_TARGET() vm_page_secluded_count_over_target
#else /* CONFIG_SECLUDED_MEMORY */
#define VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE()     \
	MACRO_BEGIN                                     \
	MACRO_END
#define VM_PAGE_SECLUDED_COUNT_OVER_TARGET() 0
#endif /* CONFIG_SECLUDED_MEMORY */
extern
unsigned int    vm_page_cleaned_count;  /* How many pages are in the clean queue? */
extern
unsigned int    vm_page_throttled_count;/* How many inactives are throttled */
extern
unsigned int    vm_page_speculative_count;      /* How many speculative pages are unclaimed? */
extern unsigned int     vm_page_pageable_internal_count;
extern unsigned int     vm_page_pageable_external_count;
extern
unsigned int    vm_page_xpmapped_external_count;        /* How many pages are mapped executable? */
extern
unsigned int    vm_page_external_count; /* How many pages are file-backed? */
extern
unsigned int    vm_page_internal_count; /* How many pages are anonymous? */
extern
unsigned int    vm_page_wire_count;             /* How many pages are wired? */
extern
unsigned int    vm_page_wire_count_initial;     /* How many pages wired at startup */
extern
unsigned int    vm_page_wire_count_on_boot;     /* even earlier than _initial */
extern
unsigned int    vm_page_free_target;            /* How many do we want free? */
extern
unsigned int    vm_page_free_min;               /* When to wakeup pageout */
extern
unsigned int    vm_page_throttle_limit;         /* When to throttle new page creation */
extern
unsigned int    vm_page_inactive_target;        /* How many do we want inactive? */
#if CONFIG_SECLUDED_MEMORY
extern
unsigned int    vm_page_secluded_target;        /* How many do we want secluded? */
#endif /* CONFIG_SECLUDED_MEMORY */
extern
unsigned int    vm_page_anonymous_min;          /* When it's ok to pre-clean */
extern
unsigned int    vm_page_free_reserved;          /* How many pages reserved to do pageout */
extern
unsigned int    vm_page_gobble_count;
extern
unsigned int    vm_page_stolen_count;           /* Count of stolen pages not accounted in zones */
extern
unsigned int    vm_page_kern_lpage_count;       /* Count of large pages used in early boot */

b0d623f7 1367#if DEVELOPMENT || DEBUG
2d21ac55 1368extern
0a7de745 1369unsigned int vm_page_speculative_used;
b0d623f7
A
1370#endif
1371
55e303ae 1372extern
0a7de745 1373unsigned int vm_page_purgeable_count;/* How many pages are purgeable now ? */
55e303ae 1374extern
0a7de745 1375unsigned int vm_page_purgeable_wired_count;/* How many purgeable pages are wired now ? */
b0d623f7 1376extern
0a7de745 1377uint64_t vm_page_purged_count; /* How many pages got purged so far ? */
1c79356b 1378
0a7de745
A
1379extern unsigned int vm_page_free_wanted;
1380/* how many threads are waiting for memory */
1c79356b 1381
0a7de745
A
1382extern unsigned int vm_page_free_wanted_privileged;
1383/* how many VM privileged threads are waiting for memory */
39037602 1384#if CONFIG_SECLUDED_MEMORY
0a7de745
A
1385extern unsigned int vm_page_free_wanted_secluded;
1386/* how many threads are waiting for secluded memory */
39037602 1387#endif /* CONFIG_SECLUDED_MEMORY */
2d21ac55 1388
0a7de745
A
1389extern const ppnum_t vm_page_fictitious_addr;
1390/* (fake) phys_addr of fictitious pages */
1c79356b 1391
0a7de745
A
1392extern const ppnum_t vm_page_guard_addr;
1393/* (fake) phys_addr of guard pages */
2d21ac55
A
1394
1395
0a7de745 1396extern boolean_t vm_page_deactivate_hint;
91447636 1397
0a7de745 1398extern int vm_compressor_mode;
39236c6e 1399
0b4c1975 1400/*
0a7de745
A
1401 * Defaults to true, so highest memory is used first.
1402 */
1403extern boolean_t vm_himemory_mode;
1404
1405extern boolean_t vm_lopage_needed;
1406extern uint32_t vm_lopage_free_count;
1407extern uint32_t vm_lopage_free_limit;
1408extern uint32_t vm_lopage_lowater;
1409extern boolean_t vm_lopage_refill;
1410extern uint64_t max_valid_dma_address;
1411extern ppnum_t max_valid_low_ppnum;
0c530ab8 1412
1c79356b
A
1413/*
1414 * Prototypes for functions exported by this module.
1415 */
0a7de745
A
1416extern void vm_page_bootstrap(
1417 vm_offset_t *startp,
1418 vm_offset_t *endp);
1419
f427ee49 1420extern void vm_page_init_local_q(unsigned int num_cpus);
b0d623f7 1421
0a7de745
A
1422extern void vm_page_create(
1423 ppnum_t start,
1424 ppnum_t end);
1c79356b 1425
c3c9b80d
A
1426extern void vm_page_create_retired(
1427 ppnum_t pn);
1428
0a7de745
A
1429extern vm_page_t kdp_vm_page_lookup(
1430 vm_object_t object,
1431 vm_object_offset_t offset);
3e170ce0 1432
0a7de745
A
1433extern vm_page_t vm_page_lookup(
1434 vm_object_t object,
1435 vm_object_offset_t offset);
1c79356b 1436
c3c9b80d 1437extern vm_page_t vm_page_grab_fictitious(boolean_t canwait);
1c79356b 1438
c3c9b80d 1439extern vm_page_t vm_page_grab_guard(boolean_t canwait);
2d21ac55 1440
0a7de745
A
1441extern void vm_page_release_fictitious(
1442 vm_page_t page);
1c79356b 1443
0a7de745 1444extern void vm_free_delayed_pages(void);
1c79356b 1445
c3c9b80d 1446extern bool vm_pool_low(void);
0a7de745
A
1447
1448extern vm_page_t vm_page_grab(void);
1449extern vm_page_t vm_page_grab_options(int flags);
1450
1451#define VM_PAGE_GRAB_OPTIONS_NONE 0x00000000
39037602 1452#if CONFIG_SECLUDED_MEMORY
0a7de745 1453#define VM_PAGE_GRAB_SECLUDED 0x00000001
39037602 1454#endif /* CONFIG_SECLUDED_MEMORY */
0a7de745 1455#define VM_PAGE_GRAB_Q_LOCK_HELD 0x00000002
1c79356b 1456
0a7de745 1457extern vm_page_t vm_page_grablo(void);
0c530ab8 1458
0a7de745
A
1459extern void vm_page_release(
1460 vm_page_t page,
1461 boolean_t page_queues_locked);
1c79356b 1462
0a7de745
A
1463extern boolean_t vm_page_wait(
1464 int interruptible );

extern vm_page_t vm_page_alloc(
        vm_object_t object,
        vm_object_offset_t offset);

extern void vm_page_init(
        vm_page_t page,
        ppnum_t phys_page,
        boolean_t lopage);

extern void vm_page_free(
        vm_page_t page);

extern void vm_page_free_unlocked(
        vm_page_t page,
        boolean_t remove_from_hash);

extern void vm_page_balance_inactive(
        int max_to_move);

extern void vm_page_activate(
        vm_page_t page);

extern void vm_page_deactivate(
        vm_page_t page);

extern void vm_page_deactivate_internal(
        vm_page_t page,
        boolean_t clear_hw_reference);

extern void vm_page_enqueue_cleaned(vm_page_t page);

extern void vm_page_lru(
        vm_page_t page);

extern void vm_page_speculate(
        vm_page_t page,
        boolean_t new);

extern void vm_page_speculate_ageit(
        struct vm_speculative_age_q *aq);

extern void vm_page_reactivate_all_throttled(void);

extern void vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks);

extern void vm_page_rename(
        vm_page_t page,
        vm_object_t new_object,
        vm_object_offset_t new_offset);

extern void vm_page_insert(
        vm_page_t page,
        vm_object_t object,
        vm_object_offset_t offset);

extern void vm_page_insert_wired(
        vm_page_t page,
        vm_object_t object,
        vm_object_offset_t offset,
        vm_tag_t tag);

extern void vm_page_insert_internal(
        vm_page_t page,
        vm_object_t object,
        vm_object_offset_t offset,
        vm_tag_t tag,
        boolean_t queues_lock_held,
        boolean_t insert_in_hash,
        boolean_t batch_pmap_op,
        boolean_t delayed_accounting,
        uint64_t *delayed_ledger_update);

extern void vm_page_replace(
        vm_page_t mem,
        vm_object_t object,
        vm_object_offset_t offset);

extern void vm_page_remove(
        vm_page_t page,
        boolean_t remove_from_hash);

extern void vm_page_zero_fill(
        vm_page_t page);

extern void vm_page_part_zero_fill(
        vm_page_t m,
        vm_offset_t m_pa,
        vm_size_t len);

extern void vm_page_copy(
        vm_page_t src_page,
        vm_page_t dest_page);

extern void vm_page_part_copy(
        vm_page_t src_m,
        vm_offset_t src_pa,
        vm_page_t dst_m,
        vm_offset_t dst_pa,
        vm_size_t len);

extern void vm_page_wire(
        vm_page_t page,
        vm_tag_t tag,
        boolean_t check_memorystatus);

extern void vm_page_unwire(
        vm_page_t page,
        boolean_t queueit);

extern void vm_set_page_size(void);

extern void vm_page_gobble(
        vm_page_t page);

extern void vm_page_validate_cs(
        vm_page_t page,
        vm_map_size_t fault_page_size,
        vm_map_offset_t fault_phys_offset);
extern void vm_page_validate_cs_mapped(
        vm_page_t page,
        vm_map_size_t fault_page_size,
        vm_map_offset_t fault_phys_offset,
        const void *kaddr);
extern void vm_page_validate_cs_mapped_slow(
        vm_page_t page,
        const void *kaddr);
extern void vm_page_validate_cs_mapped_chunk(
        vm_page_t page,
        const void *kaddr,
        vm_offset_t chunk_offset,
        vm_size_t chunk_size,
        boolean_t *validated,
        unsigned *tainted);

extern void vm_page_free_prepare_queues(
        vm_page_t page);

extern void vm_page_free_prepare_object(
        vm_page_t page,
        boolean_t remove_from_hash);

#if CONFIG_IOSCHED
extern wait_result_t vm_page_sleep(
        vm_object_t object,
        vm_page_t m,
        int interruptible);
#endif

extern void vm_pressure_response(void);

#if CONFIG_JETSAM
extern void memorystatus_pages_update(unsigned int pages_avail);

#define VM_CHECK_MEMORYSTATUS do { \
        memorystatus_pages_update( \
                vm_page_pageable_external_count + \
                vm_page_free_count + \
                VM_PAGE_SECLUDED_COUNT_OVER_TARGET() + \
                (VM_DYNAMIC_PAGING_ENABLED() ? 0 : vm_page_purgeable_count) \
                ); \
        } while(0)

#else /* CONFIG_JETSAM */

#if !XNU_TARGET_OS_OSX

#define VM_CHECK_MEMORYSTATUS do {} while(0)

#else /* !XNU_TARGET_OS_OSX */

#define VM_CHECK_MEMORYSTATUS vm_pressure_response()

#endif /* !XNU_TARGET_OS_OSX */

#endif /* CONFIG_JETSAM */

/*
 * Functions implemented as macros. m->vmp_wanted and m->vmp_busy are
 * protected by the object lock.
 */

#if !XNU_TARGET_OS_OSX
#define SET_PAGE_DIRTY(m, set_pmap_modified) \
        MACRO_BEGIN \
        vm_page_t __page__ = (m); \
        if (__page__->vmp_pmapped == TRUE && \
            __page__->vmp_wpmapped == TRUE && \
            __page__->vmp_dirty == FALSE && \
            (set_pmap_modified)) { \
                pmap_set_modify(VM_PAGE_GET_PHYS_PAGE(__page__)); \
        } \
        __page__->vmp_dirty = TRUE; \
        MACRO_END
#else /* !XNU_TARGET_OS_OSX */
#define SET_PAGE_DIRTY(m, set_pmap_modified) \
        MACRO_BEGIN \
        vm_page_t __page__ = (m); \
        __page__->vmp_dirty = TRUE; \
        MACRO_END
#endif /* !XNU_TARGET_OS_OSX */
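
/*
 * Illustrative sketch (editor's addition): SET_PAGE_DIRTY() is used with the
 * page's object lock held, after the page contents have been modified.  The
 * second argument controls whether pmap_set_modify() is also applied on the
 * configurations that propagate it.  The helper name is hypothetical.
 */
#if 0 /* example only -- not compiled */
static void
example_mark_page_modified(vm_page_t m)
{
        /* caller holds the object lock and has just written to the page */
        SET_PAGE_DIRTY(m, TRUE);
}
#endif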

#define PAGE_ASSERT_WAIT(m, interruptible) \
        (((m)->vmp_wanted = TRUE), \
        assert_wait((event_t) (m), (interruptible)))

#if CONFIG_IOSCHED
#define PAGE_SLEEP(o, m, interruptible) \
        vm_page_sleep(o, m, interruptible)
#else
#define PAGE_SLEEP(o, m, interruptible) \
        (((m)->vmp_wanted = TRUE), \
        thread_sleep_vm_object((o), (m), (interruptible)))
#endif

#define PAGE_WAKEUP_DONE(m) \
        MACRO_BEGIN \
        (m)->vmp_busy = FALSE; \
        if ((m)->vmp_wanted) { \
                (m)->vmp_wanted = FALSE; \
                thread_wakeup((event_t) (m)); \
        } \
        MACRO_END

#define PAGE_WAKEUP(m) \
        MACRO_BEGIN \
        if ((m)->vmp_wanted) { \
                (m)->vmp_wanted = FALSE; \
                thread_wakeup((event_t) (m)); \
        } \
        MACRO_END
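
/*
 * Illustrative sketch (editor's addition): the vmp_busy / vmp_wanted
 * handshake the macros above implement.  A thread that finds a page busy
 * sleeps on it; the thread that owns the busy bit clears it and wakes the
 * waiters with PAGE_WAKEUP_DONE() when it is finished.  The helper names
 * are hypothetical, and real callers typically re-lookup the page after
 * waking, since it may have been freed or replaced while they slept.
 */
#if 0 /* example only -- not compiled */
/* waiter side: object lock held, page found busy */
static void
example_wait_for_page(vm_object_t object, vm_page_t m)
{
        while (m->vmp_busy) {
                /* the object lock is released while sleeping and retaken on wakeup */
                PAGE_SLEEP(object, m, THREAD_UNINT);
        }
}

/* owner side: object lock held, done operating on the busy page */
static void
example_finish_with_page(vm_page_t m)
{
        PAGE_WAKEUP_DONE(m); /* clears vmp_busy and wakes any waiters */
}
#endif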

#define VM_PAGE_FREE(p) \
        MACRO_BEGIN \
        vm_page_free_unlocked(p, TRUE); \
        MACRO_END

#define VM_PAGE_WAIT() ((void)vm_page_wait(THREAD_UNINT))

#define vm_page_queue_lock (vm_page_locks.vm_page_queue_lock2)
#define vm_page_queue_free_lock (vm_page_locks.vm_page_queue_free_lock2)

#define vm_page_lock_queues() lck_mtx_lock(&vm_page_queue_lock)
#define vm_page_trylock_queues() lck_mtx_try_lock(&vm_page_queue_lock)
#define vm_page_unlock_queues() lck_mtx_unlock(&vm_page_queue_lock)

#define vm_page_lockspin_queues() lck_mtx_lock_spin(&vm_page_queue_lock)
#define vm_page_trylockspin_queues() lck_mtx_try_lock_spin(&vm_page_queue_lock)
#define vm_page_lockconvert_queues() lck_mtx_convert_spin(&vm_page_queue_lock)

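/*
 * Illustrative sketch (editor's addition): page queue state changes such as
 * vm_page_activate() are performed with the page queues lock held, using the
 * spin variant for short critical sections.  The helper name is hypothetical.
 */
#if 0 /* example only -- not compiled */
static void
example_activate_page(vm_page_t m)
{
        vm_page_lockspin_queues();
        vm_page_activate(m);
        vm_page_unlock_queues();
}
#endif
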
#ifdef VPL_LOCK_SPIN
extern lck_grp_t vm_page_lck_grp_local;

#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_spin_init(&vlq->vpl_lock, vpl_grp, vpl_attr)
#define VPL_LOCK(vpl) lck_spin_lock_grp(vpl, &vm_page_lck_grp_local)
#define VPL_UNLOCK(vpl) lck_spin_unlock(vpl)
#else
#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_mtx_init_ext(&vlq->vpl_lock, &vlq->vpl_lock_ext, vpl_grp, vpl_attr)
#define VPL_LOCK(vpl) lck_mtx_lock_spin(vpl)
#define VPL_UNLOCK(vpl) lck_mtx_unlock(vpl)
#endif

#if DEVELOPMENT || DEBUG
#define VM_PAGE_SPECULATIVE_USED_ADD() \
        MACRO_BEGIN \
        OSAddAtomic(1, &vm_page_speculative_used); \
        MACRO_END
#else
#define VM_PAGE_SPECULATIVE_USED_ADD()
#endif

#define VM_PAGE_CONSUME_CLUSTERED(mem) \
        MACRO_BEGIN \
        ppnum_t __phys_page; \
        __phys_page = VM_PAGE_GET_PHYS_PAGE(mem); \
        pmap_lock_phys_page(__phys_page); \
        if (mem->vmp_clustered) { \
                vm_object_t o; \
                o = VM_PAGE_OBJECT(mem); \
                assert(o); \
                o->pages_used++; \
                mem->vmp_clustered = FALSE; \
                VM_PAGE_SPECULATIVE_USED_ADD(); \
        } \
        pmap_unlock_phys_page(__phys_page); \
        MACRO_END

#define VM_PAGE_COUNT_AS_PAGEIN(mem) \
        MACRO_BEGIN \
        { \
                vm_object_t o; \
                o = VM_PAGE_OBJECT(mem); \
                DTRACE_VM2(pgin, int, 1, (uint64_t *), NULL); \
                current_task()->pageins++; \
                if (o->internal) { \
                        DTRACE_VM2(anonpgin, int, 1, (uint64_t *), NULL); \
                } else { \
                        DTRACE_VM2(fspgin, int, 1, (uint64_t *), NULL); \
                } \
        } \
        MACRO_END

/* adjust for stolen pages accounted elsewhere */
#define VM_PAGE_MOVE_STOLEN(page_count) \
        MACRO_BEGIN \
        vm_page_stolen_count -= (page_count); \
        vm_page_wire_count_initial -= (page_count); \
        MACRO_END

#define DW_vm_page_unwire 0x01
#define DW_vm_page_wire 0x02
#define DW_vm_page_free 0x04
#define DW_vm_page_activate 0x08
#define DW_vm_page_deactivate_internal 0x10
#define DW_vm_page_speculate 0x20
#define DW_vm_page_lru 0x40
#define DW_vm_pageout_throttle_up 0x80
#define DW_PAGE_WAKEUP 0x100
#define DW_clear_busy 0x200
#define DW_clear_reference 0x400
#define DW_set_reference 0x800
#define DW_move_page 0x1000
#define DW_VM_PAGE_QUEUES_REMOVE 0x2000
#define DW_enqueue_cleaned 0x4000
#define DW_vm_phantom_cache_update 0x8000

struct vm_page_delayed_work {
        vm_page_t dw_m;
        int dw_mask;
};

#define DEFAULT_DELAYED_WORK_LIMIT 32

struct vm_page_delayed_work_ctx {
        struct vm_page_delayed_work dwp[DEFAULT_DELAYED_WORK_LIMIT];
        thread_t delayed_owner;
};

void vm_page_do_delayed_work(vm_object_t object, vm_tag_t tag, struct vm_page_delayed_work *dwp, int dw_count);

extern unsigned int vm_max_delayed_work_limit;

extern void vm_page_delayed_work_init_ctx(void);

#define DELAYED_WORK_LIMIT(max) ((vm_max_delayed_work_limit >= max ? max : vm_max_delayed_work_limit))

/*
 * vm_page_do_delayed_work may need to drop the object lock...
 * if it does, the pages it is looking at need to be held stable
 * via the busy bit, so if busy isn't already set, we set it and
 * ask vm_page_do_delayed_work to clear it and wake up anyone who
 * might have blocked on it once we're done processing the page.
 */

#define VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_cnt) \
        MACRO_BEGIN \
        if (mem->vmp_busy == FALSE) { \
                mem->vmp_busy = TRUE; \
                if (!(dwp->dw_mask & DW_vm_page_free)) \
                        dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); \
        } \
        dwp->dw_m = mem; \
        dwp++; \
        dw_cnt++; \
        MACRO_END
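
/*
 * Illustrative sketch (editor's addition): the intended batching pattern.
 * Work is accumulated per page with VM_PAGE_ADD_DELAYED_WORK() and flushed
 * through vm_page_do_delayed_work() once the batch fills, so the page queues
 * lock is taken once per batch rather than once per page.  The helper name
 * and iteration details are assumed; only the declarations above are used.
 */
#if 0 /* example only -- not compiled */
static void
example_deactivate_pages(vm_object_t object, vm_page_t *pages, int npages, vm_tag_t tag)
{
        struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
        struct vm_page_delayed_work *dwp = &dw_array[0];
        int dw_count = 0;
        int dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
        int i;

        /* caller holds the object lock across the whole batch */
        for (i = 0; i < npages; i++) {
                vm_page_t m = pages[i];

                dwp->dw_mask = DW_vm_page_deactivate_internal | DW_clear_reference;
                VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);

                if (dw_count >= dw_limit) {
                        vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
                        dwp = &dw_array[0];
                        dw_count = 0;
                }
        }
        if (dw_count) {
                vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
        }
}
#endif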

extern vm_page_t vm_object_page_grab(vm_object_t);

#if VM_PAGE_BUCKETS_CHECK
extern void vm_page_buckets_check(void);
#endif /* VM_PAGE_BUCKETS_CHECK */

extern void vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_backgroundq);
extern void vm_page_remove_internal(vm_page_t page);
extern void vm_page_enqueue_inactive(vm_page_t mem, boolean_t first);
extern void vm_page_enqueue_active(vm_page_t mem, boolean_t first);
extern void vm_page_check_pageable_safe(vm_page_t page);

#if CONFIG_SECLUDED_MEMORY
extern uint64_t secluded_shutoff_trigger;
extern uint64_t secluded_shutoff_headroom;
extern void start_secluded_suppression(task_t);
extern void stop_secluded_suppression(task_t);
#endif /* CONFIG_SECLUDED_MEMORY */

extern void vm_retire_boot_pages(void);
extern uint32_t vm_retired_pages_count(void);

#endif /* _VM_VM_PAGE_H_ */