/*
 * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_page.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Resident memory system definitions.
 */

#ifndef _VM_VM_PAGE_H_
#define _VM_VM_PAGE_H_

#include <debug.h>
#include <vm/vm_options.h>
#include <vm/vm_protos.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/memory_object_types.h>	/* for VMP_CS_BITS... */


#if defined(__LP64__)

/*
 * in order to make the size of a vm_page_t 64 bytes (cache line size for both arm64 and x86_64)
 * we'll keep the next_m pointer packed... as long as the kernel virtual space where we allocate
 * vm_page_t's from doesn't span more than 256 Gbytes, we're safe. There are live tests in the
 * vm_page_t array allocation and the zone init code to determine if we can safely pack and unpack
 * pointers from the 2 ends of these spaces
 */
typedef uint32_t	vm_page_packed_t;

struct vm_page_packed_queue_entry {
	vm_page_packed_t	next;	/* next element */
	vm_page_packed_t	prev;	/* previous element */
};

typedef struct vm_page_packed_queue_entry	*vm_page_queue_t;
typedef struct vm_page_packed_queue_entry	vm_page_queue_head_t;
typedef struct vm_page_packed_queue_entry	vm_page_queue_chain_t;
typedef struct vm_page_packed_queue_entry	*vm_page_queue_entry_t;

typedef vm_page_packed_t	vm_page_object_t;

#else // __LP64__

/*
 * we can't do the packing trick on 32 bit architectures
 * so just turn the macros into noops.
 */
typedef struct vm_page	*vm_page_packed_t;

#define vm_page_queue_t		queue_t
#define vm_page_queue_head_t	queue_head_t
#define vm_page_queue_chain_t	queue_chain_t
#define vm_page_queue_entry_t	queue_entry_t

#define vm_page_object_t	vm_object_t
#endif // __LP64__


#include <vm/vm_object.h>
#include <kern/queue.h>
#include <kern/locks.h>

#include <kern/macro_help.h>
#include <libkern/OSAtomic.h>



#define VM_PAGE_COMPRESSOR_COUNT	(compressor_object->resident_page_count)

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A hash table bucket used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).  [Some fields require that both locks be held to
 *	change that field; holding either lock is sufficient to read.]
 */

#define VM_PAGE_NULL	((vm_page_t) 0)

extern char	vm_page_inactive_states[];
extern char	vm_page_pageable_states[];
extern char	vm_page_non_speculative_pageable_states[];
extern char	vm_page_active_or_inactive_states[];


#define VM_PAGE_INACTIVE(m)			(vm_page_inactive_states[m->vmp_q_state])
#define VM_PAGE_PAGEABLE(m)			(vm_page_pageable_states[m->vmp_q_state])
#define VM_PAGE_NON_SPECULATIVE_PAGEABLE(m)	(vm_page_non_speculative_pageable_states[m->vmp_q_state])
#define VM_PAGE_ACTIVE_OR_INACTIVE(m)		(vm_page_active_or_inactive_states[m->vmp_q_state])


#define VM_PAGE_NOT_ON_Q		0	/* page is not present on any queue, nor is it wired... mainly a transient state */
#define VM_PAGE_IS_WIRED		1	/* page is currently wired */
#define VM_PAGE_USED_BY_COMPRESSOR	2	/* page is in use by the compressor to hold compressed data */
#define VM_PAGE_ON_FREE_Q		3	/* page is on the main free queue */
#define VM_PAGE_ON_FREE_LOCAL_Q		4	/* page is on one of the per-CPU free queues */
#define VM_PAGE_ON_FREE_LOPAGE_Q	5	/* page is on the lopage pool free list */
#define VM_PAGE_ON_THROTTLED_Q		6	/* page is on the throttled queue... we stash anonymous pages here when not paging */
#define VM_PAGE_ON_PAGEOUT_Q		7	/* page is on one of the pageout queues (internal/external) awaiting processing */
#define VM_PAGE_ON_SPECULATIVE_Q	8	/* page is on one of the speculative queues */
#define VM_PAGE_ON_ACTIVE_LOCAL_Q	9	/* page has recently been created and is being held in one of the per-CPU local queues */
#define VM_PAGE_ON_ACTIVE_Q		10	/* page is in global active queue */
#define VM_PAGE_ON_INACTIVE_INTERNAL_Q	11	/* page is on the inactive internal queue a.k.a. anonymous queue */
#define VM_PAGE_ON_INACTIVE_EXTERNAL_Q	12	/* page is on the inactive external queue a.k.a. file backed queue */
#define VM_PAGE_ON_INACTIVE_CLEANED_Q	13	/* page has been cleaned to a backing file and is ready to be stolen */
#define VM_PAGE_ON_SECLUDED_Q		14	/* page is on secluded queue */
#define VM_PAGE_Q_STATE_LAST_VALID_VALUE	14	/* we currently use 4 bits for the state... don't let this go beyond 15 */

#define VM_PAGE_Q_STATE_ARRAY_SIZE	(VM_PAGE_Q_STATE_LAST_VALID_VALUE+1)
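
/*
 * Illustrative sketch (an assumption about how these arrays are populated
 * at bootstrap; this header only declares them): each of the state arrays
 * above is a boolean lookup table indexed by vmp_q_state, e.g.
 *
 *	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
 *	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
 *	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q]  = 1;
 *
 * so VM_PAGE_INACTIVE(m) and its siblings reduce to a single byte load
 * rather than a chain of comparisons.
 */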
187 | ||
188 | ||
189 | /* | |
190 | * The structure itself. See the block comment above for what (O) and (P) mean. | |
191 | */ | |
192 | #define vmp_pageq vmp_q_un.vmp_q_pageq | |
193 | #define vmp_snext vmp_q_un.vmp_q_snext | |
194 | ||
195 | struct vm_page { | |
196 | union { | |
197 | vm_page_queue_chain_t vmp_q_pageq; /* queue info for FIFO queue or free list (P) */ | |
198 | struct vm_page *vmp_q_snext; | |
199 | } vmp_q_un; | |
200 | ||
201 | vm_page_queue_chain_t vmp_listq; /* all pages in same object (O) */ | |
202 | ||
203 | #if CONFIG_BACKGROUND_QUEUE | |
204 | vm_page_queue_chain_t vmp_backgroundq; /* anonymous pages in the background pool (P) */ | |
205 | #endif | |
206 | ||
207 | vm_object_offset_t vmp_offset; /* offset into that object (O,P) */ | |
208 | vm_page_object_t vmp_object; /* which object am I in (O&P) */ | |
209 | ||
210 | /* | |
211 | * The following word of flags is always protected by the "page queues" lock. | |
212 | * | |
213 | * We use 'vmp_wire_count' to store the local queue id if local queues are enabled. | |
214 | * See the comments at 'vm_page_queues_remove' as to why this is safe to do. | |
215 | */ | |
216 | #define vmp_local_id vmp_wire_count | |
217 | unsigned int vmp_wire_count:16, /* how many wired down maps use me? (O&P) */ | |
218 | vmp_q_state:4, /* which q is the page on (P) */ | |
219 | vmp_in_background:1, | |
220 | vmp_on_backgroundq:1, | |
221 | vmp_gobbled:1, /* page used internally (P) */ | |
222 | vmp_laundry:1, /* page is being cleaned now (P)*/ | |
223 | vmp_no_cache:1, /* page is not to be cached and should */ | |
224 | /* be reused ahead of other pages (P) */ | |
225 | vmp_private:1, /* Page should not be returned to the free list (P) */ | |
226 | vmp_reference:1, /* page has been used (P) */ | |
227 | vmp_lopage:1, | |
228 | vmp_unused_page_bits:4; | |
229 | ||
230 | /* | |
231 | * MUST keep the 2 32 bit words used as bit fields | |
232 | * separated since the compiler has a nasty habit | |
233 | * of using 64 bit loads and stores on them as | |
234 | * if they were a single 64 bit field... since | |
235 | * they are protected by 2 different locks, this | |
236 | * is a real problem | |
237 | */ | |
238 | vm_page_packed_t vmp_next_m; /* VP bucket link (O) */ | |
239 | ||
240 | /* | |
241 | * The following word of flags is protected by the "VM object" lock. | |
242 | * | |
243 | * IMPORTANT: the "vmp_pmapped", "vmp_xpmapped" and "vmp_clustered" bits can be modified while holding the | |
244 | * VM object "shared" lock + the page lock provided through the pmap_lock_phys_page function. | |
245 | * This is done in vm_fault_enter() and the CONSUME_CLUSTERED macro. | |
246 | * It's also ok to modify them behind just the VM object "exclusive" lock. | |
247 | */ | |
248 | unsigned int vmp_busy:1, /* page is in transit (O) */ | |
249 | vmp_wanted:1, /* someone is waiting for page (O) */ | |
250 | vmp_tabled:1, /* page is in VP table (O) */ | |
251 | vmp_hashed:1, /* page is in vm_page_buckets[] (O) + the bucket lock */ | |
252 | vmp_fictitious:1, /* Physical page doesn't exist (O) */ | |
253 | vmp_clustered:1, /* page is not the faulted page (O) or (O-shared AND pmap_page) */ | |
254 | vmp_pmapped:1, /* page has at some time been entered into a pmap (O) or */ | |
255 | /* (O-shared AND pmap_page) */ | |
256 | vmp_xpmapped:1, /* page has been entered with execute permission (O) or */ | |
257 | /* (O-shared AND pmap_page) */ | |
258 | vmp_wpmapped:1, /* page has been entered at some point into a pmap for write (O) */ | |
259 | vmp_free_when_done:1, /* page is to be freed once cleaning is completed (O) */ | |
260 | vmp_absent:1, /* Data has been requested, but is not yet available (O) */ | |
261 | vmp_error:1, /* Data manager was unable to provide data due to error (O) */ | |
262 | vmp_dirty:1, /* Page must be cleaned (O) */ | |
263 | vmp_cleaning:1, /* Page clean has begun (O) */ | |
264 | vmp_precious:1, /* Page is precious; data must be returned even if clean (O) */ | |
265 | vmp_overwriting:1, /* Request to unlock has been made without having data. (O) */ | |
266 | /* [See vm_fault_page_overwrite] */ | |
267 | vmp_restart:1, /* Page was pushed higher in shadow chain by copy_call-related pagers */ | |
268 | /* start again at top of chain */ | |
269 | vmp_unusual:1, /* Page is absent, error, restart or page locked */ | |
270 | vmp_cs_validated:VMP_CS_BITS, /* code-signing: page was checked */ | |
271 | vmp_cs_tainted:VMP_CS_BITS, /* code-signing: page is tainted */ | |
272 | vmp_cs_nx:VMP_CS_BITS, /* code-signing: page is nx */ | |
273 | vmp_reusable:1, | |
274 | vmp_written_by_kernel:1; /* page was written by kernel (i.e. decompressed) */ | |
275 | ||
276 | #if !defined(__arm__) && !defined(__arm64__) | |
277 | ppnum_t vmp_phys_page; /* Physical page number of the page */ | |
278 | #endif | |
279 | }; | |
280 | ||
281 | typedef struct vm_page *vm_page_t; | |
282 | extern vm_page_t vm_pages; | |
283 | extern vm_page_t vm_page_array_beginning_addr; | |
284 | extern vm_page_t vm_page_array_ending_addr; | |
285 | ||
286 | static inline int | |
287 | VMP_CS_FOR_OFFSET( | |
288 | vm_map_offset_t fault_phys_offset) | |
289 | { | |
290 | assertf(fault_phys_offset < PAGE_SIZE && | |
291 | !(fault_phys_offset & FOURK_PAGE_MASK), | |
292 | "offset 0x%llx\n", (uint64_t)fault_phys_offset); | |
293 | return 1 << (fault_phys_offset >> FOURK_PAGE_SHIFT); | |
294 | } | |
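/*
 * Worked example (assuming a 16K PAGE_SIZE kernel with FOURK_PAGE_SHIFT
 * == 12): a 16K page holds four 4K sub-pages, and the VMP_CS_BITS wide
 * code-signing bitfields carry one bit per sub-page:
 *
 *	VMP_CS_FOR_OFFSET(0x0000) == 0x1	(first 4K sub-page)
 *	VMP_CS_FOR_OFFSET(0x1000) == 0x2	(second 4K sub-page)
 *	VMP_CS_FOR_OFFSET(0x3000) == 0x8	(fourth 4K sub-page)
 *
 * When fault_page_size == PAGE_SIZE, the accessors below skip the mask and
 * compare the whole bitfield against VMP_CS_ALL_TRUE / VMP_CS_ALL_FALSE.
 */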
static inline bool
VMP_CS_VALIDATED(
	vm_page_t p,
	vm_map_size_t fault_page_size,
	vm_map_offset_t fault_phys_offset)
{
	assertf(fault_page_size <= PAGE_SIZE,
	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
	if (fault_page_size == PAGE_SIZE) {
		return p->vmp_cs_validated == VMP_CS_ALL_TRUE;
	}
	return p->vmp_cs_validated & VMP_CS_FOR_OFFSET(fault_phys_offset);
}
static inline bool
VMP_CS_TAINTED(
	vm_page_t p,
	vm_map_size_t fault_page_size,
	vm_map_offset_t fault_phys_offset)
{
	assertf(fault_page_size <= PAGE_SIZE,
	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
	if (fault_page_size == PAGE_SIZE) {
		return p->vmp_cs_tainted != VMP_CS_ALL_FALSE;
	}
	return p->vmp_cs_tainted & VMP_CS_FOR_OFFSET(fault_phys_offset);
}
static inline bool
VMP_CS_NX(
	vm_page_t p,
	vm_map_size_t fault_page_size,
	vm_map_offset_t fault_phys_offset)
{
	assertf(fault_page_size <= PAGE_SIZE,
	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
	if (fault_page_size == PAGE_SIZE) {
		return p->vmp_cs_nx != VMP_CS_ALL_FALSE;
	}
	return p->vmp_cs_nx & VMP_CS_FOR_OFFSET(fault_phys_offset);
}
static inline void
VMP_CS_SET_VALIDATED(
	vm_page_t p,
	vm_map_size_t fault_page_size,
	vm_map_offset_t fault_phys_offset,
	boolean_t value)
{
	assertf(fault_page_size <= PAGE_SIZE,
	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
	if (value) {
		if (fault_page_size == PAGE_SIZE) {
			p->vmp_cs_validated = VMP_CS_ALL_TRUE;
		}
		p->vmp_cs_validated |= VMP_CS_FOR_OFFSET(fault_phys_offset);
	} else {
		if (fault_page_size == PAGE_SIZE) {
			p->vmp_cs_validated = VMP_CS_ALL_FALSE;
		}
		p->vmp_cs_validated &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
	}
}
static inline void
VMP_CS_SET_TAINTED(
	vm_page_t p,
	vm_map_size_t fault_page_size,
	vm_map_offset_t fault_phys_offset,
	boolean_t value)
{
	assertf(fault_page_size <= PAGE_SIZE,
	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
	if (value) {
		if (fault_page_size == PAGE_SIZE) {
			p->vmp_cs_tainted = VMP_CS_ALL_TRUE;
		}
		p->vmp_cs_tainted |= VMP_CS_FOR_OFFSET(fault_phys_offset);
	} else {
		if (fault_page_size == PAGE_SIZE) {
			p->vmp_cs_tainted = VMP_CS_ALL_FALSE;
		}
		p->vmp_cs_tainted &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
	}
}
static inline void
VMP_CS_SET_NX(
	vm_page_t p,
	vm_map_size_t fault_page_size,
	vm_map_offset_t fault_phys_offset,
	boolean_t value)
{
	assertf(fault_page_size <= PAGE_SIZE,
	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
	if (value) {
		if (fault_page_size == PAGE_SIZE) {
			p->vmp_cs_nx = VMP_CS_ALL_TRUE;
		}
		p->vmp_cs_nx |= VMP_CS_FOR_OFFSET(fault_phys_offset);
	} else {
		if (fault_page_size == PAGE_SIZE) {
			p->vmp_cs_nx = VMP_CS_ALL_FALSE;
		}
		p->vmp_cs_nx &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
	}
}


#if defined(__arm__) || defined(__arm64__)

extern unsigned int vm_first_phys_ppnum;

struct vm_page_with_ppnum {
	struct vm_page	vm_page_wo_ppnum;

	ppnum_t		vmp_phys_page;
};
typedef struct vm_page_with_ppnum	*vm_page_with_ppnum_t;


static inline ppnum_t
VM_PAGE_GET_PHYS_PAGE(vm_page_t m)
{
	if (m >= vm_page_array_beginning_addr && m < vm_page_array_ending_addr) {
		return (ppnum_t)((uintptr_t)(m - vm_page_array_beginning_addr) + vm_first_phys_ppnum);
	} else {
		return ((vm_page_with_ppnum_t)m)->vmp_phys_page;
	}
}
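/*
 * A minimal sketch of the identity this relies on: entries of vm_pages[]
 * describe physically contiguous pages starting at vm_first_phys_ppnum,
 * so for an array-resident page the physical page number is recomputed
 * from the array index instead of being stored:
 *
 *	ppnum == vm_first_phys_ppnum + (m - vm_page_array_beginning_addr)
 *
 * Only pages outside the array carry an explicit vmp_phys_page field (see
 * struct vm_page_with_ppnum above).
 */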
426 | ||
427 | #define VM_PAGE_SET_PHYS_PAGE(m, ppnum) \ | |
428 | MACRO_BEGIN \ | |
429 | if ((m) < vm_page_array_beginning_addr || (m) >= vm_page_array_ending_addr) \ | |
430 | ((vm_page_with_ppnum_t)(m))->vmp_phys_page = ppnum; \ | |
431 | assert(ppnum == VM_PAGE_GET_PHYS_PAGE(m)); \ | |
432 | MACRO_END | |
433 | ||
434 | #define VM_PAGE_GET_COLOR(m) (VM_PAGE_GET_PHYS_PAGE(m) & vm_color_mask) | |
435 | ||
436 | #else /* defined(__arm__) || defined(__arm64__) */ | |
437 | ||
438 | ||
439 | struct vm_page_with_ppnum { | |
440 | struct vm_page vm_page_with_ppnum; | |
441 | }; | |
442 | typedef struct vm_page_with_ppnum *vm_page_with_ppnum_t; | |
443 | ||
444 | ||
445 | #define VM_PAGE_GET_PHYS_PAGE(page) (page)->vmp_phys_page | |
446 | #define VM_PAGE_SET_PHYS_PAGE(page, ppnum) \ | |
447 | MACRO_BEGIN \ | |
448 | (page)->vmp_phys_page = ppnum; \ | |
449 | MACRO_END | |
450 | ||
451 | #define VM_PAGE_GET_CLUMP(m) ((VM_PAGE_GET_PHYS_PAGE(m)) >> vm_clump_shift) | |
452 | #define VM_PAGE_GET_COLOR(m) ((VM_PAGE_GET_CLUMP(m)) & vm_color_mask) | |
453 | ||
454 | #endif /* defined(__arm__) || defined(__arm64__) */ | |
455 | ||
456 | ||
457 | ||
458 | #if defined(__LP64__) | |
459 | /* | |
460 | * Parameters for pointer packing | |
461 | * | |
462 | * | |
463 | * VM Pages pointers might point to: | |
464 | * | |
465 | * 1. VM_PAGE_PACKED_ALIGNED aligned kernel globals, | |
466 | * | |
467 | * 2. VM_PAGE_PACKED_ALIGNED aligned heap allocated vm pages | |
468 | * | |
469 | * 3. entries in the vm_pages array (whose entries aren't VM_PAGE_PACKED_ALIGNED | |
470 | * aligned). | |
471 | * | |
472 | * | |
473 | * The current scheme uses 31 bits of storage and 6 bits of shift using the | |
474 | * VM_PACK_POINTER() scheme for (1-2), and packs (3) as an index within the | |
475 | * vm_pages array, setting the top bit (VM_PAGE_PACKED_FROM_ARRAY). | |
476 | * | |
477 | * This scheme gives us a reach of 128G from VM_MIN_KERNEL_AND_KEXT_ADDRESS. | |
478 | */ | |
479 | #define VM_VPLQ_ALIGNMENT 128 | |
480 | #define VM_PAGE_PACKED_PTR_ALIGNMENT 64 /* must be a power of 2 */ | |
481 | #define VM_PAGE_PACKED_ALIGNED __attribute__((aligned(VM_PAGE_PACKED_PTR_ALIGNMENT))) | |
482 | #define VM_PAGE_PACKED_PTR_BITS 31 | |
483 | #define VM_PAGE_PACKED_PTR_SHIFT 6 | |
484 | #define VM_PAGE_PACKED_PTR_BASE ((uintptr_t)VM_MIN_KERNEL_AND_KEXT_ADDRESS) | |
485 | ||
486 | #define VM_PAGE_PACKED_FROM_ARRAY 0x80000000 | |
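
/*
 * Worked equation for the "reach" claim above: a packed pointer keeps 31
 * value bits shifted left by VM_PAGE_PACKED_PTR_SHIFT, so the addressable
 * window starting at VM_PAGE_PACKED_PTR_BASE is
 *
 *	2^31 * 2^6 bytes = 2^37 bytes = 128 GB
 *
 * at a granularity of 2^6 = 64 bytes (hence VM_PAGE_PACKED_PTR_ALIGNMENT),
 * with the remaining top bit reserved for VM_PAGE_PACKED_FROM_ARRAY.
 */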
487 | ||
488 | static inline vm_page_packed_t | |
489 | vm_page_pack_ptr(uintptr_t p) | |
490 | { | |
491 | if (p >= (uintptr_t)vm_page_array_beginning_addr && | |
492 | p < (uintptr_t)vm_page_array_ending_addr) { | |
493 | ptrdiff_t diff = (vm_page_t)p - vm_page_array_beginning_addr; | |
494 | assert((vm_page_t)p == &vm_pages[diff]); | |
495 | return (vm_page_packed_t)(diff | VM_PAGE_PACKED_FROM_ARRAY); | |
496 | } | |
497 | ||
498 | VM_ASSERT_POINTER_PACKABLE(p, VM_PAGE_PACKED_PTR); | |
499 | vm_offset_t packed = VM_PACK_POINTER(p, VM_PAGE_PACKED_PTR); | |
500 | return CAST_DOWN_EXPLICIT(vm_page_packed_t, packed); | |
501 | } | |
502 | ||
503 | ||
504 | static inline uintptr_t | |
505 | vm_page_unpack_ptr(uintptr_t p) | |
506 | { | |
507 | extern unsigned int vm_pages_count; | |
508 | ||
509 | if (p >= VM_PAGE_PACKED_FROM_ARRAY) { | |
510 | p &= ~VM_PAGE_PACKED_FROM_ARRAY; | |
511 | assert(p < (uintptr_t)vm_pages_count); | |
512 | return (uintptr_t)&vm_pages[p]; | |
513 | } | |
514 | ||
515 | return VM_UNPACK_POINTER(p, VM_PAGE_PACKED_PTR); | |
516 | } | |
517 | ||
518 | ||
519 | #define VM_PAGE_PACK_PTR(p) vm_page_pack_ptr((uintptr_t)(p)) | |
520 | #define VM_PAGE_UNPACK_PTR(p) vm_page_unpack_ptr((uintptr_t)(p)) | |
521 | ||
522 | #define VM_PAGE_OBJECT(p) ((vm_object_t)(VM_PAGE_UNPACK_PTR(p->vmp_object))) | |
523 | #define VM_PAGE_PACK_OBJECT(o) ((vm_page_object_t)(VM_PAGE_PACK_PTR(o))) | |
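
/*
 * Usage sketch (illustrative only): packing must round-trip, whether the
 * page lives in the vm_pages[] array or was allocated separately:
 *
 *	vm_page_t m = ...;
 *	vm_page_packed_t pp = VM_PAGE_PACK_PTR(m);
 *	assert((vm_page_t)VM_PAGE_UNPACK_PTR(pp) == m);
 *
 * The same pair backs VM_PAGE_PACK_OBJECT() / VM_PAGE_OBJECT() for the
 * packed vmp_object field.
 */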
524 | ||
525 | ||
526 | #define VM_PAGE_ZERO_PAGEQ_ENTRY(p) \ | |
527 | MACRO_BEGIN \ | |
528 | (p)->vmp_snext = 0; \ | |
529 | MACRO_END | |
530 | ||
531 | ||
532 | #define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p) VM_PAGE_PACK_PTR(p) | |
533 | ||
534 | ||
535 | static __inline__ void | |
536 | vm_page_enqueue_tail( | |
537 | vm_page_queue_t que, | |
538 | vm_page_queue_entry_t elt) | |
539 | { | |
540 | vm_page_queue_entry_t old_tail; | |
541 | ||
542 | old_tail = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(que->prev); | |
543 | elt->next = VM_PAGE_PACK_PTR(que); | |
544 | elt->prev = que->prev; | |
545 | que->prev = old_tail->next = VM_PAGE_PACK_PTR(elt); | |
546 | } | |
547 | ||
548 | ||
549 | static __inline__ void | |
550 | vm_page_remque( | |
551 | vm_page_queue_entry_t elt) | |
552 | { | |
553 | vm_page_queue_entry_t next; | |
554 | vm_page_queue_entry_t prev; | |
555 | vm_page_packed_t next_pck = elt->next; | |
556 | vm_page_packed_t prev_pck = elt->prev; | |
557 | ||
558 | next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(next_pck); | |
559 | ||
560 | /* next may equal prev (and the queue head) if elt was the only element */ | |
561 | prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev_pck); | |
562 | ||
563 | next->prev = prev_pck; | |
564 | prev->next = next_pck; | |
565 | ||
566 | elt->next = 0; | |
567 | elt->prev = 0; | |
568 | } | |
569 | ||
570 | ||
571 | /* | |
572 | * Macro: vm_page_queue_init | |
573 | * Function: | |
574 | * Initialize the given queue. | |
575 | * Header: | |
576 | * void vm_page_queue_init(q) | |
577 | * vm_page_queue_t q; \* MODIFIED *\ | |
578 | */ | |
579 | #define vm_page_queue_init(q) \ | |
580 | MACRO_BEGIN \ | |
581 | VM_ASSERT_POINTER_PACKABLE((vm_offset_t)(q), VM_PAGE_PACKED_PTR); \ | |
582 | (q)->next = VM_PAGE_PACK_PTR(q); \ | |
583 | (q)->prev = VM_PAGE_PACK_PTR(q); \ | |
584 | MACRO_END | |
585 | ||
586 | ||
587 | /* | |
588 | * Macro: vm_page_queue_enter | |
589 | * Function: | |
590 | * Insert a new element at the tail of the vm_page queue. | |
591 | * Header: | |
592 | * void vm_page_queue_enter(q, elt, field) | |
593 | * queue_t q; | |
594 | * vm_page_t elt; | |
595 | * <field> is the list field in vm_page_t | |
596 | * | |
597 | * This macro's arguments have to match the generic "queue_enter()" macro which is | |
598 | * what is used for this on 32 bit kernels. | |
599 | */ | |
600 | #define vm_page_queue_enter(head, elt, field) \ | |
601 | MACRO_BEGIN \ | |
602 | vm_page_packed_t __pck_elt = VM_PAGE_PACK_PTR(elt); \ | |
603 | vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head); \ | |
604 | vm_page_packed_t __pck_prev = (head)->prev; \ | |
605 | \ | |
606 | if (__pck_head == __pck_prev) { \ | |
607 | (head)->next = __pck_elt; \ | |
608 | } else { \ | |
609 | vm_page_t __prev; \ | |
610 | __prev = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_prev); \ | |
611 | __prev->field.next = __pck_elt; \ | |
612 | } \ | |
613 | (elt)->field.prev = __pck_prev; \ | |
614 | (elt)->field.next = __pck_head; \ | |
615 | (head)->prev = __pck_elt; \ | |
616 | MACRO_END | |
617 | ||
618 | ||
619 | #if defined(__x86_64__) | |
620 | /* | |
621 | * These are helper macros for vm_page_queue_enter_clump to assist | |
622 | * with conditional compilation (release / debug / development) | |
623 | */ | |
624 | #if DEVELOPMENT || DEBUG | |
625 | ||
626 | #define __DEBUG_CHECK_BUDDIES(__prev, __p, field) \ | |
627 | MACRO_BEGIN \ | |
628 | if (__prev != NULL) { \ | |
629 | assert(__p == (vm_page_t)VM_PAGE_UNPACK_PTR(__prev->next)); \ | |
630 | assert(__prev == (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(__p->field.prev)); \ | |
631 | } \ | |
632 | MACRO_END | |
633 | ||
634 | #define __DEBUG_VERIFY_LINKS(__first, __n_free, __last_next) \ | |
635 | MACRO_BEGIN \ | |
636 | unsigned int __i; \ | |
637 | vm_page_queue_entry_t __tmp; \ | |
638 | for (__i = 0, __tmp = __first; __i < __n_free; __i++) { \ | |
639 | __tmp = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(__tmp->next); \ | |
640 | } \ | |
641 | assert(__tmp == __last_next); \ | |
642 | MACRO_END | |
643 | ||
644 | #define __DEBUG_STAT_INCREMENT_INRANGE vm_clump_inrange++ | |
645 | #define __DEBUG_STAT_INCREMENT_INSERTS vm_clump_inserts++ | |
646 | #define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free) vm_clump_promotes+=__n_free | |
647 | ||
648 | #else | |
649 | ||
650 | #define __DEBUG_CHECK_BUDDIES(__prev, __p, field) | |
651 | #define __DEBUG_VERIFY_LINKS(__first, __n_free, __last_next) | |
652 | #define __DEBUG_STAT_INCREMENT_INRANGE | |
653 | #define __DEBUG_STAT_INCREMENT_INSERTS | |
654 | #define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free) | |
655 | ||
656 | #endif /* if DEVELOPMENT || DEBUG */ | |
657 | ||
658 | /* | |
659 | * Insert a new page into a free queue and clump pages within the same 16K boundary together | |
660 | */ | |
661 | static inline void | |
662 | vm_page_queue_enter_clump( | |
663 | vm_page_queue_t head, | |
664 | vm_page_t elt) | |
665 | { | |
666 | vm_page_queue_entry_t first = NULL; /* first page in the clump */ | |
667 | vm_page_queue_entry_t last = NULL; /* last page in the clump */ | |
668 | vm_page_queue_entry_t prev = NULL; | |
669 | vm_page_queue_entry_t next; | |
670 | uint_t n_free = 1; | |
671 | extern unsigned int vm_pages_count; | |
672 | extern unsigned int vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold; | |
673 | extern unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes; | |
674 | ||
675 | /* | |
676 | * If elt is part of the vm_pages[] array, find its neighboring buddies in the array. | |
677 | */ | |
678 | if (vm_page_array_beginning_addr <= elt && elt < &vm_pages[vm_pages_count]) { | |
679 | vm_page_t p; | |
680 | uint_t i; | |
681 | uint_t n; | |
682 | ppnum_t clump_num; | |
683 | ||
684 | first = last = (vm_page_queue_entry_t)elt; | |
685 | clump_num = VM_PAGE_GET_CLUMP(elt); | |
686 | n = VM_PAGE_GET_PHYS_PAGE(elt) & vm_clump_mask; | |
687 | ||
688 | /* | |
		 * Check for preceding vm_pages[] entries in the same chunk
		 */
		for (i = 0, p = elt - 1; i < n && vm_page_array_beginning_addr <= p; i++, p--) {
			if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) {
				if (prev == NULL) {
					prev = (vm_page_queue_entry_t)p;
				}
				first = (vm_page_queue_entry_t)p;
				n_free++;
			}
		}

		/*
		 * Check the following vm_pages[] entries in the same chunk
		 */
		for (i = n + 1, p = elt + 1; i < vm_clump_size && p < &vm_pages[vm_pages_count]; i++, p++) {
			if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) {
				if (last == (vm_page_queue_entry_t)elt) {	/* first one only */
					__DEBUG_CHECK_BUDDIES(prev, p, vmp_pageq);
				}

				if (prev == NULL) {
					prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev);
				}
				last = (vm_page_queue_entry_t)p;
				n_free++;
			}
		}
		__DEBUG_STAT_INCREMENT_INRANGE;
	}

	/* if elt is not part of vm_pages or if 1st page in clump, insert at tail */
	if (prev == NULL) {
		prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->prev);
	}

	/* insert the element */
	next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev->next);
	elt->vmp_pageq.next = prev->next;
	elt->vmp_pageq.prev = next->prev;
	prev->next = next->prev = VM_PAGE_PACK_PTR(elt);
	__DEBUG_STAT_INCREMENT_INSERTS;

	/*
	 * Check if clump needs to be promoted to head.
	 */
	if (n_free >= vm_clump_promote_threshold && n_free > 1) {
		vm_page_queue_entry_t first_prev;

		first_prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(first->prev);

		/* If not at head already */
		if (first_prev != head) {
			vm_page_queue_entry_t last_next;
			vm_page_queue_entry_t head_next;

			last_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(last->next);

			/* verify that the links within the clump are consistent */
			__DEBUG_VERIFY_LINKS(first, n_free, last_next);

			/* promote clump to head */
			first_prev->next = last->next;
			last_next->prev = first->prev;
			first->prev = VM_PAGE_PACK_PTR(head);
			last->next = head->next;

			head_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->next);
			head_next->prev = VM_PAGE_PACK_PTR(last);
			head->next = VM_PAGE_PACK_PTR(first);
			__DEBUG_STAT_INCREMENT_PROMOTES(n_free);
		}
	}
}
#endif
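
/*
 * Worked example of the clump geometry (assuming 4K pages grouped into the
 * 16K clumps described above, i.e. vm_clump_size == 4, vm_clump_mask == 3
 * and vm_clump_shift == 2): physical pages 8..11 all yield
 *
 *	VM_PAGE_GET_CLUMP(m) == ppnum >> 2 == 2
 *
 * so vm_page_queue_enter_clump() links them adjacently on the free queue
 * and promotes the run toward the head once n_free reaches
 * vm_clump_promote_threshold.
 */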
764 | ||
765 | /* | |
766 | * Macro: vm_page_queue_enter_first | |
767 | * Function: | |
768 | * Insert a new element at the head of the vm_page queue. | |
769 | * Header: | |
 *		void vm_page_queue_enter_first(q, elt, field)
 *			queue_t q;
 *			vm_page_t elt;
 *			<field> is the linkage field in vm_page
 *
 *	This macro's arguments have to match the generic "queue_enter_first()" macro which is
 *	what is used for this on 32 bit kernels.
 */
#define vm_page_queue_enter_first(head, elt, field)	\
	MACRO_BEGIN					\
	vm_page_packed_t __pck_next = (head)->next;	\
	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);	\
	vm_page_packed_t __pck_elt = VM_PAGE_PACK_PTR(elt);	\
							\
	if (__pck_head == __pck_next) {			\
		(head)->prev = __pck_elt;		\
	} else {					\
		vm_page_t __next;			\
		__next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next);	\
		__next->field.prev = __pck_elt;		\
	}						\
							\
	(elt)->field.next = __pck_next;			\
	(elt)->field.prev = __pck_head;			\
	(head)->next = __pck_elt;			\
	MACRO_END


/*
 *	Macro:	vm_page_queue_remove
 *	Function:
 *		Remove an arbitrary page from a vm_page queue.
 *	Header:
 *		void vm_page_queue_remove(q, qe, field)
 *			arguments as in vm_page_queue_enter
 *
 *	This macro's arguments have to match the generic "queue_enter()" macro which is
 *	what is used for this on 32 bit kernels.
 */
#define vm_page_queue_remove(head, elt, field)	\
	MACRO_BEGIN				\
	vm_page_packed_t __pck_next = (elt)->field.next;	\
	vm_page_packed_t __pck_prev = (elt)->field.prev;	\
	vm_page_t __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next);	\
	vm_page_t __prev = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_prev);	\
						\
	if ((void *)(head) == (void *)__next) {	\
		(head)->prev = __pck_prev;	\
	} else {				\
		__next->field.prev = __pck_prev;	\
	}					\
						\
	if ((void *)(head) == (void *)__prev) {	\
		(head)->next = __pck_next;	\
	} else {				\
		__prev->field.next = __pck_next;	\
	}					\
						\
	(elt)->field.next = 0;			\
	(elt)->field.prev = 0;			\
	MACRO_END


/*
 *	Macro:	vm_page_queue_remove_first
 *
 *	Function:
 *		Remove and return the entry at the head of a vm_page queue.
 *
 *	Header:
 *		vm_page_queue_remove_first(head, entry, field)
 *		N.B. entry is returned by reference
 *
 *	This macro's arguments have to match the generic "queue_remove_first()" macro which is
 *	what is used for this on 32 bit kernels.
 */
#define vm_page_queue_remove_first(head, entry, field)	\
	MACRO_BEGIN					\
	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);	\
	vm_page_packed_t __pck_next;			\
	vm_page_t __next;				\
							\
	(entry) = (vm_page_t)VM_PAGE_UNPACK_PTR((head)->next);	\
	__pck_next = (entry)->field.next;		\
	__next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next);	\
							\
	if (__pck_head == __pck_next) {			\
		(head)->prev = __pck_head;		\
	} else {					\
		__next->field.prev = __pck_head;	\
	}						\
							\
	(head)->next = __pck_next;			\
	(entry)->field.next = 0;			\
	(entry)->field.prev = 0;			\
	MACRO_END


#if defined(__x86_64__)
/*
 *	Macro:	vm_page_queue_remove_first_with_clump
 *	Function:
 *		Remove and return the entry at the head of the free queue
 *		end is set to 1 to indicate that we just returned the last page in a clump
 *
 *	Header:
 *		vm_page_queue_remove_first_with_clump(head, entry, end)
 *		entry is returned by reference
 *		end is returned by reference
 */
#define vm_page_queue_remove_first_with_clump(head, entry, end)	\
	MACRO_BEGIN							\
	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);		\
	vm_page_packed_t __pck_next;					\
	vm_page_t __next;						\
									\
	(entry) = (vm_page_t)VM_PAGE_UNPACK_PTR((head)->next);		\
	__pck_next = (entry)->vmp_pageq.next;				\
	__next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next);		\
									\
	(end) = 0;							\
	if (__pck_head == __pck_next) {					\
		(head)->prev = __pck_head;				\
		(end) = 1;						\
	} else {							\
		__next->vmp_pageq.prev = __pck_head;			\
		if (VM_PAGE_GET_CLUMP(entry) != VM_PAGE_GET_CLUMP(__next)) {	\
			(end) = 1;					\
		}							\
	}								\
									\
	(head)->next = __pck_next;					\
	(entry)->vmp_pageq.next = 0;					\
	(entry)->vmp_pageq.prev = 0;					\
	MACRO_END
#endif

/*
 *	Macro:	vm_page_queue_end
 *	Function:
 *		Tests whether a new entry is really the end of
 *		the queue.
 *	Header:
 *		boolean_t vm_page_queue_end(q, qe)
 *			vm_page_queue_t q;
 *			vm_page_queue_entry_t qe;
 */
#define vm_page_queue_end(q, qe)	((q) == (qe))


/*
 *	Macro:	vm_page_queue_empty
 *	Function:
 *		Tests whether a queue is empty.
 *	Header:
 *		boolean_t vm_page_queue_empty(q)
 *			vm_page_queue_t q;
 */
#define vm_page_queue_empty(q)	vm_page_queue_end((q), ((vm_page_queue_entry_t)vm_page_queue_first(q)))



/*
 *	Macro:	vm_page_queue_first
 *	Function:
 *		Returns the first entry in the queue.
 *	Header:
 *		uintptr_t vm_page_queue_first(q)
 *			vm_page_queue_t q;	\* IN *\
 */
#define vm_page_queue_first(q)	(VM_PAGE_UNPACK_PTR((q)->next))



/*
 *	Macro:	vm_page_queue_last
 *	Function:
 *		Returns the last entry in the queue.
 *	Header:
 *		vm_page_queue_entry_t queue_last(q)
 *			queue_t q;	\* IN *\
 */
#define vm_page_queue_last(q)	(VM_PAGE_UNPACK_PTR((q)->prev))



/*
 *	Macro:	vm_page_queue_next
 *	Function:
 *		Returns the entry after an item in the queue.
 *	Header:
 *		uintptr_t vm_page_queue_next(qc)
 *			vm_page_queue_t qc;
 */
#define vm_page_queue_next(qc)	(VM_PAGE_UNPACK_PTR((qc)->next))



/*
 *	Macro:	vm_page_queue_prev
 *	Function:
 *		Returns the entry before an item in the queue.
 *	Header:
 *		uintptr_t vm_page_queue_prev(qc)
 *			vm_page_queue_t qc;
 */
#define vm_page_queue_prev(qc)	(VM_PAGE_UNPACK_PTR((qc)->prev))



/*
 *	Macro:	vm_page_queue_iterate
 *	Function:
 *		iterate over each item in a vm_page queue.
 *		Generates a 'for' loop, setting elt to
 *		each item in turn (by reference).
 *	Header:
 *		vm_page_queue_iterate(q, elt, field)
 *			queue_t q;
 *			vm_page_t elt;
 *			<field> is the chain field in vm_page_t
 */
#define vm_page_queue_iterate(head, elt, field)	\
	for ((elt) = (vm_page_t)vm_page_queue_first(head);	\
	    !vm_page_queue_end((head), (vm_page_queue_entry_t)(elt));	\
	    (elt) = (vm_page_t)vm_page_queue_next(&(elt)->field))
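
/*
 * Usage sketch for the queue macros above (illustrative only; callers are
 * assumed to hold the appropriate queue lock):
 *
 *	vm_page_queue_head_t q;
 *	vm_page_t m;
 *
 *	vm_page_queue_init(&q);
 *	vm_page_queue_enter(&q, m, vmp_pageq);
 *	vm_page_queue_iterate(&q, m, vmp_pageq) {
 *		... visits each page on the queue in turn ...
 *	}
 *	vm_page_queue_remove(&q, m, vmp_pageq);
 */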
996 | ||
997 | #else // LP64 | |
998 | ||
999 | #define VM_VPLQ_ALIGNMENT 128 | |
1000 | #define VM_PAGE_PACKED_PTR_ALIGNMENT sizeof(vm_offset_t) | |
1001 | #define VM_PAGE_PACKED_ALIGNED | |
1002 | #define VM_PAGE_PACKED_PTR_BITS 32 | |
1003 | #define VM_PAGE_PACKED_PTR_SHIFT 0 | |
1004 | #define VM_PAGE_PACKED_PTR_BASE 0 | |
1005 | ||
1006 | #define VM_PAGE_PACKED_FROM_ARRAY 0 | |
1007 | ||
1008 | #define VM_PAGE_PACK_PTR(p) (p) | |
1009 | #define VM_PAGE_UNPACK_PTR(p) ((uintptr_t)(p)) | |
1010 | ||
1011 | #define VM_PAGE_OBJECT(p) ((vm_object_t)((p)->vmp_object)) | |
1012 | #define VM_PAGE_PACK_OBJECT(o) ((vm_page_object_t)(VM_PAGE_PACK_PTR(o))) | |
1013 | ||
1014 | ||
1015 | #define VM_PAGE_ZERO_PAGEQ_ENTRY(p) \ | |
1016 | MACRO_BEGIN \ | |
1017 | (p)->vmp_pageq.next = 0; \ | |
1018 | (p)->vmp_pageq.prev = 0; \ | |
1019 | MACRO_END | |
1020 | ||
1021 | #define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p) ((queue_entry_t)(p)) | |
1022 | ||
1023 | #define vm_page_remque remque | |
1024 | #define vm_page_enqueue_tail enqueue_tail | |
1025 | #define vm_page_queue_init queue_init | |
1026 | #define vm_page_queue_enter(h, e, f) queue_enter(h, e, vm_page_t, f) | |
1027 | #define vm_page_queue_enter_first(h, e, f) queue_enter_first(h, e, vm_page_t, f) | |
1028 | #define vm_page_queue_remove(h, e, f) queue_remove(h, e, vm_page_t, f) | |
1029 | #define vm_page_queue_remove_first(h, e, f) queue_remove_first(h, e, vm_page_t, f) | |
1030 | #define vm_page_queue_end queue_end | |
1031 | #define vm_page_queue_empty queue_empty | |
1032 | #define vm_page_queue_first queue_first | |
1033 | #define vm_page_queue_last queue_last | |
1034 | #define vm_page_queue_next queue_next | |
1035 | #define vm_page_queue_prev queue_prev | |
1036 | #define vm_page_queue_iterate(h, e, f) queue_iterate(h, e, vm_page_t, f) | |
1037 | ||
1038 | #endif // __LP64__ | |
1039 | ||
1040 | ||
1041 | ||
1042 | /* | |
1043 | * VM_PAGE_MIN_SPECULATIVE_AGE_Q through VM_PAGE_MAX_SPECULATIVE_AGE_Q | |
1044 | * represents a set of aging bins that are 'protected'... | |
1045 | * | |
1046 | * VM_PAGE_SPECULATIVE_AGED_Q is a list of the speculative pages that have | |
1047 | * not yet been 'claimed' but have been aged out of the protective bins | |
1048 | * this occurs in vm_page_speculate when it advances to the next bin | |
1049 | * and discovers that it is still occupied... at that point, all of the | |
1050 | * pages in that bin are moved to the VM_PAGE_SPECULATIVE_AGED_Q. the pages | |
1051 | * in that bin are all guaranteed to have reached at least the maximum age | |
1052 | * we allow for a protected page... they can be older if there is no | |
1053 | * memory pressure to pull them from the bin, or there are no new speculative pages | |
1054 | * being generated to push them out. | |
1055 | * this list is the one that vm_pageout_scan will prefer when looking | |
1056 | * for pages to move to the underweight free list | |
1057 | * | |
1058 | * VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS | |
1059 | * defines the amount of time a speculative page is normally | |
1060 | * allowed to live in the 'protected' state (i.e. not available | |
1061 | * to be stolen if vm_pageout_scan is running and looking for | |
1062 | * pages)... however, if the total number of speculative pages | |
1063 | * in the protected state exceeds our limit (defined in vm_pageout.c) | |
1064 | * and there are none available in VM_PAGE_SPECULATIVE_AGED_Q, then | |
1065 | * vm_pageout_scan is allowed to steal pages from the protected | |
1066 | * bucket even if they are underage. | |
1067 | * | |
1068 | * vm_pageout_scan is also allowed to pull pages from a protected | |
1069 | * bin if the bin has reached the "age of consent" we've set | |
1070 | */ | |
1071 | #define VM_PAGE_MAX_SPECULATIVE_AGE_Q 10 | |
1072 | #define VM_PAGE_MIN_SPECULATIVE_AGE_Q 1 | |
1073 | #define VM_PAGE_SPECULATIVE_AGED_Q 0 | |
1074 | ||
1075 | #define VM_PAGE_SPECULATIVE_Q_AGE_MS 500 | |
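
/*
 * With the defaults above, the normal protected lifetime of a speculative
 * page works out to
 *
 *	VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS
 *	    = 10 * 500 ms = 5 seconds
 *
 * subject to vm_page_speculative_q_age_ms overriding the default at run
 * time and to the memory-pressure exceptions described above.
 */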
1076 | ||
1077 | struct vm_speculative_age_q { | |
1078 | /* | |
1079 | * memory queue for speculative pages via clustered pageins | |
1080 | */ | |
1081 | vm_page_queue_head_t age_q; | |
1082 | mach_timespec_t age_ts; | |
1083 | } VM_PAGE_PACKED_ALIGNED; | |
1084 | ||
1085 | ||
1086 | ||
1087 | extern | |
1088 | struct vm_speculative_age_q vm_page_queue_speculative[]; | |
1089 | ||
1090 | extern int speculative_steal_index; | |
1091 | extern int speculative_age_index; | |
1092 | extern unsigned int vm_page_speculative_q_age_ms; | |
1093 | ||
1094 | ||
1095 | typedef struct vm_locks_array { | |
1096 | char pad __attribute__ ((aligned(64))); | |
1097 | lck_mtx_t vm_page_queue_lock2 __attribute__ ((aligned(64))); | |
1098 | lck_mtx_t vm_page_queue_free_lock2 __attribute__ ((aligned(64))); | |
1099 | char pad2 __attribute__ ((aligned(64))); | |
1100 | } vm_locks_array_t; | |
1101 | ||
1102 | ||
1103 | #if CONFIG_BACKGROUND_QUEUE | |
1104 | extern void vm_page_assign_background_state(vm_page_t mem); | |
1105 | extern void vm_page_update_background_state(vm_page_t mem); | |
1106 | extern void vm_page_add_to_backgroundq(vm_page_t mem, boolean_t first); | |
1107 | extern void vm_page_remove_from_backgroundq(vm_page_t mem); | |
1108 | #endif | |
1109 | ||
1110 | #define VM_PAGE_WIRED(m) ((m)->vmp_q_state == VM_PAGE_IS_WIRED) | |
1111 | #define NEXT_PAGE(m) ((m)->vmp_snext) | |
1112 | #define NEXT_PAGE_PTR(m) (&(m)->vmp_snext) | |
1113 | ||
1114 | /* | |
1115 | * XXX The unusual bit should not be necessary. Most of the bit | |
1116 | * XXX fields above really want to be masks. | |
1117 | */ | |
1118 | ||
1119 | /* | |
1120 | * For debugging, this macro can be defined to perform | |
1121 | * some useful check on a page structure. | |
1122 | * INTENTIONALLY left as a no-op so that the | |
1123 | * current call-sites can be left intact for future uses. | |
1124 | */ | |
1125 | ||
1126 | #define VM_PAGE_CHECK(mem) \ | |
1127 | MACRO_BEGIN \ | |
1128 | MACRO_END | |
1129 | ||
1130 | /* Page coloring: | |
1131 | * | |
1132 | * The free page list is actually n lists, one per color, | |
1133 | * where the number of colors is a function of the machine's | |
1134 | * cache geometry set at system initialization. To disable | |
1135 | * coloring, set vm_colors to 1 and vm_color_mask to 0. | |
1136 | * The boot-arg "colors" may be used to override vm_colors. | |
1137 | * Note that there is little harm in having more colors than needed. | |
1138 | */ | |
1139 | ||
1140 | #define MAX_COLORS 128 | |
1141 | #define DEFAULT_COLORS 32 | |
1142 | ||
1143 | extern | |
1144 | unsigned int vm_colors; /* must be in range 1..MAX_COLORS */ | |
1145 | extern | |
1146 | unsigned int vm_color_mask; /* must be (vm_colors-1) */ | |
1147 | extern | |
1148 | unsigned int vm_cache_geometry_colors; /* optimal #colors based on cache geometry */ | |
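
/*
 * Worked example (using the arm definition of VM_PAGE_GET_COLOR above,
 * where color == ppnum & vm_color_mask, and assuming vm_colors == 32 so
 * vm_color_mask == 31):
 *
 *	color(ppnum 7)  == 7  & 31 == 7
 *	color(ppnum 39) == 39 & 31 == 7
 *
 * Pages whose physical page numbers differ by a multiple of vm_colors land
 * on the same per-color free list; on x86_64 the clump number stands in
 * for the raw ppnum.
 */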
1149 | ||
1150 | /* | |
1151 | * Wired memory is a very limited resource and we can't let users exhaust it | |
1152 | * and deadlock the entire system. We enforce the following limits: | |
1153 | * | |
1154 | * vm_per_task_user_wire_limit | |
1155 | * how much memory can be user-wired in one user task | |
1156 | * | |
1157 | * vm_global_user_wire_limit (default: same as vm_per_task_user_wire_limit) | |
1158 | * how much memory can be user-wired in all user tasks | |
1159 | * | |
1160 | * These values are set to defaults based on the number of pages managed | |
 * by the VM system.  They can be overridden via sysctls.
 * See kmem_set_user_wire_limits for details on the default values.
 *
 * Regardless of the amount of memory in the system, we never reserve
 * more than VM_NOT_USER_WIREABLE_MAX bytes as unlockable.
 */
#if defined(__LP64__)
#define VM_NOT_USER_WIREABLE_MAX	(32ULL*1024*1024*1024)	/* 32GB */
#else
#define VM_NOT_USER_WIREABLE_MAX	(1UL*1024*1024*1024)	/* 1GB */
#endif /* __LP64__ */
extern
vm_map_size_t	vm_per_task_user_wire_limit;
extern
vm_map_size_t	vm_global_user_wire_limit;
extern
uint64_t	vm_add_wire_count_over_global_limit;
extern
uint64_t	vm_add_wire_count_over_user_limit;

/*
 *	Each pageable resident page falls into one of three lists:
 *
 *	free
 *		Available for allocation now.  The free list is
 *		actually an array of lists, one per color.
 *	inactive
 *		Not referenced in any map, but still has an
 *		object/offset-page mapping, and may be dirty.
 *		This is the list of pages that should be
 *		paged out next.  There are actually two
 *		inactive lists, one for pages brought in from
 *		disk or other backing store, and another
 *		for "zero-filled" pages.  See vm_pageout_scan()
 *		for the distinction and usage.
 *	active
 *		A list of pages which have been placed in
 *		at least one physical map.  This list is
 *		ordered, in LRU-like fashion.
 */


#define VPL_LOCK_SPIN	1

struct vpl {
	vm_page_queue_head_t	vpl_queue;
	unsigned int	vpl_count;
	unsigned int	vpl_internal_count;
	unsigned int	vpl_external_count;
#ifdef VPL_LOCK_SPIN
	lck_spin_t	vpl_lock;
#else
	lck_mtx_t	vpl_lock;
	lck_mtx_ext_t	vpl_lock_ext;
#endif
};

extern
struct vpl	* /* __zpercpu */ vm_page_local_q;
extern
unsigned int	vm_page_local_q_soft_limit;
extern
unsigned int	vm_page_local_q_hard_limit;
extern
vm_locks_array_t	vm_page_locks;

extern
vm_page_queue_head_t	vm_lopage_queue_free;		/* low memory free queue */
extern
vm_page_queue_head_t	vm_page_queue_active;		/* active memory queue */
extern
vm_page_queue_head_t	vm_page_queue_inactive;		/* inactive memory queue for normal pages */
#if CONFIG_SECLUDED_MEMORY
extern
vm_page_queue_head_t	vm_page_queue_secluded;		/* reclaimable pages secluded for Camera */
#endif /* CONFIG_SECLUDED_MEMORY */
extern
vm_page_queue_head_t	vm_page_queue_cleaned;		/* clean-queue inactive memory */
extern
vm_page_queue_head_t	vm_page_queue_anonymous;	/* inactive memory queue for anonymous pages */
extern
vm_page_queue_head_t	vm_page_queue_throttled;	/* memory queue for throttled pageout pages */

extern
queue_head_t	vm_objects_wired;
extern
lck_spin_t	vm_objects_wired_lock;

#if CONFIG_BACKGROUND_QUEUE

#define VM_PAGE_BACKGROUND_TARGET_MAX	50000

#define VM_PAGE_BG_DISABLED	0
#define VM_PAGE_BG_LEVEL_1	1

extern
vm_page_queue_head_t	vm_page_queue_background;
extern
uint64_t	vm_page_background_promoted_count;
extern
uint32_t	vm_page_background_count;
extern
uint32_t	vm_page_background_target;
extern
uint32_t	vm_page_background_internal_count;
extern
uint32_t	vm_page_background_external_count;
extern
uint32_t	vm_page_background_mode;
extern
uint32_t	vm_page_background_exclude_external;

#endif

extern
vm_offset_t	first_phys_addr;	/* physical address for first_page */
extern
vm_offset_t	last_phys_addr;		/* physical address for last_page */

extern
unsigned int	vm_page_free_count;	/* How many pages are free? (sum of all colors) */
extern
unsigned int	vm_page_active_count;	/* How many pages are active? */
extern
unsigned int	vm_page_inactive_count;	/* How many pages are inactive? */
extern
unsigned int	vm_page_kernelcache_count;	/* How many pages are used for the kernelcache? */
#if CONFIG_SECLUDED_MEMORY
extern
unsigned int	vm_page_secluded_count;	/* How many pages are secluded? */
extern
unsigned int	vm_page_secluded_count_free;	/* how many of them are free? */
extern
unsigned int	vm_page_secluded_count_inuse;	/* how many of them are in use? */
/*
 * We keep filling the secluded pool with new eligible pages and
 * we can overshoot our target by a lot.
 * When there's memory pressure, vm_pageout_scan() will re-balance the queues,
 * pushing the extra secluded pages to the active or free queue.
 * Since these "over target" secluded pages are actually "available", jetsam
 * should consider them as such, so make them visible to jetsam via the
 * "vm_page_secluded_count_over_target" counter and update it whenever we
 * update vm_page_secluded_count or vm_page_secluded_target.
 */
extern
unsigned int	vm_page_secluded_count_over_target;
#define VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE()	\
	MACRO_BEGIN					\
	if (vm_page_secluded_count > vm_page_secluded_target) {	\
		vm_page_secluded_count_over_target =	\
		    (vm_page_secluded_count - vm_page_secluded_target);	\
	} else {					\
		vm_page_secluded_count_over_target = 0;	\
	}						\
	MACRO_END
#define VM_PAGE_SECLUDED_COUNT_OVER_TARGET()	vm_page_secluded_count_over_target
#else /* CONFIG_SECLUDED_MEMORY */
#define VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE()	\
	MACRO_BEGIN					\
	MACRO_END
#define VM_PAGE_SECLUDED_COUNT_OVER_TARGET()	0
#endif /* CONFIG_SECLUDED_MEMORY */
extern
unsigned int	vm_page_cleaned_count;	/* How many pages are in the clean queue? */
extern
unsigned int	vm_page_throttled_count;	/* How many inactives are throttled */
extern
unsigned int	vm_page_speculative_count;	/* How many speculative pages are unclaimed? */
extern unsigned int	vm_page_pageable_internal_count;
extern unsigned int	vm_page_pageable_external_count;
extern
unsigned int	vm_page_xpmapped_external_count;	/* How many pages are mapped executable? */
extern
unsigned int	vm_page_external_count;	/* How many pages are file-backed? */
extern
unsigned int	vm_page_internal_count;	/* How many pages are anonymous? */
extern
unsigned int	vm_page_wire_count;	/* How many pages are wired? */
extern
unsigned int	vm_page_wire_count_initial;	/* How many pages wired at startup */
extern
unsigned int	vm_page_wire_count_on_boot;	/* even earlier than _initial */
extern
unsigned int	vm_page_free_target;	/* How many do we want free? */
extern
unsigned int	vm_page_free_min;	/* When to wakeup pageout */
extern
unsigned int	vm_page_throttle_limit;	/* When to throttle new page creation */
extern
unsigned int	vm_page_inactive_target;	/* How many do we want inactive? */
#if CONFIG_SECLUDED_MEMORY
extern
unsigned int	vm_page_secluded_target;	/* How many do we want secluded? */
#endif /* CONFIG_SECLUDED_MEMORY */
extern
unsigned int	vm_page_anonymous_min;	/* When it's ok to pre-clean */
extern
unsigned int	vm_page_free_reserved;	/* How many pages reserved to do pageout */
extern
unsigned int	vm_page_gobble_count;
extern
unsigned int	vm_page_stolen_count;	/* Count of stolen pages not accounted in zones */
extern
unsigned int vm_page_kern_lpage_count; /* Count of large pages used in early boot */


#if DEVELOPMENT || DEBUG
extern
unsigned int vm_page_speculative_used;
#endif

extern
unsigned int vm_page_purgeable_count; /* How many pages are purgeable now? */
extern
unsigned int vm_page_purgeable_wired_count; /* How many purgeable pages are wired now? */
extern
uint64_t vm_page_purged_count; /* How many pages got purged so far? */

extern unsigned int vm_page_free_wanted;
/* how many threads are waiting for memory */

extern unsigned int vm_page_free_wanted_privileged;
/* how many VM privileged threads are waiting for memory */
#if CONFIG_SECLUDED_MEMORY
extern unsigned int vm_page_free_wanted_secluded;
/* how many threads are waiting for secluded memory */
#endif /* CONFIG_SECLUDED_MEMORY */

extern const ppnum_t vm_page_fictitious_addr;
/* (fake) phys_addr of fictitious pages */

extern const ppnum_t vm_page_guard_addr;
/* (fake) phys_addr of guard pages */


extern boolean_t vm_page_deactivate_hint;

extern int vm_compressor_mode;

/*
 * Defaults to true, so highest memory is used first.
 */
extern boolean_t vm_himemory_mode;

extern boolean_t vm_lopage_needed;
extern uint32_t vm_lopage_free_count;
extern uint32_t vm_lopage_free_limit;
extern uint32_t vm_lopage_lowater;
extern boolean_t vm_lopage_refill;
extern uint64_t max_valid_dma_address;
extern ppnum_t max_valid_low_ppnum;

/*
 * Prototypes for functions exported by this module.
 */
extern void vm_page_bootstrap(
	vm_offset_t *startp,
	vm_offset_t *endp);

extern void vm_page_init_local_q(unsigned int num_cpus);

extern void vm_page_create(
	ppnum_t start,
	ppnum_t end);

extern void vm_page_create_retired(
	ppnum_t pn);

extern vm_page_t kdp_vm_page_lookup(
	vm_object_t object,
	vm_object_offset_t offset);

extern vm_page_t vm_page_lookup(
	vm_object_t object,
	vm_object_offset_t offset);

extern vm_page_t vm_page_grab_fictitious(boolean_t canwait);

extern vm_page_t vm_page_grab_guard(boolean_t canwait);

extern void vm_page_release_fictitious(
	vm_page_t page);

extern void vm_free_delayed_pages(void);

extern bool vm_pool_low(void);

extern vm_page_t vm_page_grab(void);
extern vm_page_t vm_page_grab_options(int flags);

#define VM_PAGE_GRAB_OPTIONS_NONE 0x00000000
#if CONFIG_SECLUDED_MEMORY
#define VM_PAGE_GRAB_SECLUDED 0x00000001
#endif /* CONFIG_SECLUDED_MEMORY */
#define VM_PAGE_GRAB_Q_LOCK_HELD 0x00000002

extern vm_page_t vm_page_grablo(void);

extern void vm_page_release(
	vm_page_t page,
	boolean_t page_queues_locked);

extern boolean_t vm_page_wait(
	int interruptible);
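
/*
 * Illustrative sketch (hypothetical caller, not part of this interface):
 * callers that must have a page typically loop, grabbing from the free
 * list and blocking in vm_page_wait() until free pages are replenished.
 * This is the pattern the VM_PAGE_WAIT() convenience macro below wraps.
 *
 *	vm_page_t m;
 *
 *	while ((m = vm_page_grab()) == VM_PAGE_NULL) {
 *		if (!vm_page_wait(THREAD_UNINT)) {
 *			break;	// wait was interrupted; no page obtained
 *		}
 *	}
 */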

extern vm_page_t vm_page_alloc(
	vm_object_t object,
	vm_object_offset_t offset);

extern void vm_page_init(
	vm_page_t page,
	ppnum_t phys_page,
	boolean_t lopage);

extern void vm_page_free(
	vm_page_t page);

extern void vm_page_free_unlocked(
	vm_page_t page,
	boolean_t remove_from_hash);

extern void vm_page_balance_inactive(
	int max_to_move);

extern void vm_page_activate(
	vm_page_t page);

extern void vm_page_deactivate(
	vm_page_t page);

extern void vm_page_deactivate_internal(
	vm_page_t page,
	boolean_t clear_hw_reference);

extern void vm_page_enqueue_cleaned(vm_page_t page);

extern void vm_page_lru(
	vm_page_t page);

extern void vm_page_speculate(
	vm_page_t page,
	boolean_t new);

extern void vm_page_speculate_ageit(
	struct vm_speculative_age_q *aq);

extern void vm_page_reactivate_all_throttled(void);

extern void vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks);

extern void vm_page_rename(
	vm_page_t page,
	vm_object_t new_object,
	vm_object_offset_t new_offset);

extern void vm_page_insert(
	vm_page_t page,
	vm_object_t object,
	vm_object_offset_t offset);

extern void vm_page_insert_wired(
	vm_page_t page,
	vm_object_t object,
	vm_object_offset_t offset,
	vm_tag_t tag);

extern void vm_page_insert_internal(
	vm_page_t page,
	vm_object_t object,
	vm_object_offset_t offset,
	vm_tag_t tag,
	boolean_t queues_lock_held,
	boolean_t insert_in_hash,
	boolean_t batch_pmap_op,
	boolean_t delayed_accounting,
	uint64_t *delayed_ledger_update);

extern void vm_page_replace(
	vm_page_t mem,
	vm_object_t object,
	vm_object_offset_t offset);

extern void vm_page_remove(
	vm_page_t page,
	boolean_t remove_from_hash);

extern void vm_page_zero_fill(
	vm_page_t page);

extern void vm_page_part_zero_fill(
	vm_page_t m,
	vm_offset_t m_pa,
	vm_size_t len);

extern void vm_page_copy(
	vm_page_t src_page,
	vm_page_t dest_page);

extern void vm_page_part_copy(
	vm_page_t src_m,
	vm_offset_t src_pa,
	vm_page_t dst_m,
	vm_offset_t dst_pa,
	vm_size_t len);

extern void vm_page_wire(
	vm_page_t page,
	vm_tag_t tag,
	boolean_t check_memorystatus);

extern void vm_page_unwire(
	vm_page_t page,
	boolean_t queueit);

extern void vm_set_page_size(void);

extern void vm_page_gobble(
	vm_page_t page);

extern void vm_page_validate_cs(
	vm_page_t page,
	vm_map_size_t fault_page_size,
	vm_map_offset_t fault_phys_offset);
extern void vm_page_validate_cs_mapped(
	vm_page_t page,
	vm_map_size_t fault_page_size,
	vm_map_offset_t fault_phys_offset,
	const void *kaddr);
extern void vm_page_validate_cs_mapped_slow(
	vm_page_t page,
	const void *kaddr);
extern void vm_page_validate_cs_mapped_chunk(
	vm_page_t page,
	const void *kaddr,
	vm_offset_t chunk_offset,
	vm_size_t chunk_size,
	boolean_t *validated,
	unsigned *tainted);

extern void vm_page_free_prepare_queues(
	vm_page_t page);

extern void vm_page_free_prepare_object(
	vm_page_t page,
	boolean_t remove_from_hash);

#if CONFIG_IOSCHED
extern wait_result_t vm_page_sleep(
	vm_object_t object,
	vm_page_t m,
	int interruptible);
#endif

extern void vm_pressure_response(void);

#if CONFIG_JETSAM
extern void memorystatus_pages_update(unsigned int pages_avail);

#define VM_CHECK_MEMORYSTATUS do { \
	memorystatus_pages_update( \
		vm_page_pageable_external_count + \
		vm_page_free_count + \
		VM_PAGE_SECLUDED_COUNT_OVER_TARGET() + \
		(VM_DYNAMIC_PAGING_ENABLED() ? 0 : vm_page_purgeable_count) \
		); \
} while (0)

#else /* CONFIG_JETSAM */

#if !XNU_TARGET_OS_OSX

#define VM_CHECK_MEMORYSTATUS do {} while (0)

#else /* !XNU_TARGET_OS_OSX */

#define VM_CHECK_MEMORYSTATUS vm_pressure_response()

#endif /* !XNU_TARGET_OS_OSX */

#endif /* CONFIG_JETSAM */

/*
 * Functions implemented as macros. m->vmp_wanted and m->vmp_busy are
 * protected by the object lock.
 */

#if !XNU_TARGET_OS_OSX
#define SET_PAGE_DIRTY(m, set_pmap_modified) \
	MACRO_BEGIN \
	vm_page_t __page__ = (m); \
	if (__page__->vmp_pmapped == TRUE && \
	    __page__->vmp_wpmapped == TRUE && \
	    __page__->vmp_dirty == FALSE && \
	    (set_pmap_modified)) { \
		pmap_set_modify(VM_PAGE_GET_PHYS_PAGE(__page__)); \
	} \
	__page__->vmp_dirty = TRUE; \
	MACRO_END
#else /* !XNU_TARGET_OS_OSX */
#define SET_PAGE_DIRTY(m, set_pmap_modified) \
	MACRO_BEGIN \
	vm_page_t __page__ = (m); \
	__page__->vmp_dirty = TRUE; \
	MACRO_END
#endif /* !XNU_TARGET_OS_OSX */
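
/*
 * Illustrative sketch (hypothetical caller, with the object lock held):
 * after modifying a page's contents through a kernel mapping, mark it
 * dirty so the pageout code knows it must be cleaned before reuse.
 * Passing TRUE for set_pmap_modified also pushes the modified state
 * down to the pmap layer on configurations that track it there.
 *
 *	SET_PAGE_DIRTY(m, TRUE);
 */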

#define PAGE_ASSERT_WAIT(m, interruptible) \
	(((m)->vmp_wanted = TRUE), \
	assert_wait((event_t) (m), (interruptible)))

#if CONFIG_IOSCHED
#define PAGE_SLEEP(o, m, interruptible) \
	vm_page_sleep(o, m, interruptible)
#else
#define PAGE_SLEEP(o, m, interruptible) \
	(((m)->vmp_wanted = TRUE), \
	thread_sleep_vm_object((o), (m), (interruptible)))
#endif

#define PAGE_WAKEUP_DONE(m) \
	MACRO_BEGIN \
	(m)->vmp_busy = FALSE; \
	if ((m)->vmp_wanted) { \
		(m)->vmp_wanted = FALSE; \
		thread_wakeup((event_t) (m)); \
	} \
	MACRO_END

#define PAGE_WAKEUP(m) \
	MACRO_BEGIN \
	if ((m)->vmp_wanted) { \
		(m)->vmp_wanted = FALSE; \
		thread_wakeup((event_t) (m)); \
	} \
	MACRO_END
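
/*
 * Illustrative sketch (hypothetical caller, not part of this interface):
 * the busy/wanted handshake.  A thread that finds a page busy records
 * its interest and sleeps; the thread that owns the page clears
 * vmp_busy and wakes any waiters with PAGE_WAKEUP_DONE() when it is
 * finished with the page.
 *
 *	// waiter, object lock held:
 *	while (m->vmp_busy) {
 *		PAGE_SLEEP(object, m, THREAD_UNINT);
 *	}
 *
 *	// owner, when done operating on the busy page:
 *	PAGE_WAKEUP_DONE(m);
 */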

#define VM_PAGE_FREE(p) \
	MACRO_BEGIN \
	vm_page_free_unlocked(p, TRUE); \
	MACRO_END

#define VM_PAGE_WAIT() ((void)vm_page_wait(THREAD_UNINT))

#define vm_page_queue_lock (vm_page_locks.vm_page_queue_lock2)
#define vm_page_queue_free_lock (vm_page_locks.vm_page_queue_free_lock2)

#define vm_page_lock_queues() lck_mtx_lock(&vm_page_queue_lock)
#define vm_page_trylock_queues() lck_mtx_try_lock(&vm_page_queue_lock)
#define vm_page_unlock_queues() lck_mtx_unlock(&vm_page_queue_lock)

#define vm_page_lockspin_queues() lck_mtx_lock_spin(&vm_page_queue_lock)
#define vm_page_trylockspin_queues() lck_mtx_try_lock_spin(&vm_page_queue_lock)
#define vm_page_lockconvert_queues() lck_mtx_convert_spin(&vm_page_queue_lock)
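
/*
 * Illustrative sketch (hypothetical caller): page-queue manipulations
 * are bracketed by the page-queue lock.  The spin variant is preferred
 * for short critical sections; a holder that may need to block can
 * convert it to a full mutex with vm_page_lockconvert_queues().
 *
 *	vm_page_lockspin_queues();
 *	vm_page_activate(m);
 *	vm_page_unlock_queues();
 */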

#ifdef VPL_LOCK_SPIN
extern lck_grp_t vm_page_lck_grp_local;

#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_spin_init(&vlq->vpl_lock, vpl_grp, vpl_attr)
#define VPL_LOCK(vpl) lck_spin_lock_grp(vpl, &vm_page_lck_grp_local)
#define VPL_UNLOCK(vpl) lck_spin_unlock(vpl)
#else
#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_mtx_init_ext(&vlq->vpl_lock, &vlq->vpl_lock_ext, vpl_grp, vpl_attr)
#define VPL_LOCK(vpl) lck_mtx_lock_spin(vpl)
#define VPL_UNLOCK(vpl) lck_mtx_unlock(vpl)
#endif


#if DEVELOPMENT || DEBUG
#define VM_PAGE_SPECULATIVE_USED_ADD() \
	MACRO_BEGIN \
	OSAddAtomic(1, &vm_page_speculative_used); \
	MACRO_END
#else
#define VM_PAGE_SPECULATIVE_USED_ADD()
#endif


#define VM_PAGE_CONSUME_CLUSTERED(mem) \
	MACRO_BEGIN \
	ppnum_t __phys_page; \
	__phys_page = VM_PAGE_GET_PHYS_PAGE(mem); \
	pmap_lock_phys_page(__phys_page); \
	if (mem->vmp_clustered) { \
		vm_object_t o; \
		o = VM_PAGE_OBJECT(mem); \
		assert(o); \
		o->pages_used++; \
		mem->vmp_clustered = FALSE; \
		VM_PAGE_SPECULATIVE_USED_ADD(); \
	} \
	pmap_unlock_phys_page(__phys_page); \
	MACRO_END


#define VM_PAGE_COUNT_AS_PAGEIN(mem) \
	MACRO_BEGIN \
	{ \
		vm_object_t o; \
		o = VM_PAGE_OBJECT(mem); \
		DTRACE_VM2(pgin, int, 1, (uint64_t *), NULL); \
		current_task()->pageins++; \
		if (o->internal) { \
			DTRACE_VM2(anonpgin, int, 1, (uint64_t *), NULL); \
		} else { \
			DTRACE_VM2(fspgin, int, 1, (uint64_t *), NULL); \
		} \
	} \
	MACRO_END

/* adjust for stolen pages accounted elsewhere */
#define VM_PAGE_MOVE_STOLEN(page_count) \
	MACRO_BEGIN \
	vm_page_stolen_count -= (page_count); \
	vm_page_wire_count_initial -= (page_count); \
	MACRO_END

#define DW_vm_page_unwire 0x01
#define DW_vm_page_wire 0x02
#define DW_vm_page_free 0x04
#define DW_vm_page_activate 0x08
#define DW_vm_page_deactivate_internal 0x10
#define DW_vm_page_speculate 0x20
#define DW_vm_page_lru 0x40
#define DW_vm_pageout_throttle_up 0x80
#define DW_PAGE_WAKEUP 0x100
#define DW_clear_busy 0x200
#define DW_clear_reference 0x400
#define DW_set_reference 0x800
#define DW_move_page 0x1000
#define DW_VM_PAGE_QUEUES_REMOVE 0x2000
#define DW_enqueue_cleaned 0x4000
#define DW_vm_phantom_cache_update 0x8000

struct vm_page_delayed_work {
	vm_page_t dw_m;
	int dw_mask;
};

#define DEFAULT_DELAYED_WORK_LIMIT 32

struct vm_page_delayed_work_ctx {
	struct vm_page_delayed_work dwp[DEFAULT_DELAYED_WORK_LIMIT];
	thread_t delayed_owner;
};

void vm_page_do_delayed_work(vm_object_t object, vm_tag_t tag, struct vm_page_delayed_work *dwp, int dw_count);

extern unsigned int vm_max_delayed_work_limit;

extern void vm_page_delayed_work_init_ctx(void);

#define DELAYED_WORK_LIMIT(max) ((vm_max_delayed_work_limit >= max ? max : vm_max_delayed_work_limit))

/*
 * vm_page_do_delayed_work may need to drop the object lock.  If it
 * does, the pages it is working on must be kept stable via the busy
 * bit, so if busy isn't already set, we set it here and ask
 * vm_page_do_delayed_work to clear it and wake up anyone who may have
 * blocked on it once the page has been processed.
 */

#define VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_cnt) \
	MACRO_BEGIN \
	if (mem->vmp_busy == FALSE) { \
		mem->vmp_busy = TRUE; \
		if (!(dwp->dw_mask & DW_vm_page_free)) { \
			dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); \
		} \
	} \
	dwp->dw_m = mem; \
	dwp++; \
	dw_cnt++; \
	MACRO_END
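
/*
 * Illustrative sketch (hypothetical caller, not part of this interface):
 * batching deferred page operations under the object lock.  Pages are
 * accumulated into a delayed-work array and flushed through
 * vm_page_do_delayed_work() whenever the batch fills, so the page-queue
 * lock is taken once per batch rather than once per page.
 *
 *	struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
 *	struct vm_page_delayed_work *dwp = &dw_array[0];
 *	int dw_count = 0;
 *	int dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
 *
 *	// for each page "mem" of interest:
 *	dwp->dw_mask = DW_vm_page_activate;
 *	VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_count);
 *	if (dw_count >= dw_limit) {
 *		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE,
 *		    &dw_array[0], dw_count);
 *		dwp = &dw_array[0];
 *		dw_count = 0;
 *	}
 *
 *	// after the loop, flush any remainder:
 *	if (dw_count) {
 *		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE,
 *		    &dw_array[0], dw_count);
 *	}
 */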

extern vm_page_t vm_object_page_grab(vm_object_t);

#if VM_PAGE_BUCKETS_CHECK
extern void vm_page_buckets_check(void);
#endif /* VM_PAGE_BUCKETS_CHECK */

extern void vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_backgroundq);
extern void vm_page_remove_internal(vm_page_t page);
extern void vm_page_enqueue_inactive(vm_page_t mem, boolean_t first);
extern void vm_page_enqueue_active(vm_page_t mem, boolean_t first);
extern void vm_page_check_pageable_safe(vm_page_t page);

#if CONFIG_SECLUDED_MEMORY
extern uint64_t secluded_shutoff_trigger;
extern uint64_t secluded_shutoff_headroom;
extern void start_secluded_suppression(task_t);
extern void stop_secluded_suppression(task_t);
#endif /* CONFIG_SECLUDED_MEMORY */

extern void vm_retire_boot_pages(void);
extern uint32_t vm_retired_pages_count(void);

#endif /* _VM_VM_PAGE_H_ */