apple/xnu.git: osfmk/vm/vm_resident.c
1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm/vm_page.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 *
62 * Resident memory management module.
63 */
64
65 #include <debug.h>
66 #include <libkern/OSAtomic.h>
67 #include <libkern/OSDebug.h>
68
69 #include <mach/clock_types.h>
70 #include <mach/vm_prot.h>
71 #include <mach/vm_statistics.h>
72 #include <mach/sdt.h>
73 #include <kern/counters.h>
74 #include <kern/host_statistics.h>
75 #include <kern/sched_prim.h>
76 #include <kern/policy_internal.h>
77 #include <kern/task.h>
78 #include <kern/thread.h>
79 #include <kern/kalloc.h>
80 #include <kern/zalloc_internal.h>
81 #include <kern/ledger.h>
82 #include <vm/pmap.h>
83 #include <vm/vm_init.h>
84 #include <vm/vm_map.h>
85 #include <vm/vm_page.h>
86 #include <vm/vm_pageout.h>
87 #include <vm/vm_kern.h> /* kernel_memory_allocate() */
88 #include <kern/misc_protos.h>
89 #include <mach_debug/zone_info.h>
90 #include <vm/cpm.h>
91 #include <pexpert/pexpert.h>
92 #include <san/kasan.h>
93
94 #include <vm/vm_protos.h>
95 #include <vm/memory_object.h>
96 #include <vm/vm_purgeable_internal.h>
97 #include <vm/vm_compressor.h>
98 #if defined (__x86_64__)
99 #include <i386/misc_protos.h>
100 #endif
101
102 #if CONFIG_PHANTOM_CACHE
103 #include <vm/vm_phantom_cache.h>
104 #endif
105
106 #if HIBERNATION
107 #include <IOKit/IOHibernatePrivate.h>
108 #include <machine/pal_hibernate.h>
109 #endif /* HIBERNATION */
110
111 #include <sys/kdebug.h>
112
113 #if defined(HAS_APPLE_PAC)
114 #include <ptrauth.h>
115 #endif
116 #if defined(__arm64__)
117 #include <arm/cpu_internal.h>
118 #endif /* defined(__arm64__) */
119
120 #if MACH_ASSERT
121
122 #define ASSERT_PMAP_FREE(mem) pmap_assert_free(VM_PAGE_GET_PHYS_PAGE(mem))
123
124 #else /* MACH_ASSERT */
125
126 #define ASSERT_PMAP_FREE(mem) /* nothing */
127
128 #endif /* MACH_ASSERT */
129
130 extern boolean_t vm_pageout_running;
131 extern thread_t vm_pageout_scan_thread;
132 extern boolean_t vps_dynamic_priority_enabled;
133
134 char vm_page_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
135 char vm_page_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
136 char vm_page_non_speculative_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
137 char vm_page_active_or_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
138
139 #if CONFIG_SECLUDED_MEMORY
140 struct vm_page_secluded_data vm_page_secluded;
141 #endif /* CONFIG_SECLUDED_MEMORY */
142
143 #if DEVELOPMENT || DEBUG
144 extern struct memory_object_pager_ops shared_region_pager_ops;
145 unsigned int shared_region_pagers_resident_count = 0;
146 unsigned int shared_region_pagers_resident_peak = 0;
147 #endif /* DEVELOPMENT || DEBUG */
148
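/*
 * Per-CPU free-page cache: each CPU keeps a short private list of free
 * pages (free_pages) and the free-list color to start allocating from
 * (start_color), so most page grabs can avoid the global free-queue lock.
 */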
149 int PERCPU_DATA(start_color);
150 vm_page_t PERCPU_DATA(free_pages);
151 boolean_t hibernate_cleaning_in_progress = FALSE;
152 boolean_t vm_page_free_verify = TRUE;
153
154 uint32_t vm_lopage_free_count = 0;
155 uint32_t vm_lopage_free_limit = 0;
156 uint32_t vm_lopage_lowater = 0;
157 boolean_t vm_lopage_refill = FALSE;
158 boolean_t vm_lopage_needed = FALSE;
159
160 lck_mtx_ext_t vm_page_queue_lock_ext;
161 lck_mtx_ext_t vm_page_queue_free_lock_ext;
162 lck_mtx_ext_t vm_purgeable_queue_lock_ext;
163
164 int speculative_age_index = 0;
165 int speculative_steal_index = 0;
166 struct vm_speculative_age_q vm_page_queue_speculative[VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1];
167
168 boolean_t hibernation_vmqueues_inspection = FALSE; /* Tracks if the hibernation code is looking at the VM queues.
169 * Updated and checked behind the vm_page_queues_lock. */
170
171 static void vm_page_free_prepare(vm_page_t page);
172 static vm_page_t vm_page_grab_fictitious_common(ppnum_t phys_addr);
173
174 static void vm_tag_init(void);
175
176 /* for debugging purposes */
177 SECURITY_READ_ONLY_EARLY(vm_packing_params_t) vm_page_packing_params =
178 VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR);
179
180 /*
181 * Associated with each page of user-allocatable memory is a
182 * page structure.
183 */
184
185 /*
186 * These variables record the values returned by vm_page_bootstrap,
187 * for debugging purposes. The implementation of pmap_steal_memory
188 * and pmap_startup here also uses them internally.
189 */
190
191 vm_offset_t virtual_space_start;
192 vm_offset_t virtual_space_end;
193 uint32_t vm_page_pages;
194
195 /*
196 * The vm_page_lookup() routine, which provides for fast
197 * (virtual memory object, offset) to page lookup, employs
198 * the following hash table. The vm_page_{insert,remove}
199 * routines install and remove associations in the table.
200 * [This table is often called the virtual-to-physical,
201 * or VP, table.]
202 */
203 typedef struct {
204 vm_page_packed_t page_list;
205 #if MACH_PAGE_HASH_STATS
206 int cur_count; /* current count */
207 int hi_count; /* high water mark */
208 #endif /* MACH_PAGE_HASH_STATS */
209 } vm_page_bucket_t;
210
211
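/*
 * Hash bucket locks are shared: each lock in vm_page_bucket_locks covers
 * BUCKETS_PER_LOCK consecutive buckets, indexed by hash_id / BUCKETS_PER_LOCK
 * (see vm_page_insert_internal() below).
 */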
212 #define BUCKETS_PER_LOCK 16
213
214 vm_page_bucket_t *vm_page_buckets; /* Array of buckets */
215 unsigned int vm_page_bucket_count = 0; /* How big is array? */
216 unsigned int vm_page_hash_mask; /* Mask for hash function */
217 unsigned int vm_page_hash_shift; /* Shift for hash function */
218 uint32_t vm_page_bucket_hash; /* Basic bucket hash */
219 unsigned int vm_page_bucket_lock_count = 0; /* How big is array of locks? */
220
221 #ifndef VM_TAG_ACTIVE_UPDATE
222 #error VM_TAG_ACTIVE_UPDATE
223 #endif
224 #ifndef VM_MAX_TAG_ZONES
225 #error VM_MAX_TAG_ZONES
226 #endif
227
228 boolean_t vm_tag_active_update = VM_TAG_ACTIVE_UPDATE;
229 lck_spin_t *vm_page_bucket_locks;
230
231 vm_allocation_site_t vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC + 1];
232 vm_allocation_site_t * vm_allocation_sites[VM_MAX_TAG_VALUE];
233 #if VM_MAX_TAG_ZONES
234 vm_allocation_zone_total_t ** vm_allocation_zone_totals;
235 #endif /* VM_MAX_TAG_ZONES */
236
237 vm_tag_t vm_allocation_tag_highest;
238
239 #if VM_PAGE_BUCKETS_CHECK
240 boolean_t vm_page_buckets_check_ready = FALSE;
241 #if VM_PAGE_FAKE_BUCKETS
242 vm_page_bucket_t *vm_page_fake_buckets; /* decoy buckets */
243 vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
244 #endif /* VM_PAGE_FAKE_BUCKETS */
245 #endif /* VM_PAGE_BUCKETS_CHECK */
246
247
248
249 #if MACH_PAGE_HASH_STATS
250 /* This routine is only for debug. It is intended to be called by
251 * hand by a developer using a kernel debugger. This routine prints
252 * out vm_page_hash table statistics to the kernel debug console.
253 */
254 void
255 hash_debug(void)
256 {
257 int i;
258 int numbuckets = 0;
259 int highsum = 0;
260 int maxdepth = 0;
261
262 for (i = 0; i < vm_page_bucket_count; i++) {
263 if (vm_page_buckets[i].hi_count) {
264 numbuckets++;
265 highsum += vm_page_buckets[i].hi_count;
266 if (vm_page_buckets[i].hi_count > maxdepth) {
267 maxdepth = vm_page_buckets[i].hi_count;
268 }
269 }
270 }
271 printf("Total number of buckets: %d\n", vm_page_bucket_count);
272 printf("Number used buckets: %d = %d%%\n",
273 numbuckets, 100 * numbuckets / vm_page_bucket_count);
274 printf("Number unused buckets: %d = %d%%\n",
275 vm_page_bucket_count - numbuckets,
276 100 * (vm_page_bucket_count - numbuckets) / vm_page_bucket_count);
277 printf("Sum of bucket max depth: %d\n", highsum);
278 printf("Average bucket depth: %d.%02d\n",
279     highsum / vm_page_bucket_count,
280     (highsum % vm_page_bucket_count) * 100 / vm_page_bucket_count);
281 printf("Maximum bucket depth: %d\n", maxdepth);
282 }
283 #endif /* MACH_PAGE_HASH_STATS */
284
285 /*
286 * The virtual page size is currently implemented as a runtime
287 * variable, but is constant once initialized using vm_set_page_size.
288 * This initialization must be done in the machine-dependent
289 * bootstrap sequence, before calling other machine-independent
290 * initializations.
291 *
292 * All references to the virtual page size outside this
293 * module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
294 * constants.
295 */
296 #if defined(__arm__) || defined(__arm64__)
297 vm_size_t page_size;
298 vm_size_t page_mask;
299 int page_shift;
300 #else
301 vm_size_t page_size = PAGE_SIZE;
302 vm_size_t page_mask = PAGE_MASK;
303 int page_shift = PAGE_SHIFT;
304 #endif
305
306 SECURITY_READ_ONLY_LATE(vm_page_t) vm_pages = VM_PAGE_NULL;
307 SECURITY_READ_ONLY_LATE(vm_page_t) vm_page_array_beginning_addr;
308 vm_page_t vm_page_array_ending_addr;
309
310 unsigned int vm_pages_count = 0;
311
312 /*
313 * Resident pages that represent real memory
314 * are allocated from a set of free lists,
315 * one per color.
316 */
317 unsigned int vm_colors;
318 unsigned int vm_color_mask; /* mask is == (vm_colors-1) */
319 unsigned int vm_cache_geometry_colors = 0; /* set by hw dependent code during startup */
320 unsigned int vm_free_magazine_refill_limit = 0;
321
322
323 struct vm_page_queue_free_head {
324 vm_page_queue_head_t qhead;
325 } VM_PAGE_PACKED_ALIGNED;
326
327 struct vm_page_queue_free_head vm_page_queue_free[MAX_COLORS];
328
329
330 unsigned int vm_page_free_wanted;
331 unsigned int vm_page_free_wanted_privileged;
332 #if CONFIG_SECLUDED_MEMORY
333 unsigned int vm_page_free_wanted_secluded;
334 #endif /* CONFIG_SECLUDED_MEMORY */
335 unsigned int vm_page_free_count;
336
337 /*
338 * Occasionally, the virtual memory system uses
339 * resident page structures that do not refer to
340 * real pages, for example to leave a page with
341 * important state information in the VP table.
342 *
343 * These page structures are allocated the way
344 * most other kernel structures are.
345 */
346 SECURITY_READ_ONLY_LATE(zone_t) vm_page_zone;
347 vm_locks_array_t vm_page_locks;
348
349 LCK_ATTR_DECLARE(vm_page_lck_attr, 0, 0);
350 LCK_GRP_DECLARE(vm_page_lck_grp_free, "vm_page_free");
351 LCK_GRP_DECLARE(vm_page_lck_grp_queue, "vm_page_queue");
352 LCK_GRP_DECLARE(vm_page_lck_grp_local, "vm_page_queue_local");
353 LCK_GRP_DECLARE(vm_page_lck_grp_purge, "vm_page_purge");
354 LCK_GRP_DECLARE(vm_page_lck_grp_alloc, "vm_page_alloc");
355 LCK_GRP_DECLARE(vm_page_lck_grp_bucket, "vm_page_bucket");
356 LCK_MTX_EARLY_DECLARE_ATTR(vm_page_alloc_lock, &vm_page_lck_grp_alloc, &vm_page_lck_attr);
357 LCK_SPIN_DECLARE_ATTR(vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
358 LCK_SPIN_DECLARE_ATTR(vm_allocation_sites_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
359
360 unsigned int vm_page_local_q_soft_limit = 250;
361 unsigned int vm_page_local_q_hard_limit = 500;
362 struct vpl *__zpercpu vm_page_local_q;
363
364 /* N.B. Guard and fictitious pages must not
365 * be assigned a zero phys_page value.
366 */
367 /*
368 * Fictitious pages don't have a physical address,
369 * but we must initialize phys_page to something.
370 * For debugging, this should be a strange value
371 * that the pmap module can recognize in assertions.
372 */
373 const ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;
374
375 /*
376 * Guard pages are not accessible so they don't
377 * need a physical address, but we need to enter
378 * one in the pmap.
379 * Let's make it recognizable and make sure that
380 * we don't use a real physical page with that
381 * physical address.
382 */
383 const ppnum_t vm_page_guard_addr = (ppnum_t) -2;
384
385 /*
386 * Resident page structures are also chained on
387 * queues that are used by the page replacement
388 * system (pageout daemon). These queues are
389 * defined here, but are shared by the pageout
390 * module. The inactive queue is broken into
391 * file-backed and anonymous for convenience, as the
392 * pageout daemon often assigns a higher
393 * importance to anonymous pages (less likely to be picked).
394 */
395 vm_page_queue_head_t vm_page_queue_active VM_PAGE_PACKED_ALIGNED;
396 vm_page_queue_head_t vm_page_queue_inactive VM_PAGE_PACKED_ALIGNED;
397 #if CONFIG_SECLUDED_MEMORY
398 vm_page_queue_head_t vm_page_queue_secluded VM_PAGE_PACKED_ALIGNED;
399 #endif /* CONFIG_SECLUDED_MEMORY */
400 vm_page_queue_head_t vm_page_queue_anonymous VM_PAGE_PACKED_ALIGNED; /* inactive memory queue for anonymous pages */
401 vm_page_queue_head_t vm_page_queue_throttled VM_PAGE_PACKED_ALIGNED;
402
403 queue_head_t vm_objects_wired;
404
405 void vm_update_darkwake_mode(boolean_t);
406
407 #if CONFIG_BACKGROUND_QUEUE
408 vm_page_queue_head_t vm_page_queue_background VM_PAGE_PACKED_ALIGNED;
409 uint32_t vm_page_background_target;
410 uint32_t vm_page_background_target_snapshot;
411 uint32_t vm_page_background_count;
412 uint64_t vm_page_background_promoted_count;
413
414 uint32_t vm_page_background_internal_count;
415 uint32_t vm_page_background_external_count;
416
417 uint32_t vm_page_background_mode;
418 uint32_t vm_page_background_exclude_external;
419 #endif
420
421 unsigned int vm_page_active_count;
422 unsigned int vm_page_inactive_count;
423 unsigned int vm_page_kernelcache_count;
424 #if CONFIG_SECLUDED_MEMORY
425 unsigned int vm_page_secluded_count;
426 unsigned int vm_page_secluded_count_free;
427 unsigned int vm_page_secluded_count_inuse;
428 unsigned int vm_page_secluded_count_over_target;
429 #endif /* CONFIG_SECLUDED_MEMORY */
430 unsigned int vm_page_anonymous_count;
431 unsigned int vm_page_throttled_count;
432 unsigned int vm_page_speculative_count;
433
434 unsigned int vm_page_wire_count;
435 unsigned int vm_page_wire_count_on_boot = 0;
436 unsigned int vm_page_stolen_count = 0;
437 unsigned int vm_page_wire_count_initial;
438 unsigned int vm_page_gobble_count = 0;
439 unsigned int vm_page_kern_lpage_count = 0;
440
441 uint64_t booter_size; /* external so it can be found in core dumps */
442
443 #define VM_PAGE_WIRE_COUNT_WARNING 0
444 #define VM_PAGE_GOBBLE_COUNT_WARNING 0
445
446 unsigned int vm_page_purgeable_count = 0; /* # of pages purgeable now */
447 unsigned int vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */
448 uint64_t vm_page_purged_count = 0; /* total count of purged pages */
449
450 unsigned int vm_page_xpmapped_external_count = 0;
451 unsigned int vm_page_external_count = 0;
452 unsigned int vm_page_internal_count = 0;
453 unsigned int vm_page_pageable_external_count = 0;
454 unsigned int vm_page_pageable_internal_count = 0;
455
456 #if DEVELOPMENT || DEBUG
457 unsigned int vm_page_speculative_recreated = 0;
458 unsigned int vm_page_speculative_created = 0;
459 unsigned int vm_page_speculative_used = 0;
460 #endif
461
462 vm_page_queue_head_t vm_page_queue_cleaned VM_PAGE_PACKED_ALIGNED;
463
464 unsigned int vm_page_cleaned_count = 0;
465
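/*
 * Physical limits for "low" memory: pages whose physical page number is
 * below max_valid_low_ppnum count toward the lopage pool (see low_page_count
 * in pmap_startup()), used by callers that need physically low pages,
 * e.g. for DMA-limited devices.
 */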
466 uint64_t max_valid_dma_address = 0xffffffffffffffffULL;
467 ppnum_t max_valid_low_ppnum = PPNUM_MAX;
468
469
470 /*
471 * Several page replacement parameters are also
472 * shared with this module, so that page allocation
473 * (done here in vm_page_alloc) can trigger the
474 * pageout daemon.
475 */
476 unsigned int vm_page_free_target = 0;
477 unsigned int vm_page_free_min = 0;
478 unsigned int vm_page_throttle_limit = 0;
479 unsigned int vm_page_inactive_target = 0;
480 #if CONFIG_SECLUDED_MEMORY
481 unsigned int vm_page_secluded_target = 0;
482 #endif /* CONFIG_SECLUDED_MEMORY */
483 unsigned int vm_page_anonymous_min = 0;
484 unsigned int vm_page_free_reserved = 0;
485
486
487 /*
488 * The VM system has a couple of heuristics for deciding
489 * that pages are "uninteresting" and should be placed
490 * on the inactive queue as likely candidates for replacement.
491 * These variables let the heuristics be controlled at run-time
492 * to make experimentation easier.
493 */
494
495 boolean_t vm_page_deactivate_hint = TRUE;
496
497 struct vm_page_stats_reusable vm_page_stats_reusable;
498
499 /*
500 * vm_set_page_size:
501 *
502 * Sets the page size, perhaps based upon the memory
503 * size. Must be called before any use of page-size
504 * dependent functions.
505 *
506 * Sets page_shift and page_mask from page_size.
507 */
508 void
509 vm_set_page_size(void)
510 {
511 page_size = PAGE_SIZE;
512 page_mask = PAGE_MASK;
513 page_shift = PAGE_SHIFT;
514
515 if ((page_mask & page_size) != 0) {
516 panic("vm_set_page_size: page size not a power of two");
517 }
518
519 for (page_shift = 0;; page_shift++) {
520 if ((1U << page_shift) == page_size) {
521 break;
522 }
523 }
524 }
525
526 #if defined (__x86_64__)
527
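/*
 * Clump sizes are in pages and must be a power of 2; the "clump_size"
 * boot-arg can override the default (see vm_page_setup_clump() below).
 */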
528 #define MAX_CLUMP_SIZE 16
529 #define DEFAULT_CLUMP_SIZE 4
530
531 unsigned int vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;
532
533 #if DEVELOPMENT || DEBUG
534 unsigned long vm_clump_stats[MAX_CLUMP_SIZE + 1];
535 unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;
536
537 static inline void
538 vm_clump_update_stats(unsigned int c)
539 {
540 assert(c <= vm_clump_size);
541 if (c > 0 && c <= vm_clump_size) {
542 vm_clump_stats[c] += c;
543 }
544 vm_clump_allocs += c;
545 }
546 #endif /* if DEVELOPMENT || DEBUG */
547
548 /* Called once to setup the VM clump knobs */
549 static void
550 vm_page_setup_clump( void )
551 {
552 unsigned int override, n;
553
554 vm_clump_size = DEFAULT_CLUMP_SIZE;
555 if (PE_parse_boot_argn("clump_size", &override, sizeof(override))) {
556 vm_clump_size = override;
557 }
558
559 if (vm_clump_size > MAX_CLUMP_SIZE) {
560 panic("vm_page_setup_clump:: clump_size is too large!");
561 }
562 if (vm_clump_size < 1) {
563 panic("vm_page_setup_clump:: clump_size must be >= 1");
564 }
565 if ((vm_clump_size & (vm_clump_size - 1)) != 0) {
566 panic("vm_page_setup_clump:: clump_size must be a power of 2");
567 }
568
569 vm_clump_promote_threshold = vm_clump_size;
570 vm_clump_mask = vm_clump_size - 1;
571 for (vm_clump_shift = 0, n = vm_clump_size; n > 1; n >>= 1, vm_clump_shift++) {
572 ;
573 }
574
575 #if DEVELOPMENT || DEBUG
576 bzero(vm_clump_stats, sizeof(vm_clump_stats));
577 vm_clump_allocs = vm_clump_inserts = vm_clump_inrange = vm_clump_promotes = 0;
578 #endif /* if DEVELOPMENT || DEBUG */
579 }
580
581 #endif /* #if defined (__x86_64__) */
582
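/*
 * Number of color groups worth of pages pulled from the global free list
 * when refilling a per-CPU free-page magazine:
 * vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL,
 * further scaled on x86_64 in vm_page_set_colors().
 */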
583 #define COLOR_GROUPS_TO_STEAL 4
584
585 /* Called once during startup, once the cache geometry is known.
586 */
587 static void
588 vm_page_set_colors( void )
589 {
590 unsigned int n, override;
591
592 #if defined (__x86_64__)
593 /* adjust #colors because we need to color outside the clump boundary */
594 vm_cache_geometry_colors >>= vm_clump_shift;
595 #endif
596 if (PE_parse_boot_argn("colors", &override, sizeof(override))) { /* colors specified as a boot-arg? */
597 n = override;
598 } else if (vm_cache_geometry_colors) { /* do we know what the cache geometry is? */
599 n = vm_cache_geometry_colors;
600 } else {
601 n = DEFAULT_COLORS; /* use default if all else fails */
602 }
603 if (n == 0) {
604 n = 1;
605 }
606 if (n > MAX_COLORS) {
607 n = MAX_COLORS;
608 }
609
610 /* the count must be a power of 2 */
611 if ((n & (n - 1)) != 0) {
612 n = DEFAULT_COLORS; /* use default if all else fails */
613 }
614 vm_colors = n;
615 vm_color_mask = n - 1;
616
617 vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL;
618
619 #if defined (__x86_64__)
620 /* adjust for reduction in colors due to clumping and multiple cores */
621 if (real_ncpus) {
622 vm_free_magazine_refill_limit *= (vm_clump_size * real_ncpus);
623 }
624 #endif
625 }
626
627 /*
628 * During single-threaded early boot we don't initialize all pages.
629 * This avoids some delay during boot. They'll be initialized and
630 * added to the free list as needed, or after we are multithreaded, by
631 * what becomes the pageout thread.
632 */
633 static boolean_t fill = FALSE;
634 static unsigned int fillval;
635 uint_t vm_delayed_count = 0; /* when non-zero, indicates we may have more pages to init */
636 ppnum_t delay_above_pnum = PPNUM_MAX;
637
638 /*
639 * For x86, the first 8 GB initializes quickly and gives us plenty of low memory, plus memory above it, to start off with.
640 * If ARM ever uses delayed page initialization, this value may need to be quite different.
641 */
642 #define DEFAULT_DELAY_ABOVE_PHYS_GB (8)
643
644 /*
645 * When we have to dip into more delayed pages due to low memory, free up
646 * a large chunk to get things back to normal. This avoids contention on the
647 * delayed code allocating page by page.
648 */
649 #define VM_DELAY_PAGE_CHUNK ((1024 * 1024 * 1024) / PAGE_SIZE)
650
651 /*
652 * Get and initialize the next delayed page.
653 */
654 static vm_page_t
655 vm_get_delayed_page(int grab_options)
656 {
657 vm_page_t p;
658 ppnum_t pnum;
659
660 /*
661 * Get a new page if we have one.
662 */
663 lck_mtx_lock(&vm_page_queue_free_lock);
664 if (vm_delayed_count == 0) {
665 lck_mtx_unlock(&vm_page_queue_free_lock);
666 return NULL;
667 }
668 if (!pmap_next_page(&pnum)) {
669 vm_delayed_count = 0;
670 lck_mtx_unlock(&vm_page_queue_free_lock);
671 return NULL;
672 }
673
674 assert(vm_delayed_count > 0);
675 --vm_delayed_count;
676
677 #if defined(__x86_64__)
678 /* x86 cluster code requires increasing phys_page in vm_pages[] */
679 if (vm_pages_count > 0) {
680 assert(pnum > vm_pages[vm_pages_count - 1].vmp_phys_page);
681 }
682 #endif
683 p = &vm_pages[vm_pages_count];
684 assert(p < vm_page_array_ending_addr);
685 vm_page_init(p, pnum, FALSE);
686 ++vm_pages_count;
687 ++vm_page_pages;
688 lck_mtx_unlock(&vm_page_queue_free_lock);
689
690 /*
691 * These pages were initially counted as wired; undo that now.
692 */
693 if (grab_options & VM_PAGE_GRAB_Q_LOCK_HELD) {
694 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
695 } else {
696 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
697 vm_page_lockspin_queues();
698 }
699 --vm_page_wire_count;
700 --vm_page_wire_count_initial;
701 if (vm_page_wire_count_on_boot != 0) {
702 --vm_page_wire_count_on_boot;
703 }
704 if (!(grab_options & VM_PAGE_GRAB_Q_LOCK_HELD)) {
705 vm_page_unlock_queues();
706 }
707
708
709 if (fill) {
710 fillPage(pnum, fillval);
711 }
712 return p;
713 }
714
715 static void vm_page_module_init_delayed(void);
716
717 /*
718 * Free all remaining delayed pages to the free lists.
719 */
720 void
721 vm_free_delayed_pages(void)
722 {
723 vm_page_t p;
724 vm_page_t list = NULL;
725 uint_t cnt = 0;
726 vm_offset_t start_free_va;
727 int64_t free_size;
728
729 while ((p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE)) != NULL) {
730 if (vm_himemory_mode) {
731 vm_page_release(p, FALSE);
732 } else {
733 p->vmp_snext = list;
734 list = p;
735 }
736 ++cnt;
737 }
738
739 /*
740 * Free the pages in reverse order if not in himemory mode,
741 * so that the low memory pages will be first on the free lists (LIFO).
742 */
743 while (list != NULL) {
744 p = list;
745 list = p->vmp_snext;
746 p->vmp_snext = NULL;
747 vm_page_release(p, FALSE);
748 }
749 #if DEVELOPMENT || DEBUG
750 kprintf("vm_free_delayed_pages: initialized %d free pages\n", cnt);
751 #endif
752
753 /*
754 * Free up any unused full pages at the end of the vm_pages[] array
755 */
756 start_free_va = round_page((vm_offset_t)&vm_pages[vm_pages_count]);
757
758 #if defined(__x86_64__)
759 /*
760 * Since x86 might have used large pages for vm_pages[], we can't
761 * free starting in the middle of a partially used large page.
762 */
763 if (pmap_query_pagesize(kernel_pmap, start_free_va) == I386_LPGBYTES) {
764 start_free_va = ((start_free_va + I386_LPGMASK) & ~I386_LPGMASK);
765 }
766 #endif
767 if (start_free_va < (vm_offset_t)vm_page_array_ending_addr) {
768 free_size = trunc_page((vm_offset_t)vm_page_array_ending_addr - start_free_va);
769 if (free_size > 0) {
770 ml_static_mfree(start_free_va, (vm_offset_t)free_size);
771 vm_page_array_ending_addr = (void *)start_free_va;
772
773 /*
774 * Note there's no locking here, as only this thread will ever change this value.
775 * The reader, vm_page_diagnose, doesn't grab any locks for the counts it looks at.
776 */
777 vm_page_stolen_count -= (free_size >> PAGE_SHIFT);
778
779 #if DEVELOPMENT || DEBUG
780 kprintf("Freeing final unused %ld bytes from vm_pages[] at 0x%lx\n",
781 (long)free_size, (long)start_free_va);
782 #endif
783 }
784 }
785
786
787 /*
788 * now we can create the VM page array zone
789 */
790 vm_page_module_init_delayed();
791 }
792
793 /*
794 * Try to free up enough delayed pages to satisfy a contiguous memory allocation.
795 */
796 static void
797 vm_free_delayed_pages_contig(
798 uint_t npages,
799 ppnum_t max_pnum,
800 ppnum_t pnum_mask)
801 {
802 vm_page_t p;
803 ppnum_t pnum;
804 uint_t cnt = 0;
805
806 /*
807 * Treat 0 as the absolute max page number.
808 */
809 if (max_pnum == 0) {
810 max_pnum = PPNUM_MAX;
811 }
812
813 /*
814 * Free until we get a properly aligned start page
815 */
816 for (;;) {
817 p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
818 if (p == NULL) {
819 return;
820 }
821 pnum = VM_PAGE_GET_PHYS_PAGE(p);
822 vm_page_release(p, FALSE);
823 if (pnum >= max_pnum) {
824 return;
825 }
826 if ((pnum & pnum_mask) == 0) {
827 break;
828 }
829 }
830
831 /*
832 * Having a healthy pool of free pages will help performance. We don't
833 * want to fall back to the delayed code for every page allocation.
834 */
835 if (vm_page_free_count < VM_DELAY_PAGE_CHUNK) {
836 npages += VM_DELAY_PAGE_CHUNK;
837 }
838
839 /*
840 * Now free up the pages
841 */
842 for (cnt = 1; cnt < npages; ++cnt) {
843 p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
844 if (p == NULL) {
845 return;
846 }
847 vm_page_release(p, FALSE);
848 }
849 }
850
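/*
 * Round a 32-bit value up to the next power of 2 (valid for X >= 2),
 * e.g. ROUNDUP_NEXTP2(5) == 8 and ROUNDUP_NEXTP2(8) == 8.
 */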
851 #define ROUNDUP_NEXTP2(X) (1U << (32 - __builtin_clz((X) - 1)))
852
853 void
854 vm_page_init_local_q(unsigned int num_cpus)
855 {
856 struct vpl *t_local_q;
857
858 /*
859 * no point in this for a uni-processor system
860 */
861 if (num_cpus >= 2) {
862 ml_cpu_info_t cpu_info;
863
864 /*
865 * Force the allocation alignment to a cache line:
866 * the `vpl` struct has a lock that is taken cross-CPU,
867 * so we want to isolate the rest of the per-CPU data
868 * from it to avoid false sharing.
869 */
870
871 ml_cpu_get_info(&cpu_info);
872
873 t_local_q = zalloc_percpu_permanent(sizeof(struct vpl),
874 cpu_info.cache_line_size - 1);
875
876 zpercpu_foreach(lq, t_local_q) {
877 VPL_LOCK_INIT(lq, &vm_page_lck_grp_local, &vm_page_lck_attr);
878 vm_page_queue_init(&lq->vpl_queue);
879 }
880
881 /* make the initialization visible to all cores */
882 os_atomic_store(&vm_page_local_q, t_local_q, release);
883 }
884 }
885
886 /*
887 * vm_init_before_launchd
888 *
889 * This should be called right before launchd is loaded.
890 */
891 void
892 vm_init_before_launchd()
893 {
894 vm_page_lockspin_queues();
895 vm_page_wire_count_on_boot = vm_page_wire_count;
896 vm_page_unlock_queues();
897 }
898
899
900 /*
901 * vm_page_bootstrap:
902 *
903 * Initializes the resident memory module.
904 *
905 * Allocates memory for the page cells, and
906 * for the object/offset-to-page hash table headers.
907 * Each page cell is initialized and placed on the free list.
908 * Returns the range of available kernel virtual memory.
909 */
910 __startup_func
911 void
912 vm_page_bootstrap(
913 vm_offset_t *startp,
914 vm_offset_t *endp)
915 {
916 unsigned int i;
917 unsigned int log1;
918 unsigned int log2;
919 unsigned int size;
920
921 /*
922 * Initialize the page queues.
923 */
924
925 lck_mtx_init_ext(&vm_page_queue_free_lock, &vm_page_queue_free_lock_ext, &vm_page_lck_grp_free, &vm_page_lck_attr);
926 lck_mtx_init_ext(&vm_page_queue_lock, &vm_page_queue_lock_ext, &vm_page_lck_grp_queue, &vm_page_lck_attr);
927 lck_mtx_init_ext(&vm_purgeable_queue_lock, &vm_purgeable_queue_lock_ext, &vm_page_lck_grp_purge, &vm_page_lck_attr);
928
929 for (i = 0; i < PURGEABLE_Q_TYPE_MAX; i++) {
930 int group;
931
932 purgeable_queues[i].token_q_head = 0;
933 purgeable_queues[i].token_q_tail = 0;
934 for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
935 queue_init(&purgeable_queues[i].objq[group]);
936 }
937
938 purgeable_queues[i].type = i;
939 purgeable_queues[i].new_pages = 0;
940 #if MACH_ASSERT
941 purgeable_queues[i].debug_count_tokens = 0;
942 purgeable_queues[i].debug_count_objects = 0;
943 #endif
944 }
945
946 purgeable_nonvolatile_count = 0;
947 queue_init(&purgeable_nonvolatile_queue);
948
949 for (i = 0; i < MAX_COLORS; i++) {
950 vm_page_queue_init(&vm_page_queue_free[i].qhead);
951 }
952
953 vm_page_queue_init(&vm_lopage_queue_free);
954 vm_page_queue_init(&vm_page_queue_active);
955 vm_page_queue_init(&vm_page_queue_inactive);
956 #if CONFIG_SECLUDED_MEMORY
957 vm_page_queue_init(&vm_page_queue_secluded);
958 #endif /* CONFIG_SECLUDED_MEMORY */
959 vm_page_queue_init(&vm_page_queue_cleaned);
960 vm_page_queue_init(&vm_page_queue_throttled);
961 vm_page_queue_init(&vm_page_queue_anonymous);
962 queue_init(&vm_objects_wired);
963
964 for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
965 vm_page_queue_init(&vm_page_queue_speculative[i].age_q);
966
967 vm_page_queue_speculative[i].age_ts.tv_sec = 0;
968 vm_page_queue_speculative[i].age_ts.tv_nsec = 0;
969 }
970 #if CONFIG_BACKGROUND_QUEUE
971 vm_page_queue_init(&vm_page_queue_background);
972
973 vm_page_background_count = 0;
974 vm_page_background_internal_count = 0;
975 vm_page_background_external_count = 0;
976 vm_page_background_promoted_count = 0;
977
978 vm_page_background_target = (unsigned int)(atop_64(max_mem) / 25);
979
980 if (vm_page_background_target > VM_PAGE_BACKGROUND_TARGET_MAX) {
981 vm_page_background_target = VM_PAGE_BACKGROUND_TARGET_MAX;
982 }
983
984 vm_page_background_mode = VM_PAGE_BG_LEVEL_1;
985 vm_page_background_exclude_external = 0;
986
987 PE_parse_boot_argn("vm_page_bg_mode", &vm_page_background_mode, sizeof(vm_page_background_mode));
988 PE_parse_boot_argn("vm_page_bg_exclude_external", &vm_page_background_exclude_external, sizeof(vm_page_background_exclude_external));
989 PE_parse_boot_argn("vm_page_bg_target", &vm_page_background_target, sizeof(vm_page_background_target));
990
991 if (vm_page_background_mode > VM_PAGE_BG_LEVEL_1) {
992 vm_page_background_mode = VM_PAGE_BG_LEVEL_1;
993 }
994 #endif
995 vm_page_free_wanted = 0;
996 vm_page_free_wanted_privileged = 0;
997 #if CONFIG_SECLUDED_MEMORY
998 vm_page_free_wanted_secluded = 0;
999 #endif /* CONFIG_SECLUDED_MEMORY */
1000
1001 #if defined (__x86_64__)
1002 /* this must be called before vm_page_set_colors() */
1003 vm_page_setup_clump();
1004 #endif
1005
1006 vm_page_set_colors();
1007
1008 bzero(vm_page_inactive_states, sizeof(vm_page_inactive_states));
1009 vm_page_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1010 vm_page_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1011 vm_page_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1012
1013 bzero(vm_page_pageable_states, sizeof(vm_page_pageable_states));
1014 vm_page_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1015 vm_page_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1016 vm_page_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1017 vm_page_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
1018 vm_page_pageable_states[VM_PAGE_ON_SPECULATIVE_Q] = 1;
1019 vm_page_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
1020 #if CONFIG_SECLUDED_MEMORY
1021 vm_page_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
1022 #endif /* CONFIG_SECLUDED_MEMORY */
1023
1024 bzero(vm_page_non_speculative_pageable_states, sizeof(vm_page_non_speculative_pageable_states));
1025 vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1026 vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1027 vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1028 vm_page_non_speculative_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
1029 vm_page_non_speculative_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
1030 #if CONFIG_SECLUDED_MEMORY
1031 vm_page_non_speculative_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
1032 #endif /* CONFIG_SECLUDED_MEMORY */
1033
1034 bzero(vm_page_active_or_inactive_states, sizeof(vm_page_active_or_inactive_states));
1035 vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1036 vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1037 vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1038 vm_page_active_or_inactive_states[VM_PAGE_ON_ACTIVE_Q] = 1;
1039 #if CONFIG_SECLUDED_MEMORY
1040 vm_page_active_or_inactive_states[VM_PAGE_ON_SECLUDED_Q] = 1;
1041 #endif /* CONFIG_SECLUDED_MEMORY */
1042
1043 for (vm_tag_t t = 0; t < VM_KERN_MEMORY_FIRST_DYNAMIC; t++) {
1044 vm_allocation_sites_static[t].refcount = 2;
1045 vm_allocation_sites_static[t].tag = t;
1046 vm_allocation_sites[t] = &vm_allocation_sites_static[t];
1047 }
1048 vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].refcount = 2;
1049 vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].tag = VM_KERN_MEMORY_ANY;
1050 vm_allocation_sites[VM_KERN_MEMORY_ANY] = &vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC];
1051
1052 /*
1053 * Steal memory for the map and zone subsystems.
1054 */
1055 kernel_startup_initialize_upto(STARTUP_SUB_PMAP_STEAL);
1056
1057 /*
1058 * Allocate (and initialize) the virtual-to-physical
1059 * table hash buckets.
1060 *
1061 * The number of buckets should be a power of two to
1062 * get a good hash function. The following computation
1063 * chooses the first power of two that is greater
1064 * than the number of physical pages in the system.
1065 */
1066
1067 if (vm_page_bucket_count == 0) {
1068 unsigned int npages = pmap_free_pages();
1069
1070 vm_page_bucket_count = 1;
1071 while (vm_page_bucket_count < npages) {
1072 vm_page_bucket_count <<= 1;
1073 }
1074 }
1075 vm_page_bucket_lock_count = (vm_page_bucket_count + BUCKETS_PER_LOCK - 1) / BUCKETS_PER_LOCK;
1076
1077 vm_page_hash_mask = vm_page_bucket_count - 1;
1078
1079 /*
1080 * Calculate object shift value for hashing algorithm:
1081 * O = log2(sizeof(struct vm_object))
1082 * B = log2(vm_page_bucket_count)
1083 * hash shifts the object left by
1084 * B/2 - O + 1
1085 */
1086 size = vm_page_bucket_count;
1087 for (log1 = 0; size > 1; log1++) {
1088 size /= 2;
1089 }
1090 size = sizeof(struct vm_object);
1091 for (log2 = 0; size > 1; log2++) {
1092 size /= 2;
1093 }
1094 vm_page_hash_shift = log1 / 2 - log2 + 1;
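/*
 * Example with hypothetical sizes: 2^20 buckets (log1 == 20) and a
 * vm_object of 128..255 bytes (log2 == 7) give a shift of 20/2 - 7 + 1 == 4.
 */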
1095
1096 vm_page_bucket_hash = 1 << ((log1 + 1) >> 1); /* Get (ceiling of sqrt of table size) */
1097 vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2); /* Get (ceiling of quadroot of table size) */
1098 vm_page_bucket_hash |= 1; /* Set bit and add 1 - always must be 1 to ensure unique series */
1099
1100 if (vm_page_hash_mask & vm_page_bucket_count) {
1101 printf("vm_page_bootstrap: WARNING -- strange page hash\n");
1102 }
1103
1104 #if VM_PAGE_BUCKETS_CHECK
1105 #if VM_PAGE_FAKE_BUCKETS
1106 /*
1107 * Allocate a decoy set of page buckets, to detect
1108 * any stomping there.
1109 */
1110 vm_page_fake_buckets = (vm_page_bucket_t *)
1111 pmap_steal_memory(vm_page_bucket_count *
1112 sizeof(vm_page_bucket_t));
1113 vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets;
1114 vm_page_fake_buckets_end =
1115 vm_map_round_page((vm_page_fake_buckets_start +
1116 (vm_page_bucket_count *
1117 sizeof(vm_page_bucket_t))),
1118 PAGE_MASK);
1119 char *cp;
1120 for (cp = (char *)vm_page_fake_buckets_start;
1121 cp < (char *)vm_page_fake_buckets_end;
1122 cp++) {
1123 *cp = 0x5a;
1124 }
1125 #endif /* VM_PAGE_FAKE_BUCKETS */
1126 #endif /* VM_PAGE_BUCKETS_CHECK */
1127
1128 kernel_debug_string_early("vm_page_buckets");
1129 vm_page_buckets = (vm_page_bucket_t *)
1130 pmap_steal_memory(vm_page_bucket_count *
1131 sizeof(vm_page_bucket_t));
1132
1133 kernel_debug_string_early("vm_page_bucket_locks");
1134 vm_page_bucket_locks = (lck_spin_t *)
1135 pmap_steal_memory(vm_page_bucket_lock_count *
1136 sizeof(lck_spin_t));
1137
1138 for (i = 0; i < vm_page_bucket_count; i++) {
1139 vm_page_bucket_t *bucket = &vm_page_buckets[i];
1140
1141 bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
1142 #if MACH_PAGE_HASH_STATS
1143 bucket->cur_count = 0;
1144 bucket->hi_count = 0;
1145 #endif /* MACH_PAGE_HASH_STATS */
1146 }
1147
1148 for (i = 0; i < vm_page_bucket_lock_count; i++) {
1149 lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr);
1150 }
1151
1152 vm_tag_init();
1153
1154 #if VM_PAGE_BUCKETS_CHECK
1155 vm_page_buckets_check_ready = TRUE;
1156 #endif /* VM_PAGE_BUCKETS_CHECK */
1157
1158 /*
1159 * Machine-dependent code allocates the resident page table.
1160 * It uses vm_page_init to initialize the page frames.
1161 * The code also returns to us the virtual space available
1162 * to the kernel. We don't trust the pmap module
1163 * to get the alignment right.
1164 */
1165
1166 kernel_debug_string_early("pmap_startup");
1167 pmap_startup(&virtual_space_start, &virtual_space_end);
1168 virtual_space_start = round_page(virtual_space_start);
1169 virtual_space_end = trunc_page(virtual_space_end);
1170
1171 *startp = virtual_space_start;
1172 *endp = virtual_space_end;
1173
1174 /*
1175 * Compute the initial "wire" count.
1176 * Up until now, the pages which have been set aside are not under
1177 * the VM system's control, so although they aren't explicitly
1178 * wired, they nonetheless can't be moved. At this moment,
1179 * all VM managed pages are "free", courtesy of pmap_startup.
1180 */
1181 assert((unsigned int) atop_64(max_mem) == atop_64(max_mem));
1182 vm_page_wire_count = ((unsigned int) atop_64(max_mem)) -
1183 vm_page_free_count - vm_lopage_free_count;
1184 #if CONFIG_SECLUDED_MEMORY
1185 vm_page_wire_count -= vm_page_secluded_count;
1186 #endif
1187 vm_page_wire_count_initial = vm_page_wire_count;
1188
1189 /* capture this for later use */
1190 booter_size = ml_get_booter_memory_size();
1191
1192 printf("vm_page_bootstrap: %d free pages, %d wired pages, (up to %d of which are delayed free)\n",
1193 vm_page_free_count, vm_page_wire_count, vm_delayed_count);
1194
1195 kernel_debug_string_early("vm_page_bootstrap complete");
1196 }
1197
1198 #ifndef MACHINE_PAGES
1199 /*
1200 * This is the early boot time allocator for data structures needed to bootstrap the VM system.
1201 * On x86 it will allocate large pages if size is sufficiently large. We don't need to do this
1202 * on ARM yet, due to the combination of a large base page size and smaller RAM devices.
1203 */
1204 static void *
1205 pmap_steal_memory_internal(
1206 vm_size_t size,
1207 boolean_t might_free)
1208 {
1209 kern_return_t kr;
1210 vm_offset_t addr;
1211 vm_offset_t map_addr;
1212 ppnum_t phys_page;
1213
1214 /*
1215 * Size needs to be aligned to word size.
1216 */
1217 size = (size + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
1218
1219 /*
1220 * On the first call, get the initial values for virtual address space
1221 * and page align them.
1222 */
1223 if (virtual_space_start == virtual_space_end) {
1224 pmap_virtual_space(&virtual_space_start, &virtual_space_end);
1225 virtual_space_start = round_page(virtual_space_start);
1226 virtual_space_end = trunc_page(virtual_space_end);
1227
1228 #if defined(__x86_64__)
1229 /*
1230 * Release remaining unused section of preallocated KVA and the 4K page tables
1231 * that map it. This makes the VA available for large page mappings.
1232 */
1233 Idle_PTs_release(virtual_space_start, virtual_space_end);
1234 #endif
1235 }
1236
1237 /*
1238 * Allocate the virtual space for this request. On x86, we'll align to a large page
1239 * address if the size is big enough to back with at least 1 large page.
1240 */
1241 #if defined(__x86_64__)
1242 if (size >= I386_LPGBYTES) {
1243 virtual_space_start = ((virtual_space_start + I386_LPGMASK) & ~I386_LPGMASK);
1244 }
1245 #endif
1246 addr = virtual_space_start;
1247 virtual_space_start += size;
1248
1249 //kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size); /* (TEST/DEBUG) */
1250
1251 /*
1252 * Allocate and map physical pages to back the new virtual space.
1253 */
1254 map_addr = round_page(addr);
1255 while (map_addr < addr + size) {
1256 #if defined(__x86_64__)
1257 /*
1258 * Back with a large page if properly aligned on x86
1259 */
1260 if ((map_addr & I386_LPGMASK) == 0 &&
1261 map_addr + I386_LPGBYTES <= addr + size &&
1262 pmap_pre_expand_large(kernel_pmap, map_addr) == KERN_SUCCESS &&
1263 pmap_next_page_large(&phys_page) == KERN_SUCCESS) {
1264 kr = pmap_enter(kernel_pmap, map_addr, phys_page,
1265 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
1266 VM_WIMG_USE_DEFAULT | VM_MEM_SUPERPAGE, FALSE);
1267
1268 if (kr != KERN_SUCCESS) {
1269 panic("pmap_steal_memory: pmap_enter() large failed, new_addr=%#lx, phys_page=%u",
1270 (unsigned long)map_addr, phys_page);
1271 }
1272 map_addr += I386_LPGBYTES;
1273 vm_page_wire_count += I386_LPGBYTES >> PAGE_SHIFT;
1274 vm_page_stolen_count += I386_LPGBYTES >> PAGE_SHIFT;
1275 vm_page_kern_lpage_count++;
1276 continue;
1277 }
1278 #endif
1279
1280 if (!pmap_next_page_hi(&phys_page, might_free)) {
1281 panic("pmap_steal_memory() size: 0x%llx\n", (uint64_t)size);
1282 }
1283
1284 #if defined(__x86_64__)
1285 pmap_pre_expand(kernel_pmap, map_addr);
1286 #endif
1287
1288 kr = pmap_enter(kernel_pmap, map_addr, phys_page,
1289 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
1290 VM_WIMG_USE_DEFAULT, FALSE);
1291
1292 if (kr != KERN_SUCCESS) {
1293 panic("pmap_steal_memory() pmap_enter failed, map_addr=%#lx, phys_page=%u",
1294 (unsigned long)map_addr, phys_page);
1295 }
1296 map_addr += PAGE_SIZE;
1297
1298 /*
1299 * Account for newly stolen memory
1300 */
1301 vm_page_wire_count++;
1302 vm_page_stolen_count++;
1303 }
1304
1305 #if defined(__x86_64__)
1306 /*
1307 * The call with might_free is currently the last use of pmap_steal_memory*().
1308 * Notify the pmap layer to record which high pages were allocated so far.
1309 */
1310 if (might_free) {
1311 pmap_hi_pages_done();
1312 }
1313 #endif
1314 #if KASAN
1315 kasan_notify_address(round_page(addr), size);
1316 #endif
1317 return (void *) addr;
1318 }
1319
1320 void *
1321 pmap_steal_memory(
1322 vm_size_t size)
1323 {
1324 return pmap_steal_memory_internal(size, FALSE);
1325 }
1326
1327 void *
1328 pmap_steal_freeable_memory(
1329 vm_size_t size)
1330 {
1331 return pmap_steal_memory_internal(size, TRUE);
1332 }
1333
1334 #if CONFIG_SECLUDED_MEMORY
1335 /* boot-args to control secluded memory */
1336 unsigned int secluded_mem_mb = 0; /* # of MBs of RAM to seclude */
1337 int secluded_for_iokit = 1; /* IOKit can use secluded memory */
1338 int secluded_for_apps = 1; /* apps can use secluded memory */
1339 int secluded_for_filecache = 2; /* filecache can use secluded memory */
1340 #if 11
1341 int secluded_for_fbdp = 0;
1342 #endif
1343 uint64_t secluded_shutoff_trigger = 0;
1344 uint64_t secluded_shutoff_headroom = 150 * 1024 * 1024; /* original value from N56 */
1345 #endif /* CONFIG_SECLUDED_MEMORY */
1346
1347
1348 #if defined(__arm__) || defined(__arm64__)
1349 extern void patch_low_glo_vm_page_info(void *, void *, uint32_t);
1350 unsigned int vm_first_phys_ppnum = 0;
1351 #endif
1352
1353 void vm_page_release_startup(vm_page_t mem);
1354 void
1355 pmap_startup(
1356 vm_offset_t *startp,
1357 vm_offset_t *endp)
1358 {
1359 unsigned int i, npages;
1360 ppnum_t phys_page;
1361 uint64_t mem_sz;
1362 uint64_t start_ns;
1363 uint64_t now_ns;
1364 uint_t low_page_count = 0;
1365
1366 #if defined(__LP64__)
1367 /*
1368 * make sure we are aligned on a 64 byte boundary
1369 * for VM_PAGE_PACK_PTR (it clips off the low-order
1370 * 6 bits of the pointer)
1371 */
1372 if (virtual_space_start != virtual_space_end) {
1373 virtual_space_start = round_page(virtual_space_start);
1374 }
1375 #endif
1376
1377 /*
1378 * We calculate how many page frames we will have
1379 * and then allocate the page structures in one chunk.
1380 *
1381 * Note that the calculation here doesn't take into account
1382 * the memory needed to map what's being allocated, i.e. the page
1383 * table entries. So the actual number of pages we get will be
1384 * less than this. To do someday: include that in the computation.
1385 */
1386 mem_sz = pmap_free_pages() * (uint64_t)PAGE_SIZE;
1387 mem_sz += round_page(virtual_space_start) - virtual_space_start; /* Account for any slop */
1388 npages = (uint_t)(mem_sz / (PAGE_SIZE + sizeof(*vm_pages))); /* scaled to include the vm_page_ts */
1389
1390 vm_pages = (vm_page_t) pmap_steal_freeable_memory(npages * sizeof *vm_pages);
1391
1392 /*
1393 * Check if we want to initialize pages to a known value
1394 */
1395 if (PE_parse_boot_argn("fill", &fillval, sizeof(fillval))) {
1396 fill = TRUE;
1397 }
1398 #if DEBUG
1399 /* This slows down booting the DEBUG kernel, particularly on
1400 * large memory systems, but is worthwhile in deterministically
1401 * trapping uninitialized memory usage.
1402 */
1403 if (!fill) {
1404 fill = TRUE;
1405 fillval = 0xDEB8F177;
1406 }
1407 #endif
1408 if (fill) {
1409 kprintf("Filling vm_pages with pattern: 0x%x\n", fillval);
1410 }
1411
1412 #if CONFIG_SECLUDED_MEMORY
1413 /*
1414 * Figure out how much secluded memory to have before we start
1415 * releasing pages to the free lists.
1416 * The default, if specified nowhere else, is no secluded mem.
1417 */
1418 secluded_mem_mb = 0;
1419 if (max_mem > 1 * 1024 * 1024 * 1024) {
1420 /* default to 90MB for devices with > 1GB of RAM */
1421 secluded_mem_mb = 90;
1422 }
1423 /* override with value from device tree, if provided */
1424 PE_get_default("kern.secluded_mem_mb",
1425 &secluded_mem_mb, sizeof(secluded_mem_mb));
1426 /* override with value from boot-args, if provided */
1427 PE_parse_boot_argn("secluded_mem_mb",
1428 &secluded_mem_mb,
1429 sizeof(secluded_mem_mb));
1430
1431 vm_page_secluded_target = (unsigned int)
1432 ((secluded_mem_mb * 1024ULL * 1024ULL) / PAGE_SIZE);
1433 PE_parse_boot_argn("secluded_for_iokit",
1434 &secluded_for_iokit,
1435 sizeof(secluded_for_iokit));
1436 PE_parse_boot_argn("secluded_for_apps",
1437 &secluded_for_apps,
1438 sizeof(secluded_for_apps));
1439 PE_parse_boot_argn("secluded_for_filecache",
1440 &secluded_for_filecache,
1441 sizeof(secluded_for_filecache));
1442 #if 11
1443 PE_parse_boot_argn("secluded_for_fbdp",
1444 &secluded_for_fbdp,
1445 sizeof(secluded_for_fbdp));
1446 #endif
1447
1448 /*
1449 * Allow a really large app to effectively use secluded memory until it exits.
1450 */
1451 if (vm_page_secluded_target != 0) {
1452 /*
1453 * Get an amount from boot-args, else use 1/2 of max_mem.
1454 * 1/2 max_mem was chosen from a Peace daemon tentpole test which
1455 * used munch to induce jetsam thrashing of false idle daemons on N56.
1456 */
1457 int secluded_shutoff_mb;
1458 if (PE_parse_boot_argn("secluded_shutoff_mb", &secluded_shutoff_mb,
1459 sizeof(secluded_shutoff_mb))) {
1460 secluded_shutoff_trigger = (uint64_t)secluded_shutoff_mb * 1024 * 1024;
1461 } else {
1462 secluded_shutoff_trigger = max_mem / 2;
1463 }
1464
1465 /* ensure the headroom value is sensible and avoid underflows */
1466 assert(secluded_shutoff_trigger == 0 || secluded_shutoff_trigger > secluded_shutoff_headroom);
1467 }
1468
1469 #endif /* CONFIG_SECLUDED_MEMORY */
1470
1471 #if defined(__x86_64__)
1472
1473 /*
1474 * Decide how much memory we delay freeing at boot time.
1475 */
1476 uint32_t delay_above_gb;
1477 if (!PE_parse_boot_argn("delay_above_gb", &delay_above_gb, sizeof(delay_above_gb))) {
1478 delay_above_gb = DEFAULT_DELAY_ABOVE_PHYS_GB;
1479 }
1480
1481 if (delay_above_gb == 0) {
1482 delay_above_pnum = PPNUM_MAX;
1483 } else {
1484 delay_above_pnum = delay_above_gb * (1024 * 1024 * 1024 / PAGE_SIZE);
1485 }
1486
1487 /* make sure we have sane breathing room: 1G above low memory */
1488 if (delay_above_pnum <= max_valid_low_ppnum) {
1489 delay_above_pnum = max_valid_low_ppnum + ((1024 * 1024 * 1024) >> PAGE_SHIFT);
1490 }
1491
1492 if (delay_above_pnum < PPNUM_MAX) {
1493 printf("pmap_startup() delaying init/free of page nums > 0x%x\n", delay_above_pnum);
1494 }
1495
1496 #endif /* defined(__x86_64__) */
1497
1498 /*
1499 * Initialize and release the page frames.
1500 */
1501 kernel_debug_string_early("page_frame_init");
1502
1503 vm_page_array_beginning_addr = &vm_pages[0];
1504 vm_page_array_ending_addr = &vm_pages[npages]; /* used by ptr packing/unpacking code */
1505 #if VM_PAGE_PACKED_FROM_ARRAY
1506 if (npages >= VM_PAGE_PACKED_FROM_ARRAY) {
1507 panic("pmap_startup(): too many pages to support vm_page packing");
1508 }
1509 #endif
1510
1511 vm_delayed_count = 0;
1512
1513 absolutetime_to_nanoseconds(mach_absolute_time(), &start_ns);
1514 vm_pages_count = 0;
1515 for (i = 0; i < npages; i++) {
1516 /* Did we run out of pages? */
1517 if (!pmap_next_page(&phys_page)) {
1518 break;
1519 }
1520
1521 if (phys_page < max_valid_low_ppnum) {
1522 ++low_page_count;
1523 }
1524
1525 /* Are we at a high enough page number to delay initializing the rest? */
1526 if (low_page_count > vm_lopage_free_limit && phys_page > delay_above_pnum) {
1527 vm_delayed_count = pmap_free_pages();
1528 break;
1529 }
1530
1531 #if defined(__arm__) || defined(__arm64__)
1532 if (i == 0) {
1533 vm_first_phys_ppnum = phys_page;
1534 patch_low_glo_vm_page_info((void *)vm_page_array_beginning_addr,
1535 (void *)vm_page_array_ending_addr, vm_first_phys_ppnum);
1536 }
1537 assert((i + vm_first_phys_ppnum) == phys_page);
1538 #endif
1539
1540 #if defined(__x86_64__)
1541 /* The x86 clump freeing code requires increasing ppn's to work correctly */
1542 if (i > 0) {
1543 assert(phys_page > vm_pages[i - 1].vmp_phys_page);
1544 }
1545 #endif
1546 ++vm_pages_count;
1547 vm_page_init(&vm_pages[i], phys_page, FALSE);
1548 if (fill) {
1549 fillPage(phys_page, fillval);
1550 }
1551 if (vm_himemory_mode) {
1552 vm_page_release_startup(&vm_pages[i]);
1553 }
1554 }
1555 vm_page_pages = vm_pages_count; /* used to report to user space */
1556
1557 if (!vm_himemory_mode) {
1558 do {
1559 vm_page_release_startup(&vm_pages[--i]);
1560 } while (i != 0);
1561 }
1562
1563 absolutetime_to_nanoseconds(mach_absolute_time(), &now_ns);
1564 printf("pmap_startup() init/release time: %lld microsec\n", (now_ns - start_ns) / NSEC_PER_USEC);
1565 printf("pmap_startup() delayed init/release of %d pages\n", vm_delayed_count);
1566
1567 #if defined(__LP64__)
1568 if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0]))) != &vm_pages[0]) {
1569 panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]);
1570 }
1571
1572 if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count - 1]))) != &vm_pages[vm_pages_count - 1]) {
1573 panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count - 1]);
1574 }
1575 #endif
1576
1577 VM_CHECK_MEMORYSTATUS;
1578
1579 /*
1580 * We have to re-align virtual_space_start,
1581 * because pmap_steal_memory has been using it.
1582 */
1583 virtual_space_start = round_page(virtual_space_start);
1584 *startp = virtual_space_start;
1585 *endp = virtual_space_end;
1586 }
1587 #endif /* MACHINE_PAGES */
1588
1589 /*
1590 * Create the zone that represents the vm_pages[] array. Nothing ever allocates
1591 * or frees to this zone. It's just here for reporting purposes via the zprint command.
1592 * This needs to be done after all initially delayed pages are put on the free lists.
1593 */
1594 static void
1595 vm_page_module_init_delayed(void)
1596 {
1597 (void)zone_create_ext("vm pages array", sizeof(struct vm_page),
1598 ZC_NOGZALLOC, ZONE_ID_ANY, ^(zone_t z) {
1599 uint64_t vm_page_zone_pages, vm_page_array_zone_data_size;
1600
1601 zone_set_exhaustible(z, 0);
1602 /*
1603 * Reflect size and usage information for vm_pages[].
1604 */
1605
1606 z->countavail = (uint32_t)(vm_page_array_ending_addr - vm_pages);
1607 z->countfree = z->countavail - vm_pages_count;
1608 zpercpu_get_cpu(z->z_stats, 0)->zs_mem_allocated =
1609 vm_pages_count * sizeof(struct vm_page);
1610 vm_page_array_zone_data_size = (uintptr_t)((void *)vm_page_array_ending_addr - (void *)vm_pages);
1611 vm_page_zone_pages = atop(round_page((vm_offset_t)vm_page_array_zone_data_size));
1612 z->page_count += vm_page_zone_pages;
1613 /* since zone accounts for these, take them out of stolen */
1614 VM_PAGE_MOVE_STOLEN(vm_page_zone_pages);
1615 });
1616 }
1617
1618 /*
1619 * Create the vm_pages zone. This is used for the vm_page structures for the pages
1620 * that are scavenged from other boot-time usages by ml_static_mfree(). As such,
1621 * this needs to happen in early VM bootstrap.
1622 */
1623
1624 __startup_func
1625 static void
1626 vm_page_module_init(void)
1627 {
1628 vm_size_t vm_page_with_ppnum_size;
1629
1630 /*
1631 * Since the pointers to elements in this zone will be packed, they
1632 * must have an appropriately aligned size, not strictly what sizeof() reports.
1633 */
1634 vm_page_with_ppnum_size =
1635 (sizeof(struct vm_page_with_ppnum) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) &
1636 ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1);
1637
1638 vm_page_zone = zone_create_ext("vm pages", vm_page_with_ppnum_size,
1639 ZC_ALLOW_FOREIGN | ZC_NOGZALLOC | ZC_ALIGNMENT_REQUIRED |
1640 ZC_NOCALLOUT, ZONE_ID_ANY, ^(zone_t z) {
1641 #if defined(__LP64__)
1642 zone_set_submap_idx(z, Z_SUBMAP_IDX_VA_RESTRICTED_MAP);
1643 #endif
1644 zone_set_exhaustible(z, 0);
1645 });
1646 }
1647 STARTUP(ZALLOC, STARTUP_RANK_SECOND, vm_page_module_init);
1648
1649 /*
1650 * Routine: vm_page_create
1651 * Purpose:
1652 * After the VM system is up, machine-dependent code
1653 * may stumble across more physical memory. For example,
1654 * memory that it was reserving for a frame buffer.
1655 * vm_page_create turns this memory into available pages.
1656 */
1657
1658 void
1659 vm_page_create(
1660 ppnum_t start,
1661 ppnum_t end)
1662 {
1663 ppnum_t phys_page;
1664 vm_page_t m;
1665
1666 for (phys_page = start;
1667 phys_page < end;
1668 phys_page++) {
1669 while ((m = (vm_page_t) vm_page_grab_fictitious_common(phys_page))
1670 == VM_PAGE_NULL) {
1671 vm_page_more_fictitious();
1672 }
1673
1674 m->vmp_fictitious = FALSE;
1675 pmap_clear_noencrypt(phys_page);
1676
1677 lck_mtx_lock(&vm_page_queue_free_lock);
1678 vm_page_pages++;
1679 lck_mtx_unlock(&vm_page_queue_free_lock);
1680 vm_page_release(m, FALSE);
1681 }
1682 }
1683
1684 /*
1685 * vm_page_hash:
1686 *
1687 * Distributes the object/offset key pair among hash buckets.
1688 *
1689 * NOTE: The bucket count must be a power of 2
1690 */
1691 #define vm_page_hash(object, offset) (\
1692 ( (natural_t)((uintptr_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
1693 & vm_page_hash_mask)
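/*
 * The resulting value indexes vm_page_buckets[]; the spin lock guarding that
 * bucket is vm_page_bucket_locks[index / BUCKETS_PER_LOCK] (see
 * vm_page_insert_internal() below).
 */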
1694
1695
1696 /*
1697 * vm_page_insert: [ internal use only ]
1698 *
1699 * Inserts the given mem entry into the object/object-page
1700 * table and object list.
1701 *
1702 * The object must be locked.
1703 */
1704 void
1705 vm_page_insert(
1706 vm_page_t mem,
1707 vm_object_t object,
1708 vm_object_offset_t offset)
1709 {
1710 vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, FALSE, FALSE, NULL);
1711 }
1712
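/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * the object must be locked exclusively around the insertion.
 *
 *	vm_object_lock(object);
 *	assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);
 *	vm_page_insert(mem, object, offset);
 *	vm_object_unlock(object);
 */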
1713 void
1714 vm_page_insert_wired(
1715 vm_page_t mem,
1716 vm_object_t object,
1717 vm_object_offset_t offset,
1718 vm_tag_t tag)
1719 {
1720 vm_page_insert_internal(mem, object, offset, tag, FALSE, TRUE, FALSE, FALSE, NULL);
1721 }
1722
1723 void
1724 vm_page_insert_internal(
1725 vm_page_t mem,
1726 vm_object_t object,
1727 vm_object_offset_t offset,
1728 vm_tag_t tag,
1729 boolean_t queues_lock_held,
1730 boolean_t insert_in_hash,
1731 boolean_t batch_pmap_op,
1732 boolean_t batch_accounting,
1733 uint64_t *delayed_ledger_update)
1734 {
1735 vm_page_bucket_t *bucket;
1736 lck_spin_t *bucket_lock;
1737 int hash_id;
1738 task_t owner;
1739 int ledger_idx_volatile;
1740 int ledger_idx_nonvolatile;
1741 int ledger_idx_volatile_compressed;
1742 int ledger_idx_nonvolatile_compressed;
1743 boolean_t do_footprint;
1744
1745 #if 0
1746 /*
1747 * we may not hold the page queue lock
1748 * so this check isn't safe to make
1749 */
1750 VM_PAGE_CHECK(mem);
1751 #endif
1752
1753 assertf(page_aligned(offset), "0x%llx\n", offset);
1754
1755 assert(!VM_PAGE_WIRED(mem) || mem->vmp_private || mem->vmp_fictitious || (tag != VM_KERN_MEMORY_NONE));
1756
1757 /* the vm_submap_object is only a placeholder for submaps */
1758 assert(object != vm_submap_object);
1759
1760 vm_object_lock_assert_exclusive(object);
1761 LCK_MTX_ASSERT(&vm_page_queue_lock,
1762 queues_lock_held ? LCK_MTX_ASSERT_OWNED
1763 : LCK_MTX_ASSERT_NOTOWNED);
1764
1765 if (queues_lock_held == FALSE) {
1766 assert(!VM_PAGE_PAGEABLE(mem));
1767 }
1768
1769 if (insert_in_hash == TRUE) {
1770 #if DEBUG || VM_PAGE_BUCKETS_CHECK
1771 if (mem->vmp_tabled || mem->vmp_object) {
1772 panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
1773 "already in (obj=%p,off=0x%llx)",
1774 mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
1775 }
1776 #endif
1777 if (object->internal && (offset >= object->vo_size)) {
1778 panic("vm_page_insert_internal: (page=%p,obj=%p,off=0x%llx,size=0x%llx) inserted at offset past object bounds",
1779 mem, object, offset, object->vo_size);
1780 }
1781
1782 assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);
1783
1784 /*
1785 * Record the object/offset pair in this page
1786 */
1787
1788 mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
1789 mem->vmp_offset = offset;
1790
1791 #if CONFIG_SECLUDED_MEMORY
1792 if (object->eligible_for_secluded) {
1793 vm_page_secluded.eligible_for_secluded++;
1794 }
1795 #endif /* CONFIG_SECLUDED_MEMORY */
1796
1797 /*
1798 * Insert it into the object/offset hash table
1799 */
1800 hash_id = vm_page_hash(object, offset);
1801 bucket = &vm_page_buckets[hash_id];
1802 bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
1803
1804 lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
1805
1806 mem->vmp_next_m = bucket->page_list;
1807 bucket->page_list = VM_PAGE_PACK_PTR(mem);
1808 assert(mem == (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)));
1809
1810 #if MACH_PAGE_HASH_STATS
1811 if (++bucket->cur_count > bucket->hi_count) {
1812 bucket->hi_count = bucket->cur_count;
1813 }
1814 #endif /* MACH_PAGE_HASH_STATS */
1815 mem->vmp_hashed = TRUE;
1816 lck_spin_unlock(bucket_lock);
1817 }
1818
1819 {
1820 unsigned int cache_attr;
1821
1822 cache_attr = object->wimg_bits & VM_WIMG_MASK;
1823
1824 if (cache_attr != VM_WIMG_USE_DEFAULT) {
1825 PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op);
1826 }
1827 }
1828 /*
1829 * Now link into the object's list of backed pages.
1830 */
1831 vm_page_queue_enter(&object->memq, mem, vmp_listq);
1832 object->memq_hint = mem;
1833 mem->vmp_tabled = TRUE;
1834
1835 /*
1836 * Show that the object has one more resident page.
1837 */
1838
1839 object->resident_page_count++;
1840 if (VM_PAGE_WIRED(mem)) {
1841 assert(mem->vmp_wire_count > 0);
1842 VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
1843 VM_OBJECT_WIRED_PAGE_ADD(object, mem);
1844 VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
1845 }
1846 assert(object->resident_page_count >= object->wired_page_count);
1847
1848 #if DEVELOPMENT || DEBUG
1849 if (object->object_is_shared_cache &&
1850 object->pager != NULL &&
1851 object->pager->mo_pager_ops == &shared_region_pager_ops) {
1852 int new, old;
1853 assert(!object->internal);
1854 new = OSAddAtomic(+1, &shared_region_pagers_resident_count);
1855 do {
1856 old = shared_region_pagers_resident_peak;
1857 } while (old < new &&
1858 !OSCompareAndSwap(old, new, &shared_region_pagers_resident_peak));
1859 }
1860 #endif /* DEVELOPMENT || DEBUG */
1861
1862 if (batch_accounting == FALSE) {
1863 if (object->internal) {
1864 OSAddAtomic(1, &vm_page_internal_count);
1865 } else {
1866 OSAddAtomic(1, &vm_page_external_count);
1867 }
1868 }
1869
1870 /*
1871 * It wouldn't make sense to insert a "reusable" page in
1872 * an object (the page would have been marked "reusable" only
1873 * at the time of a madvise(MADV_FREE_REUSABLE) if it was already
1874 * in the object at that time).
1875 * But a page could be inserted in an "all_reusable" object, if
1876 * something faults it in (a vm_read() from another task or a
1877 * "use-after-free" issue in user space, for example). It can
1878 * also happen if we're relocating a page from that object to
1879 * a different physical page during a physically-contiguous
1880 * allocation.
1881 */
1882 assert(!mem->vmp_reusable);
1883 if (object->all_reusable) {
1884 OSAddAtomic(+1, &vm_page_stats_reusable.reusable_count);
1885 }
1886
1887 if (object->purgable == VM_PURGABLE_DENY &&
1888 !object->vo_ledger_tag) {
1889 owner = TASK_NULL;
1890 } else {
1891 owner = VM_OBJECT_OWNER(object);
1892 vm_object_ledger_tag_ledgers(object,
1893 &ledger_idx_volatile,
1894 &ledger_idx_nonvolatile,
1895 &ledger_idx_volatile_compressed,
1896 &ledger_idx_nonvolatile_compressed,
1897 &do_footprint);
1898 }
1899 if (owner &&
1900 (object->purgable == VM_PURGABLE_NONVOLATILE ||
1901 object->purgable == VM_PURGABLE_DENY ||
1902 VM_PAGE_WIRED(mem))) {
1903 if (delayed_ledger_update) {
1904 *delayed_ledger_update += PAGE_SIZE;
1905 } else {
1906 /* more non-volatile bytes */
1907 ledger_credit(owner->ledger,
1908 ledger_idx_nonvolatile,
1909 PAGE_SIZE);
1910 if (do_footprint) {
1911 /* more footprint */
1912 ledger_credit(owner->ledger,
1913 task_ledgers.phys_footprint,
1914 PAGE_SIZE);
1915 }
1916 }
1917 } else if (owner &&
1918 (object->purgable == VM_PURGABLE_VOLATILE ||
1919 object->purgable == VM_PURGABLE_EMPTY)) {
1920 assert(!VM_PAGE_WIRED(mem));
1921 /* more volatile bytes */
1922 ledger_credit(owner->ledger,
1923 ledger_idx_volatile,
1924 PAGE_SIZE);
1925 }
1926
1927 if (object->purgable == VM_PURGABLE_VOLATILE) {
1928 if (VM_PAGE_WIRED(mem)) {
1929 OSAddAtomic(+1, &vm_page_purgeable_wired_count);
1930 } else {
1931 OSAddAtomic(+1, &vm_page_purgeable_count);
1932 }
1933 } else if (object->purgable == VM_PURGABLE_EMPTY &&
1934 mem->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) {
1935 /*
1936 * This page belongs to a purged VM object but hasn't
1937 * been purged (because it was "busy").
1938 * It's in the "throttled" queue and hence not
1939 * visible to vm_pageout_scan(). Move it to a pageable
1940 * queue, so that it can eventually be reclaimed, instead
1941 * of lingering in the "empty" object.
1942 */
1943 if (queues_lock_held == FALSE) {
1944 vm_page_lockspin_queues();
1945 }
1946 vm_page_deactivate(mem);
1947 if (queues_lock_held == FALSE) {
1948 vm_page_unlock_queues();
1949 }
1950 }
1951
1952 #if VM_OBJECT_TRACKING_OP_MODIFIED
1953 if (vm_object_tracking_inited &&
1954 object->internal &&
1955 object->resident_page_count == 0 &&
1956 object->pager == NULL &&
1957 object->shadow != NULL &&
1958 object->shadow->copy == object) {
1959 void *bt[VM_OBJECT_TRACKING_BTDEPTH];
1960 int numsaved = 0;
1961
1962 numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH);
1963 btlog_add_entry(vm_object_tracking_btlog,
1964 object,
1965 VM_OBJECT_TRACKING_OP_MODIFIED,
1966 bt,
1967 numsaved);
1968 }
1969 #endif /* VM_OBJECT_TRACKING_OP_MODIFIED */
1970 }
1971
1972 /*
1973 * vm_page_replace:
1974 *
1975 * Exactly like vm_page_insert, except that we first
1976 * remove any existing page at the given offset in object.
1977 *
1978 * The object must be locked.
1979 */
1980 void
1981 vm_page_replace(
1982 vm_page_t mem,
1983 vm_object_t object,
1984 vm_object_offset_t offset)
1985 {
1986 vm_page_bucket_t *bucket;
1987 vm_page_t found_m = VM_PAGE_NULL;
1988 lck_spin_t *bucket_lock;
1989 int hash_id;
1990
1991 #if 0
1992 /*
1993 * we don't hold the page queue lock
1994 * so this check isn't safe to make
1995 */
1996 VM_PAGE_CHECK(mem);
1997 #endif
1998 vm_object_lock_assert_exclusive(object);
1999 #if DEBUG || VM_PAGE_BUCKETS_CHECK
2000 if (mem->vmp_tabled || mem->vmp_object) {
2001 panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
2002 "already in (obj=%p,off=0x%llx)",
2003 mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
2004 }
2005 #endif
2006 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
2007
2008 assert(!VM_PAGE_PAGEABLE(mem));
2009
2010 /*
2011 * Record the object/offset pair in this page
2012 */
2013 mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
2014 mem->vmp_offset = offset;
2015
2016 /*
2017 * Insert it into the object/offset hash table,
2018 * replacing any page that might have been there.
2019 */
2020
2021 hash_id = vm_page_hash(object, offset);
2022 bucket = &vm_page_buckets[hash_id];
2023 bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2024
2025 lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2026
2027 if (bucket->page_list) {
2028 vm_page_packed_t *mp = &bucket->page_list;
2029 vm_page_t m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp));
2030
2031 do {
2032 /*
2033 * compare packed object pointers
2034 */
2035 if (m->vmp_object == mem->vmp_object && m->vmp_offset == offset) {
2036 /*
2037 * Remove old page from hash list
2038 */
2039 *mp = m->vmp_next_m;
2040 m->vmp_hashed = FALSE;
2041 m->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2042
2043 found_m = m;
2044 break;
2045 }
2046 mp = &m->vmp_next_m;
2047 } while ((m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp))));
2048
2049 mem->vmp_next_m = bucket->page_list;
2050 } else {
2051 mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2052 }
2053 /*
2054 * insert new page at head of hash list
2055 */
2056 bucket->page_list = VM_PAGE_PACK_PTR(mem);
2057 mem->vmp_hashed = TRUE;
2058
2059 lck_spin_unlock(bucket_lock);
2060
2061 if (found_m) {
2062 /*
2063 * there was already a page at the specified
2064 * offset for this object... remove it from
2065 * the object and free it back to the free list
2066 */
2067 vm_page_free_unlocked(found_m, FALSE);
2068 }
2069 vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, FALSE, FALSE, FALSE, NULL);
2070 }
2071
2072 /*
2073 * vm_page_remove: [ internal use only ]
2074 *
2075 * Removes the given mem entry from the object/offset-page
2076 * table and the object page list.
2077 *
2078 * The object must be locked.
2079 */
2080
2081 void
2082 vm_page_remove(
2083 vm_page_t mem,
2084 boolean_t remove_from_hash)
2085 {
2086 vm_page_bucket_t *bucket;
2087 vm_page_t this;
2088 lck_spin_t *bucket_lock;
2089 int hash_id;
2090 task_t owner;
2091 vm_object_t m_object;
2092 int ledger_idx_volatile;
2093 int ledger_idx_nonvolatile;
2094 int ledger_idx_volatile_compressed;
2095 int ledger_idx_nonvolatile_compressed;
2096 int do_footprint;
2097
2098 m_object = VM_PAGE_OBJECT(mem);
2099
2100 vm_object_lock_assert_exclusive(m_object);
2101 assert(mem->vmp_tabled);
2102 assert(!mem->vmp_cleaning);
2103 assert(!mem->vmp_laundry);
2104
2105 if (VM_PAGE_PAGEABLE(mem)) {
2106 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2107 }
2108 #if 0
2109 /*
2110 * we don't hold the page queue lock
2111 * so this check isn't safe to make
2112 */
2113 VM_PAGE_CHECK(mem);
2114 #endif
2115 if (remove_from_hash == TRUE) {
2116 /*
2117 * Remove from the object_object/offset hash table
2118 */
2119 hash_id = vm_page_hash(m_object, mem->vmp_offset);
2120 bucket = &vm_page_buckets[hash_id];
2121 bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2122
2123 lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2124
2125 if ((this = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list))) == mem) {
2126 /* optimize for common case */
2127
2128 bucket->page_list = mem->vmp_next_m;
2129 } else {
2130 vm_page_packed_t *prev;
2131
2132 for (prev = &this->vmp_next_m;
2133 (this = (vm_page_t)(VM_PAGE_UNPACK_PTR(*prev))) != mem;
2134 prev = &this->vmp_next_m) {
2135 continue;
2136 }
2137 *prev = this->vmp_next_m;
2138 }
2139 #if MACH_PAGE_HASH_STATS
2140 bucket->cur_count--;
2141 #endif /* MACH_PAGE_HASH_STATS */
2142 mem->vmp_hashed = FALSE;
2143 this->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2144 lck_spin_unlock(bucket_lock);
2145 }
2146 /*
2147 * Now remove from the object's list of backed pages.
2148 */
2149
2150 vm_page_remove_internal(mem);
2151
2152 /*
2153 * And show that the object has one fewer resident
2154 * page.
2155 */
2156
2157 assert(m_object->resident_page_count > 0);
2158 m_object->resident_page_count--;
2159
2160 #if DEVELOPMENT || DEBUG
2161 if (m_object->object_is_shared_cache &&
2162 m_object->pager != NULL &&
2163 m_object->pager->mo_pager_ops == &shared_region_pager_ops) {
2164 assert(!m_object->internal);
2165 OSAddAtomic(-1, &shared_region_pagers_resident_count);
2166 }
2167 #endif /* DEVELOPMENT || DEBUG */
2168
2169 if (m_object->internal) {
2170 #if DEBUG
2171 assert(vm_page_internal_count);
2172 #endif /* DEBUG */
2173
2174 OSAddAtomic(-1, &vm_page_internal_count);
2175 } else {
2176 assert(vm_page_external_count);
2177 OSAddAtomic(-1, &vm_page_external_count);
2178
2179 if (mem->vmp_xpmapped) {
2180 assert(vm_page_xpmapped_external_count);
2181 OSAddAtomic(-1, &vm_page_xpmapped_external_count);
2182 }
2183 }
2184 if (!m_object->internal &&
2185 m_object->cached_list.next &&
2186 m_object->cached_list.prev) {
2187 if (m_object->resident_page_count == 0) {
2188 vm_object_cache_remove(m_object);
2189 }
2190 }
2191
2192 if (VM_PAGE_WIRED(mem)) {
2193 assert(mem->vmp_wire_count > 0);
2194 VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
2195 VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
2196 VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
2197 }
2198 assert(m_object->resident_page_count >=
2199 m_object->wired_page_count);
2200 if (mem->vmp_reusable) {
2201 assert(m_object->reusable_page_count > 0);
2202 m_object->reusable_page_count--;
2203 assert(m_object->reusable_page_count <=
2204 m_object->resident_page_count);
2205 mem->vmp_reusable = FALSE;
2206 OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
2207 vm_page_stats_reusable.reused_remove++;
2208 } else if (m_object->all_reusable) {
2209 OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
2210 vm_page_stats_reusable.reused_remove++;
2211 }
2212
2213 if (m_object->purgable == VM_PURGABLE_DENY &&
2214 !m_object->vo_ledger_tag) {
2215 owner = TASK_NULL;
2216 } else {
2217 owner = VM_OBJECT_OWNER(m_object);
2218 vm_object_ledger_tag_ledgers(m_object,
2219 &ledger_idx_volatile,
2220 &ledger_idx_nonvolatile,
2221 &ledger_idx_volatile_compressed,
2222 &ledger_idx_nonvolatile_compressed,
2223 &do_footprint);
2224 }
2225 if (owner &&
2226 (m_object->purgable == VM_PURGABLE_NONVOLATILE ||
2227 m_object->purgable == VM_PURGABLE_DENY ||
2228 VM_PAGE_WIRED(mem))) {
2229 /* less non-volatile bytes */
2230 ledger_debit(owner->ledger,
2231 ledger_idx_nonvolatile,
2232 PAGE_SIZE);
2233 if (do_footprint) {
2234 /* less footprint */
2235 ledger_debit(owner->ledger,
2236 task_ledgers.phys_footprint,
2237 PAGE_SIZE);
2238 }
2239 } else if (owner &&
2240 (m_object->purgable == VM_PURGABLE_VOLATILE ||
2241 m_object->purgable == VM_PURGABLE_EMPTY)) {
2242 assert(!VM_PAGE_WIRED(mem));
2243 /* less volatile bytes */
2244 ledger_debit(owner->ledger,
2245 ledger_idx_volatile,
2246 PAGE_SIZE);
2247 }
2248 if (m_object->purgable == VM_PURGABLE_VOLATILE) {
2249 if (VM_PAGE_WIRED(mem)) {
2250 assert(vm_page_purgeable_wired_count > 0);
2251 OSAddAtomic(-1, &vm_page_purgeable_wired_count);
2252 } else {
2253 assert(vm_page_purgeable_count > 0);
2254 OSAddAtomic(-1, &vm_page_purgeable_count);
2255 }
2256 }
2257
2258 if (m_object->set_cache_attr == TRUE) {
2259 pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), 0);
2260 }
2261
2262 mem->vmp_tabled = FALSE;
2263 mem->vmp_object = 0;
2264 mem->vmp_offset = (vm_object_offset_t) -1;
2265 }
2266
2267
2268 /*
2269 * vm_page_lookup:
2270 *
2271 * Returns the page associated with the object/offset
2272 * pair specified; if none is found, VM_PAGE_NULL is returned.
2273 *
2274 * The object must be locked. No side effects.
2275 */
2276
2277 #define VM_PAGE_HASH_LOOKUP_THRESHOLD 10
2278
2279 #if DEBUG_VM_PAGE_LOOKUP
2280
2281 struct {
2282 uint64_t vpl_total;
2283 uint64_t vpl_empty_obj;
2284 uint64_t vpl_bucket_NULL;
2285 uint64_t vpl_hit_hint;
2286 uint64_t vpl_hit_hint_next;
2287 uint64_t vpl_hit_hint_prev;
2288 uint64_t vpl_fast;
2289 uint64_t vpl_slow;
2290 uint64_t vpl_hit;
2291 uint64_t vpl_miss;
2292
2293 uint64_t vpl_fast_elapsed;
2294 uint64_t vpl_slow_elapsed;
2295 } vm_page_lookup_stats __attribute__((aligned(8)));
2296
2297 #endif
2298
2299 #define KDP_VM_PAGE_WALK_MAX 1000
2300
2301 vm_page_t
2302 kdp_vm_page_lookup(
2303 vm_object_t object,
2304 vm_object_offset_t offset)
2305 {
2306 vm_page_t cur_page;
2307 int num_traversed = 0;
2308
2309 if (not_in_kdp) {
2310 panic("panic: kdp_vm_page_lookup done outside of kernel debugger");
2311 }
2312
2313 vm_page_queue_iterate(&object->memq, cur_page, vmp_listq) {
2314 if (cur_page->vmp_offset == offset) {
2315 return cur_page;
2316 }
2317 num_traversed++;
2318
2319 if (num_traversed >= KDP_VM_PAGE_WALK_MAX) {
2320 return VM_PAGE_NULL;
2321 }
2322 }
2323
2324 return VM_PAGE_NULL;
2325 }
2326
2327 vm_page_t
2328 vm_page_lookup(
2329 vm_object_t object,
2330 vm_object_offset_t offset)
2331 {
2332 vm_page_t mem;
2333 vm_page_bucket_t *bucket;
2334 vm_page_queue_entry_t qe;
2335 lck_spin_t *bucket_lock = NULL;
2336 int hash_id;
2337 #if DEBUG_VM_PAGE_LOOKUP
2338 uint64_t start, elapsed;
2339
2340 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total);
2341 #endif
2342 vm_object_lock_assert_held(object);
2343 assertf(page_aligned(offset), "offset 0x%llx\n", offset);
2344
2345 if (object->resident_page_count == 0) {
2346 #if DEBUG_VM_PAGE_LOOKUP
2347 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_empty_obj);
2348 #endif
2349 return VM_PAGE_NULL;
2350 }
2351
2352 mem = object->memq_hint;
2353
2354 if (mem != VM_PAGE_NULL) {
2355 assert(VM_PAGE_OBJECT(mem) == object);
2356
2357 if (mem->vmp_offset == offset) {
2358 #if DEBUG_VM_PAGE_LOOKUP
2359 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint);
2360 #endif
2361 return mem;
2362 }
2363 qe = (vm_page_queue_entry_t)vm_page_queue_next(&mem->vmp_listq);
2364
2365 if (!vm_page_queue_end(&object->memq, qe)) {
2366 vm_page_t next_page;
2367
2368 next_page = (vm_page_t)((uintptr_t)qe);
2369 assert(VM_PAGE_OBJECT(next_page) == object);
2370
2371 if (next_page->vmp_offset == offset) {
2372 object->memq_hint = next_page; /* new hint */
2373 #if DEBUG_VM_PAGE_LOOKUP
2374 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_next);
2375 #endif
2376 return next_page;
2377 }
2378 }
2379 qe = (vm_page_queue_entry_t)vm_page_queue_prev(&mem->vmp_listq);
2380
2381 if (!vm_page_queue_end(&object->memq, qe)) {
2382 vm_page_t prev_page;
2383
2384 prev_page = (vm_page_t)((uintptr_t)qe);
2385 assert(VM_PAGE_OBJECT(prev_page) == object);
2386
2387 if (prev_page->vmp_offset == offset) {
2388 object->memq_hint = prev_page; /* new hint */
2389 #if DEBUG_VM_PAGE_LOOKUP
2390 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_prev);
2391 #endif
2392 return prev_page;
2393 }
2394 }
2395 }
2396 /*
2397 * Search the hash table for this object/offset pair
2398 */
2399 hash_id = vm_page_hash(object, offset);
2400 bucket = &vm_page_buckets[hash_id];
2401
2402 /*
2403 * since we hold the object lock, we are guaranteed that no
2404 * new pages can be inserted into this object... this in turn
2405 * guarantees that the page we're looking for can't exist
2406 * if the bucket it hashes to is currently NULL even when looked
2407 * at outside the scope of the hash bucket lock... this is a
2408 * really cheap optimization to avoid taking the lock
2409 */
2410 if (!bucket->page_list) {
2411 #if DEBUG_VM_PAGE_LOOKUP
2412 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_bucket_NULL);
2413 #endif
2414 return VM_PAGE_NULL;
2415 }
2416
2417 #if DEBUG_VM_PAGE_LOOKUP
2418 start = mach_absolute_time();
2419 #endif
2420 if (object->resident_page_count <= VM_PAGE_HASH_LOOKUP_THRESHOLD) {
2421 /*
2422 * on average, it's roughly 3 times faster to run a short memq list
2423 * than to take the spin lock and go through the hash list
2424 */
2425 mem = (vm_page_t)vm_page_queue_first(&object->memq);
2426
2427 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
2428 if (mem->vmp_offset == offset) {
2429 break;
2430 }
2431
2432 mem = (vm_page_t)vm_page_queue_next(&mem->vmp_listq);
2433 }
2434 if (vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
2435 mem = NULL;
2436 }
2437 } else {
2438 vm_page_object_t packed_object;
2439
2440 packed_object = VM_PAGE_PACK_OBJECT(object);
2441
2442 bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2443
2444 lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2445
2446 for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
2447 mem != VM_PAGE_NULL;
2448 mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m))) {
2449 #if 0
2450 /*
2451 * we don't hold the page queue lock
2452 * so this check isn't safe to make
2453 */
2454 VM_PAGE_CHECK(mem);
2455 #endif
2456 if ((mem->vmp_object == packed_object) && (mem->vmp_offset == offset)) {
2457 break;
2458 }
2459 }
2460 lck_spin_unlock(bucket_lock);
2461 }
2462
2463 #if DEBUG_VM_PAGE_LOOKUP
2464 elapsed = mach_absolute_time() - start;
2465
2466 if (bucket_lock) {
2467 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_slow);
2468 OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_slow_elapsed);
2469 } else {
2470 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_fast);
2471 OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_fast_elapsed);
2472 }
2473 if (mem != VM_PAGE_NULL) {
2474 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit);
2475 } else {
2476 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_miss);
2477 }
2478 #endif
2479 if (mem != VM_PAGE_NULL) {
2480 assert(VM_PAGE_OBJECT(mem) == object);
2481
2482 object->memq_hint = mem;
2483 }
2484 return mem;
2485 }
2486
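/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * the object lock must be held across the lookup and for as long as
 * the returned page is used.
 *
 *	vm_object_lock(object);
 *	m = vm_page_lookup(object, offset);
 *	if (m != VM_PAGE_NULL && !m->vmp_busy) {
 *		... examine m while the object stays locked ...
 *	}
 *	vm_object_unlock(object);
 */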
2487
2488 /*
2489 * vm_page_rename:
2490 *
2491 * Move the given memory entry from its
2492 * current object to the specified target object/offset.
2493 *
2494 * The object must be locked.
2495 */
2496 void
2497 vm_page_rename(
2498 vm_page_t mem,
2499 vm_object_t new_object,
2500 vm_object_offset_t new_offset)
2501 {
2502 boolean_t internal_to_external, external_to_internal;
2503 vm_tag_t tag;
2504 vm_object_t m_object;
2505
2506 m_object = VM_PAGE_OBJECT(mem);
2507
2508 assert(m_object != new_object);
2509 assert(m_object);
2510
2511 /*
2512 * Changes to mem->vmp_object require the page lock because
2513 * the pageout daemon uses that lock to get the object.
2514 */
2515 vm_page_lockspin_queues();
2516
2517 internal_to_external = FALSE;
2518 external_to_internal = FALSE;
2519
2520 if (mem->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q) {
2521 /*
2522 * it's much easier to get the vm_page_pageable_xxx accounting correct
2523 * if we first move the page to the active queue... it's going to end
2524 * up there anyway, and we don't do vm_page_rename's frequently enough
2525 * for this to matter.
2526 */
2527 vm_page_queues_remove(mem, FALSE);
2528 vm_page_activate(mem);
2529 }
2530 if (VM_PAGE_PAGEABLE(mem)) {
2531 if (m_object->internal && !new_object->internal) {
2532 internal_to_external = TRUE;
2533 }
2534 if (!m_object->internal && new_object->internal) {
2535 external_to_internal = TRUE;
2536 }
2537 }
2538
2539 tag = m_object->wire_tag;
2540 vm_page_remove(mem, TRUE);
2541 vm_page_insert_internal(mem, new_object, new_offset, tag, TRUE, TRUE, FALSE, FALSE, NULL);
2542
2543 if (internal_to_external) {
2544 vm_page_pageable_internal_count--;
2545 vm_page_pageable_external_count++;
2546 } else if (external_to_internal) {
2547 vm_page_pageable_external_count--;
2548 vm_page_pageable_internal_count++;
2549 }
2550
2551 vm_page_unlock_queues();
2552 }
2553
2554 /*
2555 * vm_page_init:
2556 *
2557 * Initialize the fields in a new page.
2558 * This takes a structure with random values and initializes it
2559 * so that it can be given to vm_page_release or vm_page_insert.
2560 */
2561 void
2562 vm_page_init(
2563 vm_page_t mem,
2564 ppnum_t phys_page,
2565 boolean_t lopage)
2566 {
2567 uint_t i;
2568 uintptr_t *p;
2569
2570 assert(phys_page);
2571
2572 #if DEBUG
2573 if ((phys_page != vm_page_fictitious_addr) && (phys_page != vm_page_guard_addr)) {
2574 if (!(pmap_valid_page(phys_page))) {
2575 panic("vm_page_init: non-DRAM phys_page 0x%x\n", phys_page);
2576 }
2577 }
2578 #endif /* DEBUG */
2579
2580 /*
2581 * Initialize the fields of the vm_page. If adding any new fields to vm_page,
2582 * try to use initial values which match 0. This minimizes the number of writes
2583 * needed for boot-time initialization.
2584 *
2585 * Kernel bzero() isn't an inline yet, so do it by hand for performance.
2586 */
2587 assert(VM_PAGE_NOT_ON_Q == 0);
2588 assert(sizeof(*mem) % sizeof(uintptr_t) == 0);
2589 for (p = (uintptr_t *)(void *)mem, i = sizeof(*mem) / sizeof(uintptr_t); i != 0; --i) {
2590 *p++ = 0;
2591 }
2592 mem->vmp_offset = (vm_object_offset_t)-1;
2593 mem->vmp_busy = TRUE;
2594 mem->vmp_lopage = lopage;
2595
2596 VM_PAGE_SET_PHYS_PAGE(mem, phys_page);
2597 #if 0
2598 /*
2599 * we're leaving this turned off for now... currently pages
2600 * come off the free list and are either immediately dirtied/referenced
2601 * due to zero-fill or COW faults, or are used to read or write files...
2602 * in the file I/O case, the UPL mechanism takes care of clearing
2603 * the state of the HW ref/mod bits in a somewhat fragile way.
2604 * Since we may change the way this works in the future (to toughen it up),
2605 * I'm leaving this as a reminder of where these bits could get cleared
2606 */
2607
2608 /*
2609 * make sure both the h/w referenced and modified bits are
2610 * clear at this point... we are especially dependent on
2611 * not finding a 'stale' h/w modified in a number of spots
2612 * once this page goes back into use
2613 */
2614 pmap_clear_refmod(phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
2615 #endif
2616 }
2617
2618 /*
2619 * vm_page_grab_fictitious:
2620 *
2621 * Remove a fictitious page from the free list.
2622 * Returns VM_PAGE_NULL if there are no free pages.
2623 */
2624 int c_vm_page_grab_fictitious = 0;
2625 int c_vm_page_grab_fictitious_failed = 0;
2626 int c_vm_page_release_fictitious = 0;
2627 int c_vm_page_more_fictitious = 0;
2628
2629 vm_page_t
2630 vm_page_grab_fictitious_common(
2631 ppnum_t phys_addr)
2632 {
2633 vm_page_t m;
2634
2635 if ((m = (vm_page_t)zalloc_noblock(vm_page_zone))) {
2636 vm_page_init(m, phys_addr, FALSE);
2637 m->vmp_fictitious = TRUE;
2638
2639 c_vm_page_grab_fictitious++;
2640 } else {
2641 c_vm_page_grab_fictitious_failed++;
2642 }
2643
2644 return m;
2645 }
2646
2647 vm_page_t
2648 vm_page_grab_fictitious(void)
2649 {
2650 return vm_page_grab_fictitious_common(vm_page_fictitious_addr);
2651 }
2652
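/*
 * Typical retry pattern (the same one vm_page_create() uses above):
 * if the zone is empty, replenish it and try again.
 *
 *	while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL) {
 *		vm_page_more_fictitious();
 *	}
 */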
2653 int vm_guard_count;
2654
2655
2656 vm_page_t
2657 vm_page_grab_guard(void)
2658 {
2659 vm_page_t page;
2660 page = vm_page_grab_fictitious_common(vm_page_guard_addr);
2661 if (page) {
2662 OSAddAtomic(1, &vm_guard_count);
2663 }
2664 return page;
2665 }
2666
2667
2668 /*
2669 * vm_page_release_fictitious:
2670 *
2671 * Release a fictitious page to the zone pool
2672 */
2673 void
2674 vm_page_release_fictitious(
2675 vm_page_t m)
2676 {
2677 assert((m->vmp_q_state == VM_PAGE_NOT_ON_Q) || (m->vmp_q_state == VM_PAGE_IS_WIRED));
2678 assert(m->vmp_fictitious);
2679 assert(VM_PAGE_GET_PHYS_PAGE(m) == vm_page_fictitious_addr ||
2680 VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr);
2681
2682
2683 if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
2684 OSAddAtomic(-1, &vm_guard_count);
2685 }
2686
2687 c_vm_page_release_fictitious++;
2688
2689 zfree(vm_page_zone, m);
2690 }
2691
2692 /*
2693 * vm_page_more_fictitious:
2694 *
2695 * Add more fictitious pages to the zone.
2696 * Allowed to block. This routine is way intimate
2697 * with the zones code, for several reasons:
2698 * 1. we need to carve some page structures out of physical
2699 * memory before zones work, so they _cannot_ come from
2700 * the zone restricted submap.
2701 * 2. the zone needs to be collectable in order to prevent
2702 * growth without bound. These structures are used by
2703 * the device pager (by the hundreds and thousands), as
2704 * private pages for pageout, and as blocking pages for
2705 * pagein. Temporary bursts in demand should not result in
2706 * permanent allocation of a resource.
2707 * 3. To smooth allocation humps, we allocate single pages
2708 * with kernel_memory_allocate(), and cram them into the
2709 * zone.
2710 */
2711
2712 void
2713 vm_page_more_fictitious(void)
2714 {
2715 vm_offset_t addr;
2716 kern_return_t retval;
2717
2718 c_vm_page_more_fictitious++;
2719
2720 /*
2721 * Allocate a single page from the zone restricted submap. Do not wait
2722 * if no physical pages are immediately available, and do not zero the
2723 * space. We need our own blocking lock here to prevent having multiple,
2724 * simultaneous requests from piling up on the zone restricted submap
2725 * lock.
2726 * Exactly one (of our) threads should be potentially waiting on the map
2727 * lock. If the winner is not vm-privileged, then the page allocation will
2728 * fail, and it will temporarily block here in vm_page_wait().
2729 */
2730 lck_mtx_lock(&vm_page_alloc_lock);
2731 /*
2732 * If another thread allocated space, just bail out now.
2733 */
2734 if (os_atomic_load(&vm_page_zone->countfree, relaxed) > 5) {
2735 /*
2736 * The number "5" is a small number that is larger than the
2737 * number of fictitious pages that any single caller will
2738 * attempt to allocate. Otherwise, a thread will attempt to
2739 * acquire a fictitious page (vm_page_grab_fictitious), fail,
2740 * release all of the resources and locks already acquired,
2741 * and then call this routine. This routine finds the pages
2742 * that the caller released, and so fails to allocate new space.
2743 * The process repeats infinitely. The largest known number
2744 * of fictitious pages required in this manner is 2; 5 is
2745 * simply a somewhat larger number.
2746 */
2747 lck_mtx_unlock(&vm_page_alloc_lock);
2748 return;
2749 }
2750
2751 retval = kernel_memory_allocate(zone_submap(vm_page_zone),
2752 &addr, PAGE_SIZE, 0, KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
2753 VM_KERN_MEMORY_ZONE);
2754
2755 if (retval != KERN_SUCCESS) {
2756 /*
2757 * No page was available. Drop the
2758 * lock to give another thread a chance at it, and
2759 * wait for the pageout daemon to make progress.
2760 */
2761 lck_mtx_unlock(&vm_page_alloc_lock);
2762 vm_page_wait(THREAD_UNINT);
2763 return;
2764 }
2765
2766 zcram(vm_page_zone, addr, PAGE_SIZE);
2767
2768 lck_mtx_unlock(&vm_page_alloc_lock);
2769 }
2770
2771
2772 /*
2773 * vm_pool_low():
2774 *
2775 * Return true if it is not likely that a non-vm_privileged thread
2776 * can get memory without blocking. Advisory only, since the
2777 * situation may change under us.
2778 */
2779 int
2780 vm_pool_low(void)
2781 {
2782 /* No locking, at worst we will fib. */
2783 return vm_page_free_count <= vm_page_free_reserved;
2784 }
2785
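/*
 * Hypothetical caller sketch: use the advisory result to skip an
 * optional, best-effort allocation rather than risk blocking.
 *
 *	if (!vm_pool_low()) {
 *		mem = vm_page_grab();
 *	}
 */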
2786 boolean_t vm_darkwake_mode = FALSE;
2787
2788 /*
2789 * vm_update_darkwake_mode():
2790 *
2791 * Tells the VM that the system is in / out of darkwake.
2792 *
2793 * Today, the VM only lowers/raises the background queue target
2794 * so as to favor consuming more/less background pages when
2795 * darkwake is ON/OFF.
2796 *
2797 * We might need to do more things in the future.
2798 */
2799
2800 void
2801 vm_update_darkwake_mode(boolean_t darkwake_mode)
2802 {
2803 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
2804
2805 vm_page_lockspin_queues();
2806
2807 if (vm_darkwake_mode == darkwake_mode) {
2808 /*
2809 * No change.
2810 */
2811 vm_page_unlock_queues();
2812 return;
2813 }
2814
2815 vm_darkwake_mode = darkwake_mode;
2816
2817 if (vm_darkwake_mode == TRUE) {
2818 #if CONFIG_BACKGROUND_QUEUE
2819
2820 /* save background target to restore later */
2821 vm_page_background_target_snapshot = vm_page_background_target;
2822
2823 /* target is set to 0...no protection for background pages */
2824 vm_page_background_target = 0;
2825
2826 #endif /* CONFIG_BACKGROUND_QUEUE */
2827 } else if (vm_darkwake_mode == FALSE) {
2828 #if CONFIG_BACKGROUND_QUEUE
2829
2830 if (vm_page_background_target_snapshot) {
2831 vm_page_background_target = vm_page_background_target_snapshot;
2832 }
2833 #endif /* CONFIG_BACKGROUND_QUEUE */
2834 }
2835 vm_page_unlock_queues();
2836 }
2837
2838 #if CONFIG_BACKGROUND_QUEUE
2839
2840 void
2841 vm_page_update_background_state(vm_page_t mem)
2842 {
2843 if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2844 return;
2845 }
2846
2847 if (mem->vmp_in_background == FALSE) {
2848 return;
2849 }
2850
2851 task_t my_task = current_task();
2852
2853 if (my_task) {
2854 if (task_get_darkwake_mode(my_task)) {
2855 return;
2856 }
2857 }
2858
2859 #if BACKGROUNDQ_BASED_ON_QOS
2860 if (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_QOS) <= THREAD_QOS_LEGACY) {
2861 return;
2862 }
2863 #else
2864 if (my_task) {
2865 if (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG)) {
2866 return;
2867 }
2868 }
2869 #endif
2870 vm_page_lockspin_queues();
2871
2872 mem->vmp_in_background = FALSE;
2873 vm_page_background_promoted_count++;
2874
2875 vm_page_remove_from_backgroundq(mem);
2876
2877 vm_page_unlock_queues();
2878 }
2879
2880
2881 void
2882 vm_page_assign_background_state(vm_page_t mem)
2883 {
2884 if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2885 return;
2886 }
2887
2888 task_t my_task = current_task();
2889
2890 if (my_task) {
2891 if (task_get_darkwake_mode(my_task)) {
2892 mem->vmp_in_background = TRUE;
2893 return;
2894 }
2895 }
2896
2897 #if BACKGROUNDQ_BASED_ON_QOS
2898 if (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_QOS) <= THREAD_QOS_LEGACY) {
2899 mem->vmp_in_background = TRUE;
2900 } else {
2901 mem->vmp_in_background = FALSE;
2902 }
2903 #else
2904 if (my_task) {
2905 mem->vmp_in_background = proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG);
2906 }
2907 #endif
2908 }
2909
2910
2911 void
2912 vm_page_remove_from_backgroundq(
2913 vm_page_t mem)
2914 {
2915 vm_object_t m_object;
2916
2917 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2918
2919 if (mem->vmp_on_backgroundq) {
2920 vm_page_queue_remove(&vm_page_queue_background, mem, vmp_backgroundq);
2921
2922 mem->vmp_backgroundq.next = 0;
2923 mem->vmp_backgroundq.prev = 0;
2924 mem->vmp_on_backgroundq = FALSE;
2925
2926 vm_page_background_count--;
2927
2928 m_object = VM_PAGE_OBJECT(mem);
2929
2930 if (m_object->internal) {
2931 vm_page_background_internal_count--;
2932 } else {
2933 vm_page_background_external_count--;
2934 }
2935 } else {
2936 assert(VM_PAGE_UNPACK_PTR(mem->vmp_backgroundq.next) == (uintptr_t)NULL &&
2937 VM_PAGE_UNPACK_PTR(mem->vmp_backgroundq.prev) == (uintptr_t)NULL);
2938 }
2939 }
2940
2941
2942 void
2943 vm_page_add_to_backgroundq(
2944 vm_page_t mem,
2945 boolean_t first)
2946 {
2947 vm_object_t m_object;
2948
2949 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2950
2951 if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2952 return;
2953 }
2954
2955 if (mem->vmp_on_backgroundq == FALSE) {
2956 m_object = VM_PAGE_OBJECT(mem);
2957
2958 if (vm_page_background_exclude_external && !m_object->internal) {
2959 return;
2960 }
2961
2962 if (first == TRUE) {
2963 vm_page_queue_enter_first(&vm_page_queue_background, mem, vmp_backgroundq);
2964 } else {
2965 vm_page_queue_enter(&vm_page_queue_background, mem, vmp_backgroundq);
2966 }
2967 mem->vmp_on_backgroundq = TRUE;
2968
2969 vm_page_background_count++;
2970
2971 if (m_object->internal) {
2972 vm_page_background_internal_count++;
2973 } else {
2974 vm_page_background_external_count++;
2975 }
2976 }
2977 }
2978
2979 #endif /* CONFIG_BACKGROUND_QUEUE */
2980
2981 /*
2982 * This can be switched to FALSE to help debug drivers
2983 * that are having problems with memory > 4G.
2984 */
2985 boolean_t vm_himemory_mode = TRUE;
2986
2987 /*
2988 * this interface exists to support hardware controllers
2989 * incapable of generating DMAs with more than 32 bits
2990 * of address on platforms with physical memory > 4G...
2991 */
2992 unsigned int vm_lopages_allocated_q = 0;
2993 unsigned int vm_lopages_allocated_cpm_success = 0;
2994 unsigned int vm_lopages_allocated_cpm_failed = 0;
2995 vm_page_queue_head_t vm_lopage_queue_free VM_PAGE_PACKED_ALIGNED;
2996
2997 vm_page_t
2998 vm_page_grablo(void)
2999 {
3000 vm_page_t mem;
3001
3002 if (vm_lopage_needed == FALSE) {
3003 return vm_page_grab();
3004 }
3005
3006 lck_mtx_lock_spin(&vm_page_queue_free_lock);
3007
3008 if (!vm_page_queue_empty(&vm_lopage_queue_free)) {
3009 vm_page_queue_remove_first(&vm_lopage_queue_free, mem, vmp_pageq);
3010 assert(vm_lopage_free_count);
3011 assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
3012 mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
3013
3014 vm_lopage_free_count--;
3015 vm_lopages_allocated_q++;
3016
3017 if (vm_lopage_free_count < vm_lopage_lowater) {
3018 vm_lopage_refill = TRUE;
3019 }
3020
3021 lck_mtx_unlock(&vm_page_queue_free_lock);
3022
3023 #if CONFIG_BACKGROUND_QUEUE
3024 vm_page_assign_background_state(mem);
3025 #endif
3026 } else {
3027 lck_mtx_unlock(&vm_page_queue_free_lock);
3028
3029 if (cpm_allocate(PAGE_SIZE, &mem, atop(PPNUM_MAX), 0, FALSE, KMA_LOMEM) != KERN_SUCCESS) {
3030 lck_mtx_lock_spin(&vm_page_queue_free_lock);
3031 vm_lopages_allocated_cpm_failed++;
3032 lck_mtx_unlock(&vm_page_queue_free_lock);
3033
3034 return VM_PAGE_NULL;
3035 }
3036 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3037
3038 mem->vmp_busy = TRUE;
3039
3040 vm_page_lockspin_queues();
3041
3042 mem->vmp_gobbled = FALSE;
3043 vm_page_gobble_count--;
3044 vm_page_wire_count--;
3045
3046 vm_lopages_allocated_cpm_success++;
3047 vm_page_unlock_queues();
3048 }
3049 assert(mem->vmp_busy);
3050 assert(!mem->vmp_pmapped);
3051 assert(!mem->vmp_wpmapped);
3052 assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3053
3054 VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3055
3056 disable_preemption();
3057 *PERCPU_GET(vm_page_grab_count) += 1;
3058 VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, 0, 1, 0, 0);
3059 enable_preemption();
3060
3061 return mem;
3062 }
3063
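/*
 * Hypothetical caller sketch (not part of this file): a driver path
 * that must hand a sub-4G physical page to a 32-bit DMA engine uses
 * the low-memory allocator instead of vm_page_grab().
 *
 *	mem = vm_page_grablo();
 *	if (mem == VM_PAGE_NULL) {
 *		return KERN_RESOURCE_SHORTAGE;
 *	}
 */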
3064
3065 /*
3066 * vm_page_grab:
3067 *
3068 * first try to grab a page from the per-cpu free list...
3069 * this must be done while pre-emption is disabled... if
3070 * a page is available, we're done...
3071 * if no page is available, grab the vm_page_queue_free_lock
3072 * and see if current number of free pages would allow us
3073 * to grab at least 1... if not, return VM_PAGE_NULL as before...
3074 * if there are pages available, disable preemption and
3075 * recheck the state of the per-cpu free list... we could
3076 * have been preempted and moved to a different cpu, or
3077 * some other thread could have re-filled it... if still
3078 * empty, figure out how many pages we can steal from the
3079 * global free queue and move to the per-cpu queue...
3080 * return 1 of these pages when done... only wake up the
3081 * pageout_scan thread if we moved pages from the global
3082 * list... no need for the wakeup if we've satisfied the
3083 * request from the per-cpu queue.
3084 */
3085
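/*
 * Minimal caller sketch (hypothetical): a non-privileged thread that
 * can tolerate blocking simply waits and retries when the grab fails.
 *
 *	while ((mem = vm_page_grab()) == VM_PAGE_NULL) {
 *		VM_PAGE_WAIT();
 *	}
 */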
3086 #if CONFIG_SECLUDED_MEMORY
3087 vm_page_t vm_page_grab_secluded(void);
3088 #endif /* CONFIG_SECLUDED_MEMORY */
3089
3090 static inline void
3091 vm_page_grab_diags(void);
3092
3093 vm_page_t
3094 vm_page_grab(void)
3095 {
3096 return vm_page_grab_options(VM_PAGE_GRAB_OPTIONS_NONE);
3097 }
3098
3099 #if HIBERNATION
3100 boolean_t hibernate_rebuild_needed = FALSE;
3101 #endif /* HIBERNATION */
3102
3103 vm_page_t
3104 vm_page_grab_options(
3105 int grab_options)
3106 {
3107 vm_page_t mem;
3108
3109 disable_preemption();
3110
3111 if ((mem = *PERCPU_GET(free_pages))) {
3112 return_page_from_cpu_list:
3113 assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
3114
3115 #if HIBERNATION
3116 if (hibernate_rebuild_needed) {
3117 panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
3118 }
3119 #endif /* HIBERNATION */
3120
3121 vm_page_grab_diags();
3122
3123 vm_offset_t pcpu_base = current_percpu_base();
3124 *PERCPU_GET_WITH_BASE(pcpu_base, vm_page_grab_count) += 1;
3125 *PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = mem->vmp_snext;
3126 VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
3127
3128 enable_preemption();
3129 VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3130 mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
3131
3132 assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3133 assert(mem->vmp_tabled == FALSE);
3134 assert(mem->vmp_object == 0);
3135 assert(!mem->vmp_laundry);
3136 ASSERT_PMAP_FREE(mem);
3137 assert(mem->vmp_busy);
3138 assert(!mem->vmp_pmapped);
3139 assert(!mem->vmp_wpmapped);
3140 assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3141
3142 #if CONFIG_BACKGROUND_QUEUE
3143 vm_page_assign_background_state(mem);
3144 #endif
3145 return mem;
3146 }
3147 enable_preemption();
3148
3149
3150 /*
3151 * Optionally produce warnings if the wire or gobble
3152 * counts exceed some threshold.
3153 */
3154 #if VM_PAGE_WIRE_COUNT_WARNING
3155 if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) {
3156 printf("mk: vm_page_grab(): high wired page count of %d\n",
3157 vm_page_wire_count);
3158 }
3159 #endif
3160 #if VM_PAGE_GOBBLE_COUNT_WARNING
3161 if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) {
3162 printf("mk: vm_page_grab(): high gobbled page count of %d\n",
3163 vm_page_gobble_count);
3164 }
3165 #endif
3166
3167 /*
3168 * If free count is low and we have delayed pages from early boot,
3169 * get one of those instead.
3170 */
3171 if (__improbable(vm_delayed_count > 0 &&
3172 vm_page_free_count <= vm_page_free_target &&
3173 (mem = vm_get_delayed_page(grab_options)) != NULL)) {
3174 return mem;
3175 }
3176
3177 lck_mtx_lock_spin(&vm_page_queue_free_lock);
3178
3179 /*
3180 * Only let privileged threads (involved in pageout)
3181 * dip into the reserved pool.
3182 */
3183 if ((vm_page_free_count < vm_page_free_reserved) &&
3184 !(current_thread()->options & TH_OPT_VMPRIV)) {
3185 /* no page for us in the free queue... */
3186 lck_mtx_unlock(&vm_page_queue_free_lock);
3187 mem = VM_PAGE_NULL;
3188
3189 #if CONFIG_SECLUDED_MEMORY
3190 /* ... but can we try and grab from the secluded queue? */
3191 if (vm_page_secluded_count > 0 &&
3192 ((grab_options & VM_PAGE_GRAB_SECLUDED) ||
3193 task_can_use_secluded_mem(current_task(), TRUE))) {
3194 mem = vm_page_grab_secluded();
3195 if (grab_options & VM_PAGE_GRAB_SECLUDED) {
3196 vm_page_secluded.grab_for_iokit++;
3197 if (mem) {
3198 vm_page_secluded.grab_for_iokit_success++;
3199 }
3200 }
3201 if (mem) {
3202 VM_CHECK_MEMORYSTATUS;
3203
3204 disable_preemption();
3205 vm_page_grab_diags();
3206 *PERCPU_GET(vm_page_grab_count) += 1;
3207 VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
3208 enable_preemption();
3209
3210 return mem;
3211 }
3212 }
3213 #else /* CONFIG_SECLUDED_MEMORY */
3214 (void) grab_options;
3215 #endif /* CONFIG_SECLUDED_MEMORY */
3216 } else {
3217 vm_page_t head;
3218 vm_page_t tail;
3219 unsigned int pages_to_steal;
3220 unsigned int color;
3221 unsigned int clump_end, sub_count;
3222
3223 while (vm_page_free_count == 0) {
3224 lck_mtx_unlock(&vm_page_queue_free_lock);
3225 /*
3226 * must be a privileged thread to be
3227 * in this state since a non-privileged
3228 * thread would have bailed if we were
3229 * under the vm_page_free_reserved mark
3230 */
3231 VM_PAGE_WAIT();
3232 lck_mtx_lock_spin(&vm_page_queue_free_lock);
3233 }
3234
3235 disable_preemption();
3236
3237 if ((mem = *PERCPU_GET(free_pages))) {
3238 lck_mtx_unlock(&vm_page_queue_free_lock);
3239
3240 /*
3241 * we got preempted and moved to another processor
3242 * or we got preempted and someone else ran and filled the cache
3243 */
3244 goto return_page_from_cpu_list;
3245 }
3246 if (vm_page_free_count <= vm_page_free_reserved) {
3247 pages_to_steal = 1;
3248 } else {
3249 if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved)) {
3250 pages_to_steal = vm_free_magazine_refill_limit;
3251 } else {
3252 pages_to_steal = (vm_page_free_count - vm_page_free_reserved);
3253 }
3254 }
3255 color = *PERCPU_GET(start_color);
3256 head = tail = NULL;
3257
3258 vm_page_free_count -= pages_to_steal;
3259 clump_end = sub_count = 0;
3260
3261 while (pages_to_steal--) {
3262 while (vm_page_queue_empty(&vm_page_queue_free[color].qhead)) {
3263 color = (color + 1) & vm_color_mask;
3264 }
3265 #if defined(__x86_64__)
3266 vm_page_queue_remove_first_with_clump(&vm_page_queue_free[color].qhead,
3267 mem, clump_end);
3268 #else
3269 vm_page_queue_remove_first(&vm_page_queue_free[color].qhead,
3270 mem, vmp_pageq);
3271 #endif
3272
3273 assert(mem->vmp_q_state == VM_PAGE_ON_FREE_Q);
3274
3275 VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3276
3277 #if defined(__arm__) || defined(__arm64__)
3278 color = (color + 1) & vm_color_mask;
3279 #else
3280
3281 #if DEVELOPMENT || DEBUG
3282
3283 sub_count++;
3284 if (clump_end) {
3285 vm_clump_update_stats(sub_count);
3286 sub_count = 0;
3287 color = (color + 1) & vm_color_mask;
3288 }
3289 #else
3290 if (clump_end) {
3291 color = (color + 1) & vm_color_mask;
3292 }
3293
3294 #endif /* if DEVELOPMENT || DEBUG */
3295
3296 #endif /* if defined(__arm__) || defined(__arm64__) */
3297
3298 if (head == NULL) {
3299 head = mem;
3300 } else {
3301 tail->vmp_snext = mem;
3302 }
3303 tail = mem;
3304
3305 assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3306 assert(mem->vmp_tabled == FALSE);
3307 assert(mem->vmp_object == 0);
3308 assert(!mem->vmp_laundry);
3309
3310 mem->vmp_q_state = VM_PAGE_ON_FREE_LOCAL_Q;
3311
3312 ASSERT_PMAP_FREE(mem);
3313 assert(mem->vmp_busy);
3314 assert(!mem->vmp_pmapped);
3315 assert(!mem->vmp_wpmapped);
3316 assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3317 }
3318 #if defined (__x86_64__) && (DEVELOPMENT || DEBUG)
3319 vm_clump_update_stats(sub_count);
3320 #endif
3321 lck_mtx_unlock(&vm_page_queue_free_lock);
3322
3323 #if HIBERNATION
3324 if (hibernate_rebuild_needed) {
3325 panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
3326 }
3327 #endif /* HIBERNATION */
3328 vm_offset_t pcpu_base = current_percpu_base();
3329 *PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = head->vmp_snext;
3330 *PERCPU_GET_WITH_BASE(pcpu_base, start_color) = color;
3331
3332 /*
3333 * satisfy this request
3334 */
3335 vm_page_grab_diags();
3336 *PERCPU_GET_WITH_BASE(pcpu_base, vm_page_grab_count) += 1;
3337 VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
3338 mem = head;
3339 assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
3340
3341 VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3342 mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
3343
3344 enable_preemption();
3345 }
3346 /*
3347 * Decide if we should poke the pageout daemon.
3348 * We do this if the free count is less than the low
3349 * water mark. VM Pageout Scan will keep running till
3350 * the free_count > free_target (& hence above free_min).
3351 * This wakeup is to catch the possibility of the counts
3352 * dropping between VM Pageout Scan parking and this check.
3353 *
3354 * We don't have the counts locked ... if they change a little,
3355 * it doesn't really matter.
3356 */
3357 if (vm_page_free_count < vm_page_free_min) {
3358 lck_mtx_lock(&vm_page_queue_free_lock);
3359 if (vm_pageout_running == FALSE) {
3360 lck_mtx_unlock(&vm_page_queue_free_lock);
3361 thread_wakeup((event_t) &vm_page_free_wanted);
3362 } else {
3363 lck_mtx_unlock(&vm_page_queue_free_lock);
3364 }
3365 }
3366
3367 VM_CHECK_MEMORYSTATUS;
3368
3369 if (mem) {
3370 // dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 4); /* (TEST/DEBUG) */
3371
3372 #if CONFIG_BACKGROUND_QUEUE
3373 vm_page_assign_background_state(mem);
3374 #endif
3375 }
3376 return mem;
3377 }
3378
3379 #if CONFIG_SECLUDED_MEMORY
3380 vm_page_t
3381 vm_page_grab_secluded(void)
3382 {
3383 vm_page_t mem;
3384 vm_object_t object;
3385 int refmod_state;
3386
3387 if (vm_page_secluded_count == 0) {
3388 /* no secluded pages to grab... */
3389 return VM_PAGE_NULL;
3390 }
3391
3392 /* secluded queue is protected by the VM page queue lock */
3393 vm_page_lock_queues();
3394
3395 if (vm_page_secluded_count == 0) {
3396 /* no secluded pages to grab... */
3397 vm_page_unlock_queues();
3398 return VM_PAGE_NULL;
3399 }
3400
3401 #if 00
3402 /* can we grab from the secluded queue? */
3403 if (vm_page_secluded_count > vm_page_secluded_target ||
3404 (vm_page_secluded_count > 0 &&
3405 task_can_use_secluded_mem(current_task(), TRUE))) {
3406 /* OK */
3407 } else {
3408 /* can't grab from secluded queue... */
3409 vm_page_unlock_queues();
3410 return VM_PAGE_NULL;
3411 }
3412 #endif
3413
3414 /* we can grab a page from secluded queue! */
3415 assert((vm_page_secluded_count_free +
3416 vm_page_secluded_count_inuse) ==
3417 vm_page_secluded_count);
3418 if (current_task()->task_can_use_secluded_mem) {
3419 assert(num_tasks_can_use_secluded_mem > 0);
3420 }
3421 assert(!vm_page_queue_empty(&vm_page_queue_secluded));
3422 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3423 mem = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
3424 assert(mem->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
3425 vm_page_queues_remove(mem, TRUE);
3426
3427 object = VM_PAGE_OBJECT(mem);
3428
3429 assert(!mem->vmp_fictitious);
3430 assert(!VM_PAGE_WIRED(mem));
3431 if (object == VM_OBJECT_NULL) {
3432 /* free for grab! */
3433 vm_page_unlock_queues();
3434 vm_page_secluded.grab_success_free++;
3435
3436 assert(mem->vmp_busy);
3437 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3438 assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
3439 assert(mem->vmp_pageq.next == 0);
3440 assert(mem->vmp_pageq.prev == 0);
3441 assert(mem->vmp_listq.next == 0);
3442 assert(mem->vmp_listq.prev == 0);
3443 #if CONFIG_BACKGROUND_QUEUE
3444 assert(mem->vmp_on_backgroundq == 0);
3445 assert(mem->vmp_backgroundq.next == 0);
3446 assert(mem->vmp_backgroundq.prev == 0);
3447 #endif /* CONFIG_BACKGROUND_QUEUE */
3448 return mem;
3449 }
3450
3451 assert(!object->internal);
3452 // vm_page_pageable_external_count--;
3453
3454 if (!vm_object_lock_try(object)) {
3455 // printf("SECLUDED: page %p: object %p locked\n", mem, object);
3456 vm_page_secluded.grab_failure_locked++;
3457 reactivate_secluded_page:
3458 vm_page_activate(mem);
3459 vm_page_unlock_queues();
3460 return VM_PAGE_NULL;
3461 }
3462 if (mem->vmp_busy ||
3463 mem->vmp_cleaning ||
3464 mem->vmp_laundry) {
3465 /* can't steal page in this state... */
3466 vm_object_unlock(object);
3467 vm_page_secluded.grab_failure_state++;
3468 goto reactivate_secluded_page;
3469 }
3470
3471 mem->vmp_busy = TRUE;
3472 refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
3473 if (refmod_state & VM_MEM_REFERENCED) {
3474 mem->vmp_reference = TRUE;
3475 }
3476 if (refmod_state & VM_MEM_MODIFIED) {
3477 SET_PAGE_DIRTY(mem, FALSE);
3478 }
3479 if (mem->vmp_dirty || mem->vmp_precious) {
3480 /* can't grab a dirty page; re-activate */
3481 // printf("SECLUDED: dirty page %p\n", mem);
3482 PAGE_WAKEUP_DONE(mem);
3483 vm_page_secluded.grab_failure_dirty++;
3484 vm_object_unlock(object);
3485 goto reactivate_secluded_page;
3486 }
3487 if (mem->vmp_reference) {
3488 /* it's been used but we do need to grab a page... */
3489 }
3490
3491 vm_page_unlock_queues();
3492
3493 /* finish what vm_page_free() would have done... */
3494 vm_page_free_prepare_object(mem, TRUE);
3495 vm_object_unlock(object);
3496 object = VM_OBJECT_NULL;
3497 if (vm_page_free_verify) {
3498 ASSERT_PMAP_FREE(mem);
3499 }
3500 pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
3501 vm_page_secluded.grab_success_other++;
3502
3503 assert(mem->vmp_busy);
3504 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3505 assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
3506 assert(mem->vmp_pageq.next == 0);
3507 assert(mem->vmp_pageq.prev == 0);
3508 assert(mem->vmp_listq.next == 0);
3509 assert(mem->vmp_listq.prev == 0);
3510 #if CONFIG_BACKGROUND_QUEUE
3511 assert(mem->vmp_on_backgroundq == 0);
3512 assert(mem->vmp_backgroundq.next == 0);
3513 assert(mem->vmp_backgroundq.prev == 0);
3514 #endif /* CONFIG_BACKGROUND_QUEUE */
3515
3516 return mem;
3517 }
3518
3519 uint64_t
3520 vm_page_secluded_drain(void)
3521 {
3522 vm_page_t local_freeq;
3523 int local_freed;
3524 uint64_t num_reclaimed;
3525 unsigned int saved_secluded_count, saved_secluded_target;
3526
3527 num_reclaimed = 0;
3528 local_freeq = NULL;
3529 local_freed = 0;
3530
3531 vm_page_lock_queues();
3532
3533 saved_secluded_count = vm_page_secluded_count;
3534 saved_secluded_target = vm_page_secluded_target;
3535 vm_page_secluded_target = 0;
3536 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3537 while (vm_page_secluded_count) {
3538 vm_page_t secluded_page;
3539
3540 assert((vm_page_secluded_count_free +
3541 vm_page_secluded_count_inuse) ==
3542 vm_page_secluded_count);
3543 secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
3544 assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
3545
3546 vm_page_queues_remove(secluded_page, FALSE);
3547 assert(!secluded_page->vmp_fictitious);
3548 assert(!VM_PAGE_WIRED(secluded_page));
3549
3550 if (secluded_page->vmp_object == 0) {
3551 /* transfer to free queue */
3552 assert(secluded_page->vmp_busy);
3553 secluded_page->vmp_snext = local_freeq;
3554 local_freeq = secluded_page;
3555 local_freed += 1;
3556 } else {
3557 /* transfer to head of active queue */
3558 vm_page_enqueue_active(secluded_page, FALSE);
3559 secluded_page = VM_PAGE_NULL;
3560 }
3561 num_reclaimed++;
3562 }
3563 vm_page_secluded_target = saved_secluded_target;
3564 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3565
3566 // printf("FBDP %s:%d secluded_count %d->%d, target %d, reclaimed %lld\n", __FUNCTION__, __LINE__, saved_secluded_count, vm_page_secluded_count, vm_page_secluded_target, num_reclaimed);
3567
3568 vm_page_unlock_queues();
3569
3570 if (local_freed) {
3571 vm_page_free_list(local_freeq, TRUE);
3572 local_freeq = NULL;
3573 local_freed = 0;
3574 }
3575
3576 return num_reclaimed;
3577 }
3578 #endif /* CONFIG_SECLUDED_MEMORY */
3579
3580
3581 static inline void
3582 vm_page_grab_diags()
3583 {
3584 #if DEVELOPMENT || DEBUG
3585 task_t task = current_task();
3586 if (task == NULL) {
3587 return;
3588 }
3589
3590 ledger_credit(task->ledger, task_ledgers.pages_grabbed, 1);
3591 #endif /* DEVELOPMENT || DEBUG */
3592 }
3593
3594 /*
3595 * vm_page_release:
3596 *
3597 * Return a page to the free list.
3598 */
3599
3600 void
3601 vm_page_release(
3602 vm_page_t mem,
3603 boolean_t page_queues_locked)
3604 {
3605 unsigned int color;
3606 int need_wakeup = 0;
3607 int need_priv_wakeup = 0;
3608 #if CONFIG_SECLUDED_MEMORY
3609 int need_secluded_wakeup = 0;
3610 #endif /* CONFIG_SECLUDED_MEMORY */
3611 event_t wakeup_event = NULL;
3612
3613 if (page_queues_locked) {
3614 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3615 } else {
3616 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
3617 }
3618
3619 assert(!mem->vmp_private && !mem->vmp_fictitious);
3620 if (vm_page_free_verify) {
3621 ASSERT_PMAP_FREE(mem);
3622 }
3623 // dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 5); /* (TEST/DEBUG) */
3624
3625 pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
3626
3627 lck_mtx_lock_spin(&vm_page_queue_free_lock);
3628
3629 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3630 assert(mem->vmp_busy);
3631 assert(!mem->vmp_laundry);
3632 assert(mem->vmp_object == 0);
3633 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
3634 assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3635 #if CONFIG_BACKGROUND_QUEUE
3636 assert(mem->vmp_backgroundq.next == 0 &&
3637 mem->vmp_backgroundq.prev == 0 &&
3638 mem->vmp_on_backgroundq == FALSE);
3639 #endif
3640 if ((mem->vmp_lopage == TRUE || vm_lopage_refill == TRUE) &&
3641 vm_lopage_free_count < vm_lopage_free_limit &&
3642 VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
3643 /*
3644 * this exists to support hardware controllers
3645 * incapable of generating DMAs with more than 32 bits
3646 * of address on platforms with physical memory > 4G...
3647 */
3648 vm_page_queue_enter_first(&vm_lopage_queue_free, mem, vmp_pageq);
3649 vm_lopage_free_count++;
3650
3651 if (vm_lopage_free_count >= vm_lopage_free_limit) {
3652 vm_lopage_refill = FALSE;
3653 }
3654
3655 mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
3656 mem->vmp_lopage = TRUE;
3657 #if CONFIG_SECLUDED_MEMORY
3658 } else if (vm_page_free_count > vm_page_free_reserved &&
3659 vm_page_secluded_count < vm_page_secluded_target &&
3660 num_tasks_can_use_secluded_mem == 0) {
3661 /*
3662 * XXX FBDP TODO: also avoid refilling secluded queue
3663 * when some IOKit objects are already grabbing from it...
3664 */
3665 if (!page_queues_locked) {
3666 if (!vm_page_trylock_queues()) {
3667 /* take locks in right order */
3668 lck_mtx_unlock(&vm_page_queue_free_lock);
3669 vm_page_lock_queues();
3670 lck_mtx_lock_spin(&vm_page_queue_free_lock);
3671 }
3672 }
3673 mem->vmp_lopage = FALSE;
3674 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3675 vm_page_queue_enter_first(&vm_page_queue_secluded, mem, vmp_pageq);
3676 mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
3677 vm_page_secluded_count++;
3678 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3679 vm_page_secluded_count_free++;
3680 if (!page_queues_locked) {
3681 vm_page_unlock_queues();
3682 }
3683 LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_OWNED);
3684 if (vm_page_free_wanted_secluded > 0) {
3685 vm_page_free_wanted_secluded--;
3686 need_secluded_wakeup = 1;
3687 }
3688 #endif /* CONFIG_SECLUDED_MEMORY */
3689 } else {
3690 mem->vmp_lopage = FALSE;
3691 mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
3692
3693 color = VM_PAGE_GET_COLOR(mem);
3694 #if defined(__x86_64__)
3695 vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
3696 #else
3697 vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
3698 #endif
3699 vm_page_free_count++;
3700 /*
3701 * Check if we should wake up someone waiting for page.
3702 * But don't bother waking them unless they can allocate.
3703 *
3704 * We wake up only one thread, to prevent starvation.
3705 * Because the scheduling system handles wait queues FIFO,
3706 * if we wake up all waiting threads, one greedy thread
3707 * can starve multiple well-behaved threads. When the threads
3708 * all wake up, the greedy thread runs first, grabs the page,
3709 * and waits for another page. It will be the first to run
3710 * when the next page is freed.
3711 *
3712 * However, there is a slight danger here.
3713 * The thread we wake might not use the free page.
3714 * Then the other threads could wait indefinitely
3715 * while the page goes unused. To forestall this,
3716 * the pageout daemon will keep making free pages
3717 * as long as vm_page_free_wanted is non-zero.
3718 */
3719
3720 assert(vm_page_free_count > 0);
3721 if (vm_page_free_wanted_privileged > 0) {
3722 vm_page_free_wanted_privileged--;
3723 need_priv_wakeup = 1;
3724 #if CONFIG_SECLUDED_MEMORY
3725 } else if (vm_page_free_wanted_secluded > 0 &&
3726 vm_page_free_count > vm_page_free_reserved) {
3727 vm_page_free_wanted_secluded--;
3728 need_secluded_wakeup = 1;
3729 #endif /* CONFIG_SECLUDED_MEMORY */
3730 } else if (vm_page_free_wanted > 0 &&
3731 vm_page_free_count > vm_page_free_reserved) {
3732 vm_page_free_wanted--;
3733 need_wakeup = 1;
3734 }
3735 }
3736 vm_pageout_vminfo.vm_page_pages_freed++;
3737
3738 VM_DEBUG_CONSTANT_EVENT(vm_page_release, VM_PAGE_RELEASE, DBG_FUNC_NONE, 1, 0, 0, 0);
3739
3740 lck_mtx_unlock(&vm_page_queue_free_lock);
3741
3742 if (need_priv_wakeup) {
3743 wakeup_event = &vm_page_free_wanted_privileged;
3744 }
3745 #if CONFIG_SECLUDED_MEMORY
3746 else if (need_secluded_wakeup) {
3747 wakeup_event = &vm_page_free_wanted_secluded;
3748 }
3749 #endif /* CONFIG_SECLUDED_MEMORY */
3750 else if (need_wakeup) {
3751 wakeup_event = &vm_page_free_count;
3752 }
3753
3754 if (wakeup_event) {
3755 if (vps_dynamic_priority_enabled == TRUE) {
3756 thread_t thread_woken = NULL;
3757 wakeup_one_with_inheritor((event_t) wakeup_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &thread_woken);
3758 thread_deallocate(thread_woken);
3759 } else {
3760 thread_wakeup_one((event_t) wakeup_event);
3761 }
3762 }
3763
3764 VM_CHECK_MEMORYSTATUS;
3765 }
3766
3767 /*
3768 * This version of vm_page_release() is used only at startup
3769 * when we are single-threaded and pages are being released
3770 * for the first time. Hence, no locking is done and unnecessary checks are skipped.
3771 * Note: VM_CHECK_MEMORYSTATUS is invoked by the caller.
3772 */
3773 void
3774 vm_page_release_startup(
3775 vm_page_t mem)
3776 {
3777 vm_page_queue_t queue_free;
3778
3779 if (vm_lopage_free_count < vm_lopage_free_limit &&
3780 VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
3781 mem->vmp_lopage = TRUE;
3782 mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
3783 vm_lopage_free_count++;
3784 queue_free = &vm_lopage_queue_free;
3785 #if CONFIG_SECLUDED_MEMORY
3786 } else if (vm_page_secluded_count < vm_page_secluded_target) {
3787 mem->vmp_lopage = FALSE;
3788 mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
3789 vm_page_secluded_count++;
3790 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3791 vm_page_secluded_count_free++;
3792 queue_free = &vm_page_queue_secluded;
3793 #endif /* CONFIG_SECLUDED_MEMORY */
3794 } else {
3795 mem->vmp_lopage = FALSE;
3796 mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
3797 vm_page_free_count++;
3798 queue_free = &vm_page_queue_free[VM_PAGE_GET_COLOR(mem)].qhead;
3799 }
3800 if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
3801 #if defined(__x86_64__)
3802 vm_page_queue_enter_clump(queue_free, mem);
3803 #else
3804 vm_page_queue_enter(queue_free, mem, vmp_pageq);
3805 #endif
3806 } else {
3807 vm_page_queue_enter_first(queue_free, mem, vmp_pageq);
3808 }
3809 }
3810
3811 /*
3812 * vm_page_wait:
3813 *
3814 * Wait for a page to become available.
3815 * If there are plenty of free pages, then we don't sleep.
3816 *
3817 * Returns:
3818 * TRUE: There may be another page, try again
3819 * FALSE: We were interrupted out of our wait, don't try again
3820 */
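/*
 * Typical retry loop (illustrative sketch only; it mirrors the
 * vm_page_grab()/vm_page_wait() pattern used by
 * vm_page_part_zero_fill() later in this file):
 *
 *	while ((mem = vm_page_grab()) == VM_PAGE_NULL) {
 *		if (!vm_page_wait(interruptible))
 *			break;		(interrupted: caller gives up)
 *	}
 */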
3821
3822 boolean_t
3823 vm_page_wait(
3824 int interruptible )
3825 {
3826 /*
3827 * We can't use vm_page_free_reserved to make this
3828 * determination. Consider: some thread might
3829 * need to allocate two pages. The first allocation
3830 * succeeds, the second fails. After the first page is freed,
3831 * a call to vm_page_wait must really block.
3832 */
3833 kern_return_t wait_result;
3834 int need_wakeup = 0;
3835 int is_privileged = current_thread()->options & TH_OPT_VMPRIV;
3836 event_t wait_event = NULL;
3837
3838 lck_mtx_lock_spin(&vm_page_queue_free_lock);
3839
3840 if (is_privileged && vm_page_free_count) {
3841 lck_mtx_unlock(&vm_page_queue_free_lock);
3842 return TRUE;
3843 }
3844
3845 if (vm_page_free_count >= vm_page_free_target) {
3846 lck_mtx_unlock(&vm_page_queue_free_lock);
3847 return TRUE;
3848 }
3849
3850 if (is_privileged) {
3851 if (vm_page_free_wanted_privileged++ == 0) {
3852 need_wakeup = 1;
3853 }
3854 wait_event = (event_t)&vm_page_free_wanted_privileged;
3855 #if CONFIG_SECLUDED_MEMORY
3856 } else if (secluded_for_apps &&
3857 task_can_use_secluded_mem(current_task(), FALSE)) {
3858 #if 00
3859 /* XXX FBDP: need pageq lock for this... */
3860 /* XXX FBDP: might wait even if pages available, */
3861 /* XXX FBDP: hopefully not for too long... */
3862 if (vm_page_secluded_count > 0) {
3863 lck_mtx_unlock(&vm_page_queue_free_lock);
3864 return TRUE;
3865 }
3866 #endif
3867 if (vm_page_free_wanted_secluded++ == 0) {
3868 need_wakeup = 1;
3869 }
3870 wait_event = (event_t)&vm_page_free_wanted_secluded;
3871 #endif /* CONFIG_SECLUDED_MEMORY */
3872 } else {
3873 if (vm_page_free_wanted++ == 0) {
3874 need_wakeup = 1;
3875 }
3876 wait_event = (event_t)&vm_page_free_count;
3877 }
3878
3879 /*
3880 * We don't do a vm_pageout_scan wakeup if we already have
3881 * some waiters because vm_pageout_scan checks for waiters
3882 * before it returns and does so behind the vm_page_queue_free_lock,
3883 * which we own when we bump the waiter counts.
3884 */
3885
3886 if (vps_dynamic_priority_enabled == TRUE) {
3887 /*
3888 * We are waking up vm_pageout_scan here. If it needs
3889 * the vm_page_queue_free_lock before we unlock it
3890 * we'll end up just blocking and incurring an extra
3891 * context switch. Could be a perf. issue.
3892 */
3893
3894 counter(c_vm_page_wait_block++);
3895
3896 if (need_wakeup) {
3897 thread_wakeup((event_t)&vm_page_free_wanted);
3898 }
3899
3900 /*
3901 * LD: This event is going to get recorded every time because
3902 * we don't get back THREAD_WAITING from lck_mtx_sleep_with_inheritor.
3903 * We just block in that routine.
3904 */
3905 VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
3906 vm_page_free_wanted_privileged,
3907 vm_page_free_wanted,
3908 #if CONFIG_SECLUDED_MEMORY
3909 vm_page_free_wanted_secluded,
3910 #else /* CONFIG_SECLUDED_MEMORY */
3911 0,
3912 #endif /* CONFIG_SECLUDED_MEMORY */
3913 0);
3914 wait_result = lck_mtx_sleep_with_inheritor(&vm_page_queue_free_lock,
3915 LCK_SLEEP_UNLOCK,
3916 wait_event,
3917 vm_pageout_scan_thread,
3918 interruptible,
3919 0);
3920 } else {
3921 wait_result = assert_wait(wait_event, interruptible);
3922
3923 lck_mtx_unlock(&vm_page_queue_free_lock);
3924 counter(c_vm_page_wait_block++);
3925
3926 if (need_wakeup) {
3927 thread_wakeup((event_t)&vm_page_free_wanted);
3928 }
3929
3930 if (wait_result == THREAD_WAITING) {
3931 VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
3932 vm_page_free_wanted_privileged,
3933 vm_page_free_wanted,
3934 #if CONFIG_SECLUDED_MEMORY
3935 vm_page_free_wanted_secluded,
3936 #else /* CONFIG_SECLUDED_MEMORY */
3937 0,
3938 #endif /* CONFIG_SECLUDED_MEMORY */
3939 0);
3940 wait_result = thread_block(THREAD_CONTINUE_NULL);
3941 VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block,
3942 VM_PAGE_WAIT_BLOCK, DBG_FUNC_END, 0, 0, 0, 0);
3943 }
3944 }
3945
3946 return (wait_result == THREAD_AWAKENED) || (wait_result == THREAD_NOT_WAITING);
3947 }
3948
3949 /*
3950 * vm_page_alloc:
3951 *
3952 * Allocate and return a memory cell associated
3953 * with this VM object/offset pair.
3954 *
3955 * Object must be locked.
3956 */
3957
3958 vm_page_t
3959 vm_page_alloc(
3960 vm_object_t object,
3961 vm_object_offset_t offset)
3962 {
3963 vm_page_t mem;
3964 int grab_options;
3965
3966 vm_object_lock_assert_exclusive(object);
3967 grab_options = 0;
3968 #if CONFIG_SECLUDED_MEMORY
3969 if (object->can_grab_secluded) {
3970 grab_options |= VM_PAGE_GRAB_SECLUDED;
3971 }
3972 #endif /* CONFIG_SECLUDED_MEMORY */
3973 mem = vm_page_grab_options(grab_options);
3974 if (mem == VM_PAGE_NULL) {
3975 return VM_PAGE_NULL;
3976 }
3977
3978 vm_page_insert(mem, object, offset);
3979
3980 return mem;
3981 }
3982
3983 /*
3984 * vm_page_alloc_guard:
3985 *
3986 * Allocate a fictitious page which will be used
3987 * as a guard page. The page will be inserted into
3988 * the object and returned to the caller.
3989 */
3990
3991 vm_page_t
3992 vm_page_alloc_guard(
3993 vm_object_t object,
3994 vm_object_offset_t offset)
3995 {
3996 vm_page_t mem;
3997
3998 vm_object_lock_assert_exclusive(object);
3999 mem = vm_page_grab_guard();
4000 if (mem == VM_PAGE_NULL) {
4001 return VM_PAGE_NULL;
4002 }
4003
4004 vm_page_insert(mem, object, offset);
4005
4006 return mem;
4007 }
4008
4009
4010 counter(unsigned int c_laundry_pages_freed = 0; )
4011
4012 /*
4013 * vm_page_free_prepare:
4014 *
4015 * Removes page from any queue it may be on
4016 * and disassociates it from its VM object.
4017 *
4018 * Object and page queues must be locked prior to entry.
4019 */
4020 static void
4021 vm_page_free_prepare(
4022 vm_page_t mem)
4023 {
4024 vm_page_free_prepare_queues(mem);
4025 vm_page_free_prepare_object(mem, TRUE);
4026 }
4027
4028
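/*
 * vm_page_free_prepare_queues:
 *
 * Remove the page from any paging queue it is on and drop its
 * wired/gobbled accounting, including the purgeable and ledger
 * adjustments for owned objects.
 *
 * The page queues lock must be held; so must the object lock
 * if the page belongs to an object.
 */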
4029 void
4030 vm_page_free_prepare_queues(
4031 vm_page_t mem)
4032 {
4033 vm_object_t m_object;
4034
4035 VM_PAGE_CHECK(mem);
4036
4037 assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
4038 assert(!mem->vmp_cleaning);
4039 m_object = VM_PAGE_OBJECT(mem);
4040
4041 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4042 if (m_object) {
4043 vm_object_lock_assert_exclusive(m_object);
4044 }
4045 if (mem->vmp_laundry) {
4046 /*
4047 * We may have to free a page while it's being laundered
4048 * if we lost its pager (due to a forced unmount, for example).
4049 * We need to call vm_pageout_steal_laundry() before removing
4050 * the page from its VM object, so that we can remove it
4051 * from its pageout queue and adjust the laundry accounting.
4052 */
4053 vm_pageout_steal_laundry(mem, TRUE);
4054 counter(++c_laundry_pages_freed);
4055 }
4056
4057 vm_page_queues_remove(mem, TRUE);
4058
4059 if (VM_PAGE_WIRED(mem)) {
4060 assert(mem->vmp_wire_count > 0);
4061
4062 if (m_object) {
4063 VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4064 VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
4065 VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
4066
4067 assert(m_object->resident_page_count >=
4068 m_object->wired_page_count);
4069
4070 if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4071 OSAddAtomic(+1, &vm_page_purgeable_count);
4072 assert(vm_page_purgeable_wired_count > 0);
4073 OSAddAtomic(-1, &vm_page_purgeable_wired_count);
4074 }
4075 if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
4076 m_object->purgable == VM_PURGABLE_EMPTY) &&
4077 m_object->vo_owner != TASK_NULL) {
4078 task_t owner;
4079 int ledger_idx_volatile;
4080 int ledger_idx_nonvolatile;
4081 int ledger_idx_volatile_compressed;
4082 int ledger_idx_nonvolatile_compressed;
4083 boolean_t do_footprint;
4084
4085 owner = VM_OBJECT_OWNER(m_object);
4086 vm_object_ledger_tag_ledgers(
4087 m_object,
4088 &ledger_idx_volatile,
4089 &ledger_idx_nonvolatile,
4090 &ledger_idx_volatile_compressed,
4091 &ledger_idx_nonvolatile_compressed,
4092 &do_footprint);
4093 /*
4094 * While wired, this page was accounted
4095 * as "non-volatile" but it should now
4096 * be accounted as "volatile".
4097 */
4098 /* one less "non-volatile"... */
4099 ledger_debit(owner->ledger,
4100 ledger_idx_nonvolatile,
4101 PAGE_SIZE);
4102 if (do_footprint) {
4103 /* ... and "phys_footprint" */
4104 ledger_debit(owner->ledger,
4105 task_ledgers.phys_footprint,
4106 PAGE_SIZE);
4107 }
4108 /* one more "volatile" */
4109 ledger_credit(owner->ledger,
4110 ledger_idx_volatile,
4111 PAGE_SIZE);
4112 }
4113 }
4114 if (!mem->vmp_private && !mem->vmp_fictitious) {
4115 vm_page_wire_count--;
4116 }
4117
4118 mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
4119 mem->vmp_wire_count = 0;
4120 assert(!mem->vmp_gobbled);
4121 } else if (mem->vmp_gobbled) {
4122 if (!mem->vmp_private && !mem->vmp_fictitious) {
4123 vm_page_wire_count--;
4124 }
4125 vm_page_gobble_count--;
4126 }
4127 }
4128
4129
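/*
 * vm_page_free_prepare_object:
 *
 * Detach the page from its VM object (optionally leaving it in
 * the vm_page hash) and re-initialize a non-fictitious page so
 * that it is safe to return to a free queue.
 */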
4130 void
4131 vm_page_free_prepare_object(
4132 vm_page_t mem,
4133 boolean_t remove_from_hash)
4134 {
4135 if (mem->vmp_tabled) {
4136 vm_page_remove(mem, remove_from_hash); /* clears tabled, object, offset */
4137 }
4138 PAGE_WAKEUP(mem); /* clears wanted */
4139
4140 if (mem->vmp_private) {
4141 mem->vmp_private = FALSE;
4142 mem->vmp_fictitious = TRUE;
4143 VM_PAGE_SET_PHYS_PAGE(mem, vm_page_fictitious_addr);
4144 }
4145 if (!mem->vmp_fictitious) {
4146 assert(mem->vmp_pageq.next == 0);
4147 assert(mem->vmp_pageq.prev == 0);
4148 assert(mem->vmp_listq.next == 0);
4149 assert(mem->vmp_listq.prev == 0);
4150 #if CONFIG_BACKGROUND_QUEUE
4151 assert(mem->vmp_backgroundq.next == 0);
4152 assert(mem->vmp_backgroundq.prev == 0);
4153 #endif /* CONFIG_BACKGROUND_QUEUE */
4154 assert(mem->vmp_next_m == 0);
4155 ASSERT_PMAP_FREE(mem);
4156 vm_page_init(mem, VM_PAGE_GET_PHYS_PAGE(mem), mem->vmp_lopage);
4157 }
4158 }
4159
4160
4161 /*
4162 * vm_page_free:
4163 *
4164 * Returns the given page to the free list,
4165 * disassociating it from any VM object.
4166 *
4167 * Object and page queues must be locked prior to entry.
4168 */
4169 void
4170 vm_page_free(
4171 vm_page_t mem)
4172 {
4173 vm_page_free_prepare(mem);
4174
4175 if (mem->vmp_fictitious) {
4176 vm_page_release_fictitious(mem);
4177 } else {
4178 vm_page_release(mem,
4179 TRUE); /* page queues are locked */
4180 }
4181 }
4182
4183
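/*
 * vm_page_free_unlocked:
 *
 * Like vm_page_free(), but for callers that do NOT hold the page
 * queues lock: the lock is taken here just long enough to pull
 * the page off its queues.  "remove_from_hash" controls whether
 * the page is also removed from the vm_page hash.
 */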
4184 void
4185 vm_page_free_unlocked(
4186 vm_page_t mem,
4187 boolean_t remove_from_hash)
4188 {
4189 vm_page_lockspin_queues();
4190 vm_page_free_prepare_queues(mem);
4191 vm_page_unlock_queues();
4192
4193 vm_page_free_prepare_object(mem, remove_from_hash);
4194
4195 if (mem->vmp_fictitious) {
4196 vm_page_release_fictitious(mem);
4197 } else {
4198 vm_page_release(mem, FALSE); /* page queues are not locked */
4199 }
4200 }
4201
4202
4203 /*
4204 * Free a list of pages. The list can be up to several hundred pages,
4205 * as blocked up by vm_pageout_scan().
4206 * The big win is not having to take the free list lock once
4207 * per page.
4208 *
4209 * The VM page queues lock (vm_page_queue_lock) should NOT be held.
4210 * The VM page free queues lock (vm_page_queue_free_lock) should NOT be held.
4211 */
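/*
 * The input list is singly linked through vmp_snext; callers in
 * this file build it roughly like this (sketch):
 *
 *	mem->vmp_snext = freeq;
 *	freeq = mem;
 *	...
 *	vm_page_free_list(freeq, TRUE);
 */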
4212 void
4213 vm_page_free_list(
4214 vm_page_t freeq,
4215 boolean_t prepare_object)
4216 {
4217 vm_page_t mem;
4218 vm_page_t nxt;
4219 vm_page_t local_freeq;
4220 int pg_count;
4221
4222 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
4223 LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_NOTOWNED);
4224
4225 while (freeq) {
4226 pg_count = 0;
4227 local_freeq = VM_PAGE_NULL;
4228 mem = freeq;
4229
4230 /*
4231 * break up the processing into smaller chunks so
4232 * that we can 'pipeline' the pages onto the
4233 * free list w/o introducing too much
4234 * contention on the global free queue lock
4235 */
4236 while (mem && pg_count < 64) {
4237 assert((mem->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
4238 (mem->vmp_q_state == VM_PAGE_IS_WIRED));
4239 #if CONFIG_BACKGROUND_QUEUE
4240 assert(mem->vmp_backgroundq.next == 0 &&
4241 mem->vmp_backgroundq.prev == 0 &&
4242 mem->vmp_on_backgroundq == FALSE);
4243 #endif
4244 nxt = mem->vmp_snext;
4245 mem->vmp_snext = NULL;
4246 assert(mem->vmp_pageq.prev == 0);
4247
4248 if (vm_page_free_verify && !mem->vmp_fictitious && !mem->vmp_private) {
4249 ASSERT_PMAP_FREE(mem);
4250 }
4251 if (prepare_object == TRUE) {
4252 vm_page_free_prepare_object(mem, TRUE);
4253 }
4254
4255 if (!mem->vmp_fictitious) {
4256 assert(mem->vmp_busy);
4257
4258 if ((mem->vmp_lopage == TRUE || vm_lopage_refill == TRUE) &&
4259 vm_lopage_free_count < vm_lopage_free_limit &&
4260 VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
4261 vm_page_release(mem, FALSE); /* page queues are not locked */
4262 #if CONFIG_SECLUDED_MEMORY
4263 } else if (vm_page_secluded_count < vm_page_secluded_target &&
4264 num_tasks_can_use_secluded_mem == 0) {
4265 vm_page_release(mem,
4266 FALSE); /* page queues are not locked */
4267 #endif /* CONFIG_SECLUDED_MEMORY */
4268 } else {
4269 /*
4270 * IMPORTANT: we can't set the page "free" here
4271 * because that would make the page eligible for
4272 * a physically-contiguous allocation (see
4273 * vm_page_find_contiguous()) right away (we don't
4274 * hold the vm_page_queue_free lock). That would
4275 * cause trouble because the page is not actually
4276 * in the free queue yet...
4277 */
4278 mem->vmp_snext = local_freeq;
4279 local_freeq = mem;
4280 pg_count++;
4281
4282 pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
4283 }
4284 } else {
4285 assert(VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_fictitious_addr ||
4286 VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_guard_addr);
4287 vm_page_release_fictitious(mem);
4288 }
4289 mem = nxt;
4290 }
4291 freeq = mem;
4292
4293 if ((mem = local_freeq)) {
4294 unsigned int avail_free_count;
4295 unsigned int need_wakeup = 0;
4296 unsigned int need_priv_wakeup = 0;
4297 #if CONFIG_SECLUDED_MEMORY
4298 unsigned int need_wakeup_secluded = 0;
4299 #endif /* CONFIG_SECLUDED_MEMORY */
4300 event_t priv_wakeup_event, secluded_wakeup_event, normal_wakeup_event;
4301 boolean_t priv_wakeup_all, secluded_wakeup_all, normal_wakeup_all;
4302
4303 lck_mtx_lock_spin(&vm_page_queue_free_lock);
4304
4305 while (mem) {
4306 int color;
4307
4308 nxt = mem->vmp_snext;
4309
4310 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
4311 assert(mem->vmp_busy);
4312 mem->vmp_lopage = FALSE;
4313 mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
4314
4315 color = VM_PAGE_GET_COLOR(mem);
4316 #if defined(__x86_64__)
4317 vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
4318 #else
4319 vm_page_queue_enter(&vm_page_queue_free[color].qhead,
4320 mem, vmp_pageq);
4321 #endif
4322 mem = nxt;
4323 }
4324 vm_pageout_vminfo.vm_page_pages_freed += pg_count;
4325 vm_page_free_count += pg_count;
4326 avail_free_count = vm_page_free_count;
4327
4328 VM_DEBUG_CONSTANT_EVENT(vm_page_release, VM_PAGE_RELEASE, DBG_FUNC_NONE, pg_count, 0, 0, 0);
4329
4330 if (vm_page_free_wanted_privileged > 0 && avail_free_count > 0) {
4331 if (avail_free_count < vm_page_free_wanted_privileged) {
4332 need_priv_wakeup = avail_free_count;
4333 vm_page_free_wanted_privileged -= avail_free_count;
4334 avail_free_count = 0;
4335 } else {
4336 need_priv_wakeup = vm_page_free_wanted_privileged;
4337 avail_free_count -= vm_page_free_wanted_privileged;
4338 vm_page_free_wanted_privileged = 0;
4339 }
4340 }
4341 #if CONFIG_SECLUDED_MEMORY
4342 if (vm_page_free_wanted_secluded > 0 &&
4343 avail_free_count > vm_page_free_reserved) {
4344 unsigned int available_pages;
4345 available_pages = (avail_free_count -
4346 vm_page_free_reserved);
4347 if (available_pages <
4348 vm_page_free_wanted_secluded) {
4349 need_wakeup_secluded = available_pages;
4350 vm_page_free_wanted_secluded -=
4351 available_pages;
4352 avail_free_count -= available_pages;
4353 } else {
4354 need_wakeup_secluded =
4355 vm_page_free_wanted_secluded;
4356 avail_free_count -=
4357 vm_page_free_wanted_secluded;
4358 vm_page_free_wanted_secluded = 0;
4359 }
4360 }
4361 #endif /* CONFIG_SECLUDED_MEMORY */
4362 if (vm_page_free_wanted > 0 && avail_free_count > vm_page_free_reserved) {
4363 unsigned int available_pages;
4364
4365 available_pages = avail_free_count - vm_page_free_reserved;
4366
4367 if (available_pages >= vm_page_free_wanted) {
4368 need_wakeup = vm_page_free_wanted;
4369 vm_page_free_wanted = 0;
4370 } else {
4371 need_wakeup = available_pages;
4372 vm_page_free_wanted -= available_pages;
4373 }
4374 }
4375 lck_mtx_unlock(&vm_page_queue_free_lock);
4376
4377 priv_wakeup_event = NULL;
4378 secluded_wakeup_event = NULL;
4379 normal_wakeup_event = NULL;
4380
4381 priv_wakeup_all = FALSE;
4382 secluded_wakeup_all = FALSE;
4383 normal_wakeup_all = FALSE;
4384
4385
4386 if (need_priv_wakeup != 0) {
4387 /*
4388 * There shouldn't be that many VM-privileged threads,
4389 * so let's wake them all up, even if we don't quite
4390 * have enough pages to satisfy them all.
4391 */
4392 priv_wakeup_event = (event_t)&vm_page_free_wanted_privileged;
4393 priv_wakeup_all = TRUE;
4394 }
4395 #if CONFIG_SECLUDED_MEMORY
4396 if (need_wakeup_secluded != 0 &&
4397 vm_page_free_wanted_secluded == 0) {
4398 secluded_wakeup_event = (event_t)&vm_page_free_wanted_secluded;
4399 secluded_wakeup_all = TRUE;
4400 need_wakeup_secluded = 0;
4401 } else {
4402 secluded_wakeup_event = (event_t)&vm_page_free_wanted_secluded;
4403 }
4404 #endif /* CONFIG_SECLUDED_MEMORY */
4405 if (need_wakeup != 0 && vm_page_free_wanted == 0) {
4406 /*
4407 * We don't expect to have any more waiters
4408 * after this, so let's wake them all up at
4409 * once.
4410 */
4411 normal_wakeup_event = (event_t) &vm_page_free_count;
4412 normal_wakeup_all = TRUE;
4413 need_wakeup = 0;
4414 } else {
4415 normal_wakeup_event = (event_t) &vm_page_free_count;
4416 }
4417
4418 if (priv_wakeup_event ||
4419 #if CONFIG_SECLUDED_MEMORY
4420 secluded_wakeup_event ||
4421 #endif /* CONFIG_SECLUDED_MEMORY */
4422 normal_wakeup_event) {
4423 if (vps_dynamic_priority_enabled == TRUE) {
4424 thread_t thread_woken = NULL;
4425
4426 if (priv_wakeup_all == TRUE) {
4427 wakeup_all_with_inheritor(priv_wakeup_event, THREAD_AWAKENED);
4428 }
4429
4430 #if CONFIG_SECLUDED_MEMORY
4431 if (secluded_wakeup_all == TRUE) {
4432 wakeup_all_with_inheritor(secluded_wakeup_event, THREAD_AWAKENED);
4433 }
4434
4435 while (need_wakeup_secluded-- != 0) {
4436 /*
4437 * Wake up one waiter per page we just released.
4438 */
4439 wakeup_one_with_inheritor(secluded_wakeup_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &thread_woken);
4440 thread_deallocate(thread_woken);
4441 }
4442 #endif /* CONFIG_SECLUDED_MEMORY */
4443
4444 if (normal_wakeup_all == TRUE) {
4445 wakeup_all_with_inheritor(normal_wakeup_event, THREAD_AWAKENED);
4446 }
4447
4448 while (need_wakeup-- != 0) {
4449 /*
4450 * Wake up one waiter per page we just released.
4451 */
4452 wakeup_one_with_inheritor(normal_wakeup_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &thread_woken);
4453 thread_deallocate(thread_woken);
4454 }
4455 } else {
4456 /*
4457 * Non-priority-aware wakeups.
4458 */
4459
4460 if (priv_wakeup_all == TRUE) {
4461 thread_wakeup(priv_wakeup_event);
4462 }
4463
4464 #if CONFIG_SECLUDED_MEMORY
4465 if (secluded_wakeup_all == TRUE) {
4466 thread_wakeup(secluded_wakeup_event);
4467 }
4468
4469 while (need_wakeup_secluded-- != 0) {
4470 /*
4471 * Wake up one waiter per page we just released.
4472 */
4473 thread_wakeup_one(secluded_wakeup_event);
4474 }
4475
4476 #endif /* CONFIG_SECLUDED_MEMORY */
4477 if (normal_wakeup_all == TRUE) {
4478 thread_wakeup(normal_wakeup_event);
4479 }
4480
4481 while (need_wakeup-- != 0) {
4482 /*
4483 * Wake up one waiter per page we just released.
4484 */
4485 thread_wakeup_one(normal_wakeup_event);
4486 }
4487 }
4488 }
4489
4490 VM_CHECK_MEMORYSTATUS;
4491 }
4492 }
4493 }
4494
4495
4496 /*
4497 * vm_page_wire:
4498 *
4499 * Mark this page as wired down by yet
4500 * another map, removing it from paging queues
4501 * as necessary.
4502 *
4503 * The page's object and the page queues must be locked.
4504 */
4505
4506
4507 void
4508 vm_page_wire(
4509 vm_page_t mem,
4510 vm_tag_t tag,
4511 boolean_t check_memorystatus)
4512 {
4513 vm_object_t m_object;
4514
4515 m_object = VM_PAGE_OBJECT(mem);
4516
4517 // dbgLog(current_thread(), mem->vmp_offset, m_object, 1); /* (TEST/DEBUG) */
4518
4519 VM_PAGE_CHECK(mem);
4520 if (m_object) {
4521 vm_object_lock_assert_exclusive(m_object);
4522 } else {
4523 /*
4524 * In theory, the page should be in an object before it
4525 * gets wired, since we need to hold the object lock
4526 * to update some fields in the page structure.
4527 * However, some code (i386 pmap, for example) might want
4528 * to wire a page before it gets inserted into an object.
4529 * That's somewhat OK, as long as nobody else can get to
4530 * that page and update it at the same time.
4531 */
4532 }
4533 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4534 if (!VM_PAGE_WIRED(mem)) {
4535 if (mem->vmp_laundry) {
4536 vm_pageout_steal_laundry(mem, TRUE);
4537 }
4538
4539 vm_page_queues_remove(mem, TRUE);
4540
4541 assert(mem->vmp_wire_count == 0);
4542 mem->vmp_q_state = VM_PAGE_IS_WIRED;
4543
4544 if (m_object) {
4545 VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4546 VM_OBJECT_WIRED_PAGE_ADD(m_object, mem);
4547 VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, tag);
4548
4549 assert(m_object->resident_page_count >=
4550 m_object->wired_page_count);
4551 if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4552 assert(vm_page_purgeable_count > 0);
4553 OSAddAtomic(-1, &vm_page_purgeable_count);
4554 OSAddAtomic(1, &vm_page_purgeable_wired_count);
4555 }
4556 if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
4557 m_object->purgable == VM_PURGABLE_EMPTY) &&
4558 m_object->vo_owner != TASK_NULL) {
4559 task_t owner;
4560 int ledger_idx_volatile;
4561 int ledger_idx_nonvolatile;
4562 int ledger_idx_volatile_compressed;
4563 int ledger_idx_nonvolatile_compressed;
4564 boolean_t do_footprint;
4565
4566 owner = VM_OBJECT_OWNER(m_object);
4567 vm_object_ledger_tag_ledgers(
4568 m_object,
4569 &ledger_idx_volatile,
4570 &ledger_idx_nonvolatile,
4571 &ledger_idx_volatile_compressed,
4572 &ledger_idx_nonvolatile_compressed,
4573 &do_footprint);
4574 /* less volatile bytes */
4575 ledger_debit(owner->ledger,
4576 ledger_idx_volatile,
4577 PAGE_SIZE);
4578 /* more not-quite-volatile bytes */
4579 ledger_credit(owner->ledger,
4580 ledger_idx_nonvolatile,
4581 PAGE_SIZE);
4582 if (do_footprint) {
4583 /* more footprint */
4584 ledger_credit(owner->ledger,
4585 task_ledgers.phys_footprint,
4586 PAGE_SIZE);
4587 }
4588 }
4589 if (m_object->all_reusable) {
4590 /*
4591 * Wired pages are not counted as "re-usable"
4592 * in "all_reusable" VM objects, so nothing
4593 * to do here.
4594 */
4595 } else if (mem->vmp_reusable) {
4596 /*
4597 * This page is not "re-usable" when it's
4598 * wired, so adjust its state and the
4599 * accounting.
4600 */
4601 vm_object_reuse_pages(m_object,
4602 mem->vmp_offset,
4603 mem->vmp_offset + PAGE_SIZE_64,
4604 FALSE);
4605 }
4606 }
4607 assert(!mem->vmp_reusable);
4608
4609 if (!mem->vmp_private && !mem->vmp_fictitious && !mem->vmp_gobbled) {
4610 vm_page_wire_count++;
4611 }
4612 if (mem->vmp_gobbled) {
4613 vm_page_gobble_count--;
4614 }
4615 mem->vmp_gobbled = FALSE;
4616
4617 if (check_memorystatus == TRUE) {
4618 VM_CHECK_MEMORYSTATUS;
4619 }
4620 }
4621 assert(!mem->vmp_gobbled);
4622 assert(mem->vmp_q_state == VM_PAGE_IS_WIRED);
4623 mem->vmp_wire_count++;
4624 if (__improbable(mem->vmp_wire_count == 0)) {
4625 panic("vm_page_wire(%p): wire_count overflow", mem);
4626 }
4627 VM_PAGE_CHECK(mem);
4628 }
4629
4630 /*
4631 * vm_page_unwire:
4632 *
4633 * Release one wiring of this page, potentially
4634 * enabling it to be paged again.
4635 *
4636 * The page's object and the page queues must be locked.
4637 */
4638 void
4639 vm_page_unwire(
4640 vm_page_t mem,
4641 boolean_t queueit)
4642 {
4643 vm_object_t m_object;
4644
4645 m_object = VM_PAGE_OBJECT(mem);
4646
4647 // dbgLog(current_thread(), mem->vmp_offset, m_object, 0); /* (TEST/DEBUG) */
4648
4649 VM_PAGE_CHECK(mem);
4650 assert(VM_PAGE_WIRED(mem));
4651 assert(mem->vmp_wire_count > 0);
4652 assert(!mem->vmp_gobbled);
4653 assert(m_object != VM_OBJECT_NULL);
4654 vm_object_lock_assert_exclusive(m_object);
4655 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4656 if (--mem->vmp_wire_count == 0) {
4657 mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
4658
4659 VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4660 VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
4661 VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
4662 if (!mem->vmp_private && !mem->vmp_fictitious) {
4663 vm_page_wire_count--;
4664 }
4665
4666 assert(m_object->resident_page_count >=
4667 m_object->wired_page_count);
4668 if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4669 OSAddAtomic(+1, &vm_page_purgeable_count);
4670 assert(vm_page_purgeable_wired_count > 0);
4671 OSAddAtomic(-1, &vm_page_purgeable_wired_count);
4672 }
4673 if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
4674 m_object->purgable == VM_PURGABLE_EMPTY) &&
4675 m_object->vo_owner != TASK_NULL) {
4676 task_t owner;
4677 int ledger_idx_volatile;
4678 int ledger_idx_nonvolatile;
4679 int ledger_idx_volatile_compressed;
4680 int ledger_idx_nonvolatile_compressed;
4681 boolean_t do_footprint;
4682
4683 owner = VM_OBJECT_OWNER(m_object);
4684 vm_object_ledger_tag_ledgers(
4685 m_object,
4686 &ledger_idx_volatile,
4687 &ledger_idx_nonvolatile,
4688 &ledger_idx_volatile_compressed,
4689 &ledger_idx_nonvolatile_compressed,
4690 &do_footprint);
4691 /* more volatile bytes */
4692 ledger_credit(owner->ledger,
4693 ledger_idx_volatile,
4694 PAGE_SIZE);
4695 /* less not-quite-volatile bytes */
4696 ledger_debit(owner->ledger,
4697 ledger_idx_nonvolatile,
4698 PAGE_SIZE);
4699 if (do_footprint) {
4700 /* less footprint */
4701 ledger_debit(owner->ledger,
4702 task_ledgers.phys_footprint,
4703 PAGE_SIZE);
4704 }
4705 }
4706 assert(m_object != kernel_object);
4707 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
4708
4709 if (queueit == TRUE) {
4710 if (m_object->purgable == VM_PURGABLE_EMPTY) {
4711 vm_page_deactivate(mem);
4712 } else {
4713 vm_page_activate(mem);
4714 }
4715 }
4716
4717 VM_CHECK_MEMORYSTATUS;
4718 }
4719 VM_PAGE_CHECK(mem);
4720 }
4721
4722 /*
4723 * vm_page_deactivate:
4724 *
4725 * Returns the given page to the inactive list,
4726 * indicating that no physical maps have access
4727 * to this page. [Used by the physical mapping system.]
4728 *
4729 * The page queues must be locked.
4730 */
4731 void
4732 vm_page_deactivate(
4733 vm_page_t m)
4734 {
4735 vm_page_deactivate_internal(m, TRUE);
4736 }
4737
4738
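/*
 * vm_page_deactivate_internal:
 *
 * Same as vm_page_deactivate(), except that the caller chooses
 * whether the pmap reference bit is cleared (clear_hw_reference).
 */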
4739 void
4740 vm_page_deactivate_internal(
4741 vm_page_t m,
4742 boolean_t clear_hw_reference)
4743 {
4744 vm_object_t m_object;
4745
4746 m_object = VM_PAGE_OBJECT(m);
4747
4748 VM_PAGE_CHECK(m);
4749 assert(m_object != kernel_object);
4750 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
4751
4752 // dbgLog(VM_PAGE_GET_PHYS_PAGE(m), vm_page_free_count, vm_page_wire_count, 6); /* (TEST/DEBUG) */
4753 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4754 /*
4755 * This page is no longer very interesting. If it was
4756 * interesting (active or inactive/referenced), then we
4757 * clear the reference bit and (re)enter it in the
4758 * inactive queue. Note wired pages should not have
4759 * their reference bit cleared.
4760 */
4761 assert( !(m->vmp_absent && !m->vmp_unusual));
4762
4763 if (m->vmp_gobbled) { /* can this happen? */
4764 assert( !VM_PAGE_WIRED(m));
4765
4766 if (!m->vmp_private && !m->vmp_fictitious) {
4767 vm_page_wire_count--;
4768 }
4769 vm_page_gobble_count--;
4770 m->vmp_gobbled = FALSE;
4771 }
4772 /*
4773 * if this page is currently on the pageout queue, we can't do the
4774 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4775 * and we can't remove it manually since we would need the object lock
4776 * (which is not required here) to decrement the activity_in_progress
4777 * reference which is held on the object while the page is in the pageout queue...
4778 * just let the normal laundry processing proceed
4779 */
4780 if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
4781 (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
4782 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
4783 VM_PAGE_WIRED(m)) {
4784 return;
4785 }
4786 if (!m->vmp_absent && clear_hw_reference == TRUE) {
4787 pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
4788 }
4789
4790 m->vmp_reference = FALSE;
4791 m->vmp_no_cache = FALSE;
4792
4793 if (!VM_PAGE_INACTIVE(m)) {
4794 vm_page_queues_remove(m, FALSE);
4795
4796 if (!VM_DYNAMIC_PAGING_ENABLED() &&
4797 m->vmp_dirty && m_object->internal &&
4798 (m_object->purgable == VM_PURGABLE_DENY ||
4799 m_object->purgable == VM_PURGABLE_NONVOLATILE ||
4800 m_object->purgable == VM_PURGABLE_VOLATILE)) {
4801 vm_page_check_pageable_safe(m);
4802 vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
4803 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
4804 vm_page_throttled_count++;
4805 } else {
4806 if (m_object->named && m_object->ref_count == 1) {
4807 vm_page_speculate(m, FALSE);
4808 #if DEVELOPMENT || DEBUG
4809 vm_page_speculative_recreated++;
4810 #endif
4811 } else {
4812 vm_page_enqueue_inactive(m, FALSE);
4813 }
4814 }
4815 }
4816 }
4817
4818 /*
4819 * vm_page_enqueue_cleaned
4820 *
4821 * Put the page on the cleaned queue, mark it cleaned, etc.
4822 * Being on the cleaned queue (i.e. vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
4823 * does ** NOT ** guarantee that the page is clean!
4824 *
4825 * Call with the queues lock held.
4826 */
4827
4828 void
4829 vm_page_enqueue_cleaned(vm_page_t m)
4830 {
4831 vm_object_t m_object;
4832
4833 m_object = VM_PAGE_OBJECT(m);
4834
4835 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
4836 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4837 assert( !(m->vmp_absent && !m->vmp_unusual));
4838
4839 if (VM_PAGE_WIRED(m)) {
4840 return;
4841 }
4842
4843 if (m->vmp_gobbled) {
4844 if (!m->vmp_private && !m->vmp_fictitious) {
4845 vm_page_wire_count--;
4846 }
4847 vm_page_gobble_count--;
4848 m->vmp_gobbled = FALSE;
4849 }
4850 /*
4851 * if this page is currently on the pageout queue, we can't do the
4852 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4853 * and we can't remove it manually since we would need the object lock
4854 * (which is not required here) to decrement the activity_in_progress
4855 * reference which is held on the object while the page is in the pageout queue...
4856 * just let the normal laundry processing proceed
4857 */
4858 if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
4859 (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
4860 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
4861 return;
4862 }
4863 vm_page_queues_remove(m, FALSE);
4864
4865 vm_page_check_pageable_safe(m);
4866 vm_page_queue_enter(&vm_page_queue_cleaned, m, vmp_pageq);
4867 m->vmp_q_state = VM_PAGE_ON_INACTIVE_CLEANED_Q;
4868 vm_page_cleaned_count++;
4869
4870 vm_page_inactive_count++;
4871 if (m_object->internal) {
4872 vm_page_pageable_internal_count++;
4873 } else {
4874 vm_page_pageable_external_count++;
4875 }
4876 #if CONFIG_BACKGROUND_QUEUE
4877 if (m->vmp_in_background) {
4878 vm_page_add_to_backgroundq(m, TRUE);
4879 }
4880 #endif
4881 VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
4882 }
4883
4884 /*
4885 * vm_page_activate:
4886 *
4887 * Put the specified page on the active list (if appropriate).
4888 *
4889 * The page queues must be locked.
4890 */
4891
4892 void
4893 vm_page_activate(
4894 vm_page_t m)
4895 {
4896 vm_object_t m_object;
4897
4898 m_object = VM_PAGE_OBJECT(m);
4899
4900 VM_PAGE_CHECK(m);
4901 #ifdef FIXME_4778297
4902 assert(m_object != kernel_object);
4903 #endif
4904 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
4905 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4906 assert( !(m->vmp_absent && !m->vmp_unusual));
4907
4908 if (m->vmp_gobbled) {
4909 assert( !VM_PAGE_WIRED(m));
4910 if (!m->vmp_private && !m->vmp_fictitious) {
4911 vm_page_wire_count--;
4912 }
4913 vm_page_gobble_count--;
4914 m->vmp_gobbled = FALSE;
4915 }
4916 /*
4917 * if this page is currently on the pageout queue, we can't do the
4918 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4919 * and we can't remove it manually since we would need the object lock
4920 * (which is not required here) to decrement the activity_in_progress
4921 * reference which is held on the object while the page is in the pageout queue...
4922 * just let the normal laundry processing proceed
4923 */
4924 if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
4925 (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
4926 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
4927 return;
4928 }
4929
4930 #if DEBUG
4931 if (m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q) {
4932 panic("vm_page_activate: already active");
4933 }
4934 #endif
4935
4936 if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
4937 DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
4938 DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL);
4939 }
4940
4941 vm_page_queues_remove(m, FALSE);
4942
4943 if (!VM_PAGE_WIRED(m)) {
4944 vm_page_check_pageable_safe(m);
4945 if (!VM_DYNAMIC_PAGING_ENABLED() &&
4946 m->vmp_dirty && m_object->internal &&
4947 (m_object->purgable == VM_PURGABLE_DENY ||
4948 m_object->purgable == VM_PURGABLE_NONVOLATILE ||
4949 m_object->purgable == VM_PURGABLE_VOLATILE)) {
4950 vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
4951 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
4952 vm_page_throttled_count++;
4953 } else {
4954 #if CONFIG_SECLUDED_MEMORY
4955 if (secluded_for_filecache &&
4956 vm_page_secluded_target != 0 &&
4957 num_tasks_can_use_secluded_mem == 0 &&
4958 m_object->eligible_for_secluded) {
4959 vm_page_queue_enter(&vm_page_queue_secluded, m, vmp_pageq);
4960 m->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
4961 vm_page_secluded_count++;
4962 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
4963 vm_page_secluded_count_inuse++;
4964 assert(!m_object->internal);
4965 // vm_page_pageable_external_count++;
4966 } else
4967 #endif /* CONFIG_SECLUDED_MEMORY */
4968 vm_page_enqueue_active(m, FALSE);
4969 }
4970 m->vmp_reference = TRUE;
4971 m->vmp_no_cache = FALSE;
4972 }
4973 VM_PAGE_CHECK(m);
4974 }
4975
4976
4977 /*
4978 * vm_page_speculate:
4979 *
4980 * Put the specified page on the speculative list (if appropriate).
4981 *
4982 * The page queues must be locked.
4983 */
4984 void
4985 vm_page_speculate(
4986 vm_page_t m,
4987 boolean_t new)
4988 {
4989 struct vm_speculative_age_q *aq;
4990 vm_object_t m_object;
4991
4992 m_object = VM_PAGE_OBJECT(m);
4993
4994 VM_PAGE_CHECK(m);
4995 vm_page_check_pageable_safe(m);
4996
4997 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
4998 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4999 assert( !(m->vmp_absent && !m->vmp_unusual));
5000 assert(m_object->internal == FALSE);
5001
5002 /*
5003 * if this page is currently on the pageout queue, we can't do the
5004 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5005 * and we can't remove it manually since we would need the object lock
5006 * (which is not required here) to decrement the activity_in_progress
5007 * reference which is held on the object while the page is in the pageout queue...
5008 * just let the normal laundry processing proceed
5009 */
5010 if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
5011 (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5012 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5013 return;
5014 }
5015
5016 vm_page_queues_remove(m, FALSE);
5017
5018 if (!VM_PAGE_WIRED(m)) {
5019 mach_timespec_t ts;
5020 clock_sec_t sec;
5021 clock_nsec_t nsec;
5022
5023 clock_get_system_nanotime(&sec, &nsec);
5024 ts.tv_sec = (unsigned int) sec;
5025 ts.tv_nsec = nsec;
5026
5027 if (vm_page_speculative_count == 0) {
5028 speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5029 speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5030
5031 aq = &vm_page_queue_speculative[speculative_age_index];
5032
5033 /*
5034 * set the timer to begin a new group
5035 */
5036 aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
5037 aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
5038
5039 ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
5040 } else {
5041 aq = &vm_page_queue_speculative[speculative_age_index];
5042
5043 if (CMP_MACH_TIMESPEC(&ts, &aq->age_ts) >= 0) {
5044 speculative_age_index++;
5045
5046 if (speculative_age_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
5047 speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5048 }
5049 if (speculative_age_index == speculative_steal_index) {
5050 speculative_steal_index = speculative_age_index + 1;
5051
5052 if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
5053 speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5054 }
5055 }
5056 aq = &vm_page_queue_speculative[speculative_age_index];
5057
5058 if (!vm_page_queue_empty(&aq->age_q)) {
5059 vm_page_speculate_ageit(aq);
5060 }
5061
5062 aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
5063 aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
5064
5065 ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
5066 }
5067 }
5068 vm_page_enqueue_tail(&aq->age_q, &m->vmp_pageq);
5069 m->vmp_q_state = VM_PAGE_ON_SPECULATIVE_Q;
5070 vm_page_speculative_count++;
5071 vm_page_pageable_external_count++;
5072
5073 if (new == TRUE) {
5074 vm_object_lock_assert_exclusive(m_object);
5075
5076 m_object->pages_created++;
5077 #if DEVELOPMENT || DEBUG
5078 vm_page_speculative_created++;
5079 #endif
5080 }
5081 }
5082 VM_PAGE_CHECK(m);
5083 }
5084
5085
5086 /*
5087 * move pages from the specified aging bin to
5088 * the speculative bin that pageout_scan claims from
5089 *
5090 * The page queues must be locked.
5091 */
5092 void
5093 vm_page_speculate_ageit(struct vm_speculative_age_q *aq)
5094 {
5095 struct vm_speculative_age_q *sq;
5096 vm_page_t t;
5097
5098 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
5099
5100 if (vm_page_queue_empty(&sq->age_q)) {
5101 sq->age_q.next = aq->age_q.next;
5102 sq->age_q.prev = aq->age_q.prev;
5103
5104 t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.next);
5105 t->vmp_pageq.prev = VM_PAGE_PACK_PTR(&sq->age_q);
5106
5107 t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
5108 t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
5109 } else {
5110 t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
5111 t->vmp_pageq.next = aq->age_q.next;
5112
5113 t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.next);
5114 t->vmp_pageq.prev = sq->age_q.prev;
5115
5116 t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.prev);
5117 t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
5118
5119 sq->age_q.prev = aq->age_q.prev;
5120 }
5121 vm_page_queue_init(&aq->age_q);
5122 }
5123
5124
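/*
 * vm_page_lru:
 *
 * Refresh the page's LRU position by requeueing it at the tail of
 * the inactive queue.  Pages already on the external inactive
 * queue are simply moved to the tail; others go through the full
 * vm_page_queues_remove()/vm_page_enqueue_inactive() path.
 *
 * The page queues must be locked.
 */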
5125 void
5126 vm_page_lru(
5127 vm_page_t m)
5128 {
5129 VM_PAGE_CHECK(m);
5130 assert(VM_PAGE_OBJECT(m) != kernel_object);
5131 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
5132
5133 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5134
5135 if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q) {
5136 /*
5137 * we don't need to do all the other work that
5138 * vm_page_queues_remove and vm_page_enqueue_inactive
5139 * bring along for the ride
5140 */
5141 assert(!m->vmp_laundry);
5142 assert(!m->vmp_private);
5143
5144 m->vmp_no_cache = FALSE;
5145
5146 vm_page_queue_remove(&vm_page_queue_inactive, m, vmp_pageq);
5147 vm_page_queue_enter(&vm_page_queue_inactive, m, vmp_pageq);
5148
5149 return;
5150 }
5151 /*
5152 * if this page is currently on the pageout queue, we can't do the
5153 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5154 * and we can't remove it manually since we would need the object lock
5155 * (which is not required here) to decrement the activity_in_progress
5156 * reference which is held on the object while the page is in the pageout queue...
5157 * just let the normal laundry processing proceed
5158 */
5159 if (m->vmp_laundry || m->vmp_private ||
5160 (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5161 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
5162 VM_PAGE_WIRED(m)) {
5163 return;
5164 }
5165
5166 m->vmp_no_cache = FALSE;
5167
5168 vm_page_queues_remove(m, FALSE);
5169
5170 vm_page_enqueue_inactive(m, FALSE);
5171 }
5172
5173
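/*
 * vm_page_reactivate_all_throttled:
 *
 * Splice the entire throttled queue onto the head of the active
 * queue, so those pages are re-evaluated by the LRU algorithm
 * first, and adjust the global page counts.  A no-op unless
 * dynamic paging is enabled.  Takes the page queues lock.
 */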
5174 void
5175 vm_page_reactivate_all_throttled(void)
5176 {
5177 vm_page_t first_throttled, last_throttled;
5178 vm_page_t first_active;
5179 vm_page_t m;
5180 int extra_active_count;
5181 int extra_internal_count, extra_external_count;
5182 vm_object_t m_object;
5183
5184 if (!VM_DYNAMIC_PAGING_ENABLED()) {
5185 return;
5186 }
5187
5188 extra_active_count = 0;
5189 extra_internal_count = 0;
5190 extra_external_count = 0;
5191 vm_page_lock_queues();
5192 if (!vm_page_queue_empty(&vm_page_queue_throttled)) {
5193 /*
5194 * Switch "throttled" pages to "active".
5195 */
5196 vm_page_queue_iterate(&vm_page_queue_throttled, m, vmp_pageq) {
5197 VM_PAGE_CHECK(m);
5198 assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
5199
5200 m_object = VM_PAGE_OBJECT(m);
5201
5202 extra_active_count++;
5203 if (m_object->internal) {
5204 extra_internal_count++;
5205 } else {
5206 extra_external_count++;
5207 }
5208
5209 m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
5210 VM_PAGE_CHECK(m);
5211 #if CONFIG_BACKGROUND_QUEUE
5212 if (m->vmp_in_background) {
5213 vm_page_add_to_backgroundq(m, FALSE);
5214 }
5215 #endif
5216 }
5217
5218 /*
5219 * Transfer the entire throttled queue to the regular LRU page queues.
5220 * We insert it at the head of the active queue, so that these pages
5221 * get re-evaluated by the LRU algorithm first, since they've been
5222 * completely out of it until now.
5223 */
5224 first_throttled = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
5225 last_throttled = (vm_page_t) vm_page_queue_last(&vm_page_queue_throttled);
5226 first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
5227 if (vm_page_queue_empty(&vm_page_queue_active)) {
5228 vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
5229 } else {
5230 first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
5231 }
5232 vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_throttled);
5233 first_throttled->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
5234 last_throttled->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
5235
5236 #if DEBUG
5237 printf("reactivated %d throttled pages\n", vm_page_throttled_count);
5238 #endif
5239 vm_page_queue_init(&vm_page_queue_throttled);
5240 /*
5241 * Adjust the global page counts.
5242 */
5243 vm_page_active_count += extra_active_count;
5244 vm_page_pageable_internal_count += extra_internal_count;
5245 vm_page_pageable_external_count += extra_external_count;
5246 vm_page_throttled_count = 0;
5247 }
5248 assert(vm_page_throttled_count == 0);
5249 assert(vm_page_queue_empty(&vm_page_queue_throttled));
5250 vm_page_unlock_queues();
5251 }
5252
5253
5254 /*
5255 * move pages from the indicated local queue to the global active queue
5256 * it's OK to fail if we're below the hard limit and force == FALSE
5257 * the nolocks == TRUE case allows this function to be run on
5258 * the hibernate path
5259 */
5260
5261 void
5262 vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
5263 {
5264 struct vpl *lq;
5265 vm_page_t first_local, last_local;
5266 vm_page_t first_active;
5267 vm_page_t m;
5268 uint32_t count = 0;
5269
5270 if (vm_page_local_q == NULL) {
5271 return;
5272 }
5273
5274 lq = zpercpu_get_cpu(vm_page_local_q, lid);
5275
5276 if (nolocks == FALSE) {
5277 if (lq->vpl_count < vm_page_local_q_hard_limit && force == FALSE) {
5278 if (!vm_page_trylockspin_queues()) {
5279 return;
5280 }
5281 } else {
5282 vm_page_lockspin_queues();
5283 }
5284
5285 VPL_LOCK(&lq->vpl_lock);
5286 }
5287 if (lq->vpl_count) {
5288 /*
5289 * Switch "local" pages to "active".
5290 */
5291 assert(!vm_page_queue_empty(&lq->vpl_queue));
5292
5293 vm_page_queue_iterate(&lq->vpl_queue, m, vmp_pageq) {
5294 VM_PAGE_CHECK(m);
5295 vm_page_check_pageable_safe(m);
5296 assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q);
5297 assert(!m->vmp_fictitious);
5298
5299 if (m->vmp_local_id != lid) {
5300 panic("vm_page_reactivate_local: found vm_page_t(%p) with wrong cpuid", m);
5301 }
5302
5303 m->vmp_local_id = 0;
5304 m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
5305 VM_PAGE_CHECK(m);
5306 #if CONFIG_BACKGROUND_QUEUE
5307 if (m->vmp_in_background) {
5308 vm_page_add_to_backgroundq(m, FALSE);
5309 }
5310 #endif
5311 count++;
5312 }
5313 if (count != lq->vpl_count) {
5314 panic("vm_page_reactivate_local: count = %d, vm_page_local_count = %d\n", count, lq->vpl_count);
5315 }
5316
5317 /*
5318 * Transfer the entire local queue to the regular LRU page queues.
5319 */
5320 first_local = (vm_page_t) vm_page_queue_first(&lq->vpl_queue);
5321 last_local = (vm_page_t) vm_page_queue_last(&lq->vpl_queue);
5322 first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
5323
5324 if (vm_page_queue_empty(&vm_page_queue_active)) {
5325 vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
5326 } else {
5327 first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
5328 }
5329 vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
5330 first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
5331 last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
5332
5333 vm_page_queue_init(&lq->vpl_queue);
5334 /*
5335 * Adjust the global page counts.
5336 */
5337 vm_page_active_count += lq->vpl_count;
5338 vm_page_pageable_internal_count += lq->vpl_internal_count;
5339 vm_page_pageable_external_count += lq->vpl_external_count;
5340 lq->vpl_count = 0;
5341 lq->vpl_internal_count = 0;
5342 lq->vpl_external_count = 0;
5343 }
5344 assert(vm_page_queue_empty(&lq->vpl_queue));
5345
5346 if (nolocks == FALSE) {
5347 VPL_UNLOCK(&lq->vpl_lock);
5348
5349 vm_page_balance_inactive(count / 4);
5350 vm_page_unlock_queues();
5351 }
5352 }
5353
5354 /*
5355 * vm_page_part_zero_fill:
5356 *
5357 * Zero-fill a part of the page.
5358 */
5359 #define PMAP_ZERO_PART_PAGE_IMPLEMENTED
5360 void
5361 vm_page_part_zero_fill(
5362 vm_page_t m,
5363 vm_offset_t m_pa,
5364 vm_size_t len)
5365 {
5366 #if 0
5367 /*
5368 * we don't hold the page queue lock
5369 * so this check isn't safe to make
5370 */
5371 VM_PAGE_CHECK(m);
5372 #endif
5373
5374 #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
5375 pmap_zero_part_page(VM_PAGE_GET_PHYS_PAGE(m), m_pa, len);
5376 #else
5377 vm_page_t tmp;
5378 while (1) {
5379 tmp = vm_page_grab();
5380 if (tmp == VM_PAGE_NULL) {
5381 vm_page_wait(THREAD_UNINT);
5382 continue;
5383 }
5384 break;
5385 }
5386 vm_page_zero_fill(tmp);
5387 if (m_pa != 0) {
5388 vm_page_part_copy(m, 0, tmp, 0, m_pa);
5389 }
5390 if ((m_pa + len) < PAGE_SIZE) {
5391 vm_page_part_copy(m, m_pa + len, tmp,
5392 m_pa + len, PAGE_SIZE - (m_pa + len));
5393 }
5394 vm_page_copy(tmp, m);
5395 VM_PAGE_FREE(tmp);
5396 #endif
5397 }
5398
5399 /*
5400 * vm_page_zero_fill:
5401 *
5402 * Zero-fill the specified page.
5403 */
5404 void
5405 vm_page_zero_fill(
5406 vm_page_t m)
5407 {
5408 #if 0
5409 /*
5410 * we don't hold the page queue lock
5411 * so this check isn't safe to make
5412 */
5413 VM_PAGE_CHECK(m);
5414 #endif
5415
5416 // dbgTrace(0xAEAEAEAE, VM_PAGE_GET_PHYS_PAGE(m), 0); /* (BRINGUP) */
5417 pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
5418 }
5419
5420 /*
5421 * vm_page_part_copy:
5422 *
5423 * copy part of one page to another
5424 */
5425
5426 void
5427 vm_page_part_copy(
5428 vm_page_t src_m,
5429 vm_offset_t src_pa,
5430 vm_page_t dst_m,
5431 vm_offset_t dst_pa,
5432 vm_size_t len)
5433 {
5434 #if 0
5435 /*
5436 * we don't hold the page queue lock
5437 * so this check isn't safe to make
5438 */
5439 VM_PAGE_CHECK(src_m);
5440 VM_PAGE_CHECK(dst_m);
5441 #endif
5442 pmap_copy_part_page(VM_PAGE_GET_PHYS_PAGE(src_m), src_pa,
5443 VM_PAGE_GET_PHYS_PAGE(dst_m), dst_pa, len);
5444 }
5445
5446 /*
5447 * vm_page_copy:
5448 *
5449 * Copy one page to another
5450 */
5451
5452 int vm_page_copy_cs_validations = 0;
5453 int vm_page_copy_cs_tainted = 0;
5454
5455 void
5456 vm_page_copy(
5457 vm_page_t src_m,
5458 vm_page_t dest_m)
5459 {
5460 vm_object_t src_m_object;
5461
5462 src_m_object = VM_PAGE_OBJECT(src_m);
5463
5464 #if 0
5465 /*
5466 * we don't hold the page queue lock
5467 * so this check isn't safe to make
5468 */
5469 VM_PAGE_CHECK(src_m);
5470 VM_PAGE_CHECK(dest_m);
5471 #endif
5472 vm_object_lock_assert_held(src_m_object);
5473
5474 if (src_m_object != VM_OBJECT_NULL &&
5475 src_m_object->code_signed) {
5476 /*
5477 * We're copying a page from a code-signed object.
5478 * Whoever ends up mapping the copy page might care about
5479 * the original page's integrity, so let's validate the
5480 * source page now.
5481 */
5482 vm_page_copy_cs_validations++;
5483 vm_page_validate_cs(src_m, PAGE_SIZE, 0);
5484 #if DEVELOPMENT || DEBUG
5485 DTRACE_VM4(codesigned_copy,
5486 vm_object_t, src_m_object,
5487 vm_object_offset_t, src_m->vmp_offset,
5488 int, src_m->vmp_cs_validated,
5489 int, src_m->vmp_cs_tainted);
5490 #endif /* DEVELOPMENT || DEBUG */
5491 }
5492
5493 /*
5494 * Propagate the cs_tainted bit to the copy page. Do not propagate
5495 * the cs_validated bit.
5496 */
5497 dest_m->vmp_cs_tainted = src_m->vmp_cs_tainted;
5498 dest_m->vmp_cs_nx = src_m->vmp_cs_nx;
5499 if (dest_m->vmp_cs_tainted) {
5500 vm_page_copy_cs_tainted++;
5501 }
5502 dest_m->vmp_error = src_m->vmp_error; /* sliding src_m might have failed... */
5503 pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(src_m), VM_PAGE_GET_PHYS_PAGE(dest_m));
5504 }
5505
5506 #if MACH_ASSERT
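/*
 * _vm_page_print:
 *
 * MACH_ASSERT-only helper: dump the interesting fields of a
 * vm_page_t to the console.
 */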
5507 static void
5508 _vm_page_print(
5509 vm_page_t p)
5510 {
5511 printf("vm_page %p: \n", p);
5512 printf(" pageq: next=%p prev=%p\n",
5513 (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.next),
5514 (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev));
5515 printf(" listq: next=%p prev=%p\n",
5516 (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.next)),
5517 (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.prev)));
5518 printf(" next=%p\n", (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m)));
5519 printf(" object=%p offset=0x%llx\n", VM_PAGE_OBJECT(p), p->vmp_offset);
5520 printf(" wire_count=%u\n", p->vmp_wire_count);
5521 printf(" q_state=%u\n", p->vmp_q_state);
5522
5523 printf(" %slaundry, %sref, %sgobbled, %sprivate\n",
5524 (p->vmp_laundry ? "" : "!"),
5525 (p->vmp_reference ? "" : "!"),
5526 (p->vmp_gobbled ? "" : "!"),
5527 (p->vmp_private ? "" : "!"));
5528 printf(" %sbusy, %swanted, %stabled, %sfictitious, %spmapped, %swpmapped\n",
5529 (p->vmp_busy ? "" : "!"),
5530 (p->vmp_wanted ? "" : "!"),
5531 (p->vmp_tabled ? "" : "!"),
5532 (p->vmp_fictitious ? "" : "!"),
5533 (p->vmp_pmapped ? "" : "!"),
5534 (p->vmp_wpmapped ? "" : "!"));
5535 printf(" %sfree_when_done, %sabsent, %serror, %sdirty, %scleaning, %sprecious, %sclustered\n",
5536 (p->vmp_free_when_done ? "" : "!"),
5537 (p->vmp_absent ? "" : "!"),
5538 (p->vmp_error ? "" : "!"),
5539 (p->vmp_dirty ? "" : "!"),
5540 (p->vmp_cleaning ? "" : "!"),
5541 (p->vmp_precious ? "" : "!"),
5542 (p->vmp_clustered ? "" : "!"));
5543 printf(" %soverwriting, %srestart, %sunusual\n",
5544 (p->vmp_overwriting ? "" : "!"),
5545 (p->vmp_restart ? "" : "!"),
5546 (p->vmp_unusual ? "" : "!"));
5547 printf(" cs_validated=%d, cs_tainted=%d, cs_nx=%d, %sno_cache\n",
5548 p->vmp_cs_validated,
5549 p->vmp_cs_tainted,
5550 p->vmp_cs_nx,
5551 (p->vmp_no_cache ? "" : "!"));
5552
5553 printf("phys_page=0x%x\n", VM_PAGE_GET_PHYS_PAGE(p));
5554 }
5555
5556 /*
5557 * Check that the list of pages is ordered by
5558 * ascending physical address and has no holes.
5559 */
5560 static int
5561 vm_page_verify_contiguous(
5562 vm_page_t pages,
5563 unsigned int npages)
5564 {
5565 vm_page_t m;
5566 unsigned int page_count;
5567 vm_offset_t prev_addr;
5568
5569 prev_addr = VM_PAGE_GET_PHYS_PAGE(pages);
5570 page_count = 1;
5571 for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
5572 if (VM_PAGE_GET_PHYS_PAGE(m) != prev_addr + 1) {
5573 printf("m %p prev_addr 0x%lx, current addr 0x%x\n",
5574 m, (long)prev_addr, VM_PAGE_GET_PHYS_PAGE(m));
5575 printf("pages %p page_count %d npages %d\n", pages, page_count, npages);
5576 panic("vm_page_verify_contiguous: not contiguous!");
5577 }
5578 prev_addr = VM_PAGE_GET_PHYS_PAGE(m);
5579 ++page_count;
5580 }
5581 if (page_count != npages) {
5582 printf("pages %p actual count 0x%x but requested 0x%x\n",
5583 pages, page_count, npages);
5584 panic("vm_page_verify_contiguous: count error");
5585 }
5586 return 1;
5587 }
5588
5589
5590 /*
5591 * Check the free lists for proper length etc.
5592 */
5593 static boolean_t vm_page_verify_this_free_list_enabled = FALSE;
5594 static unsigned int
5595 vm_page_verify_free_list(
5596 vm_page_queue_head_t *vm_page_queue,
5597 unsigned int color,
5598 vm_page_t look_for_page,
5599 boolean_t expect_page)
5600 {
5601 unsigned int npages;
5602 vm_page_t m;
5603 vm_page_t prev_m;
5604 boolean_t found_page;
5605
5606 if (!vm_page_verify_this_free_list_enabled) {
5607 return 0;
5608 }
5609
5610 found_page = FALSE;
5611 npages = 0;
5612 prev_m = (vm_page_t)((uintptr_t)vm_page_queue);
5613
5614 vm_page_queue_iterate(vm_page_queue, m, vmp_pageq) {
5615 if (m == look_for_page) {
5616 found_page = TRUE;
5617 }
5618 if ((vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev) != prev_m) {
5619 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p corrupted prev ptr %p instead of %p\n",
5620 color, npages, m, (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev), prev_m);
5621 }
5622 if (!m->vmp_busy) {
5623 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not busy\n",
5624 color, npages, m);
5625 }
5626 if (color != (unsigned int) -1) {
5627 if (VM_PAGE_GET_COLOR(m) != color) {
5628 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u\n",
5629 color, npages, m, VM_PAGE_GET_COLOR(m), color);
5630 }
5631 if (m->vmp_q_state != VM_PAGE_ON_FREE_Q) {
5632 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p - expecting q_state == VM_PAGE_ON_FREE_Q, found %d\n",
5633 color, npages, m, m->vmp_q_state);
5634 }
5635 } else {
5636 if (m->vmp_q_state != VM_PAGE_ON_FREE_LOCAL_Q) {
5637 panic("vm_page_verify_free_list(npages=%u): local page %p - expecting q_state == VM_PAGE_ON_FREE_LOCAL_Q, found %d\n",
5638 npages, m, m->vmp_q_state);
5639 }
5640 }
5641 ++npages;
5642 prev_m = m;
5643 }
5644 if (look_for_page != VM_PAGE_NULL) {
5645 unsigned int other_color;
5646
5647 if (expect_page && !found_page) {
5648 printf("vm_page_verify_free_list(color=%u, npages=%u): page %p not found phys=%u\n",
5649 color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
5650 _vm_page_print(look_for_page);
5651 for (other_color = 0;
5652 other_color < vm_colors;
5653 other_color++) {
5654 if (other_color == color) {
5655 continue;
5656 }
5657 vm_page_verify_free_list(&vm_page_queue_free[other_color].qhead,
5658 other_color, look_for_page, FALSE);
5659 }
5660 if (color == (unsigned int) -1) {
5661 vm_page_verify_free_list(&vm_lopage_queue_free,
5662 (unsigned int) -1, look_for_page, FALSE);
5663 }
5664 panic("vm_page_verify_free_list(color=%u)\n", color);
5665 }
5666 if (!expect_page && found_page) {
5667 printf("vm_page_verify_free_list(color=%u, npages=%u): page %p found phys=%u\n",
5668 color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
5669 }
5670 }
5671 return npages;
5672 }
5673
5674 static boolean_t vm_page_verify_all_free_lists_enabled = FALSE;
5675 static void
5676 vm_page_verify_free_lists( void )
5677 {
5678 unsigned int color, npages, nlopages;
5679 boolean_t toggle = TRUE;
5680
5681 if (!vm_page_verify_all_free_lists_enabled) {
5682 return;
5683 }
5684
5685 npages = 0;
5686
5687 lck_mtx_lock(&vm_page_queue_free_lock);
5688
5689 if (vm_page_verify_this_free_list_enabled == TRUE) {
5690 /*
5691 * This variable has been set globally for extra checking of
5692 * each free list Q. Since we didn't set it, we don't own it
5693 * and we shouldn't toggle it.
5694 */
5695 toggle = FALSE;
5696 }
5697
5698 if (toggle == TRUE) {
5699 vm_page_verify_this_free_list_enabled = TRUE;
5700 }
5701
5702 for (color = 0; color < vm_colors; color++) {
5703 npages += vm_page_verify_free_list(&vm_page_queue_free[color].qhead,
5704 color, VM_PAGE_NULL, FALSE);
5705 }
5706 nlopages = vm_page_verify_free_list(&vm_lopage_queue_free,
5707 (unsigned int) -1,
5708 VM_PAGE_NULL, FALSE);
5709 if (npages != vm_page_free_count || nlopages != vm_lopage_free_count) {
5710 panic("vm_page_verify_free_lists: "
5711 "npages %u free_count %d nlopages %u lo_free_count %u",
5712 npages, vm_page_free_count, nlopages, vm_lopage_free_count);
5713 }
5714
5715 if (toggle == TRUE) {
5716 vm_page_verify_this_free_list_enabled = FALSE;
5717 }
5718
5719 lck_mtx_unlock(&vm_page_queue_free_lock);
5720 }
5721
5722 #endif /* MACH_ASSERT */
5723
5724
5725 extern boolean_t(*volatile consider_buffer_cache_collect)(int);
5726
5727 /*
5728 * CONTIGUOUS PAGE ALLOCATION
5729 *
5730 * Find a region large enough to contain at least n pages
5731 * of contiguous physical memory.
5732 *
5733 * This is done by traversing the vm_page_t array in a linear fashion
5734 * we assume that the vm_page_t array has the available physical pages in an
5735 * ordered, ascending list... this is currently true of all our implementations
5736 * and must remain so... there can be 'holes' in the array... we also can
5737 * no longer tolerate the vm_page_t's in the list being 'freed' and reclaimed
5738 * which used to happen via 'vm_page_convert'... that function was no longer
5739 * being called and was removed...
5740 *
5741 * The basic flow consists of stabilizing some of the interesting state of
5742 * a vm_page_t behind the vm_page_queue and vm_page_free locks... we start our
5743 * sweep at the beginning of the array looking for pages that meet our criteria
5744 * for a 'stealable' page... currently we are pretty conservative... if the page
5745 * meets these criteria and is physically contiguous to the previous page in the 'run'
5746 * we keep developing it. If we hit a page that doesn't fit, we reset our state
5747 * and start to develop a new run... if at this point we've already considered
5748 * at least MAX_CONSIDERED_BEFORE_YIELD pages, we'll drop the 2 locks we hold,
5749 * and mutex_pause (which will yield the processor), to keep the latency low w/r
5750 * to other threads trying to acquire free pages (or move pages from q to q),
5751 * and then continue from the spot we left off... we only make 1 pass through the
5752 * array. Once we have a 'run' that is long enough, we'll go into the loop
5753 * which steals the pages from the queues they're currently on... pages on the free
5754 * queue can be stolen directly... pages that are on any of the other queues
5755 * must be removed from the object they are tabled on... this requires taking the
5756 * object lock... we do this as a 'try' to prevent deadlocks... if the 'try' fails
5757 * or if the state of the page behind the vm_object lock is no longer viable, we'll
5758 * dump the pages we've currently stolen back to the free list, and pick up our
5759 * scan from the point where we aborted the 'current' run.
5760 *
5761 *
5762 * Requirements:
5763 * - neither vm_page_queue nor vm_free_list lock can be held on entry
5764 *
5765 * Returns a pointer to a list of gobbled/wired pages or VM_PAGE_NULL.
5766 *
5767 * Algorithm:
5768 */
5769
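/*
 * Editor's note: the snippet below is an illustrative, self-contained sketch
 * of the run-tracking idea described in the comment above; it is not the
 * kernel implementation.  All names (find_contig_run, pfn, align_mask, etc.)
 * are hypothetical, and locking, page stealing and yielding are ignored.
 * run_len, prev_pfn, run_start and align_mask play the roles of npages,
 * prevcontaddr, start_idx and pnum_mask in vm_page_find_contiguous() below.
 */
#define PFN_SENTINEL ((unsigned long) -2)  /* never adjacent to a real frame */

static long
find_contig_run(const unsigned long *pfn, unsigned long count,
    unsigned long want, unsigned long align_mask)
{
	unsigned long run_len = 0, run_start = 0;
	unsigned long prev_pfn = PFN_SENTINEL;
	unsigned long i;

	if (want == 0) {
		return -1;
	}
	for (i = 0; i < count; i++) {
		if (pfn[i] != prev_pfn + 1) {
			/*
			 * hole (or first candidate): a new run may only
			 * start on a frame that satisfies the alignment mask
			 */
			if ((pfn[i] & align_mask) != 0) {
				run_len = 0;
				prev_pfn = PFN_SENTINEL;
				continue;
			}
			run_len = 1;
			run_start = i;
		} else {
			run_len++;
		}
		prev_pfn = pfn[i];

		if (run_len == want) {
			/* index of the first page of a contiguous run */
			return (long)run_start;
		}
	}
	return -1;
}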
5770 #define MAX_CONSIDERED_BEFORE_YIELD 1000
5771
5772
5773 #define RESET_STATE_OF_RUN() \
5774 MACRO_BEGIN \
5775 prevcontaddr = -2; \
5776 start_pnum = -1; \
5777 free_considered = 0; \
5778 substitute_needed = 0; \
5779 npages = 0; \
5780 MACRO_END
5781
5782 /*
5783 * Can we steal in-use (i.e. not free) pages when searching for
5784 * physically-contiguous pages ?
5785 */
5786 #define VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL 1
5787
5788 static unsigned int vm_page_find_contiguous_last_idx = 0, vm_page_lomem_find_contiguous_last_idx = 0;
5789 #if DEBUG
5790 int vm_page_find_contig_debug = 0;
5791 #endif
5792
5793 static vm_page_t
5794 vm_page_find_contiguous(
5795 unsigned int contig_pages,
5796 ppnum_t max_pnum,
5797 ppnum_t pnum_mask,
5798 boolean_t wire,
5799 int flags)
5800 {
5801 vm_page_t m = NULL;
5802 ppnum_t prevcontaddr = 0;
5803 ppnum_t start_pnum = 0;
5804 unsigned int npages = 0, considered = 0, scanned = 0;
5805 unsigned int page_idx = 0, start_idx = 0, last_idx = 0, orig_last_idx = 0;
5806 unsigned int idx_last_contig_page_found = 0;
5807 int free_considered = 0, free_available = 0;
5808 int substitute_needed = 0;
5809 boolean_t wrapped, zone_gc_called = FALSE;
5810 kern_return_t kr;
5811 #if DEBUG
5812 clock_sec_t tv_start_sec = 0, tv_end_sec = 0;
5813 clock_usec_t tv_start_usec = 0, tv_end_usec = 0;
5814 #endif
5815
5816 int yielded = 0;
5817 int dumped_run = 0;
5818 int stolen_pages = 0;
5819 int compressed_pages = 0;
5820
5821
5822 if (contig_pages == 0) {
5823 return VM_PAGE_NULL;
5824 }
5825
5826 full_scan_again:
5827
5828 #if MACH_ASSERT
5829 vm_page_verify_free_lists();
5830 #endif
5831 #if DEBUG
5832 clock_get_system_microtime(&tv_start_sec, &tv_start_usec);
5833 #endif
5834 PAGE_REPLACEMENT_ALLOWED(TRUE);
5835
5836 /*
5837 * If there are still delayed pages, try to free up some that match.
5838 */
5839 if (__improbable(vm_delayed_count != 0 && contig_pages != 0)) {
5840 vm_free_delayed_pages_contig(contig_pages, max_pnum, pnum_mask);
5841 }
5842
5843 vm_page_lock_queues();
5844 lck_mtx_lock(&vm_page_queue_free_lock);
5845
5846 RESET_STATE_OF_RUN();
5847
5848 scanned = 0;
5849 considered = 0;
5850 free_available = vm_page_free_count - vm_page_free_reserved;
5851
5852 wrapped = FALSE;
5853
5854 if (flags & KMA_LOMEM) {
5855 idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx;
5856 } else {
5857 idx_last_contig_page_found = vm_page_find_contiguous_last_idx;
5858 }
5859
5860 orig_last_idx = idx_last_contig_page_found;
5861 last_idx = orig_last_idx;
5862
5863 for (page_idx = last_idx, start_idx = last_idx;
5864 npages < contig_pages && page_idx < vm_pages_count;
5865 page_idx++) {
5866 retry:
5867 if (wrapped &&
5868 npages == 0 &&
5869 page_idx >= orig_last_idx) {
5870 /*
5871 * We're back where we started and we haven't
5872 * found any suitable contiguous range. Let's
5873 * give up.
5874 */
5875 break;
5876 }
5877 scanned++;
5878 m = &vm_pages[page_idx];
5879
5880 assert(!m->vmp_fictitious);
5881 assert(!m->vmp_private);
5882
5883 if (max_pnum && VM_PAGE_GET_PHYS_PAGE(m) > max_pnum) {
5884 /* no more low pages... */
5885 break;
5886 }
5887 if (!npages && ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0)) {
5888 /*
5889 * not aligned
5890 */
5891 RESET_STATE_OF_RUN();
5892 } else if (VM_PAGE_WIRED(m) || m->vmp_gobbled ||
5893 m->vmp_laundry || m->vmp_wanted ||
5894 m->vmp_cleaning || m->vmp_overwriting || m->vmp_free_when_done) {
5895 /*
5896 * page is in a transient state
5897 * or a state we don't want to deal
5898 * with, so don't consider it which
5899 * means starting a new run
5900 */
5901 RESET_STATE_OF_RUN();
5902 } else if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
5903 (m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q) ||
5904 (m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q) ||
5905 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5906 /*
5907 * page needs to be on one of our queues (other than the pageout or special free queues)
5908 * or it needs to belong to the compressor pool (which is now indicated
5909 * by vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR and falls out
5910 * from the check for VM_PAGE_NOT_ON_Q)
5911 * in order for it to be stable behind the
5912 * locks we hold at this point...
5913 * if not, don't consider it which
5914 * means starting a new run
5915 */
5916 RESET_STATE_OF_RUN();
5917 } else if ((m->vmp_q_state != VM_PAGE_ON_FREE_Q) && (!m->vmp_tabled || m->vmp_busy)) {
5918 /*
5919 * pages on the free list are always 'busy'
5920 * so we couldn't test for 'busy' in the check
5921 * for the transient states... pages that are
5922 * 'free' are never 'tabled', so we also couldn't
5923 * test for 'tabled'. So we check here to make
5924 * sure that a non-free page is not busy and is
5925 * tabled on an object...
5926 * if not, don't consider it which
5927 * means starting a new run
5928 */
5929 RESET_STATE_OF_RUN();
5930 } else {
5931 if (VM_PAGE_GET_PHYS_PAGE(m) != prevcontaddr + 1) {
5932 if ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0) {
5933 RESET_STATE_OF_RUN();
5934 goto did_consider;
5935 } else {
5936 npages = 1;
5937 start_idx = page_idx;
5938 start_pnum = VM_PAGE_GET_PHYS_PAGE(m);
5939 }
5940 } else {
5941 npages++;
5942 }
5943 prevcontaddr = VM_PAGE_GET_PHYS_PAGE(m);
5944
5945 VM_PAGE_CHECK(m);
5946 if (m->vmp_q_state == VM_PAGE_ON_FREE_Q) {
5947 free_considered++;
5948 } else {
5949 /*
5950 * This page is not free.
5951 * If we can't steal used pages,
5952 * we have to give up this run
5953 * and keep looking.
5954 * Otherwise, we might need to
5955 * move the contents of this page
5956 * into a substitute page.
5957 */
5958 #if VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
5959 if (m->vmp_pmapped || m->vmp_dirty || m->vmp_precious) {
5960 substitute_needed++;
5961 }
5962 #else
5963 RESET_STATE_OF_RUN();
5964 #endif
5965 }
5966
5967 if ((free_considered + substitute_needed) > free_available) {
5968 /*
5969 * if we let this run continue
5970 * we will end up dropping the vm_page_free_count
5971 * below the reserve limit... we need to abort
5972 * this run, but we can at least re-consider this
5973 * page... thus the jump back to 'retry'
5974 */
5975 RESET_STATE_OF_RUN();
5976
5977 if (free_available && considered <= MAX_CONSIDERED_BEFORE_YIELD) {
5978 considered++;
5979 goto retry;
5980 }
5981 /*
5982 * free_available == 0
5983 * so can't consider any free pages... if
5984 * we went to retry in this case, we'd
5985 * get stuck looking at the same page
5986 * w/o making any forward progress
5987 * we also want to take this path if we've already
5988 * reached our limit that controls the lock latency
5989 */
5990 }
5991 }
5992 did_consider:
5993 if (considered > MAX_CONSIDERED_BEFORE_YIELD && npages <= 1) {
5994 PAGE_REPLACEMENT_ALLOWED(FALSE);
5995
5996 lck_mtx_unlock(&vm_page_queue_free_lock);
5997 vm_page_unlock_queues();
5998
5999 mutex_pause(0);
6000
6001 PAGE_REPLACEMENT_ALLOWED(TRUE);
6002
6003 vm_page_lock_queues();
6004 lck_mtx_lock(&vm_page_queue_free_lock);
6005
6006 RESET_STATE_OF_RUN();
6007 /*
6008 * reset our free page limit since we
6009 * dropped the lock protecting the vm_page_free_queue
6010 */
6011 free_available = vm_page_free_count - vm_page_free_reserved;
6012 considered = 0;
6013
6014 yielded++;
6015
6016 goto retry;
6017 }
6018 considered++;
6019 }
6020 m = VM_PAGE_NULL;
6021
6022 if (npages != contig_pages) {
6023 if (!wrapped) {
6024 /*
6025 * We didn't find a contiguous range but we didn't
6026 * start from the very first page.
6027 * Start again from the very first page.
6028 */
6029 RESET_STATE_OF_RUN();
6030 if (flags & KMA_LOMEM) {
6031 idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx = 0;
6032 } else {
6033 idx_last_contig_page_found = vm_page_find_contiguous_last_idx = 0;
6034 }
6035 last_idx = 0;
6036 page_idx = last_idx;
6037 wrapped = TRUE;
6038 goto retry;
6039 }
6040 lck_mtx_unlock(&vm_page_queue_free_lock);
6041 } else {
6042 vm_page_t m1;
6043 vm_page_t m2;
6044 unsigned int cur_idx;
6045 unsigned int tmp_start_idx;
6046 vm_object_t locked_object = VM_OBJECT_NULL;
6047 boolean_t abort_run = FALSE;
6048
6049 assert(page_idx - start_idx == contig_pages);
6050
6051 tmp_start_idx = start_idx;
6052
6053 /*
6054 * first pass through to pull the free pages
6055 * off of the free queue so that in case we
6056 * need substitute pages, we won't grab any
6057 * of the free pages in the run... we'll clear
6058 * the 'free' bit in the 2nd pass, and even in
6059 * an abort_run case, we'll collect all of the
6060 * free pages in this run and return them to the free list
6061 */
6062 while (start_idx < page_idx) {
6063 m1 = &vm_pages[start_idx++];
6064
6065 #if !VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
6066 assert(m1->vmp_q_state == VM_PAGE_ON_FREE_Q);
6067 #endif
6068
6069 if (m1->vmp_q_state == VM_PAGE_ON_FREE_Q) {
6070 unsigned int color;
6071
6072 color = VM_PAGE_GET_COLOR(m1);
6073 #if MACH_ASSERT
6074 vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, m1, TRUE);
6075 #endif
6076 vm_page_queue_remove(&vm_page_queue_free[color].qhead, m1, vmp_pageq);
6077
6078 VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
6079 #if MACH_ASSERT
6080 vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, VM_PAGE_NULL, FALSE);
6081 #endif
6082 /*
6083 * Clear the "free" bit so that this page
6084 * does not get considered for another
6085 * concurrent physically-contiguous allocation.
6086 */
6087 m1->vmp_q_state = VM_PAGE_NOT_ON_Q;
6088 assert(m1->vmp_busy);
6089
6090 vm_page_free_count--;
6091 }
6092 }
6093 if (flags & KMA_LOMEM) {
6094 vm_page_lomem_find_contiguous_last_idx = page_idx;
6095 } else {
6096 vm_page_find_contiguous_last_idx = page_idx;
6097 }
6098
6099 /*
6100 * we can drop the free queue lock at this point since
6101 * we've pulled any 'free' candidates off of the list
6102 * we need it dropped so that we can do a vm_page_grab
6103 * when substituting for pmapped/dirty pages
6104 */
6105 lck_mtx_unlock(&vm_page_queue_free_lock);
6106
6107 start_idx = tmp_start_idx;
6108 cur_idx = page_idx - 1;
6109
6110 while (start_idx++ < page_idx) {
6111 /*
6112 * must go through the list from back to front
6113 * so that the page list is created in the
6114 * correct order - low -> high phys addresses
6115 */
6116 m1 = &vm_pages[cur_idx--];
6117
6118 if (m1->vmp_object == 0) {
6119 /*
6120 * page has already been removed from
6121 * the free list in the 1st pass
6122 */
6123 assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
6124 assert(m1->vmp_offset == (vm_object_offset_t) -1);
6125 assert(m1->vmp_busy);
6126 assert(!m1->vmp_wanted);
6127 assert(!m1->vmp_laundry);
6128 } else {
6129 vm_object_t object;
6130 int refmod;
6131 boolean_t disconnected, reusable;
6132
6133 if (abort_run == TRUE) {
6134 continue;
6135 }
6136
6137 assert(m1->vmp_q_state != VM_PAGE_NOT_ON_Q);
6138
6139 object = VM_PAGE_OBJECT(m1);
6140
6141 if (object != locked_object) {
6142 if (locked_object) {
6143 vm_object_unlock(locked_object);
6144 locked_object = VM_OBJECT_NULL;
6145 }
6146 if (vm_object_lock_try(object)) {
6147 locked_object = object;
6148 }
6149 }
6150 if (locked_object == VM_OBJECT_NULL ||
6151 (VM_PAGE_WIRED(m1) || m1->vmp_gobbled ||
6152 m1->vmp_laundry || m1->vmp_wanted ||
6153 m1->vmp_cleaning || m1->vmp_overwriting || m1->vmp_free_when_done || m1->vmp_busy) ||
6154 (m1->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
6155 if (locked_object) {
6156 vm_object_unlock(locked_object);
6157 locked_object = VM_OBJECT_NULL;
6158 }
6159 tmp_start_idx = cur_idx;
6160 abort_run = TRUE;
6161 continue;
6162 }
6163
6164 disconnected = FALSE;
6165 reusable = FALSE;
6166
6167 if ((m1->vmp_reusable ||
6168 object->all_reusable) &&
6169 (m1->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q) &&
6170 !m1->vmp_dirty &&
6171 !m1->vmp_reference) {
6172 /* reusable page... */
6173 refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6174 disconnected = TRUE;
6175 if (refmod == 0) {
6176 /*
6177 * ... not reused: can steal
6178 * without relocating contents.
6179 */
6180 reusable = TRUE;
6181 }
6182 }
6183
6184 if ((m1->vmp_pmapped &&
6185 !reusable) ||
6186 m1->vmp_dirty ||
6187 m1->vmp_precious) {
6188 vm_object_offset_t offset;
6189
6190 m2 = vm_page_grab_options(VM_PAGE_GRAB_Q_LOCK_HELD);
6191
6192 if (m2 == VM_PAGE_NULL) {
6193 if (locked_object) {
6194 vm_object_unlock(locked_object);
6195 locked_object = VM_OBJECT_NULL;
6196 }
6197 tmp_start_idx = cur_idx;
6198 abort_run = TRUE;
6199 continue;
6200 }
6201 if (!disconnected) {
6202 if (m1->vmp_pmapped) {
6203 refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6204 } else {
6205 refmod = 0;
6206 }
6207 }
6208
6209 /* copy the page's contents */
6210 pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(m1), VM_PAGE_GET_PHYS_PAGE(m2));
6211 /* copy the page's state */
6212 assert(!VM_PAGE_WIRED(m1));
6213 assert(m1->vmp_q_state != VM_PAGE_ON_FREE_Q);
6214 assert(m1->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q);
6215 assert(!m1->vmp_laundry);
6216 m2->vmp_reference = m1->vmp_reference;
6217 assert(!m1->vmp_gobbled);
6218 assert(!m1->vmp_private);
6219 m2->vmp_no_cache = m1->vmp_no_cache;
6220 m2->vmp_xpmapped = 0;
6221 assert(!m1->vmp_busy);
6222 assert(!m1->vmp_wanted);
6223 assert(!m1->vmp_fictitious);
6224 m2->vmp_pmapped = m1->vmp_pmapped; /* should flush cache ? */
6225 m2->vmp_wpmapped = m1->vmp_wpmapped;
6226 assert(!m1->vmp_free_when_done);
6227 m2->vmp_absent = m1->vmp_absent;
6228 m2->vmp_error = m1->vmp_error;
6229 m2->vmp_dirty = m1->vmp_dirty;
6230 assert(!m1->vmp_cleaning);
6231 m2->vmp_precious = m1->vmp_precious;
6232 m2->vmp_clustered = m1->vmp_clustered;
6233 assert(!m1->vmp_overwriting);
6234 m2->vmp_restart = m1->vmp_restart;
6235 m2->vmp_unusual = m1->vmp_unusual;
6236 m2->vmp_cs_validated = m1->vmp_cs_validated;
6237 m2->vmp_cs_tainted = m1->vmp_cs_tainted;
6238 m2->vmp_cs_nx = m1->vmp_cs_nx;
6239
6240 /*
6241 * If m1 had really been reusable,
6242 * we would have just stolen it, so
6243 * let's not propagate its "reusable"
6244 * bit and assert that m2 is not
6245 * marked as "reusable".
6246 */
6247 // m2->vmp_reusable = m1->vmp_reusable;
6248 assert(!m2->vmp_reusable);
6249
6250 // assert(!m1->vmp_lopage);
6251
6252 if (m1->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6253 m2->vmp_q_state = VM_PAGE_USED_BY_COMPRESSOR;
6254 }
6255
6256 /*
6257 * page may need to be flushed if
6258 * it is marshalled into a UPL
6259 * that is going to be used by a device
6260 * that doesn't support coherency
6261 */
6262 m2->vmp_written_by_kernel = TRUE;
6263
6264 /*
6265 * make sure we clear the ref/mod state
6266 * from the pmap layer... else we risk
6267 * inheriting state from the last time
6268 * this page was used...
6269 */
6270 pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m2), VM_MEM_MODIFIED | VM_MEM_REFERENCED);
6271
6272 if (refmod & VM_MEM_REFERENCED) {
6273 m2->vmp_reference = TRUE;
6274 }
6275 if (refmod & VM_MEM_MODIFIED) {
6276 SET_PAGE_DIRTY(m2, TRUE);
6277 }
6278 offset = m1->vmp_offset;
6279
6280 /*
6281 * completely cleans up the state
6282 * of the page so that it is ready
6283 * to be put onto the free list, or
6284 * for this purpose it looks like it
6285 * just came off of the free list
6286 */
6287 vm_page_free_prepare(m1);
6288
6289 /*
6290 * now put the substitute page
6291 * on the object
6292 */
6293 vm_page_insert_internal(m2, locked_object, offset, VM_KERN_MEMORY_NONE, TRUE, TRUE, FALSE, FALSE, NULL);
6294
6295 if (m2->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6296 m2->vmp_pmapped = TRUE;
6297 m2->vmp_wpmapped = TRUE;
6298
6299 PMAP_ENTER(kernel_pmap, (vm_map_offset_t)m2->vmp_offset, m2,
6300 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE, kr);
6301
6302 assert(kr == KERN_SUCCESS);
6303
6304 compressed_pages++;
6305 } else {
6306 if (m2->vmp_reference) {
6307 vm_page_activate(m2);
6308 } else {
6309 vm_page_deactivate(m2);
6310 }
6311 }
6312 PAGE_WAKEUP_DONE(m2);
6313 } else {
6314 assert(m1->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
6315
6316 /*
6317 * completely cleans up the state
6318 * of the page so that it is ready
6319 * to be put onto the free list, or
6320 * for this purpose it looks like it
6321 * just came off of the free list
6322 */
6323 vm_page_free_prepare(m1);
6324 }
6325
6326 stolen_pages++;
6327 }
6328 #if CONFIG_BACKGROUND_QUEUE
6329 vm_page_assign_background_state(m1);
6330 #endif
6331 VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
6332 m1->vmp_snext = m;
6333 m = m1;
6334 }
6335 if (locked_object) {
6336 vm_object_unlock(locked_object);
6337 locked_object = VM_OBJECT_NULL;
6338 }
6339
6340 if (abort_run == TRUE) {
6341 /*
6342 * want the index of the last
6343 * page in this run that was
6344 * successfully 'stolen', so back
6345 * it up 1 for the auto-decrement on use
6346 * and 1 more to bump back over this page
6347 */
6348 page_idx = tmp_start_idx + 2;
6349 if (page_idx >= vm_pages_count) {
6350 if (wrapped) {
6351 if (m != VM_PAGE_NULL) {
6352 vm_page_unlock_queues();
6353 vm_page_free_list(m, FALSE);
6354 vm_page_lock_queues();
6355 m = VM_PAGE_NULL;
6356 }
6357 dumped_run++;
6358 goto done_scanning;
6359 }
6360 page_idx = last_idx = 0;
6361 wrapped = TRUE;
6362 }
6363 abort_run = FALSE;
6364
6365 /*
6366 * The run we were assembling was aborted; resume the scan just
6367 * past the page that caused the abort (or from the very first
6368 * page if we wrapped around above).
6369 */
6370 RESET_STATE_OF_RUN();
6371
6372 if (flags & KMA_LOMEM) {
6373 idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx = page_idx;
6374 } else {
6375 idx_last_contig_page_found = vm_page_find_contiguous_last_idx = page_idx;
6376 }
6377
6378 last_idx = page_idx;
6379
6380 if (m != VM_PAGE_NULL) {
6381 vm_page_unlock_queues();
6382 vm_page_free_list(m, FALSE);
6383 vm_page_lock_queues();
6384 m = VM_PAGE_NULL;
6385 }
6386 dumped_run++;
6387
6388 lck_mtx_lock(&vm_page_queue_free_lock);
6389 /*
6390 * reset our free page limit since we
6391 * dropped the lock protecting the vm_page_free_queue
6392 */
6393 free_available = vm_page_free_count - vm_page_free_reserved;
6394 goto retry;
6395 }
6396
6397 for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
6398 assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
6399 assert(m1->vmp_wire_count == 0);
6400
6401 if (wire == TRUE) {
6402 m1->vmp_wire_count++;
6403 m1->vmp_q_state = VM_PAGE_IS_WIRED;
6404 } else {
6405 m1->vmp_gobbled = TRUE;
6406 }
6407 }
6408 if (wire == FALSE) {
6409 vm_page_gobble_count += npages;
6410 }
6411
6412 /*
6413 * gobbled pages are also counted as wired pages
6414 */
6415 vm_page_wire_count += npages;
6416
6417 assert(vm_page_verify_contiguous(m, npages));
6418 }
6419 done_scanning:
6420 PAGE_REPLACEMENT_ALLOWED(FALSE);
6421
6422 vm_page_unlock_queues();
6423
6424 #if DEBUG
6425 clock_get_system_microtime(&tv_end_sec, &tv_end_usec);
6426
6427 tv_end_sec -= tv_start_sec;
6428 if (tv_end_usec < tv_start_usec) {
6429 tv_end_sec--;
6430 tv_end_usec += 1000000;
6431 }
6432 tv_end_usec -= tv_start_usec;
6433 if (tv_end_usec >= 1000000) {
6434 tv_end_sec++;
6435 tv_end_usec -= 1000000;
6436 }
6437 if (vm_page_find_contig_debug) {
6438 printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds... started at %d... scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages\n",
6439 __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
6440 (long)tv_end_sec, tv_end_usec, orig_last_idx,
6441 scanned, yielded, dumped_run, stolen_pages, compressed_pages);
6442 }
6443
6444 #endif
6445 #if MACH_ASSERT
6446 vm_page_verify_free_lists();
6447 #endif
6448 if (m == NULL && zone_gc_called == FALSE) {
6449 printf("%s(num=%d,low=%d): found %d pages at 0x%llx...scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages... wired count is %d\n",
6450 __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
6451 scanned, yielded, dumped_run, stolen_pages, compressed_pages, vm_page_wire_count);
6452
6453 if (consider_buffer_cache_collect != NULL) {
6454 (void)(*consider_buffer_cache_collect)(1);
6455 }
6456
6457 consider_zone_gc(FALSE);
6458
6459 zone_gc_called = TRUE;
6460
6461 printf("vm_page_find_contiguous: zone_gc called... wired count is %d\n", vm_page_wire_count);
6462 goto full_scan_again;
6463 }
6464
6465 return m;
6466 }
6467
6468 /*
6469 * Allocate a list of contiguous, wired pages.
6470 */
6471 kern_return_t
6472 cpm_allocate(
6473 vm_size_t size,
6474 vm_page_t *list,
6475 ppnum_t max_pnum,
6476 ppnum_t pnum_mask,
6477 boolean_t wire,
6478 int flags)
6479 {
6480 vm_page_t pages;
6481 unsigned int npages;
6482
6483 if (size % PAGE_SIZE != 0) {
6484 return KERN_INVALID_ARGUMENT;
6485 }
6486
6487 npages = (unsigned int) (size / PAGE_SIZE);
6488 if (npages != size / PAGE_SIZE) {
6489 /* 32-bit overflow */
6490 return KERN_INVALID_ARGUMENT;
6491 }
6492
6493 /*
6494 * Obtain a pointer to a subset of the free
6495 * list large enough to satisfy the request;
6496 * the region will be physically contiguous.
6497 */
6498 pages = vm_page_find_contiguous(npages, max_pnum, pnum_mask, wire, flags);
6499
6500 if (pages == VM_PAGE_NULL) {
6501 return KERN_NO_SPACE;
6502 }
6503 /*
6504 * determine need for wakeups
6505 */
6506 if (vm_page_free_count < vm_page_free_min) {
6507 lck_mtx_lock(&vm_page_queue_free_lock);
6508 if (vm_pageout_running == FALSE) {
6509 lck_mtx_unlock(&vm_page_queue_free_lock);
6510 thread_wakeup((event_t) &vm_page_free_wanted);
6511 } else {
6512 lck_mtx_unlock(&vm_page_queue_free_lock);
6513 }
6514 }
6515
6516 VM_CHECK_MEMORYSTATUS;
6517
6518 /*
6519 * The CPM pages should now be available and
6520 * ordered by ascending physical address.
6521 */
6522 assert(vm_page_verify_contiguous(pages, npages));
6523
6524 *list = pages;
6525 return KERN_SUCCESS;
6526 }
6527
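/*
 * Editor's note: a minimal, hypothetical caller sketch for cpm_allocate(),
 * added for illustration only; it is not part of the original file.  The
 * request size (16 pages), the lack of an address bound / alignment mask and
 * the error handling are assumptions, not taken from any real caller.
 */
static kern_return_t
example_grab_contiguous(vm_page_t *out_pages)
{
	vm_page_t       pages = VM_PAGE_NULL;
	kern_return_t   kr;

	kr = cpm_allocate((vm_size_t)16 * PAGE_SIZE,    /* must be page aligned */
	    &pages,
	    0,          /* max_pnum: 0 => no upper bound on physical address */
	    0,          /* pnum_mask: no alignment constraint on the first page */
	    TRUE,       /* wire the pages rather than leaving them gobbled */
	    0);         /* flags: not KMA_LOMEM */

	if (kr != KERN_SUCCESS) {
		/* typically KERN_NO_SPACE if no contiguous run could be found */
		return kr;
	}
	/* the returned list is ordered by ascending physical address */
	*out_pages = pages;
	return KERN_SUCCESS;
}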
6528
6529 unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT;
6530
6531 /*
6532 * when working on a 'run' of pages, it is necessary to hold
6533 * the vm_page_queue_lock (a hot global lock) for certain operations
6534 * on the page... however, the majority of the work can be done
6535 * while merely holding the object lock... in fact there are certain
6536 * collections of pages that don't require any work brokered by the
6537 * vm_page_queue_lock... to mitigate the time spent behind the global
6538 * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
6539 * while doing all of the work that doesn't require the vm_page_queue_lock...
6540 * then call vm_page_do_delayed_work to acquire the vm_page_queue_lock and do the
6541 * necessary work for each page... we will grab the busy bit on the page
6542 * if it's not already held so that vm_page_do_delayed_work can drop the object lock
6543 * if it can't immediately take the vm_page_queue_lock in order to compete
6544 * for the locks in the same order that vm_pageout_scan takes them.
6545 * the operation names are modeled after the names of the routines that
6546 * need to be called in order to make the changes very obvious in the
6547 * original loop
6548 */
6549
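/*
 * Editor's note: a simplified, hypothetical sketch of the caller-side
 * batching described above; it is not one of the real callers (those live
 * in vm_pageout.c / vm_fault.c) and the DW_clear_busy | DW_PAGE_WAKEUP mask
 * is only an example.  It assumes the object lock is held and that the pages
 * are linked through vmp_snext; it uses only the dw_m / dw_mask fields and
 * DW_* flags consumed by vm_page_do_delayed_work() below.
 */
static void
example_batched_release(vm_object_t object, vm_tag_t tag, vm_page_t page_list)
{
	struct vm_page_delayed_work     dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work     *dwp = &dw_array[0];
	int                             dw_count = 0;
	vm_page_t                       m;

	for (m = page_list; m != VM_PAGE_NULL; m = m->vmp_snext) {
		/*
		 * per-page work that only needs the object lock goes here;
		 * queue manipulations are merely recorded in the dw entry
		 */
		dwp->dw_m = m;
		dwp->dw_mask = DW_clear_busy | DW_PAGE_WAKEUP;
		dwp++;
		dw_count++;

		if (dw_count >= DEFAULT_DELAYED_WORK_LIMIT) {
			/* one trip behind the vm_page_queue_lock for the whole batch */
			vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
			dwp = &dw_array[0];
			dw_count = 0;
		}
	}
	if (dw_count) {
		vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
	}
}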
6550 void
6551 vm_page_do_delayed_work(
6552 vm_object_t object,
6553 vm_tag_t tag,
6554 struct vm_page_delayed_work *dwp,
6555 int dw_count)
6556 {
6557 int j;
6558 vm_page_t m;
6559 vm_page_t local_free_q = VM_PAGE_NULL;
6560
6561 /*
6562 * pageout_scan takes the vm_page_lock_queues first
6563 * then tries for the object lock... to avoid what
6564 * is effectively a lock inversion, we'll go to the
6565 * trouble of taking them in that same order... otherwise
6566 * if this object contains the majority of the pages resident
6567 * in the UBC (or a small set of large objects actively being
6568 * worked on contain the majority of the pages), we could
6569 * cause the pageout_scan thread to 'starve' in its attempt
6570 * to find pages to move to the free queue, since it has to
6571 * successfully acquire the object lock of any candidate page
6572 * before it can steal/clean it.
6573 */
6574 if (!vm_page_trylockspin_queues()) {
6575 vm_object_unlock(object);
6576
6577 /*
6578 * "Turnstile enabled vm_pageout_scan" can be runnable
6579 * for a very long time without getting on a core.
6580 * If this is a higher priority thread it could be
6581 * waiting here for a very long time respecting the fact
6582 * that pageout_scan would like its object after VPS does
6583 * a mutex_pause(0).
6584 * So we cap the number of yields in the vm_object_lock_avoid()
6585 * case to a single mutex_pause(0) which will give vm_pageout_scan
6586 * 10us to run and grab the object if needed.
6587 */
6588 vm_page_lockspin_queues();
6589
6590 for (j = 0;; j++) {
6591 if ((!vm_object_lock_avoid(object) ||
6592 (vps_dynamic_priority_enabled && (j > 0))) &&
6593 _vm_object_lock_try(object)) {
6594 break;
6595 }
6596 vm_page_unlock_queues();
6597 mutex_pause(j);
6598 vm_page_lockspin_queues();
6599 }
6600 }
6601 for (j = 0; j < dw_count; j++, dwp++) {
6602 m = dwp->dw_m;
6603
6604 if (dwp->dw_mask & DW_vm_pageout_throttle_up) {
6605 vm_pageout_throttle_up(m);
6606 }
6607 #if CONFIG_PHANTOM_CACHE
6608 if (dwp->dw_mask & DW_vm_phantom_cache_update) {
6609 vm_phantom_cache_update(m);
6610 }
6611 #endif
6612 if (dwp->dw_mask & DW_vm_page_wire) {
6613 vm_page_wire(m, tag, FALSE);
6614 } else if (dwp->dw_mask & DW_vm_page_unwire) {
6615 boolean_t queueit;
6616
6617 queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE;
6618
6619 vm_page_unwire(m, queueit);
6620 }
6621 if (dwp->dw_mask & DW_vm_page_free) {
6622 vm_page_free_prepare_queues(m);
6623
6624 assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
6625 /*
6626 * Add this page to our list of reclaimed pages,
6627 * to be freed later.
6628 */
6629 m->vmp_snext = local_free_q;
6630 local_free_q = m;
6631 } else {
6632 if (dwp->dw_mask & DW_vm_page_deactivate_internal) {
6633 vm_page_deactivate_internal(m, FALSE);
6634 } else if (dwp->dw_mask & DW_vm_page_activate) {
6635 if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
6636 vm_page_activate(m);
6637 }
6638 } else if (dwp->dw_mask & DW_vm_page_speculate) {
6639 vm_page_speculate(m, TRUE);
6640 } else if (dwp->dw_mask & DW_enqueue_cleaned) {
6641 /*
6642 * if we didn't hold the object lock and did this,
6643 * we might disconnect the page, then someone might
6644 * soft fault it back in, then we would put it on the
6645 * cleaned queue, and so we would have a referenced (maybe even dirty)
6646 * page on that queue, which we don't want
6647 */
6648 int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
6649
6650 if ((refmod_state & VM_MEM_REFERENCED)) {
6651 /*
6652 * this page has been touched since it got cleaned; let's activate it
6653 * if it hasn't already been
6654 */
6655 VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
6656 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
6657
6658 if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
6659 vm_page_activate(m);
6660 }
6661 } else {
6662 m->vmp_reference = FALSE;
6663 vm_page_enqueue_cleaned(m);
6664 }
6665 } else if (dwp->dw_mask & DW_vm_page_lru) {
6666 vm_page_lru(m);
6667 } else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) {
6668 if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
6669 vm_page_queues_remove(m, TRUE);
6670 }
6671 }
6672 if (dwp->dw_mask & DW_set_reference) {
6673 m->vmp_reference = TRUE;
6674 } else if (dwp->dw_mask & DW_clear_reference) {
6675 m->vmp_reference = FALSE;
6676 }
6677
6678 if (dwp->dw_mask & DW_move_page) {
6679 if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
6680 vm_page_queues_remove(m, FALSE);
6681
6682 assert(VM_PAGE_OBJECT(m) != kernel_object);
6683
6684 vm_page_enqueue_inactive(m, FALSE);
6685 }
6686 }
6687 if (dwp->dw_mask & DW_clear_busy) {
6688 m->vmp_busy = FALSE;
6689 }
6690
6691 if (dwp->dw_mask & DW_PAGE_WAKEUP) {
6692 PAGE_WAKEUP(m);
6693 }
6694 }
6695 }
6696 vm_page_unlock_queues();
6697
6698 if (local_free_q) {
6699 vm_page_free_list(local_free_q, TRUE);
6700 }
6701
6702 VM_CHECK_MEMORYSTATUS;
6703 }
6704
6705 kern_return_t
6706 vm_page_alloc_list(
6707 int page_count,
6708 int flags,
6709 vm_page_t *list)
6710 {
6711 vm_page_t lo_page_list = VM_PAGE_NULL;
6712 vm_page_t mem;
6713 int i;
6714
6715 if (!(flags & KMA_LOMEM)) {
6716 panic("vm_page_alloc_list: called w/o KMA_LOMEM");
6717 }
6718
6719 for (i = 0; i < page_count; i++) {
6720 mem = vm_page_grablo();
6721
6722 if (mem == VM_PAGE_NULL) {
6723 if (lo_page_list) {
6724 vm_page_free_list(lo_page_list, FALSE);
6725 }
6726
6727 *list = VM_PAGE_NULL;
6728
6729 return KERN_RESOURCE_SHORTAGE;
6730 }
6731 mem->vmp_snext = lo_page_list;
6732 lo_page_list = mem;
6733 }
6734 *list = lo_page_list;
6735
6736 return KERN_SUCCESS;
6737 }
6738
6739 void
6740 vm_page_set_offset(vm_page_t page, vm_object_offset_t offset)
6741 {
6742 page->vmp_offset = offset;
6743 }
6744
6745 vm_page_t
6746 vm_page_get_next(vm_page_t page)
6747 {
6748 return page->vmp_snext;
6749 }
6750
6751 vm_object_offset_t
6752 vm_page_get_offset(vm_page_t page)
6753 {
6754 return page->vmp_offset;
6755 }
6756
6757 ppnum_t
6758 vm_page_get_phys_page(vm_page_t page)
6759 {
6760 return VM_PAGE_GET_PHYS_PAGE(page);
6761 }
6762
6763
6764 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6765
6766 #if HIBERNATION
6767
6768 static vm_page_t hibernate_gobble_queue;
6769
6770 static int hibernate_drain_pageout_queue(struct vm_pageout_queue *);
6771 static int hibernate_flush_dirty_pages(int);
6772 static int hibernate_flush_queue(vm_page_queue_head_t *, int);
6773
6774 void hibernate_flush_wait(void);
6775 void hibernate_mark_in_progress(void);
6776 void hibernate_clear_in_progress(void);
6777
6778 void hibernate_free_range(int, int);
6779 void hibernate_hash_insert_page(vm_page_t);
6780 uint32_t hibernate_mark_as_unneeded(addr64_t, addr64_t, hibernate_page_list_t *, hibernate_page_list_t *);
6781 uint32_t hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *);
6782 ppnum_t hibernate_lookup_paddr(unsigned int);
6783
6784 struct hibernate_statistics {
6785 int hibernate_considered;
6786 int hibernate_reentered_on_q;
6787 int hibernate_found_dirty;
6788 int hibernate_skipped_cleaning;
6789 int hibernate_skipped_transient;
6790 int hibernate_skipped_precious;
6791 int hibernate_skipped_external;
6792 int hibernate_queue_nolock;
6793 int hibernate_queue_paused;
6794 int hibernate_throttled;
6795 int hibernate_throttle_timeout;
6796 int hibernate_drained;
6797 int hibernate_drain_timeout;
6798 int cd_lock_failed;
6799 int cd_found_precious;
6800 int cd_found_wired;
6801 int cd_found_busy;
6802 int cd_found_unusual;
6803 int cd_found_cleaning;
6804 int cd_found_laundry;
6805 int cd_found_dirty;
6806 int cd_found_xpmapped;
6807 int cd_skipped_xpmapped;
6808 int cd_local_free;
6809 int cd_total_free;
6810 int cd_vm_page_wire_count;
6811 int cd_vm_struct_pages_unneeded;
6812 int cd_pages;
6813 int cd_discarded;
6814 int cd_count_wire;
6815 } hibernate_stats;
6816
6817
6818 /*
6819 * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
6820 * so that we don't overrun the estimated image size, which would
6821 * result in a hibernation failure.
6822 */
6823 #define HIBERNATE_XPMAPPED_LIMIT 40000
6824
6825
6826 static int
6827 hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
6828 {
6829 wait_result_t wait_result;
6830
6831 vm_page_lock_queues();
6832
6833 while (!vm_page_queue_empty(&q->pgo_pending)) {
6834 q->pgo_draining = TRUE;
6835
6836 assert_wait_timeout((event_t) (&q->pgo_laundry + 1), THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);
6837
6838 vm_page_unlock_queues();
6839
6840 wait_result = thread_block(THREAD_CONTINUE_NULL);
6841
6842 if (wait_result == THREAD_TIMED_OUT && !vm_page_queue_empty(&q->pgo_pending)) {
6843 hibernate_stats.hibernate_drain_timeout++;
6844
6845 if (q == &vm_pageout_queue_external) {
6846 return 0;
6847 }
6848
6849 return 1;
6850 }
6851 vm_page_lock_queues();
6852
6853 hibernate_stats.hibernate_drained++;
6854 }
6855 vm_page_unlock_queues();
6856
6857 return 0;
6858 }
6859
6860
6861 boolean_t hibernate_skip_external = FALSE;
6862
6863 static int
6864 hibernate_flush_queue(vm_page_queue_head_t *q, int qcount)
6865 {
6866 vm_page_t m;
6867 vm_object_t l_object = NULL;
6868 vm_object_t m_object = NULL;
6869 int refmod_state = 0;
6870 int try_failed_count = 0;
6871 int retval = 0;
6872 int current_run = 0;
6873 struct vm_pageout_queue *iq;
6874 struct vm_pageout_queue *eq;
6875 struct vm_pageout_queue *tq;
6876
6877 KDBG(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START,
6878 VM_KERNEL_UNSLIDE_OR_PERM(q), qcount);
6879
6880 iq = &vm_pageout_queue_internal;
6881 eq = &vm_pageout_queue_external;
6882
6883 vm_page_lock_queues();
6884
6885 while (qcount && !vm_page_queue_empty(q)) {
6886 if (current_run++ == 1000) {
6887 if (hibernate_should_abort()) {
6888 retval = 1;
6889 break;
6890 }
6891 current_run = 0;
6892 }
6893
6894 m = (vm_page_t) vm_page_queue_first(q);
6895 m_object = VM_PAGE_OBJECT(m);
6896
6897 /*
6898 * check to see if we currently are working
6899 * with the same object... if so, we've
6900 * already got the lock
6901 */
6902 if (m_object != l_object) {
6903 /*
6904 * the object associated with candidate page is
6905 * different from the one we were just working
6906 * with... dump the lock if we still own it
6907 */
6908 if (l_object != NULL) {
6909 vm_object_unlock(l_object);
6910 l_object = NULL;
6911 }
6912 /*
6913 * Try to lock object; since we've already got the
6914 * page queues lock, we can only 'try' for this one.
6915 * if the 'try' fails, we need to do a mutex_pause
6916 * to allow the owner of the object lock a chance to
6917 * run...
6918 */
6919 if (!vm_object_lock_try_scan(m_object)) {
6920 if (try_failed_count > 20) {
6921 hibernate_stats.hibernate_queue_nolock++;
6922
6923 goto reenter_pg_on_q;
6924 }
6925
6926 vm_page_unlock_queues();
6927 mutex_pause(try_failed_count++);
6928 vm_page_lock_queues();
6929
6930 hibernate_stats.hibernate_queue_paused++;
6931 continue;
6932 } else {
6933 l_object = m_object;
6934 }
6935 }
6936 if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error) {
6937 /*
6938 * page is not to be cleaned
6939 * put it back on the head of its queue
6940 */
6941 if (m->vmp_cleaning) {
6942 hibernate_stats.hibernate_skipped_cleaning++;
6943 } else {
6944 hibernate_stats.hibernate_skipped_transient++;
6945 }
6946
6947 goto reenter_pg_on_q;
6948 }
6949 if (m_object->copy == VM_OBJECT_NULL) {
6950 if (m_object->purgable == VM_PURGABLE_VOLATILE || m_object->purgable == VM_PURGABLE_EMPTY) {
6951 /*
6952 * let the normal hibernate image path
6953 * deal with these
6954 */
6955 goto reenter_pg_on_q;
6956 }
6957 }
6958 if (!m->vmp_dirty && m->vmp_pmapped) {
6959 refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
6960
6961 if ((refmod_state & VM_MEM_MODIFIED)) {
6962 SET_PAGE_DIRTY(m, FALSE);
6963 }
6964 } else {
6965 refmod_state = 0;
6966 }
6967
6968 if (!m->vmp_dirty) {
6969 /*
6970 * page is not to be cleaned
6971 * put it back on the head of its queue
6972 */
6973 if (m->vmp_precious) {
6974 hibernate_stats.hibernate_skipped_precious++;
6975 }
6976
6977 goto reenter_pg_on_q;
6978 }
6979
6980 if (hibernate_skip_external == TRUE && !m_object->internal) {
6981 hibernate_stats.hibernate_skipped_external++;
6982
6983 goto reenter_pg_on_q;
6984 }
6985 tq = NULL;
6986
6987 if (m_object->internal) {
6988 if (VM_PAGE_Q_THROTTLED(iq)) {
6989 tq = iq;
6990 }
6991 } else if (VM_PAGE_Q_THROTTLED(eq)) {
6992 tq = eq;
6993 }
6994
6995 if (tq != NULL) {
6996 wait_result_t wait_result;
6997 int wait_count = 5;
6998
6999 if (l_object != NULL) {
7000 vm_object_unlock(l_object);
7001 l_object = NULL;
7002 }
7003
7004 while (retval == 0) {
7005 tq->pgo_throttled = TRUE;
7006
7007 assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000 * NSEC_PER_USEC);
7008
7009 vm_page_unlock_queues();
7010
7011 wait_result = thread_block(THREAD_CONTINUE_NULL);
7012
7013 vm_page_lock_queues();
7014
7015 if (wait_result != THREAD_TIMED_OUT) {
7016 break;
7017 }
7018 if (!VM_PAGE_Q_THROTTLED(tq)) {
7019 break;
7020 }
7021
7022 if (hibernate_should_abort()) {
7023 retval = 1;
7024 }
7025
7026 if (--wait_count == 0) {
7027 hibernate_stats.hibernate_throttle_timeout++;
7028
7029 if (tq == eq) {
7030 hibernate_skip_external = TRUE;
7031 break;
7032 }
7033 retval = 1;
7034 }
7035 }
7036 if (retval) {
7037 break;
7038 }
7039
7040 hibernate_stats.hibernate_throttled++;
7041
7042 continue;
7043 }
7044 /*
7045 * we've already factored out pages in the laundry which
7046 * means this page can't be on the pageout queue so it's
7047 * safe to do the vm_page_queues_remove
7048 */
7049 vm_page_queues_remove(m, TRUE);
7050
7051 if (m_object->internal == TRUE) {
7052 pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m), PMAP_OPTIONS_COMPRESSOR, NULL);
7053 }
7054
7055 vm_pageout_cluster(m);
7056
7057 hibernate_stats.hibernate_found_dirty++;
7058
7059 goto next_pg;
7060
7061 reenter_pg_on_q:
7062 vm_page_queue_remove(q, m, vmp_pageq);
7063 vm_page_queue_enter(q, m, vmp_pageq);
7064
7065 hibernate_stats.hibernate_reentered_on_q++;
7066 next_pg:
7067 hibernate_stats.hibernate_considered++;
7068
7069 qcount--;
7070 try_failed_count = 0;
7071 }
7072 if (l_object != NULL) {
7073 vm_object_unlock(l_object);
7074 l_object = NULL;
7075 }
7076
7077 vm_page_unlock_queues();
7078
7079 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_END, hibernate_stats.hibernate_found_dirty, retval, 0, 0, 0);
7080
7081 return retval;
7082 }
7083
7084
7085 static int
7086 hibernate_flush_dirty_pages(int pass)
7087 {
7088 struct vm_speculative_age_q *aq;
7089 uint32_t i;
7090
7091 if (vm_page_local_q) {
7092 zpercpu_foreach_cpu(lid) {
7093 vm_page_reactivate_local(lid, TRUE, FALSE);
7094 }
7095 }
7096
7097 for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
7098 int qcount;
7099 vm_page_t m;
7100
7101 aq = &vm_page_queue_speculative[i];
7102
7103 if (vm_page_queue_empty(&aq->age_q)) {
7104 continue;
7105 }
7106 qcount = 0;
7107
7108 vm_page_lockspin_queues();
7109
7110 vm_page_queue_iterate(&aq->age_q, m, vmp_pageq) {
7111 qcount++;
7112 }
7113 vm_page_unlock_queues();
7114
7115 if (qcount) {
7116 if (hibernate_flush_queue(&aq->age_q, qcount)) {
7117 return 1;
7118 }
7119 }
7120 }
7121 if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_page_anonymous_count - vm_page_cleaned_count)) {
7122 return 1;
7123 }
7124 /* XXX FBDP TODO: flush secluded queue */
7125 if (hibernate_flush_queue(&vm_page_queue_anonymous, vm_page_anonymous_count)) {
7126 return 1;
7127 }
7128 if (hibernate_flush_queue(&vm_page_queue_cleaned, vm_page_cleaned_count)) {
7129 return 1;
7130 }
7131 if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
7132 return 1;
7133 }
7134
7135 if (pass == 1) {
7136 vm_compressor_record_warmup_start();
7137 }
7138
7139 if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count)) {
7140 if (pass == 1) {
7141 vm_compressor_record_warmup_end();
7142 }
7143 return 1;
7144 }
7145 if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
7146 if (pass == 1) {
7147 vm_compressor_record_warmup_end();
7148 }
7149 return 1;
7150 }
7151 if (pass == 1) {
7152 vm_compressor_record_warmup_end();
7153 }
7154
7155 if (hibernate_skip_external == FALSE && hibernate_drain_pageout_queue(&vm_pageout_queue_external)) {
7156 return 1;
7157 }
7158
7159 return 0;
7160 }
7161
7162
7163 void
7164 hibernate_reset_stats()
7165 {
7166 bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
7167 }
7168
7169
7170 int
7171 hibernate_flush_memory()
7172 {
7173 int retval;
7174
7175 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
7176
7177 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_START, vm_page_free_count, 0, 0, 0, 0);
7178
7179 hibernate_cleaning_in_progress = TRUE;
7180 hibernate_skip_external = FALSE;
7181
7182 if ((retval = hibernate_flush_dirty_pages(1)) == 0) {
7183 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
7184
7185 vm_compressor_flush();
7186
7187 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
7188
7189 if (consider_buffer_cache_collect != NULL) {
7190 unsigned int orig_wire_count;
7191
7192 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0);
7193 orig_wire_count = vm_page_wire_count;
7194
7195 (void)(*consider_buffer_cache_collect)(1);
7196 consider_zone_gc(FALSE);
7197
7198 HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count - vm_page_wire_count);
7199
7200 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, orig_wire_count - vm_page_wire_count, 0, 0, 0, 0);
7201 }
7202 }
7203 hibernate_cleaning_in_progress = FALSE;
7204
7205 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_END, vm_page_free_count, hibernate_stats.hibernate_found_dirty, retval, 0, 0);
7206
7207 if (retval) {
7208 HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT);
7209 }
7210
7211
7212 HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n",
7213 hibernate_stats.hibernate_considered,
7214 hibernate_stats.hibernate_reentered_on_q,
7215 hibernate_stats.hibernate_found_dirty);
7216 HIBPRINT(" skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n",
7217 hibernate_stats.hibernate_skipped_cleaning,
7218 hibernate_stats.hibernate_skipped_transient,
7219 hibernate_stats.hibernate_skipped_precious,
7220 hibernate_stats.hibernate_skipped_external,
7221 hibernate_stats.hibernate_queue_nolock);
7222 HIBPRINT(" queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n",
7223 hibernate_stats.hibernate_queue_paused,
7224 hibernate_stats.hibernate_throttled,
7225 hibernate_stats.hibernate_throttle_timeout,
7226 hibernate_stats.hibernate_drained,
7227 hibernate_stats.hibernate_drain_timeout);
7228
7229 return retval;
7230 }
7231
7232
7233 static void
7234 hibernate_page_list_zero(hibernate_page_list_t *list)
7235 {
7236 uint32_t bank;
7237 hibernate_bitmap_t * bitmap;
7238
7239 bitmap = &list->bank_bitmap[0];
7240 for (bank = 0; bank < list->bank_count; bank++) {
7241 uint32_t last_bit;
7242
7243 bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
7244 // set out-of-bound bits at end of bitmap.
7245 last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
7246 if (last_bit) {
7247 bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);
7248 }
7249
7250 bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
7251 }
7252 }
7253
7254 void
7255 hibernate_free_gobble_pages(void)
7256 {
7257 vm_page_t m, next;
7258 uint32_t count = 0;
7259
7260 m = (vm_page_t) hibernate_gobble_queue;
7261 while (m) {
7262 next = m->vmp_snext;
7263 vm_page_free(m);
7264 count++;
7265 m = next;
7266 }
7267 hibernate_gobble_queue = VM_PAGE_NULL;
7268
7269 if (count) {
7270 HIBLOG("Freed %d pages\n", count);
7271 }
7272 }
7273
7274 static boolean_t
7275 hibernate_consider_discard(vm_page_t m, boolean_t preflight)
7276 {
7277 vm_object_t object = NULL;
7278 int refmod_state;
7279 boolean_t discard = FALSE;
7280
7281 do{
7282 if (m->vmp_private) {
7283 panic("hibernate_consider_discard: private");
7284 }
7285
7286 object = VM_PAGE_OBJECT(m);
7287
7288 if (!vm_object_lock_try(object)) {
7289 object = NULL;
7290 if (!preflight) {
7291 hibernate_stats.cd_lock_failed++;
7292 }
7293 break;
7294 }
7295 if (VM_PAGE_WIRED(m)) {
7296 if (!preflight) {
7297 hibernate_stats.cd_found_wired++;
7298 }
7299 break;
7300 }
7301 if (m->vmp_precious) {
7302 if (!preflight) {
7303 hibernate_stats.cd_found_precious++;
7304 }
7305 break;
7306 }
7307 if (m->vmp_busy || !object->alive) {
7308 /*
7309 * Somebody is playing with this page.
7310 */
7311 if (!preflight) {
7312 hibernate_stats.cd_found_busy++;
7313 }
7314 break;
7315 }
7316 if (m->vmp_absent || m->vmp_unusual || m->vmp_error) {
7317 /*
7318 * If it's unusual in any way, ignore it
7319 */
7320 if (!preflight) {
7321 hibernate_stats.cd_found_unusual++;
7322 }
7323 break;
7324 }
7325 if (m->vmp_cleaning) {
7326 if (!preflight) {
7327 hibernate_stats.cd_found_cleaning++;
7328 }
7329 break;
7330 }
7331 if (m->vmp_laundry) {
7332 if (!preflight) {
7333 hibernate_stats.cd_found_laundry++;
7334 }
7335 break;
7336 }
7337 if (!m->vmp_dirty) {
7338 refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
7339
7340 if (refmod_state & VM_MEM_REFERENCED) {
7341 m->vmp_reference = TRUE;
7342 }
7343 if (refmod_state & VM_MEM_MODIFIED) {
7344 SET_PAGE_DIRTY(m, FALSE);
7345 }
7346 }
7347
7348 /*
7349 * If it's clean or purgeable we can discard the page on wakeup.
7350 */
7351 discard = (!m->vmp_dirty)
7352 || (VM_PURGABLE_VOLATILE == object->purgable)
7353 || (VM_PURGABLE_EMPTY == object->purgable);
7354
7355
7356 if (discard == FALSE) {
7357 if (!preflight) {
7358 hibernate_stats.cd_found_dirty++;
7359 }
7360 } else if (m->vmp_xpmapped && m->vmp_reference && !object->internal) {
7361 if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
7362 if (!preflight) {
7363 hibernate_stats.cd_found_xpmapped++;
7364 }
7365 discard = FALSE;
7366 } else {
7367 if (!preflight) {
7368 hibernate_stats.cd_skipped_xpmapped++;
7369 }
7370 }
7371 }
7372 }while (FALSE);
7373
7374 if (object) {
7375 vm_object_unlock(object);
7376 }
7377
7378 return discard;
7379 }
7380
7381
7382 static void
7383 hibernate_discard_page(vm_page_t m)
7384 {
7385 vm_object_t m_object;
7386
7387 if (m->vmp_absent || m->vmp_unusual || m->vmp_error) {
7388 /*
7389 * If it's unusual in any way, ignore it
7390 */
7391 return;
7392 }
7393
7394 m_object = VM_PAGE_OBJECT(m);
7395
7396 #if MACH_ASSERT || DEBUG
7397 if (!vm_object_lock_try(m_object)) {
7398 panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
7399 }
7400 #else
7401 /* No need to lock page queue for token delete, hibernate_vm_unlock()
7402 * makes sure these locks are uncontended before sleep */
7403 #endif /* MACH_ASSERT || DEBUG */
7404
7405 if (m->vmp_pmapped == TRUE) {
7406 __unused int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
7407 }
7408
7409 if (m->vmp_laundry) {
7410 panic("hibernate_discard_page(%p) laundry", m);
7411 }
7412 if (m->vmp_private) {
7413 panic("hibernate_discard_page(%p) private", m);
7414 }
7415 if (m->vmp_fictitious) {
7416 panic("hibernate_discard_page(%p) fictitious", m);
7417 }
7418
7419 if (VM_PURGABLE_VOLATILE == m_object->purgable) {
7420 /* object should be on a queue */
7421 assert((m_object->objq.next != NULL) && (m_object->objq.prev != NULL));
7422 purgeable_q_t old_queue = vm_purgeable_object_remove(m_object);
7423 assert(old_queue);
7424 if (m_object->purgeable_when_ripe) {
7425 vm_purgeable_token_delete_first(old_queue);
7426 }
7427 vm_object_lock_assert_exclusive(m_object);
7428 m_object->purgable = VM_PURGABLE_EMPTY;
7429
7430 /*
7431 * Purgeable ledgers: pages of VOLATILE and EMPTY objects are
7432 * accounted in the "volatile" ledger, so no change here.
7433 * We have to update vm_page_purgeable_count, though, since we're
7434 * effectively purging this object.
7435 */
7436 unsigned int delta;
7437 assert(m_object->resident_page_count >= m_object->wired_page_count);
7438 delta = (m_object->resident_page_count - m_object->wired_page_count);
7439 assert(vm_page_purgeable_count >= delta);
7440 assert(delta > 0);
7441 OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count);
7442 }
7443
7444 vm_page_free(m);
7445
7446 #if MACH_ASSERT || DEBUG
7447 vm_object_unlock(m_object);
7448 #endif /* MACH_ASSERT || DEBUG */
7449 }
7450
7451 /*
7452 * Grab locks for hibernate_page_list_setall()
7453 */
7454 void
7455 hibernate_vm_lock_queues(void)
7456 {
7457 vm_object_lock(compressor_object);
7458 vm_page_lock_queues();
7459 lck_mtx_lock(&vm_page_queue_free_lock);
7460 lck_mtx_lock(&vm_purgeable_queue_lock);
7461
7462 if (vm_page_local_q) {
7463 zpercpu_foreach(lq, vm_page_local_q) {
7464 VPL_LOCK(&lq->vpl_lock);
7465 }
7466 }
7467 }
7468
7469 void
7470 hibernate_vm_unlock_queues(void)
7471 {
7472 if (vm_page_local_q) {
7473 zpercpu_foreach(lq, vm_page_local_q) {
7474 VPL_UNLOCK(&lq->vpl_lock);
7475 }
7476 }
7477 lck_mtx_unlock(&vm_purgeable_queue_lock);
7478 lck_mtx_unlock(&vm_page_queue_free_lock);
7479 vm_page_unlock_queues();
7480 vm_object_unlock(compressor_object);
7481 }
7482
7483 /*
7484 * A zero bit in the bitmaps => the page needs to be saved. All pages default to being saved;
7485 * pages known to the VM to not need saving are subtracted.
7486 * Wired pages to be saved are present in page_list_wired, pageable in page_list.
7487 */
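/*
 * Informal sketch of the convention used below: a call such as
 *     hibernate_page_bitset(page_list, TRUE, ppn);
 * marks physical page 'ppn' as one that does NOT have to be written to the
 * image, and the same call on page_list_wired records that the page was found
 * on one of the scanned queues and therefore is not part of the residual wired set.
 */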
7488
7489 void
7490 hibernate_page_list_setall(hibernate_page_list_t * page_list,
7491 hibernate_page_list_t * page_list_wired,
7492 hibernate_page_list_t * page_list_pal,
7493 boolean_t preflight,
7494 boolean_t will_discard,
7495 uint32_t * pagesOut)
7496 {
7497 uint64_t start, end, nsec;
7498 vm_page_t m;
7499 vm_page_t next;
7500 uint32_t pages = page_list->page_count;
7501 uint32_t count_anonymous = 0, count_throttled = 0, count_compressor = 0;
7502 uint32_t count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0;
7503 uint32_t count_wire = pages;
7504 uint32_t count_discard_active = 0;
7505 uint32_t count_discard_inactive = 0;
7506 uint32_t count_discard_cleaned = 0;
7507 uint32_t count_discard_purgeable = 0;
7508 uint32_t count_discard_speculative = 0;
7509 uint32_t count_discard_vm_struct_pages = 0;
7510 uint32_t i;
7511 uint32_t bank;
7512 hibernate_bitmap_t * bitmap;
7513 hibernate_bitmap_t * bitmap_wired;
7514 boolean_t discard_all;
7515 boolean_t discard;
7516
7517 HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight);
7518
7519 if (preflight) {
7520 page_list = NULL;
7521 page_list_wired = NULL;
7522 page_list_pal = NULL;
7523 discard_all = FALSE;
7524 } else {
7525 discard_all = will_discard;
7526 }
7527
7528 #if MACH_ASSERT || DEBUG
7529 if (!preflight) {
7530 assert(hibernate_vm_locks_are_safe());
7531 vm_page_lock_queues();
7532 if (vm_page_local_q) {
7533 zpercpu_foreach(lq, vm_page_local_q) {
7534 VPL_LOCK(&lq->vpl_lock);
7535 }
7536 }
7537 }
7538 #endif /* MACH_ASSERT || DEBUG */
7539
7540
7541 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0);
7542
7543 clock_get_uptime(&start);
7544
7545 if (!preflight) {
7546 hibernate_page_list_zero(page_list);
7547 hibernate_page_list_zero(page_list_wired);
7548 hibernate_page_list_zero(page_list_pal);
7549
7550 hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
7551 hibernate_stats.cd_pages = pages;
7552 }
7553
7554 if (vm_page_local_q) {
7555 zpercpu_foreach_cpu(lid) {
7556 vm_page_reactivate_local(lid, TRUE, !preflight);
7557 }
7558 }
7559
7560 if (preflight) {
7561 vm_object_lock(compressor_object);
7562 vm_page_lock_queues();
7563 lck_mtx_lock(&vm_page_queue_free_lock);
7564 }
7565
7566 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
7567
7568 hibernation_vmqueues_inspection = TRUE;
7569
7570 m = (vm_page_t) hibernate_gobble_queue;
7571 while (m) {
7572 pages--;
7573 count_wire--;
7574 if (!preflight) {
7575 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7576 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7577 }
7578 m = m->vmp_snext;
7579 }
7580
7581 if (!preflight) {
7582 percpu_foreach(free_pages_head, free_pages) {
7583 for (m = *free_pages_head; m; m = m->vmp_snext) {
7584 assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
7585
7586 pages--;
7587 count_wire--;
7588 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7589 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7590
7591 hibernate_stats.cd_local_free++;
7592 hibernate_stats.cd_total_free++;
7593 }
7594 }
7595 }
7596
7597 for (i = 0; i < vm_colors; i++) {
7598 vm_page_queue_iterate(&vm_page_queue_free[i].qhead, m, vmp_pageq) {
7599 assert(m->vmp_q_state == VM_PAGE_ON_FREE_Q);
7600
7601 pages--;
7602 count_wire--;
7603 if (!preflight) {
7604 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7605 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7606
7607 hibernate_stats.cd_total_free++;
7608 }
7609 }
7610 }
7611
7612 vm_page_queue_iterate(&vm_lopage_queue_free, m, vmp_pageq) {
7613 assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
7614
7615 pages--;
7616 count_wire--;
7617 if (!preflight) {
7618 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7619 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7620
7621 hibernate_stats.cd_total_free++;
7622 }
7623 }
7624
7625 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
7626 while (m && !vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t)m)) {
7627 assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
7628
7629 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7630 discard = FALSE;
7631 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
7632 && hibernate_consider_discard(m, preflight)) {
7633 if (!preflight) {
7634 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7635 }
7636 count_discard_inactive++;
7637 discard = discard_all;
7638 } else {
7639 count_throttled++;
7640 }
7641 count_wire--;
7642 if (!preflight) {
7643 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7644 }
7645
7646 if (discard) {
7647 hibernate_discard_page(m);
7648 }
7649 m = next;
7650 }
7651
7652 m = (vm_page_t)vm_page_queue_first(&vm_page_queue_anonymous);
7653 while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
7654 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
7655
7656 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7657 discard = FALSE;
7658 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
7659 hibernate_consider_discard(m, preflight)) {
7660 if (!preflight) {
7661 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7662 }
7663 if (m->vmp_dirty) {
7664 count_discard_purgeable++;
7665 } else {
7666 count_discard_inactive++;
7667 }
7668 discard = discard_all;
7669 } else {
7670 count_anonymous++;
7671 }
7672 count_wire--;
7673 if (!preflight) {
7674 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7675 }
7676 if (discard) {
7677 hibernate_discard_page(m);
7678 }
7679 m = next;
7680 }
7681
7682 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
7683 while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
7684 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
7685
7686 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7687 discard = FALSE;
7688 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
7689 hibernate_consider_discard(m, preflight)) {
7690 if (!preflight) {
7691 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7692 }
7693 if (m->vmp_dirty) {
7694 count_discard_purgeable++;
7695 } else {
7696 count_discard_cleaned++;
7697 }
7698 discard = discard_all;
7699 } else {
7700 count_cleaned++;
7701 }
7702 count_wire--;
7703 if (!preflight) {
7704 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7705 }
7706 if (discard) {
7707 hibernate_discard_page(m);
7708 }
7709 m = next;
7710 }
7711
7712 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
7713 while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
7714 assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
7715
7716 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7717 discard = FALSE;
7718 if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) &&
7719 hibernate_consider_discard(m, preflight)) {
7720 if (!preflight) {
7721 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7722 }
7723 if (m->vmp_dirty) {
7724 count_discard_purgeable++;
7725 } else {
7726 count_discard_active++;
7727 }
7728 discard = discard_all;
7729 } else {
7730 count_active++;
7731 }
7732 count_wire--;
7733 if (!preflight) {
7734 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7735 }
7736 if (discard) {
7737 hibernate_discard_page(m);
7738 }
7739 m = next;
7740 }
7741
7742 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
7743 while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
7744 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
7745
7746 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7747 discard = FALSE;
7748 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
7749 hibernate_consider_discard(m, preflight)) {
7750 if (!preflight) {
7751 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7752 }
7753 if (m->vmp_dirty) {
7754 count_discard_purgeable++;
7755 } else {
7756 count_discard_inactive++;
7757 }
7758 discard = discard_all;
7759 } else {
7760 count_inactive++;
7761 }
7762 count_wire--;
7763 if (!preflight) {
7764 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7765 }
7766 if (discard) {
7767 hibernate_discard_page(m);
7768 }
7769 m = next;
7770 }
7771 /* XXX FBDP TODO: secluded queue */
7772
7773 for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
7774 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
7775 while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
7776 assertf(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q,
7777 "Bad page: %p (0x%x:0x%x) on queue %d has state: %d (Discard: %d, Preflight: %d)",
7778 m, m->vmp_pageq.next, m->vmp_pageq.prev, i, m->vmp_q_state, discard, preflight);
7779
7780 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7781 discard = FALSE;
7782 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
7783 hibernate_consider_discard(m, preflight)) {
7784 if (!preflight) {
7785 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7786 }
7787 count_discard_speculative++;
7788 discard = discard_all;
7789 } else {
7790 count_speculative++;
7791 }
7792 count_wire--;
7793 if (!preflight) {
7794 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7795 }
7796 if (discard) {
7797 hibernate_discard_page(m);
7798 }
7799 m = next;
7800 }
7801 }
7802
7803 vm_page_queue_iterate(&compressor_object->memq, m, vmp_listq) {
7804 assert(m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR);
7805
7806 count_compressor++;
7807 count_wire--;
7808 if (!preflight) {
7809 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7810 }
7811 }
7812
7813 if (preflight == FALSE && discard_all == TRUE) {
7814 KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START);
7815
7816 HIBLOG("hibernate_teardown started\n");
7817 count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired);
7818 HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages);
7819
7820 pages -= count_discard_vm_struct_pages;
7821 count_wire -= count_discard_vm_struct_pages;
7822
7823 hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages;
7824
7825 KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_END);
7826 }
7827
7828 if (!preflight) {
7829 // pull wired from hibernate_bitmap
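// A zero bit in page_list_wired identifies a page still presumed wired; OR-ing
// its complement into page_list sets those pages there as well, so they are
// saved only from the wired list and not a second time from the pageable list.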
7830 bitmap = &page_list->bank_bitmap[0];
7831 bitmap_wired = &page_list_wired->bank_bitmap[0];
7832 for (bank = 0; bank < page_list->bank_count; bank++) {
7833 for (i = 0; i < bitmap->bitmapwords; i++) {
7834 bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
7835 }
7836 bitmap = (hibernate_bitmap_t *)&bitmap->bitmap[bitmap->bitmapwords];
7837 bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
7838 }
7839 }
7840
7841 // machine dependent adjustments
7842 hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages);
7843
7844 if (!preflight) {
7845 hibernate_stats.cd_count_wire = count_wire;
7846 hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable +
7847 count_discard_speculative + count_discard_cleaned + count_discard_vm_struct_pages;
7848 }
7849
7850 clock_get_uptime(&end);
7851 absolutetime_to_nanoseconds(end - start, &nsec);
7852 HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
7853
7854 HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n %s discard act %d inact %d purgeable %d spec %d cleaned %d\n",
7855 pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped,
7856 discard_all ? "did" : "could",
7857 count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
7858
7859 if (hibernate_stats.cd_skipped_xpmapped) {
7860 HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);
7861 }
7862
7863 *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned;
7864
7865 if (preflight && will_discard) {
7866 *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
7867 }
7868
7869 hibernation_vmqueues_inspection = FALSE;
7870
7871 #if MACH_ASSERT || DEBUG
7872 if (!preflight) {
7873 if (vm_page_local_q) {
7874 zpercpu_foreach(lq, vm_page_local_q) {
7875 VPL_UNLOCK(&lq->vpl_lock);
7876 }
7877 }
7878 vm_page_unlock_queues();
7879 }
7880 #endif /* MACH_ASSERT || DEBUG */
7881
7882 if (preflight) {
7883 lck_mtx_unlock(&vm_page_queue_free_lock);
7884 vm_page_unlock_queues();
7885 vm_object_unlock(compressor_object);
7886 }
7887
7888 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0);
7889 }
7890
7891 void
7892 hibernate_page_list_discard(hibernate_page_list_t * page_list)
7893 {
7894 uint64_t start, end, nsec;
7895 vm_page_t m;
7896 vm_page_t next;
7897 uint32_t i;
7898 uint32_t count_discard_active = 0;
7899 uint32_t count_discard_inactive = 0;
7900 uint32_t count_discard_purgeable = 0;
7901 uint32_t count_discard_cleaned = 0;
7902 uint32_t count_discard_speculative = 0;
7903
7904
7905 #if MACH_ASSERT || DEBUG
7906 vm_page_lock_queues();
7907 if (vm_page_local_q) {
7908 zpercpu_foreach(lq, vm_page_local_q) {
7909 VPL_LOCK(&lq->vpl_lock);
7910 }
7911 }
7912 #endif /* MACH_ASSERT || DEBUG */
7913
7914 clock_get_uptime(&start);
7915
7916 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
7917 while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
7918 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
7919
7920 next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7921 if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
7922 if (m->vmp_dirty) {
7923 count_discard_purgeable++;
7924 } else {
7925 count_discard_inactive++;
7926 }
7927 hibernate_discard_page(m);
7928 }
7929 m = next;
7930 }
7931
7932 for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
7933 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
7934 while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
7935 assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);
7936
7937 next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7938 if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
7939 count_discard_speculative++;
7940 hibernate_discard_page(m);
7941 }
7942 m = next;
7943 }
7944 }
7945
7946 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
7947 while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
7948 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
7949
7950 next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7951 if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
7952 if (m->vmp_dirty) {
7953 count_discard_purgeable++;
7954 } else {
7955 count_discard_inactive++;
7956 }
7957 hibernate_discard_page(m);
7958 }
7959 m = next;
7960 }
7961 /* XXX FBDP TODO: secluded queue */
7962
7963 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
7964 while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
7965 assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
7966
7967 next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7968 if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
7969 if (m->vmp_dirty) {
7970 count_discard_purgeable++;
7971 } else {
7972 count_discard_active++;
7973 }
7974 hibernate_discard_page(m);
7975 }
7976 m = next;
7977 }
7978
7979 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
7980 while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
7981 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
7982
7983 next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7984 if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
7985 if (m->vmp_dirty) {
7986 count_discard_purgeable++;
7987 } else {
7988 count_discard_cleaned++;
7989 }
7990 hibernate_discard_page(m);
7991 }
7992 m = next;
7993 }
7994
7995 #if MACH_ASSERT || DEBUG
7996 if (vm_page_local_q) {
7997 zpercpu_foreach(lq, vm_page_local_q) {
7998 VPL_UNLOCK(&lq->vpl_lock);
7999 }
8000 }
8001 vm_page_unlock_queues();
8002 #endif /* MACH_ASSERT || DEBUG */
8003
8004 clock_get_uptime(&end);
8005 absolutetime_to_nanoseconds(end - start, &nsec);
8006 HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n",
8007 nsec / 1000000ULL,
8008 count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
8009 }
8010
8011 boolean_t hibernate_paddr_map_inited = FALSE;
8012 unsigned int hibernate_teardown_last_valid_compact_indx = -1;
8013 vm_page_t hibernate_rebuild_hash_list = NULL;
8014
8015 unsigned int hibernate_teardown_found_tabled_pages = 0;
8016 unsigned int hibernate_teardown_found_created_pages = 0;
8017 unsigned int hibernate_teardown_found_free_pages = 0;
8018 unsigned int hibernate_teardown_vm_page_free_count;
8019
8020
8021 struct ppnum_mapping {
8022 struct ppnum_mapping *ppnm_next;
8023 ppnum_t ppnm_base_paddr;
8024 unsigned int ppnm_sindx;
8025 unsigned int ppnm_eindx;
8026 };
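/*
 * Each ppnum_mapping describes a run of vm_pages[] entries
 * [ppnm_sindx, ppnm_eindx) whose physical page numbers are contiguous starting
 * at ppnm_base_paddr, so a compacted index translates as
 *     paddr = ppnm_base_paddr + (indx - ppnm_sindx)
 * which is exactly what hibernate_lookup_paddr() computes below.
 */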
8027
8028 struct ppnum_mapping *ppnm_head;
8029 struct ppnum_mapping *ppnm_last_found = NULL;
8030
8031
8032 void
8033 hibernate_create_paddr_map(void)
8034 {
8035 unsigned int i;
8036 ppnum_t next_ppnum_in_run = 0;
8037 struct ppnum_mapping *ppnm = NULL;
8038
8039 if (hibernate_paddr_map_inited == FALSE) {
8040 for (i = 0; i < vm_pages_count; i++) {
8041 if (ppnm) {
8042 ppnm->ppnm_eindx = i;
8043 }
8044
8045 if (ppnm == NULL || VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) != next_ppnum_in_run) {
8046 ppnm = zalloc_permanent_type(struct ppnum_mapping);
8047
8048 ppnm->ppnm_next = ppnm_head;
8049 ppnm_head = ppnm;
8050
8051 ppnm->ppnm_sindx = i;
8052 ppnm->ppnm_base_paddr = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]);
8053 }
8054 next_ppnum_in_run = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) + 1;
8055 }
8056 ppnm->ppnm_eindx++;
8057
8058 hibernate_paddr_map_inited = TRUE;
8059 }
8060 }
8061
8062 ppnum_t
8063 hibernate_lookup_paddr(unsigned int indx)
8064 {
8065 struct ppnum_mapping *ppnm = NULL;
8066
8067 ppnm = ppnm_last_found;
8068
8069 if (ppnm) {
8070 if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
8071 goto done;
8072 }
8073 }
8074 for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {
8075 if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
8076 ppnm_last_found = ppnm;
8077 break;
8078 }
8079 }
8080 if (ppnm == NULL) {
8081 panic("hibernate_lookup_paddr of %d failed\n", indx);
8082 }
8083 done:
8084 return ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx);
8085 }
8086
8087
8088 uint32_t
8089 hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
8090 {
8091 addr64_t saddr_aligned;
8092 addr64_t eaddr_aligned;
8093 addr64_t addr;
8094 ppnum_t paddr;
8095 unsigned int mark_as_unneeded_pages = 0;
8096
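/* round the start up and the end down so that only pages lying entirely
 * inside [saddr, eaddr) are marked as unneeded */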
8097 saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
8098 eaddr_aligned = eaddr & ~PAGE_MASK_64;
8099
8100 for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {
8101 paddr = pmap_find_phys(kernel_pmap, addr);
8102
8103 assert(paddr);
8104
8105 hibernate_page_bitset(page_list, TRUE, paddr);
8106 hibernate_page_bitset(page_list_wired, TRUE, paddr);
8107
8108 mark_as_unneeded_pages++;
8109 }
8110 return mark_as_unneeded_pages;
8111 }
8112
8113
8114 void
8115 hibernate_hash_insert_page(vm_page_t mem)
8116 {
8117 vm_page_bucket_t *bucket;
8118 int hash_id;
8119 vm_object_t m_object;
8120
8121 m_object = VM_PAGE_OBJECT(mem);
8122
8123 assert(mem->vmp_hashed);
8124 assert(m_object);
8125 assert(mem->vmp_offset != (vm_object_offset_t) -1);
8126
8127 /*
8128 * Insert it into the object/offset hash table
8129 */
8130 hash_id = vm_page_hash(m_object, mem->vmp_offset);
8131 bucket = &vm_page_buckets[hash_id];
8132
8133 mem->vmp_next_m = bucket->page_list;
8134 bucket->page_list = VM_PAGE_PACK_PTR(mem);
8135 }
8136
8137
8138 void
8139 hibernate_free_range(int sindx, int eindx)
8140 {
8141 vm_page_t mem;
8142 unsigned int color;
8143
8144 while (sindx < eindx) {
8145 mem = &vm_pages[sindx];
8146
8147 vm_page_init(mem, hibernate_lookup_paddr(sindx), FALSE);
8148
8149 mem->vmp_lopage = FALSE;
8150 mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
8151
8152 color = VM_PAGE_GET_COLOR(mem);
8153 #if defined(__x86_64__)
8154 vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
8155 #else
8156 vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
8157 #endif
8158 vm_page_free_count++;
8159
8160 sindx++;
8161 }
8162 }
8163
8164 void
8165 hibernate_rebuild_vm_structs(void)
8166 {
8167 int i, cindx, sindx, eindx;
8168 vm_page_t mem, tmem, mem_next;
8169 AbsoluteTime startTime, endTime;
8170 uint64_t nsec;
8171
8172 if (hibernate_rebuild_needed == FALSE) {
8173 return;
8174 }
8175
8176 KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START);
8177 HIBLOG("hibernate_rebuild started\n");
8178
8179 clock_get_uptime(&startTime);
8180
8181 pal_hib_rebuild_pmap_structs();
8182
8183 bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t));
8184 eindx = vm_pages_count;
8185
8186 /*
8187 * Mark all the vm_pages[] that have not been initialized yet as being
8188 * transient. This is needed to ensure that the buddy page search is correct.
8189 * Without this, random data in these vm_pages[] can trip up the buddy search.
8190 */
8191 for (i = hibernate_teardown_last_valid_compact_indx + 1; i < eindx; ++i) {
8192 vm_pages[i].vmp_q_state = VM_PAGE_NOT_ON_Q;
8193 }
8194
8195 for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
8196 mem = &vm_pages[cindx];
8197 assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
8198 /*
8199 * hibernate_teardown_vm_structs leaves the location where
8200 * this vm_page_t must be restored in vmp_next_m.
8201 */
8202 tmem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8203 mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
8204
8205 sindx = (int)(tmem - &vm_pages[0]);
8206
8207 if (mem != tmem) {
8208 /*
8209 * this vm_page_t was moved by hibernate_teardown_vm_structs,
8210 * so move it back to its real location
8211 */
8212 *tmem = *mem;
8213 mem = tmem;
8214 }
8215 if (mem->vmp_hashed) {
8216 hibernate_hash_insert_page(mem);
8217 }
8218 /*
8219 * the 'hole' between this vm_page_t and the previous
8220 * vm_page_t we moved needs to be initialized as
8221 * a range of free vm_page_t's
8222 */
8223 hibernate_free_range(sindx + 1, eindx);
8224
8225 eindx = sindx;
8226 }
8227 if (sindx) {
8228 hibernate_free_range(0, sindx);
8229 }
8230
8231 assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);
8232
8233 /*
8234 * process the list of vm_page_t's that were entered in the hash,
8235 * but were not located in the vm_pages array... these are
8236 * vm_page_t's that were created on the fly (i.e. fictitious)
8237 */
8238 for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
8239 mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8240
8241 mem->vmp_next_m = 0;
8242 hibernate_hash_insert_page(mem);
8243 }
8244 hibernate_rebuild_hash_list = NULL;
8245
8246 clock_get_uptime(&endTime);
8247 SUB_ABSOLUTETIME(&endTime, &startTime);
8248 absolutetime_to_nanoseconds(endTime, &nsec);
8249
8250 HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL);
8251
8252 hibernate_rebuild_needed = FALSE;
8253
8254 KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END);
8255 }
8256
8257 uint32_t
8258 hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
8259 {
8260 unsigned int i;
8261 unsigned int compact_target_indx;
8262 vm_page_t mem, mem_next;
8263 vm_page_bucket_t *bucket;
8264 unsigned int mark_as_unneeded_pages = 0;
8265 unsigned int unneeded_vm_page_bucket_pages = 0;
8266 unsigned int unneeded_vm_pages_pages = 0;
8267 unsigned int unneeded_pmap_pages = 0;
8268 addr64_t start_of_unneeded = 0;
8269 addr64_t end_of_unneeded = 0;
8270
8271
8272 if (hibernate_should_abort()) {
8273 return 0;
8274 }
8275
8276 hibernate_rebuild_needed = TRUE;
8277
8278 HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n",
8279 vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count,
8280 vm_page_cleaned_count, compressor_object->resident_page_count);
8281
8282 for (i = 0; i < vm_page_bucket_count; i++) {
8283 bucket = &vm_page_buckets[i];
8284
8285 for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)); mem != VM_PAGE_NULL; mem = mem_next) {
8286 assert(mem->vmp_hashed);
8287
8288 mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8289
8290 if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) {
8291 mem->vmp_next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list);
8292 hibernate_rebuild_hash_list = mem;
8293 }
8294 }
8295 }
8296 unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0], (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired);
8297 mark_as_unneeded_pages += unneeded_vm_page_bucket_pages;
8298
8299 hibernate_teardown_vm_page_free_count = vm_page_free_count;
8300
8301 compact_target_indx = 0;
8302
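/*
 * Compaction sketch: free vm_page_t's are pulled off the free queues and the
 * remaining in-use entries are slid down into the holes they leave, with each
 * entry's original location stashed in vmp_next_m so that
 * hibernate_rebuild_vm_structs() can move it back and re-create the freed
 * ranges after wakeup.
 */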
8303 for (i = 0; i < vm_pages_count; i++) {
8304 mem = &vm_pages[i];
8305
8306 if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
8307 unsigned int color;
8308
8309 assert(mem->vmp_busy);
8310 assert(!mem->vmp_lopage);
8311
8312 color = VM_PAGE_GET_COLOR(mem);
8313
8314 vm_page_queue_remove(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
8315
8316 VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
8317
8318 vm_page_free_count--;
8319
8320 hibernate_teardown_found_free_pages++;
8321
8322 if (vm_pages[compact_target_indx].vmp_q_state != VM_PAGE_ON_FREE_Q) {
8323 compact_target_indx = i;
8324 }
8325 } else {
8326 /*
8327 * record this vm_page_t's original location;
8328 * we need this even if it doesn't get moved,
8329 * as an indicator to the rebuild function that
8330 * it doesn't have to be moved back
8331 */
8332 mem->vmp_next_m = VM_PAGE_PACK_PTR(mem);
8333
8334 if (vm_pages[compact_target_indx].vmp_q_state == VM_PAGE_ON_FREE_Q) {
8335 /*
8336 * we've got a hole to fill, so
8337 * move this vm_page_t to its new home
8338 */
8339 vm_pages[compact_target_indx] = *mem;
8340 mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
8341
8342 hibernate_teardown_last_valid_compact_indx = compact_target_indx;
8343 compact_target_indx++;
8344 } else {
8345 hibernate_teardown_last_valid_compact_indx = i;
8346 }
8347 }
8348 }
8349 unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx + 1],
8350 (addr64_t)&vm_pages[vm_pages_count - 1], page_list, page_list_wired);
8351 mark_as_unneeded_pages += unneeded_vm_pages_pages;
8352
8353 pal_hib_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded);
8354
8355 if (start_of_unneeded) {
8356 unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired);
8357 mark_as_unneeded_pages += unneeded_pmap_pages;
8358 }
8359 HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages);
8360
8361 return mark_as_unneeded_pages;
8362 }
8363
8364
8365 #endif /* HIBERNATION */
8366
8367 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
8368
8369 #include <mach_vm_debug.h>
8370 #if MACH_VM_DEBUG
8371
8372 #include <mach_debug/hash_info.h>
8373 #include <vm/vm_debug.h>
8374
8375 /*
8376 * Routine: vm_page_info
8377 * Purpose:
8378 * Return information about the global VP table.
8379 * Fills the buffer with as much information as possible
8380 * and returns the desired size of the buffer.
8381 * Conditions:
8382 * Nothing locked. The caller should provide
8383 * possibly-pageable memory.
8384 */
8385
8386 unsigned int
8387 vm_page_info(
8388 hash_info_bucket_t *info,
8389 unsigned int count)
8390 {
8391 unsigned int i;
8392 lck_spin_t *bucket_lock;
8393
8394 if (vm_page_bucket_count < count) {
8395 count = vm_page_bucket_count;
8396 }
8397
8398 for (i = 0; i < count; i++) {
8399 vm_page_bucket_t *bucket = &vm_page_buckets[i];
8400 unsigned int bucket_count = 0;
8401 vm_page_t m;
8402
8403 bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
8404 lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
8405
8406 for (m = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
8407 m != VM_PAGE_NULL;
8408 m = (vm_page_t)(VM_PAGE_UNPACK_PTR(m->vmp_next_m))) {
8409 bucket_count++;
8410 }
8411
8412 lck_spin_unlock(bucket_lock);
8413
8414 /* don't touch pageable memory while holding locks */
8415 info[i].hib_count = bucket_count;
8416 }
8417
8418 return vm_page_bucket_count;
8419 }
8420 #endif /* MACH_VM_DEBUG */
8421
8422 #if VM_PAGE_BUCKETS_CHECK
8423 void
8424 vm_page_buckets_check(void)
8425 {
8426 unsigned int i;
8427 vm_page_t p;
8428 unsigned int p_hash;
8429 vm_page_bucket_t *bucket;
8430 lck_spin_t *bucket_lock;
8431
8432 if (!vm_page_buckets_check_ready) {
8433 return;
8434 }
8435
8436 #if HIBERNATION
8437 if (hibernate_rebuild_needed ||
8438 hibernate_rebuild_hash_list) {
8439 panic("BUCKET_CHECK: hibernation in progress: "
8440 "rebuild_needed=%d rebuild_hash_list=%p\n",
8441 hibernate_rebuild_needed,
8442 hibernate_rebuild_hash_list);
8443 }
8444 #endif /* HIBERNATION */
8445
8446 #if VM_PAGE_FAKE_BUCKETS
8447 char *cp;
8448 for (cp = (char *) vm_page_fake_buckets_start;
8449 cp < (char *) vm_page_fake_buckets_end;
8450 cp++) {
8451 if (*cp != 0x5a) {
8452 panic("BUCKET_CHECK: corruption at %p in fake buckets "
8453 "[0x%llx:0x%llx]\n",
8454 cp,
8455 (uint64_t) vm_page_fake_buckets_start,
8456 (uint64_t) vm_page_fake_buckets_end);
8457 }
8458 }
8459 #endif /* VM_PAGE_FAKE_BUCKETS */
8460
8461 for (i = 0; i < vm_page_bucket_count; i++) {
8462 vm_object_t p_object;
8463
8464 bucket = &vm_page_buckets[i];
8465 if (!bucket->page_list) {
8466 continue;
8467 }
8468
8469 bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
8470 lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
8471 p = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
8472
8473 while (p != VM_PAGE_NULL) {
8474 p_object = VM_PAGE_OBJECT(p);
8475
8476 if (!p->vmp_hashed) {
8477 panic("BUCKET_CHECK: page %p (%p,0x%llx) "
8478 "hash %d in bucket %d at %p "
8479 "is not hashed\n",
8480 p, p_object, p->vmp_offset,
8481 p_hash, i, bucket);
8482 }
8483 p_hash = vm_page_hash(p_object, p->vmp_offset);
8484 if (p_hash != i) {
8485 panic("BUCKET_CHECK: corruption in bucket %d "
8486 "at %p: page %p object %p offset 0x%llx "
8487 "hash %d\n",
8488 i, bucket, p, p_object, p->vmp_offset,
8489 p_hash);
8490 }
8491 p = (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m));
8492 }
8493 lck_spin_unlock(bucket_lock);
8494 }
8495
8496 // printf("BUCKET_CHECK: checked buckets\n");
8497 }
8498 #endif /* VM_PAGE_BUCKETS_CHECK */
8499
8500 /*
8501 * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
8502 * local queues if they exist... it's the only spot in the system where we add pages
8503 * to those queues... once on those queues, those pages can only move to one of the
8504 * global page queues or the free queues... they NEVER move from local q to local q.
8505 * the 'local' state is stable when vm_page_queues_remove is called since we're behind
8506 * the global vm_page_queue_lock at this point... we still need to take the local lock
8507 * in case this operation is being run on a different CPU than the one the local queue belongs to,
8508 * but we don't have to worry about the page moving to a global queue or becoming wired
8509 * while we're grabbing the local lock since those operations would require the global
8510 * vm_page_queue_lock to be held, and we already own it.
8511 *
8512 * this is why it's safe to utilize the wire_count field in the vm_page_t as the local_id...
8513 * 'wired' and local are ALWAYS mutually exclusive conditions.
8514 */
8515
8516 #if CONFIG_BACKGROUND_QUEUE
8517 void
8518 vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_backgroundq)
8519 #else
8520 void
8521 vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq)
8522 #endif
8523 {
8524 boolean_t was_pageable = TRUE;
8525 vm_object_t m_object;
8526
8527 m_object = VM_PAGE_OBJECT(mem);
8528
8529 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
8530
8531 if (mem->vmp_q_state == VM_PAGE_NOT_ON_Q) {
8532 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
8533 #if CONFIG_BACKGROUND_QUEUE
8534 if (remove_from_backgroundq == TRUE) {
8535 vm_page_remove_from_backgroundq(mem);
8536 }
8537 if (mem->vmp_on_backgroundq) {
8538 assert(mem->vmp_backgroundq.next != 0);
8539 assert(mem->vmp_backgroundq.prev != 0);
8540 } else {
8541 assert(mem->vmp_backgroundq.next == 0);
8542 assert(mem->vmp_backgroundq.prev == 0);
8543 }
8544 #endif /* CONFIG_BACKGROUND_QUEUE */
8545 return;
8546 }
8547
8548 if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
8549 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
8550 #if CONFIG_BACKGROUND_QUEUE
8551 assert(mem->vmp_backgroundq.next == 0 &&
8552 mem->vmp_backgroundq.prev == 0 &&
8553 mem->vmp_on_backgroundq == FALSE);
8554 #endif
8555 return;
8556 }
8557 if (mem->vmp_q_state == VM_PAGE_IS_WIRED) {
8558 /*
8559 * we might put these guys on a list for debugging purposes;
8560 * if we do, we'll need to remove this assert
8561 */
8562 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
8563 #if CONFIG_BACKGROUND_QUEUE
8564 assert(mem->vmp_backgroundq.next == 0 &&
8565 mem->vmp_backgroundq.prev == 0 &&
8566 mem->vmp_on_backgroundq == FALSE);
8567 #endif
8568 return;
8569 }
8570
8571 assert(m_object != compressor_object);
8572 assert(m_object != kernel_object);
8573 assert(m_object != vm_submap_object);
8574 assert(!mem->vmp_fictitious);
8575
8576 switch (mem->vmp_q_state) {
8577 case VM_PAGE_ON_ACTIVE_LOCAL_Q:
8578 {
8579 struct vpl *lq;
8580
8581 lq = zpercpu_get_cpu(vm_page_local_q, mem->vmp_local_id);
8582 VPL_LOCK(&lq->vpl_lock);
8583 vm_page_queue_remove(&lq->vpl_queue, mem, vmp_pageq);
8584 mem->vmp_local_id = 0;
8585 lq->vpl_count--;
8586 if (m_object->internal) {
8587 lq->vpl_internal_count--;
8588 } else {
8589 lq->vpl_external_count--;
8590 }
8591 VPL_UNLOCK(&lq->vpl_lock);
8592 was_pageable = FALSE;
8593 break;
8594 }
8595 case VM_PAGE_ON_ACTIVE_Q:
8596 {
8597 vm_page_queue_remove(&vm_page_queue_active, mem, vmp_pageq);
8598 vm_page_active_count--;
8599 break;
8600 }
8601
8602 case VM_PAGE_ON_INACTIVE_INTERNAL_Q:
8603 {
8604 assert(m_object->internal == TRUE);
8605
8606 vm_page_inactive_count--;
8607 vm_page_queue_remove(&vm_page_queue_anonymous, mem, vmp_pageq);
8608 vm_page_anonymous_count--;
8609
8610 vm_purgeable_q_advance_all();
8611 vm_page_balance_inactive(3);
8612 break;
8613 }
8614
8615 case VM_PAGE_ON_INACTIVE_EXTERNAL_Q:
8616 {
8617 assert(m_object->internal == FALSE);
8618
8619 vm_page_inactive_count--;
8620 vm_page_queue_remove(&vm_page_queue_inactive, mem, vmp_pageq);
8621 vm_purgeable_q_advance_all();
8622 vm_page_balance_inactive(3);
8623 break;
8624 }
8625
8626 case VM_PAGE_ON_INACTIVE_CLEANED_Q:
8627 {
8628 assert(m_object->internal == FALSE);
8629
8630 vm_page_inactive_count--;
8631 vm_page_queue_remove(&vm_page_queue_cleaned, mem, vmp_pageq);
8632 vm_page_cleaned_count--;
8633 vm_page_balance_inactive(3);
8634 break;
8635 }
8636
8637 case VM_PAGE_ON_THROTTLED_Q:
8638 {
8639 assert(m_object->internal == TRUE);
8640
8641 vm_page_queue_remove(&vm_page_queue_throttled, mem, vmp_pageq);
8642 vm_page_throttled_count--;
8643 was_pageable = FALSE;
8644 break;
8645 }
8646
8647 case VM_PAGE_ON_SPECULATIVE_Q:
8648 {
8649 assert(m_object->internal == FALSE);
8650
8651 vm_page_remque(&mem->vmp_pageq);
8652 vm_page_speculative_count--;
8653 vm_page_balance_inactive(3);
8654 break;
8655 }
8656
8657 #if CONFIG_SECLUDED_MEMORY
8658 case VM_PAGE_ON_SECLUDED_Q:
8659 {
8660 vm_page_queue_remove(&vm_page_queue_secluded, mem, vmp_pageq);
8661 vm_page_secluded_count--;
8662 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
8663 if (m_object == VM_OBJECT_NULL) {
8664 vm_page_secluded_count_free--;
8665 was_pageable = FALSE;
8666 } else {
8667 assert(!m_object->internal);
8668 vm_page_secluded_count_inuse--;
8669 was_pageable = FALSE;
8670 // was_pageable = TRUE;
8671 }
8672 break;
8673 }
8674 #endif /* CONFIG_SECLUDED_MEMORY */
8675
8676 default:
8677 {
8678 /*
8679 * if (mem->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)
8680 * NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue...
8681 * the caller is responsible for determining if the page is on that queue, and if so, must
8682 * either first remove it (it needs both the page queues lock and the object lock to do
8683 * this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove
8684 *
8685 * we also don't expect to encounter VM_PAGE_ON_FREE_Q, VM_PAGE_ON_FREE_LOCAL_Q, VM_PAGE_ON_FREE_LOPAGE_Q
8686 * or any of the undefined states
8687 */
8688 panic("vm_page_queues_remove - bad page q_state (%p, %d)\n", mem, mem->vmp_q_state);
8689 break;
8690 }
8691 }
8692 VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
8693 mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
8694
8695 #if CONFIG_BACKGROUND_QUEUE
8696 if (remove_from_backgroundq == TRUE) {
8697 vm_page_remove_from_backgroundq(mem);
8698 }
8699 #endif
8700 if (was_pageable) {
8701 if (m_object->internal) {
8702 vm_page_pageable_internal_count--;
8703 } else {
8704 vm_page_pageable_external_count--;
8705 }
8706 }
8707 }
8708
8709 void
8710 vm_page_remove_internal(vm_page_t page)
8711 {
8712 vm_object_t __object = VM_PAGE_OBJECT(page);
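/*
 * If this page is the object's memq lookup hint, slide the hint to a
 * neighbouring page (next, else previous, else none) before unlinking.
 */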
8713 if (page == __object->memq_hint) {
8714 vm_page_t __new_hint;
8715 vm_page_queue_entry_t __qe;
8716 __qe = (vm_page_queue_entry_t)vm_page_queue_next(&page->vmp_listq);
8717 if (vm_page_queue_end(&__object->memq, __qe)) {
8718 __qe = (vm_page_queue_entry_t)vm_page_queue_prev(&page->vmp_listq);
8719 if (vm_page_queue_end(&__object->memq, __qe)) {
8720 __qe = NULL;
8721 }
8722 }
8723 __new_hint = (vm_page_t)((uintptr_t) __qe);
8724 __object->memq_hint = __new_hint;
8725 }
8726 vm_page_queue_remove(&__object->memq, page, vmp_listq);
8727 #if CONFIG_SECLUDED_MEMORY
8728 if (__object->eligible_for_secluded) {
8729 vm_page_secluded.eligible_for_secluded--;
8730 }
8731 #endif /* CONFIG_SECLUDED_MEMORY */
8732 }
8733
8734 void
8735 vm_page_enqueue_inactive(vm_page_t mem, boolean_t first)
8736 {
8737 vm_object_t m_object;
8738
8739 m_object = VM_PAGE_OBJECT(mem);
8740
8741 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
8742 assert(!mem->vmp_fictitious);
8743 assert(!mem->vmp_laundry);
8744 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
8745 vm_page_check_pageable_safe(mem);
8746
8747 if (m_object->internal) {
8748 mem->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
8749
8750 if (first == TRUE) {
8751 vm_page_queue_enter_first(&vm_page_queue_anonymous, mem, vmp_pageq);
8752 } else {
8753 vm_page_queue_enter(&vm_page_queue_anonymous, mem, vmp_pageq);
8754 }
8755
8756 vm_page_anonymous_count++;
8757 vm_page_pageable_internal_count++;
8758 } else {
8759 mem->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
8760
8761 if (first == TRUE) {
8762 vm_page_queue_enter_first(&vm_page_queue_inactive, mem, vmp_pageq);
8763 } else {
8764 vm_page_queue_enter(&vm_page_queue_inactive, mem, vmp_pageq);
8765 }
8766
8767 vm_page_pageable_external_count++;
8768 }
8769 vm_page_inactive_count++;
8770 token_new_pagecount++;
8771
8772 #if CONFIG_BACKGROUND_QUEUE
8773 if (mem->vmp_in_background) {
8774 vm_page_add_to_backgroundq(mem, FALSE);
8775 }
8776 #endif
8777 }
8778
8779 void
8780 vm_page_enqueue_active(vm_page_t mem, boolean_t first)
8781 {
8782 vm_object_t m_object;
8783
8784 m_object = VM_PAGE_OBJECT(mem);
8785
8786 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
8787 assert(!mem->vmp_fictitious);
8788 assert(!mem->vmp_laundry);
8789 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
8790 vm_page_check_pageable_safe(mem);
8791
8792 mem->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
8793 if (first == TRUE) {
8794 vm_page_queue_enter_first(&vm_page_queue_active, mem, vmp_pageq);
8795 } else {
8796 vm_page_queue_enter(&vm_page_queue_active, mem, vmp_pageq);
8797 }
8798 vm_page_active_count++;
8799
8800 if (m_object->internal) {
8801 vm_page_pageable_internal_count++;
8802 } else {
8803 vm_page_pageable_external_count++;
8804 }
8805
8806 #if CONFIG_BACKGROUND_QUEUE
8807 if (mem->vmp_in_background) {
8808 vm_page_add_to_backgroundq(mem, FALSE);
8809 }
8810 #endif
8811 vm_page_balance_inactive(3);
8812 }
8813
8814 /*
8815 * Pages from special kernel objects shouldn't
8816 * be placed on pageable queues.
8817 */
8818 void
8819 vm_page_check_pageable_safe(vm_page_t page)
8820 {
8821 vm_object_t page_object;
8822
8823 page_object = VM_PAGE_OBJECT(page);
8824
8825 if (page_object == kernel_object) {
8826 panic("vm_page_check_pageable_safe: trying to add page " \
8827 "from kernel object (%p) to pageable queue", kernel_object);
8828 }
8829
8830 if (page_object == compressor_object) {
8831 panic("vm_page_check_pageable_safe: trying to add page " \
8832 "from compressor object (%p) to pageable queue", compressor_object);
8833 }
8834
8835 if (page_object == vm_submap_object) {
8836 panic("vm_page_check_pageable_safe: trying to add page " \
8837 "from submap object (%p) to pageable queue", vm_submap_object);
8838 }
8839 }
8840
8841 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
8842 * wired page diagnose
8843 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
8844
8845 #include <libkern/OSKextLibPrivate.h>
8846
8847 #define KA_SIZE(namelen, subtotalscount) \
8848 (sizeof(struct vm_allocation_site) + (namelen) + 1 + ((subtotalscount) * sizeof(struct vm_allocation_total)))
8849
8850 #define KA_NAME(alloc) \
8851 ((char *)(&(alloc)->subtotals[(alloc->subtotalscount)]))
8852
8853 #define KA_NAME_LEN(alloc) \
8854 (VM_TAG_NAME_LEN_MAX & (alloc->flags >> VM_TAG_NAME_LEN_SHIFT))
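/*
 * Layout implied by the macros above: a vm_allocation_site is followed, in the
 * same allocation, by its subtotals[] array and then a NUL-terminated name.
 * KA_NAME() points just past the subtotals; the name length is kept in the
 * upper bits of 'flags'.
 */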
8855
8856 vm_tag_t
8857 vm_tag_bt(void)
8858 {
8859 uintptr_t* frameptr;
8860 uintptr_t* frameptr_next;
8861 uintptr_t retaddr;
8862 uintptr_t kstackb, kstackt;
8863 const vm_allocation_site_t * site;
8864 thread_t cthread;
8865 kern_allocation_name_t name;
8866
8867 cthread = current_thread();
8868 if (__improbable(cthread == NULL)) {
8869 return VM_KERN_MEMORY_OSFMK;
8870 }
8871
8872 if ((name = thread_get_kernel_state(cthread)->allocation_name)) {
8873 if (!name->tag) {
8874 vm_tag_alloc(name);
8875 }
8876 return name->tag;
8877 }
8878
8879 kstackb = cthread->kernel_stack;
8880 kstackt = kstackb + kernel_stack_size;
8881
8882 /* Load stack frame pointer (EBP on x86) into frameptr */
8883 frameptr = __builtin_frame_address(0);
8884 site = NULL;
8885 while (frameptr != NULL) {
8886 /* Verify thread stack bounds */
8887 if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) {
8888 break;
8889 }
8890
8891 /* Next frame pointer is pointed to by the previous one */
8892 frameptr_next = (uintptr_t*) *frameptr;
8893
8894 /* Pull return address from one spot above the frame pointer */
8895 retaddr = *(frameptr + 1);
8896
8897 #if defined(HAS_APPLE_PAC)
8898 retaddr = (uintptr_t) ptrauth_strip((void *)retaddr, ptrauth_key_return_address);
8899 #endif
8900
8901 if (((retaddr < vm_kernel_builtinkmod_text_end) && (retaddr >= vm_kernel_builtinkmod_text))
8902 || (retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top)) {
8903 site = OSKextGetAllocationSiteForCaller(retaddr);
8904 break;
8905 }
8906 frameptr = frameptr_next;
8907 }
8908
8909 return site ? site->tag : VM_KERN_MEMORY_NONE;
8910 }
8911
8912 static uint64_t free_tag_bits[VM_MAX_TAG_VALUE / 64];
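/*
 * Bitmap of unused tag values: tag 't' is tracked by bit (63 - (t & 63)) of
 * free_tag_bits[t >> 6], and a set bit means the tag is free (see
 * vm_tag_free_locked() and the __builtin_clzll() scan in vm_tag_alloc_locked()).
 */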
8913
8914 void
8915 vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP)
8916 {
8917 vm_tag_t tag;
8918 uint64_t avail;
8919 uint32_t idx;
8920 vm_allocation_site_t * prev;
8921
8922 if (site->tag) {
8923 return;
8924 }
8925
8926 idx = 0;
8927 while (TRUE) {
8928 avail = free_tag_bits[idx];
8929 if (avail) {
8930 tag = (vm_tag_t)__builtin_clzll(avail);
8931 avail &= ~(1ULL << (63 - tag));
8932 free_tag_bits[idx] = avail;
8933 tag += (idx << 6);
8934 break;
8935 }
8936 idx++;
8937 if (idx >= ARRAY_COUNT(free_tag_bits)) {
8938 for (idx = 0; idx < ARRAY_COUNT(vm_allocation_sites); idx++) {
8939 prev = vm_allocation_sites[idx];
8940 if (!prev) {
8941 continue;
8942 }
8943 if (!KA_NAME_LEN(prev)) {
8944 continue;
8945 }
8946 if (!prev->tag) {
8947 continue;
8948 }
8949 if (prev->total) {
8950 continue;
8951 }
8952 if (1 != prev->refcount) {
8953 continue;
8954 }
8955
8956 assert(idx == prev->tag);
8957 tag = (vm_tag_t)idx;
8958 prev->tag = VM_KERN_MEMORY_NONE;
8959 *releasesiteP = prev;
8960 break;
8961 }
8962 if (idx >= ARRAY_COUNT(vm_allocation_sites)) {
8963 tag = VM_KERN_MEMORY_ANY;
8964 }
8965 break;
8966 }
8967 }
8968 site->tag = tag;
8969
8970 OSAddAtomic16(1, &site->refcount);
8971
8972 if (VM_KERN_MEMORY_ANY != tag) {
8973 vm_allocation_sites[tag] = site;
8974 }
8975
8976 if (tag > vm_allocation_tag_highest) {
8977 vm_allocation_tag_highest = tag;
8978 }
8979 }
8980
8981 static void
8982 vm_tag_free_locked(vm_tag_t tag)
8983 {
8984 uint64_t avail;
8985 uint32_t idx;
8986 uint64_t bit;
8987
8988 if (VM_KERN_MEMORY_ANY == tag) {
8989 return;
8990 }
8991
8992 idx = (tag >> 6);
8993 avail = free_tag_bits[idx];
8994 tag &= 63;
8995 bit = (1ULL << (63 - tag));
8996 assert(!(avail & bit));
8997 free_tag_bits[idx] = (avail | bit);
8998 }
8999
9000 static void
9001 vm_tag_init(void)
9002 {
9003 vm_tag_t tag;
9004 for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++) {
9005 vm_tag_free_locked(tag);
9006 }
9007
9008 for (tag = VM_KERN_MEMORY_ANY + 1; tag < VM_MAX_TAG_VALUE; tag++) {
9009 vm_tag_free_locked(tag);
9010 }
9011 }
9012
9013 vm_tag_t
9014 vm_tag_alloc(vm_allocation_site_t * site)
9015 {
9016 vm_tag_t tag;
9017 vm_allocation_site_t * releasesite;
9018
9019 if (VM_TAG_BT & site->flags) {
9020 tag = vm_tag_bt();
9021 if (VM_KERN_MEMORY_NONE != tag) {
9022 return tag;
9023 }
9024 }
9025
9026 if (!site->tag) {
9027 releasesite = NULL;
9028 lck_spin_lock(&vm_allocation_sites_lock);
9029 vm_tag_alloc_locked(site, &releasesite);
9030 lck_spin_unlock(&vm_allocation_sites_lock);
9031 if (releasesite) {
9032 kern_allocation_name_release(releasesite);
9033 }
9034 }
9035
9036 return site->tag;
9037 }
9038
9039 void
9040 vm_tag_update_size(vm_tag_t tag, int64_t delta)
9041 {
9042 vm_allocation_site_t * allocation;
9043 uint64_t prior;
9044
9045 assert(VM_KERN_MEMORY_NONE != tag);
9046 assert(tag < VM_MAX_TAG_VALUE);
9047
9048 allocation = vm_allocation_sites[tag];
9049 assert(allocation);
9050
9051 if (delta < 0) {
9052 assertf(allocation->total >= ((uint64_t)-delta), "tag %d, site %p", tag, allocation);
9053 }
9054 prior = OSAddAtomic64(delta, &allocation->total);
9055
9056 #if DEBUG || DEVELOPMENT
9057
9058 uint64_t new, peak;
9059 new = prior + delta;
9060 do{
9061 peak = allocation->peak;
9062 if (new <= peak) {
9063 break;
9064 }
9065 }while (!OSCompareAndSwap64(peak, new, &allocation->peak));
9066
9067 #endif /* DEBUG || DEVELOPMENT */
9068
9069 if (tag < VM_KERN_MEMORY_FIRST_DYNAMIC) {
9070 return;
9071 }
9072
9073 if (!prior && !allocation->tag) {
9074 vm_tag_alloc(allocation);
9075 }
9076 }
9077
9078 void
9079 kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta)
9080 {
9081 uint64_t prior;
9082
9083 if (delta < 0) {
9084 assertf(allocation->total >= ((uint64_t)-delta), "name %p", allocation);
9085 }
9086 prior = OSAddAtomic64(delta, &allocation->total);
9087
9088 #if DEBUG || DEVELOPMENT
9089
9090 uint64_t new, peak;
9091 new = prior + delta;
9092 do{
9093 peak = allocation->peak;
9094 if (new <= peak) {
9095 break;
9096 }
9097 }while (!OSCompareAndSwap64(peak, new, &allocation->peak));
9098
9099 #endif /* DEBUG || DEVELOPMENT */
9100
9101 if (!prior && !allocation->tag) {
9102 vm_tag_alloc(allocation);
9103 }
9104 }
9105
9106 #if VM_MAX_TAG_ZONES
9107
9108 void
9109 vm_allocation_zones_init(void)
9110 {
9111 kern_return_t ret;
9112 vm_offset_t addr;
9113 vm_size_t size;
9114
9115 size = VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *)
9116 + 2 * VM_MAX_TAG_ZONES * sizeof(vm_allocation_zone_total_t);
9117
9118 ret = kernel_memory_allocate(kernel_map,
9119 &addr, round_page(size), 0,
9120 KMA_ZERO, VM_KERN_MEMORY_DIAG);
9121 assert(KERN_SUCCESS == ret);
9122
9123 vm_allocation_zone_totals = (vm_allocation_zone_total_t **) addr;
9124 addr += VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *);
9125
9126 // prepopulate VM_KERN_MEMORY_DIAG & VM_KERN_MEMORY_KALLOC so allocations
9127 // in vm_tag_update_zone_size() won't recurse
9128 vm_allocation_zone_totals[VM_KERN_MEMORY_DIAG] = (vm_allocation_zone_total_t *) addr;
9129 addr += VM_MAX_TAG_ZONES * sizeof(vm_allocation_zone_total_t);
9130 vm_allocation_zone_totals[VM_KERN_MEMORY_KALLOC] = (vm_allocation_zone_total_t *) addr;
9131 }
9132
9133 void
9134 vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx)
9135 {
9136 vm_allocation_zone_total_t * zone;
9137
9138 assert(VM_KERN_MEMORY_NONE != tag);
9139 assert(tag < VM_MAX_TAG_VALUE);
9140
9141 if (zidx >= VM_MAX_TAG_ZONES) {
9142 return;
9143 }
9144
9145 zone = vm_allocation_zone_totals[tag];
9146 if (!zone) {
9147 zone = kalloc_tag(VM_MAX_TAG_ZONES * sizeof(*zone), VM_KERN_MEMORY_DIAG);
9148 if (!zone) {
9149 return;
9150 }
9151 bzero(zone, VM_MAX_TAG_ZONES * sizeof(*zone));
9152 if (!OSCompareAndSwapPtr(NULL, zone, &vm_allocation_zone_totals[tag])) {
9153 kfree(zone, VM_MAX_TAG_ZONES * sizeof(*zone));
9154 }
9155 }
9156 }
9157
9158 void
9159 vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, int64_t delta, int64_t dwaste)
9160 {
9161 vm_allocation_zone_total_t * zone;
9162 uint32_t new;
9163
9164 assert(VM_KERN_MEMORY_NONE != tag);
9165 assert(tag < VM_MAX_TAG_VALUE);
9166
9167 if (zidx >= VM_MAX_TAG_ZONES) {
9168 return;
9169 }
9170
9171 zone = vm_allocation_zone_totals[tag];
9172 assert(zone);
9173 zone += zidx;
9174
9175 /* the zone is locked */
9176 if (delta < 0) {
9177 assertf(zone->total >= ((uint64_t)-delta), "zidx %d, tag %d, %p", zidx, tag, zone);
9178 zone->total += delta;
9179 } else {
9180 zone->total += delta;
9181 if (zone->total > zone->peak) {
9182 zone->peak = zone->total;
9183 }
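/*
 * Keep a decaying running total of reported waste: once wastediv saturates
 * at 65536 samples, shed 1/65536 of the total before adding, so
 * waste / wastediv approximates the average waste per allocation
 * (process_account() uses this to estimate collectable bytes).
 */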
9184 if (dwaste) {
9185 new = zone->waste;
9186 if (zone->wastediv < 65536) {
9187 zone->wastediv++;
9188 } else {
9189 new -= (new >> 16);
9190 }
9191 __assert_only bool ov = os_add_overflow(new, dwaste, &new);
9192 assert(!ov);
9193 zone->waste = new;
9194 }
9195 }
9196 }
9197
9198 #endif /* VM_MAX_TAG_ZONES */
9199
9200 void
9201 kern_allocation_update_subtotal(kern_allocation_name_t allocation, uint32_t subtag, int64_t delta)
9202 {
9203 kern_allocation_name_t other;
9204 struct vm_allocation_total * total;
9205 uint32_t subidx;
9206
9207 subidx = 0;
9208 assert(VM_KERN_MEMORY_NONE != subtag);
9209 lck_spin_lock(&vm_allocation_sites_lock);
9210 for (; subidx < allocation->subtotalscount; subidx++) {
9211 if (VM_KERN_MEMORY_NONE == allocation->subtotals[subidx].tag) {
9212 allocation->subtotals[subidx].tag = (vm_tag_t)subtag;
9213 break;
9214 }
9215 if (subtag == allocation->subtotals[subidx].tag) {
9216 break;
9217 }
9218 }
9219 lck_spin_unlock(&vm_allocation_sites_lock);
9220 assert(subidx < allocation->subtotalscount);
9221 if (subidx >= allocation->subtotalscount) {
9222 return;
9223 }
9224
9225 total = &allocation->subtotals[subidx];
9226 other = vm_allocation_sites[subtag];
9227 assert(other);
9228
9229 if (delta < 0) {
9230 assertf(total->total >= ((uint64_t)-delta), "name %p", allocation);
9231 assertf(other->mapped >= ((uint64_t)-delta), "other %p", other);
9232 }
9233 OSAddAtomic64(delta, &other->mapped);
9234 OSAddAtomic64(delta, &total->total);
9235 }
9236
9237 const char *
9238 kern_allocation_get_name(kern_allocation_name_t allocation)
9239 {
9240 return KA_NAME(allocation);
9241 }
9242
9243 kern_allocation_name_t
9244 kern_allocation_name_allocate(const char * name, uint16_t subtotalscount)
9245 {
9246 uint16_t namelen;
9247
9248 namelen = (uint16_t)strnlen(name, MACH_MEMORY_INFO_NAME_MAX_LEN - 1);
9249
9250 kern_allocation_name_t allocation;
9251 allocation = kheap_alloc(KHEAP_DATA_BUFFERS,
9252 KA_SIZE(namelen, subtotalscount), Z_WAITOK);
9253 bzero(allocation, KA_SIZE(namelen, subtotalscount));
9254
9255 allocation->refcount = 1;
9256 allocation->subtotalscount = subtotalscount;
9257 allocation->flags = (uint16_t)(namelen << VM_TAG_NAME_LEN_SHIFT);
9258 strlcpy(KA_NAME(allocation), name, namelen + 1);
9259
9260 return allocation;
9261 }
9262
9263 void
9264 kern_allocation_name_release(kern_allocation_name_t allocation)
9265 {
9266 assert(allocation->refcount > 0);
9267 if (1 == OSAddAtomic16(-1, &allocation->refcount)) {
9268 kheap_free(KHEAP_DATA_BUFFERS, allocation,
9269 KA_SIZE(KA_NAME_LEN(allocation), allocation->subtotalscount));
9270 }
9271 }
9272
9273 vm_tag_t
9274 kern_allocation_name_get_vm_tag(kern_allocation_name_t allocation)
9275 {
9276 return vm_tag_alloc(allocation);
9277 }
9278
9279 #if !VM_TAG_ACTIVE_UPDATE
9280 static void
9281 vm_page_count_object(mach_memory_info_t * info, unsigned int __unused num_info, vm_object_t object)
9282 {
9283 if (!object->wired_page_count) {
9284 return;
9285 }
9286 if (object != kernel_object) {
9287 assert(object->wire_tag < num_info);
9288 info[object->wire_tag].size += ptoa_64(object->wired_page_count);
9289 }
9290 }
9291
9292 typedef void (*vm_page_iterate_proc)(mach_memory_info_t * info,
9293 unsigned int num_info, vm_object_t object);
9294
9295 static void
9296 vm_page_iterate_purgeable_objects(mach_memory_info_t * info, unsigned int num_info,
9297 vm_page_iterate_proc proc, purgeable_q_t queue,
9298 int group)
9299 {
9300 vm_object_t object;
9301
9302 for (object = (vm_object_t) queue_first(&queue->objq[group]);
9303 !queue_end(&queue->objq[group], (queue_entry_t) object);
9304 object = (vm_object_t) queue_next(&object->objq)) {
9305 proc(info, num_info, object);
9306 }
9307 }
9308
9309 static void
9310 vm_page_iterate_objects(mach_memory_info_t * info, unsigned int num_info,
9311 vm_page_iterate_proc proc)
9312 {
9313 vm_object_t object;
9314
9315 lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket);
9316 queue_iterate(&vm_objects_wired,
9317 object,
9318 vm_object_t,
9319 wired_objq)
9320 {
9321 proc(info, num_info, object);
9322 }
9323 lck_spin_unlock(&vm_objects_wired_lock);
9324 }
9325 #endif /* ! VM_TAG_ACTIVE_UPDATE */
9326
9327 static uint64_t
9328 process_account(mach_memory_info_t * info, unsigned int num_info,
9329 uint64_t zones_collectable_bytes, boolean_t iterated)
9330 {
9331 size_t namelen;
9332 unsigned int idx, count, nextinfo;
9333 vm_allocation_site_t * site;
9334 lck_spin_lock(&vm_allocation_sites_lock);
9335
9336 for (idx = 0; idx <= vm_allocation_tag_highest; idx++) {
9337 site = vm_allocation_sites[idx];
9338 if (!site) {
9339 continue;
9340 }
9341 info[idx].mapped = site->mapped;
9342 info[idx].tag = site->tag;
9343 if (!iterated) {
9344 info[idx].size = site->total;
9345 #if DEBUG || DEVELOPMENT
9346 info[idx].peak = site->peak;
9347 #endif /* DEBUG || DEVELOPMENT */
9348 } else {
9349 if (!site->subtotalscount && (site->total != info[idx].size)) {
9350 printf("tag mismatch[%d] 0x%qx, iter 0x%qx\n", idx, site->total, info[idx].size);
9351 info[idx].size = site->total;
9352 }
9353 }
9354 info[idx].flags |= VM_KERN_SITE_WIRED;
9355 if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC) {
9356 info[idx].site = idx;
9357 info[idx].flags |= VM_KERN_SITE_TAG;
9358 if (VM_KERN_MEMORY_ZONE == idx) {
9359 info[idx].flags |= VM_KERN_SITE_HIDE;
9360 info[idx].flags &= ~VM_KERN_SITE_WIRED;
9361 info[idx].collectable_bytes = zones_collectable_bytes;
9362 }
9363 } else if ((namelen = (VM_TAG_NAME_LEN_MAX & (site->flags >> VM_TAG_NAME_LEN_SHIFT)))) {
9364 info[idx].site = 0;
9365 info[idx].flags |= VM_KERN_SITE_NAMED;
9366 if (namelen > sizeof(info[idx].name)) {
9367 namelen = sizeof(info[idx].name);
9368 }
9369 strncpy(&info[idx].name[0], KA_NAME(site), namelen);
9370 } else if (VM_TAG_KMOD & site->flags) {
9371 info[idx].site = OSKextGetKmodIDForSite(site, NULL, 0);
9372 info[idx].flags |= VM_KERN_SITE_KMOD;
9373 } else {
9374 info[idx].site = VM_KERNEL_UNSLIDE(site);
9375 info[idx].flags |= VM_KERN_SITE_KERNEL;
9376 }
9377 }
9378
9379 nextinfo = (vm_allocation_tag_highest + 1);
9380 count = nextinfo;
9381 if (count >= num_info) {
9382 count = num_info;
9383 }
9384
9385 for (idx = 0; idx < count; idx++) {
9386 site = vm_allocation_sites[idx];
9387 if (!site) {
9388 continue;
9389 }
9390 #if VM_MAX_TAG_ZONES
9391 vm_allocation_zone_total_t * zone;
9392 unsigned int zidx;
9393 vm_size_t elem_size;
9394
9395 if (vm_allocation_zone_totals
9396 && (zone = vm_allocation_zone_totals[idx])
9397 && (nextinfo < num_info)) {
9398 for (zidx = 0; zidx < VM_MAX_TAG_ZONES; zidx++) {
9399 if (!zone[zidx].peak) {
9400 continue;
9401 }
9402 info[nextinfo] = info[idx];
9403 info[nextinfo].zone = (uint16_t)zone_index_from_tag_index(zidx, &elem_size);
9404 info[nextinfo].flags &= ~VM_KERN_SITE_WIRED;
9405 info[nextinfo].flags |= VM_KERN_SITE_ZONE;
9406 info[nextinfo].size = zone[zidx].total;
9407 info[nextinfo].peak = zone[zidx].peak;
9408 info[nextinfo].mapped = 0;
9409 if (zone[zidx].wastediv) {
9410 info[nextinfo].collectable_bytes = ((zone[zidx].waste * zone[zidx].total / elem_size) / zone[zidx].wastediv);
9411 }
9412 nextinfo++;
9413 }
9414 }
9415 #endif /* VM_MAX_TAG_ZONES */
9416 if (site->subtotalscount) {
9417 uint64_t mapped, mapcost, take;
9418 uint32_t sub;
9419 vm_tag_t alloctag;
9420
9421 info[idx].size = site->total;
9422 mapped = info[idx].size;
9423 info[idx].mapped = mapped;
9424 mapcost = 0;
9425 for (sub = 0; sub < site->subtotalscount; sub++) {
9426 alloctag = site->subtotals[sub].tag;
9427 assert(alloctag < num_info);
9428 if (info[alloctag].name[0]) {
9429 continue;
9430 }
9431 take = site->subtotals[sub].total;
9432 if (take > info[alloctag].size) {
9433 take = info[alloctag].size;
9434 }
9435 if (take > mapped) {
9436 take = mapped;
9437 }
9438 info[alloctag].mapped -= take;
9439 info[alloctag].size -= take;
9440 mapped -= take;
9441 mapcost += take;
9442 }
9443 info[idx].size = mapcost;
9444 }
9445 }
9446 lck_spin_unlock(&vm_allocation_sites_lock);
9447
9448 return 0;
9449 }
9450
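/*
 * Return an upper bound on the number of mach_memory_info records that
 * vm_page_diagnose() may fill in: one per zone view, one per allocation
 * site (plus its per-zone totals), the fixed VM_KERN_COUNTER_COUNT
 * counters, and some slop for tags created after this estimate.
 */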
9451 uint32_t
9452 vm_page_diagnose_estimate(void)
9453 {
9454 vm_allocation_site_t * site;
9455 uint32_t count = zone_view_count;
9456 uint32_t idx;
9457
9458 lck_spin_lock(&vm_allocation_sites_lock);
9459 for (idx = 0; idx < VM_MAX_TAG_VALUE; idx++) {
9460 site = vm_allocation_sites[idx];
9461 if (!site) {
9462 continue;
9463 }
9464 count++;
9465 #if VM_MAX_TAG_ZONES
9466 if (vm_allocation_zone_totals) {
9467 vm_allocation_zone_total_t * zone;
9468 zone = vm_allocation_zone_totals[idx];
9469 if (!zone) {
9470 continue;
9471 }
9472 for (uint32_t zidx = 0; zidx < VM_MAX_TAG_ZONES; zidx++) {
9473 if (zone[zidx].peak) {
9474 count++;
9475 }
9476 }
9477 }
9478 #endif
9479 }
9480 lck_spin_unlock(&vm_allocation_sites_lock);
9481
9482 /* some slop for new tags created */
9483 count += 8;
9484 count += VM_KERN_COUNTER_COUNT;
9485
9486 return count;
9487 }
9488
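/*
 * Sum a zone's per-CPU statistics (bytes allocated minus bytes freed) into
 * info->size; for per-cpu zones each element is replicated per CPU, so the
 * total is scaled by zpercpu_count().
 */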
9489 static void
9490 vm_page_diagnose_zone_stats(mach_memory_info_t *info, zone_stats_t zstats,
9491 bool percpu)
9492 {
9493 zpercpu_foreach(zs, zstats) {
9494 info->size += zs->zs_mem_allocated - zs->zs_mem_freed;
9495 }
9496 if (percpu) {
9497 info->size *= zpercpu_count();
9498 }
9499 info->flags |= VM_KERN_SITE_NAMED | VM_KERN_SITE_ZONE_VIEW;
9500 }
9501
9502 static void
9503 vm_page_diagnose_zone(mach_memory_info_t *info, zone_t z)
9504 {
9505 vm_page_diagnose_zone_stats(info, z->z_stats, z->percpu);
9506 snprintf(info->name, sizeof(info->name),
9507 "%s%s[raw]", zone_heap_name(z), z->z_name);
9508 }
9509
9510 static int
9511 vm_page_diagnose_heap(mach_memory_info_t *info, kalloc_heap_t kheap)
9512 {
9513 struct kheap_zones *zones = kheap->kh_zones;
9514 int i = 0;
9515
9516 for (; i < zones->max_k_zone; i++) {
9517 vm_page_diagnose_zone(info + i, zones->k_zone[i]);
9518 }
9519
9520 for (kalloc_heap_t kh = zones->views; kh; kh = kh->kh_next, i++) {
9521 vm_page_diagnose_zone_stats(info + i, kh->kh_stats, false);
9522 snprintf(info[i].name, sizeof(info[i].name),
9523 "%skalloc[%s]", kheap->kh_name, kh->kh_name);
9524 }
9525
9526 return i;
9527 }
9528
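/*
 * Fill `info' with a snapshot of kernel wired-memory accounting: global
 * page counters, kernel/zone/kalloc map sizes, per-zone and per-zone-view
 * usage, and per-tag allocation-site totals.  When tags are not actively
 * updated, wired pages are recounted here by walking the wired objects and
 * the kernel_map entries backed by kernel_object.
 */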
9529 kern_return_t
9530 vm_page_diagnose(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes)
9531 {
9532 uint64_t wired_size;
9533 uint64_t wired_managed_size;
9534 uint64_t wired_reserved_size;
9535 boolean_t iterate;
9536 mach_memory_info_t * counts;
9537 uint32_t i;
9538
9539 bzero(info, num_info * sizeof(mach_memory_info_t));
9540
9541 if (!vm_page_wire_count_initial) {
9542 return KERN_ABORTED;
9543 }
9544
9545 #if CONFIG_EMBEDDED
9546 wired_size = ptoa_64(vm_page_wire_count);
9547 wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count);
9548 #else
9549 wired_size = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count);
9550 wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count);
9551 #endif
9552 wired_managed_size = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial);
9553
9554 wired_size += booter_size;
9555
9556 assert(num_info >= VM_KERN_COUNTER_COUNT);
9557 num_info -= VM_KERN_COUNTER_COUNT;
9558 counts = &info[num_info];
9559
9560 #define SET_COUNT(xcount, xsize, xflags) \
9561 counts[xcount].tag = VM_MAX_TAG_VALUE + xcount; \
9562 counts[xcount].site = (xcount); \
9563 counts[xcount].size = (xsize); \
9564 counts[xcount].mapped = (xsize); \
9565 counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags;
9566
9567 SET_COUNT(VM_KERN_COUNT_MANAGED, ptoa_64(vm_page_pages), 0);
9568 SET_COUNT(VM_KERN_COUNT_WIRED, wired_size, 0);
9569 SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED, wired_managed_size, 0);
9570 SET_COUNT(VM_KERN_COUNT_RESERVED, wired_reserved_size, VM_KERN_SITE_WIRED);
9571 SET_COUNT(VM_KERN_COUNT_STOLEN, ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED);
9572 SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED);
9573 SET_COUNT(VM_KERN_COUNT_WIRED_BOOT, ptoa_64(vm_page_wire_count_on_boot), 0);
9574 SET_COUNT(VM_KERN_COUNT_BOOT_STOLEN, booter_size, VM_KERN_SITE_WIRED);
9575 SET_COUNT(VM_KERN_COUNT_WIRED_STATIC_KERNELCACHE, ptoa_64(vm_page_kernelcache_count), 0);
9576
9577 #define SET_MAP(xcount, xsize, xfree, xlargest) \
9578 counts[xcount].site = (xcount); \
9579 counts[xcount].size = (xsize); \
9580 counts[xcount].mapped = (xsize); \
9581 counts[xcount].free = (xfree); \
9582 counts[xcount].largest = (xlargest); \
9583 counts[xcount].flags = VM_KERN_SITE_COUNTER;
9584
9585 vm_map_size_t map_size, map_free, map_largest;
9586
9587 vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest);
9588 SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest);
9589
9590 zone_map_sizes(&map_size, &map_free, &map_largest);
9591 SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest);
9592
9593 vm_map_sizes(kalloc_map, &map_size, &map_free, &map_largest);
9594 SET_MAP(VM_KERN_COUNT_MAP_KALLOC, map_size, map_free, map_largest);
9595
9596 assert(num_info >= zone_view_count);
9597 num_info -= zone_view_count;
9598 counts = &info[num_info];
9599 i = 0;
9600
9601 i += vm_page_diagnose_heap(counts + i, KHEAP_DEFAULT);
9602 if (KHEAP_DATA_BUFFERS->kh_heap_id == KHEAP_ID_DATA_BUFFERS) {
9603 i += vm_page_diagnose_heap(counts + i, KHEAP_DATA_BUFFERS);
9604 }
9605 if (KHEAP_KEXT->kh_heap_id == KHEAP_ID_KEXT) {
9606 i += vm_page_diagnose_heap(counts + i, KHEAP_KEXT);
9607 }
9608 assert(i <= zone_view_count);
9609
9610 zone_index_foreach(zidx) {
9611 zone_t z = &zone_array[zidx];
9612 zone_view_t zv = z->z_views;
9613
9614 if (zv == NULL) {
9615 continue;
9616 }
9617
9618 if (z->kalloc_heap == KHEAP_ID_NONE) {
9619 vm_page_diagnose_zone(counts + i, z);
9620 i++;
9621 assert(i <= zone_view_count);
9622 }
9623
9624 for (; zv; zv = zv->zv_next) {
9625 vm_page_diagnose_zone_stats(counts + i, zv->zv_stats,
9626 z->percpu);
9627 snprintf(counts[i].name, sizeof(counts[i].name), "%s%s[%s]",
9628 zone_heap_name(z), z->z_name, zv->zv_name);
9629 i++;
9630 assert(i <= zone_view_count);
9631 }
9632 }
9633
9634 iterate = !VM_TAG_ACTIVE_UPDATE;
9635 if (iterate) {
9636 enum { kMaxKernelDepth = 1 };
9637 vm_map_t maps[kMaxKernelDepth];
9638 vm_map_entry_t entries[kMaxKernelDepth];
9639 vm_map_t map;
9640 vm_map_entry_t entry;
9641 vm_object_offset_t offset;
9642 vm_page_t page;
9643 int stackIdx, count;
9644
9645 #if !VM_TAG_ACTIVE_UPDATE
9646 vm_page_iterate_objects(info, num_info, &vm_page_count_object);
9647 #endif /* ! VM_TAG_ACTIVE_UPDATE */
9648
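/*
 * Walk kernel_map and (one level of) its submaps with a small explicit
 * stack, counting wired pages in entries backed by kernel_object and
 * attributing them to the entry's VME_ALIAS tag.
 */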
9649 map = kernel_map;
9650 stackIdx = 0;
9651 while (map) {
9652 vm_map_lock(map);
9653 for (entry = map->hdr.links.next; map; entry = entry->links.next) {
9654 if (entry->is_sub_map) {
9655 assert(stackIdx < kMaxKernelDepth);
9656 maps[stackIdx] = map;
9657 entries[stackIdx] = entry;
9658 stackIdx++;
9659 map = VME_SUBMAP(entry);
9660 entry = NULL;
9661 break;
9662 }
9663 if (VME_OBJECT(entry) == kernel_object) {
9664 count = 0;
9665 vm_object_lock(VME_OBJECT(entry));
9666 for (offset = entry->links.start; offset < entry->links.end; offset += page_size) {
9667 page = vm_page_lookup(VME_OBJECT(entry), offset);
9668 if (page && VM_PAGE_WIRED(page)) {
9669 count++;
9670 }
9671 }
9672 vm_object_unlock(VME_OBJECT(entry));
9673
9674 if (count) {
9675 assert(VME_ALIAS(entry) != VM_KERN_MEMORY_NONE);
9676 assert(VME_ALIAS(entry) < num_info);
9677 info[VME_ALIAS(entry)].size += ptoa_64(count);
9678 }
9679 }
9680 while (map && (entry == vm_map_last_entry(map))) {
9681 vm_map_unlock(map);
9682 if (!stackIdx) {
9683 map = NULL;
9684 } else {
9685 --stackIdx;
9686 map = maps[stackIdx];
9687 entry = entries[stackIdx];
9688 }
9689 }
9690 }
9691 }
9692 }
9693
9694 process_account(info, num_info, zones_collectable_bytes, iterate);
9695
9696 return KERN_SUCCESS;
9697 }
9698
9699 #if DEBUG || DEVELOPMENT
9700
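/*
 * (DEBUG/DEVELOPMENT only) Resolve a kernel virtual address to the size and
 * tag of the allocation containing it: first ask the zone allocator, then
 * fall back to looking the address up in kernel_map (descending into one
 * level of submap) and requiring it to be the start of a map entry.
 */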
9701 kern_return_t
9702 vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size)
9703 {
9704 kern_return_t ret;
9705 vm_size_t zsize;
9706 vm_map_t map;
9707 vm_map_entry_t entry;
9708
9709 zsize = zone_element_info((void *) addr, tag);
9710 if (zsize) {
9711 *zone_size = *size = zsize;
9712 return KERN_SUCCESS;
9713 }
9714
9715 *zone_size = 0;
9716 ret = KERN_INVALID_ADDRESS;
9717 for (map = kernel_map; map;) {
9718 vm_map_lock(map);
9719 if (!vm_map_lookup_entry(map, addr, &entry)) {
9720 break;
9721 }
9722 if (entry->is_sub_map) {
9723 if (map != kernel_map) {
9724 break;
9725 }
9726 map = VME_SUBMAP(entry);
9727 continue;
9728 }
9729 if (entry->vme_start != addr) {
9730 break;
9731 }
9732 *tag = (vm_tag_t)VME_ALIAS(entry);
9733 *size = (entry->vme_end - addr);
9734 ret = KERN_SUCCESS;
9735 break;
9736 }
9737 if (map != kernel_map) {
9738 vm_map_unlock(map);
9739 }
9740 vm_map_unlock(kernel_map);
9741
9742 return ret;
9743 }
9744
9745 #endif /* DEBUG || DEVELOPMENT */
9746
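/*
 * If `tag' was allocated on behalf of a kext, return its kmod id and copy
 * the kext identifier into `name'; return 0 otherwise.
 */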
9747 uint32_t
9748 vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen)
9749 {
9750 vm_allocation_site_t * site;
9751 uint32_t kmodId;
9752
9753 kmodId = 0;
9754 lck_spin_lock(&vm_allocation_sites_lock);
9755 if ((site = vm_allocation_sites[tag])) {
9756 if (VM_TAG_KMOD & site->flags) {
9757 kmodId = OSKextGetKmodIDForSite(site, name, namelen);
9758 }
9759 }
9760 lck_spin_unlock(&vm_allocation_sites_lock);
9761
9762 return kmodId;
9763 }
9764
9765
9766 #if CONFIG_SECLUDED_MEMORY
9767 /*
9768 * Note that there's no locking around other accesses to vm_page_secluded_target.
9769 * That should be OK, since these are the only places where it can be changed after
9770 * initialization. Other users (like vm_pageout) may see the wrong value briefly,
9771 * but will eventually get the correct value. This brief mismatch is OK as pageout
9772 * and page freeing will auto-adjust the vm_page_secluded_count to match the target
9773 * over time.
9774 */
9775 unsigned int vm_page_secluded_suppress_cnt = 0;
9776 unsigned int vm_page_secluded_save_target;
9777
9778 LCK_GRP_DECLARE(secluded_suppress_slock_grp, "secluded_suppress_slock");
9779 LCK_SPIN_DECLARE(secluded_suppress_slock, &secluded_suppress_slock_grp);
9780
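/*
 * Suppress use of the secluded-page pool while at least one task requests
 * it: the first suppressor saves vm_page_secluded_target and forces the
 * target to zero, and the last task to stop suppression restores the saved
 * target.
 */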
9781 void
9782 start_secluded_suppression(task_t task)
9783 {
9784 if (task->task_suppressed_secluded) {
9785 return;
9786 }
9787 lck_spin_lock(&secluded_suppress_slock);
9788 if (!task->task_suppressed_secluded && vm_page_secluded_suppress_cnt++ == 0) {
9789 task->task_suppressed_secluded = TRUE;
9790 vm_page_secluded_save_target = vm_page_secluded_target;
9791 vm_page_secluded_target = 0;
9792 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
9793 }
9794 lck_spin_unlock(&secluded_suppress_slock);
9795 }
9796
9797 void
9798 stop_secluded_suppression(task_t task)
9799 {
9800 lck_spin_lock(&secluded_suppress_slock);
9801 if (task->task_suppressed_secluded && --vm_page_secluded_suppress_cnt == 0) {
9802 task->task_suppressed_secluded = FALSE;
9803 vm_page_secluded_target = vm_page_secluded_save_target;
9804 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
9805 }
9806 lck_spin_unlock(&secluded_suppress_slock);
9807 }
9808
9809 #endif /* CONFIG_SECLUDED_MEMORY */