/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Resident memory management module.
 */
#include <libkern/OSAtomic.h>
#include <libkern/OSDebug.h>

#include <mach/clock_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>

#include <kern/counters.h>
#include <kern/host_statistics.h>
#include <kern/sched_prim.h>
#include <kern/policy_internal.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <kern/zalloc_internal.h>
#include <kern/ledger.h>

#include <vm/vm_init.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>                 /* kernel_memory_allocate() */
#include <kern/misc_protos.h>
#include <mach_debug/zone_info.h>

#include <pexpert/pexpert.h>
#include <san/kasan.h>

#include <vm/vm_protos.h>
#include <vm/memory_object.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_compressor.h>
#if defined (__x86_64__)
#include <i386/misc_protos.h>
#endif /* defined (__x86_64__) */

#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache.h>
#endif /* CONFIG_PHANTOM_CACHE */

#if HIBERNATION
#include <IOKit/IOHibernatePrivate.h>
#include <machine/pal_hibernate.h>
#endif /* HIBERNATION */

#include <sys/kdebug.h>
#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif /* defined(HAS_APPLE_PAC) */
#if defined(__arm64__)
#include <arm/cpu_internal.h>
#endif /* defined(__arm64__) */

#if MACH_ASSERT

#define ASSERT_PMAP_FREE(mem) pmap_assert_free(VM_PAGE_GET_PHYS_PAGE(mem))

#else /* MACH_ASSERT */

#define ASSERT_PMAP_FREE(mem) /* nothing */

#endif /* MACH_ASSERT */
extern boolean_t vm_pageout_running;
extern thread_t  vm_pageout_scan_thread;
extern boolean_t vps_dynamic_priority_enabled;

char    vm_page_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
char    vm_page_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
char    vm_page_non_speculative_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
char    vm_page_active_or_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];

#if CONFIG_SECLUDED_MEMORY
struct vm_page_secluded_data vm_page_secluded;
#endif /* CONFIG_SECLUDED_MEMORY */

#if DEVELOPMENT || DEBUG
extern struct memory_object_pager_ops shared_region_pager_ops;
unsigned int shared_region_pagers_resident_count = 0;
unsigned int shared_region_pagers_resident_peak = 0;
#endif /* DEVELOPMENT || DEBUG */

int         PERCPU_DATA(start_color);
vm_page_t   PERCPU_DATA(free_pages);
boolean_t   hibernate_cleaning_in_progress = FALSE;
boolean_t   vm_page_free_verify = TRUE;

uint32_t    vm_lopage_free_count = 0;
uint32_t    vm_lopage_free_limit = 0;
uint32_t    vm_lopage_lowater = 0;
boolean_t   vm_lopage_refill = FALSE;
boolean_t   vm_lopage_needed = FALSE;

lck_mtx_ext_t   vm_page_queue_lock_ext;
lck_mtx_ext_t   vm_page_queue_free_lock_ext;
lck_mtx_ext_t   vm_purgeable_queue_lock_ext;

int         speculative_age_index = 0;
int         speculative_steal_index = 0;
struct vm_speculative_age_q vm_page_queue_speculative[VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1];

boolean_t   hibernation_vmqueues_inspection = FALSE; /* Tracks if the hibernation code is looking at the VM queues.
                                                      * Updated and checked behind the vm_page_queues_lock. */

static void vm_page_free_prepare(vm_page_t page);
static vm_page_t vm_page_grab_fictitious_common(ppnum_t phys_addr);

static void vm_tag_init(void);

/* for debugging purposes */
SECURITY_READ_ONLY_EARLY(vm_packing_params_t) vm_page_packing_params =
    VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR);
/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

/*
 *	These variables record the values returned by vm_page_bootstrap,
 *	for debugging purposes.  The implementation of pmap_steal_memory
 *	and pmap_startup here also uses them internally.
 */

vm_offset_t virtual_space_start;
vm_offset_t virtual_space_end;
uint32_t    vm_page_pages;
/*
 *	The vm_page_lookup() routine, which provides for fast
 *	(virtual memory object, offset) to page lookup, employs
 *	the following hash table.  The vm_page_{insert,remove}
 *	routines install and remove associations in the table.
 *	[This table is often called the virtual-to-physical,
 *	or VP, table.]
 */
typedef struct {
    vm_page_packed_t page_list;
#if MACH_PAGE_HASH_STATS
    int              cur_count;             /* current count */
    int              hi_count;              /* high water mark */
#endif /* MACH_PAGE_HASH_STATS */
} vm_page_bucket_t;


#define BUCKETS_PER_LOCK        16

vm_page_bucket_t *vm_page_buckets;               /* Array of buckets */
unsigned int      vm_page_bucket_count = 0;      /* How big is array? */
unsigned int      vm_page_hash_mask;             /* Mask for hash function */
unsigned int      vm_page_hash_shift;            /* Shift for hash function */
uint32_t          vm_page_bucket_hash;           /* Basic bucket hash */
unsigned int      vm_page_bucket_lock_count = 0; /* How big is array of locks? */
#ifndef VM_TAG_ACTIVE_UPDATE
#error VM_TAG_ACTIVE_UPDATE
#endif
#ifndef VM_MAX_TAG_ZONES
#error VM_MAX_TAG_ZONES
#endif

boolean_t   vm_tag_active_update = VM_TAG_ACTIVE_UPDATE;
lck_spin_t *vm_page_bucket_locks;

vm_allocation_site_t          vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC + 1];
vm_allocation_site_t *        vm_allocation_sites[VM_MAX_TAG_VALUE];
#if VM_MAX_TAG_ZONES
vm_allocation_zone_total_t ** vm_allocation_zone_totals;
#endif /* VM_MAX_TAG_ZONES */

vm_tag_t vm_allocation_tag_highest;

#if VM_PAGE_BUCKETS_CHECK
boolean_t vm_page_buckets_check_ready = FALSE;
#if VM_PAGE_FAKE_BUCKETS
vm_page_bucket_t *vm_page_fake_buckets;         /* decoy buckets */
vm_map_offset_t   vm_page_fake_buckets_start, vm_page_fake_buckets_end;
#endif /* VM_PAGE_FAKE_BUCKETS */
#endif /* VM_PAGE_BUCKETS_CHECK */
#if MACH_PAGE_HASH_STATS
/* This routine is only for debug.  It is intended to be called by
 * hand by a developer using a kernel debugger.  This routine prints
 * out vm_page_hash table statistics to the kernel debug console.
 */
void
hash_debug(void)
{
    int i;
    int numbuckets = 0;
    int highsum = 0;
    int maxdepth = 0;

    for (i = 0; i < vm_page_bucket_count; i++) {
        if (vm_page_buckets[i].hi_count) {
            numbuckets++;
            highsum += vm_page_buckets[i].hi_count;
            if (vm_page_buckets[i].hi_count > maxdepth) {
                maxdepth = vm_page_buckets[i].hi_count;
            }
        }
    }
    printf("Total number of buckets: %d\n", vm_page_bucket_count);
    printf("Number used buckets:     %d = %d%%\n",
        numbuckets, 100 * numbuckets / vm_page_bucket_count);
    printf("Number unused buckets:   %d = %d%%\n",
        vm_page_bucket_count - numbuckets,
        100 * (vm_page_bucket_count - numbuckets) / vm_page_bucket_count);
    printf("Sum of bucket max depth: %d\n", highsum);
    printf("Average bucket depth:    %d.%2d\n",
        highsum / vm_page_bucket_count,
        highsum % vm_page_bucket_count);
    printf("Maximum bucket depth:    %d\n", maxdepth);
}
#endif /* MACH_PAGE_HASH_STATS */
/*
 *	The virtual page size is currently implemented as a runtime
 *	variable, but is constant once initialized using vm_set_page_size.
 *	This initialization must be done in the machine-dependent
 *	bootstrap sequence, before calling other machine-independent
 *	functions.
 *
 *	All references to the virtual page size outside this
 *	module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
 *	constants.
 */
#if defined(__arm__) || defined(__arm64__)
vm_size_t   page_size;
vm_size_t   page_mask;
int         page_shift;
#else
vm_size_t   page_size  = PAGE_SIZE;
vm_size_t   page_mask  = PAGE_MASK;
int         page_shift = PAGE_SHIFT;
#endif

SECURITY_READ_ONLY_LATE(vm_page_t) vm_pages = VM_PAGE_NULL;
SECURITY_READ_ONLY_LATE(vm_page_t) vm_page_array_beginning_addr;
vm_page_t    vm_page_array_ending_addr;

unsigned int vm_pages_count = 0;
/*
 *	Resident pages that represent real memory
 *	are allocated from a set of free lists,
 *	one per color.
 */
unsigned int    vm_colors;
unsigned int    vm_color_mask;                  /* mask is == (vm_colors-1) */
unsigned int    vm_cache_geometry_colors = 0;   /* set by hw dependent code during startup */
unsigned int    vm_free_magazine_refill_limit = 0;


struct vm_page_queue_free_head {
    vm_page_queue_head_t qhead;
} VM_PAGE_PACKED_ALIGNED;

struct vm_page_queue_free_head vm_page_queue_free[MAX_COLORS];


unsigned int    vm_page_free_wanted;
unsigned int    vm_page_free_wanted_privileged;
#if CONFIG_SECLUDED_MEMORY
unsigned int    vm_page_free_wanted_secluded;
#endif /* CONFIG_SECLUDED_MEMORY */
unsigned int    vm_page_free_count;
/*
 *	Occasionally, the virtual memory system uses
 *	resident page structures that do not refer to
 *	real pages, for example to leave a page with
 *	important state information in the VP table.
 *
 *	These page structures are allocated the way
 *	most other kernel structures are.
 */
SECURITY_READ_ONLY_LATE(zone_t) vm_page_zone;
vm_locks_array_t vm_page_locks;

LCK_ATTR_DECLARE(vm_page_lck_attr, 0, 0);
LCK_GRP_DECLARE(vm_page_lck_grp_free, "vm_page_free");
LCK_GRP_DECLARE(vm_page_lck_grp_queue, "vm_page_queue");
LCK_GRP_DECLARE(vm_page_lck_grp_local, "vm_page_queue_local");
LCK_GRP_DECLARE(vm_page_lck_grp_purge, "vm_page_purge");
LCK_GRP_DECLARE(vm_page_lck_grp_alloc, "vm_page_alloc");
LCK_GRP_DECLARE(vm_page_lck_grp_bucket, "vm_page_bucket");
LCK_MTX_EARLY_DECLARE_ATTR(vm_page_alloc_lock, &vm_page_lck_grp_alloc, &vm_page_lck_attr);
LCK_SPIN_DECLARE_ATTR(vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
LCK_SPIN_DECLARE_ATTR(vm_allocation_sites_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);

unsigned int    vm_page_local_q_soft_limit = 250;
unsigned int    vm_page_local_q_hard_limit = 500;
struct vpl     *__zpercpu vm_page_local_q;
/* N.B. Guard and fictitious pages must not
 * be assigned a zero phys_page value.
 */
/*
 *	Fictitious pages don't have a physical address,
 *	but we must initialize phys_page to something.
 *	For debugging, this should be a strange value
 *	that the pmap module can recognize in assertions.
 */
const ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;

/*
 *	Guard pages are not accessible so they don't
 *	need a physical address, but we need to enter
 *	them into the pmap anyway.
 *	Let's make it recognizable and make sure that
 *	we don't use a real physical page with that
 *	physical address.
 */
const ppnum_t vm_page_guard_addr = (ppnum_t) -2;
/*
 *	Resident page structures are also chained on
 *	queues that are used by the page replacement
 *	system (pageout daemon).  These queues are
 *	defined here, but are shared by the pageout
 *	module.  The inactive queue is broken into
 *	file backed and anonymous for convenience as the
 *	pageout daemon often assigns a higher
 *	importance to anonymous pages (less likely to pick)
 */
vm_page_queue_head_t    vm_page_queue_active VM_PAGE_PACKED_ALIGNED;
vm_page_queue_head_t    vm_page_queue_inactive VM_PAGE_PACKED_ALIGNED;
#if CONFIG_SECLUDED_MEMORY
vm_page_queue_head_t    vm_page_queue_secluded VM_PAGE_PACKED_ALIGNED;
#endif /* CONFIG_SECLUDED_MEMORY */
vm_page_queue_head_t    vm_page_queue_anonymous VM_PAGE_PACKED_ALIGNED;  /* inactive memory queue for anonymous pages */
vm_page_queue_head_t    vm_page_queue_throttled VM_PAGE_PACKED_ALIGNED;

queue_head_t    vm_objects_wired;

void vm_update_darkwake_mode(boolean_t);

#if CONFIG_BACKGROUND_QUEUE
vm_page_queue_head_t    vm_page_queue_background VM_PAGE_PACKED_ALIGNED;
uint32_t    vm_page_background_target;
uint32_t    vm_page_background_target_snapshot;
uint32_t    vm_page_background_count;
uint64_t    vm_page_background_promoted_count;

uint32_t    vm_page_background_internal_count;
uint32_t    vm_page_background_external_count;

uint32_t    vm_page_background_mode;
uint32_t    vm_page_background_exclude_external;
#endif /* CONFIG_BACKGROUND_QUEUE */
unsigned int    vm_page_active_count;
unsigned int    vm_page_inactive_count;
unsigned int    vm_page_kernelcache_count;
#if CONFIG_SECLUDED_MEMORY
unsigned int    vm_page_secluded_count;
unsigned int    vm_page_secluded_count_free;
unsigned int    vm_page_secluded_count_inuse;
unsigned int    vm_page_secluded_count_over_target;
#endif /* CONFIG_SECLUDED_MEMORY */
unsigned int    vm_page_anonymous_count;
unsigned int    vm_page_throttled_count;
unsigned int    vm_page_speculative_count;

unsigned int    vm_page_wire_count;
unsigned int    vm_page_wire_count_on_boot = 0;
unsigned int    vm_page_stolen_count = 0;
unsigned int    vm_page_wire_count_initial;
unsigned int    vm_page_gobble_count = 0;
unsigned int    vm_page_kern_lpage_count = 0;

uint64_t        booter_size;  /* external so it can be found in core dumps */

#define VM_PAGE_WIRE_COUNT_WARNING      0
#define VM_PAGE_GOBBLE_COUNT_WARNING    0

unsigned int    vm_page_purgeable_count = 0;        /* # of pages purgeable now */
unsigned int    vm_page_purgeable_wired_count = 0;  /* # of purgeable pages that are wired now */
uint64_t        vm_page_purged_count = 0;           /* total count of purged pages */

unsigned int    vm_page_xpmapped_external_count = 0;
unsigned int    vm_page_external_count = 0;
unsigned int    vm_page_internal_count = 0;
unsigned int    vm_page_pageable_external_count = 0;
unsigned int    vm_page_pageable_internal_count = 0;

#if DEVELOPMENT || DEBUG
unsigned int    vm_page_speculative_recreated = 0;
unsigned int    vm_page_speculative_created = 0;
unsigned int    vm_page_speculative_used = 0;
#endif /* DEVELOPMENT || DEBUG */

vm_page_queue_head_t    vm_page_queue_cleaned VM_PAGE_PACKED_ALIGNED;

unsigned int    vm_page_cleaned_count = 0;

uint64_t        max_valid_dma_address = 0xffffffffffffffffULL;
ppnum_t         max_valid_low_ppnum = PPNUM_MAX;
/*
 *	Several page replacement parameters are also
 *	shared with this module, so that page allocation
 *	(done here in vm_page_alloc) can trigger the
 *	pageout daemon.
 */
unsigned int    vm_page_free_target = 0;
unsigned int    vm_page_free_min = 0;
unsigned int    vm_page_throttle_limit = 0;
unsigned int    vm_page_inactive_target = 0;
#if CONFIG_SECLUDED_MEMORY
unsigned int    vm_page_secluded_target = 0;
#endif /* CONFIG_SECLUDED_MEMORY */
unsigned int    vm_page_anonymous_min = 0;
unsigned int    vm_page_free_reserved = 0;


/*
 *	The VM system has a couple of heuristics for deciding
 *	that pages are "uninteresting" and should be placed
 *	on the inactive queue as likely candidates for replacement.
 *	These variables let the heuristics be controlled at run-time
 *	to make experimentation easier.
 */

boolean_t vm_page_deactivate_hint = TRUE;

struct vm_page_stats_reusable vm_page_stats_reusable;
/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from page_size.
 */
void
vm_set_page_size(void)
{
    page_size  = PAGE_SIZE;
    page_mask  = PAGE_MASK;
    page_shift = PAGE_SHIFT;

    if ((page_mask & page_size) != 0) {
        panic("vm_set_page_size: page size not a power of two");
    }

    for (page_shift = 0;; page_shift++) {
        if ((1U << page_shift) == page_size) {
            break;
        }
    }
}
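/*
 * Worked example of the loop above (illustrative, page sizes are
 * configuration-dependent): with page_size == 16384 the loop exits when
 * (1U << 14) == 16384, leaving page_shift == 14; with 4 KB pages it
 * would stop at 12.
 */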
#if defined (__x86_64__)

#define MAX_CLUMP_SIZE      16
#define DEFAULT_CLUMP_SIZE  4

unsigned int vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;

#if DEVELOPMENT || DEBUG
unsigned long vm_clump_stats[MAX_CLUMP_SIZE + 1];
unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;

static inline void
vm_clump_update_stats(unsigned int c)
{
    assert(c <= vm_clump_size);
    if (c > 0 && c <= vm_clump_size) {
        vm_clump_stats[c] += c;
    }
    vm_clump_allocs += c;
}
#endif /* if DEVELOPMENT || DEBUG */

/* Called once to setup the VM clump knobs */
static void
vm_page_setup_clump( void )
{
    unsigned int override, n;

    vm_clump_size = DEFAULT_CLUMP_SIZE;
    if (PE_parse_boot_argn("clump_size", &override, sizeof(override))) {
        vm_clump_size = override;
    }

    if (vm_clump_size > MAX_CLUMP_SIZE) {
        panic("vm_page_setup_clump:: clump_size is too large!");
    }
    if (vm_clump_size < 1) {
        panic("vm_page_setup_clump:: clump_size must be >= 1");
    }
    if ((vm_clump_size & (vm_clump_size - 1)) != 0) {
        panic("vm_page_setup_clump:: clump_size must be a power of 2");
    }

    vm_clump_promote_threshold = vm_clump_size;
    vm_clump_mask = vm_clump_size - 1;
    for (vm_clump_shift = 0, n = vm_clump_size; n > 1; n >>= 1, vm_clump_shift++) {
        ;
    }

#if DEVELOPMENT || DEBUG
    bzero(vm_clump_stats, sizeof(vm_clump_stats));
    vm_clump_allocs = vm_clump_inserts = vm_clump_inrange = vm_clump_promotes = 0;
#endif /* if DEVELOPMENT || DEBUG */
}

#endif /* #if defined (__x86_64__) */
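/*
 * Illustrative sketch of the clump math above (not taken from the original
 * commentary): with the default clump_size of 4, the loop leaves
 * vm_clump_shift == 2, vm_clump_mask == 0x3 and
 * vm_clump_promote_threshold == 4, so a physical page number splits into a
 * clump index (ppn >> vm_clump_shift) and a slot within the clump
 * (ppn & vm_clump_mask).
 */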
#define COLOR_GROUPS_TO_STEAL   4

/* Called once during startup, once the cache geometry is known.
 */
static void
vm_page_set_colors( void )
{
    unsigned int n, override;

#if defined (__x86_64__)
    /* adjust #colors because we need to color outside the clump boundary */
    vm_cache_geometry_colors >>= vm_clump_shift;
#endif
    if (PE_parse_boot_argn("colors", &override, sizeof(override))) {   /* colors specified as a boot-arg? */
        n = override;
    } else if (vm_cache_geometry_colors) {          /* do we know what the cache geometry is? */
        n = vm_cache_geometry_colors;
    } else {
        n = DEFAULT_COLORS;                         /* use default if all else fails */
    }

    if (n > MAX_COLORS) {
        n = MAX_COLORS;
    }

    /* the count must be a power of 2  */
    if ((n & (n - 1)) != 0) {
        n = DEFAULT_COLORS;                         /* use default if all else fails */
    }

    vm_colors = n;
    vm_color_mask = n - 1;

    vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL;

#if defined (__x86_64__)
    /* adjust for reduction in colors due to clumping and multiple cores */
    vm_free_magazine_refill_limit *= (vm_clump_size * real_ncpus);
#endif
}
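/*
 * Illustrative example of the color computation above (numbers are
 * hypothetical): if the hardware reports 32 cache colors and, on x86_64,
 * vm_clump_shift is 2, the geometry is first reduced to 32 >> 2 = 8, so
 * vm_colors == 8 and vm_color_mask == 7; the refill limit then starts at
 * 8 * COLOR_GROUPS_TO_STEAL == 32 pages before the clump/CPU scaling.
 */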
/*
 * During single threaded early boot we don't initialize all pages.
 * This avoids some delay during boot. They'll be initialized and
 * added to the free list as needed or after we are multithreaded by
 * what becomes the pageout thread.
 */
static boolean_t fill = FALSE;
static unsigned int fillval;
uint_t vm_delayed_count = 0;         /* when non-zero, indicates we may have more pages to init */
ppnum_t delay_above_pnum = PPNUM_MAX;

/*
 * For x86, the first 8 GB initializes quickly and gives us lots of lowmem + mem above to start off with.
 * If ARM ever uses delayed page initialization, this value may need to be quite different.
 */
#define DEFAULT_DELAY_ABOVE_PHYS_GB (8)

/*
 * When we have to dip into more delayed pages due to low memory, free up
 * a large chunk to get things back to normal. This avoids contention on the
 * delayed code allocating page by page.
 */
#define VM_DELAY_PAGE_CHUNK ((1024 * 1024 * 1024) / PAGE_SIZE)
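/*
 * For scale (assuming 4 KB pages, which is an assumption and not stated
 * here): VM_DELAY_PAGE_CHUNK works out to 1 GB / 4 KB = 262144 pages
 * released per batch, and DEFAULT_DELAY_ABOVE_PHYS_GB (8) means pages
 * above the 8 GB physical mark are left uninitialized during early boot.
 */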
/*
 * Get and initialize the next delayed page.
 */
static vm_page_t
vm_get_delayed_page(int grab_options)
{
    vm_page_t p;
    ppnum_t   pnum;

    /*
     * Get a new page if we have one.
     */
    lck_mtx_lock(&vm_page_queue_free_lock);
    if (vm_delayed_count == 0) {
        lck_mtx_unlock(&vm_page_queue_free_lock);
        return VM_PAGE_NULL;
    }
    if (!pmap_next_page(&pnum)) {
        vm_delayed_count = 0;
        lck_mtx_unlock(&vm_page_queue_free_lock);
        return VM_PAGE_NULL;
    }

    assert(vm_delayed_count > 0);
    --vm_delayed_count;

#if defined(__x86_64__)
    /* x86 cluster code requires increasing phys_page in vm_pages[] */
    if (vm_pages_count > 0) {
        assert(pnum > vm_pages[vm_pages_count - 1].vmp_phys_page);
    }
#endif
    p = &vm_pages[vm_pages_count];
    assert(p < vm_page_array_ending_addr);
    vm_page_init(p, pnum, FALSE);
    ++vm_pages_count;

    lck_mtx_unlock(&vm_page_queue_free_lock);

    /*
     * These pages were initially counted as wired, undo that now.
     */
    if (grab_options & VM_PAGE_GRAB_Q_LOCK_HELD) {
        LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    } else {
        LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
        vm_page_lockspin_queues();
    }
    --vm_page_wire_count;
    --vm_page_wire_count_initial;
    if (vm_page_wire_count_on_boot != 0) {
        --vm_page_wire_count_on_boot;
    }
    if (!(grab_options & VM_PAGE_GRAB_Q_LOCK_HELD)) {
        vm_page_unlock_queues();
    }

    if (fill) {
        fillPage(pnum, fillval);
    }
    return p;
}
static void vm_page_module_init_delayed(void);

/*
 * Free all remaining delayed pages to the free lists.
 */
static void
vm_free_delayed_pages(void)
{
    vm_page_t   p;
    vm_page_t   list = NULL;
    uint_t      cnt = 0;
    vm_offset_t start_free_va;
    int64_t     free_size;

    while ((p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE)) != NULL) {
        if (vm_himemory_mode) {
            vm_page_release(p, FALSE);
        } else {
            p->vmp_snext = list;
            list = p;
        }
        ++cnt;
    }

    /*
     * Free the pages in reverse order if not himemory mode.
     * Hence the low memory pages will be first on free lists. (LIFO)
     */
    while (list != NULL) {
        p = list;
        list = p->vmp_snext;
        p->vmp_snext = NULL;
        vm_page_release(p, FALSE);
    }
#if DEVELOPMENT || DEBUG
    kprintf("vm_free_delayed_pages: initialized %d free pages\n", cnt);
#endif

    /*
     * Free up any unused full pages at the end of the vm_pages[] array
     */
    start_free_va = round_page((vm_offset_t)&vm_pages[vm_pages_count]);

#if defined(__x86_64__)
    /*
     * Since x86 might have used large pages for vm_pages[], we can't
     * free starting in the middle of a partially used large page.
     */
    if (pmap_query_pagesize(kernel_pmap, start_free_va) == I386_LPGBYTES) {
        start_free_va = ((start_free_va + I386_LPGMASK) & ~I386_LPGMASK);
    }
#endif
    if (start_free_va < (vm_offset_t)vm_page_array_ending_addr) {
        free_size = trunc_page((vm_offset_t)vm_page_array_ending_addr - start_free_va);
        if (free_size > 0) {
            ml_static_mfree(start_free_va, (vm_offset_t)free_size);
            vm_page_array_ending_addr = (void *)start_free_va;

            /*
             * Note there's no locking here, as only this thread will ever change this value.
             * The reader, vm_page_diagnose, doesn't grab any locks for the counts it looks at.
             */
            vm_page_stolen_count -= (free_size >> PAGE_SHIFT);

#if DEVELOPMENT || DEBUG
            kprintf("Freeing final unused %ld bytes from vm_pages[] at 0x%lx\n",
                (long)free_size, (long)start_free_va);
#endif
        }
    }

    /*
     * now we can create the VM page array zone
     */
    vm_page_module_init_delayed();
}
/*
 * Try and free up enough delayed pages to match a contig memory allocation.
 */
static void
vm_free_delayed_pages_contig(
    uint_t    npages,
    ppnum_t   max_pnum,
    ppnum_t   pnum_mask)
{
    vm_page_t p;
    ppnum_t   pnum;
    uint_t    cnt;

    /*
     * Treat 0 as the absolute max page number.
     */
    if (max_pnum == 0) {
        max_pnum = PPNUM_MAX;
    }

    /*
     * Free till we get a properly aligned start page
     */
    for (;;) {
        p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
        if (p == NULL) {
            return;
        }
        pnum = VM_PAGE_GET_PHYS_PAGE(p);
        vm_page_release(p, FALSE);
        if (pnum >= max_pnum) {
            return;
        }
        if ((pnum & pnum_mask) == 0) {
            break;
        }
    }

    /*
     * Having a healthy pool of free pages will help performance. We don't
     * want to fall back to the delayed code for every page allocation.
     */
    if (vm_page_free_count < VM_DELAY_PAGE_CHUNK) {
        npages += VM_DELAY_PAGE_CHUNK;
    }

    /*
     * Now free up the pages
     */
    for (cnt = 1; cnt < npages; ++cnt) {
        p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
        if (p == NULL) {
            return;
        }
        vm_page_release(p, FALSE);
    }
}
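/*
 * Usage note (illustrative, not original commentary): a caller that wants a
 * 2 MB physically contiguous range on 4 KB pages would pass npages = 512
 * and pnum_mask = 511, so the first loop above discards delayed pages until
 * it reaches a 512-page aligned physical page number before bulk-freeing
 * the rest.
 */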
#define ROUNDUP_NEXTP2(X) (1U << (32 - __builtin_clz((X) - 1)))

void
vm_page_init_local_q(unsigned int num_cpus)
{
    struct vpl *t_local_q;

    /*
     * no point in this for a uni-processor system
     */
    if (num_cpus >= 2) {
        ml_cpu_info_t cpu_info;

        /*
         * Force the allocation alignment to a cacheline,
         * because the `vpl` struct has a lock and will be taken
         * cross CPU so we want to isolate the rest of the per-CPU
         * data to avoid false sharing due to this lock being taken.
         */
        ml_cpu_get_info(&cpu_info);

        t_local_q = zalloc_percpu_permanent(sizeof(struct vpl),
            cpu_info.cache_line_size - 1);

        zpercpu_foreach(lq, t_local_q) {
            VPL_LOCK_INIT(lq, &vm_page_lck_grp_local, &vm_page_lck_attr);
            vm_page_queue_init(&lq->vpl_queue);
        }

        /* make the initialization visible to all cores */
        os_atomic_store(&vm_page_local_q, t_local_q, release);
    }
}
/*
 * vm_init_before_launchd
 *
 * This should be called right before launchd is loaded.
 */
void
vm_init_before_launchd(void)
{
    vm_page_lockspin_queues();
    vm_page_wire_count_on_boot = vm_page_wire_count;
    vm_page_unlock_queues();
}
/*
 *	vm_page_bootstrap:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 *	Returns the range of available kernel virtual memory.
 */
void
vm_page_bootstrap(
    vm_offset_t *startp,
    vm_offset_t *endp)
{
    unsigned int i;
    unsigned int log1;
    unsigned int log2;
    unsigned int size;

    /*
     *	Initialize the page queues.
     */

    lck_mtx_init_ext(&vm_page_queue_free_lock, &vm_page_queue_free_lock_ext, &vm_page_lck_grp_free, &vm_page_lck_attr);
    lck_mtx_init_ext(&vm_page_queue_lock, &vm_page_queue_lock_ext, &vm_page_lck_grp_queue, &vm_page_lck_attr);
    lck_mtx_init_ext(&vm_purgeable_queue_lock, &vm_purgeable_queue_lock_ext, &vm_page_lck_grp_purge, &vm_page_lck_attr);
    for (i = 0; i < PURGEABLE_Q_TYPE_MAX; i++) {
        int group;

        purgeable_queues[i].token_q_head = 0;
        purgeable_queues[i].token_q_tail = 0;
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            queue_init(&purgeable_queues[i].objq[group]);
        }

        purgeable_queues[i].type = i;
        purgeable_queues[i].new_pages = 0;
        purgeable_queues[i].debug_count_tokens = 0;
        purgeable_queues[i].debug_count_objects = 0;
    }
    purgeable_nonvolatile_count = 0;
    queue_init(&purgeable_nonvolatile_queue);
    for (i = 0; i < MAX_COLORS; i++) {
        vm_page_queue_init(&vm_page_queue_free[i].qhead);
    }

    vm_page_queue_init(&vm_lopage_queue_free);
    vm_page_queue_init(&vm_page_queue_active);
    vm_page_queue_init(&vm_page_queue_inactive);
#if CONFIG_SECLUDED_MEMORY
    vm_page_queue_init(&vm_page_queue_secluded);
#endif /* CONFIG_SECLUDED_MEMORY */
    vm_page_queue_init(&vm_page_queue_cleaned);
    vm_page_queue_init(&vm_page_queue_throttled);
    vm_page_queue_init(&vm_page_queue_anonymous);
    queue_init(&vm_objects_wired);

    for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
        vm_page_queue_init(&vm_page_queue_speculative[i].age_q);

        vm_page_queue_speculative[i].age_ts.tv_sec = 0;
        vm_page_queue_speculative[i].age_ts.tv_nsec = 0;
    }
#if CONFIG_BACKGROUND_QUEUE
    vm_page_queue_init(&vm_page_queue_background);

    vm_page_background_count = 0;
    vm_page_background_internal_count = 0;
    vm_page_background_external_count = 0;
    vm_page_background_promoted_count = 0;

    vm_page_background_target = (unsigned int)(atop_64(max_mem) / 25);

    if (vm_page_background_target > VM_PAGE_BACKGROUND_TARGET_MAX) {
        vm_page_background_target = VM_PAGE_BACKGROUND_TARGET_MAX;
    }

    vm_page_background_mode = VM_PAGE_BG_LEVEL_1;
    vm_page_background_exclude_external = 0;

    PE_parse_boot_argn("vm_page_bg_mode", &vm_page_background_mode, sizeof(vm_page_background_mode));
    PE_parse_boot_argn("vm_page_bg_exclude_external", &vm_page_background_exclude_external, sizeof(vm_page_background_exclude_external));
    PE_parse_boot_argn("vm_page_bg_target", &vm_page_background_target, sizeof(vm_page_background_target));

    if (vm_page_background_mode > VM_PAGE_BG_LEVEL_1) {
        vm_page_background_mode = VM_PAGE_BG_LEVEL_1;
    }
#endif /* CONFIG_BACKGROUND_QUEUE */
    vm_page_free_wanted = 0;
    vm_page_free_wanted_privileged = 0;
#if CONFIG_SECLUDED_MEMORY
    vm_page_free_wanted_secluded = 0;
#endif /* CONFIG_SECLUDED_MEMORY */
#if defined (__x86_64__)
    /* this must be called before vm_page_set_colors() */
    vm_page_setup_clump();
#endif /* defined (__x86_64__) */

    vm_page_set_colors();
    bzero(vm_page_inactive_states, sizeof(vm_page_inactive_states));
    vm_page_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
    vm_page_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
    vm_page_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;

    bzero(vm_page_pageable_states, sizeof(vm_page_pageable_states));
    vm_page_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
    vm_page_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
    vm_page_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
    vm_page_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
    vm_page_pageable_states[VM_PAGE_ON_SPECULATIVE_Q] = 1;
    vm_page_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
#if CONFIG_SECLUDED_MEMORY
    vm_page_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
#endif /* CONFIG_SECLUDED_MEMORY */

    bzero(vm_page_non_speculative_pageable_states, sizeof(vm_page_non_speculative_pageable_states));
    vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
    vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
    vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
    vm_page_non_speculative_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
    vm_page_non_speculative_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
#if CONFIG_SECLUDED_MEMORY
    vm_page_non_speculative_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
#endif /* CONFIG_SECLUDED_MEMORY */

    bzero(vm_page_active_or_inactive_states, sizeof(vm_page_active_or_inactive_states));
    vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
    vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
    vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
    vm_page_active_or_inactive_states[VM_PAGE_ON_ACTIVE_Q] = 1;
#if CONFIG_SECLUDED_MEMORY
    vm_page_active_or_inactive_states[VM_PAGE_ON_SECLUDED_Q] = 1;
#endif /* CONFIG_SECLUDED_MEMORY */
    for (vm_tag_t t = 0; t < VM_KERN_MEMORY_FIRST_DYNAMIC; t++) {
        vm_allocation_sites_static[t].refcount = 2;
        vm_allocation_sites_static[t].tag = t;
        vm_allocation_sites[t] = &vm_allocation_sites_static[t];
    }
    vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].refcount = 2;
    vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].tag = VM_KERN_MEMORY_ANY;
    vm_allocation_sites[VM_KERN_MEMORY_ANY] = &vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC];

    /*
     *	Steal memory for the map and zone subsystems.
     */
    kernel_startup_initialize_upto(STARTUP_SUB_PMAP_STEAL);
    /*
     *	Allocate (and initialize) the virtual-to-physical
     *	table hash buckets.
     *
     *	The number of buckets should be a power of two to
     *	get a good hash function.  The following computation
     *	chooses the first power of two that is greater
     *	than the number of physical pages in the system.
     */

    if (vm_page_bucket_count == 0) {
        unsigned int npages = pmap_free_pages();

        vm_page_bucket_count = 1;
        while (vm_page_bucket_count < npages) {
            vm_page_bucket_count <<= 1;
        }
    }
    vm_page_bucket_lock_count = (vm_page_bucket_count + BUCKETS_PER_LOCK - 1) / BUCKETS_PER_LOCK;

    vm_page_hash_mask = vm_page_bucket_count - 1;

    /*
     *	Calculate object shift value for hashing algorithm:
     *		O = log2(sizeof(struct vm_object))
     *		B = log2(vm_page_bucket_count)
     *	        hash shifts the object left by
     *		B/2 - O
     */
    size = vm_page_bucket_count;
    for (log1 = 0; size > 1; log1++) {
        size /= 2;
    }
    size = sizeof(struct vm_object);
    for (log2 = 0; size > 1; log2++) {
        size /= 2;
    }
    vm_page_hash_shift = log1 / 2 - log2 + 1;

    vm_page_bucket_hash = 1 << ((log1 + 1) >> 1);      /* Get (ceiling of sqrt of table size) */
    vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2);     /* Get (ceiling of quadroot of table size) */
    vm_page_bucket_hash |= 1;                          /* Set bit and add 1 - always must be 1 to insure unique series */

    if (vm_page_hash_mask & vm_page_bucket_count) {
        printf("vm_page_bootstrap: WARNING -- strange page hash\n");
    }
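    /*
     * Worked example of the sizing above (hypothetical numbers): on a
     * machine with ~1,000,000 physical pages the loop picks
     * vm_page_bucket_count = 2^20 = 1048576, so vm_page_hash_mask is
     * 0xFFFFF and log1 is 20; if sizeof(struct vm_object) were 256 bytes
     * (an assumption), log2 would be 8 and vm_page_hash_shift would be
     * 20/2 - 8 + 1 = 3.
     */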
#if VM_PAGE_BUCKETS_CHECK
#if VM_PAGE_FAKE_BUCKETS
    /*
     * Allocate a decoy set of page buckets, to detect
     * any stomping there.
     */
    vm_page_fake_buckets = (vm_page_bucket_t *)
        pmap_steal_memory(vm_page_bucket_count *
        sizeof(vm_page_bucket_t));
    vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets;
    vm_page_fake_buckets_end =
        vm_map_round_page((vm_page_fake_buckets_start +
        (vm_page_bucket_count *
        sizeof(vm_page_bucket_t))),
        PAGE_MASK);

    char *cp;
    for (cp = (char *)vm_page_fake_buckets_start;
        cp < (char *)vm_page_fake_buckets_end;
        cp++) {
        *cp = 0x5a;
    }
#endif /* VM_PAGE_FAKE_BUCKETS */
#endif /* VM_PAGE_BUCKETS_CHECK */
    kernel_debug_string_early("vm_page_buckets");
    vm_page_buckets = (vm_page_bucket_t *)
        pmap_steal_memory(vm_page_bucket_count *
        sizeof(vm_page_bucket_t));

    kernel_debug_string_early("vm_page_bucket_locks");
    vm_page_bucket_locks = (lck_spin_t *)
        pmap_steal_memory(vm_page_bucket_lock_count *
        sizeof(lck_spin_t));

    for (i = 0; i < vm_page_bucket_count; i++) {
        vm_page_bucket_t *bucket = &vm_page_buckets[i];

        bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
#if MACH_PAGE_HASH_STATS
        bucket->cur_count = 0;
        bucket->hi_count = 0;
#endif /* MACH_PAGE_HASH_STATS */
    }

    for (i = 0; i < vm_page_bucket_lock_count; i++) {
        lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr);
    }

#if VM_PAGE_BUCKETS_CHECK
    vm_page_buckets_check_ready = TRUE;
#endif /* VM_PAGE_BUCKETS_CHECK */
    /*
     *	Machine-dependent code allocates the resident page table.
     *	It uses vm_page_init to initialize the page frames.
     *	The code also returns to us the virtual space available
     *	to the kernel.  We don't trust the pmap module
     *	to get the alignment right.
     */

    kernel_debug_string_early("pmap_startup");
    pmap_startup(&virtual_space_start, &virtual_space_end);
    virtual_space_start = round_page(virtual_space_start);
    virtual_space_end = trunc_page(virtual_space_end);

    *startp = virtual_space_start;
    *endp = virtual_space_end;

    /*
     *	Compute the initial "wire" count.
     *	Up until now, the pages which have been set aside are not under
     *	the VM system's control, so although they aren't explicitly
     *	wired, they nonetheless can't be moved. At this moment,
     *	all VM managed pages are "free", courtesy of pmap_startup.
     */
    assert((unsigned int) atop_64(max_mem) == atop_64(max_mem));
    vm_page_wire_count = ((unsigned int) atop_64(max_mem)) -
        vm_page_free_count - vm_lopage_free_count;
#if CONFIG_SECLUDED_MEMORY
    vm_page_wire_count -= vm_page_secluded_count;
#endif /* CONFIG_SECLUDED_MEMORY */
    vm_page_wire_count_initial = vm_page_wire_count;

    /* capture this for later use */
    booter_size = ml_get_booter_memory_size();

    printf("vm_page_bootstrap: %d free pages, %d wired pages, (up to %d of which are delayed free)\n",
        vm_page_free_count, vm_page_wire_count, vm_delayed_count);

    kernel_debug_string_early("vm_page_bootstrap complete");
}
#ifndef MACHINE_PAGES
/*
 * This is the early boot time allocator for data structures needed to bootstrap the VM system.
 * On x86 it will allocate large pages if size is sufficiently large. We don't need to do this
 * on ARM yet, due to the combination of a large base page size and smaller RAM devices.
 */
static void *
pmap_steal_memory_internal(
    vm_size_t size,
    boolean_t might_free)
{
    kern_return_t kr;
    vm_offset_t   addr;
    vm_offset_t   map_addr;
    ppnum_t       phys_page;

    /*
     * Size needs to be aligned to word size.
     */
    size = (size + sizeof(void *) - 1) & ~(sizeof(void *) - 1);

    /*
     * On the first call, get the initial values for virtual address space
     * and page align them.
     */
    if (virtual_space_start == virtual_space_end) {
        pmap_virtual_space(&virtual_space_start, &virtual_space_end);
        virtual_space_start = round_page(virtual_space_start);
        virtual_space_end = trunc_page(virtual_space_end);

#if defined(__x86_64__)
        /*
         * Release remaining unused section of preallocated KVA and the 4K page tables
         * that map it. This makes the VA available for large page mappings.
         */
        Idle_PTs_release(virtual_space_start, virtual_space_end);
#endif
    }

    /*
     * Allocate the virtual space for this request. On x86, we'll align to a large page
     * address if the size is big enough to back with at least 1 large page.
     */
#if defined(__x86_64__)
    if (size >= I386_LPGBYTES) {
        virtual_space_start = ((virtual_space_start + I386_LPGMASK) & ~I386_LPGMASK);
    }
#endif
    addr = virtual_space_start;
    virtual_space_start += size;

    //kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size);	/* (TEST/DEBUG) */
    /*
     * Allocate and map physical pages to back the new virtual space.
     */
    map_addr = round_page(addr);
    while (map_addr < addr + size) {
#if defined(__x86_64__)
        /*
         * Back with a large page if properly aligned on x86
         */
        if ((map_addr & I386_LPGMASK) == 0 &&
            map_addr + I386_LPGBYTES <= addr + size &&
            pmap_pre_expand_large(kernel_pmap, map_addr) == KERN_SUCCESS &&
            pmap_next_page_large(&phys_page) == KERN_SUCCESS) {
            kr = pmap_enter(kernel_pmap, map_addr, phys_page,
                VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
                VM_WIMG_USE_DEFAULT | VM_MEM_SUPERPAGE, FALSE);

            if (kr != KERN_SUCCESS) {
                panic("pmap_steal_memory: pmap_enter() large failed, new_addr=%#lx, phys_page=%u",
                    (unsigned long)map_addr, phys_page);
            }
            map_addr += I386_LPGBYTES;
            vm_page_wire_count += I386_LPGBYTES >> PAGE_SHIFT;
            vm_page_stolen_count += I386_LPGBYTES >> PAGE_SHIFT;
            vm_page_kern_lpage_count++;
            continue;
        }
#endif

        if (!pmap_next_page_hi(&phys_page, might_free)) {
            panic("pmap_steal_memory() size: 0x%llx\n", (uint64_t)size);
        }

#if defined(__x86_64__)
        pmap_pre_expand(kernel_pmap, map_addr);
#endif

        kr = pmap_enter(kernel_pmap, map_addr, phys_page,
            VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
            VM_WIMG_USE_DEFAULT, FALSE);

        if (kr != KERN_SUCCESS) {
            panic("pmap_steal_memory() pmap_enter failed, map_addr=%#lx, phys_page=%u",
                (unsigned long)map_addr, phys_page);
        }
        map_addr += PAGE_SIZE;

        /*
         * Account for newly stolen memory
         */
        vm_page_wire_count++;
        vm_page_stolen_count++;
    }

#if defined(__x86_64__)
    /*
     * The call with might_free is currently the last use of pmap_steal_memory*().
     * Notify the pmap layer to record which high pages were allocated so far.
     */
    if (might_free) {
        pmap_hi_pages_done();
    }
#endif
#if KASAN
    kasan_notify_address(round_page(addr), size);
#endif
    return (void *) addr;
}

void *
pmap_steal_memory(
    vm_size_t size)
{
    return pmap_steal_memory_internal(size, FALSE);
}

void *
pmap_steal_freeable_memory(
    vm_size_t size)
{
    return pmap_steal_memory_internal(size, TRUE);
}
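/*
 * Note on the word-size rounding in pmap_steal_memory_internal(): on an
 * LP64 kernel a request of, say, 13 bytes becomes (13 + 7) & ~7 = 16 bytes,
 * so every stolen chunk keeps pointer alignment for the next caller.
 */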
#if CONFIG_SECLUDED_MEMORY
/* boot-args to control secluded memory */
unsigned int secluded_mem_mb = 0;       /* # of MBs of RAM to seclude */
int secluded_for_iokit = 1;             /* IOKit can use secluded memory */
int secluded_for_apps = 1;              /* apps can use secluded memory */
int secluded_for_filecache = 2;         /* filecache can use secluded memory */
int secluded_for_fbdp = 0;

uint64_t secluded_shutoff_trigger = 0;
uint64_t secluded_shutoff_headroom = 150 * 1024 * 1024; /* original value from N56 */
#endif /* CONFIG_SECLUDED_MEMORY */
#if defined(__arm__) || defined(__arm64__)
extern void patch_low_glo_vm_page_info(void *, void *, uint32_t);
unsigned int vm_first_phys_ppnum = 0;
#endif /* defined(__arm__) || defined(__arm64__) */

void vm_page_release_startup(vm_page_t mem);

void
pmap_startup(
    vm_offset_t *startp,
    vm_offset_t *endp)
{
    unsigned int i, npages;
    ppnum_t      phys_page;
    uint64_t     mem_sz;
    uint64_t     start_ns;
    uint64_t     now_ns;
    uint_t       low_page_count = 0;

#if defined(__LP64__)
    /*
     * make sure we are aligned on a 64 byte boundary
     * for VM_PAGE_PACK_PTR (it clips off the low-order
     * 6 bits of the pointer)
     */
    if (virtual_space_start != virtual_space_end) {
        virtual_space_start = round_page(virtual_space_start);
    }
#endif
    /*
     * We calculate how many page frames we will have
     * and then allocate the page structures in one chunk.
     *
     * Note that the calculation here doesn't take into account
     * the memory needed to map what's being allocated, i.e. the page
     * table entries. So the actual number of pages we get will be
     * less than this. To do someday: include that in the computation.
     */
    mem_sz = pmap_free_pages() * (uint64_t)PAGE_SIZE;
    mem_sz += round_page(virtual_space_start) - virtual_space_start;   /* Account for any slop */
    npages = (uint_t)(mem_sz / (PAGE_SIZE + sizeof(*vm_pages)));       /* scaled to include the vm_page_ts */

    vm_pages = (vm_page_t) pmap_steal_freeable_memory(npages * sizeof *vm_pages);

    /*
     * Check if we want to initialize pages to a known value
     */
    if (PE_parse_boot_argn("fill", &fillval, sizeof(fillval))) {
        fill = TRUE;
    }
#if DEBUG
    /* This slows down booting the DEBUG kernel, particularly on
     * large memory systems, but is worthwhile in deterministically
     * trapping uninitialized memory usage.
     */
    if (!fill) {
        fill = TRUE;
        fillval = 0xDEB8F177;
    }
#endif
    if (fill) {
        kprintf("Filling vm_pages with pattern: 0x%x\n", fillval);
    }
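    /*
     * Rough scale of the npages estimate above (illustrative, assuming
     * 16 KB pages and a vm_page struct of about 80 bytes, both
     * assumptions): each managed page costs PAGE_SIZE + sizeof(*vm_pages)
     * bytes, so 8 GB of free memory yields roughly
     * 8 GB / (16384 + 80) ~= 521,000 vm_page entries rather than the
     * 524,288 raw page frames.
     */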
#if CONFIG_SECLUDED_MEMORY
    /*
     * Figure out how much secluded memory to have before we start
     * release pages to free lists.
     * The default, if specified nowhere else, is no secluded mem.
     */
    secluded_mem_mb = 0;
    if (max_mem > 1 * 1024 * 1024 * 1024) {
        /* default to 90MB for devices with > 1GB of RAM */
        secluded_mem_mb = 90;
    }
    /* override with value from device tree, if provided */
    PE_get_default("kern.secluded_mem_mb",
        &secluded_mem_mb, sizeof(secluded_mem_mb));
    /* override with value from boot-args, if provided */
    PE_parse_boot_argn("secluded_mem_mb",
        &secluded_mem_mb,
        sizeof(secluded_mem_mb));

    vm_page_secluded_target = (unsigned int)
        ((secluded_mem_mb * 1024ULL * 1024ULL) / PAGE_SIZE);
    PE_parse_boot_argn("secluded_for_iokit",
        &secluded_for_iokit,
        sizeof(secluded_for_iokit));
    PE_parse_boot_argn("secluded_for_apps",
        &secluded_for_apps,
        sizeof(secluded_for_apps));
    PE_parse_boot_argn("secluded_for_filecache",
        &secluded_for_filecache,
        sizeof(secluded_for_filecache));
    PE_parse_boot_argn("secluded_for_fbdp",
        &secluded_for_fbdp,
        sizeof(secluded_for_fbdp));

    /*
     * Allow a really large app to effectively use secluded memory until it exits.
     */
    if (vm_page_secluded_target != 0) {
        /*
         * Get an amount from boot-args, else use 1/2 of max_mem.
         * 1/2 max_mem was chosen from a Peace daemon tentpole test which
         * used munch to induce jetsam thrashing of false idle daemons on N56.
         */
        int secluded_shutoff_mb;
        if (PE_parse_boot_argn("secluded_shutoff_mb", &secluded_shutoff_mb,
            sizeof(secluded_shutoff_mb))) {
            secluded_shutoff_trigger = (uint64_t)secluded_shutoff_mb * 1024 * 1024;
        } else {
            secluded_shutoff_trigger = max_mem / 2;
        }

        /* ensure the headroom value is sensible and avoid underflows */
        assert(secluded_shutoff_trigger == 0 || secluded_shutoff_trigger > secluded_shutoff_headroom);
    }
#endif /* CONFIG_SECLUDED_MEMORY */
#if defined(__x86_64__)
    /*
     * Decide how much memory we delay freeing at boot time.
     */
    uint32_t delay_above_gb;
    if (!PE_parse_boot_argn("delay_above_gb", &delay_above_gb, sizeof(delay_above_gb))) {
        delay_above_gb = DEFAULT_DELAY_ABOVE_PHYS_GB;
    }

    if (delay_above_gb == 0) {
        delay_above_pnum = PPNUM_MAX;
    } else {
        delay_above_pnum = delay_above_gb * (1024 * 1024 * 1024 / PAGE_SIZE);
    }

    /* make sure we have sane breathing room: 1G above low memory */
    if (delay_above_pnum <= max_valid_low_ppnum) {
        delay_above_pnum = max_valid_low_ppnum + ((1024 * 1024 * 1024) >> PAGE_SHIFT);
    }

    if (delay_above_pnum < PPNUM_MAX) {
        printf("pmap_startup() delaying init/free of page nums > 0x%x\n", delay_above_pnum);
    }
#endif /* defined(__x86_64__) */
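    /*
     * Example of the delay threshold above (assuming 4 KB pages): the
     * default delay_above_gb of 8 gives delay_above_pnum =
     * 8 * (1 GB / 4 KB) = 2097152 (0x200000), i.e. physical pages above
     * 8 GB are deferred; the "breathing room" check then guarantees at
     * least 1 GB worth of page numbers above max_valid_low_ppnum.
     */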
    /*
     * Initialize and release the page frames.
     */
    kernel_debug_string_early("page_frame_init");

    vm_page_array_beginning_addr = &vm_pages[0];
    vm_page_array_ending_addr = &vm_pages[npages];  /* used by ptr packing/unpacking code */
#if VM_PAGE_PACKED_FROM_ARRAY
    if (npages >= VM_PAGE_PACKED_FROM_ARRAY) {
        panic("pmap_startup(): too many pages to support vm_page packing");
    }
#endif

    vm_delayed_count = 0;

    absolutetime_to_nanoseconds(mach_absolute_time(), &start_ns);
    for (i = 0; i < npages; i++) {
        /* Did we run out of pages? */
        if (!pmap_next_page(&phys_page)) {
            break;
        }

        if (phys_page < max_valid_low_ppnum) {
            ++low_page_count;
        }

        /* Are we at high enough pages to delay the rest? */
        if (low_page_count > vm_lopage_free_limit && phys_page > delay_above_pnum) {
            vm_delayed_count = pmap_free_pages();
            break;
        }

#if defined(__arm__) || defined(__arm64__)
        if (i == 0) {
            vm_first_phys_ppnum = phys_page;
            patch_low_glo_vm_page_info((void *)vm_page_array_beginning_addr,
                (void *)vm_page_array_ending_addr, vm_first_phys_ppnum);
        }
        assert((i + vm_first_phys_ppnum) == phys_page);
#endif /* defined(__arm__) || defined(__arm64__) */

#if defined(__x86_64__)
        /* The x86 clump freeing code requires increasing ppn's to work correctly */
        if (i > 0) {
            assert(phys_page > vm_pages[i - 1].vmp_phys_page);
        }
#endif
        vm_page_init(&vm_pages[i], phys_page, FALSE);
        if (fill) {
            fillPage(phys_page, fillval);
        }
        if (vm_himemory_mode) {
            vm_page_release_startup(&vm_pages[i]);
        }
    }
    vm_pages_count = i;
    vm_page_pages = vm_pages_count;   /* used to report to user space */

    if (!vm_himemory_mode) {
        do {
            vm_page_release_startup(&vm_pages[--i]);
        } while (i != 0);
    }

    absolutetime_to_nanoseconds(mach_absolute_time(), &now_ns);
    printf("pmap_startup() init/release time: %lld microsec\n", (now_ns - start_ns) / NSEC_PER_USEC);
    printf("pmap_startup() delayed init/release of %d pages\n", vm_delayed_count);

#if defined(__LP64__)
    if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0]))) != &vm_pages[0]) {
        panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]);
    }
    if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count - 1]))) != &vm_pages[vm_pages_count - 1]) {
        panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count - 1]);
    }
#endif

    VM_CHECK_MEMORYSTATUS;

    /*
     * We have to re-align virtual_space_start,
     * because pmap_steal_memory has been using it.
     */
    virtual_space_start = round_page(virtual_space_start);
    *startp = virtual_space_start;
    *endp = virtual_space_end;
}
#endif  /* MACHINE_PAGES */
/*
 * Create the zone that represents the vm_pages[] array. Nothing ever allocates
 * or frees to this zone. It's just here for reporting purposes via zprint command.
 * This needs to be done after all initially delayed pages are put on the free lists.
 */
static void
vm_page_module_init_delayed(void)
{
    (void)zone_create_ext("vm pages array", sizeof(struct vm_page),
        ZC_NOGZALLOC, ZONE_ID_ANY, ^(zone_t z) {
        uint64_t vm_page_zone_pages, vm_page_array_zone_data_size;

        zone_set_exhaustible(z, 0);
        /*
         * Reflect size and usage information for vm_pages[].
         */
        z->countavail = (uint32_t)(vm_page_array_ending_addr - vm_pages);
        z->countfree = z->countavail - vm_pages_count;
        zpercpu_get_cpu(z->z_stats, 0)->zs_mem_allocated =
        vm_pages_count * sizeof(struct vm_page);
        vm_page_array_zone_data_size = (uintptr_t)((void *)vm_page_array_ending_addr - (void *)vm_pages);
        vm_page_zone_pages = atop(round_page((vm_offset_t)vm_page_array_zone_data_size));
        z->page_count += vm_page_zone_pages;
        /* since zone accounts for these, take them out of stolen */
        VM_PAGE_MOVE_STOLEN(vm_page_zone_pages);
    });
}
/*
 * Create the vm_pages zone. This is used for the vm_page structures for the pages
 * that are scavenged from other boot time usages by ml_static_mfree(). As such,
 * this needs to happen in early VM bootstrap.
 */
static void
vm_page_module_init(void)
{
    vm_size_t vm_page_with_ppnum_size;

    /*
     * Since the pointers to elements in this zone will be packed, they
     * must have appropriate size. Not strictly what sizeof() reports.
     */
    vm_page_with_ppnum_size =
        (sizeof(struct vm_page_with_ppnum) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) &
        ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1);

    vm_page_zone = zone_create_ext("vm pages", vm_page_with_ppnum_size,
        ZC_ALLOW_FOREIGN | ZC_NOGZALLOC | ZC_ALIGNMENT_REQUIRED |
        ZC_NOCALLOUT, ZONE_ID_ANY, ^(zone_t z) {
#if defined(__LP64__)
        zone_set_submap_idx(z, Z_SUBMAP_IDX_VA_RESTRICTED_MAP);
#endif
        zone_set_exhaustible(z, 0);
    });
}
STARTUP(ZALLOC, STARTUP_RANK_SECOND, vm_page_module_init);
/*
 *	Routine:	vm_page_create
 *	Purpose:
 *		After the VM system is up, machine-dependent code
 *		may stumble across more physical memory.  For example,
 *		memory that it was reserving for a frame buffer.
 *		vm_page_create turns this memory into available pages.
 */
void
vm_page_create(
    ppnum_t start,
    ppnum_t end)
{
    vm_page_t m;
    ppnum_t   phys_page;

    for (phys_page = start;
        phys_page < end;
        phys_page++) {
        while ((m = (vm_page_t) vm_page_grab_fictitious_common(phys_page))
            == VM_PAGE_NULL) {
            vm_page_more_fictitious();
        }

        m->vmp_fictitious = FALSE;
        pmap_clear_noencrypt(phys_page);

        lck_mtx_lock(&vm_page_queue_free_lock);
        vm_page_pages++;
        lck_mtx_unlock(&vm_page_queue_free_lock);
        vm_page_release(m, FALSE);
    }
}
/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:	The bucket count must be a power of 2
 */
#define vm_page_hash(object, offset) (\
	( (natural_t)((uintptr_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
	& vm_page_hash_mask)
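/*
 * Sketch of how the hash mixes its inputs (illustrative commentary): the
 * object pointer is multiplied by vm_page_bucket_hash (an odd constant set
 * up in vm_page_bootstrap), the page index atop_64(offset) is XORed with
 * the same constant, and the sum is masked down to a bucket index with
 * vm_page_hash_mask, so pages of one object at different offsets spread
 * across different buckets.
 */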
/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/object-page
 *	table and object list.
 *
 *	The object must be locked.
 */
void
vm_page_insert(
    vm_page_t          mem,
    vm_object_t        object,
    vm_object_offset_t offset)
{
    vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, FALSE, FALSE, NULL);
}

void
vm_page_insert_wired(
    vm_page_t          mem,
    vm_object_t        object,
    vm_object_offset_t offset,
    vm_tag_t           tag)
{
    vm_page_insert_internal(mem, object, offset, tag, FALSE, TRUE, FALSE, FALSE, NULL);
}
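/*
 * Reading note (not original commentary): in the two wrappers above the
 * trailing arguments map onto vm_page_insert_internal()'s queues_lock_held,
 * insert_in_hash, batch_pmap_op, batch_accounting and delayed_ledger_update
 * parameters, so both wrappers insert into the hash (TRUE) with no queue
 * lock held and no batching.
 */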
void
vm_page_insert_internal(
    vm_page_t          mem,
    vm_object_t        object,
    vm_object_offset_t offset,
    vm_tag_t           tag,
    boolean_t          queues_lock_held,
    boolean_t          insert_in_hash,
    boolean_t          batch_pmap_op,
    boolean_t          batch_accounting,
    uint64_t           *delayed_ledger_update)
{
    vm_page_bucket_t *bucket;
    lck_spin_t       *bucket_lock;
    int               hash_id;
    task_t            owner;
    int               ledger_idx_volatile;
    int               ledger_idx_nonvolatile;
    int               ledger_idx_volatile_compressed;
    int               ledger_idx_nonvolatile_compressed;
    boolean_t         do_footprint;

    /*
     * we may not hold the page queue lock
     * so this check isn't safe to make
     */

    assertf(page_aligned(offset), "0x%llx\n", offset);
    assert(!VM_PAGE_WIRED(mem) || mem->vmp_private || mem->vmp_fictitious || (tag != VM_KERN_MEMORY_NONE));

    /* the vm_submap_object is only a placeholder for submaps */
    assert(object != vm_submap_object);

    vm_object_lock_assert_exclusive(object);
    LCK_MTX_ASSERT(&vm_page_queue_lock,
        queues_lock_held ? LCK_MTX_ASSERT_OWNED
        : LCK_MTX_ASSERT_NOTOWNED);

    if (queues_lock_held == FALSE) {
        assert(!VM_PAGE_PAGEABLE(mem));
    }

    if (insert_in_hash == TRUE) {
#if DEBUG || VM_PAGE_BUCKETS_CHECK
        if (mem->vmp_tabled || mem->vmp_object) {
            panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
                "already in (obj=%p,off=0x%llx)",
                mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
        }
#endif
        if (object->internal && (offset >= object->vo_size)) {
            panic("vm_page_insert_internal: (page=%p,obj=%p,off=0x%llx,size=0x%llx) inserted at offset past object bounds",
                mem, object, offset, object->vo_size);
        }

        assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);

        /*
         * Record the object/offset pair in this page
         */
        mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
        mem->vmp_offset = offset;

#if CONFIG_SECLUDED_MEMORY
        if (object->eligible_for_secluded) {
            vm_page_secluded.eligible_for_secluded++;
        }
#endif /* CONFIG_SECLUDED_MEMORY */

        /*
         * Insert it into the object_object/offset hash table
         */
        hash_id = vm_page_hash(object, offset);
        bucket = &vm_page_buckets[hash_id];
        bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];

        lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);

        mem->vmp_next_m = bucket->page_list;
        bucket->page_list = VM_PAGE_PACK_PTR(mem);
        assert(mem == (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)));

#if MACH_PAGE_HASH_STATS
        if (++bucket->cur_count > bucket->hi_count) {
            bucket->hi_count = bucket->cur_count;
        }
#endif /* MACH_PAGE_HASH_STATS */
        mem->vmp_hashed = TRUE;
        lck_spin_unlock(bucket_lock);
    }
    {
        unsigned int cache_attr;

        cache_attr = object->wimg_bits & VM_WIMG_MASK;

        if (cache_attr != VM_WIMG_USE_DEFAULT) {
            PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op);
        }
    }
    /*
     *	Now link into the object's list of backed pages.
     */
    vm_page_queue_enter(&object->memq, mem, vmp_listq);
    object->memq_hint = mem;
    mem->vmp_tabled = TRUE;

    /*
     *	Show that the object has one more resident page.
     */
    object->resident_page_count++;
    if (VM_PAGE_WIRED(mem)) {
        assert(mem->vmp_wire_count > 0);
        VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
        VM_OBJECT_WIRED_PAGE_ADD(object, mem);
        VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
    }
    assert(object->resident_page_count >= object->wired_page_count);
#if DEVELOPMENT || DEBUG
    if (object->object_is_shared_cache &&
        object->pager != NULL &&
        object->pager->mo_pager_ops == &shared_region_pager_ops) {
        int new, old;

        assert(!object->internal);
        new = OSAddAtomic(+1, &shared_region_pagers_resident_count);
        do {
            old = shared_region_pagers_resident_peak;
        } while (old < new &&
            !OSCompareAndSwap(old, new, &shared_region_pagers_resident_peak));
    }
#endif /* DEVELOPMENT || DEBUG */

    if (batch_accounting == FALSE) {
        if (object->internal) {
            OSAddAtomic(1, &vm_page_internal_count);
        } else {
            OSAddAtomic(1, &vm_page_external_count);
        }
    }
1871 * It wouldn't make sense to insert a "reusable" page in
1872 * an object (the page would have been marked "reusable" only
1873 * at the time of a madvise(MADV_FREE_REUSABLE) if it was already
1874 * in the object at that time).
1875 * But a page could be inserted in a "all_reusable" object, if
1876 * something faults it in (a vm_read() from another task or a
1877 * "use-after-free" issue in user space, for example). It can
1878 * also happen if we're relocating a page from that object to
1879 * a different physical page during a physically-contiguous
1882 assert(!mem
->vmp_reusable
);
1883 if (object
->all_reusable
) {
1884 OSAddAtomic(+1, &vm_page_stats_reusable
.reusable_count
);
1887 if (object
->purgable
== VM_PURGABLE_DENY
&&
1888 !object
->vo_ledger_tag
) {
1891 owner
= VM_OBJECT_OWNER(object
);
1892 vm_object_ledger_tag_ledgers(object
,
1893 &ledger_idx_volatile
,
1894 &ledger_idx_nonvolatile
,
1895 &ledger_idx_volatile_compressed
,
1896 &ledger_idx_nonvolatile_compressed
,
1900 (object
->purgable
== VM_PURGABLE_NONVOLATILE
||
1901 object
->purgable
== VM_PURGABLE_DENY
||
1902 VM_PAGE_WIRED(mem
))) {
1903 if (delayed_ledger_update
) {
1904 *delayed_ledger_update
+= PAGE_SIZE
;
1906 /* more non-volatile bytes */
1907 ledger_credit(owner
->ledger
,
1908 ledger_idx_nonvolatile
,
1911 /* more footprint */
1912 ledger_credit(owner
->ledger
,
1913 task_ledgers
.phys_footprint
,
1918 (object
->purgable
== VM_PURGABLE_VOLATILE
||
1919 object
->purgable
== VM_PURGABLE_EMPTY
)) {
1920 assert(!VM_PAGE_WIRED(mem
));
1921 /* more volatile bytes */
1922 ledger_credit(owner
->ledger
,
1923 ledger_idx_volatile
,
1927 if (object
->purgable
== VM_PURGABLE_VOLATILE
) {
1928 if (VM_PAGE_WIRED(mem
)) {
1929 OSAddAtomic(+1, &vm_page_purgeable_wired_count
);
1931 OSAddAtomic(+1, &vm_page_purgeable_count
);
1933 } else if (object
->purgable
== VM_PURGABLE_EMPTY
&&
1934 mem
->vmp_q_state
== VM_PAGE_ON_THROTTLED_Q
) {
1936 * This page belongs to a purged VM object but hasn't
1937 * been purged (because it was "busy").
1938 * It's in the "throttled" queue and hence not
1939 * visible to vm_pageout_scan(). Move it to a pageable
1940 * queue, so that it can eventually be reclaimed, instead
1941 * of lingering in the "empty" object.
1943 if (queues_lock_held
== FALSE
) {
1944 vm_page_lockspin_queues();
1946 vm_page_deactivate(mem
);
1947 if (queues_lock_held
== FALSE
) {
1948 vm_page_unlock_queues();
1952 #if VM_OBJECT_TRACKING_OP_MODIFIED
1953 if (vm_object_tracking_inited
&&
1955 object
->resident_page_count
== 0 &&
1956 object
->pager
== NULL
&&
1957 object
->shadow
!= NULL
&&
1958 object
->shadow
->copy
== object
) {
1959 void *bt
[VM_OBJECT_TRACKING_BTDEPTH
];
1962 numsaved
= OSBacktrace(bt
, VM_OBJECT_TRACKING_BTDEPTH
);
1963 btlog_add_entry(vm_object_tracking_btlog
,
1965 VM_OBJECT_TRACKING_OP_MODIFIED
,
1969 #endif /* VM_OBJECT_TRACKING_OP_MODIFIED */
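/*
 * Illustrative sketch, not part of this module: the hash insertion above uses
 * striped locking -- the bucket index comes from vm_page_hash(), and the spin
 * lock that guards that bucket comes from dividing the same hash by
 * BUCKETS_PER_LOCK.  The helper name below is hypothetical.
 */
#if 0
static lck_spin_t *
example_bucket_lock_for(vm_object_t object, vm_object_offset_t offset)
{
	int hash_id = vm_page_hash(object, offset);

	/* one lock covers BUCKETS_PER_LOCK consecutive buckets */
	return &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
}
#endif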
/*
 *	Exactly like vm_page_insert, except that we first
 *	remove any existing page at the given offset in object.
 *
 *	The object must be locked.
 */
void
vm_page_replace(
	vm_page_t               mem,
	vm_object_t             object,
	vm_object_offset_t      offset)
{
	vm_page_bucket_t *bucket;
	vm_page_t       found_m = VM_PAGE_NULL;
	lck_spin_t      *bucket_lock;
	int             hash_id;

	/*
	 * we don't hold the page queue lock
	 * so this check isn't safe to make
	 */

	vm_object_lock_assert_exclusive(object);
#if DEBUG || VM_PAGE_BUCKETS_CHECK
	if (mem->vmp_tabled || mem->vmp_object) {
		panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
		    "already in (obj=%p,off=0x%llx)",
		    mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
	}
#endif
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);

	assert(!VM_PAGE_PAGEABLE(mem));

	/*
	 *	Record the object/offset pair in this page
	 */
	mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
	mem->vmp_offset = offset;

	/*
	 *	Insert it into the object_object/offset hash table,
	 *	replacing any page that might have been there.
	 */
	hash_id = vm_page_hash(object, offset);
	bucket = &vm_page_buckets[hash_id];
	bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];

	lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);

	if (bucket->page_list) {
		vm_page_packed_t *mp = &bucket->page_list;
		vm_page_t m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp));

		do {
			/*
			 * compare packed object pointers
			 */
			if (m->vmp_object == mem->vmp_object && m->vmp_offset == offset) {
				/*
				 * Remove old page from hash list
				 */
				*mp = m->vmp_next_m;
				m->vmp_hashed = FALSE;
				m->vmp_next_m = VM_PAGE_PACK_PTR(NULL);

				found_m = m;
				break;
			}
			mp = &m->vmp_next_m;
		} while ((m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp))));

		mem->vmp_next_m = bucket->page_list;
	} else {
		mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
	}
	/*
	 * insert new page at head of hash list
	 */
	bucket->page_list = VM_PAGE_PACK_PTR(mem);
	mem->vmp_hashed = TRUE;

	lck_spin_unlock(bucket_lock);

	if (found_m) {
		/*
		 * there was already a page at the specified
		 * offset for this object... remove it from
		 * the object and free it back to the free list
		 */
		vm_page_free_unlocked(found_m, FALSE);
	}
	vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, FALSE, FALSE, FALSE, NULL);
}
/*
 *	vm_page_remove:		[ internal use only ]
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object must be locked.
 */
void
vm_page_remove(
	vm_page_t       mem,
	boolean_t       remove_from_hash)
{
	vm_page_bucket_t *bucket;
	vm_page_t       this;
	lck_spin_t      *bucket_lock;
	int             hash_id;
	task_t          owner;
	vm_object_t     m_object;
	int             ledger_idx_volatile;
	int             ledger_idx_nonvolatile;
	int             ledger_idx_volatile_compressed;
	int             ledger_idx_nonvolatile_compressed;
	boolean_t       do_footprint;

	m_object = VM_PAGE_OBJECT(mem);

	vm_object_lock_assert_exclusive(m_object);
	assert(mem->vmp_tabled);
	assert(!mem->vmp_cleaning);
	assert(!mem->vmp_laundry);

	if (VM_PAGE_PAGEABLE(mem)) {
		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	}
	/*
	 * we don't hold the page queue lock
	 * so this check isn't safe to make
	 */
	if (remove_from_hash == TRUE) {
		/*
		 *	Remove from the object_object/offset hash table
		 */
		hash_id = vm_page_hash(m_object, mem->vmp_offset);
		bucket = &vm_page_buckets[hash_id];
		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];

		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);

		if ((this = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list))) == mem) {
			/* optimize for common case */

			bucket->page_list = mem->vmp_next_m;
		} else {
			vm_page_packed_t        *prev;

			for (prev = &this->vmp_next_m;
			    (this = (vm_page_t)(VM_PAGE_UNPACK_PTR(*prev))) != mem;
			    prev = &this->vmp_next_m) {
				continue;
			}
			*prev = this->vmp_next_m;
		}
#if     MACH_PAGE_HASH_STATS
		bucket->cur_count--;
#endif /* MACH_PAGE_HASH_STATS */
		mem->vmp_hashed = FALSE;
		this->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
		lck_spin_unlock(bucket_lock);
	}
	/*
	 *	Now remove from the object's list of backed pages.
	 */

	vm_page_remove_internal(mem);

	/*
	 *	And show that the object has one fewer resident
	 *	page.
	 */

	assert(m_object->resident_page_count > 0);
	m_object->resident_page_count--;

#if DEVELOPMENT || DEBUG
	if (m_object->object_is_shared_cache &&
	    m_object->pager != NULL &&
	    m_object->pager->mo_pager_ops == &shared_region_pager_ops) {
		assert(!m_object->internal);
		OSAddAtomic(-1, &shared_region_pagers_resident_count);
	}
#endif /* DEVELOPMENT || DEBUG */

	if (m_object->internal) {
		assert(vm_page_internal_count);
		OSAddAtomic(-1, &vm_page_internal_count);
	} else {
		assert(vm_page_external_count);
		OSAddAtomic(-1, &vm_page_external_count);

		if (mem->vmp_xpmapped) {
			assert(vm_page_xpmapped_external_count);
			OSAddAtomic(-1, &vm_page_xpmapped_external_count);
		}
	}
	if (!m_object->internal &&
	    m_object->cached_list.next &&
	    m_object->cached_list.prev) {
		if (m_object->resident_page_count == 0) {
			vm_object_cache_remove(m_object);
		}
	}

	if (VM_PAGE_WIRED(mem)) {
		assert(mem->vmp_wire_count > 0);
		VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
		VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
		VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
	}
	assert(m_object->resident_page_count >=
	    m_object->wired_page_count);
	if (mem->vmp_reusable) {
		assert(m_object->reusable_page_count > 0);
		m_object->reusable_page_count--;
		assert(m_object->reusable_page_count <=
		    m_object->resident_page_count);
		mem->vmp_reusable = FALSE;
		OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
		vm_page_stats_reusable.reused_remove++;
	} else if (m_object->all_reusable) {
		OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
		vm_page_stats_reusable.reused_remove++;
	}

	if (m_object->purgable == VM_PURGABLE_DENY &&
	    !m_object->vo_ledger_tag) {
		owner = TASK_NULL;
	} else {
		owner = VM_OBJECT_OWNER(m_object);
		vm_object_ledger_tag_ledgers(m_object,
		    &ledger_idx_volatile,
		    &ledger_idx_nonvolatile,
		    &ledger_idx_volatile_compressed,
		    &ledger_idx_nonvolatile_compressed,
		    &do_footprint);
	}
	if (owner &&
	    (m_object->purgable == VM_PURGABLE_NONVOLATILE ||
	    m_object->purgable == VM_PURGABLE_DENY ||
	    VM_PAGE_WIRED(mem))) {
		/* less non-volatile bytes */
		ledger_debit(owner->ledger,
		    ledger_idx_nonvolatile,
		    PAGE_SIZE);
		if (do_footprint) {
			/* less footprint */
			ledger_debit(owner->ledger,
			    task_ledgers.phys_footprint,
			    PAGE_SIZE);
		}
	} else if (owner &&
	    (m_object->purgable == VM_PURGABLE_VOLATILE ||
	    m_object->purgable == VM_PURGABLE_EMPTY)) {
		assert(!VM_PAGE_WIRED(mem));
		/* less volatile bytes */
		ledger_debit(owner->ledger,
		    ledger_idx_volatile,
		    PAGE_SIZE);
	}
	if (m_object->purgable == VM_PURGABLE_VOLATILE) {
		if (VM_PAGE_WIRED(mem)) {
			assert(vm_page_purgeable_wired_count > 0);
			OSAddAtomic(-1, &vm_page_purgeable_wired_count);
		} else {
			assert(vm_page_purgeable_count > 0);
			OSAddAtomic(-1, &vm_page_purgeable_count);
		}
	}

	if (m_object->set_cache_attr == TRUE) {
		pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), 0);
	}

	mem->vmp_tabled = FALSE;
	mem->vmp_object = 0;
	mem->vmp_offset = (vm_object_offset_t) -1;
}
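/*
 * Illustrative sketch, not part of this module: the hash removal above is the
 * usual "keep a pointer to the previous link" unlink, done here on packed
 * pointers.  With a plain singly-linked list the same idea reads as below;
 * the type and helper names are hypothetical.
 */
#if 0
struct example_node {
	struct example_node *next;
};

static void
example_unlink(struct example_node **head, struct example_node *target)
{
	struct example_node **prev = head;

	/* walk until *prev refers to the node being removed */
	while (*prev != target) {
		prev = &(*prev)->next;
	}
	/* splice the node out of the chain */
	*prev = target->next;
}
#endif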
/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, VM_PAGE_NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

#define VM_PAGE_HASH_LOOKUP_THRESHOLD   10

#if DEBUG_VM_PAGE_LOOKUP

struct {
	uint64_t vpl_total;
	uint64_t vpl_empty_obj;
	uint64_t vpl_bucket_NULL;
	uint64_t vpl_hit_hint;
	uint64_t vpl_hit_hint_next;
	uint64_t vpl_hit_hint_prev;
	uint64_t vpl_fast;
	uint64_t vpl_slow;
	uint64_t vpl_hit;
	uint64_t vpl_miss;

	uint64_t vpl_fast_elapsed;
	uint64_t vpl_slow_elapsed;
} vm_page_lookup_stats __attribute__((aligned(8)));

#endif /* DEBUG_VM_PAGE_LOOKUP */
#define KDP_VM_PAGE_WALK_MAX    1000

vm_page_t
kdp_vm_page_lookup(
	vm_object_t             object,
	vm_object_offset_t      offset)
{
	vm_page_t       cur_page;
	int             num_traversed = 0;

	if (not_in_kdp) {
		panic("panic: kdp_vm_page_lookup done outside of kernel debugger");
	}

	vm_page_queue_iterate(&object->memq, cur_page, vmp_listq) {
		if (cur_page->vmp_offset == offset) {
			return cur_page;
		}
		num_traversed++;

		if (num_traversed >= KDP_VM_PAGE_WALK_MAX) {
			return VM_PAGE_NULL;
		}
	}

	return VM_PAGE_NULL;
}
vm_page_t
vm_page_lookup(
	vm_object_t             object,
	vm_object_offset_t      offset)
{
	vm_page_t       mem;
	vm_page_bucket_t *bucket;
	vm_page_queue_entry_t qe;
	lck_spin_t      *bucket_lock = NULL;
	int             hash_id;
#if DEBUG_VM_PAGE_LOOKUP
	uint64_t        start, elapsed;

	OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total);
#endif
	vm_object_lock_assert_held(object);
	assertf(page_aligned(offset), "offset 0x%llx\n", offset);

	if (object->resident_page_count == 0) {
#if DEBUG_VM_PAGE_LOOKUP
		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_empty_obj);
#endif
		return VM_PAGE_NULL;
	}

	mem = object->memq_hint;

	if (mem != VM_PAGE_NULL) {
		assert(VM_PAGE_OBJECT(mem) == object);

		if (mem->vmp_offset == offset) {
#if DEBUG_VM_PAGE_LOOKUP
			OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint);
#endif
			return mem;
		}
		qe = (vm_page_queue_entry_t)vm_page_queue_next(&mem->vmp_listq);

		if (!vm_page_queue_end(&object->memq, qe)) {
			vm_page_t       next_page;

			next_page = (vm_page_t)((uintptr_t)qe);
			assert(VM_PAGE_OBJECT(next_page) == object);

			if (next_page->vmp_offset == offset) {
				object->memq_hint = next_page; /* new hint */
#if DEBUG_VM_PAGE_LOOKUP
				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_next);
#endif
				return next_page;
			}
		}
		qe = (vm_page_queue_entry_t)vm_page_queue_prev(&mem->vmp_listq);

		if (!vm_page_queue_end(&object->memq, qe)) {
			vm_page_t prev_page;

			prev_page = (vm_page_t)((uintptr_t)qe);
			assert(VM_PAGE_OBJECT(prev_page) == object);

			if (prev_page->vmp_offset == offset) {
				object->memq_hint = prev_page; /* new hint */
#if DEBUG_VM_PAGE_LOOKUP
				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_prev);
#endif
				return prev_page;
			}
		}
	}
	/*
	 * Search the hash table for this object/offset pair
	 */
	hash_id = vm_page_hash(object, offset);
	bucket = &vm_page_buckets[hash_id];

	/*
	 * since we hold the object lock, we are guaranteed that no
	 * new pages can be inserted into this object... this in turn
	 * guarantees that the page we're looking for can't exist
	 * if the bucket it hashes to is currently NULL even when looked
	 * at outside the scope of the hash bucket lock... this is a
	 * really cheap optimization to avoid taking the lock
	 */
	if (!bucket->page_list) {
#if DEBUG_VM_PAGE_LOOKUP
		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_bucket_NULL);
#endif
		return VM_PAGE_NULL;
	}

#if DEBUG_VM_PAGE_LOOKUP
	start = mach_absolute_time();
#endif
	if (object->resident_page_count <= VM_PAGE_HASH_LOOKUP_THRESHOLD) {
		/*
		 * on average, it's roughly 3 times faster to run a short memq list
		 * than to take the spin lock and go through the hash list
		 */
		mem = (vm_page_t)vm_page_queue_first(&object->memq);

		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
			if (mem->vmp_offset == offset) {
				break;
			}

			mem = (vm_page_t)vm_page_queue_next(&mem->vmp_listq);
		}
		if (vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
			mem = NULL;
		}
	} else {
		vm_page_object_t packed_object;

		packed_object = VM_PAGE_PACK_OBJECT(object);

		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];

		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);

		for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
		    mem != VM_PAGE_NULL;
		    mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m))) {
			/*
			 * we don't hold the page queue lock
			 * so this check isn't safe to make
			 */
			if ((mem->vmp_object == packed_object) && (mem->vmp_offset == offset)) {
				break;
			}
		}
		lck_spin_unlock(bucket_lock);
	}

#if DEBUG_VM_PAGE_LOOKUP
	elapsed = mach_absolute_time() - start;

	if (bucket_lock) {
		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_slow);
		OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_slow_elapsed);
	} else {
		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_fast);
		OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_fast_elapsed);
	}
	if (mem != VM_PAGE_NULL) {
		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit);
	} else {
		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_miss);
	}
#endif
	if (mem != VM_PAGE_NULL) {
		assert(VM_PAGE_OBJECT(mem) == object);

		object->memq_hint = mem;
	}
	return mem;
}
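/*
 * Illustrative sketch, not part of this module: a typical caller holds the
 * object lock across vm_page_lookup() and inspects the page before dropping
 * the lock.  The helper name is hypothetical.
 */
#if 0
static void
example_lookup_usage(vm_object_t object, vm_object_offset_t offset)
{
	vm_page_t m;

	vm_object_lock(object);
	m = vm_page_lookup(object, offset);     /* VM_PAGE_NULL if absent */
	if (m != VM_PAGE_NULL) {
		/* ... operate on the page while the object stays locked ... */
	}
	vm_object_unlock(object);
}
#endif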
/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void
vm_page_rename(
	vm_page_t               mem,
	vm_object_t             new_object,
	vm_object_offset_t      new_offset)
{
	boolean_t       internal_to_external, external_to_internal;
	vm_tag_t        tag;
	vm_object_t     m_object;

	m_object = VM_PAGE_OBJECT(mem);

	assert(m_object != new_object);

	/*
	 *	Changes to mem->vmp_object require the page lock because
	 *	the pageout daemon uses that lock to get the object.
	 */
	vm_page_lockspin_queues();

	internal_to_external = FALSE;
	external_to_internal = FALSE;

	if (mem->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q) {
		/*
		 * it's much easier to get the vm_page_pageable_xxx accounting correct
		 * if we first move the page to the active queue... it's going to end
		 * up there anyway, and we don't do vm_page_rename's frequently enough
		 * for this to matter.
		 */
		vm_page_queues_remove(mem, FALSE);
		vm_page_activate(mem);
	}
	if (VM_PAGE_PAGEABLE(mem)) {
		if (m_object->internal && !new_object->internal) {
			internal_to_external = TRUE;
		}
		if (!m_object->internal && new_object->internal) {
			external_to_internal = TRUE;
		}
	}

	tag = m_object->wire_tag;
	vm_page_remove(mem, TRUE);
	vm_page_insert_internal(mem, new_object, new_offset, tag, TRUE, TRUE, FALSE, FALSE, NULL);

	if (internal_to_external) {
		vm_page_pageable_internal_count--;
		vm_page_pageable_external_count++;
	} else if (external_to_internal) {
		vm_page_pageable_external_count--;
		vm_page_pageable_internal_count++;
	}

	vm_page_unlock_queues();
}
/*
 *	vm_page_init:
 *
 *	Initialize the fields in a new page.
 *	This takes a structure with random values and initializes it
 *	so that it can be given to vm_page_release or vm_page_insert.
 */
void
vm_page_init(
	vm_page_t       mem,
	ppnum_t         phys_page,
	boolean_t       lopage)
{
	uintptr_t       *p;
	unsigned int    i;

	if ((phys_page != vm_page_fictitious_addr) && (phys_page != vm_page_guard_addr)) {
		if (!(pmap_valid_page(phys_page))) {
			panic("vm_page_init: non-DRAM phys_page 0x%x\n", phys_page);
		}
	}

	/*
	 * Initialize the fields of the vm_page. If adding any new fields to vm_page,
	 * try to use initial values which match 0. This minimizes the number of writes
	 * needed for boot-time initialization.
	 *
	 * Kernel bzero() isn't an inline yet, so do it by hand for performance.
	 */
	assert(VM_PAGE_NOT_ON_Q == 0);
	assert(sizeof(*mem) % sizeof(uintptr_t) == 0);
	for (p = (uintptr_t *)(void *)mem, i = sizeof(*mem) / sizeof(uintptr_t); i != 0; --i) {
		*p++ = 0;
	}
	mem->vmp_offset = (vm_object_offset_t)-1;
	mem->vmp_busy = TRUE;
	mem->vmp_lopage = lopage;

	VM_PAGE_SET_PHYS_PAGE(mem, phys_page);
#if 0
	/*
	 * we're leaving this turned off for now... currently pages
	 * come off the free list and are either immediately dirtied/referenced
	 * due to zero-fill or COW faults, or are used to read or write files...
	 * in the file I/O case, the UPL mechanism takes care of clearing
	 * the state of the HW ref/mod bits in a somewhat fragile way.
	 * Since we may change the way this works in the future (to toughen it up),
	 * I'm leaving this as a reminder of where these bits could get cleared
	 */

	/*
	 * make sure both the h/w referenced and modified bits are
	 * clear at this point... we are especially dependent on
	 * not finding a 'stale' h/w modified in a number of spots
	 * once this page goes back into use
	 */
	pmap_clear_refmod(phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
#endif
}
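/*
 * Illustrative sketch, not part of this module: the word-at-a-time loop above
 * has the same effect as a bzero() of the whole vm_page structure; it is only
 * open-coded for boot-time performance, per the comment in the function.  The
 * helper name is hypothetical.
 */
#if 0
static void
example_clear_page_struct(vm_page_t mem)
{
	bzero(mem, sizeof(*mem));
}
#endif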
/*
 *	vm_page_grab_fictitious:
 *
 *	Remove a fictitious page from the free list.
 *	Returns VM_PAGE_NULL if there are no free pages.
 */
int     c_vm_page_grab_fictitious = 0;
int     c_vm_page_grab_fictitious_failed = 0;
int     c_vm_page_release_fictitious = 0;
int     c_vm_page_more_fictitious = 0;

vm_page_t
vm_page_grab_fictitious_common(
	ppnum_t phys_addr)
{
	vm_page_t       m;

	if ((m = (vm_page_t)zalloc_noblock(vm_page_zone))) {
		vm_page_init(m, phys_addr, FALSE);
		m->vmp_fictitious = TRUE;

		c_vm_page_grab_fictitious++;
	} else {
		c_vm_page_grab_fictitious_failed++;
	}

	return m;
}

vm_page_t
vm_page_grab_fictitious(void)
{
	return vm_page_grab_fictitious_common(vm_page_fictitious_addr);
}
vm_page_t
vm_page_grab_guard(void)
{
	vm_page_t page;

	page = vm_page_grab_fictitious_common(vm_page_guard_addr);
	if (page != VM_PAGE_NULL) {
		OSAddAtomic(1, &vm_guard_count);
	}
	return page;
}
/*
 *	vm_page_release_fictitious:
 *
 *	Release a fictitious page to the zone pool
 */
void
vm_page_release_fictitious(
	vm_page_t m)
{
	assert((m->vmp_q_state == VM_PAGE_NOT_ON_Q) || (m->vmp_q_state == VM_PAGE_IS_WIRED));
	assert(m->vmp_fictitious);
	assert(VM_PAGE_GET_PHYS_PAGE(m) == vm_page_fictitious_addr ||
	    VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr);

	if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
		OSAddAtomic(-1, &vm_guard_count);
	}

	c_vm_page_release_fictitious++;

	zfree(vm_page_zone, m);
}
/*
 *	vm_page_more_fictitious:
 *
 *	Add more fictitious pages to the zone.
 *	Allowed to block. This routine is way intimate
 *	with the zones code, for several reasons:
 *	1. we need to carve some page structures out of physical
 *	   memory before zones work, so they _cannot_ come from
 *	   the zone restricted submap.
 *	2. the zone needs to be collectable in order to prevent
 *	   growth without bound. These structures are used by
 *	   the device pager (by the hundreds and thousands), as
 *	   private pages for pageout, and as blocking pages for
 *	   pagein. Temporary bursts in demand should not result in
 *	   permanent allocation of a resource.
 *	3. To smooth allocation humps, we allocate single pages
 *	   with kernel_memory_allocate(), and cram them into the
 *	   zone.
 */
void
vm_page_more_fictitious(void)
{
	vm_offset_t     addr;
	kern_return_t   retval;

	c_vm_page_more_fictitious++;

	/*
	 * Allocate a single page from the zone restricted submap. Do not wait
	 * if no physical pages are immediately available, and do not zero the
	 * space. We need our own blocking lock here to prevent having multiple,
	 * simultaneous requests from piling up on the zone restricted submap
	 * lock.
	 * Exactly one (of our) threads should be potentially waiting on the map
	 * lock. If winner is not vm-privileged, then the page allocation will
	 * fail, and it will temporarily block here in the vm_page_wait().
	 */
	lck_mtx_lock(&vm_page_alloc_lock);
	/*
	 * If another thread allocated space, just bail out now.
	 */
	if (os_atomic_load(&vm_page_zone->countfree, relaxed) > 5) {
		/*
		 * The number "5" is a small number that is larger than the
		 * number of fictitious pages that any single caller will
		 * attempt to allocate. Otherwise, a thread will attempt to
		 * acquire a fictitious page (vm_page_grab_fictitious), fail,
		 * release all of the resources and locks already acquired,
		 * and then call this routine. This routine finds the pages
		 * that the caller released, so fails to allocate new space.
		 * The process repeats infinitely. The largest known number
		 * of fictitious pages required in this manner is 2. 5 is
		 * simply a somewhat larger number.
		 */
		lck_mtx_unlock(&vm_page_alloc_lock);
		return;
	}

	retval = kernel_memory_allocate(zone_submap(vm_page_zone),
	    &addr, PAGE_SIZE, 0, KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
	    VM_KERN_MEMORY_ZONE);

	if (retval != KERN_SUCCESS) {
		/*
		 * No page was available. Drop the
		 * lock to give another thread a chance at it, and
		 * wait for the pageout daemon to make progress.
		 */
		lck_mtx_unlock(&vm_page_alloc_lock);
		vm_page_wait(THREAD_UNINT);
		return;
	}

	zcram(vm_page_zone, addr, PAGE_SIZE);

	lck_mtx_unlock(&vm_page_alloc_lock);
}
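/*
 * Illustrative sketch, not part of this module: callers that must have a
 * fictitious page pair vm_page_grab_fictitious() with vm_page_more_fictitious()
 * in a retry loop; that calling pattern is what the "more than 5 free
 * elements" early-exit above is guarding against.  The helper name is
 * hypothetical.
 */
#if 0
static vm_page_t
example_grab_fictitious_retry(void)
{
	vm_page_t m;

	while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL) {
		/* replenish the zone (may block) and try again */
		vm_page_more_fictitious();
	}
	return m;
}
#endif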
/*
 *	Return true if it is not likely that a non-vm_privileged thread
 *	can get memory without blocking.  Advisory only, since the
 *	situation may change under us.
 */
int
vm_pool_low(void)
{
	/* No locking, at worst we will fib. */
	return vm_page_free_count <= vm_page_free_reserved;
}
boolean_t vm_darkwake_mode = FALSE;

/*
 * vm_update_darkwake_mode():
 *
 * Tells the VM that the system is in / out of darkwake.
 *
 * Today, the VM only lowers/raises the background queue target
 * so as to favor consuming more/less background pages when
 * darkwake is ON/OFF.
 *
 * We might need to do more things in the future.
 */
void
vm_update_darkwake_mode(boolean_t darkwake_mode)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);

	vm_page_lockspin_queues();

	if (vm_darkwake_mode == darkwake_mode) {
		vm_page_unlock_queues();
		return;
	}

	vm_darkwake_mode = darkwake_mode;

	if (vm_darkwake_mode == TRUE) {
#if CONFIG_BACKGROUND_QUEUE

		/* save background target to restore later */
		vm_page_background_target_snapshot = vm_page_background_target;

		/* target is set to 0...no protection for background pages */
		vm_page_background_target = 0;

#endif /* CONFIG_BACKGROUND_QUEUE */
	} else if (vm_darkwake_mode == FALSE) {
#if CONFIG_BACKGROUND_QUEUE

		if (vm_page_background_target_snapshot) {
			vm_page_background_target = vm_page_background_target_snapshot;
		}
#endif /* CONFIG_BACKGROUND_QUEUE */
	}
	vm_page_unlock_queues();
}
#if CONFIG_BACKGROUND_QUEUE

void
vm_page_update_background_state(vm_page_t mem)
{
	if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
		return;
	}

	if (mem->vmp_in_background == FALSE) {
		return;
	}

	task_t  my_task = current_task();

	if (task_get_darkwake_mode(my_task)) {
		return;
	}

#if BACKGROUNDQ_BASED_ON_QOS
	if (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_QOS) <= THREAD_QOS_LEGACY) {
		return;
	}
#else
	if (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG)) {
		return;
	}
#endif
	vm_page_lockspin_queues();

	mem->vmp_in_background = FALSE;
	vm_page_background_promoted_count++;

	vm_page_remove_from_backgroundq(mem);

	vm_page_unlock_queues();
}
void
vm_page_assign_background_state(vm_page_t mem)
{
	if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
		return;
	}

	task_t  my_task = current_task();

	if (task_get_darkwake_mode(my_task)) {
		mem->vmp_in_background = TRUE;
		return;
	}

#if BACKGROUNDQ_BASED_ON_QOS
	if (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_QOS) <= THREAD_QOS_LEGACY) {
		mem->vmp_in_background = TRUE;
	} else {
		mem->vmp_in_background = FALSE;
	}
#else
	mem->vmp_in_background = proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG);
#endif
}
void
vm_page_remove_from_backgroundq(
	vm_page_t       mem)
{
	vm_object_t     m_object;

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	if (mem->vmp_on_backgroundq) {
		vm_page_queue_remove(&vm_page_queue_background, mem, vmp_backgroundq);

		mem->vmp_backgroundq.next = 0;
		mem->vmp_backgroundq.prev = 0;
		mem->vmp_on_backgroundq = FALSE;

		vm_page_background_count--;

		m_object = VM_PAGE_OBJECT(mem);

		if (m_object->internal) {
			vm_page_background_internal_count--;
		} else {
			vm_page_background_external_count--;
		}
	} else {
		assert(VM_PAGE_UNPACK_PTR(mem->vmp_backgroundq.next) == (uintptr_t)NULL &&
		    VM_PAGE_UNPACK_PTR(mem->vmp_backgroundq.prev) == (uintptr_t)NULL);
	}
}
void
vm_page_add_to_backgroundq(
	vm_page_t       mem,
	boolean_t       first)
{
	vm_object_t     m_object;

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
		return;
	}

	if (mem->vmp_on_backgroundq == FALSE) {
		m_object = VM_PAGE_OBJECT(mem);

		if (vm_page_background_exclude_external && !m_object->internal) {
			return;
		}

		if (first == TRUE) {
			vm_page_queue_enter_first(&vm_page_queue_background, mem, vmp_backgroundq);
		} else {
			vm_page_queue_enter(&vm_page_queue_background, mem, vmp_backgroundq);
		}
		mem->vmp_on_backgroundq = TRUE;

		vm_page_background_count++;

		if (m_object->internal) {
			vm_page_background_internal_count++;
		} else {
			vm_page_background_external_count++;
		}
	}
}

#endif /* CONFIG_BACKGROUND_QUEUE */
/*
 * This can be switched to FALSE to help debug drivers
 * that are having problems with memory > 4G.
 */
boolean_t       vm_himemory_mode = TRUE;

/*
 * this interface exists to support hardware controllers
 * incapable of generating DMAs with more than 32 bits
 * of address on platforms with physical memory > 4G...
 */
unsigned int            vm_lopages_allocated_q = 0;
unsigned int            vm_lopages_allocated_cpm_success = 0;
unsigned int            vm_lopages_allocated_cpm_failed = 0;
vm_page_queue_head_t    vm_lopage_queue_free VM_PAGE_PACKED_ALIGNED;
vm_page_t
vm_page_grablo(void)
{
	vm_page_t       mem;

	if (vm_lopage_needed == FALSE) {
		return vm_page_grab();
	}

	lck_mtx_lock_spin(&vm_page_queue_free_lock);

	if (!vm_page_queue_empty(&vm_lopage_queue_free)) {
		vm_page_queue_remove_first(&vm_lopage_queue_free, mem, vmp_pageq);
		assert(vm_lopage_free_count);
		assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;

		vm_lopage_free_count--;
		vm_lopages_allocated_q++;

		if (vm_lopage_free_count < vm_lopage_lowater) {
			vm_lopage_refill = TRUE;
		}

		lck_mtx_unlock(&vm_page_queue_free_lock);

#if CONFIG_BACKGROUND_QUEUE
		vm_page_assign_background_state(mem);
#endif
	} else {
		lck_mtx_unlock(&vm_page_queue_free_lock);

		if (cpm_allocate(PAGE_SIZE, &mem, atop(PPNUM_MAX), 0, FALSE, KMA_LOMEM) != KERN_SUCCESS) {
			lck_mtx_lock_spin(&vm_page_queue_free_lock);
			vm_lopages_allocated_cpm_failed++;
			lck_mtx_unlock(&vm_page_queue_free_lock);

			return VM_PAGE_NULL;
		}
		assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);

		mem->vmp_busy = TRUE;

		vm_page_lockspin_queues();

		mem->vmp_gobbled = FALSE;
		vm_page_gobble_count--;
		vm_page_wire_count--;

		vm_lopages_allocated_cpm_success++;
		vm_page_unlock_queues();
	}
	assert(mem->vmp_busy);
	assert(!mem->vmp_pmapped);
	assert(!mem->vmp_wpmapped);
	assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));

	VM_PAGE_ZERO_PAGEQ_ENTRY(mem);

	disable_preemption();
	*PERCPU_GET(vm_page_grab_count) += 1;
	VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, 0, 1, 0, 0);
	enable_preemption();

	return mem;
}
/*
 * vm_page_grab:
 *
 * first try to grab a page from the per-cpu free list...
 * this must be done while pre-emption is disabled... if
 * a page is available, we're done...
 * if no page is available, grab the vm_page_queue_free_lock
 * and see if current number of free pages would allow us
 * to grab at least 1... if not, return VM_PAGE_NULL as before...
 * if there are pages available, disable preemption and
 * recheck the state of the per-cpu free list... we could
 * have been preempted and moved to a different cpu, or
 * some other thread could have re-filled it... if still
 * empty, figure out how many pages we can steal from the
 * global free queue and move to the per-cpu queue...
 * return 1 of these pages when done... only wakeup the
 * pageout_scan thread if we moved pages from the global
 * list... no need for the wakeup if we've satisfied the
 * request from the per-cpu queue.
 */

#if CONFIG_SECLUDED_MEMORY
vm_page_t vm_page_grab_secluded(void);
#endif /* CONFIG_SECLUDED_MEMORY */

static void
vm_page_grab_diags(void);

vm_page_t
vm_page_grab(void)
{
	return vm_page_grab_options(VM_PAGE_GRAB_OPTIONS_NONE);
}

#if HIBERNATION
boolean_t       hibernate_rebuild_needed = FALSE;
#endif /* HIBERNATION */
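/*
 * Illustrative sketch, not part of this module: callers that are allowed to
 * block typically loop on vm_page_grab()/vm_page_wait() until a page shows
 * up.  The helper name is hypothetical.
 */
#if 0
static vm_page_t
example_grab_blocking(void)
{
	vm_page_t m;

	while ((m = vm_page_grab()) == VM_PAGE_NULL) {
		/* block until the pageout daemon frees something */
		vm_page_wait(THREAD_UNINT);
	}
	return m;
}
#endif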
vm_page_t
vm_page_grab_options(
	int grab_options)
{
	vm_page_t       mem;

	disable_preemption();

	if ((mem = *PERCPU_GET(free_pages))) {
return_page_from_cpu_list:
		assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);

#if HIBERNATION
		if (hibernate_rebuild_needed) {
			panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
		}
#endif /* HIBERNATION */

		vm_page_grab_diags();

		vm_offset_t pcpu_base = current_percpu_base();
		*PERCPU_GET_WITH_BASE(pcpu_base, vm_page_grab_count) += 1;
		*PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = mem->vmp_snext;
		VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);

		enable_preemption();
		VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;

		assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
		assert(mem->vmp_tabled == FALSE);
		assert(mem->vmp_object == 0);
		assert(!mem->vmp_laundry);
		ASSERT_PMAP_FREE(mem);
		assert(mem->vmp_busy);
		assert(!mem->vmp_pmapped);
		assert(!mem->vmp_wpmapped);
		assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));

#if CONFIG_BACKGROUND_QUEUE
		vm_page_assign_background_state(mem);
#endif
		return mem;
	}
	enable_preemption();

	/*
	 *	Optionally produce warnings if the wire or gobble
	 *	counts exceed some threshold.
	 */
#if VM_PAGE_WIRE_COUNT_WARNING
	if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) {
		printf("mk: vm_page_grab(): high wired page count of %d\n",
		    vm_page_wire_count);
	}
#endif
#if VM_PAGE_GOBBLE_COUNT_WARNING
	if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) {
		printf("mk: vm_page_grab(): high gobbled page count of %d\n",
		    vm_page_gobble_count);
	}
#endif

	/*
	 * If free count is low and we have delayed pages from early boot,
	 * get one of those instead.
	 */
	if (__improbable(vm_delayed_count > 0 &&
	    vm_page_free_count <= vm_page_free_target &&
	    (mem = vm_get_delayed_page(grab_options)) != NULL)) {
		return mem;
	}

	lck_mtx_lock_spin(&vm_page_queue_free_lock);

	/*
	 *	Only let privileged threads (involved in pageout)
	 *	dip into the reserved pool.
	 */
	if ((vm_page_free_count < vm_page_free_reserved) &&
	    !(current_thread()->options & TH_OPT_VMPRIV)) {
		/* no page for us in the free queue... */
		lck_mtx_unlock(&vm_page_queue_free_lock);
		mem = VM_PAGE_NULL;

#if CONFIG_SECLUDED_MEMORY
		/* ... but can we try and grab from the secluded queue? */
		if (vm_page_secluded_count > 0 &&
		    ((grab_options & VM_PAGE_GRAB_SECLUDED) ||
		    task_can_use_secluded_mem(current_task(), TRUE))) {
			mem = vm_page_grab_secluded();
			if (grab_options & VM_PAGE_GRAB_SECLUDED) {
				vm_page_secluded.grab_for_iokit++;
				if (mem) {
					vm_page_secluded.grab_for_iokit_success++;
				}
			}
			if (mem) {
				VM_CHECK_MEMORYSTATUS;

				disable_preemption();
				vm_page_grab_diags();
				*PERCPU_GET(vm_page_grab_count) += 1;
				VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
				enable_preemption();

				return mem;
			}
		}
#else /* CONFIG_SECLUDED_MEMORY */
		(void) grab_options;
#endif /* CONFIG_SECLUDED_MEMORY */
	} else {
		vm_page_t       head;
		vm_page_t       tail;
		unsigned int    pages_to_steal;
		unsigned int    color;
		unsigned int    clump_end, sub_count;

		while (vm_page_free_count == 0) {
			lck_mtx_unlock(&vm_page_queue_free_lock);
			/*
			 * must be a privileged thread to be
			 * in this state since a non-privileged
			 * thread would have bailed if we were
			 * under the vm_page_free_reserved mark
			 */
			vm_page_wait(THREAD_UNINT);
			lck_mtx_lock_spin(&vm_page_queue_free_lock);
		}

		disable_preemption();

		if ((mem = *PERCPU_GET(free_pages))) {
			lck_mtx_unlock(&vm_page_queue_free_lock);

			/*
			 * we got preempted and moved to another processor
			 * or we got preempted and someone else ran and filled the cache
			 */
			goto return_page_from_cpu_list;
		}
		if (vm_page_free_count <= vm_page_free_reserved) {
			pages_to_steal = 1;
		} else {
			if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved)) {
				pages_to_steal = vm_free_magazine_refill_limit;
			} else {
				pages_to_steal = (vm_page_free_count - vm_page_free_reserved);
			}
		}
		color = *PERCPU_GET(start_color);
		head = tail = NULL;

		vm_page_free_count -= pages_to_steal;
		clump_end = sub_count = 0;

		while (pages_to_steal--) {
			while (vm_page_queue_empty(&vm_page_queue_free[color].qhead)) {
				color = (color + 1) & vm_color_mask;
			}
#if defined(__x86_64__)
			vm_page_queue_remove_first_with_clump(&vm_page_queue_free[color].qhead,
			    mem, clump_end);
#else
			vm_page_queue_remove_first(&vm_page_queue_free[color].qhead,
			    mem, vmp_pageq);
#endif

			assert(mem->vmp_q_state == VM_PAGE_ON_FREE_Q);

			VM_PAGE_ZERO_PAGEQ_ENTRY(mem);

#if defined(__arm__) || defined(__arm64__)
			color = (color + 1) & vm_color_mask;
#else

#if DEVELOPMENT || DEBUG

			sub_count++;
			if (clump_end) {
				vm_clump_update_stats(sub_count);
				sub_count = 0;
				color = (color + 1) & vm_color_mask;
			}
#else
			if (clump_end) {
				color = (color + 1) & vm_color_mask;
			}

#endif /* if DEVELOPMENT || DEBUG */

#endif  /* if defined(__arm__) || defined(__arm64__) */

			if (head == NULL) {
				head = mem;
			} else {
				tail->vmp_snext = mem;
			}
			tail = mem;

			assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
			assert(mem->vmp_tabled == FALSE);
			assert(mem->vmp_object == 0);
			assert(!mem->vmp_laundry);

			mem->vmp_q_state = VM_PAGE_ON_FREE_LOCAL_Q;

			ASSERT_PMAP_FREE(mem);
			assert(mem->vmp_busy);
			assert(!mem->vmp_pmapped);
			assert(!mem->vmp_wpmapped);
			assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
		}
#if defined (__x86_64__) && (DEVELOPMENT || DEBUG)
		vm_clump_update_stats(sub_count);
#endif
		lck_mtx_unlock(&vm_page_queue_free_lock);

#if HIBERNATION
		if (hibernate_rebuild_needed) {
			panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
		}
#endif /* HIBERNATION */
		vm_offset_t pcpu_base = current_percpu_base();
		*PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = head->vmp_snext;
		*PERCPU_GET_WITH_BASE(pcpu_base, start_color) = color;

		/*
		 * satisfy this request
		 */
		vm_page_grab_diags();
		*PERCPU_GET_WITH_BASE(pcpu_base, vm_page_grab_count) += 1;
		VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
		mem = head;
		assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);

		VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;

		enable_preemption();
	}
	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark. VM Pageout Scan will keep running till
	 *	the free_count > free_target (& hence above free_min).
	 *	This wakeup is to catch the possibility of the counts
	 *	dropping between VM Pageout Scan parking and this check.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */
	if (vm_page_free_count < vm_page_free_min) {
		lck_mtx_lock(&vm_page_queue_free_lock);
		if (vm_pageout_running == FALSE) {
			lck_mtx_unlock(&vm_page_queue_free_lock);
			thread_wakeup((event_t) &vm_page_free_wanted);
		} else {
			lck_mtx_unlock(&vm_page_queue_free_lock);
		}
	}

	VM_CHECK_MEMORYSTATUS;

	if (mem) {
		//	dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 4);	/* (TEST/DEBUG) */

#if CONFIG_BACKGROUND_QUEUE
		vm_page_assign_background_state(mem);
#endif
	}
	return mem;
}
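/*
 * Illustrative sketch, not part of this module: the refill size used when the
 * per-cpu magazine is empty is simply the magazine refill limit, clipped so
 * the global free queue never dips below its reserve.  The helper name is
 * hypothetical.
 */
#if 0
static unsigned int
example_pages_to_steal(void)
{
	unsigned int avail;

	if (vm_page_free_count <= vm_page_free_reserved) {
		return 1;
	}
	avail = vm_page_free_count - vm_page_free_reserved;

	return (vm_free_magazine_refill_limit <= avail) ?
	       vm_free_magazine_refill_limit : avail;
}
#endif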
#if CONFIG_SECLUDED_MEMORY
vm_page_t
vm_page_grab_secluded(void)
{
	vm_page_t       mem;
	vm_object_t     object;
	int             refmod_state;

	if (vm_page_secluded_count == 0) {
		/* no secluded pages to grab... */
		return VM_PAGE_NULL;
	}

	/* secluded queue is protected by the VM page queue lock */
	vm_page_lock_queues();

	if (vm_page_secluded_count == 0) {
		/* no secluded pages to grab... */
		vm_page_unlock_queues();
		return VM_PAGE_NULL;
	}

	/* can we grab from the secluded queue? */
	if (vm_page_secluded_count > vm_page_secluded_target ||
	    (vm_page_secluded_count > 0 &&
	    task_can_use_secluded_mem(current_task(), TRUE))) {
		/* OK to grab */
	} else {
		/* can't grab from secluded queue... */
		vm_page_unlock_queues();
		return VM_PAGE_NULL;
	}

	/* we can grab a page from secluded queue! */
	assert((vm_page_secluded_count_free +
	    vm_page_secluded_count_inuse) ==
	    vm_page_secluded_count);
	if (current_task()->task_can_use_secluded_mem) {
		assert(num_tasks_can_use_secluded_mem > 0);
	}
	assert(!vm_page_queue_empty(&vm_page_queue_secluded));
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	mem = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
	assert(mem->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
	vm_page_queues_remove(mem, TRUE);

	object = VM_PAGE_OBJECT(mem);

	assert(!mem->vmp_fictitious);
	assert(!VM_PAGE_WIRED(mem));
	if (object == VM_OBJECT_NULL) {
		/* free for grab! */
		vm_page_unlock_queues();
		vm_page_secluded.grab_success_free++;

		assert(mem->vmp_busy);
		assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
		assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
		assert(mem->vmp_pageq.next == 0);
		assert(mem->vmp_pageq.prev == 0);
		assert(mem->vmp_listq.next == 0);
		assert(mem->vmp_listq.prev == 0);
#if CONFIG_BACKGROUND_QUEUE
		assert(mem->vmp_on_backgroundq == 0);
		assert(mem->vmp_backgroundq.next == 0);
		assert(mem->vmp_backgroundq.prev == 0);
#endif /* CONFIG_BACKGROUND_QUEUE */
		return mem;
	}

	assert(!object->internal);
//	vm_page_pageable_external_count--;

	if (!vm_object_lock_try(object)) {
//		printf("SECLUDED: page %p: object %p locked\n", mem, object);
		vm_page_secluded.grab_failure_locked++;
reactivate_secluded_page:
		vm_page_activate(mem);
		vm_page_unlock_queues();
		return VM_PAGE_NULL;
	}
	if (mem->vmp_busy ||
	    mem->vmp_cleaning ||
	    mem->vmp_laundry) {
		/* can't steal page in this state... */
		vm_object_unlock(object);
		vm_page_secluded.grab_failure_state++;
		goto reactivate_secluded_page;
	}

	mem->vmp_busy = TRUE;
	refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
	if (refmod_state & VM_MEM_REFERENCED) {
		mem->vmp_reference = TRUE;
	}
	if (refmod_state & VM_MEM_MODIFIED) {
		SET_PAGE_DIRTY(mem, FALSE);
	}
	if (mem->vmp_dirty || mem->vmp_precious) {
		/* can't grab a dirty page; re-activate */
//		printf("SECLUDED: dirty page %p\n", mem);
		PAGE_WAKEUP_DONE(mem);
		vm_page_secluded.grab_failure_dirty++;
		vm_object_unlock(object);
		goto reactivate_secluded_page;
	}
	if (mem->vmp_reference) {
		/* it's been used but we do need to grab a page... */
	}

	vm_page_unlock_queues();

	/* finish what vm_page_free() would have done... */
	vm_page_free_prepare_object(mem, TRUE);
	vm_object_unlock(object);
	object = VM_OBJECT_NULL;
	if (vm_page_free_verify) {
		ASSERT_PMAP_FREE(mem);
	}
	pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
	vm_page_secluded.grab_success_other++;

	assert(mem->vmp_busy);
	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
	assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
	assert(mem->vmp_pageq.next == 0);
	assert(mem->vmp_pageq.prev == 0);
	assert(mem->vmp_listq.next == 0);
	assert(mem->vmp_listq.prev == 0);
#if CONFIG_BACKGROUND_QUEUE
	assert(mem->vmp_on_backgroundq == 0);
	assert(mem->vmp_backgroundq.next == 0);
	assert(mem->vmp_backgroundq.prev == 0);
#endif /* CONFIG_BACKGROUND_QUEUE */

	return mem;
}
uint64_t
vm_page_secluded_drain(void)
{
	vm_page_t       local_freeq;
	uint64_t        num_reclaimed;
	unsigned int    saved_secluded_count, saved_secluded_target;

	num_reclaimed = 0;
	local_freeq = VM_PAGE_NULL;

	vm_page_lock_queues();

	saved_secluded_count = vm_page_secluded_count;
	saved_secluded_target = vm_page_secluded_target;
	vm_page_secluded_target = 0;
	VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
	while (vm_page_secluded_count) {
		vm_page_t secluded_page;

		assert((vm_page_secluded_count_free +
		    vm_page_secluded_count_inuse) ==
		    vm_page_secluded_count);
		secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
		assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);

		vm_page_queues_remove(secluded_page, FALSE);
		assert(!secluded_page->vmp_fictitious);
		assert(!VM_PAGE_WIRED(secluded_page));

		if (secluded_page->vmp_object == 0) {
			/* transfer to free queue */
			assert(secluded_page->vmp_busy);
			secluded_page->vmp_snext = local_freeq;
			local_freeq = secluded_page;
		} else {
			/* transfer to head of active queue */
			vm_page_enqueue_active(secluded_page, FALSE);
			secluded_page = VM_PAGE_NULL;
		}
		num_reclaimed++;
	}
	vm_page_secluded_target = saved_secluded_target;
	VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();

//	printf("FBDP %s:%d secluded_count %d->%d, target %d, reclaimed %lld\n", __FUNCTION__, __LINE__, saved_secluded_count, vm_page_secluded_count, vm_page_secluded_target, num_reclaimed);

	vm_page_unlock_queues();

	if (local_freeq) {
		vm_page_free_list(local_freeq, TRUE);
		local_freeq = VM_PAGE_NULL;
	}

	return num_reclaimed;
}
#endif /* CONFIG_SECLUDED_MEMORY */
static void
vm_page_grab_diags(void)
{
#if DEVELOPMENT || DEBUG
	task_t task = current_task();

	if (task == NULL) {
		return;
	}

	ledger_credit(task->ledger, task_ledgers.pages_grabbed, 1);
#endif /* DEVELOPMENT || DEBUG */
}
/*
 *	vm_page_release:
 *
 *	Return a page to the free list.
 */
void
vm_page_release(
	vm_page_t       mem,
	boolean_t       page_queues_locked)
{
	unsigned int    color;
	int     need_wakeup = 0;
	int     need_priv_wakeup = 0;
#if CONFIG_SECLUDED_MEMORY
	int     need_secluded_wakeup = 0;
#endif /* CONFIG_SECLUDED_MEMORY */
	event_t wakeup_event = NULL;

	if (page_queues_locked) {
		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	} else {
		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
	}

	assert(!mem->vmp_private && !mem->vmp_fictitious);
	if (vm_page_free_verify) {
		ASSERT_PMAP_FREE(mem);
	}
//	dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 5);	/* (TEST/DEBUG) */

	pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));

	lck_mtx_lock_spin(&vm_page_queue_free_lock);

	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
	assert(mem->vmp_busy);
	assert(!mem->vmp_laundry);
	assert(mem->vmp_object == 0);
	assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
	assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
#if CONFIG_BACKGROUND_QUEUE
	assert(mem->vmp_backgroundq.next == 0 &&
	    mem->vmp_backgroundq.prev == 0 &&
	    mem->vmp_on_backgroundq == FALSE);
#endif
	if ((mem->vmp_lopage == TRUE || vm_lopage_refill == TRUE) &&
	    vm_lopage_free_count < vm_lopage_free_limit &&
	    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
		/*
		 * this exists to support hardware controllers
		 * incapable of generating DMAs with more than 32 bits
		 * of address on platforms with physical memory > 4G...
		 */
		vm_page_queue_enter_first(&vm_lopage_queue_free, mem, vmp_pageq);
		vm_lopage_free_count++;

		if (vm_lopage_free_count >= vm_lopage_free_limit) {
			vm_lopage_refill = FALSE;
		}

		mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
		mem->vmp_lopage = TRUE;
#if CONFIG_SECLUDED_MEMORY
	} else if (vm_page_free_count > vm_page_free_reserved &&
	    vm_page_secluded_count < vm_page_secluded_target &&
	    num_tasks_can_use_secluded_mem == 0) {
		/*
		 * XXX FBDP TODO: also avoid refilling secluded queue
		 * when some IOKit objects are already grabbing from it...
		 */
		if (!page_queues_locked) {
			if (!vm_page_trylock_queues()) {
				/* take locks in right order */
				lck_mtx_unlock(&vm_page_queue_free_lock);
				vm_page_lock_queues();
				lck_mtx_lock_spin(&vm_page_queue_free_lock);
			}
		}
		mem->vmp_lopage = FALSE;
		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
		vm_page_queue_enter_first(&vm_page_queue_secluded, mem, vmp_pageq);
		mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
		vm_page_secluded_count++;
		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
		vm_page_secluded_count_free++;
		if (!page_queues_locked) {
			vm_page_unlock_queues();
		}
		LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_OWNED);
		if (vm_page_free_wanted_secluded > 0) {
			vm_page_free_wanted_secluded--;
			need_secluded_wakeup = 1;
		}
#endif /* CONFIG_SECLUDED_MEMORY */
	} else {
		mem->vmp_lopage = FALSE;
		mem->vmp_q_state = VM_PAGE_ON_FREE_Q;

		color = VM_PAGE_GET_COLOR(mem);
#if defined(__x86_64__)
		vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
#else
		vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
#endif
		vm_page_free_count++;
		/*
		 *	Check if we should wake up someone waiting for page.
		 *	But don't bother waking them unless they can allocate.
		 *
		 *	We wakeup only one thread, to prevent starvation.
		 *	Because the scheduling system handles wait queues FIFO,
		 *	if we wakeup all waiting threads, one greedy thread
		 *	can starve multiple niceguy threads.  When the threads
		 *	all wakeup, the greedy thread runs first, grabs the page,
		 *	and waits for another page.  It will be the first to run
		 *	when the next page is freed.
		 *
		 *	However, there is a slight danger here.
		 *	The thread we wake might not use the free page.
		 *	Then the other threads could wait indefinitely
		 *	while the page goes unused.  To forestall this,
		 *	the pageout daemon will keep making free pages
		 *	as long as vm_page_free_wanted is non-zero.
		 */

		assert(vm_page_free_count > 0);
		if (vm_page_free_wanted_privileged > 0) {
			vm_page_free_wanted_privileged--;
			need_priv_wakeup = 1;
#if CONFIG_SECLUDED_MEMORY
		} else if (vm_page_free_wanted_secluded > 0 &&
		    vm_page_free_count > vm_page_free_reserved) {
			vm_page_free_wanted_secluded--;
			need_secluded_wakeup = 1;
#endif /* CONFIG_SECLUDED_MEMORY */
		} else if (vm_page_free_wanted > 0 &&
		    vm_page_free_count > vm_page_free_reserved) {
			vm_page_free_wanted--;
			need_wakeup = 1;
		}
	}
	vm_pageout_vminfo.vm_page_pages_freed++;

	VM_DEBUG_CONSTANT_EVENT(vm_page_release, VM_PAGE_RELEASE, DBG_FUNC_NONE, 1, 0, 0, 0);

	lck_mtx_unlock(&vm_page_queue_free_lock);

	if (need_priv_wakeup) {
		wakeup_event = &vm_page_free_wanted_privileged;
	}
#if CONFIG_SECLUDED_MEMORY
	else if (need_secluded_wakeup) {
		wakeup_event = &vm_page_free_wanted_secluded;
	}
#endif /* CONFIG_SECLUDED_MEMORY */
	else if (need_wakeup) {
		wakeup_event = &vm_page_free_count;
	}

	if (wakeup_event) {
		if (vps_dynamic_priority_enabled == TRUE) {
			thread_t thread_woken = NULL;
			wakeup_one_with_inheritor((event_t) wakeup_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &thread_woken);
			thread_deallocate(thread_woken);
		} else {
			thread_wakeup_one((event_t) wakeup_event);
		}
	}

	VM_CHECK_MEMORYSTATUS;
}
/*
 *	This version of vm_page_release() is used only at startup
 *	when we are single-threaded and pages are being released
 *	for the first time. Hence, no locking or unnecessary checks are made.
 *	Note: VM_CHECK_MEMORYSTATUS invoked by the caller.
 */
void
vm_page_release_startup(
	vm_page_t       mem)
{
	vm_page_queue_t queue_free;

	if (vm_lopage_free_count < vm_lopage_free_limit &&
	    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
		mem->vmp_lopage = TRUE;
		mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
		vm_lopage_free_count++;
		queue_free = &vm_lopage_queue_free;
#if CONFIG_SECLUDED_MEMORY
	} else if (vm_page_secluded_count < vm_page_secluded_target) {
		mem->vmp_lopage = FALSE;
		mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
		vm_page_secluded_count++;
		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
		vm_page_secluded_count_free++;
		queue_free = &vm_page_queue_secluded;
#endif /* CONFIG_SECLUDED_MEMORY */
	} else {
		mem->vmp_lopage = FALSE;
		mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
		vm_page_free_count++;
		queue_free = &vm_page_queue_free[VM_PAGE_GET_COLOR(mem)].qhead;
	}
	if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
#if defined(__x86_64__)
		vm_page_queue_enter_clump(queue_free, mem);
#else
		vm_page_queue_enter(queue_free, mem, vmp_pageq);
#endif
	} else {
		vm_page_queue_enter_first(queue_free, mem, vmp_pageq);
	}
}
/*
 *	vm_page_wait:
 *
 *	Wait for a page to become available.
 *	If there are plenty of free pages, then we don't sleep.
 *
 *	Returns:
 *		TRUE:  There may be another page, try again
 *		FALSE: We were interrupted out of our wait, don't try again
 */
boolean_t
vm_page_wait(
	int     interruptible)
{
	/*
	 *	We can't use vm_page_free_reserved to make this
	 *	determination.  Consider: some thread might
	 *	need to allocate two pages.  The first allocation
	 *	succeeds, the second fails.  After the first page is freed,
	 *	a call to vm_page_wait must really block.
	 */
	kern_return_t   wait_result;
	int             need_wakeup = 0;
	int             is_privileged = current_thread()->options & TH_OPT_VMPRIV;
	event_t         wait_event = NULL;

	lck_mtx_lock_spin(&vm_page_queue_free_lock);

	if (is_privileged && vm_page_free_count) {
		lck_mtx_unlock(&vm_page_queue_free_lock);
		return TRUE;
	}

	if (vm_page_free_count >= vm_page_free_target) {
		lck_mtx_unlock(&vm_page_queue_free_lock);
		return TRUE;
	}

	if (is_privileged) {
		if (vm_page_free_wanted_privileged++ == 0) {
			need_wakeup = 1;
		}
		wait_event = (event_t)&vm_page_free_wanted_privileged;
#if CONFIG_SECLUDED_MEMORY
	} else if (secluded_for_apps &&
	    task_can_use_secluded_mem(current_task(), FALSE)) {
		/* XXX FBDP: need pageq lock for this... */
		/* XXX FBDP: might wait even if pages available, */
		/* XXX FBDP: hopefully not for too long... */
		if (vm_page_secluded_count > 0) {
			lck_mtx_unlock(&vm_page_queue_free_lock);
			return TRUE;
		}
		if (vm_page_free_wanted_secluded++ == 0) {
			need_wakeup = 1;
		}
		wait_event = (event_t)&vm_page_free_wanted_secluded;
#endif /* CONFIG_SECLUDED_MEMORY */
	} else {
		if (vm_page_free_wanted++ == 0) {
			need_wakeup = 1;
		}
		wait_event = (event_t)&vm_page_free_count;
	}

	/*
	 * We don't do a vm_pageout_scan wakeup if we already have
	 * some waiters because vm_pageout_scan checks for waiters
	 * before it returns and does so behind the vm_page_queue_free_lock,
	 * which we own when we bump the waiter counts.
	 */

	if (vps_dynamic_priority_enabled == TRUE) {
		/*
		 *  We are waking up vm_pageout_scan here. If it needs
		 *  the vm_page_queue_free_lock before we unlock it
		 *  we'll end up just blocking and incur an extra
		 *  context switch. Could be a perf. issue.
		 */

		counter(c_vm_page_wait_block++);

		if (need_wakeup) {
			thread_wakeup((event_t)&vm_page_free_wanted);
		}

		/*
		 * LD: This event is going to get recorded every time because
		 * we don't get back THREAD_WAITING from lck_mtx_sleep_with_inheritor.
		 * We just block in that routine.
		 */
		VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
		    vm_page_free_wanted_privileged,
		    vm_page_free_wanted,
#if CONFIG_SECLUDED_MEMORY
		    vm_page_free_wanted_secluded,
#else /* CONFIG_SECLUDED_MEMORY */
		    0,
#endif /* CONFIG_SECLUDED_MEMORY */
		    0);
		wait_result = lck_mtx_sleep_with_inheritor(&vm_page_queue_free_lock,
		    LCK_SLEEP_UNLOCK,
		    wait_event,
		    vm_pageout_scan_thread,
		    interruptible,
		    0);
	} else {
		wait_result = assert_wait(wait_event, interruptible);

		lck_mtx_unlock(&vm_page_queue_free_lock);
		counter(c_vm_page_wait_block++);

		if (need_wakeup) {
			thread_wakeup((event_t)&vm_page_free_wanted);
		}

		if (wait_result == THREAD_WAITING) {
			VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
			    vm_page_free_wanted_privileged,
			    vm_page_free_wanted,
#if CONFIG_SECLUDED_MEMORY
			    vm_page_free_wanted_secluded,
#else /* CONFIG_SECLUDED_MEMORY */
			    0,
#endif /* CONFIG_SECLUDED_MEMORY */
			    0);
			wait_result = thread_block(THREAD_CONTINUE_NULL);
			VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block,
			    VM_PAGE_WAIT_BLOCK, DBG_FUNC_END, 0, 0, 0, 0);
		}
	}

	return (wait_result == THREAD_AWAKENED) || (wait_result == THREAD_NOT_WAITING);
}
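/*
 * Illustrative sketch, not part of this module: an interruptible caller has to
 * honor vm_page_wait()'s return value and give up cleanly when the wait was
 * interrupted.  The helper name is hypothetical.
 */
#if 0
static kern_return_t
example_grab_interruptible(vm_page_t *mp)
{
	vm_page_t m;

	while ((m = vm_page_grab()) == VM_PAGE_NULL) {
		if (!vm_page_wait(THREAD_INTERRUPTIBLE)) {
			/* interrupted out of the wait: don't try again */
			return KERN_ABORTED;
		}
	}
	*mp = m;
	return KERN_SUCCESS;
}
#endif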
/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */
vm_page_t
vm_page_alloc(
	vm_object_t             object,
	vm_object_offset_t      offset)
{
	vm_page_t       mem;
	int             grab_options;

	vm_object_lock_assert_exclusive(object);
	grab_options = 0;
#if CONFIG_SECLUDED_MEMORY
	if (object->can_grab_secluded) {
		grab_options |= VM_PAGE_GRAB_SECLUDED;
	}
#endif /* CONFIG_SECLUDED_MEMORY */
	mem = vm_page_grab_options(grab_options);
	if (mem == VM_PAGE_NULL) {
		return VM_PAGE_NULL;
	}

	vm_page_insert(mem, object, offset);

	return mem;
}
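/*
 * Illustrative sketch, not part of this module: vm_page_alloc() is the
 * grab-then-insert convenience; a caller checks for VM_PAGE_NULL and maps
 * that to a resource-shortage error.  The helper name is hypothetical.
 */
#if 0
static kern_return_t
example_alloc_one(vm_object_t object, vm_object_offset_t offset)
{
	vm_page_t m;

	vm_object_lock(object);
	m = vm_page_alloc(object, offset);      /* grabs a page and inserts it */
	vm_object_unlock(object);

	return (m == VM_PAGE_NULL) ? KERN_RESOURCE_SHORTAGE : KERN_SUCCESS;
}
#endif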
/*
 *	vm_page_alloc_guard:
 *
 *	Allocate a fictitious page which will be used
 *	as a guard page.  The page will be inserted into
 *	the object and returned to the caller.
 */
vm_page_t
vm_page_alloc_guard(
	vm_object_t             object,
	vm_object_offset_t      offset)
{
	vm_page_t       mem;

	vm_object_lock_assert_exclusive(object);
	mem = vm_page_grab_guard();
	if (mem == VM_PAGE_NULL) {
		return VM_PAGE_NULL;
	}

	vm_page_insert(mem, object, offset);

	return mem;
}
counter(unsigned int c_laundry_pages_freed = 0; )
/*
 *	vm_page_free_prepare:
 *
 *	Removes page from any queue it may be on
 *	and disassociates it from its VM object.
 *
 *	Object and page queues must be locked prior to entry.
 */
void
vm_page_free_prepare(
	vm_page_t       mem)
{
	vm_page_free_prepare_queues(mem);
	vm_page_free_prepare_object(mem, TRUE);
}
void
vm_page_free_prepare_queues(
	vm_page_t       mem)
{
	vm_object_t     m_object;

	assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
	assert(!mem->vmp_cleaning);
	m_object = VM_PAGE_OBJECT(mem);

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	if (m_object) {
		vm_object_lock_assert_exclusive(m_object);
	}
	if (mem->vmp_laundry) {
		/*
		 * We may have to free a page while it's being laundered
		 * if we lost its pager (due to a forced unmount, for example).
		 * We need to call vm_pageout_steal_laundry() before removing
		 * the page from its VM object, so that we can remove it
		 * from its pageout queue and adjust the laundry accounting
		 */
		vm_pageout_steal_laundry(mem, TRUE);
		counter(++c_laundry_pages_freed);
	}

	vm_page_queues_remove(mem, TRUE);

	if (VM_PAGE_WIRED(mem)) {
		assert(mem->vmp_wire_count > 0);

		if (m_object) {
			VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
			VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
			VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);

			assert(m_object->resident_page_count >=
			    m_object->wired_page_count);

			if (m_object->purgable == VM_PURGABLE_VOLATILE) {
				OSAddAtomic(+1, &vm_page_purgeable_count);
				assert(vm_page_purgeable_wired_count > 0);
				OSAddAtomic(-1, &vm_page_purgeable_wired_count);
			}
			if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
			    m_object->purgable == VM_PURGABLE_EMPTY) &&
			    m_object->vo_owner != TASK_NULL) {
				task_t          owner;
				int             ledger_idx_volatile;
				int             ledger_idx_nonvolatile;
				int             ledger_idx_volatile_compressed;
				int             ledger_idx_nonvolatile_compressed;
				boolean_t       do_footprint;

				owner = VM_OBJECT_OWNER(m_object);
				vm_object_ledger_tag_ledgers(
					m_object,
					&ledger_idx_volatile,
					&ledger_idx_nonvolatile,
					&ledger_idx_volatile_compressed,
					&ledger_idx_nonvolatile_compressed,
					&do_footprint);
				/*
				 * While wired, this page was accounted
				 * as "non-volatile" but it should now
				 * be accounted as "volatile".
				 */
				/* one less "non-volatile"... */
				ledger_debit(owner->ledger,
				    ledger_idx_nonvolatile,
				    PAGE_SIZE);
				if (do_footprint) {
					/* ... and "phys_footprint" */
					ledger_debit(owner->ledger,
					    task_ledgers.phys_footprint,
					    PAGE_SIZE);
				}
				/* one more "volatile" */
				ledger_credit(owner->ledger,
				    ledger_idx_volatile,
				    PAGE_SIZE);
			}
		}
		if (!mem->vmp_private && !mem->vmp_fictitious) {
			vm_page_wire_count--;
		}

		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
		mem->vmp_wire_count = 0;
		assert(!mem->vmp_gobbled);
	} else if (mem->vmp_gobbled) {
		if (!mem->vmp_private && !mem->vmp_fictitious) {
			vm_page_wire_count--;
		}
		vm_page_gobble_count--;
	}
}
void
vm_page_free_prepare_object(
	vm_page_t       mem,
	boolean_t       remove_from_hash)
{
	if (mem->vmp_tabled) {
		vm_page_remove(mem, remove_from_hash);  /* clears tabled, object, offset */
	}
	PAGE_WAKEUP(mem);               /* clears wanted */

	if (mem->vmp_private) {
		mem->vmp_private = FALSE;
		mem->vmp_fictitious = TRUE;
		VM_PAGE_SET_PHYS_PAGE(mem, vm_page_fictitious_addr);
	}
	if (!mem->vmp_fictitious) {
		assert(mem->vmp_pageq.next == 0);
		assert(mem->vmp_pageq.prev == 0);
		assert(mem->vmp_listq.next == 0);
		assert(mem->vmp_listq.prev == 0);
#if CONFIG_BACKGROUND_QUEUE
		assert(mem->vmp_backgroundq.next == 0);
		assert(mem->vmp_backgroundq.prev == 0);
#endif /* CONFIG_BACKGROUND_QUEUE */
		assert(mem->vmp_next_m == 0);
		ASSERT_PMAP_FREE(mem);
		vm_page_init(mem, VM_PAGE_GET_PHYS_PAGE(mem), mem->vmp_lopage);
	}
}
4164 * Returns the given page to the free list,
4165 * disassociating it with any VM object.
4167 * Object and page queues must be locked prior to entry.
4173 vm_page_free_prepare(mem
);
4175 if (mem
->vmp_fictitious
) {
4176 vm_page_release_fictitious(mem
);
4178 vm_page_release(mem
,
4179 TRUE
); /* page queues are locked */
4185 vm_page_free_unlocked(
4187 boolean_t remove_from_hash
)
4189 vm_page_lockspin_queues();
4190 vm_page_free_prepare_queues(mem
);
4191 vm_page_unlock_queues();
4193 vm_page_free_prepare_object(mem
, remove_from_hash
);
4195 if (mem
->vmp_fictitious
) {
4196 vm_page_release_fictitious(mem
);
4198 vm_page_release(mem
, FALSE
); /* page queues are not locked */
4204 * Free a list of pages. The list can be up to several hundred pages,
4205 * as blocked up by vm_pageout_scan().
4206 * The big win is not having to take the free list lock once
4209 * The VM page queues lock (vm_page_queue_lock) should NOT be held.
4210 * The VM page free queues lock (vm_page_queue_free_lock) should NOT be held.
4215 boolean_t prepare_object
)
4219 vm_page_t local_freeq
;
4222 LCK_MTX_ASSERT(&vm_page_queue_lock
, LCK_MTX_ASSERT_NOTOWNED
);
4223 LCK_MTX_ASSERT(&vm_page_queue_free_lock
, LCK_MTX_ASSERT_NOTOWNED
);
4227 local_freeq
= VM_PAGE_NULL
;
4231 * break up the processing into smaller chunks so
4232 * that we can 'pipeline' the pages onto the
4233 * free list w/o introducing too much
4234 * contention on the global free queue lock
4236 while (mem
&& pg_count
< 64) {
4237 assert((mem
->vmp_q_state
== VM_PAGE_NOT_ON_Q
) ||
4238 (mem
->vmp_q_state
== VM_PAGE_IS_WIRED
));
4239 #if CONFIG_BACKGROUND_QUEUE
4240 assert(mem
->vmp_backgroundq
.next
== 0 &&
4241 mem
->vmp_backgroundq
.prev
== 0 &&
4242 mem
->vmp_on_backgroundq
== FALSE
);
4244 nxt
= mem
->vmp_snext
;
4245 mem
->vmp_snext
= NULL
;
4246 assert(mem
->vmp_pageq
.prev
== 0);
4248 if (vm_page_free_verify
&& !mem
->vmp_fictitious
&& !mem
->vmp_private
) {
4249 ASSERT_PMAP_FREE(mem
);
4251 if (prepare_object
== TRUE
) {
4252 vm_page_free_prepare_object(mem
, TRUE
);
4255 if (!mem
->vmp_fictitious
) {
4256 assert(mem
->vmp_busy
);
4258 if ((mem
->vmp_lopage
== TRUE
|| vm_lopage_refill
== TRUE
) &&
4259 vm_lopage_free_count
< vm_lopage_free_limit
&&
4260 VM_PAGE_GET_PHYS_PAGE(mem
) < max_valid_low_ppnum
) {
4261 vm_page_release(mem
, FALSE
); /* page queues are not locked */
4262 #if CONFIG_SECLUDED_MEMORY
4263 } else if (vm_page_secluded_count
< vm_page_secluded_target
&&
4264 num_tasks_can_use_secluded_mem
== 0) {
4265 vm_page_release(mem
,
4266 FALSE
); /* page queues are not locked */
4267 #endif /* CONFIG_SECLUDED_MEMORY */
4270 * IMPORTANT: we can't set the page "free" here
4271 * because that would make the page eligible for
4272 * a physically-contiguous allocation (see
4273 * vm_page_find_contiguous()) right away (we don't
4274 * hold the vm_page_queue_free lock). That would
4275 * cause trouble because the page is not actually
4276 * in the free queue yet...
4278 mem
->vmp_snext
= local_freeq
;
4282 pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem
));
4285 assert(VM_PAGE_GET_PHYS_PAGE(mem
) == vm_page_fictitious_addr
||
4286 VM_PAGE_GET_PHYS_PAGE(mem
) == vm_page_guard_addr
);
4287 vm_page_release_fictitious(mem
);
4293 if ((mem
= local_freeq
)) {
4294 unsigned int avail_free_count
;
4295 unsigned int need_wakeup
= 0;
4296 unsigned int need_priv_wakeup
= 0;
4297 #if CONFIG_SECLUDED_MEMORY
4298 unsigned int need_wakeup_secluded
= 0;
4299 #endif /* CONFIG_SECLUDED_MEMORY */
4300 event_t priv_wakeup_event
, secluded_wakeup_event
, normal_wakeup_event
;
4301 boolean_t priv_wakeup_all
, secluded_wakeup_all
, normal_wakeup_all
;
4303 lck_mtx_lock_spin(&vm_page_queue_free_lock
);
4308 nxt
= mem
->vmp_snext
;
4310 assert(mem
->vmp_q_state
== VM_PAGE_NOT_ON_Q
);
4311 assert(mem
->vmp_busy
);
4312 mem
->vmp_lopage
= FALSE
;
4313 mem
->vmp_q_state
= VM_PAGE_ON_FREE_Q
;
4315 color
= VM_PAGE_GET_COLOR(mem
);
4316 #if defined(__x86_64__)
4317 vm_page_queue_enter_clump(&vm_page_queue_free
[color
].qhead
, mem
);
4319 vm_page_queue_enter(&vm_page_queue_free
[color
].qhead
,
4324 vm_pageout_vminfo
.vm_page_pages_freed
+= pg_count
;
4325 vm_page_free_count
+= pg_count
;
4326 avail_free_count
= vm_page_free_count
;
4328 VM_DEBUG_CONSTANT_EVENT(vm_page_release
, VM_PAGE_RELEASE
, DBG_FUNC_NONE
, pg_count
, 0, 0, 0);
4330 if (vm_page_free_wanted_privileged
> 0 && avail_free_count
> 0) {
4331 if (avail_free_count
< vm_page_free_wanted_privileged
) {
4332 need_priv_wakeup
= avail_free_count
;
4333 vm_page_free_wanted_privileged
-= avail_free_count
;
4334 avail_free_count
= 0;
4336 need_priv_wakeup
= vm_page_free_wanted_privileged
;
4337 avail_free_count
-= vm_page_free_wanted_privileged
;
4338 vm_page_free_wanted_privileged
= 0;
4341 #if CONFIG_SECLUDED_MEMORY
4342 if (vm_page_free_wanted_secluded
> 0 &&
4343 avail_free_count
> vm_page_free_reserved
) {
4344 unsigned int available_pages
;
4345 available_pages
= (avail_free_count
-
4346 vm_page_free_reserved
);
4347 if (available_pages
<
4348 vm_page_free_wanted_secluded
) {
4349 need_wakeup_secluded
= available_pages
;
4350 vm_page_free_wanted_secluded
-=
4352 avail_free_count
-= available_pages
;
4354 need_wakeup_secluded
=
4355 vm_page_free_wanted_secluded
;
4357 vm_page_free_wanted_secluded
;
4358 vm_page_free_wanted_secluded
= 0;
4361 #endif /* CONFIG_SECLUDED_MEMORY */
4362 if (vm_page_free_wanted
> 0 && avail_free_count
> vm_page_free_reserved
) {
4363 unsigned int available_pages
;
4365 available_pages
= avail_free_count
- vm_page_free_reserved
;
4367 if (available_pages
>= vm_page_free_wanted
) {
4368 need_wakeup
= vm_page_free_wanted
;
4369 vm_page_free_wanted
= 0;
4371 need_wakeup
= available_pages
;
4372 vm_page_free_wanted
-= available_pages
;
4375 lck_mtx_unlock(&vm_page_queue_free_lock
);
4377 priv_wakeup_event
= NULL
;
4378 secluded_wakeup_event
= NULL
;
4379 normal_wakeup_event
= NULL
;
4381 priv_wakeup_all
= FALSE
;
4382 secluded_wakeup_all
= FALSE
;
4383 normal_wakeup_all
= FALSE
;
4386 if (need_priv_wakeup
!= 0) {
4388 * There shouldn't be that many VM-privileged threads,
4389 * so let's wake them all up, even if we don't quite
4390 * have enough pages to satisfy them all.
4392 priv_wakeup_event
= (event_t
)&vm_page_free_wanted_privileged
;
4393 priv_wakeup_all
= TRUE
;
4395 #if CONFIG_SECLUDED_MEMORY
4396 if (need_wakeup_secluded
!= 0 &&
4397 vm_page_free_wanted_secluded
== 0) {
4398 secluded_wakeup_event
= (event_t
)&vm_page_free_wanted_secluded
;
4399 secluded_wakeup_all
= TRUE
;
4400 need_wakeup_secluded
= 0;
4402 secluded_wakeup_event
= (event_t
)&vm_page_free_wanted_secluded
;
4404 #endif /* CONFIG_SECLUDED_MEMORY */
4405 if (need_wakeup
!= 0 && vm_page_free_wanted
== 0) {
4407 * We don't expect to have any more waiters
4408 * after this, so let's wake them all up at
4411 normal_wakeup_event
= (event_t
) &vm_page_free_count
;
4412 normal_wakeup_all
= TRUE
;
4415 normal_wakeup_event
= (event_t
) &vm_page_free_count
;
4418 if (priv_wakeup_event
||
4419 #if CONFIG_SECLUDED_MEMORY
4420 secluded_wakeup_event
||
4421 #endif /* CONFIG_SECLUDED_MEMORY */
4422 normal_wakeup_event
) {
4423 if (vps_dynamic_priority_enabled
== TRUE
) {
4424 thread_t thread_woken
= NULL
;
4426 if (priv_wakeup_all
== TRUE
) {
4427 wakeup_all_with_inheritor(priv_wakeup_event
, THREAD_AWAKENED
);
4430 #if CONFIG_SECLUDED_MEMORY
4431 if (secluded_wakeup_all
== TRUE
) {
4432 wakeup_all_with_inheritor(secluded_wakeup_event
, THREAD_AWAKENED
);
4435 while (need_wakeup_secluded
-- != 0) {
4437 * Wake up one waiter per page we just released.
4439 wakeup_one_with_inheritor(secluded_wakeup_event
, THREAD_AWAKENED
, LCK_WAKE_DO_NOT_TRANSFER_PUSH
, &thread_woken
);
4440 thread_deallocate(thread_woken
);
4442 #endif /* CONFIG_SECLUDED_MEMORY */
4444 if (normal_wakeup_all
== TRUE
) {
4445 wakeup_all_with_inheritor(normal_wakeup_event
, THREAD_AWAKENED
);
4448 while (need_wakeup
-- != 0) {
4450 * Wake up one waiter per page we just released.
4452 wakeup_one_with_inheritor(normal_wakeup_event
, THREAD_AWAKENED
, LCK_WAKE_DO_NOT_TRANSFER_PUSH
, &thread_woken
);
4453 thread_deallocate(thread_woken
);
4457 * Non-priority-aware wakeups.
4460 if (priv_wakeup_all
== TRUE
) {
4461 thread_wakeup(priv_wakeup_event
);
4464 #if CONFIG_SECLUDED_MEMORY
4465 if (secluded_wakeup_all
== TRUE
) {
4466 thread_wakeup(secluded_wakeup_event
);
4469 while (need_wakeup_secluded
-- != 0) {
4471 * Wake up one waiter per page we just released.
4473 thread_wakeup_one(secluded_wakeup_event
);
4476 #endif /* CONFIG_SECLUDED_MEMORY */
4477 if (normal_wakeup_all
== TRUE
) {
4478 thread_wakeup(normal_wakeup_event
);
4481 while (need_wakeup
-- != 0) {
4483 * Wake up one waiter per page we just released.
4485 thread_wakeup_one(normal_wakeup_event
);
4490 VM_CHECK_MEMORYSTATUS
;
4499 * Mark this page as wired down by yet
4500 * another map, removing it from paging queues
4503 * The page's object and the page queues must be locked.
4511 boolean_t check_memorystatus
)
4513 vm_object_t m_object
;
4515 m_object
= VM_PAGE_OBJECT(mem
);
4517 // dbgLog(current_thread(), mem->vmp_offset, m_object, 1); /* (TEST/DEBUG) */
4521 vm_object_lock_assert_exclusive(m_object
);
4524 * In theory, the page should be in an object before it
4525 * gets wired, since we need to hold the object lock
4526 * to update some fields in the page structure.
4527 * However, some code (i386 pmap, for example) might want
4528 * to wire a page before it gets inserted into an object.
4529 * That's somewhat OK, as long as nobody else can get to
4530 * that page and update it at the same time.
4533 LCK_MTX_ASSERT(&vm_page_queue_lock
, LCK_MTX_ASSERT_OWNED
);
4534 if (!VM_PAGE_WIRED(mem
)) {
4535 if (mem
->vmp_laundry
) {
4536 vm_pageout_steal_laundry(mem
, TRUE
);
4539 vm_page_queues_remove(mem
, TRUE
);
4541 assert(mem
->vmp_wire_count
== 0);
4542 mem
->vmp_q_state
= VM_PAGE_IS_WIRED
;
4545 VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object
);
4546 VM_OBJECT_WIRED_PAGE_ADD(m_object
, mem
);
4547 VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object
, tag
);
4549 assert(m_object
->resident_page_count
>=
4550 m_object
->wired_page_count
);
4551 if (m_object
->purgable
== VM_PURGABLE_VOLATILE
) {
4552 assert(vm_page_purgeable_count
> 0);
4553 OSAddAtomic(-1, &vm_page_purgeable_count
);
4554 OSAddAtomic(1, &vm_page_purgeable_wired_count
);
4556 if ((m_object
->purgable
== VM_PURGABLE_VOLATILE
||
4557 m_object
->purgable
== VM_PURGABLE_EMPTY
) &&
4558 m_object
->vo_owner
!= TASK_NULL
) {
4560 int ledger_idx_volatile
;
4561 int ledger_idx_nonvolatile
;
4562 int ledger_idx_volatile_compressed
;
4563 int ledger_idx_nonvolatile_compressed
;
4564 boolean_t do_footprint
;
4566 owner
= VM_OBJECT_OWNER(m_object
);
4567 vm_object_ledger_tag_ledgers(
4569 &ledger_idx_volatile
,
4570 &ledger_idx_nonvolatile
,
4571 &ledger_idx_volatile_compressed
,
4572 &ledger_idx_nonvolatile_compressed
,
4574 /* less volatile bytes */
4575 ledger_debit(owner
->ledger
,
4576 ledger_idx_volatile
,
4578 /* more not-quite-volatile bytes */
4579 ledger_credit(owner
->ledger
,
4580 ledger_idx_nonvolatile
,
4583 /* more footprint */
4584 ledger_credit(owner
->ledger
,
4585 task_ledgers
.phys_footprint
,
4589 if (m_object
->all_reusable
) {
4591 * Wired pages are not counted as "re-usable"
4592 * in "all_reusable" VM objects, so nothing
4595 } else if (mem
->vmp_reusable
) {
4597 * This page is not "re-usable" when it's
4598 * wired, so adjust its state and the
4601 vm_object_reuse_pages(m_object
,
4603 mem
->vmp_offset
+ PAGE_SIZE_64
,
4607 assert(!mem
->vmp_reusable
);
4609 if (!mem
->vmp_private
&& !mem
->vmp_fictitious
&& !mem
->vmp_gobbled
) {
4610 vm_page_wire_count
++;
4612 if (mem
->vmp_gobbled
) {
4613 vm_page_gobble_count
--;
4615 mem
->vmp_gobbled
= FALSE
;
4617 if (check_memorystatus
== TRUE
) {
4618 VM_CHECK_MEMORYSTATUS
;
4621 assert(!mem
->vmp_gobbled
);
4622 assert(mem
->vmp_q_state
== VM_PAGE_IS_WIRED
);
4623 mem
->vmp_wire_count
++;
4624 if (__improbable(mem
->vmp_wire_count
== 0)) {
4625 panic("vm_page_wire(%p): wire_count overflow", mem
);
4633 * Release one wiring of this page, potentially
4634 * enabling it to be paged again.
4636 * The page's object and the page queues must be locked.
4643 vm_object_t m_object
;
4645 m_object
= VM_PAGE_OBJECT(mem
);
4647 // dbgLog(current_thread(), mem->vmp_offset, m_object, 0); /* (TEST/DEBUG) */
4650 assert(VM_PAGE_WIRED(mem
));
4651 assert(mem
->vmp_wire_count
> 0);
4652 assert(!mem
->vmp_gobbled
);
4653 assert(m_object
!= VM_OBJECT_NULL
);
4654 vm_object_lock_assert_exclusive(m_object
);
4655 LCK_MTX_ASSERT(&vm_page_queue_lock
, LCK_MTX_ASSERT_OWNED
);
4656 if (--mem
->vmp_wire_count
== 0) {
4657 mem
->vmp_q_state
= VM_PAGE_NOT_ON_Q
;
4659 VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object
);
4660 VM_OBJECT_WIRED_PAGE_REMOVE(m_object
, mem
);
4661 VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object
, m_object
->wire_tag
);
4662 if (!mem
->vmp_private
&& !mem
->vmp_fictitious
) {
4663 vm_page_wire_count
--;
4666 assert(m_object
->resident_page_count
>=
4667 m_object
->wired_page_count
);
4668 if (m_object
->purgable
== VM_PURGABLE_VOLATILE
) {
4669 OSAddAtomic(+1, &vm_page_purgeable_count
);
4670 assert(vm_page_purgeable_wired_count
> 0);
4671 OSAddAtomic(-1, &vm_page_purgeable_wired_count
);
4673 if ((m_object
->purgable
== VM_PURGABLE_VOLATILE
||
4674 m_object
->purgable
== VM_PURGABLE_EMPTY
) &&
4675 m_object
->vo_owner
!= TASK_NULL
) {
4677 int ledger_idx_volatile
;
4678 int ledger_idx_nonvolatile
;
4679 int ledger_idx_volatile_compressed
;
4680 int ledger_idx_nonvolatile_compressed
;
4681 boolean_t do_footprint
;
4683 owner
= VM_OBJECT_OWNER(m_object
);
4684 vm_object_ledger_tag_ledgers(
4686 &ledger_idx_volatile
,
4687 &ledger_idx_nonvolatile
,
4688 &ledger_idx_volatile_compressed
,
4689 &ledger_idx_nonvolatile_compressed
,
4691 /* more volatile bytes */
4692 ledger_credit(owner
->ledger
,
4693 ledger_idx_volatile
,
4695 /* less not-quite-volatile bytes */
4696 ledger_debit(owner
->ledger
,
4697 ledger_idx_nonvolatile
,
4700 /* less footprint */
4701 ledger_debit(owner
->ledger
,
4702 task_ledgers
.phys_footprint
,
4706 assert(m_object
!= kernel_object
);
4707 assert(mem
->vmp_pageq
.next
== 0 && mem
->vmp_pageq
.prev
== 0);
4709 if (queueit
== TRUE
) {
4710 if (m_object
->purgable
== VM_PURGABLE_EMPTY
) {
4711 vm_page_deactivate(mem
);
4713 vm_page_activate(mem
);
4717 VM_CHECK_MEMORYSTATUS
;
4723 * vm_page_deactivate:
4725 * Returns the given page to the inactive list,
4726 * indicating that no physical maps have access
4727 * to this page. [Used by the physical mapping system.]
4729 * The page queues must be locked.
4735 vm_page_deactivate_internal(m
, TRUE
);
4740 vm_page_deactivate_internal(
4742 boolean_t clear_hw_reference
)
4744 vm_object_t m_object
;
4746 m_object
= VM_PAGE_OBJECT(m
);
4749 assert(m_object
!= kernel_object
);
4750 assert(VM_PAGE_GET_PHYS_PAGE(m
) != vm_page_guard_addr
);
4752 // dbgLog(VM_PAGE_GET_PHYS_PAGE(m), vm_page_free_count, vm_page_wire_count, 6); /* (TEST/DEBUG) */
4753 LCK_MTX_ASSERT(&vm_page_queue_lock
, LCK_MTX_ASSERT_OWNED
);
4755 * This page is no longer very interesting. If it was
4756 * interesting (active or inactive/referenced), then we
4757 * clear the reference bit and (re)enter it in the
4758 * inactive queue. Note wired pages should not have
4759 * their reference bit cleared.
4761 assert( !(m
->vmp_absent
&& !m
->vmp_unusual
));
4763 if (m
->vmp_gobbled
) { /* can this happen? */
4764 assert( !VM_PAGE_WIRED(m
));
4766 if (!m
->vmp_private
&& !m
->vmp_fictitious
) {
4767 vm_page_wire_count
--;
4769 vm_page_gobble_count
--;
4770 m
->vmp_gobbled
= FALSE
;
4773 * if this page is currently on the pageout queue, we can't do the
4774 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4775 * and we can't remove it manually since we would need the object lock
4776 * (which is not required here) to decrement the activity_in_progress
4777 * reference which is held on the object while the page is in the pageout queue...
4778 * just let the normal laundry processing proceed
4780 if (m
->vmp_laundry
|| m
->vmp_private
|| m
->vmp_fictitious
||
4781 (m
->vmp_q_state
== VM_PAGE_USED_BY_COMPRESSOR
) ||
4782 (m
->vmp_q_state
== VM_PAGE_ON_PAGEOUT_Q
) ||
4786 if (!m
->vmp_absent
&& clear_hw_reference
== TRUE
) {
4787 pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m
));
4790 m
->vmp_reference
= FALSE
;
4791 m
->vmp_no_cache
= FALSE
;
4793 if (!VM_PAGE_INACTIVE(m
)) {
4794 vm_page_queues_remove(m
, FALSE
);
4796 if (!VM_DYNAMIC_PAGING_ENABLED() &&
4797 m
->vmp_dirty
&& m_object
->internal
&&
4798 (m_object
->purgable
== VM_PURGABLE_DENY
||
4799 m_object
->purgable
== VM_PURGABLE_NONVOLATILE
||
4800 m_object
->purgable
== VM_PURGABLE_VOLATILE
)) {
4801 vm_page_check_pageable_safe(m
);
4802 vm_page_queue_enter(&vm_page_queue_throttled
, m
, vmp_pageq
);
4803 m
->vmp_q_state
= VM_PAGE_ON_THROTTLED_Q
;
4804 vm_page_throttled_count
++;
4806 if (m_object
->named
&& m_object
->ref_count
== 1) {
4807 vm_page_speculate(m
, FALSE
);
4808 #if DEVELOPMENT || DEBUG
4809 vm_page_speculative_recreated
++;
4812 vm_page_enqueue_inactive(m
, FALSE
);
4819 * vm_page_enqueue_cleaned
4821 * Put the page on the cleaned queue, mark it cleaned, etc.
4822 * Being on the cleaned queue (and having m->clean_queue set)
4823 * does ** NOT ** guarantee that the page is clean!
4825 * Call with the queues lock held.
4829 vm_page_enqueue_cleaned(vm_page_t m
)
4831 vm_object_t m_object
;
4833 m_object
= VM_PAGE_OBJECT(m
);
4835 assert(VM_PAGE_GET_PHYS_PAGE(m
) != vm_page_guard_addr
);
4836 LCK_MTX_ASSERT(&vm_page_queue_lock
, LCK_MTX_ASSERT_OWNED
);
4837 assert( !(m
->vmp_absent
&& !m
->vmp_unusual
));
4839 if (VM_PAGE_WIRED(m
)) {
4843 if (m
->vmp_gobbled
) {
4844 if (!m
->vmp_private
&& !m
->vmp_fictitious
) {
4845 vm_page_wire_count
--;
4847 vm_page_gobble_count
--;
4848 m
->vmp_gobbled
= FALSE
;
4851 * if this page is currently on the pageout queue, we can't do the
4852 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4853 * and we can't remove it manually since we would need the object lock
4854 * (which is not required here) to decrement the activity_in_progress
4855 * reference which is held on the object while the page is in the pageout queue...
4856 * just let the normal laundry processing proceed
4858 if (m
->vmp_laundry
|| m
->vmp_private
|| m
->vmp_fictitious
||
4859 (m
->vmp_q_state
== VM_PAGE_ON_INACTIVE_CLEANED_Q
) ||
4860 (m
->vmp_q_state
== VM_PAGE_ON_PAGEOUT_Q
)) {
4863 vm_page_queues_remove(m
, FALSE
);
4865 vm_page_check_pageable_safe(m
);
4866 vm_page_queue_enter(&vm_page_queue_cleaned
, m
, vmp_pageq
);
4867 m
->vmp_q_state
= VM_PAGE_ON_INACTIVE_CLEANED_Q
;
4868 vm_page_cleaned_count
++;
4870 vm_page_inactive_count
++;
4871 if (m_object
->internal
) {
4872 vm_page_pageable_internal_count
++;
4874 vm_page_pageable_external_count
++;
4876 #if CONFIG_BACKGROUND_QUEUE
4877 if (m
->vmp_in_background
) {
4878 vm_page_add_to_backgroundq(m
, TRUE
);
4881 VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned
, 1);
4887 * Put the specified page on the active list (if appropriate).
4889 * The page queues must be locked.
4896 vm_object_t m_object
;
4898 m_object
= VM_PAGE_OBJECT(m
);
4901 #ifdef FIXME_4778297
4902 assert(m_object
!= kernel_object
);
4904 assert(VM_PAGE_GET_PHYS_PAGE(m
) != vm_page_guard_addr
);
4905 LCK_MTX_ASSERT(&vm_page_queue_lock
, LCK_MTX_ASSERT_OWNED
);
4906 assert( !(m
->vmp_absent
&& !m
->vmp_unusual
));
4908 if (m
->vmp_gobbled
) {
4909 assert( !VM_PAGE_WIRED(m
));
4910 if (!m
->vmp_private
&& !m
->vmp_fictitious
) {
4911 vm_page_wire_count
--;
4913 vm_page_gobble_count
--;
4914 m
->vmp_gobbled
= FALSE
;
4917 * if this page is currently on the pageout queue, we can't do the
4918 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4919 * and we can't remove it manually since we would need the object lock
4920 * (which is not required here) to decrement the activity_in_progress
4921 * reference which is held on the object while the page is in the pageout queue...
4922 * just let the normal laundry processing proceed
4924 if (m
->vmp_laundry
|| m
->vmp_private
|| m
->vmp_fictitious
||
4925 (m
->vmp_q_state
== VM_PAGE_USED_BY_COMPRESSOR
) ||
4926 (m
->vmp_q_state
== VM_PAGE_ON_PAGEOUT_Q
)) {
4931 if (m
->vmp_q_state
== VM_PAGE_ON_ACTIVE_Q
) {
4932 panic("vm_page_activate: already active");
4936 if (m
->vmp_q_state
== VM_PAGE_ON_SPECULATIVE_Q
) {
4937 DTRACE_VM2(pgrec
, int, 1, (uint64_t *), NULL
);
4938 DTRACE_VM2(pgfrec
, int, 1, (uint64_t *), NULL
);
4941 vm_page_queues_remove(m
, FALSE
);
4943 if (!VM_PAGE_WIRED(m
)) {
4944 vm_page_check_pageable_safe(m
);
4945 if (!VM_DYNAMIC_PAGING_ENABLED() &&
4946 m
->vmp_dirty
&& m_object
->internal
&&
4947 (m_object
->purgable
== VM_PURGABLE_DENY
||
4948 m_object
->purgable
== VM_PURGABLE_NONVOLATILE
||
4949 m_object
->purgable
== VM_PURGABLE_VOLATILE
)) {
4950 vm_page_queue_enter(&vm_page_queue_throttled
, m
, vmp_pageq
);
4951 m
->vmp_q_state
= VM_PAGE_ON_THROTTLED_Q
;
4952 vm_page_throttled_count
++;
4954 #if CONFIG_SECLUDED_MEMORY
4955 if (secluded_for_filecache
&&
4956 vm_page_secluded_target
!= 0 &&
4957 num_tasks_can_use_secluded_mem
== 0 &&
4958 m_object
->eligible_for_secluded
) {
4959 vm_page_queue_enter(&vm_page_queue_secluded
, m
, vmp_pageq
);
4960 m
->vmp_q_state
= VM_PAGE_ON_SECLUDED_Q
;
4961 vm_page_secluded_count
++;
4962 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
4963 vm_page_secluded_count_inuse
++;
4964 assert(!m_object
->internal
);
4965 // vm_page_pageable_external_count++;
4967 #endif /* CONFIG_SECLUDED_MEMORY */
4968 vm_page_enqueue_active(m
, FALSE
);
4970 m
->vmp_reference
= TRUE
;
4971 m
->vmp_no_cache
= FALSE
;
4978 * vm_page_speculate:
4980 * Put the specified page on the speculative list (if appropriate).
4982 * The page queues must be locked.
4989 struct vm_speculative_age_q
*aq
;
4990 vm_object_t m_object
;
4992 m_object
= VM_PAGE_OBJECT(m
);
4995 vm_page_check_pageable_safe(m
);
4997 assert(VM_PAGE_GET_PHYS_PAGE(m
) != vm_page_guard_addr
);
4998 LCK_MTX_ASSERT(&vm_page_queue_lock
, LCK_MTX_ASSERT_OWNED
);
4999 assert( !(m
->vmp_absent
&& !m
->vmp_unusual
));
5000 assert(m_object
->internal
== FALSE
);
5003 * if this page is currently on the pageout queue, we can't do the
5004 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5005 * and we can't remove it manually since we would need the object lock
5006 * (which is not required here) to decrement the activity_in_progress
5007 * reference which is held on the object while the page is in the pageout queue...
5008 * just let the normal laundry processing proceed
5010 if (m
->vmp_laundry
|| m
->vmp_private
|| m
->vmp_fictitious
||
5011 (m
->vmp_q_state
== VM_PAGE_USED_BY_COMPRESSOR
) ||
5012 (m
->vmp_q_state
== VM_PAGE_ON_PAGEOUT_Q
)) {
5016 vm_page_queues_remove(m
, FALSE
);
5018 if (!VM_PAGE_WIRED(m
)) {
5023 clock_get_system_nanotime(&sec
, &nsec
);
5024 ts
.tv_sec
= (unsigned int) sec
;
5027 if (vm_page_speculative_count
== 0) {
5028 speculative_age_index
= VM_PAGE_MIN_SPECULATIVE_AGE_Q
;
5029 speculative_steal_index
= VM_PAGE_MIN_SPECULATIVE_AGE_Q
;
5031 aq
= &vm_page_queue_speculative
[speculative_age_index
];
5034 * set the timer to begin a new group
5036 aq
->age_ts
.tv_sec
= vm_pageout_state
.vm_page_speculative_q_age_ms
/ 1000;
5037 aq
->age_ts
.tv_nsec
= (vm_pageout_state
.vm_page_speculative_q_age_ms
% 1000) * 1000 * NSEC_PER_USEC
;
5039 ADD_MACH_TIMESPEC(&aq
->age_ts
, &ts
);
5041 aq
= &vm_page_queue_speculative
[speculative_age_index
];
5043 if (CMP_MACH_TIMESPEC(&ts
, &aq
->age_ts
) >= 0) {
5044 speculative_age_index
++;
5046 if (speculative_age_index
> VM_PAGE_MAX_SPECULATIVE_AGE_Q
) {
5047 speculative_age_index
= VM_PAGE_MIN_SPECULATIVE_AGE_Q
;
5049 if (speculative_age_index
== speculative_steal_index
) {
5050 speculative_steal_index
= speculative_age_index
+ 1;
5052 if (speculative_steal_index
> VM_PAGE_MAX_SPECULATIVE_AGE_Q
) {
5053 speculative_steal_index
= VM_PAGE_MIN_SPECULATIVE_AGE_Q
;
5056 aq
= &vm_page_queue_speculative
[speculative_age_index
];
5058 if (!vm_page_queue_empty(&aq
->age_q
)) {
5059 vm_page_speculate_ageit(aq
);
5062 aq
->age_ts
.tv_sec
= vm_pageout_state
.vm_page_speculative_q_age_ms
/ 1000;
5063 aq
->age_ts
.tv_nsec
= (vm_pageout_state
.vm_page_speculative_q_age_ms
% 1000) * 1000 * NSEC_PER_USEC
;
5065 ADD_MACH_TIMESPEC(&aq
->age_ts
, &ts
);
5068 vm_page_enqueue_tail(&aq
->age_q
, &m
->vmp_pageq
);
5069 m
->vmp_q_state
= VM_PAGE_ON_SPECULATIVE_Q
;
5070 vm_page_speculative_count
++;
5071 vm_page_pageable_external_count
++;
5074 vm_object_lock_assert_exclusive(m_object
);
5076 m_object
->pages_created
++;
5077 #if DEVELOPMENT || DEBUG
5078 vm_page_speculative_created
++;
5087 * move pages from the specified aging bin to
5088 * the speculative bin that pageout_scan claims from
5090 * The page queues must be locked.
5093 vm_page_speculate_ageit(struct vm_speculative_age_q
*aq
)
5095 struct vm_speculative_age_q
*sq
;
5098 sq
= &vm_page_queue_speculative
[VM_PAGE_SPECULATIVE_AGED_Q
];
5100 if (vm_page_queue_empty(&sq
->age_q
)) {
5101 sq
->age_q
.next
= aq
->age_q
.next
;
5102 sq
->age_q
.prev
= aq
->age_q
.prev
;
5104 t
= (vm_page_t
)VM_PAGE_UNPACK_PTR(sq
->age_q
.next
);
5105 t
->vmp_pageq
.prev
= VM_PAGE_PACK_PTR(&sq
->age_q
);
5107 t
= (vm_page_t
)VM_PAGE_UNPACK_PTR(sq
->age_q
.prev
);
5108 t
->vmp_pageq
.next
= VM_PAGE_PACK_PTR(&sq
->age_q
);
5110 t
= (vm_page_t
)VM_PAGE_UNPACK_PTR(sq
->age_q
.prev
);
5111 t
->vmp_pageq
.next
= aq
->age_q
.next
;
5113 t
= (vm_page_t
)VM_PAGE_UNPACK_PTR(aq
->age_q
.next
);
5114 t
->vmp_pageq
.prev
= sq
->age_q
.prev
;
5116 t
= (vm_page_t
)VM_PAGE_UNPACK_PTR(aq
->age_q
.prev
);
5117 t
->vmp_pageq
.next
= VM_PAGE_PACK_PTR(&sq
->age_q
);
5119 sq
->age_q
.prev
= aq
->age_q
.prev
;
5121 vm_page_queue_init(&aq
->age_q
);
5130 assert(VM_PAGE_OBJECT(m
) != kernel_object
);
5131 assert(VM_PAGE_GET_PHYS_PAGE(m
) != vm_page_guard_addr
);
5133 LCK_MTX_ASSERT(&vm_page_queue_lock
, LCK_MTX_ASSERT_OWNED
);
5135 if (m
->vmp_q_state
== VM_PAGE_ON_INACTIVE_EXTERNAL_Q
) {
5137 * we don't need to do all the other work that
5138 * vm_page_queues_remove and vm_page_enqueue_inactive
5139 * bring along for the ride
5141 assert(!m
->vmp_laundry
);
5142 assert(!m
->vmp_private
);
5144 m
->vmp_no_cache
= FALSE
;
5146 vm_page_queue_remove(&vm_page_queue_inactive
, m
, vmp_pageq
);
5147 vm_page_queue_enter(&vm_page_queue_inactive
, m
, vmp_pageq
);
5152 * if this page is currently on the pageout queue, we can't do the
5153 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5154 * and we can't remove it manually since we would need the object lock
5155 * (which is not required here) to decrement the activity_in_progress
5156 * reference which is held on the object while the page is in the pageout queue...
5157 * just let the normal laundry processing proceed
5159 if (m
->vmp_laundry
|| m
->vmp_private
||
5160 (m
->vmp_q_state
== VM_PAGE_USED_BY_COMPRESSOR
) ||
5161 (m
->vmp_q_state
== VM_PAGE_ON_PAGEOUT_Q
) ||
5166 m
->vmp_no_cache
= FALSE
;
5168 vm_page_queues_remove(m
, FALSE
);
5170 vm_page_enqueue_inactive(m
, FALSE
);
5175 vm_page_reactivate_all_throttled(void)
5177 vm_page_t first_throttled
, last_throttled
;
5178 vm_page_t first_active
;
5180 int extra_active_count
;
5181 int extra_internal_count
, extra_external_count
;
5182 vm_object_t m_object
;
5184 if (!VM_DYNAMIC_PAGING_ENABLED()) {
5188 extra_active_count
= 0;
5189 extra_internal_count
= 0;
5190 extra_external_count
= 0;
5191 vm_page_lock_queues();
5192 if (!vm_page_queue_empty(&vm_page_queue_throttled
)) {
5194 * Switch "throttled" pages to "active".
5196 vm_page_queue_iterate(&vm_page_queue_throttled
, m
, vmp_pageq
) {
5198 assert(m
->vmp_q_state
== VM_PAGE_ON_THROTTLED_Q
);
5200 m_object
= VM_PAGE_OBJECT(m
);
5202 extra_active_count
++;
5203 if (m_object
->internal
) {
5204 extra_internal_count
++;
5206 extra_external_count
++;
5209 m
->vmp_q_state
= VM_PAGE_ON_ACTIVE_Q
;
5211 #if CONFIG_BACKGROUND_QUEUE
5212 if (m
->vmp_in_background
) {
5213 vm_page_add_to_backgroundq(m
, FALSE
);
5219 * Transfer the entire throttled queue to a regular LRU page queues.
5220 * We insert it at the head of the active queue, so that these pages
5221 * get re-evaluated by the LRU algorithm first, since they've been
5222 * completely out of it until now.
5224 first_throttled
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_throttled
);
5225 last_throttled
= (vm_page_t
) vm_page_queue_last(&vm_page_queue_throttled
);
5226 first_active
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_active
);
5227 if (vm_page_queue_empty(&vm_page_queue_active
)) {
5228 vm_page_queue_active
.prev
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled
);
5230 first_active
->vmp_pageq
.prev
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled
);
5232 vm_page_queue_active
.next
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_throttled
);
5233 first_throttled
->vmp_pageq
.prev
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active
);
5234 last_throttled
->vmp_pageq
.next
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active
);
5237 printf("reactivated %d throttled pages\n", vm_page_throttled_count
);
5239 vm_page_queue_init(&vm_page_queue_throttled
);
5241 * Adjust the global page counts.
5243 vm_page_active_count
+= extra_active_count
;
5244 vm_page_pageable_internal_count
+= extra_internal_count
;
5245 vm_page_pageable_external_count
+= extra_external_count
;
5246 vm_page_throttled_count
= 0;
5248 assert(vm_page_throttled_count
== 0);
5249 assert(vm_page_queue_empty(&vm_page_queue_throttled
));
5250 vm_page_unlock_queues();
5255 * move pages from the indicated local queue to the global active queue
5256 * its ok to fail if we're below the hard limit and force == FALSE
5257 * the nolocks == TRUE case is to allow this function to be run on
5258 * the hibernate path
5262 vm_page_reactivate_local(uint32_t lid
, boolean_t force
, boolean_t nolocks
)
5265 vm_page_t first_local
, last_local
;
5266 vm_page_t first_active
;
5270 if (vm_page_local_q
== NULL
) {
5274 lq
= zpercpu_get_cpu(vm_page_local_q
, lid
);
5276 if (nolocks
== FALSE
) {
5277 if (lq
->vpl_count
< vm_page_local_q_hard_limit
&& force
== FALSE
) {
5278 if (!vm_page_trylockspin_queues()) {
5282 vm_page_lockspin_queues();
5285 VPL_LOCK(&lq
->vpl_lock
);
5287 if (lq
->vpl_count
) {
5289 * Switch "local" pages to "active".
5291 assert(!vm_page_queue_empty(&lq
->vpl_queue
));
5293 vm_page_queue_iterate(&lq
->vpl_queue
, m
, vmp_pageq
) {
5295 vm_page_check_pageable_safe(m
);
5296 assert(m
->vmp_q_state
== VM_PAGE_ON_ACTIVE_LOCAL_Q
);
5297 assert(!m
->vmp_fictitious
);
5299 if (m
->vmp_local_id
!= lid
) {
5300 panic("vm_page_reactivate_local: found vm_page_t(%p) with wrong cpuid", m
);
5303 m
->vmp_local_id
= 0;
5304 m
->vmp_q_state
= VM_PAGE_ON_ACTIVE_Q
;
5306 #if CONFIG_BACKGROUND_QUEUE
5307 if (m
->vmp_in_background
) {
5308 vm_page_add_to_backgroundq(m
, FALSE
);
5313 if (count
!= lq
->vpl_count
) {
5314 panic("vm_page_reactivate_local: count = %d, vm_page_local_count = %d\n", count
, lq
->vpl_count
);
5318 * Transfer the entire local queue to a regular LRU page queues.
5320 first_local
= (vm_page_t
) vm_page_queue_first(&lq
->vpl_queue
);
5321 last_local
= (vm_page_t
) vm_page_queue_last(&lq
->vpl_queue
);
5322 first_active
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_active
);
5324 if (vm_page_queue_empty(&vm_page_queue_active
)) {
5325 vm_page_queue_active
.prev
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local
);
5327 first_active
->vmp_pageq
.prev
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local
);
5329 vm_page_queue_active
.next
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local
);
5330 first_local
->vmp_pageq
.prev
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active
);
5331 last_local
->vmp_pageq
.next
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active
);
5333 vm_page_queue_init(&lq
->vpl_queue
);
5335 * Adjust the global page counts.
5337 vm_page_active_count
+= lq
->vpl_count
;
5338 vm_page_pageable_internal_count
+= lq
->vpl_internal_count
;
5339 vm_page_pageable_external_count
+= lq
->vpl_external_count
;
5341 lq
->vpl_internal_count
= 0;
5342 lq
->vpl_external_count
= 0;
5344 assert(vm_page_queue_empty(&lq
->vpl_queue
));
5346 if (nolocks
== FALSE
) {
5347 VPL_UNLOCK(&lq
->vpl_lock
);
5349 vm_page_balance_inactive(count
/ 4);
5350 vm_page_unlock_queues();
5355 * vm_page_part_zero_fill:
5357 * Zero-fill a part of the page.
5359 #define PMAP_ZERO_PART_PAGE_IMPLEMENTED
5361 vm_page_part_zero_fill(
5368 * we don't hold the page queue lock
5369 * so this check isn't safe to make
5374 #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
5375 pmap_zero_part_page(VM_PAGE_GET_PHYS_PAGE(m
), m_pa
, len
);
5379 tmp
= vm_page_grab();
5380 if (tmp
== VM_PAGE_NULL
) {
5381 vm_page_wait(THREAD_UNINT
);
5386 vm_page_zero_fill(tmp
);
5388 vm_page_part_copy(m
, 0, tmp
, 0, m_pa
);
5390 if ((m_pa
+ len
) < PAGE_SIZE
) {
5391 vm_page_part_copy(m
, m_pa
+ len
, tmp
,
5392 m_pa
+ len
, PAGE_SIZE
- (m_pa
+ len
));
5394 vm_page_copy(tmp
, m
);
5400 * vm_page_zero_fill:
5402 * Zero-fill the specified page.
5410 * we don't hold the page queue lock
5411 * so this check isn't safe to make
5416 // dbgTrace(0xAEAEAEAE, VM_PAGE_GET_PHYS_PAGE(m), 0); /* (BRINGUP) */
5417 pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m
));
5421 * vm_page_part_copy:
5423 * copy part of one page to another
5436 * we don't hold the page queue lock
5437 * so this check isn't safe to make
5439 VM_PAGE_CHECK(src_m
);
5440 VM_PAGE_CHECK(dst_m
);
5442 pmap_copy_part_page(VM_PAGE_GET_PHYS_PAGE(src_m
), src_pa
,
5443 VM_PAGE_GET_PHYS_PAGE(dst_m
), dst_pa
, len
);
5449 * Copy one page to another
5452 int vm_page_copy_cs_validations
= 0;
5453 int vm_page_copy_cs_tainted
= 0;
5460 vm_object_t src_m_object
;
5462 src_m_object
= VM_PAGE_OBJECT(src_m
);
5466 * we don't hold the page queue lock
5467 * so this check isn't safe to make
5469 VM_PAGE_CHECK(src_m
);
5470 VM_PAGE_CHECK(dest_m
);
5472 vm_object_lock_assert_held(src_m_object
);
5474 if (src_m_object
!= VM_OBJECT_NULL
&&
5475 src_m_object
->code_signed
) {
5477 * We're copying a page from a code-signed object.
5478 * Whoever ends up mapping the copy page might care about
5479 * the original page's integrity, so let's validate the
5482 vm_page_copy_cs_validations
++;
5483 vm_page_validate_cs(src_m
, PAGE_SIZE
, 0);
5484 #if DEVELOPMENT || DEBUG
5485 DTRACE_VM4(codesigned_copy
,
5486 vm_object_t
, src_m_object
,
5487 vm_object_offset_t
, src_m
->vmp_offset
,
5488 int, src_m
->vmp_cs_validated
,
5489 int, src_m
->vmp_cs_tainted
);
5490 #endif /* DEVELOPMENT || DEBUG */
5494 * Propagate the cs_tainted bit to the copy page. Do not propagate
5495 * the cs_validated bit.
5497 dest_m
->vmp_cs_tainted
= src_m
->vmp_cs_tainted
;
5498 dest_m
->vmp_cs_nx
= src_m
->vmp_cs_nx
;
5499 if (dest_m
->vmp_cs_tainted
) {
5500 vm_page_copy_cs_tainted
++;
5502 dest_m
->vmp_error
= src_m
->vmp_error
; /* sliding src_m might have failed... */
5503 pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(src_m
), VM_PAGE_GET_PHYS_PAGE(dest_m
));
5511 printf("vm_page %p: \n", p
);
5512 printf(" pageq: next=%p prev=%p\n",
5513 (vm_page_t
)VM_PAGE_UNPACK_PTR(p
->vmp_pageq
.next
),
5514 (vm_page_t
)VM_PAGE_UNPACK_PTR(p
->vmp_pageq
.prev
));
5515 printf(" listq: next=%p prev=%p\n",
5516 (vm_page_t
)(VM_PAGE_UNPACK_PTR(p
->vmp_listq
.next
)),
5517 (vm_page_t
)(VM_PAGE_UNPACK_PTR(p
->vmp_listq
.prev
)));
5518 printf(" next=%p\n", (vm_page_t
)(VM_PAGE_UNPACK_PTR(p
->vmp_next_m
)));
5519 printf(" object=%p offset=0x%llx\n", VM_PAGE_OBJECT(p
), p
->vmp_offset
);
5520 printf(" wire_count=%u\n", p
->vmp_wire_count
);
5521 printf(" q_state=%u\n", p
->vmp_q_state
);
5523 printf(" %slaundry, %sref, %sgobbled, %sprivate\n",
5524 (p
->vmp_laundry
? "" : "!"),
5525 (p
->vmp_reference
? "" : "!"),
5526 (p
->vmp_gobbled
? "" : "!"),
5527 (p
->vmp_private
? "" : "!"));
5528 printf(" %sbusy, %swanted, %stabled, %sfictitious, %spmapped, %swpmapped\n",
5529 (p
->vmp_busy
? "" : "!"),
5530 (p
->vmp_wanted
? "" : "!"),
5531 (p
->vmp_tabled
? "" : "!"),
5532 (p
->vmp_fictitious
? "" : "!"),
5533 (p
->vmp_pmapped
? "" : "!"),
5534 (p
->vmp_wpmapped
? "" : "!"));
5535 printf(" %sfree_when_done, %sabsent, %serror, %sdirty, %scleaning, %sprecious, %sclustered\n",
5536 (p
->vmp_free_when_done
? "" : "!"),
5537 (p
->vmp_absent
? "" : "!"),
5538 (p
->vmp_error
? "" : "!"),
5539 (p
->vmp_dirty
? "" : "!"),
5540 (p
->vmp_cleaning
? "" : "!"),
5541 (p
->vmp_precious
? "" : "!"),
5542 (p
->vmp_clustered
? "" : "!"));
5543 printf(" %soverwriting, %srestart, %sunusual\n",
5544 (p
->vmp_overwriting
? "" : "!"),
5545 (p
->vmp_restart
? "" : "!"),
5546 (p
->vmp_unusual
? "" : "!"));
5547 printf(" cs_validated=%d, cs_tainted=%d, cs_nx=%d, %sno_cache\n",
5548 p
->vmp_cs_validated
,
5551 (p
->vmp_no_cache
? "" : "!"));
5553 printf("phys_page=0x%x\n", VM_PAGE_GET_PHYS_PAGE(p
));
5557 * Check that the list of pages is ordered by
5558 * ascending physical address and has no holes.
5561 vm_page_verify_contiguous(
5563 unsigned int npages
)
5566 unsigned int page_count
;
5567 vm_offset_t prev_addr
;
5569 prev_addr
= VM_PAGE_GET_PHYS_PAGE(pages
);
5571 for (m
= NEXT_PAGE(pages
); m
!= VM_PAGE_NULL
; m
= NEXT_PAGE(m
)) {
5572 if (VM_PAGE_GET_PHYS_PAGE(m
) != prev_addr
+ 1) {
5573 printf("m %p prev_addr 0x%lx, current addr 0x%x\n",
5574 m
, (long)prev_addr
, VM_PAGE_GET_PHYS_PAGE(m
));
5575 printf("pages %p page_count %d npages %d\n", pages
, page_count
, npages
);
5576 panic("vm_page_verify_contiguous: not contiguous!");
5578 prev_addr
= VM_PAGE_GET_PHYS_PAGE(m
);
5581 if (page_count
!= npages
) {
5582 printf("pages %p actual count 0x%x but requested 0x%x\n",
5583 pages
, page_count
, npages
);
5584 panic("vm_page_verify_contiguous: count error");
5591 * Check the free lists for proper length etc.
5593 static boolean_t vm_page_verify_this_free_list_enabled
= FALSE
;
5595 vm_page_verify_free_list(
5596 vm_page_queue_head_t
*vm_page_queue
,
5598 vm_page_t look_for_page
,
5599 boolean_t expect_page
)
5601 unsigned int npages
;
5604 boolean_t found_page
;
5606 if (!vm_page_verify_this_free_list_enabled
) {
5612 prev_m
= (vm_page_t
)((uintptr_t)vm_page_queue
);
5614 vm_page_queue_iterate(vm_page_queue
, m
, vmp_pageq
) {
5615 if (m
== look_for_page
) {
5618 if ((vm_page_t
)VM_PAGE_UNPACK_PTR(m
->vmp_pageq
.prev
) != prev_m
) {
5619 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p corrupted prev ptr %p instead of %p\n",
5620 color
, npages
, m
, (vm_page_t
)VM_PAGE_UNPACK_PTR(m
->vmp_pageq
.prev
), prev_m
);
5623 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not busy\n",
5626 if (color
!= (unsigned int) -1) {
5627 if (VM_PAGE_GET_COLOR(m
) != color
) {
5628 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u\n",
5629 color
, npages
, m
, VM_PAGE_GET_COLOR(m
), color
);
5631 if (m
->vmp_q_state
!= VM_PAGE_ON_FREE_Q
) {
5632 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p - expecting q_state == VM_PAGE_ON_FREE_Q, found %d\n",
5633 color
, npages
, m
, m
->vmp_q_state
);
5636 if (m
->vmp_q_state
!= VM_PAGE_ON_FREE_LOCAL_Q
) {
5637 panic("vm_page_verify_free_list(npages=%u): local page %p - expecting q_state == VM_PAGE_ON_FREE_LOCAL_Q, found %d\n",
5638 npages
, m
, m
->vmp_q_state
);
5644 if (look_for_page
!= VM_PAGE_NULL
) {
5645 unsigned int other_color
;
5647 if (expect_page
&& !found_page
) {
5648 printf("vm_page_verify_free_list(color=%u, npages=%u): page %p not found phys=%u\n",
5649 color
, npages
, look_for_page
, VM_PAGE_GET_PHYS_PAGE(look_for_page
));
5650 _vm_page_print(look_for_page
);
5651 for (other_color
= 0;
5652 other_color
< vm_colors
;
5654 if (other_color
== color
) {
5657 vm_page_verify_free_list(&vm_page_queue_free
[other_color
].qhead
,
5658 other_color
, look_for_page
, FALSE
);
5660 if (color
== (unsigned int) -1) {
5661 vm_page_verify_free_list(&vm_lopage_queue_free
,
5662 (unsigned int) -1, look_for_page
, FALSE
);
5664 panic("vm_page_verify_free_list(color=%u)\n", color
);
5666 if (!expect_page
&& found_page
) {
5667 printf("vm_page_verify_free_list(color=%u, npages=%u): page %p found phys=%u\n",
5668 color
, npages
, look_for_page
, VM_PAGE_GET_PHYS_PAGE(look_for_page
));
5674 static boolean_t vm_page_verify_all_free_lists_enabled
= FALSE
;
5676 vm_page_verify_free_lists( void )
5678 unsigned int color
, npages
, nlopages
;
5679 boolean_t toggle
= TRUE
;
5681 if (!vm_page_verify_all_free_lists_enabled
) {
5687 lck_mtx_lock(&vm_page_queue_free_lock
);
5689 if (vm_page_verify_this_free_list_enabled
== TRUE
) {
5691 * This variable has been set globally for extra checking of
5692 * each free list Q. Since we didn't set it, we don't own it
5693 * and we shouldn't toggle it.
5698 if (toggle
== TRUE
) {
5699 vm_page_verify_this_free_list_enabled
= TRUE
;
5702 for (color
= 0; color
< vm_colors
; color
++) {
5703 npages
+= vm_page_verify_free_list(&vm_page_queue_free
[color
].qhead
,
5704 color
, VM_PAGE_NULL
, FALSE
);
5706 nlopages
= vm_page_verify_free_list(&vm_lopage_queue_free
,
5708 VM_PAGE_NULL
, FALSE
);
5709 if (npages
!= vm_page_free_count
|| nlopages
!= vm_lopage_free_count
) {
5710 panic("vm_page_verify_free_lists: "
5711 "npages %u free_count %d nlopages %u lo_free_count %u",
5712 npages
, vm_page_free_count
, nlopages
, vm_lopage_free_count
);
5715 if (toggle
== TRUE
) {
5716 vm_page_verify_this_free_list_enabled
= FALSE
;
5719 lck_mtx_unlock(&vm_page_queue_free_lock
);
5722 #endif /* MACH_ASSERT */
5725 extern boolean_t(*volatile consider_buffer_cache_collect
)(int);
5728 * CONTIGUOUS PAGE ALLOCATION
5730 * Find a region large enough to contain at least n pages
5731 * of contiguous physical memory.
5733 * This is done by traversing the vm_page_t array in a linear fashion
5734 * we assume that the vm_page_t array has the avaiable physical pages in an
5735 * ordered, ascending list... this is currently true of all our implementations
5736 * and must remain so... there can be 'holes' in the array... we also can
5737 * no longer tolerate the vm_page_t's in the list being 'freed' and reclaimed
5738 * which use to happen via 'vm_page_convert'... that function was no longer
5739 * being called and was removed...
5741 * The basic flow consists of stabilizing some of the interesting state of
5742 * a vm_page_t behind the vm_page_queue and vm_page_free locks... we start our
5743 * sweep at the beginning of the array looking for pages that meet our criterea
5744 * for a 'stealable' page... currently we are pretty conservative... if the page
5745 * meets this criterea and is physically contiguous to the previous page in the 'run'
5746 * we keep developing it. If we hit a page that doesn't fit, we reset our state
5747 * and start to develop a new run... if at this point we've already considered
5748 * at least MAX_CONSIDERED_BEFORE_YIELD pages, we'll drop the 2 locks we hold,
5749 * and mutex_pause (which will yield the processor), to keep the latency low w/r
5750 * to other threads trying to acquire free pages (or move pages from q to q),
5751 * and then continue from the spot we left off... we only make 1 pass through the
5752 * array. Once we have a 'run' that is long enough, we'll go into the loop which
5753 * which steals the pages from the queues they're currently on... pages on the free
5754 * queue can be stolen directly... pages that are on any of the other queues
5755 * must be removed from the object they are tabled on... this requires taking the
5756 * object lock... we do this as a 'try' to prevent deadlocks... if the 'try' fails
5757 * or if the state of the page behind the vm_object lock is no longer viable, we'll
5758 * dump the pages we've currently stolen back to the free list, and pick up our
5759 * scan from the point where we aborted the 'current' run.
5763 * - neither vm_page_queue nor vm_free_list lock can be held on entry
5765 * Returns a pointer to a list of gobbled/wired pages or VM_PAGE_NULL.
5770 #define MAX_CONSIDERED_BEFORE_YIELD 1000
5773 #define RESET_STATE_OF_RUN() \
5775 prevcontaddr = -2; \
5777 free_considered = 0; \
5778 substitute_needed = 0; \
5783 * Can we steal in-use (i.e. not free) pages when searching for
5784 * physically-contiguous pages ?
5786 #define VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL 1
5788 static unsigned int vm_page_find_contiguous_last_idx
= 0, vm_page_lomem_find_contiguous_last_idx
= 0;
5790 int vm_page_find_contig_debug
= 0;
5794 vm_page_find_contiguous(
5795 unsigned int contig_pages
,
5802 ppnum_t prevcontaddr
= 0;
5803 ppnum_t start_pnum
= 0;
5804 unsigned int npages
= 0, considered
= 0, scanned
= 0;
5805 unsigned int page_idx
= 0, start_idx
= 0, last_idx
= 0, orig_last_idx
= 0;
5806 unsigned int idx_last_contig_page_found
= 0;
5807 int free_considered
= 0, free_available
= 0;
5808 int substitute_needed
= 0;
5809 boolean_t wrapped
, zone_gc_called
= FALSE
;
5812 clock_sec_t tv_start_sec
= 0, tv_end_sec
= 0;
5813 clock_usec_t tv_start_usec
= 0, tv_end_usec
= 0;
5818 int stolen_pages
= 0;
5819 int compressed_pages
= 0;
5822 if (contig_pages
== 0) {
5823 return VM_PAGE_NULL
;
5829 vm_page_verify_free_lists();
5832 clock_get_system_microtime(&tv_start_sec
, &tv_start_usec
);
5834 PAGE_REPLACEMENT_ALLOWED(TRUE
);
5837 * If there are still delayed pages, try to free up some that match.
5839 if (__improbable(vm_delayed_count
!= 0 && contig_pages
!= 0)) {
5840 vm_free_delayed_pages_contig(contig_pages
, max_pnum
, pnum_mask
);
5843 vm_page_lock_queues();
5844 lck_mtx_lock(&vm_page_queue_free_lock
);
5846 RESET_STATE_OF_RUN();
5850 free_available
= vm_page_free_count
- vm_page_free_reserved
;
5854 if (flags
& KMA_LOMEM
) {
5855 idx_last_contig_page_found
= vm_page_lomem_find_contiguous_last_idx
;
5857 idx_last_contig_page_found
= vm_page_find_contiguous_last_idx
;
5860 orig_last_idx
= idx_last_contig_page_found
;
5861 last_idx
= orig_last_idx
;
5863 for (page_idx
= last_idx
, start_idx
= last_idx
;
5864 npages
< contig_pages
&& page_idx
< vm_pages_count
;
5869 page_idx
>= orig_last_idx
) {
5871 * We're back where we started and we haven't
5872 * found any suitable contiguous range. Let's
5878 m
= &vm_pages
[page_idx
];
5880 assert(!m
->vmp_fictitious
);
5881 assert(!m
->vmp_private
);
5883 if (max_pnum
&& VM_PAGE_GET_PHYS_PAGE(m
) > max_pnum
) {
5884 /* no more low pages... */
5887 if (!npages
& ((VM_PAGE_GET_PHYS_PAGE(m
) & pnum_mask
) != 0)) {
5891 RESET_STATE_OF_RUN();
5892 } else if (VM_PAGE_WIRED(m
) || m
->vmp_gobbled
||
5893 m
->vmp_laundry
|| m
->vmp_wanted
||
5894 m
->vmp_cleaning
|| m
->vmp_overwriting
|| m
->vmp_free_when_done
) {
5896 * page is in a transient state
5897 * or a state we don't want to deal
5898 * with, so don't consider it which
5899 * means starting a new run
5901 RESET_STATE_OF_RUN();
5902 } else if ((m
->vmp_q_state
== VM_PAGE_NOT_ON_Q
) ||
5903 (m
->vmp_q_state
== VM_PAGE_ON_FREE_LOCAL_Q
) ||
5904 (m
->vmp_q_state
== VM_PAGE_ON_FREE_LOPAGE_Q
) ||
5905 (m
->vmp_q_state
== VM_PAGE_ON_PAGEOUT_Q
)) {
5907 * page needs to be on one of our queues (other then the pageout or special free queues)
5908 * or it needs to belong to the compressor pool (which is now indicated
5909 * by vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR and falls out
5910 * from the check for VM_PAGE_NOT_ON_Q)
5911 * in order for it to be stable behind the
5912 * locks we hold at this point...
5913 * if not, don't consider it which
5914 * means starting a new run
5916 RESET_STATE_OF_RUN();
5917 } else if ((m
->vmp_q_state
!= VM_PAGE_ON_FREE_Q
) && (!m
->vmp_tabled
|| m
->vmp_busy
)) {
5919 * pages on the free list are always 'busy'
5920 * so we couldn't test for 'busy' in the check
5921 * for the transient states... pages that are
5922 * 'free' are never 'tabled', so we also couldn't
5923 * test for 'tabled'. So we check here to make
5924 * sure that a non-free page is not busy and is
5925 * tabled on an object...
5926 * if not, don't consider it which
5927 * means starting a new run
5929 RESET_STATE_OF_RUN();
5931 if (VM_PAGE_GET_PHYS_PAGE(m
) != prevcontaddr
+ 1) {
5932 if ((VM_PAGE_GET_PHYS_PAGE(m
) & pnum_mask
) != 0) {
5933 RESET_STATE_OF_RUN();
5937 start_idx
= page_idx
;
5938 start_pnum
= VM_PAGE_GET_PHYS_PAGE(m
);
5943 prevcontaddr
= VM_PAGE_GET_PHYS_PAGE(m
);
5946 if (m
->vmp_q_state
== VM_PAGE_ON_FREE_Q
) {
5950 * This page is not free.
5951 * If we can't steal used pages,
5952 * we have to give up this run
5954 * Otherwise, we might need to
5955 * move the contents of this page
5956 * into a substitute page.
5958 #if VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
5959 if (m
->vmp_pmapped
|| m
->vmp_dirty
|| m
->vmp_precious
) {
5960 substitute_needed
++;
5963 RESET_STATE_OF_RUN();
5967 if ((free_considered
+ substitute_needed
) > free_available
) {
5969 * if we let this run continue
5970 * we will end up dropping the vm_page_free_count
5971 * below the reserve limit... we need to abort
5972 * this run, but we can at least re-consider this
5973 * page... thus the jump back to 'retry'
5975 RESET_STATE_OF_RUN();
5977 if (free_available
&& considered
<= MAX_CONSIDERED_BEFORE_YIELD
) {
5982 * free_available == 0
5983 * so can't consider any free pages... if
5984 * we went to retry in this case, we'd
5985 * get stuck looking at the same page
5986 * w/o making any forward progress
5987 * we also want to take this path if we've already
5988 * reached our limit that controls the lock latency
5993 if (considered
> MAX_CONSIDERED_BEFORE_YIELD
&& npages
<= 1) {
5994 PAGE_REPLACEMENT_ALLOWED(FALSE
);
5996 lck_mtx_unlock(&vm_page_queue_free_lock
);
5997 vm_page_unlock_queues();
6001 PAGE_REPLACEMENT_ALLOWED(TRUE
);
6003 vm_page_lock_queues();
6004 lck_mtx_lock(&vm_page_queue_free_lock
);
6006 RESET_STATE_OF_RUN();
6008 * reset our free page limit since we
6009 * dropped the lock protecting the vm_page_free_queue
6011 free_available
= vm_page_free_count
- vm_page_free_reserved
;
6022 if (npages
!= contig_pages
) {
6025 * We didn't find a contiguous range but we didn't
6026 * start from the very first page.
6027 * Start again from the very first page.
6029 RESET_STATE_OF_RUN();
6030 if (flags
& KMA_LOMEM
) {
6031 idx_last_contig_page_found
= vm_page_lomem_find_contiguous_last_idx
= 0;
6033 idx_last_contig_page_found
= vm_page_find_contiguous_last_idx
= 0;
6036 page_idx
= last_idx
;
6040 lck_mtx_unlock(&vm_page_queue_free_lock
);
6044 unsigned int cur_idx
;
6045 unsigned int tmp_start_idx
;
6046 vm_object_t locked_object
= VM_OBJECT_NULL
;
6047 boolean_t abort_run
= FALSE
;
6049 assert(page_idx
- start_idx
== contig_pages
);
6051 tmp_start_idx
= start_idx
;
6054 * first pass through to pull the free pages
6055 * off of the free queue so that in case we
6056 * need substitute pages, we won't grab any
6057 * of the free pages in the run... we'll clear
6058 * the 'free' bit in the 2nd pass, and even in
6059 * an abort_run case, we'll collect all of the
6060 * free pages in this run and return them to the free list
6062 while (start_idx
< page_idx
) {
6063 m1
= &vm_pages
[start_idx
++];
6065 #if !VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
6066 assert(m1
->vmp_q_state
== VM_PAGE_ON_FREE_Q
);
6069 if (m1
->vmp_q_state
== VM_PAGE_ON_FREE_Q
) {
6072 color
= VM_PAGE_GET_COLOR(m1
);
6074 vm_page_verify_free_list(&vm_page_queue_free
[color
].qhead
, color
, m1
, TRUE
);
6076 vm_page_queue_remove(&vm_page_queue_free
[color
].qhead
, m1
, vmp_pageq
);
6078 VM_PAGE_ZERO_PAGEQ_ENTRY(m1
);
6080 vm_page_verify_free_list(&vm_page_queue_free
[color
].qhead
, color
, VM_PAGE_NULL
, FALSE
);
6083 * Clear the "free" bit so that this page
6084 * does not get considered for another
6085 * concurrent physically-contiguous allocation.
6087 m1
->vmp_q_state
= VM_PAGE_NOT_ON_Q
;
6088 assert(m1
->vmp_busy
);
6090 vm_page_free_count
--;
6093 if (flags
& KMA_LOMEM
) {
6094 vm_page_lomem_find_contiguous_last_idx
= page_idx
;
6096 vm_page_find_contiguous_last_idx
= page_idx
;
6100 * we can drop the free queue lock at this point since
6101 * we've pulled any 'free' candidates off of the list
6102 * we need it dropped so that we can do a vm_page_grab
6103 * when substituing for pmapped/dirty pages
6105 lck_mtx_unlock(&vm_page_queue_free_lock
);
6107 start_idx
= tmp_start_idx
;
6108 cur_idx
= page_idx
- 1;
6110 while (start_idx
++ < page_idx
) {
6112 * must go through the list from back to front
6113 * so that the page list is created in the
6114 * correct order - low -> high phys addresses
6116 m1
= &vm_pages
[cur_idx
--];
6118 if (m1
->vmp_object
== 0) {
6120 * page has already been removed from
6121 * the free list in the 1st pass
6123 assert(m1
->vmp_q_state
== VM_PAGE_NOT_ON_Q
);
6124 assert(m1
->vmp_offset
== (vm_object_offset_t
) -1);
6125 assert(m1
->vmp_busy
);
6126 assert(!m1
->vmp_wanted
);
6127 assert(!m1
->vmp_laundry
);
6131 boolean_t disconnected
, reusable
;
6133 if (abort_run
== TRUE
) {
6137 assert(m1
->vmp_q_state
!= VM_PAGE_NOT_ON_Q
);
6139 object
= VM_PAGE_OBJECT(m1
);
6141 if (object
!= locked_object
) {
6142 if (locked_object
) {
6143 vm_object_unlock(locked_object
);
6144 locked_object
= VM_OBJECT_NULL
;
6146 if (vm_object_lock_try(object
)) {
6147 locked_object
= object
;
6150 if (locked_object
== VM_OBJECT_NULL
||
6151 (VM_PAGE_WIRED(m1
) || m1
->vmp_gobbled
||
6152 m1
->vmp_laundry
|| m1
->vmp_wanted
||
6153 m1
->vmp_cleaning
|| m1
->vmp_overwriting
|| m1
->vmp_free_when_done
|| m1
->vmp_busy
) ||
6154 (m1
->vmp_q_state
== VM_PAGE_ON_PAGEOUT_Q
)) {
6155 if (locked_object
) {
6156 vm_object_unlock(locked_object
);
6157 locked_object
= VM_OBJECT_NULL
;
6159 tmp_start_idx
= cur_idx
;
6164 disconnected
= FALSE
;
6167 if ((m1
->vmp_reusable
||
6168 object
->all_reusable
) &&
6169 (m1
->vmp_q_state
== VM_PAGE_ON_INACTIVE_INTERNAL_Q
) &&
6171 !m1
->vmp_reference
) {
6172 /* reusable page... */
6173 refmod
= pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1
));
6174 disconnected
= TRUE
;
6177 * ... not reused: can steal
6178 * without relocating contents.
6184 if ((m1
->vmp_pmapped
&&
6188 vm_object_offset_t offset
;
6190 m2
= vm_page_grab_options(VM_PAGE_GRAB_Q_LOCK_HELD
);
6192 if (m2
== VM_PAGE_NULL
) {
6193 if (locked_object
) {
6194 vm_object_unlock(locked_object
);
6195 locked_object
= VM_OBJECT_NULL
;
6197 tmp_start_idx
= cur_idx
;
6201 if (!disconnected
) {
6202 if (m1
->vmp_pmapped
) {
6203 refmod
= pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1
));
6209 /* copy the page's contents */
6210 pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(m1
), VM_PAGE_GET_PHYS_PAGE(m2
));
6211 /* copy the page's state */
6212 assert(!VM_PAGE_WIRED(m1
));
6213 assert(m1
->vmp_q_state
!= VM_PAGE_ON_FREE_Q
);
6214 assert(m1
->vmp_q_state
!= VM_PAGE_ON_PAGEOUT_Q
);
6215 assert(!m1
->vmp_laundry
);
6216 m2
->vmp_reference
= m1
->vmp_reference
;
6217 assert(!m1
->vmp_gobbled
);
6218 assert(!m1
->vmp_private
);
6219 m2
->vmp_no_cache
= m1
->vmp_no_cache
;
6220 m2
->vmp_xpmapped
= 0;
6221 assert(!m1
->vmp_busy
);
6222 assert(!m1
->vmp_wanted
);
6223 assert(!m1
->vmp_fictitious
);
6224 m2
->vmp_pmapped
= m1
->vmp_pmapped
; /* should flush cache ? */
6225 m2
->vmp_wpmapped
= m1
->vmp_wpmapped
;
6226 assert(!m1
->vmp_free_when_done
);
6227 m2
->vmp_absent
= m1
->vmp_absent
;
6228 m2
->vmp_error
= m1
->vmp_error
;
6229 m2
->vmp_dirty
= m1
->vmp_dirty
;
6230 assert(!m1
->vmp_cleaning
);
6231 m2
->vmp_precious
= m1
->vmp_precious
;
6232 m2
->vmp_clustered
= m1
->vmp_clustered
;
6233 assert(!m1
->vmp_overwriting
);
6234 m2
->vmp_restart
= m1
->vmp_restart
;
6235 m2
->vmp_unusual
= m1
->vmp_unusual
;
6236 m2
->vmp_cs_validated
= m1
->vmp_cs_validated
;
6237 m2
->vmp_cs_tainted
= m1
->vmp_cs_tainted
;
6238 m2
->vmp_cs_nx
= m1
->vmp_cs_nx
;
6241 * If m1 had really been reusable,
6242 * we would have just stolen it, so
6243 * let's not propagate it's "reusable"
6244 * bit and assert that m2 is not
6245 * marked as "reusable".
6247 // m2->vmp_reusable = m1->vmp_reusable;
6248 assert(!m2
->vmp_reusable
);
6250 // assert(!m1->vmp_lopage);
6252 if (m1
->vmp_q_state
== VM_PAGE_USED_BY_COMPRESSOR
) {
6253 m2
->vmp_q_state
= VM_PAGE_USED_BY_COMPRESSOR
;
6257 * page may need to be flushed if
6258 * it is marshalled into a UPL
6259 * that is going to be used by a device
6260 * that doesn't support coherency
6262 m2
->vmp_written_by_kernel
= TRUE
;
6265 * make sure we clear the ref/mod state
6266 * from the pmap layer... else we risk
6267 * inheriting state from the last time
6268 * this page was used...
6270 pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m2
), VM_MEM_MODIFIED
| VM_MEM_REFERENCED
);
6272 if (refmod
& VM_MEM_REFERENCED
) {
6273 m2
->vmp_reference
= TRUE
;
6275 if (refmod
& VM_MEM_MODIFIED
) {
6276 SET_PAGE_DIRTY(m2
, TRUE
);
6278 offset
= m1
->vmp_offset
;
6281 * completely cleans up the state
6282 * of the page so that it is ready
6283 * to be put onto the free list, or
6284 * for this purpose it looks like it
6285 * just came off of the free list
6287 vm_page_free_prepare(m1
);
6290 * now put the substitute page
6293 vm_page_insert_internal(m2
, locked_object
, offset
, VM_KERN_MEMORY_NONE
, TRUE
, TRUE
, FALSE
, FALSE
, NULL
);
6295 if (m2
->vmp_q_state
== VM_PAGE_USED_BY_COMPRESSOR
) {
6296 m2
->vmp_pmapped
= TRUE
;
6297 m2
->vmp_wpmapped
= TRUE
;
6299 PMAP_ENTER(kernel_pmap
, (vm_map_offset_t
)m2
->vmp_offset
, m2
,
6300 VM_PROT_READ
| VM_PROT_WRITE
, VM_PROT_NONE
, 0, TRUE
, kr
);
6302 assert(kr
== KERN_SUCCESS
);
6306 if (m2
->vmp_reference
) {
6307 vm_page_activate(m2
);
6309 vm_page_deactivate(m2
);
6312 PAGE_WAKEUP_DONE(m2
);
6314 assert(m1
->vmp_q_state
!= VM_PAGE_USED_BY_COMPRESSOR
);
6317 * completely cleans up the state
6318 * of the page so that it is ready
6319 * to be put onto the free list, or
6320 * for this purpose it looks like it
6321 * just came off of the free list
6323 vm_page_free_prepare(m1
);
6328 #if CONFIG_BACKGROUND_QUEUE
6329 vm_page_assign_background_state(m1
);
6331 VM_PAGE_ZERO_PAGEQ_ENTRY(m1
);
6335 if (locked_object
) {
6336 vm_object_unlock(locked_object
);
6337 locked_object
= VM_OBJECT_NULL
;
6340 if (abort_run
== TRUE
) {
6342 * want the index of the last
6343 * page in this run that was
6344 * successfully 'stolen', so back
6345 * it up 1 for the auto-decrement on use
6346 * and 1 more to bump back over this page
6348 page_idx
= tmp_start_idx
+ 2;
6349 if (page_idx
>= vm_pages_count
) {
6351 if (m
!= VM_PAGE_NULL
) {
6352 vm_page_unlock_queues();
6353 vm_page_free_list(m
, FALSE
);
6354 vm_page_lock_queues();
6360 page_idx
= last_idx
= 0;
6366 * We didn't find a contiguous range but we didn't
6367 * start from the very first page.
6368 * Start again from the very first page.
6370 RESET_STATE_OF_RUN();
6372 if (flags
& KMA_LOMEM
) {
6373 idx_last_contig_page_found
= vm_page_lomem_find_contiguous_last_idx
= page_idx
;
6375 idx_last_contig_page_found
= vm_page_find_contiguous_last_idx
= page_idx
;
6378 last_idx
= page_idx
;
6380 if (m
!= VM_PAGE_NULL
) {
6381 vm_page_unlock_queues();
6382 vm_page_free_list(m
, FALSE
);
6383 vm_page_lock_queues();
6388 lck_mtx_lock(&vm_page_queue_free_lock
);
6390 * reset our free page limit since we
6391 * dropped the lock protecting the vm_page_free_queue
6393 free_available
= vm_page_free_count
- vm_page_free_reserved
;
6397 for (m1
= m
; m1
!= VM_PAGE_NULL
; m1
= NEXT_PAGE(m1
)) {
6398 assert(m1
->vmp_q_state
== VM_PAGE_NOT_ON_Q
);
6399 assert(m1
->vmp_wire_count
== 0);
6402 m1
->vmp_wire_count
++;
6403 m1
->vmp_q_state
= VM_PAGE_IS_WIRED
;
6405 m1
->vmp_gobbled
= TRUE
;
6408 if (wire
== FALSE
) {
6409 vm_page_gobble_count
+= npages
;
6413 * gobbled pages are also counted as wired pages
6415 vm_page_wire_count
+= npages
;
6417 assert(vm_page_verify_contiguous(m
, npages
));
6420 PAGE_REPLACEMENT_ALLOWED(FALSE
);
6422 vm_page_unlock_queues();
6425 clock_get_system_microtime(&tv_end_sec
, &tv_end_usec
);
6427 tv_end_sec
-= tv_start_sec
;
6428 if (tv_end_usec
< tv_start_usec
) {
6430 tv_end_usec
+= 1000000;
6432 tv_end_usec
-= tv_start_usec
;
6433 if (tv_end_usec
>= 1000000) {
6435 tv_end_sec
-= 1000000;
6437 if (vm_page_find_contig_debug
) {
6438 printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds... started at %d... scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages\n",
6439 __func__
, contig_pages
, max_pnum
, npages
, (vm_object_offset_t
)start_pnum
<< PAGE_SHIFT
,
6440 (long)tv_end_sec
, tv_end_usec
, orig_last_idx
,
6441 scanned
, yielded
, dumped_run
, stolen_pages
, compressed_pages
);
6446 vm_page_verify_free_lists();
6448 if (m
== NULL
&& zone_gc_called
== FALSE
) {
6449 printf("%s(num=%d,low=%d): found %d pages at 0x%llx...scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages... wired count is %d\n",
6450 __func__
, contig_pages
, max_pnum
, npages
, (vm_object_offset_t
)start_pnum
<< PAGE_SHIFT
,
6451 scanned
, yielded
, dumped_run
, stolen_pages
, compressed_pages
, vm_page_wire_count
);
6453 if (consider_buffer_cache_collect
!= NULL
) {
6454 (void)(*consider_buffer_cache_collect
)(1);
6457 consider_zone_gc(FALSE
);
6459 zone_gc_called
= TRUE
;
6461 printf("vm_page_find_contiguous: zone_gc called... wired count is %d\n", vm_page_wire_count
);
6462 goto full_scan_again
;
/*
 *	Allocate a list of contiguous, wired pages.
 */
kern_return_t
cpm_allocate(
    vm_size_t       size,
    vm_page_t       *list,
    ppnum_t         max_pnum,
    ppnum_t         pnum_mask,
    boolean_t       wire,
    int             flags)
{
    vm_page_t       pages;
    unsigned int    npages;

    if (size % PAGE_SIZE != 0) {
        return KERN_INVALID_ARGUMENT;
    }

    npages = (unsigned int) (size / PAGE_SIZE);
    if (npages != size / PAGE_SIZE) {
        /* 32-bit overflow */
        return KERN_INVALID_ARGUMENT;
    }

    /*
     *	Obtain a pointer to a subset of the free
     *	list large enough to satisfy the request;
     *	the region will be physically contiguous.
     */
    pages = vm_page_find_contiguous(npages, max_pnum, pnum_mask, wire, flags);

    if (pages == VM_PAGE_NULL) {
        return KERN_NO_SPACE;
    }
    /*
     * determine need for wakeups
     */
    if (vm_page_free_count < vm_page_free_min) {
        lck_mtx_lock(&vm_page_queue_free_lock);
        if (vm_pageout_running == FALSE) {
            lck_mtx_unlock(&vm_page_queue_free_lock);
            thread_wakeup((event_t) &vm_page_free_wanted);
        } else {
            lck_mtx_unlock(&vm_page_queue_free_lock);
        }
    }

    VM_CHECK_MEMORYSTATUS;

    /*
     *	The CPM pages should now be available and
     *	ordered by ascending physical address.
     */
    assert(vm_page_verify_contiguous(pages, npages));

    *list = pages;

    return KERN_SUCCESS;
}
unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT;

/*
 * when working on a 'run' of pages, it is necessary to hold
 * the vm_page_queue_lock (a hot global lock) for certain operations
 * on the page... however, the majority of the work can be done
 * while merely holding the object lock... in fact there are certain
 * collections of pages that don't require any work brokered by the
 * vm_page_queue_lock... to mitigate the time spent behind the global
 * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
 * while doing all of the work that doesn't require the vm_page_queue_lock...
 * then call vm_page_do_delayed_work to acquire the vm_page_queue_lock and do the
 * necessary work for each page... we will grab the busy bit on the page
 * if it's not already held so that vm_page_do_delayed_work can drop the object lock
 * if it can't immediately take the vm_page_queue_lock in order to compete
 * for the locks in the same order that vm_pageout_scan takes them.
 * the operation names are modeled after the names of the routines that
 * need to be called in order to make the changes very obvious in the
 * original code.
 */

void
vm_page_do_delayed_work(
    vm_object_t object,
    vm_tag_t tag,
    struct vm_page_delayed_work *dwp,
    int dw_count)
{
    int j;
    vm_page_t m;
    vm_page_t local_free_q = VM_PAGE_NULL;

    /*
     * pageout_scan takes the vm_page_lock_queues first
     * then tries for the object lock... to avoid what
     * is effectively a lock inversion, we'll go to the
     * trouble of taking them in that same order... otherwise
     * if this object contains the majority of the pages resident
     * in the UBC (or a small set of large objects actively being
     * worked on contain the majority of the pages), we could
     * cause the pageout_scan thread to 'starve' in its attempt
     * to find pages to move to the free queue, since it has to
     * successfully acquire the object lock of any candidate page
     * before it can steal/clean it.
     */
    if (!vm_page_trylockspin_queues()) {
        vm_object_unlock(object);

        /*
         * "Turnstile enabled vm_pageout_scan" can be runnable
         * for a very long time without getting on a core.
         * If this is a higher priority thread it could be
         * waiting here for a very long time respecting the fact
         * that pageout_scan would like its object after VPS does
         * its work.
         * So we cap the number of yields in the vm_object_lock_avoid()
         * case to a single mutex_pause(0) which will give vm_pageout_scan
         * 10us to run and grab the object if needed.
         */
        vm_page_lockspin_queues();

        for (j = 0;; j++) {
            if ((!vm_object_lock_avoid(object) ||
                (vps_dynamic_priority_enabled && (j > 0))) &&
                _vm_object_lock_try(object)) {
                break;
            }
            vm_page_unlock_queues();
            mutex_pause(j);
            vm_page_lockspin_queues();
        }
    }
    for (j = 0; j < dw_count; j++, dwp++) {
        m = dwp->dw_m;

        if (dwp->dw_mask & DW_vm_pageout_throttle_up) {
            vm_pageout_throttle_up(m);
        }
#if CONFIG_PHANTOM_CACHE
        if (dwp->dw_mask & DW_vm_phantom_cache_update) {
            vm_phantom_cache_update(m);
        }
#endif
        if (dwp->dw_mask & DW_vm_page_wire) {
            vm_page_wire(m, tag, FALSE);
        } else if (dwp->dw_mask & DW_vm_page_unwire) {
            boolean_t queueit;

            queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE;

            vm_page_unwire(m, queueit);
        }
        if (dwp->dw_mask & DW_vm_page_free) {
            vm_page_free_prepare_queues(m);

            assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
            /*
             * Add this page to our list of reclaimed pages,
             * to be freed later.
             */
            m->vmp_snext = local_free_q;
            local_free_q = m;
        } else {
            if (dwp->dw_mask & DW_vm_page_deactivate_internal) {
                vm_page_deactivate_internal(m, FALSE);
            } else if (dwp->dw_mask & DW_vm_page_activate) {
                if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
                    vm_page_activate(m);
                }
            } else if (dwp->dw_mask & DW_vm_page_speculate) {
                vm_page_speculate(m, TRUE);
            } else if (dwp->dw_mask & DW_enqueue_cleaned) {
                /*
                 * if we didn't hold the object lock and did this,
                 * we might disconnect the page, then someone might
                 * soft fault it back in, then we would put it on the
                 * cleaned queue, and so we would have a referenced (maybe even dirty)
                 * page on that queue, which we don't want
                 */
                int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));

                if ((refmod_state & VM_MEM_REFERENCED)) {
                    /*
                     * this page has been touched since it got cleaned; let's activate it
                     * if it hasn't already been
                     */
                    VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
                    VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);

                    if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
                        vm_page_activate(m);
                    }
                } else {
                    m->vmp_reference = FALSE;
                    vm_page_enqueue_cleaned(m);
                }
            } else if (dwp->dw_mask & DW_vm_page_lru) {
                vm_page_lru(m);
            } else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) {
                if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
                    vm_page_queues_remove(m, TRUE);
                }
            }
            if (dwp->dw_mask & DW_set_reference) {
                m->vmp_reference = TRUE;
            } else if (dwp->dw_mask & DW_clear_reference) {
                m->vmp_reference = FALSE;
            }

            if (dwp->dw_mask & DW_move_page) {
                if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
                    vm_page_queues_remove(m, FALSE);

                    assert(VM_PAGE_OBJECT(m) != kernel_object);

                    vm_page_enqueue_inactive(m, FALSE);
                }
            }
            if (dwp->dw_mask & DW_clear_busy) {
                m->vmp_busy = FALSE;
            }

            if (dwp->dw_mask & DW_PAGE_WAKEUP) {
                PAGE_WAKEUP(m);
            }
        }
    }
    vm_page_unlock_queues();

    if (local_free_q) {
        vm_page_free_list(local_free_q, TRUE);
    }

    VM_CHECK_MEMORYSTATUS;
}
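/*
 * Illustrative sketch of a caller of the 2-pass scheme described above; it is
 * not part of the build, and the page source 'interesting_pages' plus the
 * local names below are hypothetical.  The pattern, however, is the one the
 * comment describes: batch per-page operations into a vm_page_delayed_work
 * array while holding only the object lock, then let vm_page_do_delayed_work()
 * apply the whole batch behind the vm_page_queue_lock.
 */
#if 0
static void
vm_page_delayed_work_example(vm_object_t object, vm_page_t interesting_pages)
{
    struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
    struct vm_page_delayed_work *dwp = &dw_array[0];
    int                         dw_count = 0;
    vm_page_t                   m;

    vm_object_lock(object);

    for (m = interesting_pages; m != VM_PAGE_NULL; m = m->vmp_snext) {
        m->vmp_busy = TRUE;     /* lets vm_page_do_delayed_work drop the object lock safely */

        dwp->dw_m = m;
        dwp->dw_mask = DW_vm_page_deactivate_internal | DW_clear_busy | DW_PAGE_WAKEUP;
        dwp++;

        if (++dw_count >= DEFAULT_DELAYED_WORK_LIMIT) {
            /* flush the batch under the vm_page_queue_lock */
            vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
            dwp = &dw_array[0];
            dw_count = 0;
        }
    }
    if (dw_count) {
        vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
    }
    /* vm_page_do_delayed_work returns with the object lock still held */
    vm_object_unlock(object);
}
#endif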
kern_return_t
vm_page_alloc_list(
    int         page_count,
    int         flags,
    vm_page_t   *list)
{
    vm_page_t   lo_page_list = VM_PAGE_NULL;
    vm_page_t   mem;
    int         i;

    if (!(flags & KMA_LOMEM)) {
        panic("vm_page_alloc_list: called w/o KMA_LOMEM");
    }

    for (i = 0; i < page_count; i++) {
        mem = vm_page_grablo();

        if (mem == VM_PAGE_NULL) {
            if (lo_page_list) {
                vm_page_free_list(lo_page_list, FALSE);
            }

            *list = VM_PAGE_NULL;

            return KERN_RESOURCE_SHORTAGE;
        }
        mem->vmp_snext = lo_page_list;
        lo_page_list = mem;
    }
    *list = lo_page_list;

    return KERN_SUCCESS;
}

void
vm_page_set_offset(vm_page_t page, vm_object_offset_t offset)
{
    page->vmp_offset = offset;
}

vm_page_t
vm_page_get_next(vm_page_t page)
{
    return page->vmp_snext;
}

vm_object_offset_t
vm_page_get_offset(vm_page_t page)
{
    return page->vmp_offset;
}

ppnum_t
vm_page_get_phys_page(vm_page_t page)
{
    return VM_PAGE_GET_PHYS_PAGE(page);
}
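/*
 * Illustrative sketch of consuming a list built by vm_page_alloc_list() with
 * the accessors above; not part of the build.  'npages' and use_phys_page()
 * are hypothetical; everything else is a real routine from this file.
 */
#if 0
static kern_return_t
vm_page_list_walk_example(int npages)
{
    vm_page_t       list, p;
    kern_return_t   kr;

    kr = vm_page_alloc_list(npages, KMA_LOMEM, &list);
    if (kr != KERN_SUCCESS) {
        return kr;
    }
    for (p = list; p != VM_PAGE_NULL; p = vm_page_get_next(p)) {
        use_phys_page(vm_page_get_phys_page(p));        /* hypothetical consumer */
    }
    vm_page_free_list(list, FALSE);

    return KERN_SUCCESS;
}
#endif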
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if HIBERNATION

static vm_page_t hibernate_gobble_queue;

static int  hibernate_drain_pageout_queue(struct vm_pageout_queue *);
static int  hibernate_flush_dirty_pages(int);
static int  hibernate_flush_queue(vm_page_queue_head_t *, int);

void hibernate_flush_wait(void);
void hibernate_mark_in_progress(void);
void hibernate_clear_in_progress(void);

void        hibernate_free_range(int, int);
void        hibernate_hash_insert_page(vm_page_t);
uint32_t    hibernate_mark_as_unneeded(addr64_t, addr64_t, hibernate_page_list_t *, hibernate_page_list_t *);
uint32_t    hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *);
ppnum_t     hibernate_lookup_paddr(unsigned int);

struct hibernate_statistics {
    int hibernate_considered;
    int hibernate_reentered_on_q;
    int hibernate_found_dirty;
    int hibernate_skipped_cleaning;
    int hibernate_skipped_transient;
    int hibernate_skipped_precious;
    int hibernate_skipped_external;
    int hibernate_queue_nolock;
    int hibernate_queue_paused;
    int hibernate_throttled;
    int hibernate_throttle_timeout;
    int hibernate_drained;
    int hibernate_drain_timeout;
    int cd_lock_failed;
    int cd_found_precious;
    int cd_found_wired;
    int cd_found_busy;
    int cd_found_unusual;
    int cd_found_cleaning;
    int cd_found_laundry;
    int cd_found_dirty;
    int cd_found_xpmapped;
    int cd_skipped_xpmapped;
    int cd_local_free;
    int cd_total_free;

    int cd_vm_page_wire_count;
    int cd_vm_struct_pages_unneeded;

    int cd_pages;
    int cd_discarded;
    int cd_count_wire;
} hibernate_stats;

/*
 * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
 * so that we don't overrun the estimated image size, which would
 * result in a hibernation failure.
 */
#define HIBERNATE_XPMAPPED_LIMIT        40000
int
hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
{
    wait_result_t wait_result;

    vm_page_lock_queues();

    while (!vm_page_queue_empty(&q->pgo_pending)) {
        q->pgo_draining = TRUE;

        assert_wait_timeout((event_t) (&q->pgo_laundry + 1), THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);

        vm_page_unlock_queues();

        wait_result = thread_block(THREAD_CONTINUE_NULL);

        if (wait_result == THREAD_TIMED_OUT && !vm_page_queue_empty(&q->pgo_pending)) {
            hibernate_stats.hibernate_drain_timeout++;

            if (q == &vm_pageout_queue_external) {
                return 0;
            }

            return 1;
        }
        vm_page_lock_queues();

        hibernate_stats.hibernate_drained++;
    }
    vm_page_unlock_queues();

    return 0;
}


boolean_t hibernate_skip_external = FALSE;
6864 hibernate_flush_queue(vm_page_queue_head_t
*q
, int qcount
)
6867 vm_object_t l_object
= NULL
;
6868 vm_object_t m_object
= NULL
;
6869 int refmod_state
= 0;
6870 int try_failed_count
= 0;
6872 int current_run
= 0;
6873 struct vm_pageout_queue
*iq
;
6874 struct vm_pageout_queue
*eq
;
6875 struct vm_pageout_queue
*tq
;
6877 KDBG(IOKDBG_CODE(DBG_HIBERNATE
, 4) | DBG_FUNC_START
,
6878 VM_KERNEL_UNSLIDE_OR_PERM(q
), qcount
);
6880 iq
= &vm_pageout_queue_internal
;
6881 eq
= &vm_pageout_queue_external
;
6883 vm_page_lock_queues();
6885 while (qcount
&& !vm_page_queue_empty(q
)) {
6886 if (current_run
++ == 1000) {
6887 if (hibernate_should_abort()) {
6894 m
= (vm_page_t
) vm_page_queue_first(q
);
6895 m_object
= VM_PAGE_OBJECT(m
);
6898 * check to see if we currently are working
6899 * with the same object... if so, we've
6900 * already got the lock
6902 if (m_object
!= l_object
) {
6904 * the object associated with candidate page is
6905 * different from the one we were just working
6906 * with... dump the lock if we still own it
6908 if (l_object
!= NULL
) {
6909 vm_object_unlock(l_object
);
 * Try to lock object; since we've already got the
6914 * page queues lock, we can only 'try' for this one.
6915 * if the 'try' fails, we need to do a mutex_pause
6916 * to allow the owner of the object lock a chance to
6919 if (!vm_object_lock_try_scan(m_object
)) {
6920 if (try_failed_count
> 20) {
6921 hibernate_stats
.hibernate_queue_nolock
++;
6923 goto reenter_pg_on_q
;
6926 vm_page_unlock_queues();
6927 mutex_pause(try_failed_count
++);
6928 vm_page_lock_queues();
6930 hibernate_stats
.hibernate_queue_paused
++;
6933 l_object
= m_object
;
6936 if (!m_object
->alive
|| m
->vmp_cleaning
|| m
->vmp_laundry
|| m
->vmp_busy
|| m
->vmp_absent
|| m
->vmp_error
) {
6938 * page is not to be cleaned
6939 * put it back on the head of its queue
6941 if (m
->vmp_cleaning
) {
6942 hibernate_stats
.hibernate_skipped_cleaning
++;
6944 hibernate_stats
.hibernate_skipped_transient
++;
6947 goto reenter_pg_on_q
;
6949 if (m_object
->copy
== VM_OBJECT_NULL
) {
6950 if (m_object
->purgable
== VM_PURGABLE_VOLATILE
|| m_object
->purgable
== VM_PURGABLE_EMPTY
) {
6952 * let the normal hibernate image path
6955 goto reenter_pg_on_q
;
6958 if (!m
->vmp_dirty
&& m
->vmp_pmapped
) {
6959 refmod_state
= pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m
));
6961 if ((refmod_state
& VM_MEM_MODIFIED
)) {
6962 SET_PAGE_DIRTY(m
, FALSE
);
6968 if (!m
->vmp_dirty
) {
6970 * page is not to be cleaned
6971 * put it back on the head of its queue
6973 if (m
->vmp_precious
) {
6974 hibernate_stats
.hibernate_skipped_precious
++;
6977 goto reenter_pg_on_q
;
6980 if (hibernate_skip_external
== TRUE
&& !m_object
->internal
) {
6981 hibernate_stats
.hibernate_skipped_external
++;
6983 goto reenter_pg_on_q
;
6987 if (m_object
->internal
) {
6988 if (VM_PAGE_Q_THROTTLED(iq
)) {
6991 } else if (VM_PAGE_Q_THROTTLED(eq
)) {
6996 wait_result_t wait_result
;
6999 if (l_object
!= NULL
) {
7000 vm_object_unlock(l_object
);
7004 while (retval
== 0) {
7005 tq
->pgo_throttled
= TRUE
;
7007 assert_wait_timeout((event_t
) &tq
->pgo_laundry
, THREAD_INTERRUPTIBLE
, 1000, 1000 * NSEC_PER_USEC
);
7009 vm_page_unlock_queues();
7011 wait_result
= thread_block(THREAD_CONTINUE_NULL
);
7013 vm_page_lock_queues();
7015 if (wait_result
!= THREAD_TIMED_OUT
) {
7018 if (!VM_PAGE_Q_THROTTLED(tq
)) {
7022 if (hibernate_should_abort()) {
7026 if (--wait_count
== 0) {
7027 hibernate_stats
.hibernate_throttle_timeout
++;
7030 hibernate_skip_external
= TRUE
;
7040 hibernate_stats
.hibernate_throttled
++;
7045 * we've already factored out pages in the laundry which
7046 * means this page can't be on the pageout queue so it's
7047 * safe to do the vm_page_queues_remove
7049 vm_page_queues_remove(m
, TRUE
);
7051 if (m_object
->internal
== TRUE
) {
7052 pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m
), PMAP_OPTIONS_COMPRESSOR
, NULL
);
7055 vm_pageout_cluster(m
);
7057 hibernate_stats
.hibernate_found_dirty
++;
7062 vm_page_queue_remove(q
, m
, vmp_pageq
);
7063 vm_page_queue_enter(q
, m
, vmp_pageq
);
7065 hibernate_stats
.hibernate_reentered_on_q
++;
7067 hibernate_stats
.hibernate_considered
++;
7070 try_failed_count
= 0;
7072 if (l_object
!= NULL
) {
7073 vm_object_unlock(l_object
);
7077 vm_page_unlock_queues();
7079 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE
, 4) | DBG_FUNC_END
, hibernate_stats
.hibernate_found_dirty
, retval
, 0, 0, 0);
7086 hibernate_flush_dirty_pages(int pass
)
7088 struct vm_speculative_age_q
*aq
;
7091 if (vm_page_local_q
) {
7092 zpercpu_foreach_cpu(lid
) {
7093 vm_page_reactivate_local(lid
, TRUE
, FALSE
);
7097 for (i
= 0; i
<= VM_PAGE_MAX_SPECULATIVE_AGE_Q
; i
++) {
7101 aq
= &vm_page_queue_speculative
[i
];
7103 if (vm_page_queue_empty(&aq
->age_q
)) {
7108 vm_page_lockspin_queues();
7110 vm_page_queue_iterate(&aq
->age_q
, m
, vmp_pageq
) {
7113 vm_page_unlock_queues();
7116 if (hibernate_flush_queue(&aq
->age_q
, qcount
)) {
7121 if (hibernate_flush_queue(&vm_page_queue_inactive
, vm_page_inactive_count
- vm_page_anonymous_count
- vm_page_cleaned_count
)) {
7124 /* XXX FBDP TODO: flush secluded queue */
7125 if (hibernate_flush_queue(&vm_page_queue_anonymous
, vm_page_anonymous_count
)) {
7128 if (hibernate_flush_queue(&vm_page_queue_cleaned
, vm_page_cleaned_count
)) {
7131 if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal
)) {
7136 vm_compressor_record_warmup_start();
7139 if (hibernate_flush_queue(&vm_page_queue_active
, vm_page_active_count
)) {
7141 vm_compressor_record_warmup_end();
7145 if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal
)) {
7147 vm_compressor_record_warmup_end();
7152 vm_compressor_record_warmup_end();
7155 if (hibernate_skip_external
== FALSE
&& hibernate_drain_pageout_queue(&vm_pageout_queue_external
)) {
void
hibernate_reset_stats()
{
    bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
}
7171 hibernate_flush_memory()
7175 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT
);
7177 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE
, 3) | DBG_FUNC_START
, vm_page_free_count
, 0, 0, 0, 0);
7179 hibernate_cleaning_in_progress
= TRUE
;
7180 hibernate_skip_external
= FALSE
;
7182 if ((retval
= hibernate_flush_dirty_pages(1)) == 0) {
7183 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE
, 10) | DBG_FUNC_START
, VM_PAGE_COMPRESSOR_COUNT
, 0, 0, 0, 0);
7185 vm_compressor_flush();
7187 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE
, 10) | DBG_FUNC_END
, VM_PAGE_COMPRESSOR_COUNT
, 0, 0, 0, 0);
7189 if (consider_buffer_cache_collect
!= NULL
) {
7190 unsigned int orig_wire_count
;
7192 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE
, 7) | DBG_FUNC_START
, 0, 0, 0, 0, 0);
7193 orig_wire_count
= vm_page_wire_count
;
7195 (void)(*consider_buffer_cache_collect
)(1);
7196 consider_zone_gc(FALSE
);
7198 HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count
- vm_page_wire_count
);
7200 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE
, 7) | DBG_FUNC_END
, orig_wire_count
- vm_page_wire_count
, 0, 0, 0, 0);
7203 hibernate_cleaning_in_progress
= FALSE
;
7205 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE
, 3) | DBG_FUNC_END
, vm_page_free_count
, hibernate_stats
.hibernate_found_dirty
, retval
, 0, 0);
7208 HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT
);
7212 HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n",
7213 hibernate_stats
.hibernate_considered
,
7214 hibernate_stats
.hibernate_reentered_on_q
,
7215 hibernate_stats
.hibernate_found_dirty
);
7216 HIBPRINT(" skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n",
7217 hibernate_stats
.hibernate_skipped_cleaning
,
7218 hibernate_stats
.hibernate_skipped_transient
,
7219 hibernate_stats
.hibernate_skipped_precious
,
7220 hibernate_stats
.hibernate_skipped_external
,
7221 hibernate_stats
.hibernate_queue_nolock
);
7222 HIBPRINT(" queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n",
7223 hibernate_stats
.hibernate_queue_paused
,
7224 hibernate_stats
.hibernate_throttled
,
7225 hibernate_stats
.hibernate_throttle_timeout
,
7226 hibernate_stats
.hibernate_drained
,
7227 hibernate_stats
.hibernate_drain_timeout
);
void
hibernate_page_list_zero(hibernate_page_list_t *list)
{
    uint32_t            bank;
    hibernate_bitmap_t  *bitmap;

    bitmap = &list->bank_bitmap[0];
    for (bank = 0; bank < list->bank_count; bank++) {
        uint32_t last_bit;

        bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
        // set out-of-bound bits at end of bitmap.
        last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
        if (last_bit) {
            bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);
        }

        bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
    }
}
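/*
 * Worked example for the out-of-bound masking above (illustrative numbers,
 * assuming hibernate_page_bitset() maps the k-th page of a word to bit
 * (31 - k), which is what the right shift implies): a bank covering
 * first_page 100 through last_page 137 holds 38 pages, so only
 * 38 & 31 == 6 pages land in the final 32-bit word.  last_bit is therefore 6
 * and (0xFFFFFFFF >> 6) == 0x03FFFFFF sets the 26 low-order, out-of-range
 * bits, pre-marking those nonexistent pages as "does not need saving".
 */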
void
hibernate_free_gobble_pages(void)
{
    vm_page_t   m, next;
    uint32_t    count = 0;

    m = (vm_page_t) hibernate_gobble_queue;
    while (m) {
        next = m->vmp_snext;
        vm_page_free(m);
        count++;
        m = next;
    }
    hibernate_gobble_queue = VM_PAGE_NULL;

    if (count) {
        HIBLOG("Freed %d pages\n", count);
    }
}
7275 hibernate_consider_discard(vm_page_t m
, boolean_t preflight
)
7277 vm_object_t object
= NULL
;
7279 boolean_t discard
= FALSE
;
7282 if (m
->vmp_private
) {
7283 panic("hibernate_consider_discard: private");
7286 object
= VM_PAGE_OBJECT(m
);
7288 if (!vm_object_lock_try(object
)) {
7291 hibernate_stats
.cd_lock_failed
++;
7295 if (VM_PAGE_WIRED(m
)) {
7297 hibernate_stats
.cd_found_wired
++;
7301 if (m
->vmp_precious
) {
7303 hibernate_stats
.cd_found_precious
++;
7307 if (m
->vmp_busy
|| !object
->alive
) {
7309 * Somebody is playing with this page.
7312 hibernate_stats
.cd_found_busy
++;
7316 if (m
->vmp_absent
|| m
->vmp_unusual
|| m
->vmp_error
) {
 * If it's unusual in any way, ignore it
7321 hibernate_stats
.cd_found_unusual
++;
7325 if (m
->vmp_cleaning
) {
7327 hibernate_stats
.cd_found_cleaning
++;
7331 if (m
->vmp_laundry
) {
7333 hibernate_stats
.cd_found_laundry
++;
7337 if (!m
->vmp_dirty
) {
7338 refmod_state
= pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m
));
7340 if (refmod_state
& VM_MEM_REFERENCED
) {
7341 m
->vmp_reference
= TRUE
;
7343 if (refmod_state
& VM_MEM_MODIFIED
) {
7344 SET_PAGE_DIRTY(m
, FALSE
);
7349 * If it's clean or purgeable we can discard the page on wakeup.
7351 discard
= (!m
->vmp_dirty
)
7352 || (VM_PURGABLE_VOLATILE
== object
->purgable
)
7353 || (VM_PURGABLE_EMPTY
== object
->purgable
);
7356 if (discard
== FALSE
) {
7358 hibernate_stats
.cd_found_dirty
++;
7360 } else if (m
->vmp_xpmapped
&& m
->vmp_reference
&& !object
->internal
) {
7361 if (hibernate_stats
.cd_found_xpmapped
< HIBERNATE_XPMAPPED_LIMIT
) {
7363 hibernate_stats
.cd_found_xpmapped
++;
7368 hibernate_stats
.cd_skipped_xpmapped
++;
7375 vm_object_unlock(object
);
7383 hibernate_discard_page(vm_page_t m
)
7385 vm_object_t m_object
;
7387 if (m
->vmp_absent
|| m
->vmp_unusual
|| m
->vmp_error
) {
 * If it's unusual in any way, ignore
7394 m_object
= VM_PAGE_OBJECT(m
);
7396 #if MACH_ASSERT || DEBUG
7397 if (!vm_object_lock_try(m_object
)) {
7398 panic("hibernate_discard_page(%p) !vm_object_lock_try", m
);
7401 /* No need to lock page queue for token delete, hibernate_vm_unlock()
7402 * makes sure these locks are uncontended before sleep */
7403 #endif /* MACH_ASSERT || DEBUG */
7405 if (m
->vmp_pmapped
== TRUE
) {
7406 __unused
int refmod_state
= pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m
));
7409 if (m
->vmp_laundry
) {
7410 panic("hibernate_discard_page(%p) laundry", m
);
7412 if (m
->vmp_private
) {
7413 panic("hibernate_discard_page(%p) private", m
);
7415 if (m
->vmp_fictitious
) {
7416 panic("hibernate_discard_page(%p) fictitious", m
);
7419 if (VM_PURGABLE_VOLATILE
== m_object
->purgable
) {
7420 /* object should be on a queue */
7421 assert((m_object
->objq
.next
!= NULL
) && (m_object
->objq
.prev
!= NULL
));
7422 purgeable_q_t old_queue
= vm_purgeable_object_remove(m_object
);
7424 if (m_object
->purgeable_when_ripe
) {
7425 vm_purgeable_token_delete_first(old_queue
);
7427 vm_object_lock_assert_exclusive(m_object
);
7428 m_object
->purgable
= VM_PURGABLE_EMPTY
;
7431 * Purgeable ledgers: pages of VOLATILE and EMPTY objects are
7432 * accounted in the "volatile" ledger, so no change here.
7433 * We have to update vm_page_purgeable_count, though, since we're
7434 * effectively purging this object.
7437 assert(m_object
->resident_page_count
>= m_object
->wired_page_count
);
7438 delta
= (m_object
->resident_page_count
- m_object
->wired_page_count
);
7439 assert(vm_page_purgeable_count
>= delta
);
7441 OSAddAtomic(-delta
, (SInt32
*)&vm_page_purgeable_count
);
7446 #if MACH_ASSERT || DEBUG
7447 vm_object_unlock(m_object
);
7448 #endif /* MACH_ASSERT || DEBUG */
/*
 * Grab locks for hibernate_page_list_setall()
 */
void
hibernate_vm_lock_queues(void)
{
    vm_object_lock(compressor_object);
    vm_page_lock_queues();
    lck_mtx_lock(&vm_page_queue_free_lock);
    lck_mtx_lock(&vm_purgeable_queue_lock);

    if (vm_page_local_q) {
        zpercpu_foreach(lq, vm_page_local_q) {
            VPL_LOCK(&lq->vpl_lock);
        }
    }
}

void
hibernate_vm_unlock_queues(void)
{
    if (vm_page_local_q) {
        zpercpu_foreach(lq, vm_page_local_q) {
            VPL_UNLOCK(&lq->vpl_lock);
        }
    }
    lck_mtx_unlock(&vm_purgeable_queue_lock);
    lck_mtx_unlock(&vm_page_queue_free_lock);
    vm_page_unlock_queues();
    vm_object_unlock(compressor_object);
}
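/*
 * Illustrative pairing of the two helpers above around a preflight scan; not
 * part of the build, and the surrounding caller is hypothetical.  The unlock
 * routine releases in exactly the reverse order the lock routine acquires
 * (per-CPU local queue locks last in, first out), which keeps the ordering
 * compatible with the rest of the VM layer.
 */
#if 0
	hibernate_vm_lock_queues();
	hibernate_page_list_setall(page_list, page_list_wired, page_list_pal,
	    TRUE,               /* preflight */
	    FALSE,              /* will_discard */
	    &pagesOut);
	hibernate_vm_unlock_queues();
#endif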
7484 * Bits zero in the bitmaps => page needs to be saved. All pages default to be saved,
7485 * pages known to VM to not need saving are subtracted.
7486 * Wired pages to be saved are present in page_list_wired, pageable in page_list.
7490 hibernate_page_list_setall(hibernate_page_list_t
* page_list
,
7491 hibernate_page_list_t
* page_list_wired
,
7492 hibernate_page_list_t
* page_list_pal
,
7493 boolean_t preflight
,
7494 boolean_t will_discard
,
7495 uint32_t * pagesOut
)
7497 uint64_t start
, end
, nsec
;
7500 uint32_t pages
= page_list
->page_count
;
7501 uint32_t count_anonymous
= 0, count_throttled
= 0, count_compressor
= 0;
7502 uint32_t count_inactive
= 0, count_active
= 0, count_speculative
= 0, count_cleaned
= 0;
7503 uint32_t count_wire
= pages
;
7504 uint32_t count_discard_active
= 0;
7505 uint32_t count_discard_inactive
= 0;
7506 uint32_t count_discard_cleaned
= 0;
7507 uint32_t count_discard_purgeable
= 0;
7508 uint32_t count_discard_speculative
= 0;
7509 uint32_t count_discard_vm_struct_pages
= 0;
7512 hibernate_bitmap_t
* bitmap
;
7513 hibernate_bitmap_t
* bitmap_wired
;
7514 boolean_t discard_all
;
7517 HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight
);
7521 page_list_wired
= NULL
;
7522 page_list_pal
= NULL
;
7523 discard_all
= FALSE
;
7525 discard_all
= will_discard
;
7528 #if MACH_ASSERT || DEBUG
7530 assert(hibernate_vm_locks_are_safe());
7531 vm_page_lock_queues();
7532 if (vm_page_local_q
) {
7533 zpercpu_foreach(lq
, vm_page_local_q
) {
7534 VPL_LOCK(&lq
->vpl_lock
);
7538 #endif /* MACH_ASSERT || DEBUG */
7541 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE
, 8) | DBG_FUNC_START
, count_wire
, 0, 0, 0, 0);
7543 clock_get_uptime(&start
);
7546 hibernate_page_list_zero(page_list
);
7547 hibernate_page_list_zero(page_list_wired
);
7548 hibernate_page_list_zero(page_list_pal
);
7550 hibernate_stats
.cd_vm_page_wire_count
= vm_page_wire_count
;
7551 hibernate_stats
.cd_pages
= pages
;
7554 if (vm_page_local_q
) {
7555 zpercpu_foreach_cpu(lid
) {
7556 vm_page_reactivate_local(lid
, TRUE
, !preflight
);
7561 vm_object_lock(compressor_object
);
7562 vm_page_lock_queues();
7563 lck_mtx_lock(&vm_page_queue_free_lock
);
7566 LCK_MTX_ASSERT(&vm_page_queue_lock
, LCK_MTX_ASSERT_OWNED
);
7568 hibernation_vmqueues_inspection
= TRUE
;
7570 m
= (vm_page_t
) hibernate_gobble_queue
;
7575 hibernate_page_bitset(page_list
, TRUE
, VM_PAGE_GET_PHYS_PAGE(m
));
7576 hibernate_page_bitset(page_list_wired
, TRUE
, VM_PAGE_GET_PHYS_PAGE(m
));
7582 percpu_foreach(free_pages_head
, free_pages
) {
7583 for (m
= *free_pages_head
; m
; m
= m
->vmp_snext
) {
7584 assert(m
->vmp_q_state
== VM_PAGE_ON_FREE_LOCAL_Q
);
7588 hibernate_page_bitset(page_list
, TRUE
, VM_PAGE_GET_PHYS_PAGE(m
));
7589 hibernate_page_bitset(page_list_wired
, TRUE
, VM_PAGE_GET_PHYS_PAGE(m
));
7591 hibernate_stats
.cd_local_free
++;
7592 hibernate_stats
.cd_total_free
++;
7597 for (i
= 0; i
< vm_colors
; i
++) {
7598 vm_page_queue_iterate(&vm_page_queue_free
[i
].qhead
, m
, vmp_pageq
) {
7599 assert(m
->vmp_q_state
== VM_PAGE_ON_FREE_Q
);
7604 hibernate_page_bitset(page_list
, TRUE
, VM_PAGE_GET_PHYS_PAGE(m
));
7605 hibernate_page_bitset(page_list_wired
, TRUE
, VM_PAGE_GET_PHYS_PAGE(m
));
7607 hibernate_stats
.cd_total_free
++;
7612 vm_page_queue_iterate(&vm_lopage_queue_free
, m
, vmp_pageq
) {
7613 assert(m
->vmp_q_state
== VM_PAGE_ON_FREE_LOPAGE_Q
);
7618 hibernate_page_bitset(page_list
, TRUE
, VM_PAGE_GET_PHYS_PAGE(m
));
7619 hibernate_page_bitset(page_list_wired
, TRUE
, VM_PAGE_GET_PHYS_PAGE(m
));
7621 hibernate_stats
.cd_total_free
++;
7625 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_throttled
);
7626 while (m
&& !vm_page_queue_end(&vm_page_queue_throttled
, (vm_page_queue_entry_t
)m
)) {
7627 assert(m
->vmp_q_state
== VM_PAGE_ON_THROTTLED_Q
);
7629 next
= (vm_page_t
)VM_PAGE_UNPACK_PTR(m
->vmp_pageq
.next
);
7631 if ((kIOHibernateModeDiscardCleanInactive
& gIOHibernateMode
)
7632 && hibernate_consider_discard(m
, preflight
)) {
7634 hibernate_page_bitset(page_list
, TRUE
, VM_PAGE_GET_PHYS_PAGE(m
));
7636 count_discard_inactive
++;
7637 discard
= discard_all
;
7643 hibernate_page_bitset(page_list_wired
, TRUE
, VM_PAGE_GET_PHYS_PAGE(m
));
7647 hibernate_discard_page(m
);
7652 m
= (vm_page_t
)vm_page_queue_first(&vm_page_queue_anonymous
);
7653 while (m
&& !vm_page_queue_end(&vm_page_queue_anonymous
, (vm_page_queue_entry_t
)m
)) {
7654 assert(m
->vmp_q_state
== VM_PAGE_ON_INACTIVE_INTERNAL_Q
);
7656 next
= (vm_page_t
)VM_PAGE_UNPACK_PTR(m
->vmp_pageq
.next
);
7658 if ((kIOHibernateModeDiscardCleanInactive
& gIOHibernateMode
) &&
7659 hibernate_consider_discard(m
, preflight
)) {
7661 hibernate_page_bitset(page_list
, TRUE
, VM_PAGE_GET_PHYS_PAGE(m
));
7664 count_discard_purgeable
++;
7666 count_discard_inactive
++;
7668 discard
= discard_all
;
7674 hibernate_page_bitset(page_list_wired
, TRUE
, VM_PAGE_GET_PHYS_PAGE(m
));
7677 hibernate_discard_page(m
);
7682 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_cleaned
);
7683 while (m
&& !vm_page_queue_end(&vm_page_queue_cleaned
, (vm_page_queue_entry_t
)m
)) {
7684 assert(m
->vmp_q_state
== VM_PAGE_ON_INACTIVE_CLEANED_Q
);
7686 next
= (vm_page_t
)VM_PAGE_UNPACK_PTR(m
->vmp_pageq
.next
);
7688 if ((kIOHibernateModeDiscardCleanInactive
& gIOHibernateMode
) &&
7689 hibernate_consider_discard(m
, preflight
)) {
7691 hibernate_page_bitset(page_list
, TRUE
, VM_PAGE_GET_PHYS_PAGE(m
));
7694 count_discard_purgeable
++;
7696 count_discard_cleaned
++;
7698 discard
= discard_all
;
7704 hibernate_page_bitset(page_list_wired
, TRUE
, VM_PAGE_GET_PHYS_PAGE(m
));
7707 hibernate_discard_page(m
);
7712 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_active
);
7713 while (m
&& !vm_page_queue_end(&vm_page_queue_active
, (vm_page_queue_entry_t
)m
)) {
7714 assert(m
->vmp_q_state
== VM_PAGE_ON_ACTIVE_Q
);
7716 next
= (vm_page_t
)VM_PAGE_UNPACK_PTR(m
->vmp_pageq
.next
);
7718 if ((kIOHibernateModeDiscardCleanActive
& gIOHibernateMode
) &&
7719 hibernate_consider_discard(m
, preflight
)) {
7721 hibernate_page_bitset(page_list
, TRUE
, VM_PAGE_GET_PHYS_PAGE(m
));
7724 count_discard_purgeable
++;
7726 count_discard_active
++;
7728 discard
= discard_all
;
7734 hibernate_page_bitset(page_list_wired
, TRUE
, VM_PAGE_GET_PHYS_PAGE(m
));
7737 hibernate_discard_page(m
);
7742 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_inactive
);
7743 while (m
&& !vm_page_queue_end(&vm_page_queue_inactive
, (vm_page_queue_entry_t
)m
)) {
7744 assert(m
->vmp_q_state
== VM_PAGE_ON_INACTIVE_EXTERNAL_Q
);
7746 next
= (vm_page_t
)VM_PAGE_UNPACK_PTR(m
->vmp_pageq
.next
);
7748 if ((kIOHibernateModeDiscardCleanInactive
& gIOHibernateMode
) &&
7749 hibernate_consider_discard(m
, preflight
)) {
7751 hibernate_page_bitset(page_list
, TRUE
, VM_PAGE_GET_PHYS_PAGE(m
));
7754 count_discard_purgeable
++;
7756 count_discard_inactive
++;
7758 discard
= discard_all
;
7764 hibernate_page_bitset(page_list_wired
, TRUE
, VM_PAGE_GET_PHYS_PAGE(m
));
7767 hibernate_discard_page(m
);
7771 /* XXX FBDP TODO: secluded queue */
7773 for (i
= 0; i
<= VM_PAGE_MAX_SPECULATIVE_AGE_Q
; i
++) {
7774 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_speculative
[i
].age_q
);
7775 while (m
&& !vm_page_queue_end(&vm_page_queue_speculative
[i
].age_q
, (vm_page_queue_entry_t
)m
)) {
7776 assertf(m
->vmp_q_state
== VM_PAGE_ON_SPECULATIVE_Q
,
7777 "Bad page: %p (0x%x:0x%x) on queue %d has state: %d (Discard: %d, Preflight: %d)",
7778 m
, m
->vmp_pageq
.next
, m
->vmp_pageq
.prev
, i
, m
->vmp_q_state
, discard
, preflight
);
7780 next
= (vm_page_t
)VM_PAGE_UNPACK_PTR(m
->vmp_pageq
.next
);
7782 if ((kIOHibernateModeDiscardCleanInactive
& gIOHibernateMode
) &&
7783 hibernate_consider_discard(m
, preflight
)) {
7785 hibernate_page_bitset(page_list
, TRUE
, VM_PAGE_GET_PHYS_PAGE(m
));
7787 count_discard_speculative
++;
7788 discard
= discard_all
;
7790 count_speculative
++;
7794 hibernate_page_bitset(page_list_wired
, TRUE
, VM_PAGE_GET_PHYS_PAGE(m
));
7797 hibernate_discard_page(m
);
7803 vm_page_queue_iterate(&compressor_object
->memq
, m
, vmp_listq
) {
7804 assert(m
->vmp_q_state
== VM_PAGE_USED_BY_COMPRESSOR
);
7809 hibernate_page_bitset(page_list_wired
, TRUE
, VM_PAGE_GET_PHYS_PAGE(m
));
7813 if (preflight
== FALSE
&& discard_all
== TRUE
) {
7814 KDBG(IOKDBG_CODE(DBG_HIBERNATE
, 12) | DBG_FUNC_START
);
7816 HIBLOG("hibernate_teardown started\n");
7817 count_discard_vm_struct_pages
= hibernate_teardown_vm_structs(page_list
, page_list_wired
);
7818 HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages
);
7820 pages
-= count_discard_vm_struct_pages
;
7821 count_wire
-= count_discard_vm_struct_pages
;
7823 hibernate_stats
.cd_vm_struct_pages_unneeded
= count_discard_vm_struct_pages
;
7825 KDBG(IOKDBG_CODE(DBG_HIBERNATE
, 12) | DBG_FUNC_END
);
7829 // pull wired from hibernate_bitmap
7830 bitmap
= &page_list
->bank_bitmap
[0];
7831 bitmap_wired
= &page_list_wired
->bank_bitmap
[0];
7832 for (bank
= 0; bank
< page_list
->bank_count
; bank
++) {
7833 for (i
= 0; i
< bitmap
->bitmapwords
; i
++) {
7834 bitmap
->bitmap
[i
] = bitmap
->bitmap
[i
] | ~bitmap_wired
->bitmap
[i
];
7836 bitmap
= (hibernate_bitmap_t
*)&bitmap
->bitmap
[bitmap
->bitmapwords
];
7837 bitmap_wired
= (hibernate_bitmap_t
*) &bitmap_wired
->bitmap
[bitmap_wired
->bitmapwords
];
7841 // machine dependent adjustments
7842 hibernate_page_list_setall_machine(page_list
, page_list_wired
, preflight
, &pages
);
7845 hibernate_stats
.cd_count_wire
= count_wire
;
7846 hibernate_stats
.cd_discarded
= count_discard_active
+ count_discard_inactive
+ count_discard_purgeable
+
7847 count_discard_speculative
+ count_discard_cleaned
+ count_discard_vm_struct_pages
;
7850 clock_get_uptime(&end
);
7851 absolutetime_to_nanoseconds(end
- start
, &nsec
);
7852 HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec
/ 1000000ULL);
7854 HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n %s discard act %d inact %d purgeable %d spec %d cleaned %d\n",
7855 pages
, count_wire
, count_active
, count_inactive
, count_cleaned
, count_speculative
, count_anonymous
, count_throttled
, count_compressor
, hibernate_stats
.cd_found_xpmapped
,
7856 discard_all
? "did" : "could",
7857 count_discard_active
, count_discard_inactive
, count_discard_purgeable
, count_discard_speculative
, count_discard_cleaned
);
7859 if (hibernate_stats
.cd_skipped_xpmapped
) {
7860 HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats
.cd_skipped_xpmapped
);
7863 *pagesOut
= pages
- count_discard_active
- count_discard_inactive
- count_discard_purgeable
- count_discard_speculative
- count_discard_cleaned
;
7865 if (preflight
&& will_discard
) {
7866 *pagesOut
-= count_compressor
+ count_throttled
+ count_anonymous
+ count_inactive
+ count_cleaned
+ count_speculative
+ count_active
;
7869 hibernation_vmqueues_inspection
= FALSE
;
7871 #if MACH_ASSERT || DEBUG
7873 if (vm_page_local_q
) {
7874 zpercpu_foreach(lq
, vm_page_local_q
) {
7875 VPL_UNLOCK(&lq
->vpl_lock
);
7878 vm_page_unlock_queues();
7880 #endif /* MACH_ASSERT || DEBUG */
7883 lck_mtx_unlock(&vm_page_queue_free_lock
);
7884 vm_page_unlock_queues();
7885 vm_object_unlock(compressor_object
);
7888 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE
, 8) | DBG_FUNC_END
, count_wire
, *pagesOut
, 0, 0, 0);
7892 hibernate_page_list_discard(hibernate_page_list_t
* page_list
)
7894 uint64_t start
, end
, nsec
;
7898 uint32_t count_discard_active
= 0;
7899 uint32_t count_discard_inactive
= 0;
7900 uint32_t count_discard_purgeable
= 0;
7901 uint32_t count_discard_cleaned
= 0;
7902 uint32_t count_discard_speculative
= 0;
7905 #if MACH_ASSERT || DEBUG
7906 vm_page_lock_queues();
7907 if (vm_page_local_q
) {
7908 zpercpu_foreach(lq
, vm_page_local_q
) {
7909 VPL_LOCK(&lq
->vpl_lock
);
7912 #endif /* MACH_ASSERT || DEBUG */
7914 clock_get_uptime(&start
);
7916 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_anonymous
);
7917 while (m
&& !vm_page_queue_end(&vm_page_queue_anonymous
, (vm_page_queue_entry_t
)m
)) {
7918 assert(m
->vmp_q_state
== VM_PAGE_ON_INACTIVE_INTERNAL_Q
);
7920 next
= (vm_page_t
) VM_PAGE_UNPACK_PTR(m
->vmp_pageq
.next
);
7921 if (hibernate_page_bittst(page_list
, VM_PAGE_GET_PHYS_PAGE(m
))) {
7923 count_discard_purgeable
++;
7925 count_discard_inactive
++;
7927 hibernate_discard_page(m
);
7932 for (i
= 0; i
<= VM_PAGE_MAX_SPECULATIVE_AGE_Q
; i
++) {
7933 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_speculative
[i
].age_q
);
7934 while (m
&& !vm_page_queue_end(&vm_page_queue_speculative
[i
].age_q
, (vm_page_queue_entry_t
)m
)) {
7935 assert(m
->vmp_q_state
== VM_PAGE_ON_SPECULATIVE_Q
);
7937 next
= (vm_page_t
) VM_PAGE_UNPACK_PTR(m
->vmp_pageq
.next
);
7938 if (hibernate_page_bittst(page_list
, VM_PAGE_GET_PHYS_PAGE(m
))) {
7939 count_discard_speculative
++;
7940 hibernate_discard_page(m
);
7946 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_inactive
);
7947 while (m
&& !vm_page_queue_end(&vm_page_queue_inactive
, (vm_page_queue_entry_t
)m
)) {
7948 assert(m
->vmp_q_state
== VM_PAGE_ON_INACTIVE_EXTERNAL_Q
);
7950 next
= (vm_page_t
) VM_PAGE_UNPACK_PTR(m
->vmp_pageq
.next
);
7951 if (hibernate_page_bittst(page_list
, VM_PAGE_GET_PHYS_PAGE(m
))) {
7953 count_discard_purgeable
++;
7955 count_discard_inactive
++;
7957 hibernate_discard_page(m
);
7961 /* XXX FBDP TODO: secluded queue */
7963 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_active
);
7964 while (m
&& !vm_page_queue_end(&vm_page_queue_active
, (vm_page_queue_entry_t
)m
)) {
7965 assert(m
->vmp_q_state
== VM_PAGE_ON_ACTIVE_Q
);
7967 next
= (vm_page_t
) VM_PAGE_UNPACK_PTR(m
->vmp_pageq
.next
);
7968 if (hibernate_page_bittst(page_list
, VM_PAGE_GET_PHYS_PAGE(m
))) {
7970 count_discard_purgeable
++;
7972 count_discard_active
++;
7974 hibernate_discard_page(m
);
7979 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_cleaned
);
7980 while (m
&& !vm_page_queue_end(&vm_page_queue_cleaned
, (vm_page_queue_entry_t
)m
)) {
7981 assert(m
->vmp_q_state
== VM_PAGE_ON_INACTIVE_CLEANED_Q
);
7983 next
= (vm_page_t
) VM_PAGE_UNPACK_PTR(m
->vmp_pageq
.next
);
7984 if (hibernate_page_bittst(page_list
, VM_PAGE_GET_PHYS_PAGE(m
))) {
7986 count_discard_purgeable
++;
7988 count_discard_cleaned
++;
7990 hibernate_discard_page(m
);
7995 #if MACH_ASSERT || DEBUG
7996 if (vm_page_local_q
) {
7997 zpercpu_foreach(lq
, vm_page_local_q
) {
7998 VPL_UNLOCK(&lq
->vpl_lock
);
8001 vm_page_unlock_queues();
8002 #endif /* MACH_ASSERT || DEBUG */
8004 clock_get_uptime(&end
);
8005 absolutetime_to_nanoseconds(end
- start
, &nsec
);
8006 HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n",
8008 count_discard_active
, count_discard_inactive
, count_discard_purgeable
, count_discard_speculative
, count_discard_cleaned
);
boolean_t       hibernate_paddr_map_inited = FALSE;
unsigned int    hibernate_teardown_last_valid_compact_indx = -1;
vm_page_t       hibernate_rebuild_hash_list = NULL;

unsigned int    hibernate_teardown_found_tabled_pages = 0;
unsigned int    hibernate_teardown_found_created_pages = 0;
unsigned int    hibernate_teardown_found_free_pages = 0;
unsigned int    hibernate_teardown_vm_page_free_count;


struct ppnum_mapping {
    struct ppnum_mapping    *ppnm_next;
    ppnum_t                 ppnm_base_paddr;
    unsigned int            ppnm_sindx;
    unsigned int            ppnm_eindx;
};

struct ppnum_mapping    *ppnm_head;
struct ppnum_mapping    *ppnm_last_found = NULL;
void
hibernate_create_paddr_map(void)
{
    unsigned int            i;
    ppnum_t                 next_ppnum_in_run = 0;
    struct ppnum_mapping    *ppnm = NULL;

    if (hibernate_paddr_map_inited == FALSE) {
        for (i = 0; i < vm_pages_count; i++) {
            if (ppnm) {
                ppnm->ppnm_eindx = i;
            }

            if (ppnm == NULL || VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) != next_ppnum_in_run) {
                ppnm = zalloc_permanent_type(struct ppnum_mapping);

                ppnm->ppnm_next = ppnm_head;
                ppnm_head = ppnm;

                ppnm->ppnm_sindx = i;
                ppnm->ppnm_base_paddr = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]);
            }
            next_ppnum_in_run = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) + 1;
        }
        ppnm->ppnm_eindx++;

        hibernate_paddr_map_inited = TRUE;
    }
}

ppnum_t
hibernate_lookup_paddr(unsigned int indx)
{
    struct ppnum_mapping    *ppnm = NULL;

    ppnm = ppnm_last_found;

    if (ppnm) {
        if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
            goto done;
        }
    }
    for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {
        if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
            ppnm_last_found = ppnm;
            break;
        }
    }
    if (ppnm == NULL) {
        panic("hibernate_lookup_paddr of %d failed\n", indx);
    }
done:
    return ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx);
}
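/*
 * Worked example for the run-based translation above (illustrative numbers):
 * for a ppnum_mapping with ppnm_sindx 1000, ppnm_eindx 1500 and
 * ppnm_base_paddr 0x80000, hibernate_lookup_paddr(1234) falls inside that run
 * and returns 0x80000 + (1234 - 1000) = 0x800EA.  Within each physically
 * contiguous run recorded by hibernate_create_paddr_map(), a vm_pages[] index
 * maps to its physical page number by a constant offset.
 */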
uint32_t
hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
{
    addr64_t        saddr_aligned;
    addr64_t        eaddr_aligned;
    addr64_t        addr;
    ppnum_t         paddr;
    unsigned int    mark_as_unneeded_pages = 0;

    saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
    eaddr_aligned = eaddr & ~PAGE_MASK_64;

    for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {
        paddr = pmap_find_phys(kernel_pmap, addr);

        assert(paddr);

        hibernate_page_bitset(page_list, TRUE, paddr);
        hibernate_page_bitset(page_list_wired, TRUE, paddr);

        mark_as_unneeded_pages++;
    }
    return mark_as_unneeded_pages;
}
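/*
 * Worked example for the alignment above (illustrative, assuming 4K pages so
 * PAGE_MASK_64 == 0xFFF): with saddr = 0x12345678 and eaddr = 0x1234A800,
 * saddr_aligned = (0x12345678 + 0xFFF) & ~0xFFF = 0x12346000 and
 * eaddr_aligned = 0x1234A000, so exactly the fully-contained pages at
 * 0x12346000, 0x12347000, 0x12348000 and 0x12349000 are marked as not needing
 * to be saved; the partial pages at either end are left alone.
 */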
void
hibernate_hash_insert_page(vm_page_t mem)
{
    vm_page_bucket_t    *bucket;
    int                 hash_id;
    vm_object_t         m_object;

    m_object = VM_PAGE_OBJECT(mem);

    assert(mem->vmp_hashed);
    assert(mem->vmp_offset != (vm_object_offset_t) -1);

    /*
     * Insert it into the object_object/offset hash table
     */
    hash_id = vm_page_hash(m_object, mem->vmp_offset);
    bucket = &vm_page_buckets[hash_id];

    mem->vmp_next_m = bucket->page_list;
    bucket->page_list = VM_PAGE_PACK_PTR(mem);
}
void
hibernate_free_range(int sindx, int eindx)
{
    vm_page_t       mem;
    unsigned int    color;

    while (sindx < eindx) {
        mem = &vm_pages[sindx];

        vm_page_init(mem, hibernate_lookup_paddr(sindx), FALSE);

        mem->vmp_lopage = FALSE;
        mem->vmp_q_state = VM_PAGE_ON_FREE_Q;

        color = VM_PAGE_GET_COLOR(mem);
#if defined(__x86_64__)
        vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
#else
        vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
#endif
        vm_page_free_count++;

        sindx++;
    }
}
8165 hibernate_rebuild_vm_structs(void)
8167 int i
, cindx
, sindx
, eindx
;
8168 vm_page_t mem
, tmem
, mem_next
;
8169 AbsoluteTime startTime
, endTime
;
8172 if (hibernate_rebuild_needed
== FALSE
) {
8176 KDBG(IOKDBG_CODE(DBG_HIBERNATE
, 13) | DBG_FUNC_START
);
8177 HIBLOG("hibernate_rebuild started\n");
8179 clock_get_uptime(&startTime
);
8181 pal_hib_rebuild_pmap_structs();
8183 bzero(&vm_page_buckets
[0], vm_page_bucket_count
* sizeof(vm_page_bucket_t
));
8184 eindx
= vm_pages_count
;
8187 * Mark all the vm_pages[] that have not been initialized yet as being
 * transient. This is needed to ensure that buddy page search is correct.
8189 * Without this random data in these vm_pages[] can trip the buddy search
8191 for (i
= hibernate_teardown_last_valid_compact_indx
+ 1; i
< eindx
; ++i
) {
8192 vm_pages
[i
].vmp_q_state
= VM_PAGE_NOT_ON_Q
;
8195 for (cindx
= hibernate_teardown_last_valid_compact_indx
; cindx
>= 0; cindx
--) {
8196 mem
= &vm_pages
[cindx
];
8197 assert(mem
->vmp_q_state
!= VM_PAGE_ON_FREE_Q
);
8199 * hibernate_teardown_vm_structs leaves the location where
8200 * this vm_page_t must be located in "next".
8202 tmem
= (vm_page_t
)(VM_PAGE_UNPACK_PTR(mem
->vmp_next_m
));
8203 mem
->vmp_next_m
= VM_PAGE_PACK_PTR(NULL
);
8205 sindx
= (int)(tmem
- &vm_pages
[0]);
8209 * this vm_page_t was moved by hibernate_teardown_vm_structs,
8210 * so move it back to its real location
8215 if (mem
->vmp_hashed
) {
8216 hibernate_hash_insert_page(mem
);
8219 * the 'hole' between this vm_page_t and the previous
8220 * vm_page_t we moved needs to be initialized as
8221 * a range of free vm_page_t's
8223 hibernate_free_range(sindx
+ 1, eindx
);
8228 hibernate_free_range(0, sindx
);
8231 assert(vm_page_free_count
== hibernate_teardown_vm_page_free_count
);
8234 * process the list of vm_page_t's that were entered in the hash,
 * but were not located in the vm_pages array... these are
8236 * vm_page_t's that were created on the fly (i.e. fictitious)
8238 for (mem
= hibernate_rebuild_hash_list
; mem
; mem
= mem_next
) {
8239 mem_next
= (vm_page_t
)(VM_PAGE_UNPACK_PTR(mem
->vmp_next_m
));
8241 mem
->vmp_next_m
= 0;
8242 hibernate_hash_insert_page(mem
);
8244 hibernate_rebuild_hash_list
= NULL
;
8246 clock_get_uptime(&endTime
);
8247 SUB_ABSOLUTETIME(&endTime
, &startTime
);
8248 absolutetime_to_nanoseconds(endTime
, &nsec
);
8250 HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec
/ 1000000ULL);
8252 hibernate_rebuild_needed
= FALSE
;
8254 KDBG(IOKDBG_CODE(DBG_HIBERNATE
, 13) | DBG_FUNC_END
);
8258 hibernate_teardown_vm_structs(hibernate_page_list_t
*page_list
, hibernate_page_list_t
*page_list_wired
)
8261 unsigned int compact_target_indx
;
8262 vm_page_t mem
, mem_next
;
8263 vm_page_bucket_t
*bucket
;
8264 unsigned int mark_as_unneeded_pages
= 0;
8265 unsigned int unneeded_vm_page_bucket_pages
= 0;
8266 unsigned int unneeded_vm_pages_pages
= 0;
8267 unsigned int unneeded_pmap_pages
= 0;
8268 addr64_t start_of_unneeded
= 0;
8269 addr64_t end_of_unneeded
= 0;
8272 if (hibernate_should_abort()) {
8276 hibernate_rebuild_needed
= TRUE
;
8278 HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n",
8279 vm_page_wire_count
, vm_page_free_count
, vm_page_active_count
, vm_page_inactive_count
, vm_page_speculative_count
,
8280 vm_page_cleaned_count
, compressor_object
->resident_page_count
);
8282 for (i
= 0; i
< vm_page_bucket_count
; i
++) {
8283 bucket
= &vm_page_buckets
[i
];
8285 for (mem
= (vm_page_t
)(VM_PAGE_UNPACK_PTR(bucket
->page_list
)); mem
!= VM_PAGE_NULL
; mem
= mem_next
) {
8286 assert(mem
->vmp_hashed
);
8288 mem_next
= (vm_page_t
)(VM_PAGE_UNPACK_PTR(mem
->vmp_next_m
));
8290 if (mem
< &vm_pages
[0] || mem
>= &vm_pages
[vm_pages_count
]) {
8291 mem
->vmp_next_m
= VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list
);
8292 hibernate_rebuild_hash_list
= mem
;
8296 unneeded_vm_page_bucket_pages
= hibernate_mark_as_unneeded((addr64_t
)&vm_page_buckets
[0], (addr64_t
)&vm_page_buckets
[vm_page_bucket_count
], page_list
, page_list_wired
);
8297 mark_as_unneeded_pages
+= unneeded_vm_page_bucket_pages
;
8299 hibernate_teardown_vm_page_free_count
= vm_page_free_count
;
8301 compact_target_indx
= 0;
8303 for (i
= 0; i
< vm_pages_count
; i
++) {
8306 if (mem
->vmp_q_state
== VM_PAGE_ON_FREE_Q
) {
8309 assert(mem
->vmp_busy
);
8310 assert(!mem
->vmp_lopage
);
8312 color
= VM_PAGE_GET_COLOR(mem
);
8314 vm_page_queue_remove(&vm_page_queue_free
[color
].qhead
, mem
, vmp_pageq
);
8316 VM_PAGE_ZERO_PAGEQ_ENTRY(mem
);
8318 vm_page_free_count
--;
8320 hibernate_teardown_found_free_pages
++;
8322 if (vm_pages
[compact_target_indx
].vmp_q_state
!= VM_PAGE_ON_FREE_Q
) {
8323 compact_target_indx
= i
;
8327 * record this vm_page_t's original location
8328 * we need this even if it doesn't get moved
8329 * as an indicator to the rebuild function that
8330 * we don't have to move it
8332 mem
->vmp_next_m
= VM_PAGE_PACK_PTR(mem
);
8334 if (vm_pages
[compact_target_indx
].vmp_q_state
== VM_PAGE_ON_FREE_Q
) {
8336 * we've got a hole to fill, so
8337 * move this vm_page_t to it's new home
8339 vm_pages
[compact_target_indx
] = *mem
;
8340 mem
->vmp_q_state
= VM_PAGE_ON_FREE_Q
;
8342 hibernate_teardown_last_valid_compact_indx
= compact_target_indx
;
8343 compact_target_indx
++;
8345 hibernate_teardown_last_valid_compact_indx
= i
;
8349 unneeded_vm_pages_pages
= hibernate_mark_as_unneeded((addr64_t
)&vm_pages
[hibernate_teardown_last_valid_compact_indx
+ 1],
8350 (addr64_t
)&vm_pages
[vm_pages_count
- 1], page_list
, page_list_wired
);
8351 mark_as_unneeded_pages
+= unneeded_vm_pages_pages
;
8353 pal_hib_teardown_pmap_structs(&start_of_unneeded
, &end_of_unneeded
);
8355 if (start_of_unneeded
) {
8356 unneeded_pmap_pages
= hibernate_mark_as_unneeded(start_of_unneeded
, end_of_unneeded
, page_list
, page_list_wired
);
8357 mark_as_unneeded_pages
+= unneeded_pmap_pages
;
8359 HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages
, unneeded_vm_pages_pages
, unneeded_pmap_pages
);
8361 return mark_as_unneeded_pages
;
8365 #endif /* HIBERNATION */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#include <mach_vm_debug.h>
#if MACH_VM_DEBUG

#include <mach_debug/hash_info.h>
#include <vm/vm_debug.h>

/*
 *	Routine:	vm_page_info
 *	Purpose:
 *		Return information about the global VP table.
 *		Fills the buffer with as much information as possible
 *		and returns the desired size of the buffer.
 *	Conditions:
 *		Nothing locked.  The caller should provide
 *		possibly-pageable memory.
 */

unsigned int
vm_page_info(
    hash_info_bucket_t *info,
    unsigned int count)
{
    unsigned int    i;
    lck_spin_t      *bucket_lock;

    if (vm_page_bucket_count < count) {
        count = vm_page_bucket_count;
    }

    for (i = 0; i < count; i++) {
        vm_page_bucket_t    *bucket = &vm_page_buckets[i];
        unsigned int        bucket_count = 0;
        vm_page_t           m;

        bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
        lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);

        for (m = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
            m != VM_PAGE_NULL;
            m = (vm_page_t)(VM_PAGE_UNPACK_PTR(m->vmp_next_m))) {
            bucket_count++;
        }

        lck_spin_unlock(bucket_lock);

        /* don't touch pageable memory while holding locks */
        info[i].hib_count = bucket_count;
    }

    return vm_page_bucket_count;
}
#endif /* MACH_VM_DEBUG */
8422 #if VM_PAGE_BUCKETS_CHECK
8424 vm_page_buckets_check(void)
8428 unsigned int p_hash
;
8429 vm_page_bucket_t
*bucket
;
8430 lck_spin_t
*bucket_lock
;
8432 if (!vm_page_buckets_check_ready
) {
8437 if (hibernate_rebuild_needed
||
8438 hibernate_rebuild_hash_list
) {
8439 panic("BUCKET_CHECK: hibernation in progress: "
8440 "rebuild_needed=%d rebuild_hash_list=%p\n",
8441 hibernate_rebuild_needed
,
8442 hibernate_rebuild_hash_list
);
8444 #endif /* HIBERNATION */
8446 #if VM_PAGE_FAKE_BUCKETS
8448 for (cp
= (char *) vm_page_fake_buckets_start
;
8449 cp
< (char *) vm_page_fake_buckets_end
;
8452 panic("BUCKET_CHECK: corruption at %p in fake buckets "
8453 "[0x%llx:0x%llx]\n",
8455 (uint64_t) vm_page_fake_buckets_start
,
8456 (uint64_t) vm_page_fake_buckets_end
);
8459 #endif /* VM_PAGE_FAKE_BUCKETS */
8461 for (i
= 0; i
< vm_page_bucket_count
; i
++) {
8462 vm_object_t p_object
;
8464 bucket
= &vm_page_buckets
[i
];
8465 if (!bucket
->page_list
) {
8469 bucket_lock
= &vm_page_bucket_locks
[i
/ BUCKETS_PER_LOCK
];
8470 lck_spin_lock_grp(bucket_lock
, &vm_page_lck_grp_bucket
);
8471 p
= (vm_page_t
)(VM_PAGE_UNPACK_PTR(bucket
->page_list
));
8473 while (p
!= VM_PAGE_NULL
) {
8474 p_object
= VM_PAGE_OBJECT(p
);
8476 if (!p
->vmp_hashed
) {
8477 panic("BUCKET_CHECK: page %p (%p,0x%llx) "
8478 "hash %d in bucket %d at %p "
8480 p
, p_object
, p
->vmp_offset
,
8483 p_hash
= vm_page_hash(p_object
, p
->vmp_offset
);
8485 panic("BUCKET_CHECK: corruption in bucket %d "
8486 "at %p: page %p object %p offset 0x%llx "
8488 i
, bucket
, p
, p_object
, p
->vmp_offset
,
8491 p
= (vm_page_t
)(VM_PAGE_UNPACK_PTR(p
->vmp_next_m
));
8493 lck_spin_unlock(bucket_lock
);
8496 // printf("BUCKET_CHECK: checked buckets\n");
8498 #endif /* VM_PAGE_BUCKETS_CHECK */
/*
 * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
 * local queues if they exist... it's the only spot in the system where we add pages
 * to those queues... once on those queues, those pages can only move to one of the
 * global page queues or the free queues... they NEVER move from local q to local q.
 * the 'local' state is stable when vm_page_queues_remove is called since we're behind
 * the global vm_page_queue_lock at this point... we still need to take the local lock
 * in case this operation is being run on a different CPU than the local queue's identity,
 * but we don't have to worry about the page moving to a global queue or becoming wired
 * while we're grabbing the local lock since those operations would require the global
 * vm_page_queue_lock to be held, and we already own it.
 *
 * this is why it's safe to utilize the wire_count field in the vm_page_t as the local_id...
 * 'wired' and local are ALWAYS mutually exclusive conditions.
 */
#if CONFIG_BACKGROUND_QUEUE
void
vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_backgroundq)
#else
void
vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq)
#endif
{
	boolean_t       was_pageable = TRUE;
	vm_object_t     m_object;

	m_object = VM_PAGE_OBJECT(mem);

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	if (mem->vmp_q_state == VM_PAGE_NOT_ON_Q) {
		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
#if CONFIG_BACKGROUND_QUEUE
		if (remove_from_backgroundq == TRUE) {
			vm_page_remove_from_backgroundq(mem);
		}
		if (mem->vmp_on_backgroundq) {
			assert(mem->vmp_backgroundq.next != 0);
			assert(mem->vmp_backgroundq.prev != 0);
		} else {
			assert(mem->vmp_backgroundq.next == 0);
			assert(mem->vmp_backgroundq.prev == 0);
		}
#endif /* CONFIG_BACKGROUND_QUEUE */
		return;
	}

	if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
#if CONFIG_BACKGROUND_QUEUE
		assert(mem->vmp_backgroundq.next == 0 &&
		    mem->vmp_backgroundq.prev == 0 &&
		    mem->vmp_on_backgroundq == FALSE);
#endif /* CONFIG_BACKGROUND_QUEUE */
		return;
	}
	if (mem->vmp_q_state == VM_PAGE_IS_WIRED) {
		/*
		 * might put these guys on a list for debugging purposes
		 * if we do, we'll need to remove this assert
		 */
		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
#if CONFIG_BACKGROUND_QUEUE
		assert(mem->vmp_backgroundq.next == 0 &&
		    mem->vmp_backgroundq.prev == 0 &&
		    mem->vmp_on_backgroundq == FALSE);
#endif /* CONFIG_BACKGROUND_QUEUE */
		return;
	}

	assert(m_object != compressor_object);
	assert(m_object != kernel_object);
	assert(m_object != vm_submap_object);
	assert(!mem->vmp_fictitious);

	switch (mem->vmp_q_state) {
	case VM_PAGE_ON_ACTIVE_LOCAL_Q:
	{
		struct vpl     *lq;

		lq = zpercpu_get_cpu(vm_page_local_q, mem->vmp_local_id);
		VPL_LOCK(&lq->vpl_lock);
		vm_page_queue_remove(&lq->vpl_queue, mem, vmp_pageq);
		mem->vmp_local_id = 0;

		if (m_object->internal) {
			lq->vpl_internal_count--;
		} else {
			lq->vpl_external_count--;
		}
		VPL_UNLOCK(&lq->vpl_lock);
		was_pageable = FALSE;
		break;
	}
	case VM_PAGE_ON_ACTIVE_Q:
	{
		vm_page_queue_remove(&vm_page_queue_active, mem, vmp_pageq);
		vm_page_active_count--;
		break;
	}

	case VM_PAGE_ON_INACTIVE_INTERNAL_Q:
	{
		assert(m_object->internal == TRUE);

		vm_page_inactive_count--;
		vm_page_queue_remove(&vm_page_queue_anonymous, mem, vmp_pageq);
		vm_page_anonymous_count--;

		vm_purgeable_q_advance_all();
		vm_page_balance_inactive(3);
		break;
	}

	case VM_PAGE_ON_INACTIVE_EXTERNAL_Q:
	{
		assert(m_object->internal == FALSE);

		vm_page_inactive_count--;
		vm_page_queue_remove(&vm_page_queue_inactive, mem, vmp_pageq);
		vm_purgeable_q_advance_all();
		vm_page_balance_inactive(3);
		break;
	}

	case VM_PAGE_ON_INACTIVE_CLEANED_Q:
	{
		assert(m_object->internal == FALSE);

		vm_page_inactive_count--;
		vm_page_queue_remove(&vm_page_queue_cleaned, mem, vmp_pageq);
		vm_page_cleaned_count--;
		vm_page_balance_inactive(3);
		break;
	}

	case VM_PAGE_ON_THROTTLED_Q:
	{
		assert(m_object->internal == TRUE);

		vm_page_queue_remove(&vm_page_queue_throttled, mem, vmp_pageq);
		vm_page_throttled_count--;
		was_pageable = FALSE;
		break;
	}

	case VM_PAGE_ON_SPECULATIVE_Q:
	{
		assert(m_object->internal == FALSE);

		vm_page_remque(&mem->vmp_pageq);
		vm_page_speculative_count--;
		vm_page_balance_inactive(3);
		break;
	}

#if CONFIG_SECLUDED_MEMORY
	case VM_PAGE_ON_SECLUDED_Q:
	{
		vm_page_queue_remove(&vm_page_queue_secluded, mem, vmp_pageq);
		vm_page_secluded_count--;
		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
		if (m_object == VM_OBJECT_NULL) {
			vm_page_secluded_count_free--;
			was_pageable = FALSE;
		} else {
			assert(!m_object->internal);
			vm_page_secluded_count_inuse--;
			was_pageable = FALSE;
//			was_pageable = TRUE;
		}
		break;
	}
#endif /* CONFIG_SECLUDED_MEMORY */

	default:
	{
		/*
		 *	if (mem->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)
		 *	NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue...
		 *	the caller is responsible for determining if the page is on that queue, and if so, must
		 *	either first remove it (it needs both the page queues lock and the object lock to do
		 *	this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove
		 *
		 *	we also don't expect to encounter VM_PAGE_ON_FREE_Q, VM_PAGE_ON_FREE_LOCAL_Q, VM_PAGE_ON_FREE_LOPAGE_Q
		 *	or any of the undefined states
		 */
		panic("vm_page_queues_remove - bad page q_state (%p, %d)\n", mem, mem->vmp_q_state);
		break;
	}
	}
	VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
	mem->vmp_q_state = VM_PAGE_NOT_ON_Q;

#if CONFIG_BACKGROUND_QUEUE
	if (remove_from_backgroundq == TRUE) {
		vm_page_remove_from_backgroundq(mem);
	}
#endif /* CONFIG_BACKGROUND_QUEUE */
	if (was_pageable) {
		if (m_object->internal) {
			vm_page_pageable_internal_count--;
		} else {
			vm_page_pageable_external_count--;
		}
	}
}
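
/*
 * Illustrative caller pattern for the queue helpers in this file (a sketch
 * only; the real call sites live in vm_fault.c / vm_pageout.c and this exact
 * sequence is hypothetical). Every one of these helpers assumes the caller
 * already holds the global page-queue lock:
 *
 *	vm_page_lockspin_queues();
 *	vm_page_queues_remove(mem, TRUE);       // pull page off whatever queue it's on
 *	vm_page_enqueue_active(mem, FALSE);     // ... and re-queue it on the active queue
 *	vm_page_unlock_queues();
 */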
void
vm_page_remove_internal(vm_page_t page)
{
	vm_object_t __object = VM_PAGE_OBJECT(page);
	if (page == __object->memq_hint) {
		vm_page_t       __new_hint;
		vm_page_queue_entry_t   __qe;
		__qe = (vm_page_queue_entry_t)vm_page_queue_next(&page->vmp_listq);
		if (vm_page_queue_end(&__object->memq, __qe)) {
			__qe = (vm_page_queue_entry_t)vm_page_queue_prev(&page->vmp_listq);
			if (vm_page_queue_end(&__object->memq, __qe)) {
				__qe = NULL;
			}
		}
		__new_hint = (vm_page_t)((uintptr_t) __qe);
		__object->memq_hint = __new_hint;
	}
	vm_page_queue_remove(&__object->memq, page, vmp_listq);
#if CONFIG_SECLUDED_MEMORY
	if (__object->eligible_for_secluded) {
		vm_page_secluded.eligible_for_secluded--;
	}
#endif /* CONFIG_SECLUDED_MEMORY */
}
void
vm_page_enqueue_inactive(vm_page_t mem, boolean_t first)
{
	vm_object_t     m_object;

	m_object = VM_PAGE_OBJECT(mem);

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert(!mem->vmp_fictitious);
	assert(!mem->vmp_laundry);
	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
	vm_page_check_pageable_safe(mem);

	if (m_object->internal) {
		mem->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;

		if (first == TRUE) {
			vm_page_queue_enter_first(&vm_page_queue_anonymous, mem, vmp_pageq);
		} else {
			vm_page_queue_enter(&vm_page_queue_anonymous, mem, vmp_pageq);
		}

		vm_page_anonymous_count++;
		vm_page_pageable_internal_count++;
	} else {
		mem->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;

		if (first == TRUE) {
			vm_page_queue_enter_first(&vm_page_queue_inactive, mem, vmp_pageq);
		} else {
			vm_page_queue_enter(&vm_page_queue_inactive, mem, vmp_pageq);
		}

		vm_page_pageable_external_count++;
	}
	vm_page_inactive_count++;
	token_new_pagecount++;

#if CONFIG_BACKGROUND_QUEUE
	if (mem->vmp_in_background) {
		vm_page_add_to_backgroundq(mem, FALSE);
	}
#endif /* CONFIG_BACKGROUND_QUEUE */
}
void
vm_page_enqueue_active(vm_page_t mem, boolean_t first)
{
	vm_object_t     m_object;

	m_object = VM_PAGE_OBJECT(mem);

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert(!mem->vmp_fictitious);
	assert(!mem->vmp_laundry);
	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
	vm_page_check_pageable_safe(mem);

	mem->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
	if (first == TRUE) {
		vm_page_queue_enter_first(&vm_page_queue_active, mem, vmp_pageq);
	} else {
		vm_page_queue_enter(&vm_page_queue_active, mem, vmp_pageq);
	}
	vm_page_active_count++;

	if (m_object->internal) {
		vm_page_pageable_internal_count++;
	} else {
		vm_page_pageable_external_count++;
	}

#if CONFIG_BACKGROUND_QUEUE
	if (mem->vmp_in_background) {
		vm_page_add_to_backgroundq(mem, FALSE);
	}
#endif /* CONFIG_BACKGROUND_QUEUE */
	vm_page_balance_inactive(3);
}
/*
 * Pages from special kernel objects shouldn't
 * be placed on pageable queues.
 */
void
vm_page_check_pageable_safe(vm_page_t page)
{
	vm_object_t     page_object;

	page_object = VM_PAGE_OBJECT(page);

	if (page_object == kernel_object) {
		panic("vm_page_check_pageable_safe: trying to add page " \
		    "from kernel object (%p) to pageable queue", kernel_object);
	}

	if (page_object == compressor_object) {
		panic("vm_page_check_pageable_safe: trying to add page " \
		    "from compressor object (%p) to pageable queue", compressor_object);
	}

	if (page_object == vm_submap_object) {
		panic("vm_page_check_pageable_safe: trying to add page " \
		    "from submap object (%p) to pageable queue", vm_submap_object);
	}
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 * wired page diagnose
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#include <libkern/OSKextLibPrivate.h>

#define KA_SIZE(namelen, subtotalscount)        \
	(sizeof(struct vm_allocation_site) + (namelen) + 1 + ((subtotalscount) * sizeof(struct vm_allocation_total)))

#define KA_NAME(alloc)  \
	((char *)(&(alloc)->subtotals[(alloc->subtotalscount)]))

#define KA_NAME_LEN(alloc)      \
	(VM_TAG_NAME_LEN_MAX & (alloc->flags >> VM_TAG_NAME_LEN_SHIFT))
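
/*
 * Layout implied by the KA_* macros above (a sketch, not a declaration from
 * this file): a dynamically named allocation site is one contiguous block,
 *
 *	struct vm_allocation_site    header (name length is stored in 'flags')
 *	struct vm_allocation_total   subtotals[subtotalscount]
 *	char                         name[namelen + 1]   NUL-terminated
 *
 * so KA_NAME() simply points just past the subtotals array and KA_NAME_LEN()
 * recovers the stored length from the flags bitfield.
 */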
vm_tag_t
vm_tag_bt(void)
{
	uintptr_t* frameptr;
	uintptr_t* frameptr_next;
	uintptr_t retaddr = 0;
	uintptr_t kstackb, kstackt;
	const vm_allocation_site_t * site;
	thread_t cthread;
	kern_allocation_name_t name;

	cthread = current_thread();
	if (__improbable(cthread == NULL)) {
		return VM_KERN_MEMORY_OSFMK;
	}

	if ((name = thread_get_kernel_state(cthread)->allocation_name)) {
		if (name->tag) {
			return name->tag;
		}
		return VM_KERN_MEMORY_KALLOC;
	}

	site = NULL;
	kstackb = cthread->kernel_stack;
	kstackt = kstackb + kernel_stack_size;

	/* Load stack frame pointer (EBP on x86) into frameptr */
	frameptr = __builtin_frame_address(0);

	while (frameptr != NULL) {
		/* Verify thread stack bounds */
		if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) {
			break;
		}

		/* Next frame pointer is pointed to by the previous one */
		frameptr_next = (uintptr_t*) *frameptr;

		/* Pull return address from one spot above the frame pointer */
		retaddr = *(frameptr + 1);

#if defined(HAS_APPLE_PAC)
		retaddr = (uintptr_t) ptrauth_strip((void *)retaddr, ptrauth_key_return_address);
#endif /* defined(HAS_APPLE_PAC) */

		if (((retaddr < vm_kernel_builtinkmod_text_end) && (retaddr >= vm_kernel_builtinkmod_text))
		    || (retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top)) {
			site = OSKextGetAllocationSiteForCaller(retaddr);
			break;
		}
		frameptr = frameptr_next;
	}

	return site ? site->tag : VM_KERN_MEMORY_NONE;
}
static uint64_t free_tag_bits[VM_MAX_TAG_VALUE / 64];
static void
vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP)
{
	vm_tag_t tag;
	uint64_t avail;
	uint32_t idx;
	vm_allocation_site_t * prev;

	if (site->tag) {
		return;
	}

	idx = 0;
	while (TRUE) {
		avail = free_tag_bits[idx];
		if (avail) {
			tag = (vm_tag_t)__builtin_clzll(avail);
			avail &= ~(1ULL << (63 - tag));
			free_tag_bits[idx] = avail;
			tag += (idx << 6);
			break;
		}
		idx++;
		if (idx >= ARRAY_COUNT(free_tag_bits)) {
			for (idx = 0; idx < ARRAY_COUNT(vm_allocation_sites); idx++) {
				prev = vm_allocation_sites[idx];
				if (!prev) {
					continue;
				}
				if (!KA_NAME_LEN(prev)) {
					continue;
				}
				if (!prev->tag) {
					continue;
				}
				if (prev->total) {
					continue;
				}
				if (1 != prev->refcount) {
					continue;
				}

				assert(idx == prev->tag);
				tag = (vm_tag_t)idx;
				prev->tag = VM_KERN_MEMORY_NONE;
				*releasesiteP = prev;
				break;
			}
			if (idx >= ARRAY_COUNT(vm_allocation_sites)) {
				tag = VM_KERN_MEMORY_ANY;
			}
			break;
		}
	}
	site->tag = tag;

	OSAddAtomic16(1, &site->refcount);

	if (VM_KERN_MEMORY_ANY != tag) {
		vm_allocation_sites[tag] = site;
	}

	if (tag > vm_allocation_tag_highest) {
		vm_allocation_tag_highest = tag;
	}
}
static void
vm_tag_free_locked(vm_tag_t tag)
{
	uint64_t avail;
	uint32_t idx;
	uint64_t bit;

	if (VM_KERN_MEMORY_ANY == tag) {
		return;
	}

	idx = (tag >> 6);
	avail = free_tag_bits[idx];
	tag &= 63;
	bit = (1ULL << (63 - tag));
	assert(!(avail & bit));
	free_tag_bits[idx] = (avail | bit);
}

static void
vm_tag_init(void)
{
	vm_tag_t tag;

	for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++) {
		vm_tag_free_locked(tag);
	}

	for (tag = VM_KERN_MEMORY_ANY + 1; tag < VM_MAX_TAG_VALUE; tag++) {
		vm_tag_free_locked(tag);
	}
}
vm_tag_t
vm_tag_alloc(vm_allocation_site_t * site)
{
	vm_tag_t tag;
	vm_allocation_site_t * releasesite;

	if (VM_TAG_BT & site->flags) {
		tag = vm_tag_bt();
		if (VM_KERN_MEMORY_NONE != tag) {
			return tag;
		}
	}

	if (!site->tag) {
		releasesite = NULL;
		lck_spin_lock(&vm_allocation_sites_lock);
		vm_tag_alloc_locked(site, &releasesite);
		lck_spin_unlock(&vm_allocation_sites_lock);
		if (releasesite) {
			kern_allocation_name_release(releasesite);
		}
	}

	return site->tag;
}
void
vm_tag_update_size(vm_tag_t tag, int64_t delta)
{
	vm_allocation_site_t * allocation;
	uint64_t prior;

	assert(VM_KERN_MEMORY_NONE != tag);
	assert(tag < VM_MAX_TAG_VALUE);

	allocation = vm_allocation_sites[tag];
	assert(allocation);

	if (delta < 0) {
		assertf(allocation->total >= ((uint64_t)-delta), "tag %d, site %p", tag, allocation);
	}
	prior = OSAddAtomic64(delta, &allocation->total);

#if DEBUG || DEVELOPMENT
	uint64_t new, peak;

	new = prior + delta;
	do{
		peak = allocation->peak;
		if (new <= peak) {
			break;
		}
	}while (!OSCompareAndSwap64(peak, new, &allocation->peak));
#endif /* DEBUG || DEVELOPMENT */

	if (tag < VM_KERN_MEMORY_FIRST_DYNAMIC) {
		return;
	}

	if (!prior && !allocation->tag) {
		vm_tag_alloc(allocation);
	}
}
void
kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta)
{
	uint64_t prior;

	if (delta < 0) {
		assertf(allocation->total >= ((uint64_t)-delta), "name %p", allocation);
	}
	prior = OSAddAtomic64(delta, &allocation->total);

#if DEBUG || DEVELOPMENT
	uint64_t new, peak;

	new = prior + delta;
	do{
		peak = allocation->peak;
		if (new <= peak) {
			break;
		}
	}while (!OSCompareAndSwap64(peak, new, &allocation->peak));
#endif /* DEBUG || DEVELOPMENT */

	if (!prior && !allocation->tag) {
		vm_tag_alloc(allocation);
	}
}
#if VM_MAX_TAG_ZONES

void
vm_allocation_zones_init(void)
{
	kern_return_t ret;
	vm_offset_t   addr;
	vm_size_t     size;

	size = VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *)
	    + 2 * VM_MAX_TAG_ZONES * sizeof(vm_allocation_zone_total_t);

	ret = kernel_memory_allocate(kernel_map,
	    &addr, round_page(size), 0,
	    KMA_ZERO, VM_KERN_MEMORY_DIAG);
	assert(KERN_SUCCESS == ret);

	vm_allocation_zone_totals = (vm_allocation_zone_total_t **) addr;
	addr += VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *);

	// prepopulate VM_KERN_MEMORY_DIAG & VM_KERN_MEMORY_KALLOC so allocations
	// in vm_tag_update_zone_size() won't recurse
	vm_allocation_zone_totals[VM_KERN_MEMORY_DIAG] = (vm_allocation_zone_total_t *) addr;
	addr += VM_MAX_TAG_ZONES * sizeof(vm_allocation_zone_total_t);
	vm_allocation_zone_totals[VM_KERN_MEMORY_KALLOC] = (vm_allocation_zone_total_t *) addr;
}
void
vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx)
{
	vm_allocation_zone_total_t * zone;

	assert(VM_KERN_MEMORY_NONE != tag);
	assert(tag < VM_MAX_TAG_VALUE);

	if (zidx >= VM_MAX_TAG_ZONES) {
		return;
	}

	zone = vm_allocation_zone_totals[tag];
	if (zone) {
		return;
	}

	zone = kalloc_tag(VM_MAX_TAG_ZONES * sizeof(*zone), VM_KERN_MEMORY_DIAG);
	if (!zone) {
		return;
	}

	bzero(zone, VM_MAX_TAG_ZONES * sizeof(*zone));
	if (!OSCompareAndSwapPtr(NULL, zone, &vm_allocation_zone_totals[tag])) {
		kfree(zone, VM_MAX_TAG_ZONES * sizeof(*zone));
	}
}
void
vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, int64_t delta, int64_t dwaste)
{
	vm_allocation_zone_total_t * zone;
	uint64_t new;

	assert(VM_KERN_MEMORY_NONE != tag);
	assert(tag < VM_MAX_TAG_VALUE);

	if (zidx >= VM_MAX_TAG_ZONES) {
		return;
	}

	zone = vm_allocation_zone_totals[tag];
	assert(zone);
	zone += zidx;

	/* the zone is locked */
	if (delta < 0) {
		assertf(zone->total >= ((uint64_t)-delta), "zidx %d, tag %d, %p", zidx, tag, zone);
		zone->total += delta;
	} else {
		zone->total += delta;
		if (zone->total > zone->peak) {
			zone->peak = zone->total;
		}
		if (dwaste) {
			new = zone->waste;
			if (zone->wastediv < 65536) {
				zone->wastediv++;
			} else {
				new -= (new >> 16);
			}
			__assert_only bool ov = os_add_overflow(new, dwaste, &new);
			assert(!ov);
			zone->waste = new;
		}
	}
}

#endif /* VM_MAX_TAG_ZONES */
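
/*
 * Accounting sketch for the per-zone waste tracking above (my reading of the
 * code, noted here as an assumption): zone->waste accumulates per-allocation
 * slack (dwaste) while zone->wastediv counts samples, saturating at 65536,
 * after which old waste is decayed by new -= (new >> 16) so the figure behaves
 * like a running average. process_account() later reports an estimate of
 * reclaimable bytes roughly as
 *
 *	collectable_bytes = (waste * total / elem_size) / wastediv;
 *
 * i.e. average waste per element times the number of live elements.
 */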
void
kern_allocation_update_subtotal(kern_allocation_name_t allocation, uint32_t subtag, int64_t delta)
{
	kern_allocation_name_t other;
	struct vm_allocation_total * total;
	uint32_t subidx;

	subidx = 0;
	assert(VM_KERN_MEMORY_NONE != subtag);
	lck_spin_lock(&vm_allocation_sites_lock);
	for (; subidx < allocation->subtotalscount; subidx++) {
		if (VM_KERN_MEMORY_NONE == allocation->subtotals[subidx].tag) {
			allocation->subtotals[subidx].tag = (vm_tag_t)subtag;
			break;
		}
		if (subtag == allocation->subtotals[subidx].tag) {
			break;
		}
	}
	lck_spin_unlock(&vm_allocation_sites_lock);
	assert(subidx < allocation->subtotalscount);
	if (subidx >= allocation->subtotalscount) {
		return;
	}

	total = &allocation->subtotals[subidx];
	other = vm_allocation_sites[subtag];
	assert(other);

	if (delta < 0) {
		assertf(total->total >= ((uint64_t)-delta), "name %p", allocation);
		assertf(other->mapped >= ((uint64_t)-delta), "other %p", other);
	}
	OSAddAtomic64(delta, &other->mapped);
	OSAddAtomic64(delta, &total->total);
}
const char *
kern_allocation_get_name(kern_allocation_name_t allocation)
{
	return KA_NAME(allocation);
}
kern_allocation_name_t
kern_allocation_name_allocate(const char * name, uint16_t subtotalscount)
{
	uint16_t namelen;

	namelen = (uint16_t)strnlen(name, MACH_MEMORY_INFO_NAME_MAX_LEN - 1);

	kern_allocation_name_t allocation;
	allocation = kheap_alloc(KHEAP_DATA_BUFFERS,
	    KA_SIZE(namelen, subtotalscount), Z_WAITOK);
	bzero(allocation, KA_SIZE(namelen, subtotalscount));

	allocation->refcount = 1;
	allocation->subtotalscount = subtotalscount;
	allocation->flags = (uint16_t)(namelen << VM_TAG_NAME_LEN_SHIFT);
	strlcpy(KA_NAME(allocation), name, namelen + 1);

	return allocation;
}
void
kern_allocation_name_release(kern_allocation_name_t allocation)
{
	assert(allocation->refcount > 0);
	if (1 == OSAddAtomic16(-1, &allocation->refcount)) {
		kheap_free(KHEAP_DATA_BUFFERS, allocation,
		    KA_SIZE(KA_NAME_LEN(allocation), allocation->subtotalscount));
	}
}

vm_tag_t
kern_allocation_name_get_vm_tag(kern_allocation_name_t allocation)
{
	return vm_tag_alloc(allocation);
}
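
/*
 * Illustrative use of the kern_allocation_name API above; the subsystem name
 * and page count are hypothetical and this exact call site does not exist in
 * this file:
 *
 *	kern_allocation_name_t kan;
 *
 *	kan = kern_allocation_name_allocate("my_subsystem", 0);
 *	kern_allocation_update_size(kan, ptoa_64(npages));      // charge pages to the name
 *	...
 *	kern_allocation_update_size(kan, -ptoa_64(npages));     // release the charge
 *	kern_allocation_name_release(kan);
 */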
#if !VM_TAG_ACTIVE_UPDATE
static void
vm_page_count_object(mach_memory_info_t * info, unsigned int __unused num_info, vm_object_t object)
{
	if (!object->wired_page_count) {
		return;
	}
	if (object != kernel_object) {
		assert(object->wire_tag < num_info);
		info[object->wire_tag].size += ptoa_64(object->wired_page_count);
	}
}

typedef void (*vm_page_iterate_proc)(mach_memory_info_t * info,
    unsigned int num_info, vm_object_t object);

static void
vm_page_iterate_purgeable_objects(mach_memory_info_t * info, unsigned int num_info,
    vm_page_iterate_proc proc, purgeable_q_t queue,
    int group)
{
	vm_object_t object;

	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	    !queue_end(&queue->objq[group], (queue_entry_t) object);
	    object = (vm_object_t) queue_next(&object->objq)) {
		proc(info, num_info, object);
	}
}

static void
vm_page_iterate_objects(mach_memory_info_t * info, unsigned int num_info,
    vm_page_iterate_proc proc)
{
	vm_object_t object;

	lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket);
	queue_iterate(&vm_objects_wired,
	    object,
	    vm_object_t,
	    wired_objq)
	{
		proc(info, num_info, object);
	}
	lck_spin_unlock(&vm_objects_wired_lock);
}
#endif /* ! VM_TAG_ACTIVE_UPDATE */
static void
process_account(mach_memory_info_t * info, unsigned int num_info,
    uint64_t zones_collectable_bytes, boolean_t iterated)
{
	size_t                 namelen;
	unsigned int           idx, count, nextinfo;
	vm_allocation_site_t * site;

	lck_spin_lock(&vm_allocation_sites_lock);

	for (idx = 0; idx <= vm_allocation_tag_highest; idx++) {
		site = vm_allocation_sites[idx];
		if (!site) {
			continue;
		}
		info[idx].mapped = site->mapped;
		info[idx].tag    = site->tag;
		if (!iterated) {
			info[idx].size = site->total;
#if DEBUG || DEVELOPMENT
			info[idx].peak = site->peak;
#endif /* DEBUG || DEVELOPMENT */
		} else {
			if (!site->subtotalscount && (site->total != info[idx].size)) {
				printf("tag mismatch[%d] 0x%qx, iter 0x%qx\n", idx, site->total, info[idx].size);
				info[idx].size = site->total;
			}
		}
		info[idx].flags |= VM_KERN_SITE_WIRED;
		if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC) {
			info[idx].site   = idx;
			info[idx].flags |= VM_KERN_SITE_TAG;
			if (VM_KERN_MEMORY_ZONE == idx) {
				info[idx].flags |= VM_KERN_SITE_HIDE;
				info[idx].flags &= ~VM_KERN_SITE_WIRED;
				info[idx].collectable_bytes = zones_collectable_bytes;
			}
		} else if ((namelen = (VM_TAG_NAME_LEN_MAX & (site->flags >> VM_TAG_NAME_LEN_SHIFT)))) {
			info[idx].flags |= VM_KERN_SITE_NAMED;
			if (namelen > sizeof(info[idx].name)) {
				namelen = sizeof(info[idx].name);
			}
			strncpy(&info[idx].name[0], KA_NAME(site), namelen);
		} else if (VM_TAG_KMOD & site->flags) {
			info[idx].site   = OSKextGetKmodIDForSite(site, NULL, 0);
			info[idx].flags |= VM_KERN_SITE_KMOD;
		} else {
			info[idx].site   = VM_KERNEL_UNSLIDE(site);
			info[idx].flags |= VM_KERN_SITE_KERNEL;
		}
	}

	nextinfo = (vm_allocation_tag_highest + 1);
	count    = nextinfo;
	if (count >= num_info) {
		count = num_info;
	}

	for (idx = 0; idx < count; idx++) {
		site = vm_allocation_sites[idx];
		if (!site) {
			continue;
		}
#if VM_MAX_TAG_ZONES
		vm_allocation_zone_total_t * zone;
		uint32_t                     zidx;
		vm_size_t                    elem_size;

		if (vm_allocation_zone_totals
		    && (zone = vm_allocation_zone_totals[idx])
		    && (nextinfo < num_info)) {
			for (zidx = 0; zidx < VM_MAX_TAG_ZONES; zidx++) {
				if (!zone[zidx].peak) {
					continue;
				}
				info[nextinfo]        = info[idx];
				info[nextinfo].zone   = (uint16_t)zone_index_from_tag_index(zidx, &elem_size);
				info[nextinfo].flags &= ~VM_KERN_SITE_WIRED;
				info[nextinfo].flags |= VM_KERN_SITE_ZONE;
				info[nextinfo].size   = zone[zidx].total;
				info[nextinfo].peak   = zone[zidx].peak;
				info[nextinfo].mapped = 0;
				if (zone[zidx].wastediv) {
					info[nextinfo].collectable_bytes = ((zone[zidx].waste * zone[zidx].total / elem_size) / zone[zidx].wastediv);
				}
				nextinfo++;
			}
		}
#endif /* VM_MAX_TAG_ZONES */
		if (site->subtotalscount) {
			uint64_t mapped, mapcost, take;
			uint32_t sub;
			vm_tag_t alloctag;

			info[idx].size = site->total;
			mapped = info[idx].size;
			info[idx].mapped = mapped;
			mapcost = 0;
			for (sub = 0; sub < site->subtotalscount; sub++) {
				alloctag = site->subtotals[sub].tag;
				assert(alloctag < num_info);
				if (info[alloctag].name[0]) {
					continue;
				}
				take = site->subtotals[sub].total;
				if (take > info[alloctag].size) {
					take = info[alloctag].size;
				}
				if (take > mapped) {
					take = mapped;
				}
				info[alloctag].mapped -= take;
				info[alloctag].size   -= take;
				mapped  -= take;
				mapcost += take;
			}
			info[idx].size = mapcost;
		}
	}
	lck_spin_unlock(&vm_allocation_sites_lock);
}
uint32_t
vm_page_diagnose_estimate(void)
{
	vm_allocation_site_t * site;
	uint32_t count = zone_view_count;
	uint32_t idx;

	lck_spin_lock(&vm_allocation_sites_lock);
	for (idx = 0; idx < VM_MAX_TAG_VALUE; idx++) {
		site = vm_allocation_sites[idx];
		if (!site) {
			continue;
		}
		count++;
#if VM_MAX_TAG_ZONES
		if (vm_allocation_zone_totals) {
			vm_allocation_zone_total_t * zone;
			zone = vm_allocation_zone_totals[idx];
			if (!zone) {
				continue;
			}
			for (uint32_t zidx = 0; zidx < VM_MAX_TAG_ZONES; zidx++) {
				if (zone[zidx].peak) {
					count++;
				}
			}
		}
#endif /* VM_MAX_TAG_ZONES */
	}
	lck_spin_unlock(&vm_allocation_sites_lock);

	/* some slop for new tags created */
	count += 8;
	count += VM_KERN_COUNTER_COUNT;

	return count;
}
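
/*
 * Typical (illustrative) pairing of vm_page_diagnose_estimate() above with
 * vm_page_diagnose() below; the buffer management shown is only a sketch of
 * what a caller such as the mach_memory_info() host interface would do, not
 * code from this file:
 *
 *	unsigned int num = vm_page_diagnose_estimate();
 *	mach_memory_info_t *minfo = kheap_alloc(KHEAP_DATA_BUFFERS,
 *	    num * sizeof(*minfo), Z_WAITOK | Z_ZERO);
 *	if (minfo && vm_page_diagnose(minfo, num, 0) == KERN_SUCCESS) {
 *		// walk minfo[0..num) and report size/mapped/peak per tag
 *	}
 */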
static void
vm_page_diagnose_zone_stats(mach_memory_info_t *info, zone_stats_t zstats,
    bool percpu)
{
	zpercpu_foreach(zs, zstats) {
		info->size += zs->zs_mem_allocated - zs->zs_mem_freed;
	}
	if (percpu) {
		info->size *= zpercpu_count();
	}
	info->flags |= VM_KERN_SITE_NAMED | VM_KERN_SITE_ZONE_VIEW;
}

static void
vm_page_diagnose_zone(mach_memory_info_t *info, zone_t z)
{
	vm_page_diagnose_zone_stats(info, z->z_stats, z->percpu);
	snprintf(info->name, sizeof(info->name),
	    "%s%s[raw]", zone_heap_name(z), z->z_name);
}
static int
vm_page_diagnose_heap(mach_memory_info_t *info, kalloc_heap_t kheap)
{
	struct kheap_zones *zones = kheap->kh_zones;
	int i = 0;

	for (; i < zones->max_k_zone; i++) {
		vm_page_diagnose_zone(info + i, zones->k_zone[i]);
	}

	for (kalloc_heap_t kh = zones->views; kh; kh = kh->kh_next, i++) {
		vm_page_diagnose_zone_stats(info + i, kh->kh_stats, false);
		snprintf(info[i].name, sizeof(info[i].name),
		    "%skalloc[%s]", kheap->kh_name, kh->kh_name);
	}

	return i;
}
kern_return_t
vm_page_diagnose(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes)
{
	uint64_t             wired_size;
	uint64_t             wired_managed_size;
	uint64_t             wired_reserved_size;
	uint64_t             booter_size;
	boolean_t            iterate;
	mach_memory_info_t * counts;
	uint32_t             i;

	bzero(info, num_info * sizeof(mach_memory_info_t));

	if (!vm_page_wire_count_initial) {
		return KERN_ABORTED;
	}

#if !XNU_TARGET_OS_OSX
	wired_size = ptoa_64(vm_page_wire_count);
	wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count);
#else /* !XNU_TARGET_OS_OSX */
	wired_size = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count);
	wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count);
#endif /* !XNU_TARGET_OS_OSX */
	wired_managed_size = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial);

	booter_size = ml_get_booter_memory_size();
	wired_size += booter_size;

	assert(num_info >= VM_KERN_COUNTER_COUNT);
	num_info -= VM_KERN_COUNTER_COUNT;
	counts = &info[num_info];

#define SET_COUNT(xcount, xsize, xflags)                        \
	counts[xcount].tag = VM_MAX_TAG_VALUE + xcount;         \
	counts[xcount].site = (xcount);                         \
	counts[xcount].size = (xsize);                          \
	counts[xcount].mapped = (xsize);                        \
	counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags;

	SET_COUNT(VM_KERN_COUNT_MANAGED, ptoa_64(vm_page_pages), 0);
	SET_COUNT(VM_KERN_COUNT_WIRED, wired_size, 0);
	SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED, wired_managed_size, 0);
	SET_COUNT(VM_KERN_COUNT_RESERVED, wired_reserved_size, VM_KERN_SITE_WIRED);
	SET_COUNT(VM_KERN_COUNT_STOLEN, ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED);
	SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED);
	SET_COUNT(VM_KERN_COUNT_WIRED_BOOT, ptoa_64(vm_page_wire_count_on_boot), 0);
	SET_COUNT(VM_KERN_COUNT_BOOT_STOLEN, booter_size, VM_KERN_SITE_WIRED);
	SET_COUNT(VM_KERN_COUNT_WIRED_STATIC_KERNELCACHE, ptoa_64(vm_page_kernelcache_count), 0);

#define SET_MAP(xcount, xsize, xfree, xlargest) \
	counts[xcount].site = (xcount);                         \
	counts[xcount].size = (xsize);                          \
	counts[xcount].mapped = (xsize);                        \
	counts[xcount].free = (xfree);                          \
	counts[xcount].largest = (xlargest);                    \
	counts[xcount].flags = VM_KERN_SITE_COUNTER;

	vm_map_size_t map_size, map_free, map_largest;

	vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest);
	SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest);

	zone_map_sizes(&map_size, &map_free, &map_largest);
	SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest);

	vm_map_sizes(kalloc_map, &map_size, &map_free, &map_largest);
	SET_MAP(VM_KERN_COUNT_MAP_KALLOC, map_size, map_free, map_largest);

	assert(num_info >= zone_view_count);
	num_info -= zone_view_count;
	counts = &info[num_info];
	i = 0;

	i += vm_page_diagnose_heap(counts + i, KHEAP_DEFAULT);
	if (KHEAP_DATA_BUFFERS->kh_heap_id == KHEAP_ID_DATA_BUFFERS) {
		i += vm_page_diagnose_heap(counts + i, KHEAP_DATA_BUFFERS);
	}
	if (KHEAP_KEXT->kh_heap_id == KHEAP_ID_KEXT) {
		i += vm_page_diagnose_heap(counts + i, KHEAP_KEXT);
	}
	assert(i <= zone_view_count);

	zone_index_foreach(zidx) {
		zone_t z = &zone_array[zidx];
		zone_view_t zv = z->z_views;

		if (zv == NULL) {
			continue;
		}

		if (z->kalloc_heap == KHEAP_ID_NONE) {
			vm_page_diagnose_zone(counts + i, z);
			i++;
			assert(i <= zone_view_count);
		}

		for (; zv; zv = zv->zv_next) {
			vm_page_diagnose_zone_stats(counts + i, zv->zv_stats,
			    z->percpu);
			snprintf(counts[i].name, sizeof(counts[i].name), "%s%s[%s]",
			    zone_heap_name(z), z->z_name, zv->zv_name);
			i++;
			assert(i <= zone_view_count);
		}
	}

	iterate = !VM_TAG_ACTIVE_UPDATE;
	if (iterate) {
		enum { kMaxKernelDepth = 1 };
		vm_map_t            maps[kMaxKernelDepth];
		vm_map_entry_t      entries[kMaxKernelDepth];
		vm_map_t            map;
		vm_map_entry_t      entry;
		vm_object_offset_t  offset;
		vm_page_t           page;
		int                 stackIdx, count;

#if !VM_TAG_ACTIVE_UPDATE
		vm_page_iterate_objects(info, num_info, &vm_page_count_object);
#endif /* ! VM_TAG_ACTIVE_UPDATE */

		map = kernel_map;
		stackIdx = 0;
		while (map) {
			vm_map_lock(map);
			for (entry = map->hdr.links.next; map; entry = entry->links.next) {
				if (entry->is_sub_map) {
					assert(stackIdx < kMaxKernelDepth);
					maps[stackIdx] = map;
					entries[stackIdx] = entry;
					stackIdx++;
					map = VME_SUBMAP(entry);
					entry = NULL;
					break;
				}
				if (VME_OBJECT(entry) == kernel_object) {
					count = 0;
					vm_object_lock(VME_OBJECT(entry));
					for (offset = entry->links.start; offset < entry->links.end; offset += page_size) {
						page = vm_page_lookup(VME_OBJECT(entry), offset);
						if (page && VM_PAGE_WIRED(page)) {
							count++;
						}
					}
					vm_object_unlock(VME_OBJECT(entry));

					if (count) {
						assert(VME_ALIAS(entry) != VM_KERN_MEMORY_NONE);
						assert(VME_ALIAS(entry) < num_info);
						info[VME_ALIAS(entry)].size += ptoa_64(count);
					}
				}
				while (map && (entry == vm_map_last_entry(map))) {
					vm_map_unlock(map);
					if (!stackIdx) {
						map = NULL;
					} else {
						--stackIdx;
						map = maps[stackIdx];
						entry = entries[stackIdx];
					}
				}
			}
		}
	}

	process_account(info, num_info, zones_collectable_bytes, iterate);

	return KERN_SUCCESS;
}
#if DEBUG || DEVELOPMENT

kern_return_t
vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size)
{
	kern_return_t  ret;
	vm_size_t      zsize;
	vm_map_t       map;
	vm_map_entry_t entry;

	zsize = zone_element_info((void *) addr, tag);
	if (zsize) {
		*zone_size = *size = zsize;
		return KERN_SUCCESS;
	}

	*zone_size = 0;
	ret = KERN_INVALID_ADDRESS;
	for (map = kernel_map; map;) {
		vm_map_lock(map);
		if (!vm_map_lookup_entry(map, addr, &entry)) {
			break;
		}
		if (entry->is_sub_map) {
			if (map != kernel_map) {
				break;
			}
			map = VME_SUBMAP(entry);
			continue;
		}
		if (entry->vme_start != addr) {
			break;
		}
		*tag = (vm_tag_t)VME_ALIAS(entry);
		*size = (entry->vme_end - addr);
		ret = KERN_SUCCESS;
		break;
	}
	if (map != kernel_map) {
		vm_map_unlock(map);
	}
	vm_map_unlock(kernel_map);

	return ret;
}

#endif /* DEBUG || DEVELOPMENT */
uint32_t
vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen)
{
	vm_allocation_site_t * site;
	uint32_t               kmodId;

	kmodId = 0;
	lck_spin_lock(&vm_allocation_sites_lock);
	if ((site = vm_allocation_sites[tag])) {
		if (VM_TAG_KMOD & site->flags) {
			kmodId = OSKextGetKmodIDForSite(site, name, namelen);
		}
	}
	lck_spin_unlock(&vm_allocation_sites_lock);

	return kmodId;
}
#if CONFIG_SECLUDED_MEMORY
/*
 * Note that there's no locking around other accesses to vm_page_secluded_target.
 * That should be OK, since these are the only places where it can be changed after
 * initialization. Other users (like vm_pageout) may see the wrong value briefly,
 * but will eventually get the correct value. This brief mismatch is OK as pageout
 * and page freeing will auto-adjust the vm_page_secluded_count to match the target.
 */
unsigned int vm_page_secluded_suppress_cnt = 0;
unsigned int vm_page_secluded_save_target;

LCK_GRP_DECLARE(secluded_suppress_slock_grp, "secluded_suppress_slock");
LCK_SPIN_DECLARE(secluded_suppress_slock, &secluded_suppress_slock_grp);

void
start_secluded_suppression(task_t task)
{
	if (task->task_suppressed_secluded) {
		return;
	}
	lck_spin_lock(&secluded_suppress_slock);
	if (!task->task_suppressed_secluded && vm_page_secluded_suppress_cnt++ == 0) {
		task->task_suppressed_secluded = TRUE;
		vm_page_secluded_save_target = vm_page_secluded_target;
		vm_page_secluded_target = 0;
		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
	}
	lck_spin_unlock(&secluded_suppress_slock);
}

void
stop_secluded_suppression(task_t task)
{
	lck_spin_lock(&secluded_suppress_slock);
	if (task->task_suppressed_secluded && --vm_page_secluded_suppress_cnt == 0) {
		task->task_suppressed_secluded = FALSE;
		vm_page_secluded_target = vm_page_secluded_save_target;
		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
	}
	lck_spin_unlock(&secluded_suppress_slock);
}

#endif /* CONFIG_SECLUDED_MEMORY */
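
/*
 * Illustrative (hypothetical) pairing of the suppression calls above,
 * bracketing a phase during which the secluded target should not hold pages
 * back from a task:
 *
 *	start_secluded_suppression(task);   // secluded target driven to 0
 *	... memory-hungry phase ...
 *	stop_secluded_suppression(task);    // previous target restored
 */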