/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Resident memory management module.
 */
#include <libkern/OSAtomic.h>
#include <libkern/OSDebug.h>

#include <mach/clock_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>

#include <kern/counters.h>
#include <kern/sched_prim.h>
#include <kern/policy_internal.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/ledger.h>

#include <vm/vm_init.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>			/* kernel_memory_allocate() */
#include <kern/misc_protos.h>
#include <zone_debug.h>
#include <mach_debug/zone_info.h>

#include <pexpert/pexpert.h>

#include <vm/vm_protos.h>
#include <vm/memory_object.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_compressor.h>

#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache.h>
#endif

#include <IOKit/IOHibernatePrivate.h>

#include <sys/kdebug.h>
char	vm_page_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
char	vm_page_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
char	vm_page_non_speculative_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
char	vm_page_active_or_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];

#if CONFIG_SECLUDED_MEMORY
struct vm_page_secluded_data vm_page_secluded;
#endif /* CONFIG_SECLUDED_MEMORY */

boolean_t	hibernate_cleaning_in_progress = FALSE;
boolean_t	vm_page_free_verify = TRUE;

uint32_t	vm_lopage_free_count = 0;
uint32_t	vm_lopage_free_limit = 0;
uint32_t	vm_lopage_lowater = 0;
boolean_t	vm_lopage_refill = FALSE;
boolean_t	vm_lopage_needed = FALSE;

lck_mtx_ext_t	vm_page_queue_lock_ext;
lck_mtx_ext_t	vm_page_queue_free_lock_ext;
lck_mtx_ext_t	vm_purgeable_queue_lock_ext;

int		speculative_age_index = 0;
int		speculative_steal_index = 0;
struct vm_speculative_age_q vm_page_queue_speculative[VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1];


__private_extern__ void		vm_page_init_lck_grp(void);

static void		vm_page_free_prepare(vm_page_t	page);
static vm_page_t	vm_page_grab_fictitious_common(ppnum_t phys_addr);

static void vm_tag_init(void);

uint64_t	vm_min_kernel_and_kext_address = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
uint32_t	vm_packed_from_vm_pages_array_mask = VM_PACKED_FROM_VM_PAGES_ARRAY;
uint32_t	vm_packed_pointer_shift = VM_PACKED_POINTER_SHIFT;
/*
 *	Associated with page of user-allocatable memory is a
 *	page structure.
 */

/*
 *	These variables record the values returned by vm_page_bootstrap,
 *	for debugging purposes.  The implementation of pmap_steal_memory
 *	and pmap_startup here also uses them internally.
 */

vm_offset_t	virtual_space_start;
vm_offset_t	virtual_space_end;
uint32_t	vm_page_pages;
/*
 *	The vm_page_lookup() routine, which provides for fast
 *	(virtual memory object, offset) to page lookup, employs
 *	the following hash table.  The vm_page_{insert,remove}
 *	routines install and remove associations in the table.
 *	[This table is often called the virtual-to-physical,
 *	or VP, table.]
 */
typedef struct {
	vm_page_packed_t page_list;
#if	MACH_PAGE_HASH_STATS
	int		cur_count;		/* current count */
	int		hi_count;		/* high water mark */
#endif /* MACH_PAGE_HASH_STATS */
} vm_page_bucket_t;


#define BUCKETS_PER_LOCK	16

vm_page_bucket_t *vm_page_buckets;		/* Array of buckets */
unsigned int	vm_page_bucket_count = 0;	/* How big is array? */
unsigned int	vm_page_hash_mask;		/* Mask for hash function */
unsigned int	vm_page_hash_shift;		/* Shift for hash function */
uint32_t	vm_page_bucket_hash;		/* Basic bucket hash */
unsigned int	vm_page_bucket_lock_count = 0;	/* How big is array of locks? */

lck_spin_t	*vm_page_bucket_locks;
lck_spin_t	vm_objects_wired_lock;
lck_spin_t	vm_allocation_sites_lock;

#if VM_PAGE_BUCKETS_CHECK
boolean_t vm_page_buckets_check_ready = FALSE;
#if VM_PAGE_FAKE_BUCKETS
vm_page_bucket_t *vm_page_fake_buckets;	/* decoy buckets */
vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
#endif /* VM_PAGE_FAKE_BUCKETS */
#endif /* VM_PAGE_BUCKETS_CHECK */
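
/*
 * Note: the hash buckets above are protected by an array of spin locks rather
 * than a single lock; each lock covers BUCKETS_PER_LOCK (16) consecutive
 * buckets (see the hash_id / BUCKETS_PER_LOCK indexing in
 * vm_page_insert_internal(), vm_page_remove() and vm_page_lookup() below),
 * trading a small amount of memory for reduced lock contention.
 */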
#if	MACH_PAGE_HASH_STATS
/* This routine is only for debug.  It is intended to be called by
 * hand by a developer using a kernel debugger.  This routine prints
 * out vm_page_hash table statistics to the kernel debug console.
 */
void
hash_debug(void)
{
	int	i;
	int	numbuckets = 0;
	int	highsum = 0;
	int	maxdepth = 0;

	for (i = 0; i < vm_page_bucket_count; i++) {
		if (vm_page_buckets[i].hi_count) {
			numbuckets++;
			highsum += vm_page_buckets[i].hi_count;
			if (vm_page_buckets[i].hi_count > maxdepth)
				maxdepth = vm_page_buckets[i].hi_count;
		}
	}
	printf("Total number of buckets: %d\n", vm_page_bucket_count);
	printf("Number used buckets:     %d = %d%%\n",
	       numbuckets, 100*numbuckets/vm_page_bucket_count);
	printf("Number unused buckets:   %d = %d%%\n",
	       vm_page_bucket_count - numbuckets,
	       100*(vm_page_bucket_count-numbuckets)/vm_page_bucket_count);
	printf("Sum of bucket max depth: %d\n", highsum);
	printf("Average bucket depth:    %d.%2d\n",
	       highsum/vm_page_bucket_count,
	       highsum%vm_page_bucket_count);
	printf("Maximum bucket depth:    %d\n", maxdepth);
}
#endif /* MACH_PAGE_HASH_STATS */
/*
 *	The virtual page size is currently implemented as a runtime
 *	variable, but is constant once initialized using vm_set_page_size.
 *	This initialization must be done in the machine-dependent
 *	bootstrap sequence, before calling other machine-independent
 *	code.
 *
 *	All references to the virtual page size outside this
 *	module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
 *	constants.
 */
vm_size_t	page_size  = PAGE_SIZE;
vm_size_t	page_mask  = PAGE_MASK;
int		page_shift = PAGE_SHIFT;
/*
 *	Resident page structures are initialized from
 *	a template (see vm_page_alloc).
 *
 *	When adding a new field to the virtual memory
 *	object structure, be sure to add initialization
 *	(see vm_page_bootstrap).
 */
struct vm_page	vm_page_template;

vm_page_t	vm_pages = VM_PAGE_NULL;
vm_page_t	vm_page_array_beginning_addr;
vm_page_t	vm_page_array_ending_addr;

unsigned int	vm_pages_count = 0;
ppnum_t		vm_page_lowest = 0;
/*
 *	Resident pages that represent real memory
 *	are allocated from a set of free lists,
 *	one per color.
 */
unsigned int	vm_colors;
unsigned int	vm_color_mask;			/* mask is == (vm_colors-1) */
unsigned int	vm_cache_geometry_colors = 0;	/* set by hw dependent code during startup */
unsigned int	vm_free_magazine_refill_limit = 0;


struct vm_page_queue_free_head {
	vm_page_queue_head_t	qhead;
} __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));

struct vm_page_queue_free_head	vm_page_queue_free[MAX_COLORS];


unsigned int	vm_page_free_wanted;
unsigned int	vm_page_free_wanted_privileged;
#if CONFIG_SECLUDED_MEMORY
unsigned int	vm_page_free_wanted_secluded;
#endif /* CONFIG_SECLUDED_MEMORY */
unsigned int	vm_page_free_count;
/*
 *	Occasionally, the virtual memory system uses
 *	resident page structures that do not refer to
 *	real pages, for example to leave a page with
 *	important state information in the VP table.
 *
 *	These page structures are allocated the way
 *	most other kernel structures are.
 */
zone_t	vm_page_array_zone;
zone_t	vm_page_zone;
vm_locks_array_t vm_page_locks;
decl_lck_mtx_data(,vm_page_alloc_lock)
lck_mtx_ext_t vm_page_alloc_lock_ext;

unsigned int	io_throttle_zero_fill;

unsigned int	vm_page_local_q_count = 0;
unsigned int	vm_page_local_q_soft_limit = 250;
unsigned int	vm_page_local_q_hard_limit = 500;
struct vplq	*vm_page_local_q = NULL;
/* N.B. Guard and fictitious pages must not
 * be assigned a zero phys_page value.
 */
/*
 *	Fictitious pages don't have a physical address,
 *	but we must initialize phys_page to something.
 *	For debugging, this should be a strange value
 *	that the pmap module can recognize in assertions.
 */
ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;

/*
 *	Guard pages are not accessible so they don't
 *	need a physical address, but we need to enter
 *	one in the pmap.
 *	Let's make it recognizable and make sure that
 *	we don't use a real physical page with that
 *	physical address.
 */
ppnum_t vm_page_guard_addr = (ppnum_t) -2;
/*
 *	Resident page structures are also chained on
 *	queues that are used by the page replacement
 *	system (pageout daemon).  These queues are
 *	defined here, but are shared by the pageout
 *	module.  The inactive queue is broken into
 *	file backed and anonymous for convenience as the
 *	pageout daemon often assigns a higher
 *	importance to anonymous pages (less likely to pick)
 */
vm_page_queue_head_t	vm_page_queue_active __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
vm_page_queue_head_t	vm_page_queue_inactive __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
#if CONFIG_SECLUDED_MEMORY
vm_page_queue_head_t	vm_page_queue_secluded __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
#endif /* CONFIG_SECLUDED_MEMORY */
vm_page_queue_head_t	vm_page_queue_anonymous __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));	/* inactive memory queue for anonymous pages */
vm_page_queue_head_t	vm_page_queue_throttled __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));

queue_head_t	vm_objects_wired;

#if CONFIG_BACKGROUND_QUEUE
vm_page_queue_head_t	vm_page_queue_background __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
uint32_t	vm_page_background_limit;
uint32_t	vm_page_background_target;
uint32_t	vm_page_background_count;
uint64_t	vm_page_background_promoted_count;

uint32_t	vm_page_background_internal_count;
uint32_t	vm_page_background_external_count;

uint32_t	vm_page_background_mode;
uint32_t	vm_page_background_exclude_external;
#endif /* CONFIG_BACKGROUND_QUEUE */
unsigned int	vm_page_active_count;
unsigned int	vm_page_inactive_count;
#if CONFIG_SECLUDED_MEMORY
unsigned int	vm_page_secluded_count;
unsigned int	vm_page_secluded_count_free;
unsigned int	vm_page_secluded_count_inuse;
#endif /* CONFIG_SECLUDED_MEMORY */
unsigned int	vm_page_anonymous_count;
unsigned int	vm_page_throttled_count;
unsigned int	vm_page_speculative_count;

unsigned int	vm_page_wire_count;
unsigned int	vm_page_stolen_count;
unsigned int	vm_page_wire_count_initial;
unsigned int	vm_page_pages_initial;
unsigned int	vm_page_gobble_count = 0;

#define	VM_PAGE_WIRE_COUNT_WARNING	0
#define	VM_PAGE_GOBBLE_COUNT_WARNING	0

unsigned int	vm_page_purgeable_count = 0;		/* # of pages purgeable now */
unsigned int	vm_page_purgeable_wired_count = 0;	/* # of purgeable pages that are wired now */
uint64_t	vm_page_purged_count = 0;		/* total count of purged pages */

unsigned int	vm_page_xpmapped_external_count = 0;
unsigned int	vm_page_external_count = 0;
unsigned int	vm_page_internal_count = 0;
unsigned int	vm_page_pageable_external_count = 0;
unsigned int	vm_page_pageable_internal_count = 0;
#if DEVELOPMENT || DEBUG
unsigned int	vm_page_speculative_recreated = 0;
unsigned int	vm_page_speculative_created = 0;
unsigned int	vm_page_speculative_used = 0;
#endif

vm_page_queue_head_t	vm_page_queue_cleaned __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));

unsigned int	vm_page_cleaned_count = 0;
unsigned int	vm_pageout_enqueued_cleaned = 0;

uint64_t	max_valid_dma_address = 0xffffffffffffffffULL;
ppnum_t		max_valid_low_ppnum = 0xffffffff;
/*
 *	Several page replacement parameters are also
 *	shared with this module, so that page allocation
 *	(done here in vm_page_alloc) can trigger the
 *	pageout daemon.
 */
unsigned int	vm_page_free_target = 0;
unsigned int	vm_page_free_min = 0;
unsigned int	vm_page_throttle_limit = 0;
unsigned int	vm_page_inactive_target = 0;
#if CONFIG_SECLUDED_MEMORY
unsigned int	vm_page_secluded_target = 0;
#endif /* CONFIG_SECLUDED_MEMORY */
unsigned int	vm_page_anonymous_min = 0;
unsigned int	vm_page_inactive_min = 0;
unsigned int	vm_page_free_reserved = 0;
unsigned int	vm_page_throttle_count = 0;


/*
 * The VM system has a couple of heuristics for deciding
 * that pages are "uninteresting" and should be placed
 * on the inactive queue as likely candidates for replacement.
 * These variables let the heuristics be controlled at run-time
 * to make experimentation easier.
 */

boolean_t vm_page_deactivate_hint = TRUE;

struct vm_page_stats_reusable vm_page_stats_reusable;
/*
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from page_size.
 */
void
vm_set_page_size(void)
{
	page_size  = PAGE_SIZE;
	page_mask  = PAGE_MASK;
	page_shift = PAGE_SHIFT;

	if ((page_mask & page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");

	for (page_shift = 0; ; page_shift++)
		if ((1U << page_shift) == page_size)
			break;
}

#define COLOR_GROUPS_TO_STEAL	4
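
/*
 * Note: free pages are kept on one queue per "color" (vm_page_queue_free[]
 * above); a page's color is presumably derived from low-order bits of its
 * physical page number so that pages which would collide in the processor
 * cache land on different queues.  COLOR_GROUPS_TO_STEAL scales the
 * free-magazine refill limit set in vm_page_set_colors() below.
 */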
/* Called once during startup, once the cache geometry is known.
 */
void
vm_page_set_colors( void )
{
	unsigned int	n, override;

	if ( PE_parse_boot_argn("colors", &override, sizeof (override)) )	/* colors specified as a boot-arg? */
		n = override;
	else if ( vm_cache_geometry_colors )		/* do we know what the cache geometry is? */
		n = vm_cache_geometry_colors;
	else	n = DEFAULT_COLORS;			/* use default if all else fails */

	if ( n > MAX_COLORS )
		n = MAX_COLORS;

	/* the count must be a power of 2  */
	if ( ( n & (n - 1)) != 0 )
		panic("vm_page_set_colors");

	vm_colors = n;
	vm_color_mask = n - 1;

	vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL;
}
lck_grp_t		vm_page_lck_grp_free;
lck_grp_t		vm_page_lck_grp_queue;
lck_grp_t		vm_page_lck_grp_local;
lck_grp_t		vm_page_lck_grp_purge;
lck_grp_t		vm_page_lck_grp_alloc;
lck_grp_t		vm_page_lck_grp_bucket;
lck_grp_attr_t		vm_page_lck_grp_attr;
lck_attr_t		vm_page_lck_attr;
__private_extern__ void
vm_page_init_lck_grp(void)
{
	/*
	 * initialize the vm_page lock world
	 */
	lck_grp_attr_setdefault(&vm_page_lck_grp_attr);
	lck_grp_init(&vm_page_lck_grp_free, "vm_page_free", &vm_page_lck_grp_attr);
	lck_grp_init(&vm_page_lck_grp_queue, "vm_page_queue", &vm_page_lck_grp_attr);
	lck_grp_init(&vm_page_lck_grp_local, "vm_page_queue_local", &vm_page_lck_grp_attr);
	lck_grp_init(&vm_page_lck_grp_purge, "vm_page_purge", &vm_page_lck_grp_attr);
	lck_grp_init(&vm_page_lck_grp_alloc, "vm_page_alloc", &vm_page_lck_grp_attr);
	lck_grp_init(&vm_page_lck_grp_bucket, "vm_page_bucket", &vm_page_lck_grp_attr);
	lck_attr_setdefault(&vm_page_lck_attr);
	lck_mtx_init_ext(&vm_page_alloc_lock, &vm_page_alloc_lock_ext, &vm_page_lck_grp_alloc, &vm_page_lck_attr);

	vm_compressor_init_locks();
}
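
/*
 * Note: vm_page_init_local_q() below sets up one "vplq" local queue per CPU
 * (sized by ml_get_max_cpus()), presumably so that per-CPU page activity can
 * be batched before touching the global page queues; the soft/hard limits
 * declared near the top of this file bound how large a local queue may grow.
 */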
void
vm_page_init_local_q()
{
	unsigned int		num_cpus;
	unsigned int		i;
	struct vplq		*t_local_q;

	num_cpus = ml_get_max_cpus();

	/*
	 * no point in this for a uni-processor system
	 */
	if (num_cpus >= 2) {
		t_local_q = (struct vplq *)kalloc(num_cpus * sizeof(struct vplq));

		for (i = 0; i < num_cpus; i++) {
			struct vpl	*lq;

			lq = &t_local_q[i].vpl_un.vpl;
			VPL_LOCK_INIT(lq, &vm_page_lck_grp_local, &vm_page_lck_attr);
			vm_page_queue_init(&lq->vpl_queue);
			lq->vpl_count = 0;
			lq->vpl_internal_count = 0;
			lq->vpl_external_count = 0;
		}
		vm_page_local_q_count = num_cpus;

		vm_page_local_q = (struct vplq *)t_local_q;
	}
}
/*
 *	vm_page_bootstrap:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 *	Returns the range of available kernel virtual memory.
 */
void
vm_page_bootstrap(
	vm_offset_t		*startp,
	vm_offset_t		*endp)
{
	vm_page_t	m;
	unsigned int	i;
	unsigned int	log1;
	unsigned int	log2;
	unsigned int	size;

	/*
	 *	Initialize the vm_page template.
	 */

	m = &vm_page_template;
	bzero(m, sizeof (*m));

#if CONFIG_BACKGROUND_QUEUE
	m->vm_page_backgroundq.next = 0;
	m->vm_page_backgroundq.prev = 0;
	m->vm_page_in_background = FALSE;
	m->vm_page_on_backgroundq = FALSE;
#endif

	VM_PAGE_ZERO_PAGEQ_ENTRY(m);

	m->vm_page_object = 0;			/* reset later */
	m->offset = (vm_object_offset_t) -1;	/* reset later */

	m->vm_page_q_state = VM_PAGE_NOT_ON_Q;

	m->reference = FALSE;

	m->__unused_pageq_bits = 0;

	VM_PAGE_SET_PHYS_PAGE(m, 0);		/* reset later */

	m->fictitious = FALSE;

	m->free_when_done = FALSE;

	m->clustered = FALSE;
	m->overwriting = FALSE;

	m->encrypted = FALSE;
	m->encrypted_cleaning = FALSE;
	m->cs_validated = FALSE;
	m->cs_tainted = FALSE;

	m->written_by_kernel = FALSE;
	m->__unused_object_bits = 0;
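
	/*
	 * Note: the zeroed template above is copied wholesale into every new
	 * page by vm_page_init() ("*mem = vm_page_template" later in this
	 * file), so a field only needs an explicit assignment here when its
	 * default value is something other than zero/FALSE.
	 */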
	/*
	 *	Initialize the page queues.
	 */
	vm_page_init_lck_grp();

	lck_mtx_init_ext(&vm_page_queue_free_lock, &vm_page_queue_free_lock_ext, &vm_page_lck_grp_free, &vm_page_lck_attr);
	lck_mtx_init_ext(&vm_page_queue_lock, &vm_page_queue_lock_ext, &vm_page_lck_grp_queue, &vm_page_lck_attr);
	lck_mtx_init_ext(&vm_purgeable_queue_lock, &vm_purgeable_queue_lock_ext, &vm_page_lck_grp_purge, &vm_page_lck_attr);
	for (i = 0; i < PURGEABLE_Q_TYPE_MAX; i++) {
		int group;

		purgeable_queues[i].token_q_head = 0;
		purgeable_queues[i].token_q_tail = 0;
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
			queue_init(&purgeable_queues[i].objq[group]);

		purgeable_queues[i].type = i;
		purgeable_queues[i].new_pages = 0;

		purgeable_queues[i].debug_count_tokens = 0;
		purgeable_queues[i].debug_count_objects = 0;
	}
	purgeable_nonvolatile_count = 0;
	queue_init(&purgeable_nonvolatile_queue);
	for (i = 0; i < MAX_COLORS; i++ )
		vm_page_queue_init(&vm_page_queue_free[i].qhead);

	vm_page_queue_init(&vm_lopage_queue_free);
	vm_page_queue_init(&vm_page_queue_active);
	vm_page_queue_init(&vm_page_queue_inactive);
#if CONFIG_SECLUDED_MEMORY
	vm_page_queue_init(&vm_page_queue_secluded);
#endif /* CONFIG_SECLUDED_MEMORY */
	vm_page_queue_init(&vm_page_queue_cleaned);
	vm_page_queue_init(&vm_page_queue_throttled);
	vm_page_queue_init(&vm_page_queue_anonymous);
	queue_init(&vm_objects_wired);

	for ( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ ) {
		vm_page_queue_init(&vm_page_queue_speculative[i].age_q);

		vm_page_queue_speculative[i].age_ts.tv_sec = 0;
		vm_page_queue_speculative[i].age_ts.tv_nsec = 0;
	}
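
	/*
	 * Note: the block below sizes the background queue.  The default
	 * target works out to atop_64(max_mem) / 25, i.e. roughly 4% of the
	 * managed pages, clamped to VM_PAGE_BACKGROUND_TARGET_MAX, and the
	 * limit defaults to 256 pages above the target; these defaults can be
	 * overridden by the vm_page_bg_* boot-args parsed just after.
	 */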
#if CONFIG_BACKGROUND_QUEUE
	vm_page_queue_init(&vm_page_queue_background);

	vm_page_background_count = 0;
	vm_page_background_internal_count = 0;
	vm_page_background_external_count = 0;
	vm_page_background_promoted_count = 0;

	vm_page_background_target = (unsigned int)(atop_64(max_mem) / 25);

	if (vm_page_background_target > VM_PAGE_BACKGROUND_TARGET_MAX)
		vm_page_background_target = VM_PAGE_BACKGROUND_TARGET_MAX;
	vm_page_background_limit = vm_page_background_target + 256;

	vm_page_background_mode = VM_PAGE_BG_LEVEL_1;
	vm_page_background_exclude_external = 0;

	PE_parse_boot_argn("vm_page_bg_mode", &vm_page_background_mode, sizeof(vm_page_background_mode));
	PE_parse_boot_argn("vm_page_bg_exclude_external", &vm_page_background_exclude_external, sizeof(vm_page_background_exclude_external));
	PE_parse_boot_argn("vm_page_bg_target", &vm_page_background_target, sizeof(vm_page_background_target));
	PE_parse_boot_argn("vm_page_bg_limit", &vm_page_background_limit, sizeof(vm_page_background_limit));

	if (vm_page_background_mode > VM_PAGE_BG_LEVEL_3)
		vm_page_background_mode = VM_PAGE_BG_LEVEL_1;

	if (vm_page_background_limit <= vm_page_background_target)
		vm_page_background_limit = vm_page_background_target + 256;
#endif /* CONFIG_BACKGROUND_QUEUE */
	vm_page_free_wanted = 0;
	vm_page_free_wanted_privileged = 0;
#if CONFIG_SECLUDED_MEMORY
	vm_page_free_wanted_secluded = 0;
#endif /* CONFIG_SECLUDED_MEMORY */

	vm_page_set_colors();
	bzero(vm_page_inactive_states, sizeof(vm_page_inactive_states));
	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;

	bzero(vm_page_pageable_states, sizeof(vm_page_pageable_states));
	vm_page_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
	vm_page_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
	vm_page_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
	vm_page_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
	vm_page_pageable_states[VM_PAGE_ON_SPECULATIVE_Q] = 1;
	vm_page_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
#if CONFIG_SECLUDED_MEMORY
	vm_page_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
#endif /* CONFIG_SECLUDED_MEMORY */

	bzero(vm_page_non_speculative_pageable_states, sizeof(vm_page_non_speculative_pageable_states));
	vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
	vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
	vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
	vm_page_non_speculative_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
	vm_page_non_speculative_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
#if CONFIG_SECLUDED_MEMORY
	vm_page_non_speculative_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
#endif /* CONFIG_SECLUDED_MEMORY */

	bzero(vm_page_active_or_inactive_states, sizeof(vm_page_active_or_inactive_states));
	vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
	vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
	vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
	vm_page_active_or_inactive_states[VM_PAGE_ON_ACTIVE_Q] = 1;
#if CONFIG_SECLUDED_MEMORY
	vm_page_active_or_inactive_states[VM_PAGE_ON_SECLUDED_Q] = 1;
#endif /* CONFIG_SECLUDED_MEMORY */
	/*
	 *	Steal memory for the map and zone subsystems.
	 */
	kernel_debug_string_early("vm_map_steal_memory");
	vm_map_steal_memory();

	/*
	 *	Allocate (and initialize) the virtual-to-physical
	 *	table hash buckets.
	 *
	 *	The number of buckets should be a power of two to
	 *	get a good hash function.  The following computation
	 *	chooses the first power of two that is greater
	 *	than the number of physical pages in the system.
	 */

	if (vm_page_bucket_count == 0) {
		unsigned int npages = pmap_free_pages();

		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < npages)
			vm_page_bucket_count <<= 1;
	}
	vm_page_bucket_lock_count = (vm_page_bucket_count + BUCKETS_PER_LOCK - 1) / BUCKETS_PER_LOCK;

	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 *	Calculate object shift value for hashing algorithm:
	 *		O = log2(sizeof(struct vm_object))
	 *		B = log2(vm_page_bucket_count)
	 *		hash shifts the object left by
	 *		B/2 - O
	 */
	size = vm_page_bucket_count;
	for (log1 = 0; size > 1; log1++)
		size /= 2;
	size = sizeof(struct vm_object);
	for (log2 = 0; size > 1; log2++)
		size /= 2;
	vm_page_hash_shift = log1/2 - log2 + 1;

	vm_page_bucket_hash = 1 << ((log1 + 1) >> 1);	/* Get (ceiling of sqrt of table size) */
	vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2);	/* Get (ceiling of quadroot of table size) */
	vm_page_bucket_hash |= 1;			/* Set bit and add 1 - always must be 1 to ensure unique series */

	if (vm_page_hash_mask & vm_page_bucket_count)
		printf("vm_page_bootstrap: WARNING -- strange page hash\n");
#if VM_PAGE_BUCKETS_CHECK
#if VM_PAGE_FAKE_BUCKETS
	/*
	 * Allocate a decoy set of page buckets, to detect
	 * any stomping there.
	 */
	vm_page_fake_buckets = (vm_page_bucket_t *)
		pmap_steal_memory(vm_page_bucket_count *
				  sizeof(vm_page_bucket_t));
	vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets;
	vm_page_fake_buckets_end =
		vm_map_round_page((vm_page_fake_buckets_start +
				   (vm_page_bucket_count *
				    sizeof (vm_page_bucket_t))),
				  PAGE_MASK);
	char *cp;
	for (cp = (char *)vm_page_fake_buckets_start;
	     cp < (char *)vm_page_fake_buckets_end;
	     cp++) {
		*cp = 0x5a;
	}
#endif /* VM_PAGE_FAKE_BUCKETS */
#endif /* VM_PAGE_BUCKETS_CHECK */
	kernel_debug_string_early("vm_page_buckets");
	vm_page_buckets = (vm_page_bucket_t *)
		pmap_steal_memory(vm_page_bucket_count *
				  sizeof(vm_page_bucket_t));

	kernel_debug_string_early("vm_page_bucket_locks");
	vm_page_bucket_locks = (lck_spin_t *)
		pmap_steal_memory(vm_page_bucket_lock_count *
				  sizeof(lck_spin_t));

	for (i = 0; i < vm_page_bucket_count; i++) {
		vm_page_bucket_t *bucket = &vm_page_buckets[i];

		bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
#if	MACH_PAGE_HASH_STATS
		bucket->cur_count = 0;
		bucket->hi_count = 0;
#endif /* MACH_PAGE_HASH_STATS */
	}

	for (i = 0; i < vm_page_bucket_lock_count; i++)
		lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr);

	lck_spin_init(&vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
	lck_spin_init(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);

#if VM_PAGE_BUCKETS_CHECK
	vm_page_buckets_check_ready = TRUE;
#endif /* VM_PAGE_BUCKETS_CHECK */
	/*
	 *	Machine-dependent code allocates the resident page table.
	 *	It uses vm_page_init to initialize the page frames.
	 *	The code also returns to us the virtual space available
	 *	to the kernel.  We don't trust the pmap module
	 *	to get the alignment right.
	 */

	kernel_debug_string_early("pmap_startup");
	pmap_startup(&virtual_space_start, &virtual_space_end);
	virtual_space_start = round_page(virtual_space_start);
	virtual_space_end = trunc_page(virtual_space_end);

	*startp = virtual_space_start;
	*endp = virtual_space_end;

	/*
	 *	Compute the initial "wire" count.
	 *	Up until now, the pages which have been set aside are not under
	 *	the VM system's control, so although they aren't explicitly
	 *	wired, they nonetheless can't be moved. At this moment,
	 *	all VM managed pages are "free", courtesy of pmap_startup.
	 */
	assert((unsigned int) atop_64(max_mem) == atop_64(max_mem));
	vm_page_wire_count = ((unsigned int) atop_64(max_mem)) - vm_page_free_count - vm_lopage_free_count;	/* initial value */
#if CONFIG_SECLUDED_MEMORY
	vm_page_wire_count -= vm_page_secluded_count;
#endif /* CONFIG_SECLUDED_MEMORY */
	vm_page_wire_count_initial = vm_page_wire_count;
	vm_page_pages_initial = vm_page_pages;

	printf("vm_page_bootstrap: %d free pages and %d wired pages\n",
	       vm_page_free_count, vm_page_wire_count);

	kernel_debug_string_early("vm_page_bootstrap complete");
	simple_lock_init(&vm_paging_lock, 0);
}
#ifndef	MACHINE_PAGES
/*
 *	We implement pmap_steal_memory and pmap_startup with the help
 *	of two simpler functions, pmap_virtual_space and pmap_next_page.
 */

void *
pmap_steal_memory(
	vm_size_t size)
{
	vm_offset_t	addr, vaddr;
	ppnum_t		phys_page;

	/*
	 *	We round the size to a round multiple.
	 */

	size = (size + sizeof (void *) - 1) &~ (sizeof (void *) - 1);
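
	/*
	 * Illustrative note (not in the original source): the expression
	 * above rounds "size" up to a multiple of sizeof(void *); e.g. on
	 * LP64 (8-byte pointers) a request for 13 bytes becomes 16, so each
	 * caller's chunk starts pointer-aligned.
	 */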
	/*
	 *	If this is the first call to pmap_steal_memory,
	 *	we have to initialize ourself.
	 */

	if (virtual_space_start == virtual_space_end) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/*
		 *	The initial values must be aligned properly, and
		 *	we don't trust the pmap module to do it right.
		 */

		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);
	}

	/*
	 *	Allocate virtual memory for this request.
	 */

	addr = virtual_space_start;
	virtual_space_start += size;

	//kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size);	/* (TEST/DEBUG) */

	/*
	 *	Allocate and map physical pages to back new virtual pages.
	 */

	for (vaddr = round_page(addr);
	     vaddr < addr + size;
	     vaddr += PAGE_SIZE) {

		if (!pmap_next_page_hi(&phys_page))
			panic("pmap_steal_memory() size: 0x%llx\n", (uint64_t)size);

		/*
		 *	XXX Logically, these mappings should be wired,
		 *	but some pmap modules barf if they are.
		 */
#if defined(__LP64__)
		pmap_pre_expand(kernel_pmap, vaddr);
#endif

		pmap_enter(kernel_pmap, vaddr, phys_page,
			   VM_PROT_READ|VM_PROT_WRITE, VM_PROT_NONE,
			   VM_WIMG_USE_DEFAULT, FALSE);
		/*
		 * Account for newly stolen memory
		 */
		vm_page_wire_count++;
		vm_page_stolen_count++;
	}

	return (void *) addr;
}
#if CONFIG_SECLUDED_MEMORY
/* boot-args to control secluded memory */
unsigned int secluded_mem_mb = 0;	/* # of MBs of RAM to seclude */
int secluded_for_iokit = 1;		/* IOKit can use secluded memory */
int secluded_for_apps = 1;		/* apps can use secluded memory */
int secluded_for_filecache = 2;		/* filecache can use secluded memory */
int secluded_for_fbdp = 0;

int secluded_aging_policy = SECLUDED_AGING_BEFORE_ACTIVE;
#endif /* CONFIG_SECLUDED_MEMORY */
void vm_page_release_startup(vm_page_t mem);

void
pmap_startup(
	vm_offset_t *startp,
	vm_offset_t *endp)
{
	unsigned int	i, npages, pages_initialized, fill, fillval;
	ppnum_t		phys_page;
	addr64_t	tmpaddr;
#if defined(__LP64__)
	/*
	 * make sure we are aligned on a 64 byte boundary
	 * for VM_PAGE_PACK_PTR (it clips off the low-order
	 * 6 bits of the pointer)
	 */
	if (virtual_space_start != virtual_space_end)
		virtual_space_start = round_page(virtual_space_start);
#endif

	/*
	 *	We calculate how many page frames we will have
	 *	and then allocate the page structures in one chunk.
	 */

	tmpaddr = (addr64_t)pmap_free_pages() * (addr64_t)PAGE_SIZE;	/* Get the amount of memory left */
	tmpaddr = tmpaddr + (addr64_t)(round_page(virtual_space_start) - virtual_space_start);	/* Account for any slop */
	npages = (unsigned int)(tmpaddr / (addr64_t)(PAGE_SIZE + sizeof(*vm_pages)));	/* Figure size of all vm_page_ts, including enough to hold the vm_page_ts */

	vm_pages = (vm_page_t) pmap_steal_memory(npages * sizeof *vm_pages);
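
	/*
	 * Illustrative note (not in the original source): dividing by
	 * (PAGE_SIZE + sizeof(*vm_pages)) reserves room for the vm_page
	 * structure that describes each frame as well as the frame itself,
	 * so the array just carved out cannot overrun the memory actually
	 * remaining.
	 */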
	/*
	 *	Initialize the page frames.
	 */
	kernel_debug_string_early("Initialize the page frames");

	vm_page_array_beginning_addr = &vm_pages[0];
	vm_page_array_ending_addr = &vm_pages[npages];

	for (i = 0, pages_initialized = 0; i < npages; i++) {
		if (!pmap_next_page(&phys_page))
			break;
		if (pages_initialized == 0 || phys_page < vm_page_lowest)
			vm_page_lowest = phys_page;

		vm_page_init(&vm_pages[i], phys_page, FALSE);
		pages_initialized++;
	}
	vm_pages_count = pages_initialized;
#if defined(__LP64__)

	if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0]))) != &vm_pages[0])
		panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]);

	if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count-1]))) != &vm_pages[vm_pages_count-1])
		panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count-1]);
#endif

	kernel_debug_string_early("page fill/release");
	/*
	 * Check if we want to initialize pages to a known value
	 */
	fill = 0;								/* Assume no fill */
	if (PE_parse_boot_argn("fill", &fillval, sizeof (fillval))) fill = 1;	/* Set fill */
#if	DEBUG
	/* This slows down booting the DEBUG kernel, particularly on
	 * large memory systems, but is worthwhile in deterministically
	 * trapping uninitialized memory usage.
	 */
	if (fill == 0) {
		fill = 1;
		fillval = 0xDEB8F177;
	}
#endif
	if (fill)
		kprintf("Filling vm_pages with pattern: 0x%x\n", fillval);
#if CONFIG_SECLUDED_MEMORY
	/* default: no secluded mem */
	secluded_mem_mb = 0;
	if (max_mem > 1*1024*1024*1024) {
		/* default to 90MB for devices with > 1GB of RAM */
		secluded_mem_mb = 90;
	}
	/* override with value from device tree, if provided */
	PE_get_default("kern.secluded_mem_mb",
		       &secluded_mem_mb, sizeof(secluded_mem_mb));
	/* override with value from boot-args, if provided */
	PE_parse_boot_argn("secluded_mem_mb",
			   &secluded_mem_mb,
			   sizeof (secluded_mem_mb));

	vm_page_secluded_target = (unsigned int)
		((secluded_mem_mb * 1024ULL * 1024ULL) / PAGE_SIZE);
	PE_parse_boot_argn("secluded_for_iokit",
			   &secluded_for_iokit,
			   sizeof (secluded_for_iokit));
	PE_parse_boot_argn("secluded_for_apps",
			   &secluded_for_apps,
			   sizeof (secluded_for_apps));
	PE_parse_boot_argn("secluded_for_filecache",
			   &secluded_for_filecache,
			   sizeof (secluded_for_filecache));
	PE_parse_boot_argn("secluded_for_fbdp",
			   &secluded_for_fbdp,
			   sizeof (secluded_for_fbdp));
	PE_parse_boot_argn("secluded_aging_policy",
			   &secluded_aging_policy,
			   sizeof (secluded_aging_policy));
#endif /* CONFIG_SECLUDED_MEMORY */
	// -debug code remove
	if (2 == vm_himemory_mode) {
		// free low -> high so high is preferred
		for (i = 1; i <= pages_initialized; i++) {
			if(fill) fillPage(VM_PAGE_GET_PHYS_PAGE(&vm_pages[i - 1]), fillval);	/* Fill the page with a known value if requested at boot */
			vm_page_release_startup(&vm_pages[i - 1]);
		}
	}
	else
	// debug code remove-

	/*
	 * Release pages in reverse order so that physical pages
	 * initially get allocated in ascending addresses. This keeps
	 * the devices (which must address physical memory) happy if
	 * they require several consecutive pages.
	 */
	for (i = pages_initialized; i > 0; i--) {
		if(fill) fillPage(VM_PAGE_GET_PHYS_PAGE(&vm_pages[i - 1]), fillval);		/* Fill the page with a known value if requested at boot */
		vm_page_release_startup(&vm_pages[i - 1]);
	}

	VM_CHECK_MEMORYSTATUS;
#if 0
	{
	vm_page_t xx, xxo, xxl;
	int i, j, k, l;

	j = 0;							/* (BRINGUP) */
	xxl = 0;

	for( i = 0; i < vm_colors; i++ ) {
		queue_iterate(&vm_page_queue_free[i].qhead,
			      xx,
			      vm_page_t,
			      pageq) {				/* BRINGUP */
			j++;					/* (BRINGUP) */
			if(j > vm_page_free_count) {		/* (BRINGUP) */
				panic("pmap_startup: too many pages, xx = %08X, xxl = %08X\n", xx, xxl);
			}

			l = vm_page_free_count - j;		/* (BRINGUP) */
			k = 0;					/* (BRINGUP) */

			if(((j - 1) & 0xFFFF) == 0) kprintf("checking number %d of %d\n", j, vm_page_free_count);

			for(xxo = xx->pageq.next; xxo != &vm_page_queue_free[i].qhead; xxo = xxo->pageq.next) {	/* (BRINGUP) */
				k++;
				if(k > l) panic("pmap_startup: too many in secondary check %d %d\n", k, l);
				if((xx->phys_page & 0xFFFFFFFF) == (xxo->phys_page & 0xFFFFFFFF)) {	/* (BRINGUP) */
					panic("pmap_startup: duplicate physaddr, xx = %08X, xxo = %08X\n", xx, xxo);
				}
			}

			xxl = xx;
		}
	}

	if(j != vm_page_free_count) {				/* (BRINGUP) */
		panic("pmap_startup: vm_page_free_count does not match, calc = %d, vm_page_free_count = %08X\n", j, vm_page_free_count);
	}
	}
#endif
	/*
	 *	We have to re-align virtual_space_start,
	 *	because pmap_steal_memory has been using it.
	 */

	virtual_space_start = round_page(virtual_space_start);

	*startp = virtual_space_start;
	*endp = virtual_space_end;
}
#endif	/* MACHINE_PAGES */
/*
 *	Routine:	vm_page_module_init
 *
 *	Second initialization pass, to be done after
 *	the basic VM system is ready.
 */
void
vm_page_module_init(void)
{
	uint64_t	vm_page_zone_pages, vm_page_array_zone_data_size;
	vm_size_t	vm_page_with_ppnum_size;

	vm_page_array_zone = zinit((vm_size_t) sizeof(struct vm_page),
				   0, PAGE_SIZE, "vm pages array");

	zone_change(vm_page_array_zone, Z_CALLERACCT, FALSE);
	zone_change(vm_page_array_zone, Z_EXPAND, FALSE);
	zone_change(vm_page_array_zone, Z_EXHAUST, TRUE);
	zone_change(vm_page_array_zone, Z_FOREIGN, TRUE);
	zone_change(vm_page_array_zone, Z_GZALLOC_EXEMPT, TRUE);
	/*
	 * Adjust zone statistics to account for the real pages allocated
	 * in vm_page_create(). [Q: is this really what we want?]
	 */
	vm_page_array_zone->count += vm_page_pages;
	vm_page_array_zone->sum_count += vm_page_pages;
	vm_page_array_zone_data_size = vm_page_pages * vm_page_array_zone->elem_size;
	vm_page_array_zone->cur_size += vm_page_array_zone_data_size;
	vm_page_zone_pages = ((round_page(vm_page_array_zone_data_size)) / PAGE_SIZE);
	OSAddAtomic64(vm_page_zone_pages, &(vm_page_array_zone->page_count));
	/* since zone accounts for these, take them out of stolen */
	VM_PAGE_MOVE_STOLEN(vm_page_zone_pages);

	vm_page_with_ppnum_size = (sizeof(struct vm_page_with_ppnum) + (VM_PACKED_POINTER_ALIGNMENT-1)) & ~(VM_PACKED_POINTER_ALIGNMENT - 1);
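
	/*
	 * Illustrative note (not in the original source): the expression
	 * above rounds the element size up to a multiple of
	 * VM_PACKED_POINTER_ALIGNMENT, presumably so that every element of
	 * the zone created below lands on an alignment boundary whose
	 * low-order bits VM_PAGE_PACK_PTR can safely discard.
	 */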
	vm_page_zone = zinit(vm_page_with_ppnum_size,
			     0, PAGE_SIZE, "vm pages");

	zone_change(vm_page_zone, Z_CALLERACCT, FALSE);
	zone_change(vm_page_zone, Z_EXPAND, FALSE);
	zone_change(vm_page_zone, Z_EXHAUST, TRUE);
	zone_change(vm_page_zone, Z_FOREIGN, TRUE);
	zone_change(vm_page_zone, Z_GZALLOC_EXEMPT, TRUE);
}
/*
 *	Routine:	vm_page_create
 *
 *	After the VM system is up, machine-dependent code
 *	may stumble across more physical memory.  For example,
 *	memory that it was reserving for a frame buffer.
 *	vm_page_create turns this memory into available pages.
 */
void
vm_page_create(
	ppnum_t start,
	ppnum_t end)
{
	ppnum_t		phys_page;
	vm_page_t	m;

	for (phys_page = start;
	     phys_page < end;
	     phys_page++) {
		while ((m = (vm_page_t) vm_page_grab_fictitious_common(phys_page))
			== VM_PAGE_NULL)
			vm_page_more_fictitious();

		m->fictitious = FALSE;
		pmap_clear_noencrypt(phys_page);

		vm_page_release(m, FALSE);
	}
}
/*
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:	The bucket count must be a power of 2
 */
#define vm_page_hash(object, offset) (\
	( (natural_t)((uintptr_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
	& vm_page_hash_mask)
/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/object-page
 *	table and object list.
 *
 *	The object must be locked.
 */
void
vm_page_insert(
	vm_page_t		mem,
	vm_object_t		object,
	vm_object_offset_t	offset)
{
	vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, FALSE, FALSE, NULL);
}

void
vm_page_insert_wired(
	vm_page_t		mem,
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_tag_t		tag)
{
	vm_page_insert_internal(mem, object, offset, tag, FALSE, TRUE, FALSE, FALSE, NULL);
}
void
vm_page_insert_internal(
	vm_page_t		mem,
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_tag_t		tag,
	boolean_t		queues_lock_held,
	boolean_t		insert_in_hash,
	boolean_t		batch_pmap_op,
	boolean_t		batch_accounting,
	uint64_t		*delayed_ledger_update)
{
	vm_page_bucket_t	*bucket;
	lck_spin_t		*bucket_lock;
	int			hash_id;
	task_t			owner;

	XPR(XPR_VM_PAGE,
	    "vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
	    object, offset, mem, 0,0);

	/*
	 * we may not hold the page queue lock
	 * so this check isn't safe to make
	 */

	assert(page_aligned(offset));

	assert(!VM_PAGE_WIRED(mem) || mem->private || mem->fictitious || (tag != VM_KERN_MEMORY_NONE));

	/* the vm_submap_object is only a placeholder for submaps */
	assert(object != vm_submap_object);

	vm_object_lock_assert_exclusive(object);
	LCK_MTX_ASSERT(&vm_page_queue_lock,
		       queues_lock_held ? LCK_MTX_ASSERT_OWNED
					: LCK_MTX_ASSERT_NOTOWNED);
	if (queues_lock_held == FALSE)
		assert(!VM_PAGE_PAGEABLE(mem));
	if (insert_in_hash == TRUE) {
#if DEBUG || VM_PAGE_CHECK_BUCKETS
		if (mem->tabled || mem->vm_page_object)
			panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
			      "already in (obj=%p,off=0x%llx)",
			      mem, object, offset, VM_PAGE_OBJECT(mem), mem->offset);
#endif
		assert(!object->internal || offset < object->vo_size);
		assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);

		/*
		 * Record the object/offset pair in this page
		 */

		mem->vm_page_object = VM_PAGE_PACK_OBJECT(object);
		mem->offset = offset;

#if CONFIG_SECLUDED_MEMORY
		if (object->eligible_for_secluded) {
			vm_page_secluded.eligible_for_secluded++;
		}
#endif /* CONFIG_SECLUDED_MEMORY */

		/*
		 * Insert it into the object_object/offset hash table
		 */
		hash_id = vm_page_hash(object, offset);
		bucket = &vm_page_buckets[hash_id];
		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];

		lck_spin_lock(bucket_lock);

		mem->next_m = bucket->page_list;
		bucket->page_list = VM_PAGE_PACK_PTR(mem);
		assert(mem == (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)));

#if	MACH_PAGE_HASH_STATS
		if (++bucket->cur_count > bucket->hi_count)
			bucket->hi_count = bucket->cur_count;
#endif /* MACH_PAGE_HASH_STATS */
		lck_spin_unlock(bucket_lock);
	}
	{
		unsigned int	cache_attr;

		cache_attr = object->wimg_bits & VM_WIMG_MASK;

		if (cache_attr != VM_WIMG_USE_DEFAULT) {
			PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op);
		}
	}
	/*
	 *	Now link into the object's list of backed pages.
	 */
	vm_page_queue_enter(&object->memq, mem, vm_page_t, listq);
	object->memq_hint = mem;
	/*
	 *	Show that the object has one more resident page.
	 */

	object->resident_page_count++;
	if (VM_PAGE_WIRED(mem)) {
		assert(mem->wire_count > 0);

		if (!mem->private && !mem->fictitious) {
			if (!object->wired_page_count) {
				assert(VM_KERN_MEMORY_NONE != tag);
				object->wire_tag = tag;
				VM_OBJECT_WIRED(object);
			}
		}
		object->wired_page_count++;
	}
	assert(object->resident_page_count >= object->wired_page_count);

	if (batch_accounting == FALSE) {
		if (object->internal) {
			OSAddAtomic(1, &vm_page_internal_count);
		} else {
			OSAddAtomic(1, &vm_page_external_count);
		}
	}
	/*
	 * It wouldn't make sense to insert a "reusable" page in
	 * an object (the page would have been marked "reusable" only
	 * at the time of a madvise(MADV_FREE_REUSABLE) if it was already
	 * in the object at that time).
	 * But a page could be inserted in a "all_reusable" object, if
	 * something faults it in (a vm_read() from another task or a
	 * "use-after-free" issue in user space, for example). It can
	 * also happen if we're relocating a page from that object to
	 * a different physical page during a physically-contiguous
	 * allocation.
	 */
	assert(!mem->reusable);
	if (object->all_reusable) {
		OSAddAtomic(+1, &vm_page_stats_reusable.reusable_count);
	}
->purgable
== VM_PURGABLE_DENY
) {
1461 owner
= object
->vo_purgeable_owner
;
1464 (object
->purgable
== VM_PURGABLE_NONVOLATILE
||
1465 VM_PAGE_WIRED(mem
))) {
1467 if (delayed_ledger_update
)
1468 *delayed_ledger_update
+= PAGE_SIZE
;
1470 /* more non-volatile bytes */
1471 ledger_credit(owner
->ledger
,
1472 task_ledgers
.purgeable_nonvolatile
,
1474 /* more footprint */
1475 ledger_credit(owner
->ledger
,
1476 task_ledgers
.phys_footprint
,
1481 (object
->purgable
== VM_PURGABLE_VOLATILE
||
1482 object
->purgable
== VM_PURGABLE_EMPTY
)) {
1483 assert(! VM_PAGE_WIRED(mem
));
1484 /* more volatile bytes */
1485 ledger_credit(owner
->ledger
,
1486 task_ledgers
.purgeable_volatile
,
1490 if (object
->purgable
== VM_PURGABLE_VOLATILE
) {
1491 if (VM_PAGE_WIRED(mem
)) {
1492 OSAddAtomic(+1, &vm_page_purgeable_wired_count
);
1494 OSAddAtomic(+1, &vm_page_purgeable_count
);
1496 } else if (object
->purgable
== VM_PURGABLE_EMPTY
&&
1497 mem
->vm_page_q_state
== VM_PAGE_ON_THROTTLED_Q
) {
1499 * This page belongs to a purged VM object but hasn't
1500 * been purged (because it was "busy").
1501 * It's in the "throttled" queue and hence not
1502 * visible to vm_pageout_scan(). Move it to a pageable
1503 * queue, so that it can eventually be reclaimed, instead
1504 * of lingering in the "empty" object.
1506 if (queues_lock_held
== FALSE
)
1507 vm_page_lockspin_queues();
1508 vm_page_deactivate(mem
);
1509 if (queues_lock_held
== FALSE
)
1510 vm_page_unlock_queues();
#if VM_OBJECT_TRACKING_OP_MODIFIED
	if (vm_object_tracking_inited &&
	    object->resident_page_count == 0 &&
	    object->pager == NULL &&
	    object->shadow != NULL &&
	    object->shadow->copy == object) {
		void *bt[VM_OBJECT_TRACKING_BTDEPTH];
		int numsaved = 0;

		numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH);
		btlog_add_entry(vm_object_tracking_btlog,
				object,
				VM_OBJECT_TRACKING_OP_MODIFIED,
				bt,
				numsaved);
	}
#endif /* VM_OBJECT_TRACKING_OP_MODIFIED */
}
/*
 *	vm_page_replace:
 *
 *	Exactly like vm_page_insert, except that we first
 *	remove any existing page at the given offset in object.
 *
 *	The object must be locked.
 */
void
vm_page_replace(
	vm_page_t		mem,
	vm_object_t		object,
	vm_object_offset_t	offset)
{
	vm_page_bucket_t	*bucket;
	vm_page_t		found_m = VM_PAGE_NULL;
	lck_spin_t		*bucket_lock;
	int			hash_id;

	/*
	 * we don't hold the page queue lock
	 * so this check isn't safe to make
	 */

	vm_object_lock_assert_exclusive(object);
#if DEBUG || VM_PAGE_CHECK_BUCKETS
	if (mem->tabled || mem->vm_page_object)
		panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
		      "already in (obj=%p,off=0x%llx)",
		      mem, object, offset, VM_PAGE_OBJECT(mem), mem->offset);
#endif
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);

	assert(!VM_PAGE_PAGEABLE(mem));

	/*
	 *	Record the object/offset pair in this page
	 */
	mem->vm_page_object = VM_PAGE_PACK_OBJECT(object);
	mem->offset = offset;

	/*
	 *	Insert it into the object_object/offset hash table,
	 *	replacing any page that might have been there.
	 */

	hash_id = vm_page_hash(object, offset);
	bucket = &vm_page_buckets[hash_id];
	bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];

	lck_spin_lock(bucket_lock);

	if (bucket->page_list) {
		vm_page_packed_t *mp = &bucket->page_list;
		vm_page_t m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp));

		do {
			/*
			 * compare packed object pointers
			 */
			if (m->vm_page_object == mem->vm_page_object && m->offset == offset) {
				/*
				 * Remove old page from hash list
				 */
				*mp = m->next_m;
				m->next_m = VM_PAGE_PACK_PTR(NULL);

				found_m = m;
				break;
			}
			mp = &m->next_m;
		} while ((m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp))));

		mem->next_m = bucket->page_list;
	} else {
		mem->next_m = VM_PAGE_PACK_PTR(NULL);
	}
	/*
	 * insert new page at head of hash list
	 */
	bucket->page_list = VM_PAGE_PACK_PTR(mem);

	lck_spin_unlock(bucket_lock);

	if (found_m) {
		/*
		 * there was already a page at the specified
		 * offset for this object... remove it from
		 * the object and free it back to the free list
		 */
		vm_page_free_unlocked(found_m, FALSE);
	}
	vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, FALSE, FALSE, FALSE, NULL);
}
/*
 *	vm_page_remove:		[ internal use only ]
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object must be locked.
 */
void
vm_page_remove(
	vm_page_t	mem,
	boolean_t	remove_from_hash)
{
	vm_page_bucket_t	*bucket;
	vm_page_t		this;
	lck_spin_t		*bucket_lock;
	int			hash_id;
	task_t			owner;
	vm_object_t		m_object;

	m_object = VM_PAGE_OBJECT(mem);

	XPR(XPR_VM_PAGE,
	    "vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
	    m_object, mem->offset,
	    mem, 0,0);

	vm_object_lock_assert_exclusive(m_object);
	assert(mem->tabled);
	assert(!mem->cleaning);
	assert(!mem->laundry);

	if (VM_PAGE_PAGEABLE(mem)) {
		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	}

	/*
	 * we don't hold the page queue lock
	 * so this check isn't safe to make
	 */

	if (remove_from_hash == TRUE) {
		/*
		 *	Remove from the object_object/offset hash table
		 */
		hash_id = vm_page_hash(m_object, mem->offset);
		bucket = &vm_page_buckets[hash_id];
		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];

		lck_spin_lock(bucket_lock);

		if ((this = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list))) == mem) {
			/* optimize for common case */

			bucket->page_list = mem->next_m;
		} else {
			vm_page_packed_t	*prev;

			for (prev = &this->next_m;
			     (this = (vm_page_t)(VM_PAGE_UNPACK_PTR(*prev))) != mem;
			     prev = &this->next_m)
				continue;
			*prev = this->next_m;
		}
#if	MACH_PAGE_HASH_STATS
		bucket->cur_count--;
#endif /* MACH_PAGE_HASH_STATS */
		mem->hashed = FALSE;
		this->next_m = VM_PAGE_PACK_PTR(NULL);
		lck_spin_unlock(bucket_lock);
	}
	/*
	 *	Now remove from the object's list of backed pages.
	 */

	vm_page_remove_internal(mem);

	/*
	 *	And show that the object has one fewer resident
	 *	page.
	 */

	assert(m_object->resident_page_count > 0);
	m_object->resident_page_count--;

	if (m_object->internal) {
		assert(vm_page_internal_count);

		OSAddAtomic(-1, &vm_page_internal_count);
	} else {
		assert(vm_page_external_count);
		OSAddAtomic(-1, &vm_page_external_count);

		if (mem->xpmapped) {
			assert(vm_page_xpmapped_external_count);
			OSAddAtomic(-1, &vm_page_xpmapped_external_count);
		}
	}
	if (!m_object->internal && (m_object->objq.next || m_object->objq.prev)) {
		if (m_object->resident_page_count == 0)
			vm_object_cache_remove(m_object);
	}

	if (VM_PAGE_WIRED(mem)) {
		assert(mem->wire_count > 0);
		assert(m_object->wired_page_count > 0);
		m_object->wired_page_count--;
		if (!m_object->wired_page_count) {
			VM_OBJECT_UNWIRED(m_object);
		}
	}
	assert(m_object->resident_page_count >=
	       m_object->wired_page_count);
	if (mem->reusable) {
		assert(m_object->reusable_page_count > 0);
		m_object->reusable_page_count--;
		assert(m_object->reusable_page_count <=
		       m_object->resident_page_count);
		mem->reusable = FALSE;
		OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
		vm_page_stats_reusable.reused_remove++;
	} else if (m_object->all_reusable) {
		OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
		vm_page_stats_reusable.reused_remove++;
	}

	if (m_object->purgable == VM_PURGABLE_DENY) {
		owner = TASK_NULL;
	} else {
		owner = m_object->vo_purgeable_owner;
	}
	if (owner &&
	    (m_object->purgable == VM_PURGABLE_NONVOLATILE ||
	     VM_PAGE_WIRED(mem))) {
		/* less non-volatile bytes */
		ledger_debit(owner->ledger,
			     task_ledgers.purgeable_nonvolatile,
			     PAGE_SIZE);
		/* less footprint */
		ledger_debit(owner->ledger,
			     task_ledgers.phys_footprint,
			     PAGE_SIZE);
	} else if (owner &&
		   (m_object->purgable == VM_PURGABLE_VOLATILE ||
		    m_object->purgable == VM_PURGABLE_EMPTY)) {
		assert(! VM_PAGE_WIRED(mem));
		/* less volatile bytes */
		ledger_debit(owner->ledger,
			     task_ledgers.purgeable_volatile,
			     PAGE_SIZE);
	}
	if (m_object->purgable == VM_PURGABLE_VOLATILE) {
		if (VM_PAGE_WIRED(mem)) {
			assert(vm_page_purgeable_wired_count > 0);
			OSAddAtomic(-1, &vm_page_purgeable_wired_count);
		} else {
			assert(vm_page_purgeable_count > 0);
			OSAddAtomic(-1, &vm_page_purgeable_count);
		}
	}
	if (m_object->set_cache_attr == TRUE)
		pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), 0);

	mem->tabled = FALSE;
	mem->vm_page_object = 0;
	mem->offset = (vm_object_offset_t) -1;
}
/*
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, VM_PAGE_NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

#define	VM_PAGE_HASH_LOOKUP_THRESHOLD	10

#if DEBUG_VM_PAGE_LOOKUP

struct {
	uint64_t	vpl_total;
	uint64_t	vpl_empty_obj;
	uint64_t	vpl_bucket_NULL;
	uint64_t	vpl_hit_hint;
	uint64_t	vpl_hit_hint_next;
	uint64_t	vpl_hit_hint_prev;
	uint64_t	vpl_fast;
	uint64_t	vpl_slow;
	uint64_t	vpl_hit;
	uint64_t	vpl_miss;

	uint64_t	vpl_fast_elapsed;
	uint64_t	vpl_slow_elapsed;
} vm_page_lookup_stats __attribute__((aligned(8)));

#endif

#define KDP_VM_PAGE_WALK_MAX	1000
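
/*
 * Note: kdp_vm_page_lookup below is the debugger-context variant of
 * vm_page_lookup.  It walks the object's memq linearly, presumably because
 * the hash-bucket spin locks cannot be safely taken from the debugger, and
 * it gives up after KDP_VM_PAGE_WALK_MAX pages so the debugger cannot hang
 * on a corrupted list.
 */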
vm_page_t
kdp_vm_page_lookup(
	vm_object_t		object,
	vm_object_offset_t	offset)
{
	vm_page_t	cur_page;
	int		num_traversed = 0;

	if (not_in_kdp) {
		panic("panic: kdp_vm_page_lookup done outside of kernel debugger");
	}

	vm_page_queue_iterate(&object->memq, cur_page, vm_page_t, listq) {
		if (cur_page->offset == offset) {
			return cur_page;
		}
		num_traversed++;

		if (num_traversed >= KDP_VM_PAGE_WALK_MAX) {
			return VM_PAGE_NULL;
		}
	}

	return VM_PAGE_NULL;
}
vm_page_t
vm_page_lookup(
	vm_object_t		object,
	vm_object_offset_t	offset)
{
	vm_page_t		mem;
	vm_page_bucket_t	*bucket;
	vm_page_queue_entry_t	qe;
	lck_spin_t		*bucket_lock = NULL;
	int			hash_id;
#if DEBUG_VM_PAGE_LOOKUP
	uint64_t		start, elapsed;

	OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total);
#endif
	vm_object_lock_assert_held(object);

	if (object->resident_page_count == 0) {
#if DEBUG_VM_PAGE_LOOKUP
		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_empty_obj);
#endif
		return (VM_PAGE_NULL);
	}

	mem = object->memq_hint;

	if (mem != VM_PAGE_NULL) {
		assert(VM_PAGE_OBJECT(mem) == object);

		if (mem->offset == offset) {
#if DEBUG_VM_PAGE_LOOKUP
			OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint);
#endif
			return (mem);
		}
		qe = (vm_page_queue_entry_t)vm_page_queue_next(&mem->listq);

		if (! vm_page_queue_end(&object->memq, qe)) {
			vm_page_t	next_page;

			next_page = (vm_page_t)((uintptr_t)qe);
			assert(VM_PAGE_OBJECT(next_page) == object);

			if (next_page->offset == offset) {
				object->memq_hint = next_page; /* new hint */
#if DEBUG_VM_PAGE_LOOKUP
				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_next);
#endif
				return (next_page);
			}
		}
		qe = (vm_page_queue_entry_t)vm_page_queue_prev(&mem->listq);

		if (! vm_page_queue_end(&object->memq, qe)) {
			vm_page_t	prev_page;

			prev_page = (vm_page_t)((uintptr_t)qe);
			assert(VM_PAGE_OBJECT(prev_page) == object);

			if (prev_page->offset == offset) {
				object->memq_hint = prev_page; /* new hint */
#if DEBUG_VM_PAGE_LOOKUP
				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_prev);
#endif
				return (prev_page);
			}
		}
	}
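
	/*
	 * Note: the checks above try the cached memq_hint and its immediate
	 * neighbors on the object's memq before falling back to the hash
	 * table, which is cheap when lookups arrive in roughly sequential
	 * offset order.
	 */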
1933 * Search the hash table for this object/offset pair
1935 hash_id
= vm_page_hash(object
, offset
);
1936 bucket
= &vm_page_buckets
[hash_id
];
1939 * since we hold the object lock, we are guaranteed that no
1940 * new pages can be inserted into this object... this in turn
1941 * guarantess that the page we're looking for can't exist
1942 * if the bucket it hashes to is currently NULL even when looked
1943 * at outside the scope of the hash bucket lock... this is a
1944 * really cheap optimiztion to avoid taking the lock
1946 if (!bucket
->page_list
) {
1947 #if DEBUG_VM_PAGE_LOOKUP
1948 OSAddAtomic64(1, &vm_page_lookup_stats
.vpl_bucket_NULL
);
1950 return (VM_PAGE_NULL
);
1953 #if DEBUG_VM_PAGE_LOOKUP
1954 start
= mach_absolute_time();
1956 if (object
->resident_page_count
<= VM_PAGE_HASH_LOOKUP_THRESHOLD
) {
1958 * on average, it's roughly 3 times faster to run a short memq list
1959 * than to take the spin lock and go through the hash list
1961 mem
= (vm_page_t
)vm_page_queue_first(&object
->memq
);
1963 while (!vm_page_queue_end(&object
->memq
, (vm_page_queue_entry_t
)mem
)) {
1965 if (mem
->offset
== offset
)
1968 mem
= (vm_page_t
)vm_page_queue_next(&mem
->listq
);
1970 if (vm_page_queue_end(&object
->memq
, (vm_page_queue_entry_t
)mem
))
1973 vm_page_object_t packed_object
;
1975 packed_object
= VM_PAGE_PACK_OBJECT(object
);
1977 bucket_lock
= &vm_page_bucket_locks
[hash_id
/ BUCKETS_PER_LOCK
];
1979 lck_spin_lock(bucket_lock
);
1981 for (mem
= (vm_page_t
)(VM_PAGE_UNPACK_PTR(bucket
->page_list
));
1982 mem
!= VM_PAGE_NULL
;
1983 mem
= (vm_page_t
)(VM_PAGE_UNPACK_PTR(mem
->next_m
))) {
1986 * we don't hold the page queue lock
1987 * so this check isn't safe to make
1991 if ((mem
->vm_page_object
== packed_object
) && (mem
->offset
== offset
))
1994 lck_spin_unlock(bucket_lock
);
1997 #if DEBUG_VM_PAGE_LOOKUP
1998 elapsed
= mach_absolute_time() - start
;
2001 OSAddAtomic64(1, &vm_page_lookup_stats
.vpl_slow
);
2002 OSAddAtomic64(elapsed
, &vm_page_lookup_stats
.vpl_slow_elapsed
);
2004 OSAddAtomic64(1, &vm_page_lookup_stats
.vpl_fast
);
2005 OSAddAtomic64(elapsed
, &vm_page_lookup_stats
.vpl_fast_elapsed
);
2007 if (mem
!= VM_PAGE_NULL
)
2008 OSAddAtomic64(1, &vm_page_lookup_stats
.vpl_hit
);
2010 OSAddAtomic64(1, &vm_page_lookup_stats
.vpl_miss
);
2012 if (mem
!= VM_PAGE_NULL
) {
2013 assert(VM_PAGE_OBJECT(mem
) == object
);
2015 object
->memq_hint
= mem
;
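
/*
 * [Illustrative sketch -- not part of the original XNU source.]
 * The lookup above is three-tiered: check the per-object hint (and its
 * neighbors), then either walk the short resident list or take the hash
 * bucket spin lock.  The standalone userspace sketch below models that
 * decision; the names lookup_page(), obj_t, hash_lookup() and
 * LOOKUP_THRESHOLD are invented for illustration only.
 */
#if 0	/* standalone illustration; compile separately as ordinary C */
#include <stdio.h>
#include <stddef.h>

#define LOOKUP_THRESHOLD 16	/* analogous to VM_PAGE_HASH_LOOKUP_THRESHOLD */

typedef struct page {
	struct page	*next;		/* resident-list linkage */
	unsigned long	offset;
} page_t;

typedef struct obj {
	page_t		*resident;	/* short, unsorted resident list */
	page_t		*hint;		/* last page found in this object */
	int		resident_count;
} obj_t;

/* stand-in for the locked hash-bucket walk; here it just scans the list too */
static page_t *
hash_lookup(obj_t *o, unsigned long off)
{
	page_t	*p;

	for (p = o->resident; p; p = p->next)
		if (p->offset == off)
			return p;
	return NULL;
}

static page_t *
lookup_page(obj_t *o, unsigned long off)
{
	page_t	*p;

	/* tier 1: the hint is right most of the time for repeated lookups */
	if (o->hint && o->hint->offset == off)
		return o->hint;

	if (o->resident_count <= LOOKUP_THRESHOLD) {
		/* tier 2: a short linear walk beats taking the bucket lock */
		for (p = o->resident; p; p = p->next)
			if (p->offset == off)
				break;
	} else {
		/* tier 3: fall back to the (locked) hash buckets */
		p = hash_lookup(o, off);
	}
	if (p)
		o->hint = p;		/* remember for next time */
	return p;
}

int
main(void)
{
	page_t	p2 = { NULL, 2 }, p1 = { &p2, 1 }, p0 = { &p1, 0 };
	obj_t	o = { &p0, NULL, 3 };

	printf("found offset 2: %d\n", lookup_page(&o, 2) != NULL);
	printf("hint reused:    %d\n", lookup_page(&o, 2) == o.hint);
	return 0;
}
#endif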

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void
vm_page_rename(
	vm_page_t		mem,
	vm_object_t		new_object,
	vm_object_offset_t	new_offset,
	boolean_t		encrypted_ok)
{
	boolean_t	internal_to_external, external_to_internal;
	vm_tag_t	tag;
	vm_object_t	m_object;

	m_object = VM_PAGE_OBJECT(mem);

	assert(m_object != new_object);

	/*
	 * ENCRYPTED SWAP:
	 * The encryption key is based on the page's memory object
	 * (aka "pager") and paging offset.  Moving the page to
	 * another VM object changes its "pager" and "paging_offset"
	 * so it has to be decrypted first, or we would lose the key.
	 *
	 * One exception is VM object collapsing, where we transfer pages
	 * from one backing object to its parent object.  This operation also
	 * transfers the paging information, so the <pager,paging_offset> info
	 * should remain consistent.  The caller (vm_object_do_collapse())
	 * sets "encrypted_ok" in this case.
	 */
	if (!encrypted_ok && mem->encrypted) {
		panic("vm_page_rename: page %p is encrypted\n", mem);
	}

	XPR(XPR_VM_PAGE,
	    "vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n",
	    new_object, new_offset,
	    mem, 0, 0);

	/*
	 *	Changes to mem->object require the page lock because
	 *	the pageout daemon uses that lock to get the object.
	 */
	vm_page_lockspin_queues();

	internal_to_external = FALSE;
	external_to_internal = FALSE;

	if (mem->vm_page_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q) {
		/*
		 * it's much easier to get the vm_page_pageable_xxx accounting correct
		 * if we first move the page to the active queue... it's going to end
		 * up there anyway, and we don't do vm_page_rename's frequently enough
		 * for this to matter.
		 */
		vm_page_queues_remove(mem, FALSE);
		vm_page_activate(mem);
	}
	if (VM_PAGE_PAGEABLE(mem)) {
		if (m_object->internal && !new_object->internal) {
			internal_to_external = TRUE;
		}
		if (!m_object->internal && new_object->internal) {
			external_to_internal = TRUE;
		}
	}

	tag = m_object->wire_tag;
	vm_page_remove(mem, TRUE);
	vm_page_insert_internal(mem, new_object, new_offset, tag, TRUE, TRUE, FALSE, FALSE, NULL);

	if (internal_to_external) {
		vm_page_pageable_internal_count--;
		vm_page_pageable_external_count++;
	} else if (external_to_internal) {
		vm_page_pageable_external_count--;
		vm_page_pageable_internal_count++;
	}

	vm_page_unlock_queues();
}

/*
 *	vm_page_init:
 *
 *	Initialize the fields in a new page.
 *	This takes a structure with random values and initializes it
 *	so that it can be given to vm_page_release or vm_page_insert.
 */
void
vm_page_init(
	vm_page_t	mem,
	ppnum_t		phys_page,
	boolean_t	lopage)
{
	if ((phys_page != vm_page_fictitious_addr) && (phys_page != vm_page_guard_addr)) {
		if (!(pmap_valid_page(phys_page))) {
			panic("vm_page_init: non-DRAM phys_page 0x%x\n", phys_page);
		}
	}
	*mem = vm_page_template;

	VM_PAGE_SET_PHYS_PAGE(mem, phys_page);
#if 0
	/*
	 * we're leaving this turned off for now... currently pages
	 * come off the free list and are either immediately dirtied/referenced
	 * due to zero-fill or COW faults, or are used to read or write files...
	 * in the file I/O case, the UPL mechanism takes care of clearing
	 * the state of the HW ref/mod bits in a somewhat fragile way.
	 * Since we may change the way this works in the future (to toughen it up),
	 * I'm leaving this as a reminder of where these bits could get cleared
	 */

	/*
	 * make sure both the h/w referenced and modified bits are
	 * clear at this point... we are especially dependent on
	 * not finding a 'stale' h/w modified in a number of spots
	 * once this page goes back into use
	 */
	pmap_clear_refmod(phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
#endif
	mem->lopage = lopage;
}

/*
 *	vm_page_grab_fictitious:
 *
 *	Remove a fictitious page from the free list.
 *	Returns VM_PAGE_NULL if there are no free pages.
 */
int	c_vm_page_grab_fictitious = 0;
int	c_vm_page_grab_fictitious_failed = 0;
int	c_vm_page_release_fictitious = 0;
int	c_vm_page_more_fictitious = 0;

vm_page_t
vm_page_grab_fictitious_common(
	ppnum_t phys_addr)
{
	vm_page_t	m;

	if ((m = (vm_page_t)zget(vm_page_zone))) {

		vm_page_init(m, phys_addr, FALSE);
		m->fictitious = TRUE;

		c_vm_page_grab_fictitious++;
	} else
		c_vm_page_grab_fictitious_failed++;

	return m;
}

vm_page_t
vm_page_grab_fictitious(void)
{
	return vm_page_grab_fictitious_common(vm_page_fictitious_addr);
}

vm_page_t
vm_page_grab_guard(void)
{
	return vm_page_grab_fictitious_common(vm_page_guard_addr);
}

/*
 *	vm_page_release_fictitious:
 *
 *	Release a fictitious page to the zone pool
 */
void
vm_page_release_fictitious(
	vm_page_t m)
{
	assert((m->vm_page_q_state == VM_PAGE_NOT_ON_Q) || (m->vm_page_q_state == VM_PAGE_IS_WIRED));
	assert(m->fictitious);
	assert(VM_PAGE_GET_PHYS_PAGE(m) == vm_page_fictitious_addr ||
	       VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr);

	c_vm_page_release_fictitious++;

	zfree(vm_page_zone, m);
}

/*
 *	vm_page_more_fictitious:
 *
 *	Add more fictitious pages to the zone.
 *	Allowed to block. This routine is way intimate
 *	with the zones code, for several reasons:
 *	1. we need to carve some page structures out of physical
 *	   memory before zones work, so they _cannot_ come from
 *	   the zone restricted submap.
 *	2. the zone needs to be collectable in order to prevent
 *	   growth without bound. These structures are used by
 *	   the device pager (by the hundreds and thousands), as
 *	   private pages for pageout, and as blocking pages for
 *	   pagein. Temporary bursts in demand should not result in
 *	   permanent allocation of a resource.
 *	3. To smooth allocation humps, we allocate single pages
 *	   with kernel_memory_allocate(), and cram them into the
 *	   zone.
 */

void vm_page_more_fictitious(void)
{
	vm_offset_t	addr;
	kern_return_t	retval;

	c_vm_page_more_fictitious++;

	/*
	 * Allocate a single page from the zone_map. Do not wait if no physical
	 * pages are immediately available, and do not zero the space. We need
	 * our own blocking lock here to prevent having multiple,
	 * simultaneous requests from piling up on the zone_map lock. Exactly
	 * one (of our) threads should be potentially waiting on the map lock.
	 * If winner is not vm-privileged, then the page allocation will fail,
	 * and it will temporarily block here in the vm_page_wait().
	 */
	lck_mtx_lock(&vm_page_alloc_lock);
	/*
	 * If another thread allocated space, just bail out now.
	 */
	if (zone_free_count(vm_page_zone) > 5) {
		/*
		 * The number "5" is a small number that is larger than the
		 * number of fictitious pages that any single caller will
		 * attempt to allocate. Otherwise, a thread will attempt to
		 * acquire a fictitious page (vm_page_grab_fictitious), fail,
		 * release all of the resources and locks already acquired,
		 * and then call this routine. This routine finds the pages
		 * that the caller released, so fails to allocate new space.
		 * The process repeats infinitely. The largest known number
		 * of fictitious pages required in this manner is 2. 5 is
		 * simply a somewhat larger number.
		 */
		lck_mtx_unlock(&vm_page_alloc_lock);
		return;
	}

	retval = kernel_memory_allocate(zone_map,
					&addr, PAGE_SIZE, VM_PROT_ALL,
					KMA_KOBJECT|KMA_NOPAGEWAIT, VM_KERN_MEMORY_ZONE);
	if (retval != KERN_SUCCESS) {
		/*
		 * No page was available. Drop the
		 * lock to give another thread a chance at it, and
		 * wait for the pageout daemon to make progress.
		 */
		lck_mtx_unlock(&vm_page_alloc_lock);
		vm_page_wait(THREAD_UNINT);
		return;
	}

	zcram(vm_page_zone, addr, PAGE_SIZE);

	lck_mtx_unlock(&vm_page_alloc_lock);
}
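
/*
 * [Illustrative sketch -- not part of the original XNU source.]
 * The "> 5" check above is what breaks the grab/fail/refill/retry livelock:
 * the refiller bails out whenever the zone already holds more spare elements
 * than any single caller could have just released.  A minimal userspace model
 * of that pattern is sketched below; pool_refill(), pool_free_count and
 * REFILL_SKIP_THRESHOLD are hypothetical names.
 */
#if 0	/* standalone illustration; compile separately as ordinary C */
#include <stdio.h>

#define REFILL_SKIP_THRESHOLD	5	/* larger than any caller's transient release */

static int pool_free_count;		/* elements currently sitting in the pool */

/* returns 1 if new capacity was added, 0 if the caller should simply retry */
static int
pool_refill(void)
{
	if (pool_free_count > REFILL_SKIP_THRESHOLD) {
		/*
		 * Enough spares are already present (possibly the very ones
		 * the retrying caller just gave back); adding more would only
		 * keep re-discovering them forever.
		 */
		return 0;
	}
	pool_free_count += 8;		/* stand-in for cramming a fresh page into the zone */
	return 1;
}

int
main(void)
{
	pool_free_count = 2;
	printf("refilled: %d, free now %d\n", pool_refill(), pool_free_count);
	pool_free_count = 9;
	printf("refilled: %d, free now %d\n", pool_refill(), pool_free_count);
	return 0;
}
#endif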

/*
 *	vm_pool_low():
 *
 *	Return true if it is not likely that a non-vm_privileged thread
 *	can get memory without blocking.  Advisory only, since the
 *	situation may change under us.
 */
int
vm_pool_low(void)
{
	/* No locking, at worst we will fib. */
	return( vm_page_free_count <= vm_page_free_reserved );
}

#if CONFIG_BACKGROUND_QUEUE

void
vm_page_update_background_state(vm_page_t	mem)
{
	if (vm_page_background_mode == VM_PAGE_BG_DISABLED)
		return;

	if (mem->vm_page_in_background == FALSE)
		return;

#if BACKGROUNDQ_BASED_ON_QOS
	if (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_QOS) <= THREAD_QOS_LEGACY)
		return;
#else
	task_t	my_task;

	my_task = current_task();

	if (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG))
		return;
#endif
	vm_page_lockspin_queues();

	mem->vm_page_in_background = FALSE;
	vm_page_background_promoted_count++;

	vm_page_remove_from_backgroundq(mem);

	vm_page_unlock_queues();
}


void
vm_page_assign_background_state(vm_page_t	mem)
{
	if (vm_page_background_mode == VM_PAGE_BG_DISABLED)
		return;

#if BACKGROUNDQ_BASED_ON_QOS
	if (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_QOS) <= THREAD_QOS_LEGACY)
		mem->vm_page_in_background = TRUE;
	else
		mem->vm_page_in_background = FALSE;
#else
	task_t	my_task;

	my_task = current_task();

	mem->vm_page_in_background = proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG);
#endif
}


void
vm_page_remove_from_backgroundq(
	vm_page_t	mem)
{
	vm_object_t	m_object;

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	if (mem->vm_page_on_backgroundq) {
		vm_page_queue_remove(&vm_page_queue_background, mem, vm_page_t, vm_page_backgroundq);

		mem->vm_page_backgroundq.next = 0;
		mem->vm_page_backgroundq.prev = 0;
		mem->vm_page_on_backgroundq = FALSE;

		vm_page_background_count--;

		m_object = VM_PAGE_OBJECT(mem);

		if (m_object->internal)
			vm_page_background_internal_count--;
		else
			vm_page_background_external_count--;
	} else {
		assert(VM_PAGE_UNPACK_PTR(mem->vm_page_backgroundq.next) == (uintptr_t)NULL &&
		       VM_PAGE_UNPACK_PTR(mem->vm_page_backgroundq.prev) == (uintptr_t)NULL);
	}
}


void
vm_page_add_to_backgroundq(
	vm_page_t	mem,
	boolean_t	first)
{
	vm_object_t	m_object;

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	if (vm_page_background_mode == VM_PAGE_BG_DISABLED)
		return;

	if (mem->vm_page_on_backgroundq == FALSE) {

		m_object = VM_PAGE_OBJECT(mem);

		if (vm_page_background_exclude_external && !m_object->internal)
			return;

		if (first == TRUE)
			vm_page_queue_enter_first(&vm_page_queue_background, mem, vm_page_t, vm_page_backgroundq);
		else
			vm_page_queue_enter(&vm_page_queue_background, mem, vm_page_t, vm_page_backgroundq);
		mem->vm_page_on_backgroundq = TRUE;

		vm_page_background_count++;

		if (m_object->internal)
			vm_page_background_internal_count++;
		else
			vm_page_background_external_count++;
	}
}

#endif /* CONFIG_BACKGROUND_QUEUE */

/*
 * this is an interface to support bring-up of drivers
 * on platforms with physical memory > 4G...
 */
int		vm_himemory_mode = 2;


/*
 * this interface exists to support hardware controllers
 * incapable of generating DMAs with more than 32 bits
 * of address on platforms with physical memory > 4G...
 */
unsigned int		vm_lopages_allocated_q = 0;
unsigned int		vm_lopages_allocated_cpm_success = 0;
unsigned int		vm_lopages_allocated_cpm_failed = 0;
vm_page_queue_head_t	vm_lopage_queue_free __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));

vm_page_t
vm_page_grablo(void)
{
	vm_page_t	mem;

	if (vm_lopage_needed == FALSE)
		return (vm_page_grab());

	lck_mtx_lock_spin(&vm_page_queue_free_lock);

	if ( !vm_page_queue_empty(&vm_lopage_queue_free)) {
		vm_page_queue_remove_first(&vm_lopage_queue_free,
					   mem,
					   vm_page_t,
					   pageq);
		assert(vm_lopage_free_count);
		assert(mem->vm_page_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
		mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;

		vm_lopage_free_count--;
		vm_lopages_allocated_q++;

		if (vm_lopage_free_count < vm_lopage_lowater)
			vm_lopage_refill = TRUE;

		lck_mtx_unlock(&vm_page_queue_free_lock);

#if CONFIG_BACKGROUND_QUEUE
		vm_page_assign_background_state(mem);
#endif
	} else {
		lck_mtx_unlock(&vm_page_queue_free_lock);

		if (cpm_allocate(PAGE_SIZE, &mem, atop(0xffffffff), 0, FALSE, KMA_LOMEM) != KERN_SUCCESS) {

			lck_mtx_lock_spin(&vm_page_queue_free_lock);
			vm_lopages_allocated_cpm_failed++;
			lck_mtx_unlock(&vm_page_queue_free_lock);

			return (VM_PAGE_NULL);
		}
		assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);

		vm_page_lockspin_queues();

		mem->gobbled = FALSE;
		vm_page_gobble_count--;
		vm_page_wire_count--;

		vm_lopages_allocated_cpm_success++;
		vm_page_unlock_queues();
	}
	assert(!mem->pmapped);
	assert(!mem->wpmapped);
	assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));

	VM_PAGE_ZERO_PAGEQ_ENTRY(mem);

	return (mem);
}

/*
 *	vm_page_grab:
 *
 *	first try to grab a page from the per-cpu free list...
 *	this must be done while pre-emption is disabled... if
 *	a page is available, we're done...
 *	if no page is available, grab the vm_page_queue_free_lock
 *	and see if current number of free pages would allow us
 *	to grab at least 1... if not, return VM_PAGE_NULL as before...
 *	if there are pages available, disable preemption and
 *	recheck the state of the per-cpu free list... we could
 *	have been preempted and moved to a different cpu, or
 *	some other thread could have re-filled it... if still
 *	empty, figure out how many pages we can steal from the
 *	global free queue and move to the per-cpu queue...
 *	return 1 of these pages when done... only wakeup the
 *	pageout_scan thread if we moved pages from the global
 *	list... no need for the wakeup if we've satisfied the
 *	request from the per-cpu queue.
 */

#if CONFIG_SECLUDED_MEMORY
vm_page_t vm_page_grab_secluded(void);
#endif /* CONFIG_SECLUDED_MEMORY */

vm_page_t
vm_page_grab(void)
{
	return vm_page_grab_options(0);
}

vm_page_t
vm_page_grab_options(
	int grab_options)
{
	vm_page_t	mem;

	disable_preemption();

	if ((mem = PROCESSOR_DATA(current_processor(), free_pages))) {
return_page_from_cpu_list:
		assert(mem->vm_page_q_state == VM_PAGE_ON_FREE_LOCAL_Q);

		PROCESSOR_DATA(current_processor(), page_grab_count) += 1;
		PROCESSOR_DATA(current_processor(), free_pages) = mem->snext;

		enable_preemption();
		VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
		mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;

		assert(mem->listq.next == 0 && mem->listq.prev == 0);
		assert(mem->tabled == FALSE);
		assert(mem->vm_page_object == 0);
		assert(!mem->laundry);
		assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem)));

		assert(!mem->encrypted);
		assert(!mem->pmapped);
		assert(!mem->wpmapped);
		assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));

#if CONFIG_BACKGROUND_QUEUE
		vm_page_assign_background_state(mem);
#endif
		return mem;
	}
	enable_preemption();

	/*
	 *	Optionally produce warnings if the wire or gobble
	 *	counts exceed some threshold.
	 */
#if VM_PAGE_WIRE_COUNT_WARNING
	if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) {
		printf("mk: vm_page_grab(): high wired page count of %d\n",
			vm_page_wire_count);
	}
#endif
#if VM_PAGE_GOBBLE_COUNT_WARNING
	if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) {
		printf("mk: vm_page_grab(): high gobbled page count of %d\n",
			vm_page_gobble_count);
	}
#endif

	lck_mtx_lock_spin(&vm_page_queue_free_lock);

	/*
	 *	Only let privileged threads (involved in pageout)
	 *	dip into the reserved pool.
	 */
	if ((vm_page_free_count < vm_page_free_reserved) &&
	    !(current_thread()->options & TH_OPT_VMPRIV)) {
		/* no page for us in the free queue... */
		lck_mtx_unlock(&vm_page_queue_free_lock);
		mem = VM_PAGE_NULL;

#if CONFIG_SECLUDED_MEMORY
		/* ... but can we try and grab from the secluded queue? */
		if (vm_page_secluded_count > 0 &&
		    ((grab_options & VM_PAGE_GRAB_SECLUDED) ||
		     task_can_use_secluded_mem(current_task()))) {
			mem = vm_page_grab_secluded();
			if (grab_options & VM_PAGE_GRAB_SECLUDED) {
				vm_page_secluded.grab_for_iokit++;
				if (mem) {
					vm_page_secluded.grab_for_iokit_success++;
				}
			}
			if (mem) {
				VM_CHECK_MEMORYSTATUS;
				return mem;
			}
		}
#else /* CONFIG_SECLUDED_MEMORY */
		(void) grab_options;
#endif /* CONFIG_SECLUDED_MEMORY */
		return VM_PAGE_NULL;
	} else {
		vm_page_t	head;
		vm_page_t	tail;
		unsigned int	pages_to_steal;
		unsigned int	color;

		while ( vm_page_free_count == 0 ) {

			lck_mtx_unlock(&vm_page_queue_free_lock);
			/*
			 * must be a privileged thread to be
			 * in this state since a non-privileged
			 * thread would have bailed if we were
			 * under the vm_page_free_reserved mark
			 */
			VM_PAGE_WAIT();
			lck_mtx_lock_spin(&vm_page_queue_free_lock);
		}

		disable_preemption();

		if ((mem = PROCESSOR_DATA(current_processor(), free_pages))) {
			lck_mtx_unlock(&vm_page_queue_free_lock);

			/*
			 * we got preempted and moved to another processor
			 * or we got preempted and someone else ran and filled the cache
			 */
			goto return_page_from_cpu_list;
		}
		if (vm_page_free_count <= vm_page_free_reserved)
			pages_to_steal = 1;
		else {
			if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved))
				pages_to_steal = vm_free_magazine_refill_limit;
			else
				pages_to_steal = (vm_page_free_count - vm_page_free_reserved);
		}
		color = PROCESSOR_DATA(current_processor(), start_color);
		head = tail = NULL;

		vm_page_free_count -= pages_to_steal;

		while (pages_to_steal--) {

			while (vm_page_queue_empty(&vm_page_queue_free[color].qhead))
				color = (color + 1) & vm_color_mask;

			vm_page_queue_remove_first(&vm_page_queue_free[color].qhead,
						   mem,
						   vm_page_t,
						   pageq);
			assert(mem->vm_page_q_state == VM_PAGE_ON_FREE_Q);

			VM_PAGE_ZERO_PAGEQ_ENTRY(mem);

			color = (color + 1) & vm_color_mask;

			if (head == NULL)
				head = mem;
			else
				tail->snext = mem;
			tail = mem;

			assert(mem->listq.next == 0 && mem->listq.prev == 0);
			assert(mem->tabled == FALSE);
			assert(mem->vm_page_object == 0);
			assert(!mem->laundry);

			mem->vm_page_q_state = VM_PAGE_ON_FREE_LOCAL_Q;

			assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem)));

			assert(!mem->encrypted);
			assert(!mem->pmapped);
			assert(!mem->wpmapped);
			assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
		}
		lck_mtx_unlock(&vm_page_queue_free_lock);

		PROCESSOR_DATA(current_processor(), free_pages) = head->snext;
		PROCESSOR_DATA(current_processor(), start_color) = color;

		/*
		 * satisfy this request
		 */
		PROCESSOR_DATA(current_processor(), page_grab_count) += 1;
		mem = head;
		assert(mem->vm_page_q_state == VM_PAGE_ON_FREE_LOCAL_Q);

		VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
		mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;

		enable_preemption();
	}
	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */
	if ((vm_page_free_count < vm_page_free_min) ||
	    ((vm_page_free_count < vm_page_free_target) &&
	     ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_min)))
		thread_wakeup((event_t) &vm_page_free_wanted);
#if CONFIG_BACKGROUND_QUEUE
	if (vm_page_background_mode == VM_PAGE_BG_LEVEL_3 && (vm_page_background_count > vm_page_background_limit))
		thread_wakeup((event_t) &vm_page_free_wanted);
#endif

	VM_CHECK_MEMORYSTATUS;

//	dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 4);	/* (TEST/DEBUG) */

#if CONFIG_BACKGROUND_QUEUE
	vm_page_assign_background_state(mem);
#endif
	return mem;
}
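
/*
 * [Illustrative sketch -- not part of the original XNU source.]
 * vm_page_grab_options() above is a classic per-CPU magazine allocator:
 * take from the local free list with preemption disabled, and only when it
 * is empty steal a small batch from the global (locked) queue to refill it.
 * The standalone sketch below shows that shape with hypothetical names
 * (cpu_cache_t, grab_one, REFILL_BATCH); it is not the kernel API.
 */
#if 0	/* standalone illustration; compile separately as ordinary C */
#include <stdio.h>
#include <stdlib.h>

#define REFILL_BATCH	4	/* analogous to vm_free_magazine_refill_limit */

typedef struct elem {
	struct elem	*next;
} elem_t;

typedef struct cpu_cache {
	elem_t	*free_list;	/* only touched with preemption disabled in the real thing */
} cpu_cache_t;

static elem_t	*global_free;		/* protected by a lock in a real allocator */
static int	 global_free_count;

static elem_t *
grab_one(cpu_cache_t *cc)
{
	elem_t	*e, *head = NULL, *tail = NULL;
	int	 n;

	if ((e = cc->free_list)) {		/* fast path: local magazine */
		cc->free_list = e->next;
		return e;
	}
	/* slow path: steal a small batch from the global queue */
	for (n = 0; n < REFILL_BATCH && global_free; n++) {
		e = global_free;
		global_free = e->next;
		global_free_count--;
		e->next = NULL;
		if (head == NULL)
			head = e;
		else
			tail->next = e;
		tail = e;
	}
	if (head == NULL)
		return NULL;			/* global queue exhausted */
	cc->free_list = head->next;		/* keep the rest, hand back one */
	return head;
}

int
main(void)
{
	cpu_cache_t	cc = { NULL };
	int		i;

	for (i = 0; i < 8; i++) {		/* seed the global queue */
		elem_t *e = malloc(sizeof(*e));
		e->next = global_free;
		global_free = e;
		global_free_count++;
	}
	for (i = 0; i < 6; i++)
		printf("grab %d -> %p (global left %d)\n",
		       i, (void *)grab_one(&cc), global_free_count);
	return 0;
}
#endif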

#if CONFIG_SECLUDED_MEMORY
vm_page_t
vm_page_grab_secluded(void)
{
	vm_page_t	mem;
	vm_object_t	object;
	int		refmod_state;

	if (vm_page_secluded_count == 0) {
		/* no secluded pages to grab... */
		return VM_PAGE_NULL;
	}

	/* secluded queue is protected by the VM page queue lock */
	vm_page_lock_queues();

	if (vm_page_secluded_count == 0) {
		/* no secluded pages to grab... */
		vm_page_unlock_queues();
		return VM_PAGE_NULL;
	}

	/* can we grab from the secluded queue? */
	if (vm_page_secluded_count > vm_page_secluded_target ||
	    (vm_page_secluded_count > 0 &&
	     task_can_use_secluded_mem(current_task()))) {
		/* OK */
	} else {
		/* can't grab from secluded queue... */
		vm_page_unlock_queues();
		return VM_PAGE_NULL;
	}

	/* we can grab a page from secluded queue! */
	assert((vm_page_secluded_count_free +
		vm_page_secluded_count_inuse) ==
	       vm_page_secluded_count);
	if (current_task()->task_can_use_secluded_mem) {
		assert(num_tasks_can_use_secluded_mem > 0);
	}
	assert(!vm_page_queue_empty(&vm_page_queue_secluded));
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	mem = vm_page_queue_first(&vm_page_queue_secluded);
	assert(mem->vm_page_q_state == VM_PAGE_ON_SECLUDED_Q);
	vm_page_queues_remove(mem, TRUE);

	object = VM_PAGE_OBJECT(mem);

	assert(!mem->fictitious);
	assert(!VM_PAGE_WIRED(mem));
	if (object == VM_OBJECT_NULL) {
		/* free for grab! */
		vm_page_unlock_queues();
		vm_page_secluded.grab_success_free++;

		assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
		assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
		assert(mem->pageq.next == 0);
		assert(mem->pageq.prev == 0);
		assert(mem->listq.next == 0);
		assert(mem->listq.prev == 0);
#if CONFIG_BACKGROUND_QUEUE
		assert(mem->vm_page_on_backgroundq == 0);
		assert(mem->vm_page_backgroundq.next == 0);
		assert(mem->vm_page_backgroundq.prev == 0);
#endif /* CONFIG_BACKGROUND_QUEUE */
		return mem;
	}

	assert(!object->internal);
//	vm_page_pageable_external_count--;

	if (!vm_object_lock_try(object)) {
//		printf("SECLUDED: page %p: object %p locked\n", mem, object);
		vm_page_secluded.grab_failure_locked++;
	reactivate_secluded_page:
		vm_page_activate(mem);
		vm_page_unlock_queues();
		return VM_PAGE_NULL;
	}
	if (mem->busy ||
	    mem->cleaning ||
	    mem->laundry) {
		/* can't steal page in this state... */
		vm_object_unlock(object);
		vm_page_secluded.grab_failure_state++;
		goto reactivate_secluded_page;
	}

	refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
	if (refmod_state & VM_MEM_REFERENCED) {
		mem->reference = TRUE;
	}
	if (refmod_state & VM_MEM_MODIFIED) {
		SET_PAGE_DIRTY(mem, FALSE);
	}
	if (mem->dirty || mem->precious) {
		/* can't grab a dirty page; re-activate */
//		printf("SECLUDED: dirty page %p\n", mem);
		PAGE_WAKEUP_DONE(mem);
		vm_page_secluded.grab_failure_dirty++;
		vm_object_unlock(object);
		goto reactivate_secluded_page;
	}
	if (mem->reference) {
		/* it's been used but we do need to grab a page... */
	}

	vm_page_unlock_queues();

	/* finish what vm_page_free() would have done... */
	vm_page_free_prepare_object(mem, TRUE);
	vm_object_unlock(object);
	object = VM_OBJECT_NULL;
	if (vm_page_free_verify) {
		assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem)));
	}
	pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
	vm_page_secluded.grab_success_other++;

	assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
	assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
	assert(mem->pageq.next == 0);
	assert(mem->pageq.prev == 0);
	assert(mem->listq.next == 0);
	assert(mem->listq.prev == 0);
#if CONFIG_BACKGROUND_QUEUE
	assert(mem->vm_page_on_backgroundq == 0);
	assert(mem->vm_page_backgroundq.next == 0);
	assert(mem->vm_page_backgroundq.prev == 0);
#endif /* CONFIG_BACKGROUND_QUEUE */

	return mem;
}
#endif /* CONFIG_SECLUDED_MEMORY */

/*
 *	vm_page_release:
 *
 *	Return a page to the free list.
 */
void
vm_page_release(
	vm_page_t	mem,
	boolean_t	page_queues_locked)
{
	unsigned int	color;
	int		need_wakeup = 0;
	int		need_priv_wakeup = 0;
#if CONFIG_SECLUDED_MEMORY
	int		need_secluded_wakeup = 0;
#endif /* CONFIG_SECLUDED_MEMORY */

	if (page_queues_locked) {
		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	} else {
		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
	}

	assert(!mem->private && !mem->fictitious);
	if (vm_page_free_verify) {
		assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem)));
	}
//	dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 5);	/* (TEST/DEBUG) */

	pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));

	lck_mtx_lock_spin(&vm_page_queue_free_lock);

	assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);

	assert(!mem->laundry);
	assert(mem->vm_page_object == 0);
	assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
	assert(mem->listq.next == 0 && mem->listq.prev == 0);
#if CONFIG_BACKGROUND_QUEUE
	assert(mem->vm_page_backgroundq.next == 0 &&
	       mem->vm_page_backgroundq.prev == 0 &&
	       mem->vm_page_on_backgroundq == FALSE);
#endif
	if ((mem->lopage == TRUE || vm_lopage_refill == TRUE) &&
	    vm_lopage_free_count < vm_lopage_free_limit &&
	    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
		/*
		 * this exists to support hardware controllers
		 * incapable of generating DMAs with more than 32 bits
		 * of address on platforms with physical memory > 4G...
		 */
		vm_page_queue_enter_first(&vm_lopage_queue_free,
					  mem,
					  vm_page_t,
					  pageq);
		vm_lopage_free_count++;

		if (vm_lopage_free_count >= vm_lopage_free_limit)
			vm_lopage_refill = FALSE;

		mem->vm_page_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
#if CONFIG_SECLUDED_MEMORY
	} else if (vm_page_free_count > vm_page_free_reserved &&
		   vm_page_secluded_count < vm_page_secluded_target &&
		   num_tasks_can_use_secluded_mem == 0) {
		/*
		 * XXX FBDP TODO: also avoid refilling secluded queue
		 * when some IOKit objects are already grabbing from it...
		 */
		if (!page_queues_locked) {
			if (!vm_page_trylock_queues()) {
				/* take locks in right order */
				lck_mtx_unlock(&vm_page_queue_free_lock);
				vm_page_lock_queues();
				lck_mtx_lock_spin(&vm_page_queue_free_lock);
			}
		}
		mem->lopage = FALSE;
		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
		vm_page_queue_enter_first(&vm_page_queue_secluded,
					  mem,
					  vm_page_t,
					  pageq);
		mem->vm_page_q_state = VM_PAGE_ON_SECLUDED_Q;
		vm_page_secluded_count++;
		vm_page_secluded_count_free++;
		if (!page_queues_locked) {
			vm_page_unlock_queues();
		}
		LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_OWNED);
		if (vm_page_free_wanted_secluded > 0) {
			vm_page_free_wanted_secluded--;
			need_secluded_wakeup = 1;
		}
#endif /* CONFIG_SECLUDED_MEMORY */
	} else {
		mem->lopage = FALSE;
		mem->vm_page_q_state = VM_PAGE_ON_FREE_Q;

		color = VM_PAGE_GET_PHYS_PAGE(mem) & vm_color_mask;
		vm_page_queue_enter_first(&vm_page_queue_free[color].qhead,
					  mem,
					  vm_page_t,
					  pageq);
		vm_page_free_count++;
		/*
		 *	Check if we should wake up someone waiting for page.
		 *	But don't bother waking them unless they can allocate.
		 *
		 *	We wakeup only one thread, to prevent starvation.
		 *	Because the scheduling system handles wait queues FIFO,
		 *	if we wakeup all waiting threads, one greedy thread
		 *	can starve multiple niceguy threads.  When the threads
		 *	all wakeup, the greedy threads runs first, grabs the page,
		 *	and waits for another page.  It will be the first to run
		 *	when the next page is freed.
		 *
		 *	However, there is a slight danger here.
		 *	The thread we wake might not use the free page.
		 *	Then the other threads could wait indefinitely
		 *	while the page goes unused.  To forestall this,
		 *	the pageout daemon will keep making free pages
		 *	as long as vm_page_free_wanted is non-zero.
		 */

		assert(vm_page_free_count > 0);
		if (vm_page_free_wanted_privileged > 0) {
			vm_page_free_wanted_privileged--;
			need_priv_wakeup = 1;
#if CONFIG_SECLUDED_MEMORY
		} else if (vm_page_free_wanted_secluded > 0 &&
			   vm_page_free_count > vm_page_free_reserved) {
			vm_page_free_wanted_secluded--;
			need_secluded_wakeup = 1;
#endif /* CONFIG_SECLUDED_MEMORY */
		} else if (vm_page_free_wanted > 0 &&
			   vm_page_free_count > vm_page_free_reserved) {
			vm_page_free_wanted--;
			need_wakeup = 1;
		}
	}
	lck_mtx_unlock(&vm_page_queue_free_lock);

	if (need_priv_wakeup)
		thread_wakeup_one((event_t) &vm_page_free_wanted_privileged);
#if CONFIG_SECLUDED_MEMORY
	else if (need_secluded_wakeup)
		thread_wakeup_one((event_t) &vm_page_free_wanted_secluded);
#endif /* CONFIG_SECLUDED_MEMORY */
	else if (need_wakeup)
		thread_wakeup_one((event_t) &vm_page_free_count);

	VM_CHECK_MEMORYSTATUS;
}
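
/*
 * [Illustrative sketch -- not part of the original XNU source.]
 * The policy above hands each freed page to exactly one waiter
 * (thread_wakeup_one) instead of waking everybody, so a FIFO wait queue
 * cannot be monopolized by one greedy thread.  The pthread sketch below
 * models the same wake-one choice with pthread_cond_signal(); all names
 * here are illustrative.
 */
#if 0	/* standalone illustration; compile separately with -lpthread */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t	lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	page_freed = PTHREAD_COND_INITIALIZER;
static int		free_pages;

static void *
waiter(void *arg)
{
	pthread_mutex_lock(&lock);
	while (free_pages == 0)
		pthread_cond_wait(&page_freed, &lock);
	free_pages--;			/* consume the page we were handed */
	pthread_mutex_unlock(&lock);
	printf("waiter %ld got a page\n", (long)arg);
	return NULL;
}

static void
release_one_page(void)
{
	pthread_mutex_lock(&lock);
	free_pages++;
	/* wake ONE waiter per page, not all of them */
	pthread_cond_signal(&page_freed);
	pthread_mutex_unlock(&lock);
}

int
main(void)
{
	pthread_t	t[3];
	long		i;

	for (i = 0; i < 3; i++)
		pthread_create(&t[i], NULL, waiter, (void *)i);
	for (i = 0; i < 3; i++)
		release_one_page();
	for (i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}
#endif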

/*
 * This version of vm_page_release() is used only at startup
 * when we are single-threaded and pages are being released
 * for the first time. Hence, no locking or unnecessary checks are made.
 * Note: VM_CHECK_MEMORYSTATUS invoked by the caller.
 */
void
vm_page_release_startup(
	vm_page_t	mem)
{
	vm_page_queue_t	queue_free;

	if (vm_lopage_free_count < vm_lopage_free_limit &&
	    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
		mem->lopage = TRUE;
		mem->vm_page_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
		vm_lopage_free_count++;
		queue_free = &vm_lopage_queue_free;
#if CONFIG_SECLUDED_MEMORY
	} else if (vm_page_secluded_count < vm_page_secluded_target) {
		mem->lopage = FALSE;
		mem->vm_page_q_state = VM_PAGE_ON_SECLUDED_Q;
		vm_page_secluded_count++;
		vm_page_secluded_count_free++;
		queue_free = &vm_page_queue_secluded;
#endif /* CONFIG_SECLUDED_MEMORY */
	} else {
		mem->lopage = FALSE;
		mem->vm_page_q_state = VM_PAGE_ON_FREE_Q;
		vm_page_free_count++;
		queue_free = &vm_page_queue_free[VM_PAGE_GET_PHYS_PAGE(mem) & vm_color_mask].qhead;
	}
	vm_page_queue_enter_first(queue_free, mem, vm_page_t, pageq);
}

/*
 *	vm_page_wait:
 *
 *	Wait for a page to become available.
 *	If there are plenty of free pages, then we don't sleep.
 *
 *	Returns:
 *		TRUE:  There may be another page, try again
 *		FALSE: We were interrupted out of our wait, don't try again
 */

boolean_t
vm_page_wait(
	int	interruptible )
{
	/*
	 *	We can't use vm_page_free_reserved to make this
	 *	determination.  Consider: some thread might
	 *	need to allocate two pages.  The first allocation
	 *	succeeds, the second fails.  After the first page is freed,
	 *	a call to vm_page_wait must really block.
	 */
	kern_return_t	wait_result;
	int		need_wakeup = 0;
	int		is_privileged = current_thread()->options & TH_OPT_VMPRIV;

	lck_mtx_lock_spin(&vm_page_queue_free_lock);

	if (is_privileged && vm_page_free_count) {
		lck_mtx_unlock(&vm_page_queue_free_lock);
		return TRUE;
	}

	if (vm_page_free_count >= vm_page_free_target) {
		lck_mtx_unlock(&vm_page_queue_free_lock);
		return TRUE;
	}

	if (is_privileged) {
		if (vm_page_free_wanted_privileged++ == 0)
			need_wakeup = 1;
		wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, interruptible);
#if CONFIG_SECLUDED_MEMORY
	} else if (secluded_for_apps &&
		   task_can_use_secluded_mem(current_task())) {
		/* XXX FBDP: need pageq lock for this... */
		/* XXX FBDP: might wait even if pages available, */
		/* XXX FBDP: hopefully not for too long... */
		if (vm_page_secluded_count > 0) {
			lck_mtx_unlock(&vm_page_queue_free_lock);
			return TRUE;
		}
		if (vm_page_free_wanted_secluded++ == 0) {
			need_wakeup = 1;
		}
		wait_result = assert_wait(
			(event_t)&vm_page_free_wanted_secluded,
			interruptible);
#endif /* CONFIG_SECLUDED_MEMORY */
	} else {
		if (vm_page_free_wanted++ == 0)
			need_wakeup = 1;
		wait_result = assert_wait((event_t)&vm_page_free_count,
					  interruptible);
	}

	lck_mtx_unlock(&vm_page_queue_free_lock);
	counter(c_vm_page_wait_block++);

	if (need_wakeup)
		thread_wakeup((event_t)&vm_page_free_wanted);

	if (wait_result == THREAD_WAITING) {
		VM_DEBUG_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
			       vm_page_free_wanted_privileged,
			       vm_page_free_wanted,
#if CONFIG_SECLUDED_MEMORY
			       vm_page_free_wanted_secluded,
#else /* CONFIG_SECLUDED_MEMORY */
			       0,
#endif /* CONFIG_SECLUDED_MEMORY */
			       0);
		wait_result = thread_block(THREAD_CONTINUE_NULL);
		VM_DEBUG_EVENT(vm_page_wait_block,
			       VM_PAGE_WAIT_BLOCK, DBG_FUNC_END, 0, 0, 0, 0);
	}

	return (wait_result == THREAD_AWAKENED);
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */

vm_page_t
vm_page_alloc(
	vm_object_t		object,
	vm_object_offset_t	offset)
{
	vm_page_t	mem;
	int		grab_options;

	vm_object_lock_assert_exclusive(object);
	grab_options = 0;
#if CONFIG_SECLUDED_MEMORY
	if (object->can_grab_secluded) {
		grab_options |= VM_PAGE_GRAB_SECLUDED;
	}
#endif /* CONFIG_SECLUDED_MEMORY */
	mem = vm_page_grab_options(grab_options);
	if (mem == VM_PAGE_NULL)
		return VM_PAGE_NULL;

	vm_page_insert(mem, object, offset);

	return(mem);
}

/*
 *	vm_page_alloc_guard:
 *
 *	Allocate a fictitious page which will be used
 *	as a guard page.  The page will be inserted into
 *	the object and returned to the caller.
 */

vm_page_t
vm_page_alloc_guard(
	vm_object_t		object,
	vm_object_offset_t	offset)
{
	vm_page_t	mem;

	vm_object_lock_assert_exclusive(object);
	mem = vm_page_grab_guard();
	if (mem == VM_PAGE_NULL)
		return VM_PAGE_NULL;

	vm_page_insert(mem, object, offset);

	return(mem);
}


counter(unsigned int c_laundry_pages_freed = 0;)

/*
 *	vm_page_free_prepare:
 *
 *	Removes page from any queue it may be on
 *	and disassociates it from its VM object.
 *
 *	Object and page queues must be locked prior to entry.
 */
static void
vm_page_free_prepare(
	vm_page_t	mem)
{
	vm_page_free_prepare_queues(mem);
	vm_page_free_prepare_object(mem, TRUE);
}


void
vm_page_free_prepare_queues(
	vm_page_t	mem)
{
	vm_object_t	m_object;

	assert(mem->vm_page_q_state != VM_PAGE_ON_FREE_Q);
	assert(!mem->cleaning);
	m_object = VM_PAGE_OBJECT(mem);

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	if (m_object) {
		vm_object_lock_assert_exclusive(m_object);
	}
	if (mem->laundry) {
		/*
		 * We may have to free a page while it's being laundered
		 * if we lost its pager (due to a forced unmount, for example).
		 * We need to call vm_pageout_steal_laundry() before removing
		 * the page from its VM object, so that we can remove it
		 * from its pageout queue and adjust the laundry accounting
		 */
		vm_pageout_steal_laundry(mem, TRUE);
		counter(++c_laundry_pages_freed);
	}

	vm_page_queues_remove(mem, TRUE);

	if (VM_PAGE_WIRED(mem)) {
		assert(mem->wire_count > 0);

		if (m_object) {
			assert(m_object->wired_page_count > 0);
			m_object->wired_page_count--;
			if (!m_object->wired_page_count) {
				VM_OBJECT_UNWIRED(m_object);
			}

			assert(m_object->resident_page_count >=
			       m_object->wired_page_count);

			if (m_object->purgable == VM_PURGABLE_VOLATILE) {
				OSAddAtomic(+1, &vm_page_purgeable_count);
				assert(vm_page_purgeable_wired_count > 0);
				OSAddAtomic(-1, &vm_page_purgeable_wired_count);
			}
			if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
			     m_object->purgable == VM_PURGABLE_EMPTY) &&
			    m_object->vo_purgeable_owner != TASK_NULL) {
				task_t	owner;

				owner = m_object->vo_purgeable_owner;
				/*
				 * While wired, this page was accounted
				 * as "non-volatile" but it should now
				 * be accounted as "volatile".
				 */
				/* one less "non-volatile"... */
				ledger_debit(owner->ledger,
					     task_ledgers.purgeable_nonvolatile,
					     PAGE_SIZE);
				/* ... and "phys_footprint" */
				ledger_debit(owner->ledger,
					     task_ledgers.phys_footprint,
					     PAGE_SIZE);
				/* one more "volatile" */
				ledger_credit(owner->ledger,
					      task_ledgers.purgeable_volatile,
					      PAGE_SIZE);
			}
		}
		if (!mem->private && !mem->fictitious)
			vm_page_wire_count--;

		mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;
		mem->wire_count = 0;
		assert(!mem->gobbled);
	} else if (mem->gobbled) {
		if (!mem->private && !mem->fictitious)
			vm_page_wire_count--;
		vm_page_gobble_count--;
	}
}

void
vm_page_free_prepare_object(
	vm_page_t	mem,
	boolean_t	remove_from_hash)
{
	if (mem->tabled)
		vm_page_remove(mem, remove_from_hash);	/* clears tabled, object, offset */

	PAGE_WAKEUP(mem);		/* clears wanted */

	if (mem->private) {
		mem->private = FALSE;
		mem->fictitious = TRUE;
		VM_PAGE_SET_PHYS_PAGE(mem, vm_page_fictitious_addr);
	}
	if ( !mem->fictitious) {
		assert(mem->pageq.next == 0);
		assert(mem->pageq.prev == 0);
		assert(mem->listq.next == 0);
		assert(mem->listq.prev == 0);
#if CONFIG_BACKGROUND_QUEUE
		assert(mem->vm_page_backgroundq.next == 0);
		assert(mem->vm_page_backgroundq.prev == 0);
#endif /* CONFIG_BACKGROUND_QUEUE */
		assert(mem->next_m == 0);
		vm_page_init(mem, VM_PAGE_GET_PHYS_PAGE(mem), mem->lopage);
	}
}

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it with any VM object.
 *
 *	Object and page queues must be locked prior to entry.
 */
void
vm_page_free(
	vm_page_t	mem)
{
	vm_page_free_prepare(mem);

	if (mem->fictitious) {
		vm_page_release_fictitious(mem);
	} else {
		vm_page_release(mem,
				TRUE);	/* page queues are locked */
	}
}


void
vm_page_free_unlocked(
	vm_page_t	mem,
	boolean_t	remove_from_hash)
{
	vm_page_lockspin_queues();
	vm_page_free_prepare_queues(mem);
	vm_page_unlock_queues();

	vm_page_free_prepare_object(mem, remove_from_hash);

	if (mem->fictitious) {
		vm_page_release_fictitious(mem);
	} else {
		vm_page_release(mem, FALSE); /* page queues are not locked */
	}
}

/*
 *	Free a list of pages.  The list can be up to several hundred pages,
 *	as blocked up by vm_pageout_scan().
 *	The big win is not having to take the free list lock once
 *	per page.
 *
 *	The VM page queues lock (vm_page_queue_lock) should NOT be held.
 *	The VM page free queues lock (vm_page_queue_free_lock) should NOT be held.
 */
void
vm_page_free_list(
	vm_page_t	freeq,
	boolean_t	prepare_object)
{
	vm_page_t	mem;
	vm_page_t	nxt;
	vm_page_t	local_freeq;
	int		pg_count;

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
	LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_NOTOWNED);

	while (freeq) {

		pg_count = 0;
		local_freeq = VM_PAGE_NULL;
		mem = freeq;

		/*
		 * break up the processing into smaller chunks so
		 * that we can 'pipeline' the pages onto the
		 * free list w/o introducing too much
		 * contention on the global free queue lock
		 */
		while (mem && pg_count < 64) {

			assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
#if CONFIG_BACKGROUND_QUEUE
			assert(mem->vm_page_backgroundq.next == 0 &&
			       mem->vm_page_backgroundq.prev == 0 &&
			       mem->vm_page_on_backgroundq == FALSE);
#endif
			nxt = mem->snext;
			mem->snext = NULL;
			assert(mem->pageq.prev == 0);

			if (vm_page_free_verify && !mem->fictitious && !mem->private) {
				assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem)));
			}
			if (prepare_object == TRUE)
				vm_page_free_prepare_object(mem, TRUE);

			if (!mem->fictitious) {

				if ((mem->lopage == TRUE || vm_lopage_refill == TRUE) &&
				    vm_lopage_free_count < vm_lopage_free_limit &&
				    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
					vm_page_release(mem, FALSE); /* page queues are not locked */
#if CONFIG_SECLUDED_MEMORY
				} else if (vm_page_secluded_count < vm_page_secluded_target &&
					   num_tasks_can_use_secluded_mem == 0) {
					vm_page_release(mem,
							FALSE); /* page queues are not locked */
#endif /* CONFIG_SECLUDED_MEMORY */
				} else {
					/*
					 * IMPORTANT: we can't set the page "free" here
					 * because that would make the page eligible for
					 * a physically-contiguous allocation (see
					 * vm_page_find_contiguous()) right away (we don't
					 * hold the vm_page_queue_free lock).  That would
					 * cause trouble because the page is not actually
					 * in the free queue yet...
					 */
					mem->snext = local_freeq;
					local_freeq = mem;
					pg_count++;

					pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
				}
			} else {
				assert(VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_fictitious_addr ||
				       VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_guard_addr);
				vm_page_release_fictitious(mem);
			}
			mem = nxt;
		}
		freeq = mem;

		if ( (mem = local_freeq) ) {
			unsigned int	avail_free_count;
			unsigned int	need_wakeup = 0;
			unsigned int	need_priv_wakeup = 0;
#if CONFIG_SECLUDED_MEMORY
			unsigned int	need_wakeup_secluded = 0;
#endif /* CONFIG_SECLUDED_MEMORY */

			lck_mtx_lock_spin(&vm_page_queue_free_lock);

			while (mem) {
				int	color;

				nxt = mem->snext;

				assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);

				mem->lopage = FALSE;
				mem->vm_page_q_state = VM_PAGE_ON_FREE_Q;

				color = VM_PAGE_GET_PHYS_PAGE(mem) & vm_color_mask;
				vm_page_queue_enter_first(&vm_page_queue_free[color].qhead,
							  mem,
							  vm_page_t,
							  pageq);
				mem = nxt;
			}
			vm_page_free_count += pg_count;
			avail_free_count = vm_page_free_count;

			if (vm_page_free_wanted_privileged > 0 && avail_free_count > 0) {

				if (avail_free_count < vm_page_free_wanted_privileged) {
					need_priv_wakeup = avail_free_count;
					vm_page_free_wanted_privileged -= avail_free_count;
					avail_free_count = 0;
				} else {
					need_priv_wakeup = vm_page_free_wanted_privileged;
					avail_free_count -= vm_page_free_wanted_privileged;
					vm_page_free_wanted_privileged = 0;
				}
			}
#if CONFIG_SECLUDED_MEMORY
			if (vm_page_free_wanted_secluded > 0 &&
			    avail_free_count > vm_page_free_reserved) {
				unsigned int available_pages;
				available_pages = (avail_free_count -
						   vm_page_free_reserved);
				if (available_pages <
				    vm_page_free_wanted_secluded) {
					need_wakeup_secluded = available_pages;
					vm_page_free_wanted_secluded -=
						available_pages;
					avail_free_count -= available_pages;
				} else {
					need_wakeup_secluded =
						vm_page_free_wanted_secluded;
					avail_free_count -=
						vm_page_free_wanted_secluded;
					vm_page_free_wanted_secluded = 0;
				}
			}
#endif /* CONFIG_SECLUDED_MEMORY */
			if (vm_page_free_wanted > 0 && avail_free_count > vm_page_free_reserved) {
				unsigned int  available_pages;

				available_pages = avail_free_count - vm_page_free_reserved;

				if (available_pages >= vm_page_free_wanted) {
					need_wakeup = vm_page_free_wanted;
					vm_page_free_wanted = 0;
				} else {
					need_wakeup = available_pages;
					vm_page_free_wanted -= available_pages;
				}
			}
			lck_mtx_unlock(&vm_page_queue_free_lock);

			if (need_priv_wakeup != 0) {
				/*
				 * There shouldn't be that many VM-privileged threads,
				 * so let's wake them all up, even if we don't quite
				 * have enough pages to satisfy them all.
				 */
				thread_wakeup((event_t)&vm_page_free_wanted_privileged);
			}
#if CONFIG_SECLUDED_MEMORY
			if (need_wakeup_secluded != 0 &&
			    vm_page_free_wanted_secluded == 0) {
				thread_wakeup((event_t)
					      &vm_page_free_wanted_secluded);
			} else {
				for (;
				     need_wakeup_secluded != 0;
				     need_wakeup_secluded--) {
					thread_wakeup_one(
						(event_t)
						&vm_page_free_wanted_secluded);
				}
			}
#endif /* CONFIG_SECLUDED_MEMORY */
			if (need_wakeup != 0 && vm_page_free_wanted == 0) {
				/*
				 * We don't expect to have any more waiters
				 * after this, so let's wake them all up at
				 * once.
				 */
				thread_wakeup((event_t) &vm_page_free_count);
			} else for (; need_wakeup != 0; need_wakeup--) {
				/*
				 * Wake up one waiter per page we just released.
				 */
				thread_wakeup_one((event_t) &vm_page_free_count);
			}

			VM_CHECK_MEMORYSTATUS;
		}
	}
}
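
/*
 * [Illustrative sketch -- not part of the original XNU source.]
 * vm_page_free_list() above batches pages onto a private 'local_freeq'
 * chain and then takes the global free-queue lock once per chunk of up to
 * 64 pages.  The sketch below shows that lock-amortization pattern in
 * plain C with hypothetical names (free_batch, CHUNK); it is not the
 * kernel code path.
 */
#if 0	/* standalone illustration; compile separately with -lpthread */
#include <pthread.h>
#include <stdio.h>

#define CHUNK	64	/* publish the private chain after this many items */

typedef struct node {
	struct node *next;
} node_t;

static pthread_mutex_t	global_lock = PTHREAD_MUTEX_INITIALIZER;
static node_t		*global_free;
static unsigned		 global_free_count;

/* free a whole NULL-terminated chain, taking the lock once per CHUNK */
static void
free_batch(node_t *list)
{
	while (list) {
		node_t		*local = NULL;
		unsigned	 n = 0;

		/* build a private chain without holding any lock */
		while (list && n < CHUNK) {
			node_t *nxt = list->next;
			list->next = local;
			local = list;
			list = nxt;
			n++;
		}
		/* one lock round-trip publishes the whole chunk */
		pthread_mutex_lock(&global_lock);
		while (local) {
			node_t *nxt = local->next;
			local->next = global_free;
			global_free = local;
			local = nxt;
		}
		global_free_count += n;
		pthread_mutex_unlock(&global_lock);
	}
}

int
main(void)
{
	static node_t	nodes[100];
	node_t		*chain = NULL;
	int		i;

	for (i = 0; i < 100; i++) {	/* build a chain of 100 nodes */
		nodes[i].next = chain;
		chain = &nodes[i];
	}
	free_batch(chain);
	printf("global free count: %u\n", global_free_count);
	return 0;
}
#endif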

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page's object and the page queues must be locked.
 */
void
vm_page_wire(
	vm_page_t	mem,
	vm_tag_t	tag,
	boolean_t	check_memorystatus)
{
	vm_object_t	m_object;

	m_object = VM_PAGE_OBJECT(mem);

//	dbgLog(current_thread(), mem->offset, m_object, 1);	/* (TEST/DEBUG) */

	if (m_object) {
		vm_object_lock_assert_exclusive(m_object);
	} else {
		/*
		 * In theory, the page should be in an object before it
		 * gets wired, since we need to hold the object lock
		 * to update some fields in the page structure.
		 * However, some code (i386 pmap, for example) might want
		 * to wire a page before it gets inserted into an object.
		 * That's somewhat OK, as long as nobody else can get to
		 * that page and update it at the same time.
		 */
	}
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	if ( !VM_PAGE_WIRED(mem)) {

		if (mem->laundry)
			vm_pageout_steal_laundry(mem, TRUE);

		vm_page_queues_remove(mem, TRUE);

		assert(mem->wire_count == 0);
		mem->vm_page_q_state = VM_PAGE_IS_WIRED;

		if (m_object) {
			if (!mem->private && !mem->fictitious) {
				if (!m_object->wired_page_count) {
					assert(VM_KERN_MEMORY_NONE != tag);
					m_object->wire_tag = tag;
					VM_OBJECT_WIRED(m_object);
				}
			}
			m_object->wired_page_count++;

			assert(m_object->resident_page_count >=
			       m_object->wired_page_count);
			if (m_object->purgable == VM_PURGABLE_VOLATILE) {
				assert(vm_page_purgeable_count > 0);
				OSAddAtomic(-1, &vm_page_purgeable_count);
				OSAddAtomic(1, &vm_page_purgeable_wired_count);
			}
			if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
			     m_object->purgable == VM_PURGABLE_EMPTY) &&
			    m_object->vo_purgeable_owner != TASK_NULL) {
				task_t	owner;

				owner = m_object->vo_purgeable_owner;
				/* less volatile bytes */
				ledger_debit(owner->ledger,
					     task_ledgers.purgeable_volatile,
					     PAGE_SIZE);
				/* more not-quite-volatile bytes */
				ledger_credit(owner->ledger,
					      task_ledgers.purgeable_nonvolatile,
					      PAGE_SIZE);
				/* more footprint */
				ledger_credit(owner->ledger,
					      task_ledgers.phys_footprint,
					      PAGE_SIZE);
			}
			if (m_object->all_reusable) {
				/*
				 * Wired pages are not counted as "re-usable"
				 * in "all_reusable" VM objects, so nothing
				 * to do here.
				 */
			} else if (mem->reusable) {
				/*
				 * This page is not "re-usable" when it's
				 * wired, so adjust its state and the
				 * accounting.
				 */
				vm_object_reuse_pages(m_object,
						      mem->offset,
						      mem->offset+PAGE_SIZE_64,
						      FALSE);
			}
		}
		assert(!mem->reusable);

		if (!mem->private && !mem->fictitious && !mem->gobbled)
			vm_page_wire_count++;
		if (mem->gobbled)
			vm_page_gobble_count--;
		mem->gobbled = FALSE;

		if (check_memorystatus == TRUE) {
			VM_CHECK_MEMORYSTATUS;
		}
		/*
		 * ENCRYPTED SWAP:
		 * The page could be encrypted, but
		 * We don't have to decrypt it here
		 * because we don't guarantee that the
		 * data is actually valid at this point.
		 * The page will get decrypted in
		 * vm_fault_wire() if needed.
		 */
	}
	assert(!mem->gobbled);
	assert(mem->vm_page_q_state == VM_PAGE_IS_WIRED);
	mem->wire_count++;
	if (__improbable(mem->wire_count == 0)) {
		panic("vm_page_wire(%p): wire_count overflow", mem);
	}
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page's object and the page queues must be locked.
 */
void
vm_page_unwire(
	vm_page_t	mem,
	boolean_t	queueit)
{
	vm_object_t	m_object;

	m_object = VM_PAGE_OBJECT(mem);

//	dbgLog(current_thread(), mem->offset, m_object, 0);	/* (TEST/DEBUG) */

	assert(VM_PAGE_WIRED(mem));
	assert(mem->wire_count > 0);
	assert(!mem->gobbled);
	assert(m_object != VM_OBJECT_NULL);
	vm_object_lock_assert_exclusive(m_object);
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	if (--mem->wire_count == 0) {

		mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;

		if (!mem->private && !mem->fictitious) {
			vm_page_wire_count--;
		}
		assert(m_object->wired_page_count > 0);
		m_object->wired_page_count--;
		if (!m_object->wired_page_count) {
			VM_OBJECT_UNWIRED(m_object);
		}
		assert(m_object->resident_page_count >=
		       m_object->wired_page_count);
		if (m_object->purgable == VM_PURGABLE_VOLATILE) {
			OSAddAtomic(+1, &vm_page_purgeable_count);
			assert(vm_page_purgeable_wired_count > 0);
			OSAddAtomic(-1, &vm_page_purgeable_wired_count);
		}
		if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
		     m_object->purgable == VM_PURGABLE_EMPTY) &&
		    m_object->vo_purgeable_owner != TASK_NULL) {
			task_t	owner;

			owner = m_object->vo_purgeable_owner;
			/* more volatile bytes */
			ledger_credit(owner->ledger,
				      task_ledgers.purgeable_volatile,
				      PAGE_SIZE);
			/* less not-quite-volatile bytes */
			ledger_debit(owner->ledger,
				     task_ledgers.purgeable_nonvolatile,
				     PAGE_SIZE);
			/* less footprint */
			ledger_debit(owner->ledger,
				     task_ledgers.phys_footprint,
				     PAGE_SIZE);
		}
		assert(m_object != kernel_object);
		assert(mem->pageq.next == 0 && mem->pageq.prev == 0);

		if (queueit == TRUE) {
			if (m_object->purgable == VM_PURGABLE_EMPTY) {
				vm_page_deactivate(mem);
			} else {
				vm_page_activate(mem);
			}
		}

		VM_CHECK_MEMORYSTATUS;
	}
}

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(
	vm_page_t	m)
{
	vm_page_deactivate_internal(m, TRUE);
}


void
vm_page_deactivate_internal(
	vm_page_t	m,
	boolean_t	clear_hw_reference)
{
	vm_object_t	m_object;

	m_object = VM_PAGE_OBJECT(m);

	assert(m_object != kernel_object);
	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);

//	dbgLog(VM_PAGE_GET_PHYS_PAGE(m), vm_page_free_count, vm_page_wire_count, 6);	/* (TEST/DEBUG) */
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	/*
	 *	This page is no longer very interesting.  If it was
	 *	interesting (active or inactive/referenced), then we
	 *	clear the reference bit and (re)enter it in the
	 *	inactive queue.  Note wired pages should not have
	 *	their reference bit cleared.
	 */
	assert ( !(m->absent && !m->unusual));

	if (m->gobbled) {		/* can this happen? */
		assert( !VM_PAGE_WIRED(m));

		if (!m->private && !m->fictitious)
			vm_page_wire_count--;
		vm_page_gobble_count--;
		m->gobbled = FALSE;
	}
	/*
	 * if this page is currently on the pageout queue, we can't do the
	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
	 * and we can't remove it manually since we would need the object lock
	 * (which is not required here) to decrement the activity_in_progress
	 * reference which is held on the object while the page is in the pageout queue...
	 * just let the normal laundry processing proceed
	 */
	if (m->laundry || m->private || m->fictitious ||
	    (m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
	    (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
	    VM_PAGE_WIRED(m)) {
		return;
	}
	if (!m->absent && clear_hw_reference == TRUE)
		pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));

	m->reference = FALSE;
	m->no_cache = FALSE;

	if ( !VM_PAGE_INACTIVE(m)) {
		vm_page_queues_remove(m, FALSE);

		if (!VM_DYNAMIC_PAGING_ENABLED() &&
		    m->dirty && m_object->internal &&
		    (m_object->purgable == VM_PURGABLE_DENY ||
		     m_object->purgable == VM_PURGABLE_NONVOLATILE ||
		     m_object->purgable == VM_PURGABLE_VOLATILE)) {
			vm_page_check_pageable_safe(m);
			vm_page_queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
			m->vm_page_q_state = VM_PAGE_ON_THROTTLED_Q;
			vm_page_throttled_count++;
		} else {
			if (m_object->named && m_object->ref_count == 1) {
				vm_page_speculate(m, FALSE);
#if DEVELOPMENT || DEBUG
				vm_page_speculative_recreated++;
#endif
			} else {
				vm_page_enqueue_inactive(m, FALSE);
			}
		}
	}
}

/*
 * vm_page_enqueue_cleaned
 *
 * Put the page on the cleaned queue, mark it cleaned, etc.
 * Being on the cleaned queue (and having m->clean_queue set)
 * does ** NOT ** guarantee that the page is clean!
 *
 * Call with the queues lock held.
 */

void vm_page_enqueue_cleaned(vm_page_t m)
{
	vm_object_t	m_object;

	m_object = VM_PAGE_OBJECT(m);

	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert( !(m->absent && !m->unusual));
	assert( !VM_PAGE_WIRED(m));

	if (m->gobbled) {
		if (!m->private && !m->fictitious)
			vm_page_wire_count--;
		vm_page_gobble_count--;
		m->gobbled = FALSE;
	}
	/*
	 * if this page is currently on the pageout queue, we can't do the
	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
	 * and we can't remove it manually since we would need the object lock
	 * (which is not required here) to decrement the activity_in_progress
	 * reference which is held on the object while the page is in the pageout queue...
	 * just let the normal laundry processing proceed
	 */
	if (m->laundry || m->private || m->fictitious ||
	    (m->vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
	    (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
		return;
	}
	vm_page_queues_remove(m, FALSE);

	vm_page_check_pageable_safe(m);
	vm_page_queue_enter(&vm_page_queue_cleaned, m, vm_page_t, pageq);
	m->vm_page_q_state = VM_PAGE_ON_INACTIVE_CLEANED_Q;
	vm_page_cleaned_count++;

	vm_page_inactive_count++;
	if (m_object->internal) {
		vm_page_pageable_internal_count++;
	} else {
		vm_page_pageable_external_count++;
	}
#if CONFIG_BACKGROUND_QUEUE
	if (m->vm_page_in_background)
		vm_page_add_to_backgroundq(m, TRUE);
#endif
	vm_pageout_enqueued_cleaned++;
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */

void
vm_page_activate(
	vm_page_t	m)
{
	vm_object_t	m_object;

	m_object = VM_PAGE_OBJECT(m);

#ifdef	FIXME_4778297
	assert(m_object != kernel_object);
#endif
	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert( !(m->absent && !m->unusual));

	if (m->gobbled) {
		assert( !VM_PAGE_WIRED(m));
		if (!m->private && !m->fictitious)
			vm_page_wire_count--;
		vm_page_gobble_count--;
		m->gobbled = FALSE;
	}
	/*
	 * if this page is currently on the pageout queue, we can't do the
	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
	 * and we can't remove it manually since we would need the object lock
	 * (which is not required here) to decrement the activity_in_progress
	 * reference which is held on the object while the page is in the pageout queue...
	 * just let the normal laundry processing proceed
	 */
	if (m->laundry || m->private || m->fictitious ||
	    (m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
	    (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q))
		return;

	if (m->vm_page_q_state == VM_PAGE_ON_ACTIVE_Q)
		panic("vm_page_activate: already active");

	if (m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
		DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
		DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL);
	}

	vm_page_queues_remove(m, FALSE);

	if ( !VM_PAGE_WIRED(m)) {
		vm_page_check_pageable_safe(m);
		if (!VM_DYNAMIC_PAGING_ENABLED() &&
		    m->dirty && m_object->internal &&
		    (m_object->purgable == VM_PURGABLE_DENY ||
		     m_object->purgable == VM_PURGABLE_NONVOLATILE ||
		     m_object->purgable == VM_PURGABLE_VOLATILE)) {
			vm_page_queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
			m->vm_page_q_state = VM_PAGE_ON_THROTTLED_Q;
			vm_page_throttled_count++;
		} else {
#if CONFIG_SECLUDED_MEMORY
			if (secluded_for_filecache &&
			    vm_page_secluded_target != 0 &&
			    num_tasks_can_use_secluded_mem == 0 &&
			    m_object->eligible_for_secluded &&
			    ((secluded_aging_policy == SECLUDED_AGING_FIFO) ||
			     (secluded_aging_policy ==
			      SECLUDED_AGING_ALONG_ACTIVE) ||
			     (secluded_aging_policy ==
			      SECLUDED_AGING_BEFORE_ACTIVE))) {
				vm_page_queue_enter(&vm_page_queue_secluded, m,
						    vm_page_t, pageq);
				m->vm_page_q_state = VM_PAGE_ON_SECLUDED_Q;
				vm_page_secluded_count++;
				vm_page_secluded_count_inuse++;
				assert(!m_object->internal);
//				vm_page_pageable_external_count++;
			} else
#endif /* CONFIG_SECLUDED_MEMORY */
			vm_page_enqueue_active(m, FALSE);
			m->reference = TRUE;
			m->no_cache = FALSE;
		}
	}
}
/*
 *      vm_page_speculate:
 *
 *      Put the specified page on the speculative list (if appropriate).
 *
 *      The page queues must be locked.
 */
void
vm_page_speculate(
	vm_page_t	m,
	boolean_t	new)
{
	struct vm_speculative_age_q	*aq;
	vm_object_t	m_object;

	m_object = VM_PAGE_OBJECT(m);

	vm_page_check_pageable_safe(m);

	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert( !(m->absent && !m->unusual));
	assert(m_object->internal == FALSE);

	/*
	 * if this page is currently on the pageout queue, we can't do the
	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
	 * and we can't remove it manually since we would need the object lock
	 * (which is not required here) to decrement the activity_in_progress
	 * reference which is held on the object while the page is in the pageout queue...
	 * just let the normal laundry processing proceed
	 */
	if (m->laundry || m->private || m->fictitious ||
	    (m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
	    (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q))
		return;

	vm_page_queues_remove(m, FALSE);

	if ( !VM_PAGE_WIRED(m)) {
		mach_timespec_t	ts;
		clock_sec_t	sec;
		clock_nsec_t	nsec;

		clock_get_system_nanotime(&sec, &nsec);
		ts.tv_sec = (unsigned int) sec;
		ts.tv_nsec = nsec;

		if (vm_page_speculative_count == 0) {

			speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
			speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;

			aq = &vm_page_queue_speculative[speculative_age_index];

			/*
			 * set the timer to begin a new group
			 */
			aq->age_ts.tv_sec = vm_page_speculative_q_age_ms / 1000;
			aq->age_ts.tv_nsec = (vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;

			ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
		} else {
			aq = &vm_page_queue_speculative[speculative_age_index];

			if (CMP_MACH_TIMESPEC(&ts, &aq->age_ts) >= 0) {

				speculative_age_index++;

				if (speculative_age_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q)
					speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
				if (speculative_age_index == speculative_steal_index) {
					speculative_steal_index = speculative_age_index + 1;

					if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q)
						speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
				}
				aq = &vm_page_queue_speculative[speculative_age_index];

				if (!vm_page_queue_empty(&aq->age_q))
					vm_page_speculate_ageit(aq);

				aq->age_ts.tv_sec = vm_page_speculative_q_age_ms / 1000;
				aq->age_ts.tv_nsec = (vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;

				ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
			}
		}
		vm_page_enqueue_tail(&aq->age_q, &m->pageq);
		m->vm_page_q_state = VM_PAGE_ON_SPECULATIVE_Q;
		vm_page_speculative_count++;
		vm_page_pageable_external_count++;

		if (new == TRUE) {
			vm_object_lock_assert_exclusive(m_object);

			m_object->pages_created++;
#if DEVELOPMENT || DEBUG
			vm_page_speculative_created++;
#endif
		}
	}
}
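/*
 * Illustrative sketch (guarded out of the build, not kernel code): how a
 * millisecond age limit such as vm_page_speculative_q_age_ms is split into
 * the {tv_sec, tv_nsec} pair used above, and how an age bin's expiration is
 * then compared against "now".  The struct and helper names below are local
 * to this sketch and are assumptions, not kernel APIs.
 */
#if 0
struct sketch_timespec {
	unsigned int tv_sec;
	int          tv_nsec;
};

#define SKETCH_NSEC_PER_USEC 1000

static struct sketch_timespec
sketch_age_limit_from_ms(unsigned int age_ms)
{
	struct sketch_timespec ts;

	/* whole seconds, then the millisecond remainder expressed in nanoseconds */
	ts.tv_sec  = age_ms / 1000;
	ts.tv_nsec = (int)((age_ms % 1000) * 1000 * SKETCH_NSEC_PER_USEC);
	return ts;
}

/* returns non-zero when 'now' has reached or passed 'deadline' */
static int
sketch_timespec_expired(struct sketch_timespec now, struct sketch_timespec deadline)
{
	if (now.tv_sec != deadline.tv_sec)
		return now.tv_sec > deadline.tv_sec;
	return now.tv_nsec >= deadline.tv_nsec;
}
#endif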
/*
 * move pages from the specified aging bin to
 * the speculative bin that pageout_scan claims from
 *
 *      The page queues must be locked.
 */
void
vm_page_speculate_ageit(struct vm_speculative_age_q *aq)
{
	struct vm_speculative_age_q	*sq;
	vm_page_t	t;

	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];

	if (vm_page_queue_empty(&sq->age_q)) {
		sq->age_q.next = aq->age_q.next;
		sq->age_q.prev = aq->age_q.prev;

		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.next);
		t->pageq.prev = VM_PAGE_PACK_PTR(&sq->age_q);

		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
		t->pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
	} else {
		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
		t->pageq.next = aq->age_q.next;

		t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.next);
		t->pageq.prev = sq->age_q.prev;

		t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.prev);
		t->pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);

		sq->age_q.prev = aq->age_q.prev;
	}
	vm_page_queue_init(&aq->age_q);
}
void
vm_page_lru(
	vm_page_t	m)
{
	assert(VM_PAGE_OBJECT(m) != kernel_object);
	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	/*
	 * if this page is currently on the pageout queue, we can't do the
	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
	 * and we can't remove it manually since we would need the object lock
	 * (which is not required here) to decrement the activity_in_progress
	 * reference which is held on the object while the page is in the pageout queue...
	 * just let the normal laundry processing proceed
	 */
	if (m->laundry || m->private ||
	    (m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
	    (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
	    m->fictitious)
		return;

	m->no_cache = FALSE;

	vm_page_queues_remove(m, FALSE);

	vm_page_enqueue_inactive(m, FALSE);
}
void
vm_page_reactivate_all_throttled(void)
{
	vm_page_t	first_throttled, last_throttled;
	vm_page_t	first_active;
	vm_page_t	m;
	int		extra_active_count;
	int		extra_internal_count, extra_external_count;
	vm_object_t	m_object;

	if (!VM_DYNAMIC_PAGING_ENABLED())
		return;

	extra_active_count = 0;
	extra_internal_count = 0;
	extra_external_count = 0;
	vm_page_lock_queues();
	if (! vm_page_queue_empty(&vm_page_queue_throttled)) {
		/*
		 * Switch "throttled" pages to "active".
		 */
		vm_page_queue_iterate(&vm_page_queue_throttled, m, vm_page_t, pageq) {

			assert(m->vm_page_q_state == VM_PAGE_ON_THROTTLED_Q);

			m_object = VM_PAGE_OBJECT(m);

			extra_active_count++;
			if (m_object->internal) {
				extra_internal_count++;
			} else {
				extra_external_count++;
			}

			m->vm_page_q_state = VM_PAGE_ON_ACTIVE_Q;

#if CONFIG_BACKGROUND_QUEUE
			if (m->vm_page_in_background)
				vm_page_add_to_backgroundq(m, FALSE);
#endif
		}

		/*
		 * Transfer the entire throttled queue to the regular LRU page queues.
		 * We insert it at the head of the active queue, so that these pages
		 * get re-evaluated by the LRU algorithm first, since they've been
		 * completely out of it until now.
		 */
		first_throttled = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
		last_throttled = (vm_page_t) vm_page_queue_last(&vm_page_queue_throttled);
		first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
		if (vm_page_queue_empty(&vm_page_queue_active)) {
			vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
		} else {
			first_active->pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
		}
		vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_throttled);
		first_throttled->pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
		last_throttled->pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);

#if DEBUG
		printf("reactivated %d throttled pages\n", vm_page_throttled_count);
#endif
		vm_page_queue_init(&vm_page_queue_throttled);
		/*
		 * Adjust the global page counts.
		 */
		vm_page_active_count += extra_active_count;
		vm_page_pageable_internal_count += extra_internal_count;
		vm_page_pageable_external_count += extra_external_count;
		vm_page_throttled_count = 0;
	}
	assert(vm_page_throttled_count == 0);
	assert(vm_page_queue_empty(&vm_page_queue_throttled));
	vm_page_unlock_queues();
}
/*
 * move pages from the indicated local queue to the global active queue
 * it's OK to fail if we're below the hard limit and force == FALSE
 * the nolocks == TRUE case is to allow this function to be run on
 * the hibernate path
 */
void
vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
{
	struct vpl	*lq;
	vm_page_t	first_local, last_local;
	vm_page_t	first_active;
	vm_page_t	m;
	uint32_t	count = 0;

	if (vm_page_local_q == NULL)
		return;

	lq = &vm_page_local_q[lid].vpl_un.vpl;

	if (nolocks == FALSE) {
		if (lq->vpl_count < vm_page_local_q_hard_limit && force == FALSE) {
			if ( !vm_page_trylockspin_queues())
				return;
		} else {
			vm_page_lockspin_queues();
		}
		VPL_LOCK(&lq->vpl_lock);
	}
	if (lq->vpl_count) {
		/*
		 * Switch "local" pages to "active".
		 */
		assert(!vm_page_queue_empty(&lq->vpl_queue));

		vm_page_queue_iterate(&lq->vpl_queue, m, vm_page_t, pageq) {

			vm_page_check_pageable_safe(m);
			assert(m->vm_page_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q);
			assert(!m->fictitious);

			if (m->local_id != lid)
				panic("vm_page_reactivate_local: found vm_page_t(%p) with wrong cpuid", m);

			m->vm_page_q_state = VM_PAGE_ON_ACTIVE_Q;

#if CONFIG_BACKGROUND_QUEUE
			if (m->vm_page_in_background)
				vm_page_add_to_backgroundq(m, FALSE);
#endif
			count++;
		}
		if (count != lq->vpl_count)
			panic("vm_page_reactivate_local: count = %d, vm_page_local_count = %d\n", count, lq->vpl_count);

		/*
		 * Transfer the entire local queue to the regular LRU page queues.
		 */
		first_local = (vm_page_t) vm_page_queue_first(&lq->vpl_queue);
		last_local = (vm_page_t) vm_page_queue_last(&lq->vpl_queue);
		first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);

		if (vm_page_queue_empty(&vm_page_queue_active)) {
			vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
		} else {
			first_active->pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
		}
		vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
		first_local->pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
		last_local->pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);

		vm_page_queue_init(&lq->vpl_queue);
		/*
		 * Adjust the global page counts.
		 */
		vm_page_active_count += lq->vpl_count;
		vm_page_pageable_internal_count += lq->vpl_internal_count;
		vm_page_pageable_external_count += lq->vpl_external_count;
		lq->vpl_count = 0;
		lq->vpl_internal_count = 0;
		lq->vpl_external_count = 0;
	}
	assert(vm_page_queue_empty(&lq->vpl_queue));

	if (nolocks == FALSE) {
		VPL_UNLOCK(&lq->vpl_lock);
		vm_page_unlock_queues();
	}
}
/*
 *	vm_page_part_zero_fill:
 *
 *	Zero-fill a part of the page.
 */
#define PMAP_ZERO_PART_PAGE_IMPLEMENTED
void
vm_page_part_zero_fill(
	vm_page_t	m,
	vm_offset_t	m_pa,
	vm_size_t	len)
{

#if 0
	/*
	 * we don't hold the page queue lock
	 * so this check isn't safe to make
	 */
	VM_PAGE_CHECK(m);
#endif

#ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
	pmap_zero_part_page(VM_PAGE_GET_PHYS_PAGE(m), m_pa, len);
#else
	vm_page_t	tmp;

	while (1) {
		tmp = vm_page_grab();
		if (tmp == VM_PAGE_NULL) {
			vm_page_wait(THREAD_UNINT);
			continue;
		}
		break;
	}
	vm_page_zero_fill(tmp);
	if (m_pa != 0) {
		vm_page_part_copy(m, 0, tmp, 0, m_pa);
	}
	if ((m_pa + len) < PAGE_SIZE) {
		vm_page_part_copy(m, m_pa + len, tmp,
				  m_pa + len, PAGE_SIZE - (m_pa + len));
	}
	vm_page_copy(tmp, m);
	VM_PAGE_FREE(tmp);
#endif

}
/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 */
void
vm_page_zero_fill(
	vm_page_t	m)
{
	XPR(XPR_VM_PAGE,
	    "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
	    VM_PAGE_OBJECT(m), m->offset, m, 0,0);
#if 0
	/*
	 * we don't hold the page queue lock
	 * so this check isn't safe to make
	 */
	VM_PAGE_CHECK(m);
#endif

//	dbgTrace(0xAEAEAEAE, VM_PAGE_GET_PHYS_PAGE(m), 0);	/* (BRINGUP) */
	pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
}
/*
 *	vm_page_part_copy:
 *
 *	copy part of one page to another
 */
void
vm_page_part_copy(
	vm_page_t	src_m,
	vm_offset_t	src_pa,
	vm_page_t	dst_m,
	vm_offset_t	dst_pa,
	vm_size_t	len)
{
#if 0
	/*
	 * we don't hold the page queue lock
	 * so this check isn't safe to make
	 */
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dst_m);
#endif
	pmap_copy_part_page(VM_PAGE_GET_PHYS_PAGE(src_m), src_pa,
			    VM_PAGE_GET_PHYS_PAGE(dst_m), dst_pa, len);
}
/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 *
 *	The source page should not be encrypted.  The caller should
 *	make sure the page is decrypted first, if necessary.
 */

int vm_page_copy_cs_validations = 0;
int vm_page_copy_cs_tainted = 0;

void
vm_page_copy(
	vm_page_t	src_m,
	vm_page_t	dest_m)
{
	vm_object_t	src_m_object;

	src_m_object = VM_PAGE_OBJECT(src_m);

	XPR(XPR_VM_PAGE,
	    "vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n",
	    src_m_object, src_m->offset,
	    VM_PAGE_OBJECT(dest_m), dest_m->offset,
	    0);
#if 0
	/*
	 * we don't hold the page queue lock
	 * so this check isn't safe to make
	 */
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);
#endif
	vm_object_lock_assert_held(src_m_object);

	/*
	 * The source page should not be encrypted at this point.
	 * The destination page will therefore not contain encrypted
	 * data after the copy.
	 */
	if (src_m->encrypted) {
		panic("vm_page_copy: source page %p is encrypted\n", src_m);
	}
	dest_m->encrypted = FALSE;

	if (src_m_object != VM_OBJECT_NULL &&
	    src_m_object->code_signed) {
		/*
		 * We're copying a page from a code-signed object.
		 * Whoever ends up mapping the copy page might care about
		 * the original page's integrity, so let's validate the
		 * source page now.
		 */
		vm_page_copy_cs_validations++;
		vm_page_validate_cs(src_m);
#if DEVELOPMENT || DEBUG
		DTRACE_VM4(codesigned_copy,
			   vm_object_t, src_m_object,
			   vm_object_offset_t, src_m->offset,
			   int, src_m->cs_validated,
			   int, src_m->cs_tainted);
#endif /* DEVELOPMENT || DEBUG */
	}

	if (vm_page_is_slideable(src_m)) {
		boolean_t was_busy = src_m->busy;

		(void) vm_page_slide(src_m, 0);
		assert(src_m->busy);
		if (!was_busy) {
			PAGE_WAKEUP_DONE(src_m);
		}
	}

	/*
	 * Propagate the cs_tainted bit to the copy page. Do not propagate
	 * the cs_validated bit.
	 */
	dest_m->cs_tainted = src_m->cs_tainted;
	if (dest_m->cs_tainted) {
		vm_page_copy_cs_tainted++;
	}
	dest_m->slid = src_m->slid;
	dest_m->error = src_m->error; /* sliding src_m might have failed... */
	pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(src_m), VM_PAGE_GET_PHYS_PAGE(dest_m));
}
#if MACH_ASSERT
static void
_vm_page_print(
	vm_page_t	p)
{
	printf("vm_page %p: \n", p);
	printf("  pageq: next=%p prev=%p\n",
	       (vm_page_t)VM_PAGE_UNPACK_PTR(p->pageq.next),
	       (vm_page_t)VM_PAGE_UNPACK_PTR(p->pageq.prev));
	printf("  listq: next=%p prev=%p\n",
	       (vm_page_t)(VM_PAGE_UNPACK_PTR(p->listq.next)),
	       (vm_page_t)(VM_PAGE_UNPACK_PTR(p->listq.prev)));
	printf("  next=%p\n", (vm_page_t)(VM_PAGE_UNPACK_PTR(p->next_m)));
	printf("  object=%p offset=0x%llx\n", VM_PAGE_OBJECT(p), p->offset);
	printf("  wire_count=%u\n", p->wire_count);
	printf("  q_state=%u\n", p->vm_page_q_state);

	printf("  %slaundry, %sref, %sgobbled, %sprivate\n",
	       (p->laundry ? "" : "!"),
	       (p->reference ? "" : "!"),
	       (p->gobbled ? "" : "!"),
	       (p->private ? "" : "!"));
	printf("  %sbusy, %swanted, %stabled, %sfictitious, %spmapped, %swpmapped\n",
	       (p->busy ? "" : "!"),
	       (p->wanted ? "" : "!"),
	       (p->tabled ? "" : "!"),
	       (p->fictitious ? "" : "!"),
	       (p->pmapped ? "" : "!"),
	       (p->wpmapped ? "" : "!"));
	printf("  %sfree_when_done, %sabsent, %serror, %sdirty, %scleaning, %sprecious, %sclustered\n",
	       (p->free_when_done ? "" : "!"),
	       (p->absent ? "" : "!"),
	       (p->error ? "" : "!"),
	       (p->dirty ? "" : "!"),
	       (p->cleaning ? "" : "!"),
	       (p->precious ? "" : "!"),
	       (p->clustered ? "" : "!"));
	printf("  %soverwriting, %srestart, %sunusual, %sencrypted, %sencrypted_cleaning\n",
	       (p->overwriting ? "" : "!"),
	       (p->restart ? "" : "!"),
	       (p->unusual ? "" : "!"),
	       (p->encrypted ? "" : "!"),
	       (p->encrypted_cleaning ? "" : "!"));
	printf("  %scs_validated, %scs_tainted, %scs_nx, %sno_cache\n",
	       (p->cs_validated ? "" : "!"),
	       (p->cs_tainted ? "" : "!"),
	       (p->cs_nx ? "" : "!"),
	       (p->no_cache ? "" : "!"));

	printf("phys_page=0x%x\n", VM_PAGE_GET_PHYS_PAGE(p));
}
/*
 *	Check that the list of pages is ordered by
 *	ascending physical address and has no holes.
 */
static unsigned int
vm_page_verify_contiguous(
	vm_page_t	pages,
	unsigned int	npages)
{
	vm_page_t	m;
	unsigned int	page_count;
	vm_offset_t	prev_addr;

	prev_addr = VM_PAGE_GET_PHYS_PAGE(pages);
	page_count = 1;
	for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
		if (VM_PAGE_GET_PHYS_PAGE(m) != prev_addr + 1) {
			printf("m %p prev_addr 0x%lx, current addr 0x%x\n",
			       m, (long)prev_addr, VM_PAGE_GET_PHYS_PAGE(m));
			printf("pages %p page_count %d npages %d\n", pages, page_count, npages);
			panic("vm_page_verify_contiguous: not contiguous!");
		}
		page_count++;
		prev_addr = VM_PAGE_GET_PHYS_PAGE(m);
	}
	if (page_count != npages) {
		printf("pages %p actual count 0x%x but requested 0x%x\n",
		       pages, page_count, npages);
		panic("vm_page_verify_contiguous: count error");
	}
	return 1;
}
/*
 *	Check the free lists for proper length etc.
 */
static boolean_t vm_page_verify_this_free_list_enabled = FALSE;

static unsigned int
vm_page_verify_free_list(
	vm_page_queue_head_t	*vm_page_queue,
	unsigned int		color,
	vm_page_t		look_for_page,
	boolean_t		expect_page)
{
	unsigned int	npages;
	vm_page_t	m;
	vm_page_t	prev_m;
	boolean_t	found_page;

	if (! vm_page_verify_this_free_list_enabled)
		return 0;

	found_page = FALSE;
	npages = 0;
	prev_m = (vm_page_t)((uintptr_t)vm_page_queue);

	vm_page_queue_iterate(vm_page_queue,
			      m,
			      vm_page_t,
			      pageq) {

		if (m == look_for_page) {
			found_page = TRUE;
		}
		if ((vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.prev) != prev_m)
			panic("vm_page_verify_free_list(color=%u, npages=%u): page %p corrupted prev ptr %p instead of %p\n",
			      color, npages, m, (vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.prev), prev_m);
		if ( ! m->busy )
			panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not busy\n",
			      color, npages, m);
		if (color != (unsigned int) -1) {
			if ((VM_PAGE_GET_PHYS_PAGE(m) & vm_color_mask) != color)
				panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u\n",
				      color, npages, m, VM_PAGE_GET_PHYS_PAGE(m) & vm_color_mask, color);
			if (m->vm_page_q_state != VM_PAGE_ON_FREE_Q)
				panic("vm_page_verify_free_list(color=%u, npages=%u): page %p - expecting q_state == VM_PAGE_ON_FREE_Q, found %d\n",
				      color, npages, m, m->vm_page_q_state);
		} else {
			if (m->vm_page_q_state != VM_PAGE_ON_FREE_LOCAL_Q)
				panic("vm_page_verify_free_list(npages=%u): local page %p - expecting q_state == VM_PAGE_ON_FREE_LOCAL_Q, found %d\n",
				      npages, m, m->vm_page_q_state);
		}
		++npages;
		prev_m = m;
	}
	if (look_for_page != VM_PAGE_NULL) {
		unsigned int other_color;

		if (expect_page && !found_page) {
			printf("vm_page_verify_free_list(color=%u, npages=%u): page %p not found phys=%u\n",
			       color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
			_vm_page_print(look_for_page);
			for (other_color = 0;
			     other_color < vm_colors;
			     other_color++) {
				if (other_color == color)
					continue;
				vm_page_verify_free_list(&vm_page_queue_free[other_color].qhead,
							 other_color, look_for_page, FALSE);
			}
			if (color == (unsigned int) -1) {
				vm_page_verify_free_list(&vm_lopage_queue_free,
							 (unsigned int) -1, look_for_page, FALSE);
			}
			panic("vm_page_verify_free_list(color=%u)\n", color);
		}
		if (!expect_page && found_page) {
			printf("vm_page_verify_free_list(color=%u, npages=%u): page %p found phys=%u\n",
			       color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
		}
	}
	return npages;
}
static boolean_t vm_page_verify_all_free_lists_enabled = FALSE;

static void
vm_page_verify_free_lists( void )
{
	unsigned int	color, npages, nlopages;
	boolean_t	toggle = TRUE;

	if (! vm_page_verify_all_free_lists_enabled)
		return;

	npages = 0;

	lck_mtx_lock(&vm_page_queue_free_lock);

	if (vm_page_verify_this_free_list_enabled == TRUE) {
		/*
		 * This variable has been set globally for extra checking of
		 * each free list Q. Since we didn't set it, we don't own it
		 * and we shouldn't toggle it.
		 */
		toggle = FALSE;
	}

	if (toggle == TRUE) {
		vm_page_verify_this_free_list_enabled = TRUE;
	}

	for( color = 0; color < vm_colors; color++ ) {
		npages += vm_page_verify_free_list(&vm_page_queue_free[color].qhead,
						   color, VM_PAGE_NULL, FALSE);
	}
	nlopages = vm_page_verify_free_list(&vm_lopage_queue_free,
					    (unsigned int) -1,
					    VM_PAGE_NULL, FALSE);
	if (npages != vm_page_free_count || nlopages != vm_lopage_free_count)
		panic("vm_page_verify_free_lists: "
		      "npages %u free_count %d nlopages %u lo_free_count %u",
		      npages, vm_page_free_count, nlopages, vm_lopage_free_count);

	if (toggle == TRUE) {
		vm_page_verify_this_free_list_enabled = FALSE;
	}

	lck_mtx_unlock(&vm_page_queue_free_lock);
}

#endif	/* MACH_ASSERT */
extern boolean_t (* volatile consider_buffer_cache_collect)(int);

/*
 *	CONTIGUOUS PAGE ALLOCATION
 *
 *	Find a region large enough to contain at least n pages
 *	of contiguous physical memory.
 *
 *	This is done by traversing the vm_page_t array in a linear fashion
 *	we assume that the vm_page_t array has the available physical pages in an
 *	ordered, ascending list... this is currently true of all our implementations
 *	and must remain so... there can be 'holes' in the array... we also can
 *	no longer tolerate the vm_page_t's in the list being 'freed' and reclaimed
 *	which used to happen via 'vm_page_convert'... that function was no longer
 *	being called and was removed...
 *
 *	The basic flow consists of stabilizing some of the interesting state of
 *	a vm_page_t behind the vm_page_queue and vm_page_free locks... we start our
 *	sweep at the beginning of the array looking for pages that meet our criteria
 *	for a 'stealable' page... currently we are pretty conservative... if the page
 *	meets these criteria and is physically contiguous to the previous page in the 'run'
 *	we keep developing it.  If we hit a page that doesn't fit, we reset our state
 *	and start to develop a new run... if at this point we've already considered
 *	at least MAX_CONSIDERED_BEFORE_YIELD pages, we'll drop the 2 locks we hold,
 *	and mutex_pause (which will yield the processor), to keep the latency low w/r
 *	to other threads trying to acquire free pages (or move pages from q to q),
 *	and then continue from the spot we left off... we only make 1 pass through the
 *	array.  Once we have a 'run' that is long enough, we'll go into the loop which
 *	steals the pages from the queues they're currently on... pages on the free
 *	queue can be stolen directly... pages that are on any of the other queues
 *	must be removed from the object they are tabled on... this requires taking the
 *	object lock... we do this as a 'try' to prevent deadlocks... if the 'try' fails
 *	or if the state of the page behind the vm_object lock is no longer viable, we'll
 *	dump the pages we've currently stolen back to the free list, and pick up our
 *	scan from the point where we aborted the 'current' run.
 *
 *	Requirements:
 *		- neither vm_page_queue nor vm_free_list lock can be held on entry
 *
 *	Returns a pointer to a list of gobbled/wired pages or VM_PAGE_NULL.
 */

#define	MAX_CONSIDERED_BEFORE_YIELD	1000


#define RESET_STATE_OF_RUN()	\
	MACRO_BEGIN		\
	prevcontaddr = -2;	\
	start_pnum = -1;	\
	free_considered = 0;	\
	substitute_needed = 0;	\
	npages = 0;		\
	MACRO_END

/*
 * Can we steal in-use (i.e. not free) pages when searching for
 * physically-contiguous pages ?
 */
#define VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL 1

static unsigned int vm_page_find_contiguous_last_idx = 0, vm_page_lomem_find_contiguous_last_idx = 0;
#if DEBUG
int vm_page_find_contig_debug = 0;
#endif
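/*
 * Illustrative sketch (guarded out of the build, not kernel code): the core
 * of the scan described above, reduced to plain arrays.  Given page frame
 * numbers in ascending order and a per-page "stealable" flag, find the start
 * of a run of 'want' physically-contiguous stealable pages whose first frame
 * honours 'align_mask'.  All names here are local to the sketch; the real
 * scan also yields the locks periodically and handles page substitution,
 * which this sketch omits.
 */
#if 0
#include <stdint.h>

static int
sketch_find_contig_run(const uint32_t *pfn, const int *stealable,
		       unsigned int count, unsigned int want,
		       uint32_t align_mask, unsigned int *run_start)
{
	unsigned int npages = 0;            /* length of the run developed so far */
	unsigned int start  = 0;            /* index where the current run began  */
	uint32_t prev_pfn   = (uint32_t)-2; /* sentinel forcing a fresh run       */

	for (unsigned int i = 0; i < count && npages < want; i++) {
		if (!stealable[i]) {
			npages = 0;                 /* page unusable: reset the run */
			prev_pfn = (uint32_t)-2;
			continue;
		}
		if (pfn[i] != prev_pfn + 1) {
			/* not contiguous with the previous page: maybe start over */
			if ((pfn[i] & align_mask) != 0) {
				npages = 0;         /* can't start a run here (misaligned) */
				prev_pfn = (uint32_t)-2;
				continue;
			}
			npages = 1;
			start = i;
		} else {
			npages++;
		}
		prev_pfn = pfn[i];
	}
	if (npages < want)
		return 0;
	*run_start = start;
	return 1;
}
#endif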
static vm_page_t
vm_page_find_contiguous(
	unsigned int	contig_pages,
	ppnum_t		max_pnum,
	ppnum_t		pnum_mask,
	boolean_t	wire,
	int		flags)
{
	vm_page_t	m = NULL;
	ppnum_t		prevcontaddr;
	ppnum_t		start_pnum;
	unsigned int	npages, considered, scanned;
	unsigned int	page_idx, start_idx, last_idx, orig_last_idx;
	unsigned int	idx_last_contig_page_found = 0;
	int		free_considered, free_available;
	int		substitute_needed;
	boolean_t	wrapped, zone_gc_called = FALSE;
#if DEBUG
	clock_sec_t	tv_start_sec, tv_end_sec;
	clock_usec_t	tv_start_usec, tv_end_usec;
#endif

	int		yielded = 0;
	int		dumped_run = 0;
	int		stolen_pages = 0;
	int		compressed_pages = 0;


	if (contig_pages == 0)
		return VM_PAGE_NULL;

full_scan_again:

#if MACH_ASSERT
	vm_page_verify_free_lists();
#endif
#if DEBUG
	clock_get_system_microtime(&tv_start_sec, &tv_start_usec);
#endif
	PAGE_REPLACEMENT_ALLOWED(TRUE);

	vm_page_lock_queues();

	lck_mtx_lock(&vm_page_queue_free_lock);

	RESET_STATE_OF_RUN();

	scanned = 0;
	considered = 0;

	free_available = vm_page_free_count - vm_page_free_reserved;

	wrapped = FALSE;

	if(flags & KMA_LOMEM)
		idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx;
	else
		idx_last_contig_page_found = vm_page_find_contiguous_last_idx;

	orig_last_idx = idx_last_contig_page_found;
	last_idx = orig_last_idx;
	for (page_idx = last_idx, start_idx = last_idx;
	     npages < contig_pages && page_idx < vm_pages_count;
	     page_idx++) {
retry:
		if (wrapped &&
		    npages == 0 &&
		    page_idx >= orig_last_idx) {
			/*
			 * We're back where we started and we haven't
			 * found any suitable contiguous range.  Let's
			 * give up.
			 */
			break;
		}
		m = &vm_pages[page_idx];

		assert(!m->fictitious);
		assert(!m->private);

		if (max_pnum && VM_PAGE_GET_PHYS_PAGE(m) > max_pnum) {
			/* no more low pages... */
			break;
		}
		if (!npages & ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0)) {
			/*
			 * not aligned
			 */
			RESET_STATE_OF_RUN();

		} else if (VM_PAGE_WIRED(m) || m->gobbled ||
			   m->encrypted_cleaning || m->laundry || m->wanted ||
			   m->cleaning || m->overwriting || m->free_when_done) {
			/*
			 * page is in a transient state
			 * or a state we don't want to deal
			 * with, so don't consider it which
			 * means starting a new run
			 */
			RESET_STATE_OF_RUN();

		} else if ((m->vm_page_q_state == VM_PAGE_NOT_ON_Q) ||
			   (m->vm_page_q_state == VM_PAGE_ON_FREE_LOCAL_Q) ||
			   (m->vm_page_q_state == VM_PAGE_ON_FREE_LOPAGE_Q) ||
			   (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
			/*
			 * page needs to be on one of our queues (other than the pageout or special free queues)
			 * or it needs to belong to the compressor pool (which is now indicated
			 * by vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR and falls out
			 * from the check for VM_PAGE_NOT_ON_Q)
			 * in order for it to be stable behind the
			 * locks we hold at this point...
			 * if not, don't consider it which
			 * means starting a new run
			 */
			RESET_STATE_OF_RUN();

		} else if ((m->vm_page_q_state != VM_PAGE_ON_FREE_Q) && (!m->tabled || m->busy)) {
			/*
			 * pages on the free list are always 'busy'
			 * so we couldn't test for 'busy' in the check
			 * for the transient states... pages that are
			 * 'free' are never 'tabled', so we also couldn't
			 * test for 'tabled'.  So we check here to make
			 * sure that a non-free page is not busy and is
			 * tabled on an object...
			 * if not, don't consider it which
			 * means starting a new run
			 */
			RESET_STATE_OF_RUN();

		} else {
			if (VM_PAGE_GET_PHYS_PAGE(m) != prevcontaddr + 1) {
				if ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0) {
					RESET_STATE_OF_RUN();
					goto did_consider;
				} else {
					npages = 1;
					start_idx = page_idx;
					start_pnum = VM_PAGE_GET_PHYS_PAGE(m);
				}
			} else {
				npages++;
			}
			prevcontaddr = VM_PAGE_GET_PHYS_PAGE(m);

			if (m->vm_page_q_state == VM_PAGE_ON_FREE_Q) {
				free_considered++;
			} else {
				/*
				 * This page is not free.
				 * If we can't steal used pages,
				 * we have to give up this run
				 * and keep looking.
				 * Otherwise, we might need to
				 * move the contents of this page
				 * into a substitute page.
				 */
#if VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
				if (m->pmapped || m->dirty || m->precious) {
					substitute_needed++;
				}
#else
				RESET_STATE_OF_RUN();
#endif
			}

			if ((free_considered + substitute_needed) > free_available) {
				/*
				 * if we let this run continue
				 * we will end up dropping the vm_page_free_count
				 * below the reserve limit... we need to abort
				 * this run, but we can at least re-consider this
				 * page... thus the jump back to 'retry'
				 */
				RESET_STATE_OF_RUN();

				if (free_available && considered <= MAX_CONSIDERED_BEFORE_YIELD) {
					considered++;
					goto retry;
				}
				/*
				 * free_available == 0
				 * so can't consider any free pages... if
				 * we went to retry in this case, we'd
				 * get stuck looking at the same page
				 * w/o making any forward progress
				 * we also want to take this path if we've already
				 * reached our limit that controls the lock latency
				 */
			}
		}
did_consider:
		if (considered > MAX_CONSIDERED_BEFORE_YIELD && npages <= 1) {

			PAGE_REPLACEMENT_ALLOWED(FALSE);

			lck_mtx_unlock(&vm_page_queue_free_lock);
			vm_page_unlock_queues();

			mutex_pause(0);

			PAGE_REPLACEMENT_ALLOWED(TRUE);

			vm_page_lock_queues();
			lck_mtx_lock(&vm_page_queue_free_lock);

			RESET_STATE_OF_RUN();
			/*
			 * reset our free page limit since we
			 * dropped the lock protecting the vm_page_free_queue
			 */
			free_available = vm_page_free_count - vm_page_free_reserved;

			considered = 0;

			yielded++;

			goto retry;
		}
		considered++;
	}
	m = VM_PAGE_NULL;

	if (npages != contig_pages) {

		if (!wrapped) {
			/*
			 * We didn't find a contiguous range but we didn't
			 * start from the very first page.
			 * Start again from the very first page.
			 */
			RESET_STATE_OF_RUN();

			if( flags & KMA_LOMEM)
				idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx = 0;
			else
				idx_last_contig_page_found = vm_page_find_contiguous_last_idx = 0;

			last_idx = 0;
			page_idx = last_idx;
			wrapped = TRUE;

			goto retry;
		}
		lck_mtx_unlock(&vm_page_queue_free_lock);
	} else {
		vm_page_t	m1;
		vm_page_t	m2;
		unsigned int	cur_idx;
		unsigned int	tmp_start_idx;
		vm_object_t	locked_object = VM_OBJECT_NULL;
		boolean_t	abort_run = FALSE;

		assert(page_idx - start_idx == contig_pages);

		tmp_start_idx = start_idx;

		/*
		 * first pass through to pull the free pages
		 * off of the free queue so that in case we
		 * need substitute pages, we won't grab any
		 * of the free pages in the run... we'll clear
		 * the 'free' bit in the 2nd pass, and even in
		 * an abort_run case, we'll collect all of the
		 * free pages in this run and return them to the free list
		 */
		while (start_idx < page_idx) {

			m1 = &vm_pages[start_idx++];

#if !VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
			assert(m1->vm_page_q_state == VM_PAGE_ON_FREE_Q);
#endif

			if (m1->vm_page_q_state == VM_PAGE_ON_FREE_Q) {
				unsigned int color;

				color = VM_PAGE_GET_PHYS_PAGE(m1) & vm_color_mask;
#if MACH_ASSERT
				vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, m1, TRUE);
#endif
				vm_page_queue_remove(&vm_page_queue_free[color].qhead,
						     m1,
						     vm_page_t,
						     pageq);

				VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
#if MACH_ASSERT
				vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, VM_PAGE_NULL, FALSE);
#endif
				/*
				 * Clear the "free" bit so that this page
				 * does not get considered for another
				 * concurrent physically-contiguous allocation.
				 */
				m1->vm_page_q_state = VM_PAGE_NOT_ON_Q;
				assert(m1->busy);

				vm_page_free_count--;
			}
		}
		if( flags & KMA_LOMEM)
			vm_page_lomem_find_contiguous_last_idx = page_idx;
		else
			vm_page_find_contiguous_last_idx = page_idx;

		/*
		 * we can drop the free queue lock at this point since
		 * we've pulled any 'free' candidates off of the list
		 * we need it dropped so that we can do a vm_page_grab
		 * when substituting for pmapped/dirty pages
		 */
		lck_mtx_unlock(&vm_page_queue_free_lock);

		start_idx = tmp_start_idx;
		cur_idx = page_idx - 1;

		while (start_idx++ < page_idx) {
			/*
			 * must go through the list from back to front
			 * so that the page list is created in the
			 * correct order - low -> high phys addresses
			 */
			m1 = &vm_pages[cur_idx--];

			if (m1->vm_page_object == 0) {
				/*
				 * page has already been removed from
				 * the free list in the 1st pass
				 */
				assert(m1->vm_page_q_state == VM_PAGE_NOT_ON_Q);
				assert(m1->offset == (vm_object_offset_t) -1);
				assert(m1->busy);
				assert(!m1->wanted);
				assert(!m1->laundry);
			} else {
				vm_object_t object;
				int refmod;
				boolean_t disconnected, reusable;

				if (abort_run == TRUE)
					continue;

				assert(m1->vm_page_q_state != VM_PAGE_NOT_ON_Q);

				object = VM_PAGE_OBJECT(m1);

				if (object != locked_object) {
					if (locked_object) {
						vm_object_unlock(locked_object);
						locked_object = VM_OBJECT_NULL;
					}
					if (vm_object_lock_try(object))
						locked_object = object;
				}
				if (locked_object == VM_OBJECT_NULL ||
				    (VM_PAGE_WIRED(m1) || m1->gobbled ||
				     m1->encrypted_cleaning || m1->laundry || m1->wanted ||
				     m1->cleaning || m1->overwriting || m1->free_when_done || m1->busy) ||
				    (m1->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q)) {

					if (locked_object) {
						vm_object_unlock(locked_object);
						locked_object = VM_OBJECT_NULL;
					}
					tmp_start_idx = cur_idx;
					abort_run = TRUE;
					continue;
				}

				disconnected = FALSE;
				reusable = FALSE;

				if ((m1->reusable ||
				     object->all_reusable) &&
				    (m1->vm_page_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q) &&
				    !m1->dirty &&
				    !m1->reference) {
					/* reusable page... */
					refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
					disconnected = TRUE;
					if (refmod == 0) {
						/*
						 * ... not reused: can steal
						 * without relocating contents.
						 */
						reusable = TRUE;
					}
				}

				if ((m1->pmapped &&
				     ! reusable) ||
				    m1->dirty ||
				    m1->precious) {
					vm_object_offset_t offset;

					m2 = vm_page_grab();

					if (m2 == VM_PAGE_NULL) {
						if (locked_object) {
							vm_object_unlock(locked_object);
							locked_object = VM_OBJECT_NULL;
						}
						tmp_start_idx = cur_idx;
						abort_run = TRUE;
						continue;
					}
					if (! disconnected) {
						if (m1->pmapped)
							refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
						else
							refmod = 0;
					}

					/* copy the page's contents */
					pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(m1), VM_PAGE_GET_PHYS_PAGE(m2));
					/* copy the page's state */
					assert(!VM_PAGE_WIRED(m1));
					assert(m1->vm_page_q_state != VM_PAGE_ON_FREE_Q);
					assert(m1->vm_page_q_state != VM_PAGE_ON_PAGEOUT_Q);
					assert(!m1->laundry);
					m2->reference	= m1->reference;
					assert(!m1->gobbled);
					assert(!m1->private);
					m2->no_cache	= m1->no_cache;
					assert(!m1->wanted);
					assert(!m1->fictitious);
					m2->pmapped	= m1->pmapped; /* should flush cache ? */
					m2->wpmapped	= m1->wpmapped;
					assert(!m1->free_when_done);
					m2->absent	= m1->absent;
					m2->error	= m1->error;
					m2->dirty	= m1->dirty;
					assert(!m1->cleaning);
					m2->precious	= m1->precious;
					m2->clustered	= m1->clustered;
					assert(!m1->overwriting);
					m2->restart	= m1->restart;
					m2->unusual	= m1->unusual;
					m2->encrypted	= m1->encrypted;
					assert(!m1->encrypted_cleaning);
					m2->cs_validated = m1->cs_validated;
					m2->cs_tainted	= m1->cs_tainted;
					m2->cs_nx	= m1->cs_nx;

					/*
					 * If m1 had really been reusable,
					 * we would have just stolen it, so
					 * let's not propagate its "reusable"
					 * bit and assert that m2 is not
					 * marked as "reusable".
					 */
					// m2->reusable	= m1->reusable;
					assert(!m2->reusable);

					// assert(!m1->lopage);
					m2->slid	= m1->slid;

					if (m1->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR)
						m2->vm_page_q_state = VM_PAGE_USED_BY_COMPRESSOR;

					/*
					 * page may need to be flushed if
					 * it is marshalled into a UPL
					 * that is going to be used by a device
					 * that doesn't support coherency
					 */
					m2->written_by_kernel = TRUE;

					/*
					 * make sure we clear the ref/mod state
					 * from the pmap layer... else we risk
					 * inheriting state from the last time
					 * this page was used...
					 */
					pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m2), VM_MEM_MODIFIED | VM_MEM_REFERENCED);

					if (refmod & VM_MEM_REFERENCED)
						m2->reference = TRUE;
					if (refmod & VM_MEM_MODIFIED) {
						SET_PAGE_DIRTY(m2, TRUE);
					}
					offset = m1->offset;

					/*
					 * completely cleans up the state
					 * of the page so that it is ready
					 * to be put onto the free list, or
					 * for this purpose it looks like it
					 * just came off of the free list
					 */
					vm_page_free_prepare(m1);

					/*
					 * now put the substitute page
					 * on the object
					 */
					vm_page_insert_internal(m2, locked_object, offset, VM_KERN_MEMORY_NONE, TRUE, TRUE, FALSE, FALSE, NULL);

					if (m2->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
						m2->pmapped = TRUE;
						m2->wpmapped = TRUE;

						PMAP_ENTER(kernel_pmap, m2->offset, m2,
							   VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE);

						compressed_pages++;

					} else {
						if (m2->reference)
							vm_page_activate(m2);
						else
							vm_page_deactivate(m2);
					}
					PAGE_WAKEUP_DONE(m2);

				} else {
					assert(m1->vm_page_q_state != VM_PAGE_USED_BY_COMPRESSOR);

					/*
					 * completely cleans up the state
					 * of the page so that it is ready
					 * to be put onto the free list, or
					 * for this purpose it looks like it
					 * just came off of the free list
					 */
					vm_page_free_prepare(m1);
				}

				stolen_pages++;
			}
#if CONFIG_BACKGROUND_QUEUE
			vm_page_assign_background_state(m1);
#endif
			VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
			m1->snext = m;

			m = m1;
		}
		if (locked_object) {
			vm_object_unlock(locked_object);
			locked_object = VM_OBJECT_NULL;
		}

		if (abort_run == TRUE) {
			/*
			 * want the index of the last
			 * page in this run that was
			 * successfully 'stolen', so back
			 * it up 1 for the auto-decrement on use
			 * and 1 more to bump back over this page
			 */
			page_idx = tmp_start_idx + 2;
			if (page_idx >= vm_pages_count) {
				if (wrapped) {
					if (m != VM_PAGE_NULL) {
						vm_page_unlock_queues();
						vm_page_free_list(m, FALSE);
						vm_page_lock_queues();
						m = VM_PAGE_NULL;
					}
					dumped_run++;
					goto done_scanning;
				}
				page_idx = last_idx = 0;
				wrapped = TRUE;
			}
			abort_run = FALSE;

			/*
			 * We didn't find a contiguous range but we didn't
			 * start from the very first page.
			 * Start again from the very first page.
			 */
			RESET_STATE_OF_RUN();

			if( flags & KMA_LOMEM)
				idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx = page_idx;
			else
				idx_last_contig_page_found = vm_page_find_contiguous_last_idx = page_idx;

			last_idx = page_idx;

			if (m != VM_PAGE_NULL) {
				vm_page_unlock_queues();
				vm_page_free_list(m, FALSE);
				vm_page_lock_queues();
				m = VM_PAGE_NULL;
			}
			dumped_run++;

			lck_mtx_lock(&vm_page_queue_free_lock);
			/*
			 * reset our free page limit since we
			 * dropped the lock protecting the vm_page_free_queue
			 */
			free_available = vm_page_free_count - vm_page_free_reserved;

			goto retry;
		}
		for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {

			assert(m1->vm_page_q_state == VM_PAGE_NOT_ON_Q);
			assert(m1->wire_count == 0);

			if (wire == TRUE) {
				m1->wire_count++;
				m1->vm_page_q_state = VM_PAGE_IS_WIRED;
			} else
				m1->gobbled = TRUE;
		}
		if (wire == FALSE)
			vm_page_gobble_count += npages;

		/*
		 * gobbled pages are also counted as wired pages
		 */
		vm_page_wire_count += npages;

		assert(vm_page_verify_contiguous(m, npages));
	}
done_scanning:
	PAGE_REPLACEMENT_ALLOWED(FALSE);

	vm_page_unlock_queues();

#if DEBUG
	clock_get_system_microtime(&tv_end_sec, &tv_end_usec);

	tv_end_sec -= tv_start_sec;
	if (tv_end_usec < tv_start_usec) {
		tv_end_sec--;
		tv_end_usec += 1000000;
	}
	tv_end_usec -= tv_start_usec;
	if (tv_end_usec >= 1000000) {
		tv_end_sec++;
		tv_end_usec -= 1000000;
	}
	if (vm_page_find_contig_debug) {
		printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds... started at %d... scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages\n",
		       __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
		       (long)tv_end_sec, tv_end_usec, orig_last_idx,
		       scanned, yielded, dumped_run, stolen_pages, compressed_pages);
	}
#endif
#if MACH_ASSERT
	vm_page_verify_free_lists();
#endif
	if (m == NULL && zone_gc_called == FALSE) {
		printf("%s(num=%d,low=%d): found %d pages at 0x%llx...scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages... wired count is %d\n",
		       __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
		       scanned, yielded, dumped_run, stolen_pages, compressed_pages, vm_page_wire_count);

		if (consider_buffer_cache_collect != NULL) {
			(void)(*consider_buffer_cache_collect)(1);
		}

		zone_gc_called = TRUE;

		printf("vm_page_find_contiguous: zone_gc called... wired count is %d\n", vm_page_wire_count);
		goto full_scan_again;
	}

	return m;
}
/*
 *	Allocate a list of contiguous, wired pages.
 */
kern_return_t
cpm_allocate(
	vm_size_t	size,
	vm_page_t	*list,
	ppnum_t		max_pnum,
	ppnum_t		pnum_mask,
	boolean_t	wire,
	int		flags)
{
	vm_page_t	pages;
	unsigned int	npages;

	if (size % PAGE_SIZE != 0)
		return KERN_INVALID_ARGUMENT;

	npages = (unsigned int) (size / PAGE_SIZE);
	if (npages != size / PAGE_SIZE) {
		/* 32-bit overflow */
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 *	Obtain a pointer to a subset of the free
	 *	list large enough to satisfy the request;
	 *	the region will be physically contiguous.
	 */
	pages = vm_page_find_contiguous(npages, max_pnum, pnum_mask, wire, flags);

	if (pages == VM_PAGE_NULL)
		return KERN_NO_SPACE;
	/*
	 * determine need for wakeups
	 */
	if ((vm_page_free_count < vm_page_free_min) ||
	    ((vm_page_free_count < vm_page_free_target) &&
	     ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_min)))
		thread_wakeup((event_t) &vm_page_free_wanted);

	VM_CHECK_MEMORYSTATUS;

	/*
	 *	The CPM pages should now be available and
	 *	ordered by ascending physical address.
	 */
	assert(vm_page_verify_contiguous(pages, npages));

	*list = pages;
	return KERN_SUCCESS;
}
unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT;

/*
 * when working on a 'run' of pages, it is necessary to hold
 * the vm_page_queue_lock (a hot global lock) for certain operations
 * on the page... however, the majority of the work can be done
 * while merely holding the object lock... in fact there are certain
 * collections of pages that don't require any work brokered by the
 * vm_page_queue_lock... to mitigate the time spent behind the global
 * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
 * while doing all of the work that doesn't require the vm_page_queue_lock...
 * then call vm_page_do_delayed_work to acquire the vm_page_queue_lock and do the
 * necessary work for each page... we will grab the busy bit on the page
 * if it's not already held so that vm_page_do_delayed_work can drop the object lock
 * if it can't immediately take the vm_page_queue_lock in order to compete
 * for the locks in the same order that vm_pageout_scan takes them.
 * the operation names are modeled after the names of the routines that
 * need to be called in order to make the changes very obvious in the
 * original code
 */
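/*
 * Illustrative sketch (guarded out of the build, not kernel code) of the
 * two pass scheme described above: work items are collected cheaply first,
 * then applied in a single batch while the hot lock is held.  The struct,
 * limit and callback parameters below are stand-ins invented for this
 * sketch, not the kernel's vm_page_delayed_work machinery.
 */
#if 0
#define SKETCH_DELAYED_WORK_LIMIT 32

struct sketch_dw {
	void *page;          /* page the deferred operation applies to */
	unsigned int mask;   /* which operations to perform on it      */
};

/*
 * Pass 1 (not shown): the caller fills dwp[0..count-1] while holding only
 * its object lock.  Pass 2: take the hot (global) lock once for the whole
 * batch and apply every deferred operation.
 */
static void
sketch_do_delayed_work(struct sketch_dw *dwp, unsigned int count,
		       void (*hot_lock)(void), void (*hot_unlock)(void),
		       void (*apply)(void *page, unsigned int mask))
{
	hot_lock();
	for (unsigned int i = 0; i < count; i++)
		apply(dwp[i].page, dwp[i].mask);
	hot_unlock();
}
#endif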
void
vm_page_do_delayed_work(
	vm_object_t	object,
	vm_tag_t	tag,
	struct vm_page_delayed_work	*dwp,
	int		dw_count)
{
	int		j;
	vm_page_t	m;
	vm_page_t	local_free_q = VM_PAGE_NULL;

	/*
	 * pageout_scan takes the vm_page_lock_queues first
	 * then tries for the object lock... to avoid what
	 * is effectively a lock inversion, we'll go to the
	 * trouble of taking them in that same order... otherwise
	 * if this object contains the majority of the pages resident
	 * in the UBC (or a small set of large objects actively being
	 * worked on contain the majority of the pages), we could
	 * cause the pageout_scan thread to 'starve' in its attempt
	 * to find pages to move to the free queue, since it has to
	 * successfully acquire the object lock of any candidate page
	 * before it can steal/clean it.
	 */
	if (!vm_page_trylockspin_queues()) {
		vm_object_unlock(object);

		vm_page_lockspin_queues();

		for (j = 0; ; j++) {
			if (!vm_object_lock_avoid(object) &&
			    _vm_object_lock_try(object))
				break;
			vm_page_unlock_queues();
			mutex_pause(j);
			vm_page_lockspin_queues();
		}
	}
	for (j = 0; j < dw_count; j++, dwp++) {

		m = dwp->dw_m;

		if (dwp->dw_mask & DW_vm_pageout_throttle_up)
			vm_pageout_throttle_up(m);
#if CONFIG_PHANTOM_CACHE
		if (dwp->dw_mask & DW_vm_phantom_cache_update)
			vm_phantom_cache_update(m);
#endif
		if (dwp->dw_mask & DW_vm_page_wire)
			vm_page_wire(m, tag, FALSE);
		else if (dwp->dw_mask & DW_vm_page_unwire) {
			boolean_t	queueit;

			queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE;

			vm_page_unwire(m, queueit);
		}
		if (dwp->dw_mask & DW_vm_page_free) {
			vm_page_free_prepare_queues(m);

			assert(m->pageq.next == 0 && m->pageq.prev == 0);
			/*
			 * Add this page to our list of reclaimed pages,
			 * to be freed later.
			 */
			m->snext = local_free_q;
			local_free_q = m;
		} else {
			if (dwp->dw_mask & DW_vm_page_deactivate_internal)
				vm_page_deactivate_internal(m, FALSE);
			else if (dwp->dw_mask & DW_vm_page_activate) {
				if (m->vm_page_q_state != VM_PAGE_ON_ACTIVE_Q) {
					vm_page_activate(m);
				}
			}
			else if (dwp->dw_mask & DW_vm_page_speculate)
				vm_page_speculate(m, TRUE);
			else if (dwp->dw_mask & DW_enqueue_cleaned) {
				/*
				 * if we didn't hold the object lock and did this,
				 * we might disconnect the page, then someone might
				 * soft fault it back in, then we would put it on the
				 * cleaned queue, and so we would have a referenced (maybe even dirty)
				 * page on that queue, which we don't want
				 */
				int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));

				if ((refmod_state & VM_MEM_REFERENCED)) {
					/*
					 * this page has been touched since it got cleaned; let's activate it
					 * if it hasn't already been
					 */
					vm_pageout_enqueued_cleaned++;
					vm_pageout_cleaned_reactivated++;
					vm_pageout_cleaned_commit_reactivated++;

					if (m->vm_page_q_state != VM_PAGE_ON_ACTIVE_Q)
						vm_page_activate(m);
				} else {
					m->reference = FALSE;
					vm_page_enqueue_cleaned(m);
				}
			}
			else if (dwp->dw_mask & DW_vm_page_lru)
				vm_page_lru(m);
			else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) {
				if (m->vm_page_q_state != VM_PAGE_ON_PAGEOUT_Q)
					vm_page_queues_remove(m, TRUE);
			}
			if (dwp->dw_mask & DW_set_reference)
				m->reference = TRUE;
			else if (dwp->dw_mask & DW_clear_reference)
				m->reference = FALSE;

			if (dwp->dw_mask & DW_move_page) {
				if (m->vm_page_q_state != VM_PAGE_ON_PAGEOUT_Q) {
					vm_page_queues_remove(m, FALSE);

					assert(VM_PAGE_OBJECT(m) != kernel_object);

					vm_page_enqueue_inactive(m, FALSE);
				}
			}
			if (dwp->dw_mask & DW_clear_busy)
				m->busy = FALSE;

			if (dwp->dw_mask & DW_PAGE_WAKEUP)
				PAGE_WAKEUP(m);
		}
	}
	vm_page_unlock_queues();

	if (local_free_q)
		vm_page_free_list(local_free_q, TRUE);

	VM_CHECK_MEMORYSTATUS;
}
kern_return_t
vm_page_alloc_list(
	int	page_count,
	int	flags,
	vm_page_t *list)
{
	vm_page_t	lo_page_list = VM_PAGE_NULL;
	vm_page_t	mem;
	int		i;

	if ( !(flags & KMA_LOMEM))
		panic("vm_page_alloc_list: called w/o KMA_LOMEM");

	for (i = 0; i < page_count; i++) {

		mem = vm_page_grablo();

		if (mem == VM_PAGE_NULL) {
			if (lo_page_list)
				vm_page_free_list(lo_page_list, FALSE);

			*list = VM_PAGE_NULL;

			return (KERN_RESOURCE_SHORTAGE);
		}
		mem->snext = lo_page_list;
		lo_page_list = mem;
	}
	*list = lo_page_list;

	return (KERN_SUCCESS);
}

void
vm_page_set_offset(vm_page_t page, vm_object_offset_t offset)
{
	page->offset = offset;
}

vm_page_t
vm_page_get_next(vm_page_t page)
{
	return (page->snext);
}

vm_object_offset_t
vm_page_get_offset(vm_page_t page)
{
	return (page->offset);
}

ppnum_t
vm_page_get_phys_page(vm_page_t page)
{
	return (VM_PAGE_GET_PHYS_PAGE(page));
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static vm_page_t hibernate_gobble_queue;

static int  hibernate_drain_pageout_queue(struct vm_pageout_queue *);
static int  hibernate_flush_dirty_pages(int);
static int  hibernate_flush_queue(vm_page_queue_head_t *, int);

void hibernate_flush_wait(void);
void hibernate_mark_in_progress(void);
void hibernate_clear_in_progress(void);

void		hibernate_free_range(int, int);
void		hibernate_hash_insert_page(vm_page_t);
uint32_t	hibernate_mark_as_unneeded(addr64_t, addr64_t, hibernate_page_list_t *, hibernate_page_list_t *);
void		hibernate_rebuild_vm_structs(void);
uint32_t	hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *);
ppnum_t		hibernate_lookup_paddr(unsigned int);

struct hibernate_statistics {
	int hibernate_considered;
	int hibernate_reentered_on_q;
	int hibernate_found_dirty;
	int hibernate_skipped_cleaning;
	int hibernate_skipped_transient;
	int hibernate_skipped_precious;
	int hibernate_skipped_external;
	int hibernate_queue_nolock;
	int hibernate_queue_paused;
	int hibernate_throttled;
	int hibernate_throttle_timeout;
	int hibernate_drained;
	int hibernate_drain_timeout;
	int cd_lock_failed;
	int cd_found_precious;
	int cd_found_wired;
	int cd_found_busy;
	int cd_found_unusual;
	int cd_found_cleaning;
	int cd_found_laundry;
	int cd_found_dirty;
	int cd_found_xpmapped;
	int cd_skipped_xpmapped;
	int cd_local_free;
	int cd_total_free;
	int cd_vm_page_wire_count;
	int cd_vm_struct_pages_unneeded;
	int cd_pages;
	int cd_discarded;
	int cd_count_wire;
} hibernate_stats;

/*
 * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
 * so that we don't overrun the estimated image size, which would
 * result in a hibernation failure.
 */
#define	HIBERNATE_XPMAPPED_LIMIT	40000
static int
hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
{
	wait_result_t	wait_result;

	vm_page_lock_queues();

	while ( !vm_page_queue_empty(&q->pgo_pending) ) {

		q->pgo_draining = TRUE;

		assert_wait_timeout((event_t) (&q->pgo_laundry+1), THREAD_INTERRUPTIBLE, 5000, 1000*NSEC_PER_USEC);

		vm_page_unlock_queues();

		wait_result = thread_block(THREAD_CONTINUE_NULL);

		if (wait_result == THREAD_TIMED_OUT && !vm_page_queue_empty(&q->pgo_pending)) {
			hibernate_stats.hibernate_drain_timeout++;

			if (q == &vm_pageout_queue_external)
				return (0);

			return (1);
		}
		vm_page_lock_queues();

		hibernate_stats.hibernate_drained++;
	}
	vm_page_unlock_queues();

	return (0);
}


boolean_t	hibernate_skip_external = FALSE;
static int
hibernate_flush_queue(vm_page_queue_head_t *q, int qcount)
{
	vm_page_t	m;
	vm_object_t	l_object = NULL;
	vm_object_t	m_object = NULL;
	int		refmod_state = 0;
	int		try_failed_count = 0;
	int		retval = 0;
	int		current_run = 0;
	struct	vm_pageout_queue *iq;
	struct	vm_pageout_queue *eq;
	struct	vm_pageout_queue *tq;

	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START, q, qcount, 0, 0, 0);

	iq = &vm_pageout_queue_internal;
	eq = &vm_pageout_queue_external;

	vm_page_lock_queues();

	while (qcount && !vm_page_queue_empty(q)) {

		if (current_run++ == 1000) {
			if (hibernate_should_abort()) {
				retval = 1;
				break;
			}
			current_run = 0;
		}

		m = (vm_page_t) vm_page_queue_first(q);
		m_object = VM_PAGE_OBJECT(m);

		/*
		 * check to see if we currently are working
		 * with the same object... if so, we've
		 * already got the lock
		 */
		if (m_object != l_object) {
			/*
			 * the object associated with candidate page is
			 * different from the one we were just working
			 * with... dump the lock if we still own it
			 */
			if (l_object != NULL) {
				vm_object_unlock(l_object);
				l_object = NULL;
			}
			/*
			 * Try to lock object; since we've already got the
			 * page queues lock, we can only 'try' for this one.
			 * if the 'try' fails, we need to do a mutex_pause
			 * to allow the owner of the object lock a chance to
			 * run...
			 */
			if ( !vm_object_lock_try_scan(m_object)) {

				if (try_failed_count > 20) {
					hibernate_stats.hibernate_queue_nolock++;

					goto reenter_pg_on_q;
				}

				vm_page_unlock_queues();
				mutex_pause(try_failed_count++);
				vm_page_lock_queues();

				hibernate_stats.hibernate_queue_paused++;
				continue;
			}
			l_object = m_object;
		}
		if ( !m_object->alive || m->encrypted_cleaning || m->cleaning || m->laundry || m->busy || m->absent || m->error) {
			/*
			 * page is not to be cleaned
			 * put it back on the head of its queue
			 */
			if (m->cleaning)
				hibernate_stats.hibernate_skipped_cleaning++;
			else
				hibernate_stats.hibernate_skipped_transient++;

			goto reenter_pg_on_q;
		}
		if (m_object->copy == VM_OBJECT_NULL) {
			if (m_object->purgable == VM_PURGABLE_VOLATILE || m_object->purgable == VM_PURGABLE_EMPTY) {
				/*
				 * let the normal hibernate image path
				 * deal with these
				 */
				goto reenter_pg_on_q;
			}
		}
		if ( !m->dirty && m->pmapped) {
			refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));

			if ((refmod_state & VM_MEM_MODIFIED)) {
				SET_PAGE_DIRTY(m, FALSE);
			}
		} else
			refmod_state = 0;

		if ( !m->dirty) {
			/*
			 * page is not to be cleaned
			 * put it back on the head of its queue
			 */
			if (m->precious)
				hibernate_stats.hibernate_skipped_precious++;

			goto reenter_pg_on_q;
		}

		if (hibernate_skip_external == TRUE && !m_object->internal) {

			hibernate_stats.hibernate_skipped_external++;

			goto reenter_pg_on_q;
		}
		tq = NULL;

		if (m_object->internal) {
			if (VM_PAGE_Q_THROTTLED(iq))
				tq = iq;
		} else if (VM_PAGE_Q_THROTTLED(eq))
			tq = eq;

		if (tq != NULL) {
			wait_result_t	wait_result;
			int		wait_count = 5;

			if (l_object != NULL) {
				vm_object_unlock(l_object);
				l_object = NULL;
			}

			while (retval == 0) {

				tq->pgo_throttled = TRUE;

				assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000*NSEC_PER_USEC);

				vm_page_unlock_queues();

				wait_result = thread_block(THREAD_CONTINUE_NULL);

				vm_page_lock_queues();

				if (wait_result != THREAD_TIMED_OUT)
					break;
				if (!VM_PAGE_Q_THROTTLED(tq))
					break;

				if (hibernate_should_abort())
					retval = 1;

				if (--wait_count == 0) {

					hibernate_stats.hibernate_throttle_timeout++;

					if (tq == eq) {
						hibernate_skip_external = TRUE;
						break;
					}
					retval = 1;
				}
			}
			if (retval)
				break;

			hibernate_stats.hibernate_throttled++;

			continue;
		}
		/*
		 * we've already factored out pages in the laundry which
		 * means this page can't be on the pageout queue so it's
		 * safe to do the vm_page_queues_remove
		 */
		vm_page_queues_remove(m, TRUE);

		if (m_object->internal == TRUE)
			pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m), PMAP_OPTIONS_COMPRESSOR, NULL);

		(void)vm_pageout_cluster(m, FALSE, FALSE);

		hibernate_stats.hibernate_found_dirty++;

		goto next_pg;

reenter_pg_on_q:
		vm_page_queue_remove(q, m, vm_page_t, pageq);
		vm_page_queue_enter(q, m, vm_page_t, pageq);

		hibernate_stats.hibernate_reentered_on_q++;
next_pg:
		hibernate_stats.hibernate_considered++;

		qcount--;
		try_failed_count = 0;
	}
	if (l_object != NULL) {
		vm_object_unlock(l_object);
		l_object = NULL;
	}

	vm_page_unlock_queues();

	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_END, hibernate_stats.hibernate_found_dirty, retval, 0, 0, 0);

	return (retval);
}
)
6182 struct vm_speculative_age_q
*aq
;
6185 if (vm_page_local_q
) {
6186 for (i
= 0; i
< vm_page_local_q_count
; i
++)
6187 vm_page_reactivate_local(i
, TRUE
, FALSE
);
6190 for (i
= 0; i
<= VM_PAGE_MAX_SPECULATIVE_AGE_Q
; i
++) {
6194 aq
= &vm_page_queue_speculative
[i
];
6196 if (vm_page_queue_empty(&aq
->age_q
))
6200 vm_page_lockspin_queues();
6202 vm_page_queue_iterate(&aq
->age_q
,
6209 vm_page_unlock_queues();
6212 if (hibernate_flush_queue(&aq
->age_q
, qcount
))
6216 if (hibernate_flush_queue(&vm_page_queue_inactive
, vm_page_inactive_count
- vm_page_anonymous_count
- vm_page_cleaned_count
))
6218 /* XXX FBDP TODO: flush secluded queue */
6219 if (hibernate_flush_queue(&vm_page_queue_anonymous
, vm_page_anonymous_count
))
6221 if (hibernate_flush_queue(&vm_page_queue_cleaned
, vm_page_cleaned_count
))
6223 if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal
))
6227 vm_compressor_record_warmup_start();
6229 if (hibernate_flush_queue(&vm_page_queue_active
, vm_page_active_count
)) {
6231 vm_compressor_record_warmup_end();
6234 if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal
)) {
6236 vm_compressor_record_warmup_end();
6240 vm_compressor_record_warmup_end();
6242 if (hibernate_skip_external
== FALSE
&& hibernate_drain_pageout_queue(&vm_pageout_queue_external
))
6250 hibernate_reset_stats()
6252 bzero(&hibernate_stats
, sizeof(struct hibernate_statistics
));
6257 hibernate_flush_memory()
6261 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT
);
6263 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE
, 3) | DBG_FUNC_START
, vm_page_free_count
, 0, 0, 0, 0);
6265 hibernate_cleaning_in_progress
= TRUE
;
6266 hibernate_skip_external
= FALSE
;
6268 if ((retval
= hibernate_flush_dirty_pages(1)) == 0) {
6270 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE
, 10) | DBG_FUNC_START
, VM_PAGE_COMPRESSOR_COUNT
, 0, 0, 0, 0);
6272 vm_compressor_flush();
6274 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE
, 10) | DBG_FUNC_END
, VM_PAGE_COMPRESSOR_COUNT
, 0, 0, 0, 0);
6276 if (consider_buffer_cache_collect
!= NULL
) {
6277 unsigned int orig_wire_count
;
6279 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE
, 7) | DBG_FUNC_START
, 0, 0, 0, 0, 0);
6280 orig_wire_count
= vm_page_wire_count
;
6282 (void)(*consider_buffer_cache_collect
)(1);
6285 HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count
- vm_page_wire_count
);
6287 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE
, 7) | DBG_FUNC_END
, orig_wire_count
- vm_page_wire_count
, 0, 0, 0, 0);
6290 hibernate_cleaning_in_progress
= FALSE
;
6292 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE
, 3) | DBG_FUNC_END
, vm_page_free_count
, hibernate_stats
.hibernate_found_dirty
, retval
, 0, 0);
6295 HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT
);
6298 HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n",
6299 hibernate_stats
.hibernate_considered
,
6300 hibernate_stats
.hibernate_reentered_on_q
,
6301 hibernate_stats
.hibernate_found_dirty
);
6302 HIBPRINT(" skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n",
6303 hibernate_stats
.hibernate_skipped_cleaning
,
6304 hibernate_stats
.hibernate_skipped_transient
,
6305 hibernate_stats
.hibernate_skipped_precious
,
6306 hibernate_stats
.hibernate_skipped_external
,
6307 hibernate_stats
.hibernate_queue_nolock
);
6308 HIBPRINT(" queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n",
6309 hibernate_stats
.hibernate_queue_paused
,
6310 hibernate_stats
.hibernate_throttled
,
6311 hibernate_stats
.hibernate_throttle_timeout
,
6312 hibernate_stats
.hibernate_drained
,
6313 hibernate_stats
.hibernate_drain_timeout
);
void
hibernate_page_list_zero(hibernate_page_list_t *list)
{
	uint32_t		bank;
	hibernate_bitmap_t *	bitmap;

	bitmap = &list->bank_bitmap[0];
	for (bank = 0; bank < list->bank_count; bank++)
	{
		uint32_t last_bit;

		bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
		// set out-of-bound bits at end of bitmap.
		last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
		if (last_bit)
			bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);

		bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
	}
}
void
hibernate_free_gobble_pages(void)
{
	vm_page_t m, next;
	uint32_t  count = 0;

	m = (vm_page_t) hibernate_gobble_queue;
	while (m)
	{
		next = m->snext;
		vm_page_free(m);
		count++;
		m = next;
	}
	hibernate_gobble_queue = VM_PAGE_NULL;

	if (count)
		HIBLOG("Freed %d pages\n", count);
}
static boolean_t
hibernate_consider_discard(vm_page_t m, boolean_t preflight)
{
    vm_object_t object = NULL;
    int         refmod_state;
    boolean_t   discard = FALSE;

    do
    {
        if (m->private)
            panic("hibernate_consider_discard: private");

        object = VM_PAGE_OBJECT(m);

        if (!vm_object_lock_try(object)) {
            object = NULL;
            if (!preflight) hibernate_stats.cd_lock_failed++;
            break;
        }
        if (VM_PAGE_WIRED(m)) {
            if (!preflight) hibernate_stats.cd_found_wired++;
            break;
        }
        if (m->precious) {
            if (!preflight) hibernate_stats.cd_found_precious++;
            break;
        }
        if (m->busy || !object->alive) {
            /*
             * Somebody is playing with this page.
             */
            if (!preflight) hibernate_stats.cd_found_busy++;
            break;
        }
        if (m->absent || m->unusual || m->error) {
            /*
             * If it's unusual in anyway, ignore it
             */
            if (!preflight) hibernate_stats.cd_found_unusual++;
            break;
        }
        if (m->cleaning) {
            if (!preflight) hibernate_stats.cd_found_cleaning++;
            break;
        }
        if (m->laundry) {
            if (!preflight) hibernate_stats.cd_found_laundry++;
            break;
        }
        if (!m->dirty)
        {
            refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));

            if (refmod_state & VM_MEM_REFERENCED)
                m->reference = TRUE;
            if (refmod_state & VM_MEM_MODIFIED) {
                SET_PAGE_DIRTY(m, FALSE);
            }
        }

        /*
         * If it's clean or purgeable we can discard the page on wakeup.
         */
        discard = (!m->dirty)
                    || (VM_PURGABLE_VOLATILE == object->purgable)
                    || (VM_PURGABLE_EMPTY    == object->purgable);

        if (discard == FALSE) {
            if (!preflight)
                hibernate_stats.cd_found_dirty++;
        } else if (m->xpmapped && m->reference && !object->internal) {
            if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
                if (!preflight)
                    hibernate_stats.cd_found_xpmapped++;
                discard = FALSE;
            } else {
                if (!preflight)
                    hibernate_stats.cd_skipped_xpmapped++;
            }
        }
    }
    while (FALSE);

    if (object)
        vm_object_unlock(object);

    return (discard);
}
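
/*
 * Summary of the policy above: a page is a discard candidate only when it can
 * be taken without disturbing anyone (object lock acquired, not wired,
 * precious, busy, absent, in error, being cleaned or laundered) and it is
 * either clean or belongs to a volatile/empty purgeable object.  Clean,
 * recently referenced, xpmapped file-backed pages are kept in the image up to
 * HIBERNATE_XPMAPPED_LIMIT, presumably because re-faulting executable pages
 * immediately after wake is expensive.
 */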
static void
hibernate_discard_page(vm_page_t m)
{
    vm_object_t m_object;

    if (m->absent || m->unusual || m->error)
        /*
         * If it's unusual in anyway, ignore
         */
        return;

    m_object = VM_PAGE_OBJECT(m);

#if MACH_ASSERT || DEBUG
    if (!vm_object_lock_try(m_object))
        panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
#else
    /* No need to lock page queue for token delete, hibernate_vm_unlock()
       makes sure these locks are uncontended before sleep */
#endif  /* MACH_ASSERT || DEBUG */

    if (m->pmapped == TRUE)
    {
        __unused int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
    }

    if (m->laundry)
        panic("hibernate_discard_page(%p) laundry", m);
    if (m->private)
        panic("hibernate_discard_page(%p) private", m);
    if (m->fictitious)
        panic("hibernate_discard_page(%p) fictitious", m);

    if (VM_PURGABLE_VOLATILE == m_object->purgable)
    {
        /* object should be on a queue */
        assert((m_object->objq.next != NULL) && (m_object->objq.prev != NULL));
        purgeable_q_t old_queue = vm_purgeable_object_remove(m_object);
        assert(old_queue);
        if (m_object->purgeable_when_ripe) {
            vm_purgeable_token_delete_first(old_queue);
        }
        vm_object_lock_assert_exclusive(m_object);
        m_object->purgable = VM_PURGABLE_EMPTY;

        /*
         * Purgeable ledgers:  pages of VOLATILE and EMPTY objects are
         * accounted in the "volatile" ledger, so no change here.
         * We have to update vm_page_purgeable_count, though, since we're
         * effectively purging this object.
         */
        unsigned int delta;
        assert(m_object->resident_page_count >= m_object->wired_page_count);
        delta = (m_object->resident_page_count - m_object->wired_page_count);
        assert(vm_page_purgeable_count >= delta);
        OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count);
    }

    vm_page_free(m);

#if MACH_ASSERT || DEBUG
    vm_object_unlock(m_object);
#endif  /* MACH_ASSERT || DEBUG */
}
/*
 Grab locks for hibernate_page_list_setall()
*/
void
hibernate_vm_lock_queues(void)
{
    vm_object_lock(compressor_object);
    vm_page_lock_queues();
    lck_mtx_lock(&vm_page_queue_free_lock);

    if (vm_page_local_q) {
        uint32_t  i;

        for (i = 0; i < vm_page_local_q_count; i++) {
            struct vpl *lq;

            lq = &vm_page_local_q[i].vpl_un.vpl;
            VPL_LOCK(&lq->vpl_lock);
        }
    }
}

void
hibernate_vm_unlock_queues(void)
{
    if (vm_page_local_q) {
        uint32_t  i;

        for (i = 0; i < vm_page_local_q_count; i++) {
            struct vpl *lq;

            lq = &vm_page_local_q[i].vpl_un.vpl;
            VPL_UNLOCK(&lq->vpl_lock);
        }
    }
    lck_mtx_unlock(&vm_page_queue_free_lock);
    vm_page_unlock_queues();
    vm_object_unlock(compressor_object);
}
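
/*
 * Note: the lock order here (compressor object, then the page queues lock,
 * then vm_page_queue_free_lock, then the per-CPU local queue locks) matches
 * the order used by hibernate_page_list_setall() in its preflight pass, and
 * the unlock path releases them in the reverse order.
 */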
/*
 Bits zero in the bitmaps => page needs to be saved. All pages default to be saved,
 pages known to VM to not need saving are subtracted.
 Wired pages to be saved are present in page_list_wired, pageable in page_list.
*/

void
hibernate_page_list_setall(hibernate_page_list_t * page_list,
                           hibernate_page_list_t * page_list_wired,
                           hibernate_page_list_t * page_list_pal,
                           boolean_t preflight,
                           boolean_t will_discard,
                           uint32_t * pagesOut)
{
    uint64_t  start, end, nsec;
    vm_page_t m;
    vm_page_t next;
    uint32_t  i;
    uint32_t  pages = page_list->page_count;
    uint32_t  count_anonymous = 0, count_throttled = 0, count_compressor = 0;
    uint32_t  count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0;
    uint32_t  count_wire = pages;
    uint32_t  count_discard_active    = 0;
    uint32_t  count_discard_inactive  = 0;
    uint32_t  count_discard_cleaned   = 0;
    uint32_t  count_discard_purgeable = 0;
    uint32_t  count_discard_speculative = 0;
    uint32_t  count_discard_vm_struct_pages = 0;
    uint32_t             bank;
    hibernate_bitmap_t * bitmap;
    hibernate_bitmap_t * bitmap_wired;
    boolean_t            discard_all;
    boolean_t            discard;

    HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight);

    if (preflight) {
        page_list       = NULL;
        page_list_wired = NULL;
        page_list_pal   = NULL;
        discard_all     = FALSE;
    } else {
        discard_all     = will_discard;
    }

#if MACH_ASSERT || DEBUG
    if (!preflight)
    {
        vm_page_lock_queues();
        if (vm_page_local_q) {
            for (i = 0; i < vm_page_local_q_count; i++) {
                struct vpl *lq;

                lq = &vm_page_local_q[i].vpl_un.vpl;
                VPL_LOCK(&lq->vpl_lock);
            }
        }
    }
#endif  /* MACH_ASSERT || DEBUG */

    KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0);

    clock_get_uptime(&start);

    if (!preflight) {
        hibernate_page_list_zero(page_list);
        hibernate_page_list_zero(page_list_wired);
        hibernate_page_list_zero(page_list_pal);

        hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
        hibernate_stats.cd_pages = pages;
    }

    if (vm_page_local_q) {
        for (i = 0; i < vm_page_local_q_count; i++)
            vm_page_reactivate_local(i, TRUE, !preflight);
    }

    if (preflight) {
        vm_object_lock(compressor_object);
        vm_page_lock_queues();
        lck_mtx_lock(&vm_page_queue_free_lock);
    }

    m = (vm_page_t) hibernate_gobble_queue;
    while (m)
    {
        pages--;
        count_wire--;
        if (!preflight) {
            hibernate_page_bitset(page_list,       TRUE, VM_PAGE_GET_PHYS_PAGE(m));
            hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
        }
        m = m->snext;
    }

    if (!preflight) for( i = 0; i < real_ncpus; i++ )
    {
        if (cpu_data_ptr[i] && cpu_data_ptr[i]->cpu_processor)
        {
            for (m = PROCESSOR_DATA(cpu_data_ptr[i]->cpu_processor, free_pages); m; m = m->snext)
            {
                assert(m->vm_page_q_state == VM_PAGE_ON_FREE_LOCAL_Q);

                pages--;
                count_wire--;
                hibernate_page_bitset(page_list,       TRUE, VM_PAGE_GET_PHYS_PAGE(m));
                hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));

                hibernate_stats.cd_local_free++;
                hibernate_stats.cd_total_free++;
            }
        }
    }

    for( i = 0; i < vm_colors; i++ )
    {
        vm_page_queue_iterate(&vm_page_queue_free[i].qhead, m, vm_page_t, pageq)
        {
            assert(m->vm_page_q_state == VM_PAGE_ON_FREE_Q);

            pages--;
            count_wire--;
            if (!preflight) {
                hibernate_page_bitset(page_list,       TRUE, VM_PAGE_GET_PHYS_PAGE(m));
                hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));

                hibernate_stats.cd_total_free++;
            }
        }
    }

    vm_page_queue_iterate(&vm_lopage_queue_free, m, vm_page_t, pageq)
    {
        assert(m->vm_page_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);

        pages--;
        count_wire--;
        if (!preflight) {
            hibernate_page_bitset(page_list,       TRUE, VM_PAGE_GET_PHYS_PAGE(m));
            hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));

            hibernate_stats.cd_total_free++;
        }
    }

    m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
    while (m && !vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t)m))
    {
        assert(m->vm_page_q_state == VM_PAGE_ON_THROTTLED_Q);

        next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.next);
        discard = FALSE;
        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
         && hibernate_consider_discard(m, preflight))
        {
            if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
            count_discard_inactive++;
            discard = discard_all;
        }
        else
            count_throttled++;
        count_wire--;
        if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));

        if (discard) hibernate_discard_page(m);
        m = next;
    }

    m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
    while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m))
    {
        assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);

        next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.next);
        discard = FALSE;
        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
         && hibernate_consider_discard(m, preflight))
        {
            if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
            if (m->dirty)
                count_discard_purgeable++;
            else
                count_discard_inactive++;
            discard = discard_all;
        }
        else
            count_anonymous++;
        count_wire--;
        if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
        if (discard) hibernate_discard_page(m);
        m = next;
    }

    m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
    while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m))
    {
        assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);

        next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.next);
        discard = FALSE;
        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
         && hibernate_consider_discard(m, preflight))
        {
            if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
            if (m->dirty)
                count_discard_purgeable++;
            else
                count_discard_cleaned++;
            discard = discard_all;
        }
        else
            count_cleaned++;
        count_wire--;
        if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
        if (discard) hibernate_discard_page(m);
        m = next;
    }

    m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
    while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m))
    {
        assert(m->vm_page_q_state == VM_PAGE_ON_ACTIVE_Q);

        next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.next);
        discard = FALSE;
        if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
         && hibernate_consider_discard(m, preflight))
        {
            if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
            if (m->dirty)
                count_discard_purgeable++;
            else
                count_discard_active++;
            discard = discard_all;
        }
        else
            count_active++;
        count_wire--;
        if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
        if (discard) hibernate_discard_page(m);
        m = next;
    }

    m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
    while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m))
    {
        assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);

        next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.next);
        discard = FALSE;
        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
         && hibernate_consider_discard(m, preflight))
        {
            if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
            if (m->dirty)
                count_discard_purgeable++;
            else
                count_discard_inactive++;
            discard = discard_all;
        }
        else
            count_inactive++;
        count_wire--;
        if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
        if (discard) hibernate_discard_page(m);
        m = next;
    }
    /* XXX FBDP TODO: secluded queue */

    for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ )
    {
        m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
        while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m))
        {
            assert(m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q);

            next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.next);
            discard = FALSE;
            if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
             && hibernate_consider_discard(m, preflight))
            {
                if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
                count_discard_speculative++;
                discard = discard_all;
            }
            else
                count_speculative++;
            count_wire--;
            if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
            if (discard) hibernate_discard_page(m);
            m = next;
        }
    }

    vm_page_queue_iterate(&compressor_object->memq, m, vm_page_t, listq)
    {
        assert(m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR);

        count_compressor++;
        count_wire--;
        if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
    }

    if (preflight == FALSE && discard_all == TRUE) {
        KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START, 0, 0, 0, 0, 0);

        HIBLOG("hibernate_teardown started\n");
        count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired);
        HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages);

        pages -= count_discard_vm_struct_pages;
        count_wire -= count_discard_vm_struct_pages;

        hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages;

        KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END, 0, 0, 0, 0, 0);
    }

    if (!preflight) {
        // pull wired from hibernate_bitmap
        bitmap = &page_list->bank_bitmap[0];
        bitmap_wired = &page_list_wired->bank_bitmap[0];
        for (bank = 0; bank < page_list->bank_count; bank++)
        {
            for (i = 0; i < bitmap->bitmapwords; i++)
                bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
            bitmap       = (hibernate_bitmap_t *) &bitmap->bitmap      [bitmap->bitmapwords];
            bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
        }
    }

    // machine dependent adjustments
    hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages);

    if (!preflight) {
        hibernate_stats.cd_count_wire = count_wire;
        hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable +
                                       count_discard_speculative + count_discard_cleaned + count_discard_vm_struct_pages;
    }

    clock_get_uptime(&end);
    absolutetime_to_nanoseconds(end - start, &nsec);
    HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);

    HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n  %s discard act %d inact %d purgeable %d spec %d cleaned %d\n",
           pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped,
           discard_all ? "did" : "could",
           count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);

    if (hibernate_stats.cd_skipped_xpmapped)
        HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);

    *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned;

    if (preflight && will_discard) *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;

#if MACH_ASSERT || DEBUG
    if (!preflight)
    {
        if (vm_page_local_q) {
            for (i = 0; i < vm_page_local_q_count; i++) {
                struct vpl *lq;

                lq = &vm_page_local_q[i].vpl_un.vpl;
                VPL_UNLOCK(&lq->vpl_lock);
            }
        }
        vm_page_unlock_queues();
    }
#endif  /* MACH_ASSERT || DEBUG */

    if (preflight) {
        lck_mtx_unlock(&vm_page_queue_free_lock);
        vm_page_unlock_queues();
        vm_object_unlock(compressor_object);
    }

    KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0);
}
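
/*
 * Usage note (descriptive only): hibernate_page_list_setall() is intended to
 * be run both in a preflight pass (preflight == TRUE), where it merely counts
 * what could be discarded and leaves every page and bitmap untouched, and in
 * a real pass, where it fills in the page bitmaps and - when will_discard is
 * set - actually frees the discardable pages and compacts the vm_pages array
 * via hibernate_teardown_vm_structs().
 */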
void
hibernate_page_list_discard(hibernate_page_list_t * page_list)
{
    uint64_t  start, end, nsec;
    vm_page_t m;
    vm_page_t next;
    uint32_t  i;
    uint32_t  count_discard_active    = 0;
    uint32_t  count_discard_inactive  = 0;
    uint32_t  count_discard_purgeable = 0;
    uint32_t  count_discard_cleaned   = 0;
    uint32_t  count_discard_speculative = 0;

#if MACH_ASSERT || DEBUG
    vm_page_lock_queues();
    if (vm_page_local_q) {
        for (i = 0; i < vm_page_local_q_count; i++) {
            struct vpl *lq;

            lq = &vm_page_local_q[i].vpl_un.vpl;
            VPL_LOCK(&lq->vpl_lock);
        }
    }
#endif  /* MACH_ASSERT || DEBUG */

    clock_get_uptime(&start);

    m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
    while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m))
    {
        assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);

        next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->pageq.next);
        if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m)))
        {
            if (m->dirty)
                count_discard_purgeable++;
            else
                count_discard_inactive++;
            hibernate_discard_page(m);
        }
        m = next;
    }

    for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ )
    {
        m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
        while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m))
        {
            assert(m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q);

            next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->pageq.next);
            if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m)))
            {
                count_discard_speculative++;
                hibernate_discard_page(m);
            }
            m = next;
        }
    }

    m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
    while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m))
    {
        assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);

        next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->pageq.next);
        if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m)))
        {
            if (m->dirty)
                count_discard_purgeable++;
            else
                count_discard_inactive++;
            hibernate_discard_page(m);
        }
        m = next;
    }
    /* XXX FBDP TODO: secluded queue */

    m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
    while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m))
    {
        assert(m->vm_page_q_state == VM_PAGE_ON_ACTIVE_Q);

        next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->pageq.next);
        if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m)))
        {
            if (m->dirty)
                count_discard_purgeable++;
            else
                count_discard_active++;
            hibernate_discard_page(m);
        }
        m = next;
    }

    m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
    while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m))
    {
        assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);

        next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->pageq.next);
        if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m)))
        {
            if (m->dirty)
                count_discard_purgeable++;
            else
                count_discard_cleaned++;
            hibernate_discard_page(m);
        }
        m = next;
    }

#if MACH_ASSERT || DEBUG
    if (vm_page_local_q) {
        for (i = 0; i < vm_page_local_q_count; i++) {
            struct vpl *lq;

            lq = &vm_page_local_q[i].vpl_un.vpl;
            VPL_UNLOCK(&lq->vpl_lock);
        }
    }
    vm_page_unlock_queues();
#endif  /* MACH_ASSERT || DEBUG */

    clock_get_uptime(&end);
    absolutetime_to_nanoseconds(end - start, &nsec);
    HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n",
           nsec / 1000000ULL,
           count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
}
boolean_t       hibernate_paddr_map_inited = FALSE;
boolean_t       hibernate_rebuild_needed = FALSE;
unsigned int    hibernate_teardown_last_valid_compact_indx = -1;
vm_page_t       hibernate_rebuild_hash_list = NULL;

unsigned int    hibernate_teardown_found_tabled_pages = 0;
unsigned int    hibernate_teardown_found_created_pages = 0;
unsigned int    hibernate_teardown_found_free_pages = 0;
unsigned int    hibernate_teardown_vm_page_free_count;


struct ppnum_mapping {
    struct ppnum_mapping    *ppnm_next;
    ppnum_t                 ppnm_base_paddr;
    unsigned int            ppnm_sindx;
    unsigned int            ppnm_eindx;
};

struct ppnum_mapping    *ppnm_head;
struct ppnum_mapping    *ppnm_last_found = NULL;
void
hibernate_create_paddr_map()
{
    unsigned int            i;
    ppnum_t                 next_ppnum_in_run = 0;
    struct ppnum_mapping    *ppnm = NULL;

    if (hibernate_paddr_map_inited == FALSE) {

        for (i = 0; i < vm_pages_count; i++) {

            if (ppnm)
                ppnm->ppnm_eindx = i;

            if (ppnm == NULL || VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) != next_ppnum_in_run) {

                ppnm = kalloc(sizeof(struct ppnum_mapping));

                ppnm->ppnm_next = ppnm_head;
                ppnm_head = ppnm;

                ppnm->ppnm_sindx = i;
                ppnm->ppnm_base_paddr = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]);
            }
            next_ppnum_in_run = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) + 1;
        }
        ppnm->ppnm_eindx++;

        hibernate_paddr_map_inited = TRUE;
    }
}
ppnum_t
hibernate_lookup_paddr(unsigned int indx)
{
    struct ppnum_mapping    *ppnm = NULL;

    ppnm = ppnm_last_found;

    if (ppnm) {
        if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx)
            goto done;
    }
    for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {

        if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
            ppnm_last_found = ppnm;
            break;
        }
    }
    if (ppnm == NULL)
        panic("hibernate_lookup_paddr of %d failed\n", indx);
done:
    return (ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx));
}
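
/*
 * Example of the translation above (illustrative numbers only): if a run of
 * vm_pages[] entries with indices 100..149 was recorded with ppnm_base_paddr
 * 0x8000, then hibernate_lookup_paddr(120) returns 0x8000 + (120 - 100),
 * i.e. physical page number 0x8014.
 */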
uint32_t
hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
{
    addr64_t        saddr_aligned;
    addr64_t        eaddr_aligned;
    addr64_t        addr;
    ppnum_t         paddr;
    unsigned int    mark_as_unneeded_pages = 0;

    saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
    eaddr_aligned = eaddr & ~PAGE_MASK_64;

    for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {

        paddr = pmap_find_phys(kernel_pmap, addr);

        assert(paddr);

        hibernate_page_bitset(page_list,       TRUE, paddr);
        hibernate_page_bitset(page_list_wired, TRUE, paddr);

        mark_as_unneeded_pages++;
    }
    return (mark_as_unneeded_pages);
}
void
hibernate_hash_insert_page(vm_page_t mem)
{
    vm_page_bucket_t *bucket;
    int               hash_id;
    vm_object_t       m_object;

    m_object = VM_PAGE_OBJECT(mem);

    assert(mem->hashed);
    assert(mem->offset != (vm_object_offset_t) -1);

    /*
     * Insert it into the object_object/offset hash table
     */
    hash_id = vm_page_hash(m_object, mem->offset);
    bucket = &vm_page_buckets[hash_id];

    mem->next_m = bucket->page_list;
    bucket->page_list = VM_PAGE_PACK_PTR(mem);
}
void
hibernate_free_range(int sindx, int eindx)
{
    vm_page_t       mem;
    unsigned int    color;

    while (sindx < eindx) {
        mem = &vm_pages[sindx];

        vm_page_init(mem, hibernate_lookup_paddr(sindx), FALSE);

        mem->lopage = FALSE;
        mem->vm_page_q_state = VM_PAGE_ON_FREE_Q;

        color = VM_PAGE_GET_PHYS_PAGE(mem) & vm_color_mask;
        vm_page_queue_enter_first(&vm_page_queue_free[color].qhead,
                                  mem,
                                  vm_page_t,
                                  pageq);
        vm_page_free_count++;

        sindx++;
    }
}
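
/*
 * Note: hibernate_free_range() is the counterpart to the compaction done in
 * hibernate_teardown_vm_structs() - the vm_page_t slots that were vacated
 * (the "holes") are re-created here at wake time, each one re-initialized
 * with its physical page number from the paddr map and put back on the
 * appropriate free queue.
 */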
extern void hibernate_rebuild_pmap_structs(void);

void
hibernate_rebuild_vm_structs(void)
{
    int           cindx, sindx, eindx;
    vm_page_t     mem, tmem, mem_next;
    AbsoluteTime  startTime, endTime;
    uint64_t      nsec;

    if (hibernate_rebuild_needed == FALSE)
        return;

    KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START, 0, 0, 0, 0, 0);
    HIBLOG("hibernate_rebuild started\n");

    clock_get_uptime(&startTime);

    hibernate_rebuild_pmap_structs();

    bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t));
    eindx = vm_pages_count;

    for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {

        mem = &vm_pages[cindx];
        /*
         * hibernate_teardown_vm_structs leaves the location where
         * this vm_page_t must be located in "next".
         */
        tmem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->next_m));
        mem->next_m = VM_PAGE_PACK_PTR(NULL);

        sindx = (int)(tmem - &vm_pages[0]);

        if (mem != tmem) {
            /*
             * this vm_page_t was moved by hibernate_teardown_vm_structs,
             * so move it back to its real location
             */
            *tmem = *mem;
            mem = tmem;
        }
        if (mem->hashed)
            hibernate_hash_insert_page(mem);
        /*
         * the 'hole' between this vm_page_t and the previous
         * vm_page_t we moved needs to be initialized as
         * a range of free vm_page_t's
         */
        hibernate_free_range(sindx + 1, eindx);

        eindx = sindx;
    }
    hibernate_free_range(0, sindx);

    assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);

    /*
     * process the list of vm_page_t's that were entered in the hash,
     * but were not located in the vm_pages arrary... these are
     * vm_page_t's that were created on the fly (i.e. fictitious)
     */
    for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
        mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->next_m));

        hibernate_hash_insert_page(mem);
    }
    hibernate_rebuild_hash_list = NULL;

    clock_get_uptime(&endTime);
    SUB_ABSOLUTETIME(&endTime, &startTime);
    absolutetime_to_nanoseconds(endTime, &nsec);

    HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL);

    hibernate_rebuild_needed = FALSE;

    KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
extern void hibernate_teardown_pmap_structs(addr64_t *, addr64_t *);

uint32_t
hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
{
    unsigned int      i;
    unsigned int      compact_target_indx;
    vm_page_t         mem, mem_next;
    vm_page_bucket_t  *bucket;
    unsigned int      mark_as_unneeded_pages = 0;
    unsigned int      unneeded_vm_page_bucket_pages = 0;
    unsigned int      unneeded_vm_pages_pages = 0;
    unsigned int      unneeded_pmap_pages = 0;
    addr64_t          start_of_unneeded = 0;
    addr64_t          end_of_unneeded = 0;


    if (hibernate_should_abort())
        return (0);

    HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n",
           vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count,
           vm_page_cleaned_count, compressor_object->resident_page_count);

    for (i = 0; i < vm_page_bucket_count; i++) {

        bucket = &vm_page_buckets[i];

        for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)); mem != VM_PAGE_NULL; mem = mem_next) {
            assert(mem->hashed);

            mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->next_m));

            if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) {
                mem->next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list);
                hibernate_rebuild_hash_list = mem;
            }
        }
    }
    unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0], (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired);
    mark_as_unneeded_pages += unneeded_vm_page_bucket_pages;

    hibernate_teardown_vm_page_free_count = vm_page_free_count;

    compact_target_indx = 0;

    for (i = 0; i < vm_pages_count; i++) {

        mem = &vm_pages[i];

        if (mem->vm_page_q_state == VM_PAGE_ON_FREE_Q) {
            unsigned int color;

            assert(!mem->lopage);

            color = VM_PAGE_GET_PHYS_PAGE(mem) & vm_color_mask;

            vm_page_queue_remove(&vm_page_queue_free[color].qhead,
                                 mem,
                                 vm_page_t,
                                 pageq);

            VM_PAGE_ZERO_PAGEQ_ENTRY(mem);

            vm_page_free_count--;

            hibernate_teardown_found_free_pages++;

            if (vm_pages[compact_target_indx].vm_page_q_state != VM_PAGE_ON_FREE_Q)
                compact_target_indx = i;
        } else {
            /*
             * record this vm_page_t's original location
             * we need this even if it doesn't get moved
             * as an indicator to the rebuild function that
             * we don't have to move it
             */
            mem->next_m = VM_PAGE_PACK_PTR(mem);

            if (vm_pages[compact_target_indx].vm_page_q_state == VM_PAGE_ON_FREE_Q) {
                /*
                 * we've got a hole to fill, so
                 * move this vm_page_t to it's new home
                 */
                vm_pages[compact_target_indx] = *mem;
                mem->vm_page_q_state = VM_PAGE_ON_FREE_Q;

                hibernate_teardown_last_valid_compact_indx = compact_target_indx;
                compact_target_indx++;
            } else
                hibernate_teardown_last_valid_compact_indx = i;
        }
    }
    unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx+1],
                                                         (addr64_t)&vm_pages[vm_pages_count-1], page_list, page_list_wired);
    mark_as_unneeded_pages += unneeded_vm_pages_pages;

    hibernate_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded);

    if (start_of_unneeded) {
        unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired);
        mark_as_unneeded_pages += unneeded_pmap_pages;
    }
    HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages);

    hibernate_rebuild_needed = TRUE;

    return (mark_as_unneeded_pages);
}
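
/*
 * Rough picture of the scheme above: free vm_page_t slots are pulled off the
 * free queues and treated as holes; every in-use vm_page_t encountered after
 * a hole is copied down into it (its original slot is remembered in next_m),
 * so that after the pass the live entries occupy a dense prefix of vm_pages[]
 * ending at hibernate_teardown_last_valid_compact_indx, and everything beyond
 * that prefix can be marked as not needing to be saved in the hibernation
 * image.  hibernate_rebuild_vm_structs() undoes this at wake.
 */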
#endif /* HIBERNATION */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#include <mach_vm_debug.h>
#if	MACH_VM_DEBUG

#include <mach_debug/hash_info.h>
#include <vm/vm_debug.h>

/*
 *	Routine:	vm_page_info
 *	Purpose:
 *		Return information about the global VP table.
 *		Fills the buffer with as much information as possible
 *		and returns the desired size of the buffer.
 *	Conditions:
 *		Nothing locked.  The caller should provide
 *		possibly-pageable memory.
 */

unsigned int
vm_page_info(
	hash_info_bucket_t	*info,
	unsigned int		count)
{
	unsigned int	i;
	lck_spin_t	*bucket_lock;

	if (vm_page_bucket_count < count)
		count = vm_page_bucket_count;

	for (i = 0; i < count; i++) {
		vm_page_bucket_t *bucket = &vm_page_buckets[i];
		unsigned int bucket_count = 0;
		vm_page_t m;

		bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
		lck_spin_lock(bucket_lock);

		for (m = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
		     m != VM_PAGE_NULL;
		     m = (vm_page_t)(VM_PAGE_UNPACK_PTR(m->next_m)))
			bucket_count++;

		lck_spin_unlock(bucket_lock);

		/* don't touch pageable memory while holding locks */
		info[i].hib_count = bucket_count;
	}

	return vm_page_bucket_count;
}
#endif	/* MACH_VM_DEBUG */
#if VM_PAGE_BUCKETS_CHECK
void
vm_page_buckets_check(void)
{
	unsigned int	i;
	vm_page_t	p;
	unsigned int	p_hash;
	vm_page_bucket_t *bucket;
	lck_spin_t	*bucket_lock;

	if (!vm_page_buckets_check_ready) {
		return;
	}

#if HIBERNATION
	if (hibernate_rebuild_needed ||
	    hibernate_rebuild_hash_list) {
		panic("BUCKET_CHECK: hibernation in progress: "
		      "rebuild_needed=%d rebuild_hash_list=%p\n",
		      hibernate_rebuild_needed,
		      hibernate_rebuild_hash_list);
	}
#endif /* HIBERNATION */

#if VM_PAGE_FAKE_BUCKETS
	char *cp;
	for (cp = (char *) vm_page_fake_buckets_start;
	     cp < (char *) vm_page_fake_buckets_end;
	     cp++) {
		if (*cp != 0x5a) {	/* expected fill pattern */
			panic("BUCKET_CHECK: corruption at %p in fake buckets "
			      "[0x%llx:0x%llx]\n",
			      cp,
			      (uint64_t) vm_page_fake_buckets_start,
			      (uint64_t) vm_page_fake_buckets_end);
		}
	}
#endif /* VM_PAGE_FAKE_BUCKETS */

	for (i = 0; i < vm_page_bucket_count; i++) {
		vm_object_t	p_object;

		bucket = &vm_page_buckets[i];
		if (!bucket->page_list) {
			continue;
		}

		bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
		lck_spin_lock(bucket_lock);
		p = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));

		while (p != VM_PAGE_NULL) {
			p_object = VM_PAGE_OBJECT(p);

			if (!p->hashed) {
				panic("BUCKET_CHECK: page %p (%p,0x%llx) "
				      "hash %d in bucket %d at %p "
				      "is not hashed\n",
				      p, p_object, p->offset,
				      p_hash, i, bucket);
			}
			p_hash = vm_page_hash(p_object, p->offset);
			if (p_hash != i) {
				panic("BUCKET_CHECK: corruption in bucket %d "
				      "at %p: page %p object %p offset 0x%llx "
				      "hash %d\n",
				      i, bucket, p, p_object, p->offset,
				      p_hash);
			}
			p = (vm_page_t)(VM_PAGE_UNPACK_PTR(p->next_m));
		}
		lck_spin_unlock(bucket_lock);
	}

//	printf("BUCKET_CHECK: checked buckets\n");
}
#endif /* VM_PAGE_BUCKETS_CHECK */
/*
 * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
 * local queues if they exist... its the only spot in the system where we add pages
 * to those queues...  once on those queues, those pages can only move to one of the
 * global page queues or the free queues... they NEVER move from local q to local q.
 * the 'local' state is stable when vm_page_queues_remove is called since we're behind
 * the global vm_page_queue_lock at this point...  we still need to take the local lock
 * in case this operation is being run on a different CPU then the local queue's identity,
 * but we don't have to worry about the page moving to a global queue or becoming wired
 * while we're grabbing the local lock since those operations would require the global
 * vm_page_queue_lock to be held, and we already own it.
 *
 * this is why its safe to utilze the wire_count field in the vm_page_t as the local_id...
 * 'wired' and local are ALWAYS mutually exclusive conditions.
 */

#if CONFIG_BACKGROUND_QUEUE
void
vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_backgroundq)
#else
void
vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq)
#endif
{
	boolean_t	was_pageable = TRUE;
	vm_object_t	m_object;

	m_object = VM_PAGE_OBJECT(mem);

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	if (mem->vm_page_q_state == VM_PAGE_NOT_ON_Q)
	{
		assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
#if CONFIG_BACKGROUND_QUEUE
		if (remove_from_backgroundq == TRUE) {
			vm_page_remove_from_backgroundq(mem);
		}
		if (mem->vm_page_on_backgroundq) {
			assert(mem->vm_page_backgroundq.next != 0);
			assert(mem->vm_page_backgroundq.prev != 0);
		} else {
			assert(mem->vm_page_backgroundq.next == 0);
			assert(mem->vm_page_backgroundq.prev == 0);
		}
#endif /* CONFIG_BACKGROUND_QUEUE */
		return;
	}

	if (mem->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR)
	{
		assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
#if CONFIG_BACKGROUND_QUEUE
		assert(mem->vm_page_backgroundq.next == 0 &&
		       mem->vm_page_backgroundq.prev == 0 &&
		       mem->vm_page_on_backgroundq == FALSE);
#endif /* CONFIG_BACKGROUND_QUEUE */
		return;
	}
	if (mem->vm_page_q_state == VM_PAGE_IS_WIRED) {
		/*
		 * might put these guys on a list for debugging purposes
		 * if we do, we'll need to remove this assert
		 */
		assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
#if CONFIG_BACKGROUND_QUEUE
		assert(mem->vm_page_backgroundq.next == 0 &&
		       mem->vm_page_backgroundq.prev == 0 &&
		       mem->vm_page_on_backgroundq == FALSE);
#endif /* CONFIG_BACKGROUND_QUEUE */
		return;
	}

	assert(m_object != compressor_object);
	assert(m_object != kernel_object);
	assert(m_object != vm_submap_object);
	assert(!mem->fictitious);

	switch(mem->vm_page_q_state) {

	case VM_PAGE_ON_ACTIVE_LOCAL_Q:
	{
		struct vpl	*lq;

		lq = &vm_page_local_q[mem->local_id].vpl_un.vpl;
		VPL_LOCK(&lq->vpl_lock);
		vm_page_queue_remove(&lq->vpl_queue,
				     mem, vm_page_t, pageq);
		mem->local_id = 0;
		lq->vpl_count--;
		if (m_object->internal) {
			lq->vpl_internal_count--;
		} else {
			lq->vpl_external_count--;
		}
		VPL_UNLOCK(&lq->vpl_lock);
		was_pageable = FALSE;
		break;
	}
	case VM_PAGE_ON_ACTIVE_Q:
	{
		vm_page_queue_remove(&vm_page_queue_active,
				     mem, vm_page_t, pageq);
		vm_page_active_count--;
		break;
	}

	case VM_PAGE_ON_INACTIVE_INTERNAL_Q:
	{
		assert(m_object->internal == TRUE);

		vm_page_inactive_count--;
		vm_page_queue_remove(&vm_page_queue_anonymous,
				     mem, vm_page_t, pageq);
		vm_page_anonymous_count--;
		vm_purgeable_q_advance_all();
		break;
	}

	case VM_PAGE_ON_INACTIVE_EXTERNAL_Q:
	{
		assert(m_object->internal == FALSE);

		vm_page_inactive_count--;
		vm_page_queue_remove(&vm_page_queue_inactive,
				     mem, vm_page_t, pageq);
		vm_purgeable_q_advance_all();
		break;
	}

	case VM_PAGE_ON_INACTIVE_CLEANED_Q:
	{
		assert(m_object->internal == FALSE);

		vm_page_inactive_count--;
		vm_page_queue_remove(&vm_page_queue_cleaned,
				     mem, vm_page_t, pageq);
		vm_page_cleaned_count--;
		break;
	}

	case VM_PAGE_ON_THROTTLED_Q:
	{
		assert(m_object->internal == TRUE);

		vm_page_queue_remove(&vm_page_queue_throttled,
				     mem, vm_page_t, pageq);
		vm_page_throttled_count--;
		was_pageable = FALSE;
		break;
	}

	case VM_PAGE_ON_SPECULATIVE_Q:
	{
		assert(m_object->internal == FALSE);

		vm_page_remque(&mem->pageq);
		vm_page_speculative_count--;
		break;
	}

#if CONFIG_SECLUDED_MEMORY
	case VM_PAGE_ON_SECLUDED_Q:
	{
		vm_page_queue_remove(&vm_page_queue_secluded,
				     mem, vm_page_t, pageq);
		vm_page_secluded_count--;
		if (m_object == VM_OBJECT_NULL) {
			vm_page_secluded_count_free--;
			was_pageable = FALSE;
		} else {
			assert(!m_object->internal);
			vm_page_secluded_count_inuse--;
			was_pageable = FALSE;
//			was_pageable = TRUE;
		}
		break;
	}
#endif /* CONFIG_SECLUDED_MEMORY */

	default:
	{
		/*
		 *	if (mem->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q)
		 *		NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue...
		 *		the caller is responsible for determing if the page is on that queue, and if so, must
		 *		either first remove it (it needs both the page queues lock and the object lock to do
		 *		this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove
		 *
		 *	we also don't expect to encounter VM_PAGE_ON_FREE_Q, VM_PAGE_ON_FREE_LOCAL_Q, VM_PAGE_ON_FREE_LOPAGE_Q
		 *	or any of the undefined states
		 */
		panic("vm_page_queues_remove - bad page q_state (%p, %d)\n", mem, mem->vm_page_q_state);
		break;
	}
	}
	VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
	mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;

#if CONFIG_BACKGROUND_QUEUE
	if (remove_from_backgroundq == TRUE)
		vm_page_remove_from_backgroundq(mem);
#endif
	if (was_pageable) {
		if (m_object->internal) {
			vm_page_pageable_internal_count--;
		} else {
			vm_page_pageable_external_count--;
		}
	}
}
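
/*
 * Note: was_pageable tracks whether the queue the page came off of counts
 * toward the pageable totals; local, throttled and secluded pages do not,
 * which is why those cases clear it before the pageable internal/external
 * counters are adjusted at the end of vm_page_queues_remove().
 */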
void
vm_page_remove_internal(vm_page_t page)
{
	vm_object_t __object = VM_PAGE_OBJECT(page);
	if (page == __object->memq_hint) {
		vm_page_t		__new_hint;
		vm_page_queue_entry_t	__qe;
		__qe = (vm_page_queue_entry_t)vm_page_queue_next(&page->listq);
		if (vm_page_queue_end(&__object->memq, __qe)) {
			__qe = (vm_page_queue_entry_t)vm_page_queue_prev(&page->listq);
			if (vm_page_queue_end(&__object->memq, __qe)) {
				__qe = NULL;
			}
		}
		__new_hint = (vm_page_t)((uintptr_t) __qe);
		__object->memq_hint = __new_hint;
	}
	vm_page_queue_remove(&__object->memq, page, vm_page_t, listq);
#if CONFIG_SECLUDED_MEMORY
	if (__object->eligible_for_secluded) {
		vm_page_secluded.eligible_for_secluded--;
	}
#endif /* CONFIG_SECLUDED_MEMORY */
}
void
vm_page_enqueue_inactive(vm_page_t mem, boolean_t first)
{
	vm_object_t	m_object;

	m_object = VM_PAGE_OBJECT(mem);

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert(!mem->fictitious);
	assert(!mem->laundry);
	assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
	vm_page_check_pageable_safe(mem);

#if CONFIG_SECLUDED_MEMORY
	if (secluded_for_filecache &&
	    vm_page_secluded_target != 0 &&
	    num_tasks_can_use_secluded_mem == 0 &&
	    m_object->eligible_for_secluded &&
	    secluded_aging_policy == SECLUDED_AGING_FIFO) {
		mem->vm_page_q_state = VM_PAGE_ON_SECLUDED_Q;
		vm_page_queue_enter(&vm_page_queue_secluded, mem,
				    vm_page_t, pageq);
		vm_page_secluded_count++;
		vm_page_secluded_count_inuse++;
		assert(!m_object->internal);
//		vm_page_pageable_external_count++;
		return;
	}
#endif /* CONFIG_SECLUDED_MEMORY */

	if (m_object->internal) {
		mem->vm_page_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;

		if (first == TRUE)
			vm_page_queue_enter_first(&vm_page_queue_anonymous, mem, vm_page_t, pageq);
		else
			vm_page_queue_enter(&vm_page_queue_anonymous, mem, vm_page_t, pageq);

		vm_page_anonymous_count++;
		vm_page_pageable_internal_count++;
	} else {
		mem->vm_page_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;

		if (first == TRUE)
			vm_page_queue_enter_first(&vm_page_queue_inactive, mem, vm_page_t, pageq);
		else
			vm_page_queue_enter(&vm_page_queue_inactive, mem, vm_page_t, pageq);

		vm_page_pageable_external_count++;
	}
	vm_page_inactive_count++;
	token_new_pagecount++;

#if CONFIG_BACKGROUND_QUEUE
	if (mem->vm_page_in_background)
		vm_page_add_to_backgroundq(mem, FALSE);
#endif
}
void
vm_page_enqueue_active(vm_page_t mem, boolean_t first)
{
	vm_object_t	m_object;

	m_object = VM_PAGE_OBJECT(mem);

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert(!mem->fictitious);
	assert(!mem->laundry);
	assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
	vm_page_check_pageable_safe(mem);

	mem->vm_page_q_state = VM_PAGE_ON_ACTIVE_Q;
	if (first == TRUE)
		vm_page_queue_enter_first(&vm_page_queue_active, mem, vm_page_t, pageq);
	else
		vm_page_queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
	vm_page_active_count++;

	if (m_object->internal) {
		vm_page_pageable_internal_count++;
	} else {
		vm_page_pageable_external_count++;
	}

#if CONFIG_BACKGROUND_QUEUE
	if (mem->vm_page_in_background)
		vm_page_add_to_backgroundq(mem, FALSE);
#endif
}
/*
 * Pages from special kernel objects shouldn't
 * be placed on pageable queues.
 */
void
vm_page_check_pageable_safe(vm_page_t page)
{
	vm_object_t	page_object;

	page_object = VM_PAGE_OBJECT(page);

	if (page_object == kernel_object) {
		panic("vm_page_check_pageable_safe: trying to add page "
			"from kernel object (%p) to pageable queue", kernel_object);
	}

	if (page_object == compressor_object) {
		panic("vm_page_check_pageable_safe: trying to add page "
			"from compressor object (%p) to pageable queue", compressor_object);
	}

	if (page_object == vm_submap_object) {
		panic("vm_page_check_pageable_safe: trying to add page "
			"from submap object (%p) to pageable queue", vm_submap_object);
	}
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 * wired page diagnose
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#include <libkern/OSKextLibPrivate.h>

vm_allocation_site_t *
vm_allocation_sites[VM_KERN_MEMORY_COUNT];

vm_tag_t
vm_tag_bt(void)
{
    uintptr_t* frameptr;
    uintptr_t* frameptr_next;
    uintptr_t retaddr;
    uintptr_t kstackb, kstackt;
    const vm_allocation_site_t * site = NULL;
    thread_t cthread;

    cthread = current_thread();
    if (__improbable(cthread == NULL)) return VM_KERN_MEMORY_OSFMK;

    kstackb = cthread->kernel_stack;
    kstackt = kstackb + kernel_stack_size;

    /* Load stack frame pointer (EBP on x86) into frameptr */
    frameptr = __builtin_frame_address(0);

    while (frameptr != NULL)
    {
        /* Verify thread stack bounds */
        if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) break;

        /* Next frame pointer is pointed to by the previous one */
        frameptr_next = (uintptr_t*) *frameptr;

        /* Pull return address from one spot above the frame pointer */
        retaddr = *(frameptr + 1);

        if ((retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top))
        {
            site = OSKextGetAllocationSiteForCaller(retaddr);
            break;
        }

        frameptr = frameptr_next;
    }
    return (site ? site->tag : VM_KERN_MEMORY_NONE);
}
static uint64_t free_tag_bits[256/64];

void
vm_tag_alloc_locked(vm_allocation_site_t * site)
{
    vm_tag_t tag;
    uint64_t avail;
    uint64_t idx;

    if (site->tag) return;

    idx = 0;
    while (TRUE)
    {
        avail = free_tag_bits[idx];
        if (avail)
        {
            tag = __builtin_clzll(avail);
            avail &= ~(1ULL << (63 - tag));
            free_tag_bits[idx] = avail;
            tag += (idx << 6);
            break;
        }
        idx++;
        if (idx >= (sizeof(free_tag_bits) / sizeof(free_tag_bits[0])))
        {
            tag = VM_KERN_MEMORY_ANY;
            break;
        }
    }
    site->tag = tag;
    if (VM_KERN_MEMORY_ANY != tag)
    {
        assert(!vm_allocation_sites[tag]);
        vm_allocation_sites[tag] = site;
    }
}

static void
vm_tag_free_locked(vm_tag_t tag)
{
    uint64_t avail;
    uint32_t idx;
    uint64_t bit;

    if (VM_KERN_MEMORY_ANY == tag) return;

    idx = (tag >> 6);
    avail = free_tag_bits[idx];
    tag &= 63;
    bit = (1ULL << (63 - tag));
    assert(!(avail & bit));
    free_tag_bits[idx] = (avail | bit);
}

/* mark every dynamically assignable tag as free */
static void
vm_tag_init(void)
{
    vm_tag_t tag;

    for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++)
    {
        vm_tag_free_locked(tag);
    }
}

vm_tag_t
vm_tag_alloc(vm_allocation_site_t * site)
{
    vm_tag_t tag;

    if (VM_TAG_BT & site->flags)
    {
        tag = vm_tag_bt();
        if (VM_KERN_MEMORY_NONE != tag) return (tag);
    }

    if (!site->tag)
    {
        lck_spin_lock(&vm_allocation_sites_lock);
        vm_tag_alloc_locked(site);
        lck_spin_unlock(&vm_allocation_sites_lock);
    }

    return (site->tag);
}
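
/*
 * Quick sketch of the tag bitmap above (illustrative): free_tag_bits holds
 * 256 bits, one per possible tag, with bit (63 - (tag & 63)) of word
 * (tag >> 6) set while that tag is free.  Allocation scans for the first
 * non-zero word and takes its highest set bit via __builtin_clzll(); e.g. if
 * word 0 currently holds 0x00F0000000000000, tags 8..11 are free and the
 * next tag handed out is 8.
 */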
static void
vm_page_count_object(mach_memory_info_t * sites, unsigned int __unused num_sites, vm_object_t object)
{
    if (!object->wired_page_count) return;
    if (object != kernel_object)
    {
        assert(object->wire_tag < num_sites);
        sites[object->wire_tag].size += ptoa_64(object->wired_page_count);
    }
}

typedef void (*vm_page_iterate_proc)(mach_memory_info_t * sites,
                                     unsigned int num_sites, vm_object_t object);

static void
vm_page_iterate_purgeable_objects(mach_memory_info_t * sites, unsigned int num_sites,
                                  vm_page_iterate_proc proc, purgeable_q_t queue,
                                  int group)
{
    vm_object_t object;

    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq))
    {
        proc(sites, num_sites, object);
    }
}

static void
vm_page_iterate_objects(mach_memory_info_t * sites, unsigned int num_sites,
                        vm_page_iterate_proc proc)
{
    purgeable_q_t   volatile_q;
    queue_head_t  * nonvolatile_q;
    vm_object_t     object;
    int             group;

    lck_spin_lock(&vm_objects_wired_lock);
    queue_iterate(&vm_objects_wired,
                  object,
                  vm_object_t,
                  objq)
    {
        proc(sites, num_sites, object);
    }
    lck_spin_unlock(&vm_objects_wired_lock);

    lck_mtx_lock(&vm_purgeable_queue_lock);
    nonvolatile_q = &purgeable_nonvolatile_queue;
    for (object = (vm_object_t) queue_first(nonvolatile_q);
         !queue_end(nonvolatile_q, (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq))
    {
        proc(sites, num_sites, object);
    }

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
    vm_page_iterate_purgeable_objects(sites, num_sites, proc, volatile_q, 0);

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
    {
        vm_page_iterate_purgeable_objects(sites, num_sites, proc, volatile_q, group);
    }

    volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
    for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
    {
        vm_page_iterate_purgeable_objects(sites, num_sites, proc, volatile_q, group);
    }
    lck_mtx_unlock(&vm_purgeable_queue_lock);
}
static uint64_t
process_account(mach_memory_info_t * sites, unsigned int __unused num_sites, uint64_t zones_collectable_bytes)
{
    uint64_t               found = 0;
    unsigned int           idx;
    vm_allocation_site_t * site;

    assert(num_sites >= VM_KERN_MEMORY_COUNT);

    for (idx = 0; idx < VM_KERN_MEMORY_COUNT; idx++)
    {
        found += sites[idx].size;
        if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC)
        {
            sites[idx].site   = idx;
            sites[idx].flags |= VM_KERN_SITE_TAG;
            if (VM_KERN_MEMORY_ZONE == idx)
            {
                sites[idx].flags |= VM_KERN_SITE_HIDE;
                sites[idx].collectable_bytes = zones_collectable_bytes;
            } else sites[idx].flags |= VM_KERN_SITE_WIRED;
            continue;
        }
        lck_spin_lock(&vm_allocation_sites_lock);
        if ((site = vm_allocation_sites[idx]))
        {
            if (sites[idx].size)
            {
                sites[idx].flags |= VM_KERN_SITE_WIRED;
                if (VM_TAG_KMOD == (VM_KERN_SITE_TYPE & site->flags))
                {
                    sites[idx].site   = OSKextGetKmodIDForSite(site, NULL, 0);
                    sites[idx].flags |= VM_KERN_SITE_KMOD;
                }
                else
                {
                    sites[idx].site   = VM_KERNEL_UNSLIDE(site);
                    sites[idx].flags |= VM_KERN_SITE_KERNEL;
                }
                site = NULL;
            }
            else
            {
#if 1
                site = NULL;
#else
                /* this code would free a site with no allocations but can race a new
                 * allocation being made */
                vm_tag_free_locked(site->tag);
                site->tag = VM_KERN_MEMORY_NONE;
                vm_allocation_sites[idx] = NULL;
                if (!(VM_TAG_UNLOAD & site->flags)) site = NULL;
#endif
            }
        }
        lck_spin_unlock(&vm_allocation_sites_lock);
        if (site) OSKextFreeSite(site);
    }

    return (found);
}
kern_return_t
vm_page_diagnose(mach_memory_info_t * sites, unsigned int num_sites, uint64_t zones_collectable_bytes)
{
    enum                 { kMaxKernelDepth = 1 };
    vm_map_t             maps   [kMaxKernelDepth];
    vm_map_entry_t       entries[kMaxKernelDepth];
    vm_map_t             map;
    vm_map_entry_t       entry;
    vm_object_offset_t   offset;
    vm_page_t            page;
    int                  stackIdx, count;
    uint64_t             wired_size;
    uint64_t             wired_managed_size;
    uint64_t             wired_reserved_size;
    mach_memory_info_t * counts;

    bzero(sites, num_sites * sizeof(mach_memory_info_t));

    if (!vm_page_wire_count_initial) return (KERN_ABORTED);

    vm_page_iterate_objects(sites, num_sites, &vm_page_count_object);

    wired_size          = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count);
    wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count);
    wired_managed_size  = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial);

    assert(num_sites >= (VM_KERN_MEMORY_COUNT + VM_KERN_COUNTER_COUNT));
    counts = &sites[VM_KERN_MEMORY_COUNT];

#define SET_COUNT(xcount, xsize, xflags)			\
    counts[xcount].site  = (xcount);				\
    counts[xcount].size  = (xsize);				\
    counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags;

    SET_COUNT(VM_KERN_COUNT_MANAGED,       ptoa_64(vm_page_pages),        0);
    SET_COUNT(VM_KERN_COUNT_WIRED,         wired_size,                    0);
    SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED, wired_managed_size,            0);
    SET_COUNT(VM_KERN_COUNT_RESERVED,      wired_reserved_size,           VM_KERN_SITE_WIRED);
    SET_COUNT(VM_KERN_COUNT_STOLEN,        ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED);
    SET_COUNT(VM_KERN_COUNT_LOPAGE,        ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED);

#define SET_MAP(xcount, xsize, xfree, xlargest)	\
    counts[xcount].site    = (xcount);		\
    counts[xcount].size    = (xsize);		\
    counts[xcount].free    = (xfree);		\
    counts[xcount].largest = (xlargest);	\
    counts[xcount].flags   = VM_KERN_SITE_COUNTER;

    vm_map_size_t map_size, map_free, map_largest;

    vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest);
    SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest);

    vm_map_sizes(zone_map, &map_size, &map_free, &map_largest);
    SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest);

    vm_map_sizes(kalloc_map, &map_size, &map_free, &map_largest);
    SET_MAP(VM_KERN_COUNT_MAP_KALLOC, map_size, map_free, map_largest);

    map = kernel_map;
    stackIdx = 0;

    for (entry = map->hdr.links.next; map; entry = entry->links.next)
    {
        if (entry->is_sub_map)
        {
            assert(stackIdx < kMaxKernelDepth);
            maps[stackIdx]    = map;
            entries[stackIdx] = entry;
            stackIdx++;

            map = VME_SUBMAP(entry);
            entry = (vm_map_entry_t) &map->hdr.links;
            continue;
        }
        if (VME_OBJECT(entry) == kernel_object)
        {
            count = 0;
            vm_object_lock(VME_OBJECT(entry));
            for (offset = entry->links.start; offset < entry->links.end; offset += page_size)
            {
                page = vm_page_lookup(VME_OBJECT(entry), offset);
                if (page && VM_PAGE_WIRED(page)) count++;
            }
            vm_object_unlock(VME_OBJECT(entry));

            if (count)
            {
                assert(VME_ALIAS(entry) < num_sites);
                sites[VME_ALIAS(entry)].size += ptoa_64(count);
            }
        }
        while (map && (entry == vm_map_last_entry(map)))
        {
            if (!stackIdx) map = NULL;
            else
            {
                --stackIdx;
                map = maps[stackIdx];
                entry = entries[stackIdx];
            }
        }
    }

    process_account(sites, num_sites, zones_collectable_bytes);

    return (KERN_SUCCESS);
}
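
/*
 * Note on the walk above: the kernel_map is traversed iteratively with a tiny
 * explicit stack (kMaxKernelDepth deep) so that entries backed by submaps can
 * be descended into without recursion.  Only pages living in kernel_object
 * are counted here, since pages in other wired objects were already
 * attributed to their tags by vm_page_iterate_objects().
 */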
uint32_t
vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen)
{
    vm_allocation_site_t * site;
    uint32_t               kmodId = 0;

    lck_spin_lock(&vm_allocation_sites_lock);
    if ((site = vm_allocation_sites[tag]))
    {
        if (VM_TAG_KMOD == (VM_KERN_SITE_TYPE & site->flags))
        {
            kmodId = OSKextGetKmodIDForSite(site, name, namelen);
        }
    }
    lck_spin_unlock(&vm_allocation_sites_lock);

    return (kmodId);
}
#if DEBUG || DEVELOPMENT

#define vm_tag_set_lock(set)    lck_spin_lock(&set->lock)
#define vm_tag_set_unlock(set)  lck_spin_unlock(&set->lock)

void
vm_tag_set_init(vm_tag_set_t set, uint32_t count)
{
    lck_spin_init(&set->lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
    bzero(&set->entries, count * sizeof(struct vm_tag_set_entry));
}

kern_return_t
vm_tag_set_enter(vm_tag_set_t set, uint32_t count, vm_tag_t tag)
{
    kern_return_t kr;
    uint32_t      idx, free;

    vm_tag_set_lock(set);

    assert(tag != VM_KERN_MEMORY_NONE);

    kr = KERN_NO_SPACE;
    free = -1U;
    for (idx = 0; idx < count; idx++)
    {
        if (tag == set->entries[idx].tag)
        {
            set->entries[idx].count++;
            kr = KERN_SUCCESS;
            break;
        }
        if ((free == -1U) && !set->entries[idx].count) free = idx;
    }

    if ((KERN_SUCCESS != kr) && (free != -1U))
    {
        set->entries[free].tag = tag;
        set->entries[free].count = 1;
        kr = KERN_SUCCESS;
    }

    vm_tag_set_unlock(set);

    return (kr);
}

kern_return_t
vm_tag_set_remove(vm_tag_set_t set, uint32_t count, vm_tag_t tag, vm_tag_t * new_tagp)
{
    kern_return_t kr;
    uint32_t      idx;
    vm_tag_t      new_tag;

    assert(tag != VM_KERN_MEMORY_NONE);
    new_tag = VM_KERN_MEMORY_NONE;
    vm_tag_set_lock(set);

    kr = KERN_NOT_IN_SET;
    for (idx = 0; idx < count; idx++)
    {
        if ((tag != VM_KERN_MEMORY_NONE)
            && (tag == set->entries[idx].tag)
            && set->entries[idx].count)
        {
            set->entries[idx].count--;
            kr = KERN_SUCCESS;
            if (set->entries[idx].count)
            {
                break;
            }
            if (!new_tagp) break;
            tag = VM_KERN_MEMORY_NONE;
        }

        if (set->entries[idx].count && (VM_KERN_MEMORY_NONE == new_tag))
        {
            new_tag = set->entries[idx].tag;
            if (VM_KERN_MEMORY_NONE == tag) break;
        }
    }

    vm_tag_set_unlock(set);
    if (new_tagp) *new_tagp = new_tag;

    return (kr);
}

#endif /* DEBUG || DEVELOPMENT */