#include <debug.h>
#include <libkern/OSAtomic.h>
+#include <libkern/OSDebug.h>
#include <mach/clock_types.h>
#include <mach/vm_prot.h>
#include <mach/sdt.h>
#include <kern/counters.h>
#include <kern/sched_prim.h>
+#include <kern/policy_internal.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/xpr.h>
+#include <kern/ledger.h>
#include <vm/pmap.h>
#include <vm/vm_init.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h> /* kernel_memory_allocate() */
#include <kern/misc_protos.h>
#include <zone_debug.h>
+#include <mach_debug/zone_info.h>
#include <vm/cpm.h>
-#include <ppc/mappings.h> /* (BRINGUP) */
-#include <pexpert/pexpert.h> /* (BRINGUP) */
+#include <pexpert/pexpert.h>
+#include <san/kasan.h>
#include <vm/vm_protos.h>
#include <vm/memory_object.h>
#include <vm/vm_purgeable_internal.h>
+#include <vm/vm_compressor.h>
+
+#if CONFIG_PHANTOM_CACHE
+#include <vm/vm_phantom_cache.h>
+#endif
#include <IOKit/IOHibernatePrivate.h>
+#include <sys/kdebug.h>
-#if CONFIG_EMBEDDED
-#include <sys/kern_memorystatus.h>
-#endif
-#include <sys/kdebug.h>
+char vm_page_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
+char vm_page_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
+char vm_page_non_speculative_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
+char vm_page_active_or_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
+#if CONFIG_SECLUDED_MEMORY
+struct vm_page_secluded_data vm_page_secluded;
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+boolean_t hibernate_cleaning_in_progress = FALSE;
boolean_t vm_page_free_verify = TRUE;
-uint_t vm_lopage_free_count = 0;
-uint_t vm_lopage_free_limit = 0;
-uint_t vm_lopage_lowater = 0;
+uint32_t vm_lopage_free_count = 0;
+uint32_t vm_lopage_free_limit = 0;
+uint32_t vm_lopage_lowater = 0;
boolean_t vm_lopage_refill = FALSE;
boolean_t vm_lopage_needed = FALSE;
__private_extern__ void vm_page_init_lck_grp(void);
-static void vm_page_free_prepare(vm_page_t page);
+static void vm_page_free_prepare(vm_page_t page);
+static vm_page_t vm_page_grab_fictitious_common(ppnum_t phys_addr);
+static void vm_tag_init(void);
+uint64_t vm_min_kernel_and_kext_address = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
+uint32_t vm_packed_from_vm_pages_array_mask = VM_PACKED_FROM_VM_PAGES_ARRAY;
+uint32_t vm_packed_pointer_shift = VM_PACKED_POINTER_SHIFT;
/*
* Associated with page of user-allocatable memory is a
vm_offset_t virtual_space_start;
vm_offset_t virtual_space_end;
-int vm_page_pages;
+uint32_t vm_page_pages;
/*
* The vm_page_lookup() routine, which provides for fast
* or VP, table.]
*/
typedef struct {
- vm_page_t pages;
+ vm_page_packed_t page_list;
#if MACH_PAGE_HASH_STATS
int cur_count; /* current count */
int hi_count; /* high water mark */
uint32_t vm_page_bucket_hash; /* Basic bucket hash */
unsigned int vm_page_bucket_lock_count = 0; /* How big is array of locks? */
+#ifndef VM_TAG_ACTIVE_UPDATE
+#error VM_TAG_ACTIVE_UPDATE
+#endif
+#ifndef VM_MAX_TAG_ZONES
+#error VM_MAX_TAG_ZONES
+#endif
+
+boolean_t vm_tag_active_update = VM_TAG_ACTIVE_UPDATE;
lck_spin_t *vm_page_bucket_locks;
+lck_spin_t vm_objects_wired_lock;
+lck_spin_t vm_allocation_sites_lock;
+
+vm_allocation_site_t vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC + 1];
+vm_allocation_site_t * vm_allocation_sites[VM_MAX_TAG_VALUE];
+#if VM_MAX_TAG_ZONES
+vm_allocation_zone_total_t ** vm_allocation_zone_totals;
+#endif /* VM_MAX_TAG_ZONES */
+
+vm_tag_t vm_allocation_tag_highest;
+
+#if VM_PAGE_BUCKETS_CHECK
+boolean_t vm_page_buckets_check_ready = FALSE;
+#if VM_PAGE_FAKE_BUCKETS
+vm_page_bucket_t *vm_page_fake_buckets; /* decoy buckets */
+vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
#if MACH_PAGE_HASH_STATS
* module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
* constants.
*/
+#if defined(__arm__) || defined(__arm64__)
+vm_size_t page_size;
+vm_size_t page_mask;
+int page_shift;
+#else
vm_size_t page_size = PAGE_SIZE;
vm_size_t page_mask = PAGE_MASK;
int page_shift = PAGE_SHIFT;
+#endif
/*
* Resident page structures are initialized from
struct vm_page vm_page_template;
vm_page_t vm_pages = VM_PAGE_NULL;
+vm_page_t vm_page_array_beginning_addr;
+vm_page_t vm_page_array_ending_addr;
+vm_page_t vm_page_array_boundary;
+
unsigned int vm_pages_count = 0;
ppnum_t vm_page_lowest = 0;
unsigned int vm_colors;
unsigned int vm_color_mask; /* mask is == (vm_colors-1) */
unsigned int vm_cache_geometry_colors = 0; /* set by hw dependent code during startup */
-queue_head_t vm_page_queue_free[MAX_COLORS];
-vm_page_t vm_page_queue_fictitious;
+unsigned int vm_free_magazine_refill_limit = 0;
+
+
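+/*
+ * Each free-queue head is aligned to VM_PACKED_POINTER_ALIGNMENT so that
+ * packed pointers referencing it (which drop the low-order bits) stay valid.
+ */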
+struct vm_page_queue_free_head {
+ vm_page_queue_head_t qhead;
+} __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+
+struct vm_page_queue_free_head vm_page_queue_free[MAX_COLORS];
+
+
unsigned int vm_page_free_wanted;
unsigned int vm_page_free_wanted_privileged;
+#if CONFIG_SECLUDED_MEMORY
+unsigned int vm_page_free_wanted_secluded;
+#endif /* CONFIG_SECLUDED_MEMORY */
unsigned int vm_page_free_count;
-unsigned int vm_page_fictitious_count;
-
-unsigned int vm_page_free_count_minimum; /* debugging */
/*
* Occasionally, the virtual memory system uses
* These page structures are allocated the way
* most other kernel structures are.
*/
+zone_t vm_page_array_zone;
zone_t vm_page_zone;
vm_locks_array_t vm_page_locks;
decl_lck_mtx_data(,vm_page_alloc_lock)
+lck_mtx_ext_t vm_page_alloc_lock_ext;
+
unsigned int io_throttle_zero_fill;
unsigned int vm_page_local_q_count = 0;
unsigned int vm_page_local_q_hard_limit = 500;
struct vplq *vm_page_local_q = NULL;
+/* N.B. Guard and fictitious pages must not
+ * be assigned a zero phys_page value.
+ */
/*
* Fictitious pages don't have a physical address,
* but we must initialize phys_page to something.
* For debugging, this should be a strange value
* that the pmap module can recognize in assertions.
*/
-ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;
+const ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;
/*
* Guard pages are not accessible so they don't
* we don't use a real physical page with that
* physical address.
*/
-ppnum_t vm_page_guard_addr = (ppnum_t) -2;
+const ppnum_t vm_page_guard_addr = (ppnum_t) -2;
/*
* Resident page structures are also chained on
* system (pageout daemon). These queues are
* defined here, but are shared by the pageout
* module. The inactive queue is broken into
- * inactive and zf for convenience as the
+ * file backed and anonymous for convenience as the
 * pageout daemon often assigns a higher
- * affinity to zf pages
+ * importance to anonymous pages (less likely to be picked)
*/
-queue_head_t vm_page_queue_active;
-queue_head_t vm_page_queue_inactive;
-queue_head_t vm_page_queue_zf; /* inactive memory queue for zero fill */
-queue_head_t vm_page_queue_throttled;
+vm_page_queue_head_t vm_page_queue_active __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+vm_page_queue_head_t vm_page_queue_inactive __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+#if CONFIG_SECLUDED_MEMORY
+vm_page_queue_head_t vm_page_queue_secluded __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+#endif /* CONFIG_SECLUDED_MEMORY */
+vm_page_queue_head_t vm_page_queue_anonymous __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); /* inactive memory queue for anonymous pages */
+vm_page_queue_head_t vm_page_queue_throttled __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+
+queue_head_t vm_objects_wired;
+
+#if CONFIG_BACKGROUND_QUEUE
+vm_page_queue_head_t vm_page_queue_background __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+uint32_t vm_page_background_target;
+uint32_t vm_page_background_count;
+uint64_t vm_page_background_promoted_count;
+
+uint32_t vm_page_background_internal_count;
+uint32_t vm_page_background_external_count;
+
+uint32_t vm_page_background_mode;
+uint32_t vm_page_background_exclude_external;
+#endif
unsigned int vm_page_active_count;
unsigned int vm_page_inactive_count;
+#if CONFIG_SECLUDED_MEMORY
+unsigned int vm_page_secluded_count;
+unsigned int vm_page_secluded_count_free;
+unsigned int vm_page_secluded_count_inuse;
+#endif /* CONFIG_SECLUDED_MEMORY */
+unsigned int vm_page_anonymous_count;
unsigned int vm_page_throttled_count;
unsigned int vm_page_speculative_count;
+
unsigned int vm_page_wire_count;
+unsigned int vm_page_wire_count_on_boot = 0;
+unsigned int vm_page_stolen_count;
unsigned int vm_page_wire_count_initial;
+unsigned int vm_page_pages_initial;
unsigned int vm_page_gobble_count = 0;
-unsigned int vm_page_wire_count_warning = 0;
-unsigned int vm_page_gobble_count_warning = 0;
+
+#define VM_PAGE_WIRE_COUNT_WARNING 0
+#define VM_PAGE_GOBBLE_COUNT_WARNING 0
unsigned int vm_page_purgeable_count = 0; /* # of pages purgeable now */
unsigned int vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */
uint64_t vm_page_purged_count = 0; /* total count of purged pages */
+unsigned int vm_page_xpmapped_external_count = 0;
+unsigned int vm_page_external_count = 0;
+unsigned int vm_page_internal_count = 0;
+unsigned int vm_page_pageable_external_count = 0;
+unsigned int vm_page_pageable_internal_count = 0;
+
#if DEVELOPMENT || DEBUG
unsigned int vm_page_speculative_recreated = 0;
unsigned int vm_page_speculative_created = 0;
unsigned int vm_page_speculative_used = 0;
#endif
+vm_page_queue_head_t vm_page_queue_cleaned __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+
+unsigned int vm_page_cleaned_count = 0;
+unsigned int vm_pageout_enqueued_cleaned = 0;
+
uint64_t max_valid_dma_address = 0xffffffffffffffffULL;
ppnum_t max_valid_low_ppnum = 0xffffffff;
unsigned int vm_page_free_target = 0;
unsigned int vm_page_free_min = 0;
unsigned int vm_page_throttle_limit = 0;
-uint32_t vm_page_creation_throttle = 0;
unsigned int vm_page_inactive_target = 0;
+#if CONFIG_SECLUDED_MEMORY
+unsigned int vm_page_secluded_target = 0;
+#endif /* CONFIG_SECLUDED_MEMORY */
+unsigned int vm_page_anonymous_min = 0;
unsigned int vm_page_inactive_min = 0;
unsigned int vm_page_free_reserved = 0;
unsigned int vm_page_throttle_count = 0;
+
/*
* The VM system has a couple of heuristics for deciding
* that pages are "uninteresting" and should be placed
void
vm_set_page_size(void)
{
- page_mask = page_size - 1;
+ page_size = PAGE_SIZE;
+ page_mask = PAGE_MASK;
+ page_shift = PAGE_SHIFT;
if ((page_mask & page_size) != 0)
panic("vm_set_page_size: page size not a power of two");
break;
}
+#if defined (__x86_64__)
+
+#define MAX_CLUMP_SIZE 16
+#define DEFAULT_CLUMP_SIZE 4
+
+unsigned int vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;
+
+#if DEVELOPMENT || DEBUG
+unsigned long vm_clump_stats[MAX_CLUMP_SIZE+1];
+unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;
+
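+/* account for 'c' pages taken from a single clump: bucketed by count in vm_clump_stats, plus the running allocation total */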
+static inline void vm_clump_update_stats(unsigned int c) {
+ assert(c<=vm_clump_size);
+ if(c>0 && c<=vm_clump_size) vm_clump_stats[c]+=c;
+ vm_clump_allocs+=c;
+}
+#endif /* if DEVELOPMENT || DEBUG */
+
+/* Called once to setup the VM clump knobs */
+static void
+vm_page_setup_clump( void )
+{
+ unsigned int override, n;
+
+ vm_clump_size = DEFAULT_CLUMP_SIZE;
+ if ( PE_parse_boot_argn("clump_size", &override, sizeof (override)) ) vm_clump_size = override;
+
+ if(vm_clump_size > MAX_CLUMP_SIZE) panic("vm_page_setup_clump:: clump_size is too large!");
+ if(vm_clump_size < 1) panic("vm_page_setup_clump:: clump_size must be >= 1");
+ if((vm_clump_size & (vm_clump_size-1)) != 0) panic("vm_page_setup_clump:: clump_size must be a power of 2");
+
+ vm_clump_promote_threshold = vm_clump_size;
+ vm_clump_mask = vm_clump_size - 1;
+ for(vm_clump_shift=0, n=vm_clump_size; n>1; n>>=1, vm_clump_shift++);
+
+#if DEVELOPMENT || DEBUG
+ bzero(vm_clump_stats, sizeof(vm_clump_stats));
+ vm_clump_allocs = vm_clump_inserts = vm_clump_inrange = vm_clump_promotes = 0;
+#endif /* if DEVELOPMENT || DEBUG */
+}
+
+#endif /* #if defined (__x86_64__) */
+
+#define COLOR_GROUPS_TO_STEAL 4
/* Called once during startup, once the cache geometry is known.
*/
vm_page_set_colors( void )
{
unsigned int n, override;
-
+
+#if defined (__x86_64__)
+ /* adjust #colors because we need to color outside the clump boundary */
+ vm_cache_geometry_colors >>= vm_clump_shift;
+#endif
if ( PE_parse_boot_argn("colors", &override, sizeof (override)) ) /* colors specified as a boot-arg? */
n = override;
else if ( vm_cache_geometry_colors ) /* do we know what the cache geometry is? */
/* the count must be a power of 2 */
if ( ( n & (n - 1)) != 0 )
- panic("vm_page_set_colors");
+ n = DEFAULT_COLORS; /* use default if all else fails */
vm_colors = n;
vm_color_mask = n - 1;
+
+ vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL;
+
+#if defined (__x86_64__)
+ /* adjust for reduction in colors due to clumping and multiple cores */
+ if (real_ncpus)
+ vm_free_magazine_refill_limit *= (vm_clump_size * real_ncpus);
+#endif
}
lck_grp_init(&vm_page_lck_grp_alloc, "vm_page_alloc", &vm_page_lck_grp_attr);
lck_grp_init(&vm_page_lck_grp_bucket, "vm_page_bucket", &vm_page_lck_grp_attr);
lck_attr_setdefault(&vm_page_lck_attr);
+ lck_mtx_init_ext(&vm_page_alloc_lock, &vm_page_alloc_lock_ext, &vm_page_lck_grp_alloc, &vm_page_lck_attr);
+
+ vm_compressor_init_locks();
}
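+/* round X up to the next power of two (X > 1) via count-leading-zeros */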
+#define ROUNDUP_NEXTP2(X) (1U << (32 - __builtin_clz((X) - 1)))
+
void
vm_page_init_local_q()
{
* no point in this for a uni-processor system
*/
if (num_cpus >= 2) {
- t_local_q = (struct vplq *)kalloc(num_cpus * sizeof(struct vplq));
+#if KASAN
+ /* KASAN breaks the expectation of a size-aligned object by adding a
+ * redzone, so explicitly align. */
+ t_local_q = (struct vplq *)kalloc(num_cpus * sizeof(struct vplq) + VM_PACKED_POINTER_ALIGNMENT);
+ t_local_q = (void *)(((uintptr_t)t_local_q + (VM_PACKED_POINTER_ALIGNMENT-1)) & ~(VM_PACKED_POINTER_ALIGNMENT-1));
+#else
+ /* round the size up to the nearest power of two */
+ t_local_q = (struct vplq *)kalloc(ROUNDUP_NEXTP2(num_cpus * sizeof(struct vplq)));
+#endif
for (i = 0; i < num_cpus; i++) {
struct vpl *lq;
lq = &t_local_q[i].vpl_un.vpl;
VPL_LOCK_INIT(lq, &vm_page_lck_grp_local, &vm_page_lck_attr);
- queue_init(&lq->vpl_queue);
+ vm_page_queue_init(&lq->vpl_queue);
lq->vpl_count = 0;
+ lq->vpl_internal_count = 0;
+ lq->vpl_external_count = 0;
}
vm_page_local_q_count = num_cpus;
}
}
+/*
+ * vm_init_before_launchd
+ *
+ * This should be called right before launchd is loaded.
+ */
+void
+vm_init_before_launchd()
+{
+ vm_page_wire_count_on_boot = vm_page_wire_count;
+}
-uint64_t initial_max_mem;
-int initial_wire_count;
-int initial_free_count;
-int initial_lopage_count;
/*
* vm_page_bootstrap:
vm_offset_t *startp,
vm_offset_t *endp)
{
- register vm_page_t m;
+ vm_page_t m;
unsigned int i;
unsigned int log1;
unsigned int log2;
m = &vm_page_template;
bzero(m, sizeof (*m));
- m->pageq.next = NULL;
- m->pageq.prev = NULL;
- m->listq.next = NULL;
- m->listq.prev = NULL;
- m->next = VM_PAGE_NULL;
+#if CONFIG_BACKGROUND_QUEUE
+ m->vm_page_backgroundq.next = 0;
+ m->vm_page_backgroundq.prev = 0;
+ m->vm_page_in_background = FALSE;
+ m->vm_page_on_backgroundq = FALSE;
+#endif
+
+ VM_PAGE_ZERO_PAGEQ_ENTRY(m);
+ m->listq.next = 0;
+ m->listq.prev = 0;
+ m->next_m = 0;
- m->object = VM_OBJECT_NULL; /* reset later */
+ m->vm_page_object = 0; /* reset later */
m->offset = (vm_object_offset_t) -1; /* reset later */
m->wire_count = 0;
- m->local = FALSE;
- m->inactive = FALSE;
- m->active = FALSE;
- m->pageout_queue = FALSE;
- m->speculative = FALSE;
+ m->vm_page_q_state = VM_PAGE_NOT_ON_Q;
m->laundry = FALSE;
- m->free = FALSE;
m->reference = FALSE;
m->gobbled = FALSE;
m->private = FALSE;
- m->throttled = FALSE;
m->__unused_pageq_bits = 0;
- m->phys_page = 0; /* reset later */
-
+#if !defined(__arm__) && !defined(__arm64__)
+ VM_PAGE_SET_PHYS_PAGE(m, 0); /* reset later */
+#endif
m->busy = TRUE;
m->wanted = FALSE;
m->tabled = FALSE;
+ m->hashed = FALSE;
m->fictitious = FALSE;
m->pmapped = FALSE;
m->wpmapped = FALSE;
- m->pageout = FALSE;
+ m->free_when_done = FALSE;
m->absent = FALSE;
m->error = FALSE;
m->dirty = FALSE;
m->overwriting = FALSE;
m->restart = FALSE;
m->unusual = FALSE;
- m->encrypted = FALSE;
- m->encrypted_cleaning = FALSE;
- m->list_req_pending = FALSE;
- m->dump_cleaning = FALSE;
m->cs_validated = FALSE;
m->cs_tainted = FALSE;
+ m->cs_nx = FALSE;
m->no_cache = FALSE;
- m->zero_fill = FALSE;
m->reusable = FALSE;
+ m->slid = FALSE;
+ m->xpmapped = FALSE;
+ m->written_by_kernel = FALSE;
m->__unused_object_bits = 0;
-
/*
* Initialize the page queues.
*/
purgeable_queues[i].debug_count_objects = 0;
#endif
};
+ purgeable_nonvolatile_count = 0;
+ queue_init(&purgeable_nonvolatile_queue);
for (i = 0; i < MAX_COLORS; i++ )
- queue_init(&vm_page_queue_free[i]);
- queue_init(&vm_lopage_queue_free);
- vm_page_queue_fictitious = VM_PAGE_NULL;
- queue_init(&vm_page_queue_active);
- queue_init(&vm_page_queue_inactive);
- queue_init(&vm_page_queue_throttled);
- queue_init(&vm_page_queue_zf);
+ vm_page_queue_init(&vm_page_queue_free[i].qhead);
+
+ vm_page_queue_init(&vm_lopage_queue_free);
+ vm_page_queue_init(&vm_page_queue_active);
+ vm_page_queue_init(&vm_page_queue_inactive);
+#if CONFIG_SECLUDED_MEMORY
+ vm_page_queue_init(&vm_page_queue_secluded);
+#endif /* CONFIG_SECLUDED_MEMORY */
+ vm_page_queue_init(&vm_page_queue_cleaned);
+ vm_page_queue_init(&vm_page_queue_throttled);
+ vm_page_queue_init(&vm_page_queue_anonymous);
+ queue_init(&vm_objects_wired);
for ( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ ) {
- queue_init(&vm_page_queue_speculative[i].age_q);
+ vm_page_queue_init(&vm_page_queue_speculative[i].age_q);
vm_page_queue_speculative[i].age_ts.tv_sec = 0;
vm_page_queue_speculative[i].age_ts.tv_nsec = 0;
}
+#if CONFIG_BACKGROUND_QUEUE
+ vm_page_queue_init(&vm_page_queue_background);
+
+ vm_page_background_count = 0;
+ vm_page_background_internal_count = 0;
+ vm_page_background_external_count = 0;
+ vm_page_background_promoted_count = 0;
+
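+	/* default the background queue target to 1/25th (4%) of physical pages, capped below */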
+ vm_page_background_target = (unsigned int)(atop_64(max_mem) / 25);
+
+ if (vm_page_background_target > VM_PAGE_BACKGROUND_TARGET_MAX)
+ vm_page_background_target = VM_PAGE_BACKGROUND_TARGET_MAX;
+
+ vm_page_background_mode = VM_PAGE_BG_LEVEL_1;
+ vm_page_background_exclude_external = 0;
+
+ PE_parse_boot_argn("vm_page_bg_mode", &vm_page_background_mode, sizeof(vm_page_background_mode));
+ PE_parse_boot_argn("vm_page_bg_exclude_external", &vm_page_background_exclude_external, sizeof(vm_page_background_exclude_external));
+ PE_parse_boot_argn("vm_page_bg_target", &vm_page_background_target, sizeof(vm_page_background_target));
+
+ if (vm_page_background_mode > VM_PAGE_BG_LEVEL_1)
+ vm_page_background_mode = VM_PAGE_BG_LEVEL_1;
+#endif
vm_page_free_wanted = 0;
vm_page_free_wanted_privileged = 0;
+#if CONFIG_SECLUDED_MEMORY
+ vm_page_free_wanted_secluded = 0;
+#endif /* CONFIG_SECLUDED_MEMORY */
+#if defined (__x86_64__)
+ /* this must be called before vm_page_set_colors() */
+ vm_page_setup_clump();
+#endif
+
vm_page_set_colors();
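+	/*
+	 * build lookup tables indexed by vm_page_q_state so queue-membership
+	 * tests reduce to a single array access
+	 */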
+ bzero(vm_page_inactive_states, sizeof(vm_page_inactive_states));
+ vm_page_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
+ vm_page_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
+ vm_page_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
+
+ bzero(vm_page_pageable_states, sizeof(vm_page_pageable_states));
+ vm_page_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
+ vm_page_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
+ vm_page_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
+ vm_page_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
+ vm_page_pageable_states[VM_PAGE_ON_SPECULATIVE_Q] = 1;
+ vm_page_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
+#if CONFIG_SECLUDED_MEMORY
+ vm_page_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+ bzero(vm_page_non_speculative_pageable_states, sizeof(vm_page_non_speculative_pageable_states));
+ vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
+ vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
+ vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
+ vm_page_non_speculative_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
+ vm_page_non_speculative_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
+#if CONFIG_SECLUDED_MEMORY
+ vm_page_non_speculative_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+ bzero(vm_page_active_or_inactive_states, sizeof(vm_page_active_or_inactive_states));
+ vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
+ vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
+ vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
+ vm_page_active_or_inactive_states[VM_PAGE_ON_ACTIVE_Q] = 1;
+#if CONFIG_SECLUDED_MEMORY
+ vm_page_active_or_inactive_states[VM_PAGE_ON_SECLUDED_Q] = 1;
+#endif /* CONFIG_SECLUDED_MEMORY */
+
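+	/* pre-populate the allocation-site table for the fixed kernel memory tags */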
+ for (i = 0; i < VM_KERN_MEMORY_FIRST_DYNAMIC; i++)
+ {
+ vm_allocation_sites_static[i].refcount = 2;
+ vm_allocation_sites_static[i].tag = i;
+ vm_allocation_sites[i] = &vm_allocation_sites_static[i];
+ }
+ vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].refcount = 2;
+ vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].tag = VM_KERN_MEMORY_ANY;
+ vm_allocation_sites[VM_KERN_MEMORY_ANY] = &vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC];
/*
* Steal memory for the map and zone subsystems.
*/
-
+#if CONFIG_GZALLOC
+ gzalloc_configure();
+#endif
+ kernel_debug_string_early("vm_map_steal_memory");
vm_map_steal_memory();
- zone_steal_memory();
/*
* Allocate (and initialize) the virtual-to-physical
if (vm_page_hash_mask & vm_page_bucket_count)
printf("vm_page_bootstrap: WARNING -- strange page hash\n");
+#if VM_PAGE_BUCKETS_CHECK
+#if VM_PAGE_FAKE_BUCKETS
+ /*
+ * Allocate a decoy set of page buckets, to detect
+ * any stomping there.
+ */
+ vm_page_fake_buckets = (vm_page_bucket_t *)
+ pmap_steal_memory(vm_page_bucket_count *
+ sizeof(vm_page_bucket_t));
+ vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets;
+ vm_page_fake_buckets_end =
+ vm_map_round_page((vm_page_fake_buckets_start +
+ (vm_page_bucket_count *
+ sizeof (vm_page_bucket_t))),
+ PAGE_MASK);
+ char *cp;
+ for (cp = (char *)vm_page_fake_buckets_start;
+ cp < (char *)vm_page_fake_buckets_end;
+ cp++) {
+ *cp = 0x5a;
+ }
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
+ kernel_debug_string_early("vm_page_buckets");
vm_page_buckets = (vm_page_bucket_t *)
pmap_steal_memory(vm_page_bucket_count *
sizeof(vm_page_bucket_t));
+ kernel_debug_string_early("vm_page_bucket_locks");
vm_page_bucket_locks = (lck_spin_t *)
pmap_steal_memory(vm_page_bucket_lock_count *
sizeof(lck_spin_t));
for (i = 0; i < vm_page_bucket_count; i++) {
- register vm_page_bucket_t *bucket = &vm_page_buckets[i];
+ vm_page_bucket_t *bucket = &vm_page_buckets[i];
- bucket->pages = VM_PAGE_NULL;
+ bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
#if MACH_PAGE_HASH_STATS
bucket->cur_count = 0;
bucket->hi_count = 0;
for (i = 0; i < vm_page_bucket_lock_count; i++)
lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr);
+ lck_spin_init(&vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
+ lck_spin_init(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
+ vm_tag_init();
+
+#if VM_PAGE_BUCKETS_CHECK
+ vm_page_buckets_check_ready = TRUE;
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
/*
* Machine-dependent code allocates the resident page table.
* It uses vm_page_init to initialize the page frames.
* to get the alignment right.
*/
+ kernel_debug_string_early("pmap_startup");
pmap_startup(&virtual_space_start, &virtual_space_end);
virtual_space_start = round_page(virtual_space_start);
virtual_space_end = trunc_page(virtual_space_end);
*/
assert((unsigned int) atop_64(max_mem) == atop_64(max_mem));
vm_page_wire_count = ((unsigned int) atop_64(max_mem)) - vm_page_free_count - vm_lopage_free_count; /* initial value */
+#if CONFIG_SECLUDED_MEMORY
+ vm_page_wire_count -= vm_page_secluded_count;
+#endif
vm_page_wire_count_initial = vm_page_wire_count;
- vm_page_free_count_minimum = vm_page_free_count;
-
- initial_max_mem = max_mem;
- initial_wire_count = vm_page_wire_count;
- initial_free_count = vm_page_free_count;
- initial_lopage_count = vm_lopage_free_count;
+ vm_page_pages_initial = vm_page_pages;
printf("vm_page_bootstrap: %d free pages and %d wired pages\n",
vm_page_free_count, vm_page_wire_count);
+ kernel_debug_string_early("vm_page_bootstrap complete");
simple_lock_init(&vm_paging_lock, 0);
}
pmap_steal_memory(
vm_size_t size)
{
+ kern_return_t kr;
vm_offset_t addr, vaddr;
- ppnum_t phys_page;
+ ppnum_t phys_page;
/*
* We round the size to a round multiple.
addr = virtual_space_start;
virtual_space_start += size;
- kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size); /* (TEST/DEBUG) */
+ //kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size); /* (TEST/DEBUG) */
/*
* Allocate and map physical pages to back new virtual pages.
vaddr += PAGE_SIZE) {
if (!pmap_next_page_hi(&phys_page))
- panic("pmap_steal_memory");
+ panic("pmap_steal_memory() size: 0x%llx\n", (uint64_t)size);
/*
* XXX Logically, these mappings should be wired,
* but some pmap modules barf if they are.
*/
#if defined(__LP64__)
+#ifdef __arm64__
+ /* ARM64_TODO: verify that we really don't need this */
+#else
pmap_pre_expand(kernel_pmap, vaddr);
#endif
+#endif
+
+ kr = pmap_enter(kernel_pmap, vaddr, phys_page,
+ VM_PROT_READ|VM_PROT_WRITE, VM_PROT_NONE,
+ VM_WIMG_USE_DEFAULT, FALSE);
+
+ if (kr != KERN_SUCCESS) {
+ panic("pmap_steal_memory() pmap_enter failed, vaddr=%#lx, phys_page=%u",
+ (unsigned long)vaddr, phys_page);
+ }
- pmap_enter(kernel_pmap, vaddr, phys_page,
- VM_PROT_READ|VM_PROT_WRITE,
- VM_WIMG_USE_DEFAULT, FALSE);
/*
* Account for newly stolen memory
*/
vm_page_wire_count++;
-
+ vm_page_stolen_count++;
}
+#if KASAN
+ kasan_notify_address(round_page(addr), size);
+#endif
return (void *) addr;
}
+#if CONFIG_SECLUDED_MEMORY
+/* boot-args to control secluded memory */
+unsigned int secluded_mem_mb = 0; /* # of MBs of RAM to seclude */
+int secluded_for_iokit = 1; /* IOKit can use secluded memory */
+int secluded_for_apps = 1; /* apps can use secluded memory */
+int secluded_for_filecache = 2;	/* filecache can use secluded memory */
+#if 11
+int secluded_for_fbdp = 0;
+#endif
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+
+#if defined(__arm__) || defined(__arm64__)
+extern void patch_low_glo_vm_page_info(void *, void *, uint32_t);
+unsigned int vm_first_phys_ppnum = 0;
+#endif
+
+
+void vm_page_release_startup(vm_page_t mem);
void
pmap_startup(
vm_offset_t *startp,
ppnum_t phys_page;
addr64_t tmpaddr;
+#if defined(__LP64__)
+ /*
+ * make sure we are aligned on a 64 byte boundary
+ * for VM_PAGE_PACK_PTR (it clips off the low-order
+ * 6 bits of the pointer)
+ */
+ if (virtual_space_start != virtual_space_end)
+ virtual_space_start = round_page(virtual_space_start);
+#endif
+
/*
* We calculate how many page frames we will have
* and then allocate the page structures in one chunk.
/*
* Initialize the page frames.
*/
+ kernel_debug_string_early("Initialize the page frames");
+
+ vm_page_array_beginning_addr = &vm_pages[0];
+ vm_page_array_ending_addr = &vm_pages[npages];
+
for (i = 0, pages_initialized = 0; i < npages; i++) {
if (!pmap_next_page(&phys_page))
break;
+#if defined(__arm__) || defined(__arm64__)
+ if (pages_initialized == 0) {
+ vm_first_phys_ppnum = phys_page;
+ patch_low_glo_vm_page_info((void *)vm_page_array_beginning_addr, (void *)vm_page_array_ending_addr, vm_first_phys_ppnum);
+ }
+ assert((i + vm_first_phys_ppnum) == phys_page);
+#endif
if (pages_initialized == 0 || phys_page < vm_page_lowest)
vm_page_lowest = phys_page;
pages_initialized++;
}
vm_pages_count = pages_initialized;
+ vm_page_array_boundary = &vm_pages[pages_initialized];
+
+#if defined(__LP64__)
+
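+	/* sanity check: packed page pointers must round-trip for the first and last vm_pages entries */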
+ if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0]))) != &vm_pages[0])
+ panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]);
+ if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count-1]))) != &vm_pages[vm_pages_count-1])
+ panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count-1]);
+#endif
+ kernel_debug_string_early("page fill/release");
/*
* Check if we want to initialize pages to a known value
*/
fill = 0; /* Assume no fill */
if (PE_parse_boot_argn("fill", &fillval, sizeof (fillval))) fill = 1; /* Set fill */
-
+#if DEBUG
+ /* This slows down booting the DEBUG kernel, particularly on
+ * large memory systems, but is worthwhile in deterministically
+ * trapping uninitialized memory usage.
+ */
+ if (fill == 0) {
+ fill = 1;
+ fillval = 0xDEB8F177;
+ }
+#endif
+ if (fill)
+ kprintf("Filling vm_pages with pattern: 0x%x\n", fillval);
+
+#if CONFIG_SECLUDED_MEMORY
+ /* default: no secluded mem */
+ secluded_mem_mb = 0;
+ if (max_mem > 1*1024*1024*1024) {
+ /* default to 90MB for devices with > 1GB of RAM */
+ secluded_mem_mb = 90;
+ }
+ /* override with value from device tree, if provided */
+ PE_get_default("kern.secluded_mem_mb",
+ &secluded_mem_mb, sizeof(secluded_mem_mb));
+ /* override with value from boot-args, if provided */
+ PE_parse_boot_argn("secluded_mem_mb",
+ &secluded_mem_mb,
+ sizeof (secluded_mem_mb));
+
+ vm_page_secluded_target = (unsigned int)
+ ((secluded_mem_mb * 1024ULL * 1024ULL) / PAGE_SIZE);
+ PE_parse_boot_argn("secluded_for_iokit",
+ &secluded_for_iokit,
+ sizeof (secluded_for_iokit));
+ PE_parse_boot_argn("secluded_for_apps",
+ &secluded_for_apps,
+ sizeof (secluded_for_apps));
+ PE_parse_boot_argn("secluded_for_filecache",
+ &secluded_for_filecache,
+ sizeof (secluded_for_filecache));
+#if 11
+ PE_parse_boot_argn("secluded_for_fbdp",
+ &secluded_for_fbdp,
+ sizeof (secluded_for_fbdp));
+#endif
+#endif /* CONFIG_SECLUDED_MEMORY */
+
// -debug code remove
if (2 == vm_himemory_mode) {
// free low -> high so high is preferred
for (i = 1; i <= pages_initialized; i++) {
- if(fill) fillPage(vm_pages[i - 1].phys_page, fillval); /* Fill the page with a know value if requested at boot */
- vm_page_release(&vm_pages[i - 1]);
+			if(fill) fillPage(VM_PAGE_GET_PHYS_PAGE(&vm_pages[i - 1]), fillval);		/* Fill the page with a known value if requested at boot */
+ vm_page_release_startup(&vm_pages[i - 1]);
}
}
else
* they require several consecutive pages.
*/
for (i = pages_initialized; i > 0; i--) {
- if(fill) fillPage(vm_pages[i - 1].phys_page, fillval); /* Fill the page with a know value if requested at boot */
- vm_page_release(&vm_pages[i - 1]);
+		if(fill) fillPage(VM_PAGE_GET_PHYS_PAGE(&vm_pages[i - 1]), fillval);		/* Fill the page with a known value if requested at boot */
+ vm_page_release_startup(&vm_pages[i - 1]);
}
+ VM_CHECK_MEMORYSTATUS;
+
#if 0
{
vm_page_t xx, xxo, xxl;
xxl = 0;
for( i = 0; i < vm_colors; i++ ) {
- queue_iterate(&vm_page_queue_free[i],
+ queue_iterate(&vm_page_queue_free[i].qhead,
xx,
vm_page_t,
pageq) { /* BRINGUP */
if(((j - 1) & 0xFFFF) == 0) kprintf("checking number %d of %d\n", j, vm_page_free_count);
- for(xxo = xx->pageq.next; xxo != &vm_page_queue_free[i]; xxo = xxo->pageq.next) { /* (BRINGUP) */
+ for(xxo = xx->pageq.next; xxo != &vm_page_queue_free[i].qhead; xxo = xxo->pageq.next) { /* (BRINGUP) */
k++;
if(k > l) panic("pmap_startup: too many in secondary check %d %d\n", k, l);
if((xx->phys_page & 0xFFFFFFFF) == (xxo->phys_page & 0xFFFFFFFF)) { /* (BRINGUP) */
void
vm_page_module_init(void)
{
- vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page),
- 0, PAGE_SIZE, "vm pages");
+ uint64_t vm_page_zone_pages, vm_page_array_zone_data_size;
+ vm_size_t vm_page_with_ppnum_size;
-#if ZONE_DEBUG
- zone_debug_disable(vm_page_zone);
-#endif /* ZONE_DEBUG */
+ vm_page_array_zone = zinit((vm_size_t) sizeof(struct vm_page),
+ 0, PAGE_SIZE, "vm pages array");
+
+ zone_change(vm_page_array_zone, Z_CALLERACCT, FALSE);
+ zone_change(vm_page_array_zone, Z_EXPAND, FALSE);
+ zone_change(vm_page_array_zone, Z_EXHAUST, TRUE);
+ zone_change(vm_page_array_zone, Z_FOREIGN, TRUE);
+ zone_change(vm_page_array_zone, Z_GZALLOC_EXEMPT, TRUE);
+ /*
+ * Adjust zone statistics to account for the real pages allocated
+ * in vm_page_create(). [Q: is this really what we want?]
+ */
+ vm_page_array_zone->count += vm_page_pages;
+ vm_page_array_zone->sum_count += vm_page_pages;
+ vm_page_array_zone_data_size = vm_page_pages * vm_page_array_zone->elem_size;
+ vm_page_array_zone->cur_size += vm_page_array_zone_data_size;
+ vm_page_zone_pages = ((round_page(vm_page_array_zone_data_size)) / PAGE_SIZE);
+ OSAddAtomic64(vm_page_zone_pages, &(vm_page_array_zone->page_count));
+ /* since zone accounts for these, take them out of stolen */
+ VM_PAGE_MOVE_STOLEN(vm_page_zone_pages);
+
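+	/* round the zone element size up to VM_PACKED_POINTER_ALIGNMENT so packed pointers to zone-allocated pages stay representable */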
+ vm_page_with_ppnum_size = (sizeof(struct vm_page_with_ppnum) + (VM_PACKED_POINTER_ALIGNMENT-1)) & ~(VM_PACKED_POINTER_ALIGNMENT - 1);
+
+ vm_page_zone = zinit(vm_page_with_ppnum_size,
+ 0, PAGE_SIZE, "vm pages");
+ zone_change(vm_page_zone, Z_CALLERACCT, FALSE);
zone_change(vm_page_zone, Z_EXPAND, FALSE);
zone_change(vm_page_zone, Z_EXHAUST, TRUE);
zone_change(vm_page_zone, Z_FOREIGN, TRUE);
-
- /*
- * Adjust zone statistics to account for the real pages allocated
- * in vm_page_create(). [Q: is this really what we want?]
- */
- vm_page_zone->count += vm_page_pages;
- vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size;
-
- lck_mtx_init(&vm_page_alloc_lock, &vm_page_lck_grp_alloc, &vm_page_lck_attr);
+ zone_change(vm_page_zone, Z_GZALLOC_EXEMPT, TRUE);
+ zone_change(vm_page_zone, Z_ALIGNMENT_REQUIRED, TRUE);
}
/*
for (phys_page = start;
phys_page < end;
phys_page++) {
- while ((m = (vm_page_t) vm_page_grab_fictitious())
+ while ((m = (vm_page_t) vm_page_grab_fictitious_common(phys_page))
== VM_PAGE_NULL)
vm_page_more_fictitious();
- vm_page_init(m, phys_page, FALSE);
+ m->fictitious = FALSE;
pmap_clear_noencrypt(phys_page);
+
vm_page_pages++;
- vm_page_release(m);
+ vm_page_release(m, FALSE);
}
}
vm_object_t object,
vm_object_offset_t offset)
{
- vm_page_insert_internal(mem, object, offset, FALSE, TRUE);
+ vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, FALSE, FALSE, NULL);
+}
+
+void
+vm_page_insert_wired(
+ vm_page_t mem,
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_tag_t tag)
+{
+ vm_page_insert_internal(mem, object, offset, tag, FALSE, TRUE, FALSE, FALSE, NULL);
}
void
vm_page_t mem,
vm_object_t object,
vm_object_offset_t offset,
+ vm_tag_t tag,
boolean_t queues_lock_held,
- boolean_t insert_in_hash)
+ boolean_t insert_in_hash,
+ boolean_t batch_pmap_op,
+ boolean_t batch_accounting,
+ uint64_t *delayed_ledger_update)
{
- vm_page_bucket_t *bucket;
- lck_spin_t *bucket_lock;
- int hash_id;
+ vm_page_bucket_t *bucket;
+ lck_spin_t *bucket_lock;
+ int hash_id;
+ task_t owner;
XPR(XPR_VM_PAGE,
"vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
object, offset, mem, 0,0);
-
+#if 0
+ /*
+ * we may not hold the page queue lock
+ * so this check isn't safe to make
+ */
VM_PAGE_CHECK(mem);
+#endif
- if (object == vm_submap_object) {
- /* the vm_submap_object is only a placeholder for submaps */
- panic("vm_page_insert(vm_submap_object,0x%llx)\n", offset);
- }
+ assert(page_aligned(offset));
+
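+	/* a wired page must carry a real VM tag unless it is private or fictitious */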
+ assert(!VM_PAGE_WIRED(mem) || mem->private || mem->fictitious || (tag != VM_KERN_MEMORY_NONE));
+
+ /* the vm_submap_object is only a placeholder for submaps */
+ assert(object != vm_submap_object);
vm_object_lock_assert_exclusive(object);
-#if DEBUG
- lck_mtx_assert(&vm_page_queue_lock,
+ LCK_MTX_ASSERT(&vm_page_queue_lock,
queues_lock_held ? LCK_MTX_ASSERT_OWNED
: LCK_MTX_ASSERT_NOTOWNED);
-#endif /* DEBUG */
-
+
+ if (queues_lock_held == FALSE)
+ assert(!VM_PAGE_PAGEABLE(mem));
+
if (insert_in_hash == TRUE) {
-#if DEBUG
- if (mem->tabled || mem->object != VM_OBJECT_NULL)
+#if DEBUG || VM_PAGE_CHECK_BUCKETS
+ if (mem->tabled || mem->vm_page_object)
panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
"already in (obj=%p,off=0x%llx)",
- mem, object, offset, mem->object, mem->offset);
+ mem, object, offset, VM_PAGE_OBJECT(mem), mem->offset);
#endif
- assert(!object->internal || offset < object->size);
-
- /* only insert "pageout" pages into "pageout" objects,
- * and normal pages into normal objects */
- assert(object->pageout == mem->pageout);
+ if (object->internal && (offset >= object->vo_size)) {
+ panic("vm_page_insert_internal: (page=%p,obj=%p,off=0x%llx,size=0x%llx) inserted at offset past object bounds",
+ mem, object, offset, object->vo_size);
+ }
assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);
* Record the object/offset pair in this page
*/
- mem->object = object;
+ mem->vm_page_object = VM_PAGE_PACK_OBJECT(object);
mem->offset = offset;
+#if CONFIG_SECLUDED_MEMORY
+ if (object->eligible_for_secluded) {
+ vm_page_secluded.eligible_for_secluded++;
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
/*
* Insert it into the object_object/offset hash table
*/
lck_spin_lock(bucket_lock);
- mem->next = bucket->pages;
- bucket->pages = mem;
+ mem->next_m = bucket->page_list;
+ bucket->page_list = VM_PAGE_PACK_PTR(mem);
+ assert(mem == (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)));
+
#if MACH_PAGE_HASH_STATS
if (++bucket->cur_count > bucket->hi_count)
bucket->hi_count = bucket->cur_count;
#endif /* MACH_PAGE_HASH_STATS */
-
+ mem->hashed = TRUE;
lck_spin_unlock(bucket_lock);
}
+
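+	/* propagate the object's WIMG cache attributes to this page (optionally batched by the caller) */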
+ {
+ unsigned int cache_attr;
+
+ cache_attr = object->wimg_bits & VM_WIMG_MASK;
+
+ if (cache_attr != VM_WIMG_USE_DEFAULT) {
+ PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op);
+ }
+ }
/*
* Now link into the object's list of backed pages.
*/
-
- VM_PAGE_INSERT(mem, object);
+ vm_page_queue_enter(&object->memq, mem, vm_page_t, listq);
+ object->memq_hint = mem;
mem->tabled = TRUE;
/*
object->resident_page_count++;
if (VM_PAGE_WIRED(mem)) {
- object->wired_page_count++;
+ assert(mem->wire_count > 0);
+ VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
+ VM_OBJECT_WIRED_PAGE_ADD(object, mem);
+ VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
}
assert(object->resident_page_count >= object->wired_page_count);
+ if (batch_accounting == FALSE) {
+ if (object->internal) {
+ OSAddAtomic(1, &vm_page_internal_count);
+ } else {
+ OSAddAtomic(1, &vm_page_external_count);
+ }
+ }
+
+ /*
+ * It wouldn't make sense to insert a "reusable" page in
+ * an object (the page would have been marked "reusable" only
+ * at the time of a madvise(MADV_FREE_REUSABLE) if it was already
+ * in the object at that time).
+ * But a page could be inserted in a "all_reusable" object, if
+ * something faults it in (a vm_read() from another task or a
+ * "use-after-free" issue in user space, for example). It can
+ * also happen if we're relocating a page from that object to
+ * a different physical page during a physically-contiguous
+ * allocation.
+ */
assert(!mem->reusable);
+ if (object->all_reusable) {
+ OSAddAtomic(+1, &vm_page_stats_reusable.reusable_count);
+ }
+
+ if (object->purgable == VM_PURGABLE_DENY) {
+ owner = TASK_NULL;
+ } else {
+ owner = object->vo_purgeable_owner;
+ }
+ if (owner &&
+ (object->purgable == VM_PURGABLE_NONVOLATILE ||
+ VM_PAGE_WIRED(mem))) {
+
+ if (delayed_ledger_update)
+ *delayed_ledger_update += PAGE_SIZE;
+ else {
+ /* more non-volatile bytes */
+ ledger_credit(owner->ledger,
+ task_ledgers.purgeable_nonvolatile,
+ PAGE_SIZE);
+ /* more footprint */
+ ledger_credit(owner->ledger,
+ task_ledgers.phys_footprint,
+ PAGE_SIZE);
+ }
+
+ } else if (owner &&
+ (object->purgable == VM_PURGABLE_VOLATILE ||
+ object->purgable == VM_PURGABLE_EMPTY)) {
+ assert(! VM_PAGE_WIRED(mem));
+ /* more volatile bytes */
+ ledger_credit(owner->ledger,
+ task_ledgers.purgeable_volatile,
+ PAGE_SIZE);
+ }
if (object->purgable == VM_PURGABLE_VOLATILE) {
if (VM_PAGE_WIRED(mem)) {
- OSAddAtomic(1, &vm_page_purgeable_wired_count);
+ OSAddAtomic(+1, &vm_page_purgeable_wired_count);
} else {
- OSAddAtomic(1, &vm_page_purgeable_count);
+ OSAddAtomic(+1, &vm_page_purgeable_count);
}
} else if (object->purgable == VM_PURGABLE_EMPTY &&
- mem->throttled) {
+ mem->vm_page_q_state == VM_PAGE_ON_THROTTLED_Q) {
/*
* This page belongs to a purged VM object but hasn't
* been purged (because it was "busy").
if (queues_lock_held == FALSE)
vm_page_unlock_queues();
}
+
+#if VM_OBJECT_TRACKING_OP_MODIFIED
+ if (vm_object_tracking_inited &&
+ object->internal &&
+ object->resident_page_count == 0 &&
+ object->pager == NULL &&
+ object->shadow != NULL &&
+ object->shadow->copy == object) {
+ void *bt[VM_OBJECT_TRACKING_BTDEPTH];
+ int numsaved = 0;
+
+		numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH);
+ btlog_add_entry(vm_object_tracking_btlog,
+ object,
+ VM_OBJECT_TRACKING_OP_MODIFIED,
+ bt,
+ numsaved);
+ }
+#endif /* VM_OBJECT_TRACKING_OP_MODIFIED */
}
/*
*/
void
vm_page_replace(
- register vm_page_t mem,
- register vm_object_t object,
- register vm_object_offset_t offset)
+ vm_page_t mem,
+ vm_object_t object,
+ vm_object_offset_t offset)
{
vm_page_bucket_t *bucket;
vm_page_t found_m = VM_PAGE_NULL;
lck_spin_t *bucket_lock;
int hash_id;
+#if 0
+ /*
+ * we don't hold the page queue lock
+ * so this check isn't safe to make
+ */
VM_PAGE_CHECK(mem);
+#endif
vm_object_lock_assert_exclusive(object);
-#if DEBUG
- if (mem->tabled || mem->object != VM_OBJECT_NULL)
+#if DEBUG || VM_PAGE_CHECK_BUCKETS
+ if (mem->tabled || mem->vm_page_object)
panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
"already in (obj=%p,off=0x%llx)",
- mem, object, offset, mem->object, mem->offset);
- lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
+ mem, object, offset, VM_PAGE_OBJECT(mem), mem->offset);
#endif
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
+
+ assert(!VM_PAGE_PAGEABLE(mem));
+
/*
* Record the object/offset pair in this page
*/
-
- mem->object = object;
+ mem->vm_page_object = VM_PAGE_PACK_OBJECT(object);
mem->offset = offset;
/*
lck_spin_lock(bucket_lock);
- if (bucket->pages) {
- vm_page_t *mp = &bucket->pages;
- vm_page_t m = *mp;
+ if (bucket->page_list) {
+ vm_page_packed_t *mp = &bucket->page_list;
+ vm_page_t m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp));
do {
- if (m->object == object && m->offset == offset) {
+ /*
+ * compare packed object pointers
+ */
+ if (m->vm_page_object == mem->vm_page_object && m->offset == offset) {
/*
* Remove old page from hash list
*/
- *mp = m->next;
+ *mp = m->next_m;
+ m->hashed = FALSE;
+ m->next_m = VM_PAGE_PACK_PTR(NULL);
found_m = m;
break;
}
- mp = &m->next;
- } while ((m = *mp));
+ mp = &m->next_m;
+ } while ((m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp))));
- mem->next = bucket->pages;
+ mem->next_m = bucket->page_list;
} else {
- mem->next = VM_PAGE_NULL;
+ mem->next_m = VM_PAGE_PACK_PTR(NULL);
}
/*
* insert new page at head of hash list
*/
- bucket->pages = mem;
+ bucket->page_list = VM_PAGE_PACK_PTR(mem);
+ mem->hashed = TRUE;
lck_spin_unlock(bucket_lock);
*/
vm_page_free_unlocked(found_m, FALSE);
}
- vm_page_insert_internal(mem, object, offset, FALSE, FALSE);
+ vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, FALSE, FALSE, FALSE, NULL);
}
/*
vm_page_t this;
lck_spin_t *bucket_lock;
int hash_id;
+ task_t owner;
+ vm_object_t m_object;
+
+ m_object = VM_PAGE_OBJECT(mem);
XPR(XPR_VM_PAGE,
"vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
- mem->object, mem->offset,
+ m_object, mem->offset,
mem, 0,0);
- vm_object_lock_assert_exclusive(mem->object);
+ vm_object_lock_assert_exclusive(m_object);
assert(mem->tabled);
assert(!mem->cleaning);
- VM_PAGE_CHECK(mem);
+ assert(!mem->laundry);
+ if (VM_PAGE_PAGEABLE(mem)) {
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+ }
+#if 0
+ /*
+ * we don't hold the page queue lock
+ * so this check isn't safe to make
+ */
+ VM_PAGE_CHECK(mem);
+#endif
if (remove_from_hash == TRUE) {
/*
* Remove from the object_object/offset hash table
*/
- hash_id = vm_page_hash(mem->object, mem->offset);
+ hash_id = vm_page_hash(m_object, mem->offset);
bucket = &vm_page_buckets[hash_id];
bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
lck_spin_lock(bucket_lock);
- if ((this = bucket->pages) == mem) {
+ if ((this = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list))) == mem) {
/* optimize for common case */
- bucket->pages = mem->next;
+ bucket->page_list = mem->next_m;
} else {
- vm_page_t *prev;
+ vm_page_packed_t *prev;
- for (prev = &this->next;
- (this = *prev) != mem;
- prev = &this->next)
+ for (prev = &this->next_m;
+ (this = (vm_page_t)(VM_PAGE_UNPACK_PTR(*prev))) != mem;
+ prev = &this->next_m)
continue;
- *prev = this->next;
+ *prev = this->next_m;
}
#if MACH_PAGE_HASH_STATS
bucket->cur_count--;
#endif /* MACH_PAGE_HASH_STATS */
-
+ mem->hashed = FALSE;
+ this->next_m = VM_PAGE_PACK_PTR(NULL);
lck_spin_unlock(bucket_lock);
}
/*
* Now remove from the object's list of backed pages.
*/
- VM_PAGE_REMOVE(mem);
+ vm_page_remove_internal(mem);
/*
* And show that the object has one fewer resident
* page.
*/
- assert(mem->object->resident_page_count > 0);
- mem->object->resident_page_count--;
+ assert(m_object->resident_page_count > 0);
+ m_object->resident_page_count--;
+
+ if (m_object->internal) {
+#if DEBUG
+ assert(vm_page_internal_count);
+#endif /* DEBUG */
+
+ OSAddAtomic(-1, &vm_page_internal_count);
+ } else {
+ assert(vm_page_external_count);
+ OSAddAtomic(-1, &vm_page_external_count);
+
+ if (mem->xpmapped) {
+ assert(vm_page_xpmapped_external_count);
+ OSAddAtomic(-1, &vm_page_xpmapped_external_count);
+ }
+ }
+ if (!m_object->internal && (m_object->objq.next || m_object->objq.prev)) {
+ if (m_object->resident_page_count == 0)
+ vm_object_cache_remove(m_object);
+ }
+
if (VM_PAGE_WIRED(mem)) {
- assert(mem->object->wired_page_count > 0);
- mem->object->wired_page_count--;
+ assert(mem->wire_count > 0);
+ VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
+ VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
+ VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
}
- assert(mem->object->resident_page_count >=
- mem->object->wired_page_count);
+ assert(m_object->resident_page_count >=
+ m_object->wired_page_count);
if (mem->reusable) {
- assert(mem->object->reusable_page_count > 0);
- mem->object->reusable_page_count--;
- assert(mem->object->reusable_page_count <=
- mem->object->resident_page_count);
+ assert(m_object->reusable_page_count > 0);
+ m_object->reusable_page_count--;
+ assert(m_object->reusable_page_count <=
+ m_object->resident_page_count);
mem->reusable = FALSE;
OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
vm_page_stats_reusable.reused_remove++;
- } else if (mem->object->all_reusable) {
+ } else if (m_object->all_reusable) {
OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
vm_page_stats_reusable.reused_remove++;
}
- if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
+ if (m_object->purgable == VM_PURGABLE_DENY) {
+ owner = TASK_NULL;
+ } else {
+ owner = m_object->vo_purgeable_owner;
+ }
+ if (owner &&
+ (m_object->purgable == VM_PURGABLE_NONVOLATILE ||
+ VM_PAGE_WIRED(mem))) {
+ /* less non-volatile bytes */
+ ledger_debit(owner->ledger,
+ task_ledgers.purgeable_nonvolatile,
+ PAGE_SIZE);
+ /* less footprint */
+ ledger_debit(owner->ledger,
+ task_ledgers.phys_footprint,
+ PAGE_SIZE);
+ } else if (owner &&
+ (m_object->purgable == VM_PURGABLE_VOLATILE ||
+ m_object->purgable == VM_PURGABLE_EMPTY)) {
+ assert(! VM_PAGE_WIRED(mem));
+ /* less volatile bytes */
+ ledger_debit(owner->ledger,
+ task_ledgers.purgeable_volatile,
+ PAGE_SIZE);
+ }
+ if (m_object->purgable == VM_PURGABLE_VOLATILE) {
if (VM_PAGE_WIRED(mem)) {
assert(vm_page_purgeable_wired_count > 0);
OSAddAtomic(-1, &vm_page_purgeable_wired_count);
OSAddAtomic(-1, &vm_page_purgeable_count);
}
}
+
+ if (m_object->set_cache_attr == TRUE)
+ pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), 0);
+
mem->tabled = FALSE;
- mem->object = VM_OBJECT_NULL;
+ mem->vm_page_object = 0;
mem->offset = (vm_object_offset_t) -1;
}
* The object must be locked. No side effects.
*/
-unsigned long vm_page_lookup_hint = 0;
-unsigned long vm_page_lookup_hint_next = 0;
-unsigned long vm_page_lookup_hint_prev = 0;
-unsigned long vm_page_lookup_hint_miss = 0;
-unsigned long vm_page_lookup_bucket_NULL = 0;
-unsigned long vm_page_lookup_miss = 0;
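+/*
+ * objects with at most this many resident pages are searched via their
+ * memq list instead of the page hash (see vm_page_lookup below)
+ */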
+#define VM_PAGE_HASH_LOOKUP_THRESHOLD 10
+
+#if DEBUG_VM_PAGE_LOOKUP
+
+struct {
+ uint64_t vpl_total;
+ uint64_t vpl_empty_obj;
+ uint64_t vpl_bucket_NULL;
+ uint64_t vpl_hit_hint;
+ uint64_t vpl_hit_hint_next;
+ uint64_t vpl_hit_hint_prev;
+ uint64_t vpl_fast;
+ uint64_t vpl_slow;
+ uint64_t vpl_hit;
+ uint64_t vpl_miss;
+
+ uint64_t vpl_fast_elapsed;
+ uint64_t vpl_slow_elapsed;
+} vm_page_lookup_stats __attribute__((aligned(8)));
+
+#endif
+
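+/* bound the memq walk so the debugger-context lookup always terminates quickly */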
+#define KDP_VM_PAGE_WALK_MAX 1000
+
+vm_page_t
+kdp_vm_page_lookup(
+ vm_object_t object,
+ vm_object_offset_t offset)
+{
+ vm_page_t cur_page;
+ int num_traversed = 0;
+
+ if (not_in_kdp) {
+ panic("panic: kdp_vm_page_lookup done outside of kernel debugger");
+ }
+
+ vm_page_queue_iterate(&object->memq, cur_page, vm_page_t, listq) {
+ if (cur_page->offset == offset) {
+ return cur_page;
+ }
+ num_traversed++;
+
+ if (num_traversed >= KDP_VM_PAGE_WALK_MAX) {
+ return VM_PAGE_NULL;
+ }
+ }
+ return VM_PAGE_NULL;
+}
vm_page_t
vm_page_lookup(
{
vm_page_t mem;
vm_page_bucket_t *bucket;
- queue_entry_t qe;
- lck_spin_t *bucket_lock;
+ vm_page_queue_entry_t qe;
+ lck_spin_t *bucket_lock = NULL;
int hash_id;
+#if DEBUG_VM_PAGE_LOOKUP
+ uint64_t start, elapsed;
+ OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total);
+#endif
vm_object_lock_assert_held(object);
+
+ if (object->resident_page_count == 0) {
+#if DEBUG_VM_PAGE_LOOKUP
+ OSAddAtomic64(1, &vm_page_lookup_stats.vpl_empty_obj);
+#endif
+ return (VM_PAGE_NULL);
+ }
+
mem = object->memq_hint;
if (mem != VM_PAGE_NULL) {
- assert(mem->object == object);
+ assert(VM_PAGE_OBJECT(mem) == object);
if (mem->offset == offset) {
- vm_page_lookup_hint++;
- return mem;
+#if DEBUG_VM_PAGE_LOOKUP
+ OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint);
+#endif
+ return (mem);
}
- qe = queue_next(&mem->listq);
+ qe = (vm_page_queue_entry_t)vm_page_queue_next(&mem->listq);
- if (! queue_end(&object->memq, qe)) {
+ if (! vm_page_queue_end(&object->memq, qe)) {
vm_page_t next_page;
- next_page = (vm_page_t) qe;
- assert(next_page->object == object);
+ next_page = (vm_page_t)((uintptr_t)qe);
+ assert(VM_PAGE_OBJECT(next_page) == object);
if (next_page->offset == offset) {
- vm_page_lookup_hint_next++;
object->memq_hint = next_page; /* new hint */
- return next_page;
+#if DEBUG_VM_PAGE_LOOKUP
+ OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_next);
+#endif
+ return (next_page);
}
}
- qe = queue_prev(&mem->listq);
+ qe = (vm_page_queue_entry_t)vm_page_queue_prev(&mem->listq);
- if (! queue_end(&object->memq, qe)) {
+ if (! vm_page_queue_end(&object->memq, qe)) {
vm_page_t prev_page;
- prev_page = (vm_page_t) qe;
- assert(prev_page->object == object);
+ prev_page = (vm_page_t)((uintptr_t)qe);
+ assert(VM_PAGE_OBJECT(prev_page) == object);
if (prev_page->offset == offset) {
- vm_page_lookup_hint_prev++;
object->memq_hint = prev_page; /* new hint */
- return prev_page;
+#if DEBUG_VM_PAGE_LOOKUP
+ OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_prev);
+#endif
+ return (prev_page);
}
}
}
* at outside the scope of the hash bucket lock... this is a
 * really cheap optimization to avoid taking the lock
*/
- if (bucket->pages == VM_PAGE_NULL) {
- vm_page_lookup_bucket_NULL++;
-
+ if (!bucket->page_list) {
+#if DEBUG_VM_PAGE_LOOKUP
+ OSAddAtomic64(1, &vm_page_lookup_stats.vpl_bucket_NULL);
+#endif
return (VM_PAGE_NULL);
}
- bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
- lck_spin_lock(bucket_lock);
+#if DEBUG_VM_PAGE_LOOKUP
+ start = mach_absolute_time();
+#endif
+ if (object->resident_page_count <= VM_PAGE_HASH_LOOKUP_THRESHOLD) {
+ /*
+ * on average, it's roughly 3 times faster to run a short memq list
+ * than to take the spin lock and go through the hash list
+ */
+ mem = (vm_page_t)vm_page_queue_first(&object->memq);
- for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
- VM_PAGE_CHECK(mem);
- if ((mem->object == object) && (mem->offset == offset))
- break;
+ while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
+
+ if (mem->offset == offset)
+ break;
+
+ mem = (vm_page_t)vm_page_queue_next(&mem->listq);
+ }
+ if (vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem))
+ mem = NULL;
+ } else {
+ vm_page_object_t packed_object;
+
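+		/* pack the object pointer once so the hash-chain walk can compare packed values directly */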
+ packed_object = VM_PAGE_PACK_OBJECT(object);
+
+ bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
+
+ lck_spin_lock(bucket_lock);
+
+ for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
+ mem != VM_PAGE_NULL;
+ mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->next_m))) {
+#if 0
+ /*
+ * we don't hold the page queue lock
+ * so this check isn't safe to make
+ */
+ VM_PAGE_CHECK(mem);
+#endif
+ if ((mem->vm_page_object == packed_object) && (mem->offset == offset))
+ break;
+ }
+ lck_spin_unlock(bucket_lock);
}
- lck_spin_unlock(bucket_lock);
+#if DEBUG_VM_PAGE_LOOKUP
+ elapsed = mach_absolute_time() - start;
+
+ if (bucket_lock) {
+ OSAddAtomic64(1, &vm_page_lookup_stats.vpl_slow);
+ OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_slow_elapsed);
+ } else {
+ OSAddAtomic64(1, &vm_page_lookup_stats.vpl_fast);
+ OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_fast_elapsed);
+ }
+ if (mem != VM_PAGE_NULL)
+ OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit);
+ else
+ OSAddAtomic64(1, &vm_page_lookup_stats.vpl_miss);
+#endif
if (mem != VM_PAGE_NULL) {
- if (object->memq_hint != VM_PAGE_NULL) {
- vm_page_lookup_hint_miss++;
- }
- assert(mem->object == object);
- object->memq_hint = mem;
- } else
- vm_page_lookup_miss++;
+ assert(VM_PAGE_OBJECT(mem) == object);
- return(mem);
+ object->memq_hint = mem;
+ }
+ return (mem);
}
*/
void
vm_page_rename(
- register vm_page_t mem,
- register vm_object_t new_object,
- vm_object_offset_t new_offset,
- boolean_t encrypted_ok)
+ vm_page_t mem,
+ vm_object_t new_object,
+ vm_object_offset_t new_offset)
{
- assert(mem->object != new_object);
+ boolean_t internal_to_external, external_to_internal;
+ vm_tag_t tag;
+ vm_object_t m_object;
- /*
- * ENCRYPTED SWAP:
- * The encryption key is based on the page's memory object
- * (aka "pager") and paging offset. Moving the page to
- * another VM object changes its "pager" and "paging_offset"
- * so it has to be decrypted first, or we would lose the key.
- *
- * One exception is VM object collapsing, where we transfer pages
- * from one backing object to its parent object. This operation also
- * transfers the paging information, so the <pager,paging_offset> info
- * should remain consistent. The caller (vm_object_do_collapse())
- * sets "encrypted_ok" in this case.
- */
- if (!encrypted_ok && mem->encrypted) {
- panic("vm_page_rename: page %p is encrypted\n", mem);
- }
+ m_object = VM_PAGE_OBJECT(mem);
+
+ assert(m_object != new_object);
+ assert(m_object);
XPR(XPR_VM_PAGE,
"vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n",
*/
vm_page_lockspin_queues();
+ internal_to_external = FALSE;
+ external_to_internal = FALSE;
+
+ if (mem->vm_page_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q) {
+ /*
+ * it's much easier to get the vm_page_pageable_xxx accounting correct
+ * if we first move the page to the active queue... it's going to end
+ * up there anyway, and we don't do vm_page_rename's frequently enough
+ * for this to matter.
+ */
+ vm_page_queues_remove(mem, FALSE);
+ vm_page_activate(mem);
+ }
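+	/* note whether this rename moves a pageable page between internal (anonymous) and external (file-backed) accounting */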
+ if (VM_PAGE_PAGEABLE(mem)) {
+ if (m_object->internal && !new_object->internal) {
+ internal_to_external = TRUE;
+ }
+ if (!m_object->internal && new_object->internal) {
+ external_to_internal = TRUE;
+ }
+ }
+
+ tag = m_object->wire_tag;
vm_page_remove(mem, TRUE);
- vm_page_insert_internal(mem, new_object, new_offset, TRUE, TRUE);
+ vm_page_insert_internal(mem, new_object, new_offset, tag, TRUE, TRUE, FALSE, FALSE, NULL);
+
+ if (internal_to_external) {
+ vm_page_pageable_internal_count--;
+ vm_page_pageable_external_count++;
+ } else if (external_to_internal) {
+ vm_page_pageable_external_count--;
+ vm_page_pageable_internal_count++;
+ }
vm_page_unlock_queues();
}
{
assert(phys_page);
+#if DEBUG
+ if ((phys_page != vm_page_fictitious_addr) && (phys_page != vm_page_guard_addr)) {
+ if (!(pmap_valid_page(phys_page))) {
+ panic("vm_page_init: non-DRAM phys_page 0x%x\n", phys_page);
+ }
+ }
+#endif
*mem = vm_page_template;
- mem->phys_page = phys_page;
+
+ VM_PAGE_SET_PHYS_PAGE(mem, phys_page);
+#if 0
+ /*
+ * we're leaving this turned off for now... currently pages
+ * come off the free list and are either immediately dirtied/referenced
+ * due to zero-fill or COW faults, or are used to read or write files...
+ * in the file I/O case, the UPL mechanism takes care of clearing
+ * the state of the HW ref/mod bits in a somewhat fragile way.
+ * Since we may change the way this works in the future (to toughen it up),
+ * I'm leaving this as a reminder of where these bits could get cleared
+ */
+
+ /*
+ * make sure both the h/w referenced and modified bits are
+ * clear at this point... we are especially dependent on
+ * not finding a 'stale' h/w modified in a number of spots
+ * once this page goes back into use
+ */
+ pmap_clear_refmod(phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
+#endif
mem->lopage = lopage;
}
* Returns VM_PAGE_NULL if there are no free pages.
*/
int c_vm_page_grab_fictitious = 0;
+int c_vm_page_grab_fictitious_failed = 0;
int c_vm_page_release_fictitious = 0;
int c_vm_page_more_fictitious = 0;
-extern vm_page_t vm_page_grab_fictitious_common(ppnum_t phys_addr);
-
vm_page_t
vm_page_grab_fictitious_common(
ppnum_t phys_addr)
{
- register vm_page_t m;
+ vm_page_t m;
+
+ if ((m = (vm_page_t)zget(vm_page_zone))) {
- m = (vm_page_t)zget(vm_page_zone);
- if (m) {
vm_page_init(m, phys_addr, FALSE);
m->fictitious = TRUE;
- }
- c_vm_page_grab_fictitious++;
+ c_vm_page_grab_fictitious++;
+ } else
+ c_vm_page_grab_fictitious_failed++;
+
return m;
}
return vm_page_grab_fictitious_common(vm_page_fictitious_addr);
}
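+/* number of guard pages outstanding: incremented by vm_page_grab_guard(), decremented when a guard page is released */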
+int vm_guard_count;
+
+
vm_page_t
vm_page_grab_guard(void)
{
- return vm_page_grab_fictitious_common(vm_page_guard_addr);
+ vm_page_t page;
+ page = vm_page_grab_fictitious_common(vm_page_guard_addr);
+ if (page)
+ OSAddAtomic(1, &vm_guard_count);
+ return page;
}
+
/*
* vm_page_release_fictitious:
*
- * Release a fictitious page to the free list.
+ * Release a fictitious page back to the zone pool.
*/
-
void
vm_page_release_fictitious(
- register vm_page_t m)
+ vm_page_t m)
{
- assert(!m->free);
- assert(m->busy);
+ assert((m->vm_page_q_state == VM_PAGE_NOT_ON_Q) || (m->vm_page_q_state == VM_PAGE_IS_WIRED));
assert(m->fictitious);
- assert(m->phys_page == vm_page_fictitious_addr ||
- m->phys_page == vm_page_guard_addr);
+ assert(VM_PAGE_GET_PHYS_PAGE(m) == vm_page_fictitious_addr ||
+ VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr);
+
+ if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr)
+ OSAddAtomic(-1, &vm_guard_count);
c_vm_page_release_fictitious++;
-#if DEBUG
- if (m->free)
- panic("vm_page_release_fictitious");
-#endif
- m->free = TRUE;
+
zfree(vm_page_zone, m);
}
/*
* vm_page_more_fictitious:
*
- * Add more fictitious pages to the free list.
+ * Add more fictitious pages to the zone.
* Allowed to block. This routine is way intimate
* with the zones code, for several reasons:
* 1. we need to carve some page structures out of physical
* permanent allocation of a resource.
* 3. To smooth allocation humps, we allocate single pages
* with kernel_memory_allocate(), and cram them into the
- * zone. This also allows us to initialize the vm_page_t's
- * on the way into the zone, so that zget() always returns
- * an initialized structure. The zone free element pointer
- * and the free page pointer are both the first item in the
- * vm_page_t.
- * 4. By having the pages in the zone pre-initialized, we need
- * not keep 2 levels of lists. The garbage collector simply
- * scans our list, and reduces physical memory usage as it
- * sees fit.
+ * zone.
*/
void vm_page_more_fictitious(void)
{
- register vm_page_t m;
- vm_offset_t addr;
- kern_return_t retval;
- int i;
+ vm_offset_t addr;
+ kern_return_t retval;
c_vm_page_more_fictitious++;
}
retval = kernel_memory_allocate(zone_map,
- &addr, PAGE_SIZE, VM_PROT_ALL,
- KMA_KOBJECT|KMA_NOPAGEWAIT);
+ &addr, PAGE_SIZE, 0,
+ KMA_KOBJECT|KMA_NOPAGEWAIT, VM_KERN_MEMORY_ZONE);
if (retval != KERN_SUCCESS) {
/*
- * No page was available. Tell the pageout daemon, drop the
+ * No page was available. Drop the
* lock to give another thread a chance at it, and
* wait for the pageout daemon to make progress.
*/
vm_page_wait(THREAD_UNINT);
return;
}
- /*
- * Initialize as many vm_page_t's as will fit on this page. This
- * depends on the zone code disturbing ONLY the first item of
- * each zone element.
- */
- m = (vm_page_t)addr;
- for (i = PAGE_SIZE/sizeof(struct vm_page); i > 0; i--) {
- vm_page_init(m, vm_page_fictitious_addr, FALSE);
- m->fictitious = TRUE;
- m++;
- }
- zcram(vm_page_zone, (void *) addr, PAGE_SIZE);
+
+ zcram(vm_page_zone, addr, PAGE_SIZE);
+
lck_mtx_unlock(&vm_page_alloc_lock);
}
}
+#if CONFIG_BACKGROUND_QUEUE
+
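+/*
+ * vm_page_update_background_state:
+ *
+ * If a page currently tagged as "background" is touched by a
+ * non-background task (or, with BACKGROUNDQ_BASED_ON_QOS, by a
+ * thread above the legacy QoS tier), promote it by pulling it
+ * off the background queue.  Takes the page queues lock.
+ */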
+void
+vm_page_update_background_state(vm_page_t mem)
+{
+ if (vm_page_background_mode == VM_PAGE_BG_DISABLED)
+ return;
+
+ if (mem->vm_page_in_background == FALSE)
+ return;
+
+#if BACKGROUNDQ_BASED_ON_QOS
+ if (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_QOS) <= THREAD_QOS_LEGACY)
+ return;
+#else
+ task_t my_task;
+
+ my_task = current_task();
+
+ if (my_task) {
+ if (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG))
+ return;
+ }
+#endif
+ vm_page_lockspin_queues();
+
+ mem->vm_page_in_background = FALSE;
+ vm_page_background_promoted_count++;
+
+ vm_page_remove_from_backgroundq(mem);
+
+ vm_page_unlock_queues();
+}
+
+
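+/*
+ * vm_page_assign_background_state:
+ *
+ * Mark a freshly grabbed page as "background" when the current
+ * task is running with the DARWIN_BG policy (or, with
+ * BACKGROUNDQ_BASED_ON_QOS, when the current thread's QoS is at
+ * or below the legacy tier).
+ */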
+void
+vm_page_assign_background_state(vm_page_t mem)
+{
+ if (vm_page_background_mode == VM_PAGE_BG_DISABLED)
+ return;
+
+#if BACKGROUNDQ_BASED_ON_QOS
+ if (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_QOS) <= THREAD_QOS_LEGACY)
+ mem->vm_page_in_background = TRUE;
+ else
+ mem->vm_page_in_background = FALSE;
+#else
+ task_t my_task;
+
+ my_task = current_task();
+
+ if (my_task)
+ mem->vm_page_in_background = proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG);
+#endif
+}
+
+
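+/*
+ * vm_page_remove_from_backgroundq:
+ *
+ * Unlink the page from the global background queue and update the
+ * background internal/external counters.  The page queues lock
+ * must be held.
+ */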
+void
+vm_page_remove_from_backgroundq(
+ vm_page_t mem)
+{
+ vm_object_t m_object;
+
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+
+ if (mem->vm_page_on_backgroundq) {
+ vm_page_queue_remove(&vm_page_queue_background, mem, vm_page_t, vm_page_backgroundq);
+
+ mem->vm_page_backgroundq.next = 0;
+ mem->vm_page_backgroundq.prev = 0;
+ mem->vm_page_on_backgroundq = FALSE;
+
+ vm_page_background_count--;
+
+ m_object = VM_PAGE_OBJECT(mem);
+
+ if (m_object->internal)
+ vm_page_background_internal_count--;
+ else
+ vm_page_background_external_count--;
+ } else {
+ assert(VM_PAGE_UNPACK_PTR(mem->vm_page_backgroundq.next) == (uintptr_t)NULL &&
+ VM_PAGE_UNPACK_PTR(mem->vm_page_backgroundq.prev) == (uintptr_t)NULL);
+ }
+}
+
+
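+/*
+ * vm_page_add_to_backgroundq:
+ *
+ * Queue the page on the global background queue (at the head if
+ * "first" is TRUE) and update the background page counters.
+ * External pages may be excluded via
+ * vm_page_background_exclude_external.  The page queues lock must
+ * be held.
+ */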
+void
+vm_page_add_to_backgroundq(
+ vm_page_t mem,
+ boolean_t first)
+{
+ vm_object_t m_object;
+
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+
+ if (vm_page_background_mode == VM_PAGE_BG_DISABLED)
+ return;
+
+ if (mem->vm_page_on_backgroundq == FALSE) {
+
+ m_object = VM_PAGE_OBJECT(mem);
+
+ if (vm_page_background_exclude_external && !m_object->internal)
+ return;
+
+ if (first == TRUE)
+ vm_page_queue_enter_first(&vm_page_queue_background, mem, vm_page_t, vm_page_backgroundq);
+ else
+ vm_page_queue_enter(&vm_page_queue_background, mem, vm_page_t, vm_page_backgroundq);
+ mem->vm_page_on_backgroundq = TRUE;
+
+ vm_page_background_count++;
+
+ if (m_object->internal)
+ vm_page_background_internal_count++;
+ else
+ vm_page_background_external_count++;
+ }
+}
+
+#endif
/*
* this is an interface to support bring-up of drivers
* on platforms with physical memory > 4G...
*/
-int vm_himemory_mode = 0;
+int vm_himemory_mode = 2;
/*
unsigned int vm_lopages_allocated_q = 0;
unsigned int vm_lopages_allocated_cpm_success = 0;
unsigned int vm_lopages_allocated_cpm_failed = 0;
-queue_head_t vm_lopage_queue_free;
+vm_page_queue_head_t vm_lopage_queue_free __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
vm_page_t
vm_page_grablo(void)
lck_mtx_lock_spin(&vm_page_queue_free_lock);
- if ( !queue_empty(&vm_lopage_queue_free)) {
- queue_remove_first(&vm_lopage_queue_free,
+ if ( !vm_page_queue_empty(&vm_lopage_queue_free)) {
+ vm_page_queue_remove_first(&vm_lopage_queue_free,
mem,
vm_page_t,
pageq);
assert(vm_lopage_free_count);
+ assert(mem->vm_page_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
+ mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;
vm_lopage_free_count--;
vm_lopages_allocated_q++;
vm_lopage_refill = TRUE;
lck_mtx_unlock(&vm_page_queue_free_lock);
+
+#if CONFIG_BACKGROUND_QUEUE
+ vm_page_assign_background_state(mem);
+#endif
} else {
lck_mtx_unlock(&vm_page_queue_free_lock);
return (VM_PAGE_NULL);
}
+ assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+
mem->busy = TRUE;
vm_page_lockspin_queues();
vm_page_unlock_queues();
}
assert(mem->busy);
- assert(!mem->free);
assert(!mem->pmapped);
assert(!mem->wpmapped);
+ assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
- mem->pageq.next = NULL;
- mem->pageq.prev = NULL;
+ VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
return (mem);
}
+
/*
* vm_page_grab:
*
* request from the per-cpu queue.
*/
-#define COLOR_GROUPS_TO_STEAL 4
+#if CONFIG_SECLUDED_MEMORY
+vm_page_t vm_page_grab_secluded(void);
+#endif /* CONFIG_SECLUDED_MEMORY */
+
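+/*
+ * vm_page_grab:
+ *
+ * Legacy entry point; equivalent to vm_page_grab_options(0),
+ * i.e. no special grab options such as VM_PAGE_GRAB_SECLUDED.
+ */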
+vm_page_t
+vm_page_grab(void)
+{
+ return vm_page_grab_options(0);
+}
+#if HIBERNATION
+boolean_t hibernate_rebuild_needed = FALSE;
+#endif /* HIBERNATION */
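+/*
+ * vm_page_grab_options:
+ *
+ * Grab a page from the current CPU's free-page list if one is
+ * available; otherwise take pages from the global free queues
+ * (up to vm_free_magazine_refill_limit at a time) to refill it.
+ * When the free reserve is depleted, a page may instead be taken
+ * from the secluded queue if grab_options or the calling task
+ * allows it.
+ */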
vm_page_t
-vm_page_grab( void )
+vm_page_grab_options(
+ int grab_options)
{
vm_page_t mem;
-
disable_preemption();
if ((mem = PROCESSOR_DATA(current_processor(), free_pages))) {
return_page_from_cpu_list:
+ assert(mem->vm_page_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
+
+#if HIBERNATION
+ if (hibernate_rebuild_needed) {
+ panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
+ }
+#endif /* HIBERNATION */
PROCESSOR_DATA(current_processor(), page_grab_count) += 1;
- PROCESSOR_DATA(current_processor(), free_pages) = mem->pageq.next;
- mem->pageq.next = NULL;
+ PROCESSOR_DATA(current_processor(), free_pages) = mem->snext;
enable_preemption();
+ VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
+ mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;
- assert(mem->listq.next == NULL && mem->listq.prev == NULL);
+ assert(mem->listq.next == 0 && mem->listq.prev == 0);
assert(mem->tabled == FALSE);
- assert(mem->object == VM_OBJECT_NULL);
+ assert(mem->vm_page_object == 0);
assert(!mem->laundry);
- assert(!mem->free);
- assert(pmap_verify_free(mem->phys_page));
+ assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem)));
assert(mem->busy);
- assert(!mem->encrypted);
assert(!mem->pmapped);
assert(!mem->wpmapped);
+ assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
+#if CONFIG_BACKGROUND_QUEUE
+ vm_page_assign_background_state(mem);
+#endif
return mem;
}
enable_preemption();
* Optionally produce warnings if the wire or gobble
* counts exceed some threshold.
*/
- if (vm_page_wire_count_warning > 0
- && vm_page_wire_count >= vm_page_wire_count_warning) {
+#if VM_PAGE_WIRE_COUNT_WARNING
+ if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) {
printf("mk: vm_page_grab(): high wired page count of %d\n",
vm_page_wire_count);
- assert(vm_page_wire_count < vm_page_wire_count_warning);
}
- if (vm_page_gobble_count_warning > 0
- && vm_page_gobble_count >= vm_page_gobble_count_warning) {
+#endif
+#if VM_PAGE_GOBBLE_COUNT_WARNING
+ if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) {
printf("mk: vm_page_grab(): high gobbled page count of %d\n",
vm_page_gobble_count);
- assert(vm_page_gobble_count < vm_page_gobble_count_warning);
}
+#endif
lck_mtx_lock_spin(&vm_page_queue_free_lock);
*/
if ((vm_page_free_count < vm_page_free_reserved) &&
!(current_thread()->options & TH_OPT_VMPRIV)) {
+ /* no page for us in the free queue... */
lck_mtx_unlock(&vm_page_queue_free_lock);
mem = VM_PAGE_NULL;
+
+#if CONFIG_SECLUDED_MEMORY
+ /* ... but can we try to grab from the secluded queue? */
+ if (vm_page_secluded_count > 0 &&
+ ((grab_options & VM_PAGE_GRAB_SECLUDED) ||
+ task_can_use_secluded_mem(current_task()))) {
+ mem = vm_page_grab_secluded();
+ if (grab_options & VM_PAGE_GRAB_SECLUDED) {
+ vm_page_secluded.grab_for_iokit++;
+ if (mem) {
+ vm_page_secluded.grab_for_iokit_success++;
+ }
+ }
+ if (mem) {
+ VM_CHECK_MEMORYSTATUS;
+ return mem;
+ }
+ }
+#else /* CONFIG_SECLUDED_MEMORY */
+ (void) grab_options;
+#endif /* CONFIG_SECLUDED_MEMORY */
}
else {
vm_page_t head;
vm_page_t tail;
unsigned int pages_to_steal;
unsigned int color;
+ unsigned int clump_end, sub_count;
while ( vm_page_free_count == 0 ) {
if (vm_page_free_count <= vm_page_free_reserved)
pages_to_steal = 1;
else {
- pages_to_steal = COLOR_GROUPS_TO_STEAL * vm_colors;
-
- if (pages_to_steal > (vm_page_free_count - vm_page_free_reserved))
+ if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved))
+ pages_to_steal = vm_free_magazine_refill_limit;
+ else
pages_to_steal = (vm_page_free_count - vm_page_free_reserved);
}
color = PROCESSOR_DATA(current_processor(), start_color);
head = tail = NULL;
+ vm_page_free_count -= pages_to_steal;
+ clump_end = sub_count = 0;
+
while (pages_to_steal--) {
- if (--vm_page_free_count < vm_page_free_count_minimum)
- vm_page_free_count_minimum = vm_page_free_count;
- while (queue_empty(&vm_page_queue_free[color]))
+ while (vm_page_queue_empty(&vm_page_queue_free[color].qhead))
color = (color + 1) & vm_color_mask;
-
- queue_remove_first(&vm_page_queue_free[color],
- mem,
- vm_page_t,
- pageq);
- mem->pageq.next = NULL;
- mem->pageq.prev = NULL;
+#if defined(__x86_64__)
+ vm_page_queue_remove_first_with_clump(&vm_page_queue_free[color].qhead,
+ mem,
+ vm_page_t,
+ pageq,
+ clump_end);
+#else
+ vm_page_queue_remove_first(&vm_page_queue_free[color].qhead,
+ mem,
+ vm_page_t,
+ pageq);
+#endif
+
+ assert(mem->vm_page_q_state == VM_PAGE_ON_FREE_Q);
+
+ VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
+#if defined(__arm__) || defined(__arm64__)
color = (color + 1) & vm_color_mask;
+#else
+
+#if DEVELOPMENT || DEBUG
+
+ sub_count++;
+ if (clump_end) {
+ vm_clump_update_stats(sub_count);
+ sub_count = 0;
+ color = (color + 1) & vm_color_mask;
+ }
+#else
+ if (clump_end) color = (color + 1) & vm_color_mask;
+
+#endif /* if DEVELOPMENT || DEBUG */
+
+#endif /* if defined(__arm__) || defined(__arm64__) */
if (head == NULL)
head = mem;
else
- tail->pageq.next = (queue_t)mem;
+ tail->snext = mem;
tail = mem;
- mem->pageq.prev = NULL;
- assert(mem->listq.next == NULL && mem->listq.prev == NULL);
+ assert(mem->listq.next == 0 && mem->listq.prev == 0);
assert(mem->tabled == FALSE);
- assert(mem->object == VM_OBJECT_NULL);
+ assert(mem->vm_page_object == 0);
assert(!mem->laundry);
- assert(mem->free);
- mem->free = FALSE;
- assert(pmap_verify_free(mem->phys_page));
+ mem->vm_page_q_state = VM_PAGE_ON_FREE_LOCAL_Q;
+
+ assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem)));
assert(mem->busy);
- assert(!mem->free);
- assert(!mem->encrypted);
assert(!mem->pmapped);
assert(!mem->wpmapped);
+ assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
+ }
+#if defined (__x86_64__) && (DEVELOPMENT || DEBUG)
+ vm_clump_update_stats(sub_count);
+#endif
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+
+#if HIBERNATION
+ if (hibernate_rebuild_needed) {
+ panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
}
- PROCESSOR_DATA(current_processor(), free_pages) = head->pageq.next;
+#endif /* HIBERNATION */
+ PROCESSOR_DATA(current_processor(), free_pages) = head->snext;
PROCESSOR_DATA(current_processor(), start_color) = color;
/*
*/
PROCESSOR_DATA(current_processor(), page_grab_count) += 1;
mem = head;
- mem->pageq.next = NULL;
+ assert(mem->vm_page_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
- lck_mtx_unlock(&vm_page_queue_free_lock);
+ VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
+ mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;
enable_preemption();
}
* it doesn't really matter.
*/
if ((vm_page_free_count < vm_page_free_min) ||
- ((vm_page_free_count < vm_page_free_target) &&
- ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_min)))
- thread_wakeup((event_t) &vm_page_free_wanted);
+ ((vm_page_free_count < vm_page_free_target) &&
+ ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_min)))
+ thread_wakeup((event_t) &vm_page_free_wanted);
-#if CONFIG_EMBEDDED
- {
- int percent_avail;
+ VM_CHECK_MEMORYSTATUS;
- /*
- * Decide if we need to poke the memorystatus notification thread.
- */
- percent_avail =
- (vm_page_active_count + vm_page_inactive_count +
- vm_page_speculative_count + vm_page_free_count +
- (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
- atop_64(max_mem);
- if (percent_avail <= (kern_memorystatus_level - 5)) {
- kern_memorystatus_level = percent_avail;
- thread_wakeup((event_t)&kern_memorystatus_wakeup);
+ if (mem) {
+// dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 4); /* (TEST/DEBUG) */
+
+#if CONFIG_BACKGROUND_QUEUE
+ vm_page_assign_background_state(mem);
+#endif
+ }
+ return mem;
+}
+
+#if CONFIG_SECLUDED_MEMORY
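+/*
+ * vm_page_grab_secluded:
+ *
+ * Try to steal a page from the secluded queue.  A free secluded
+ * page is returned directly; a page still owned by an object is
+ * disconnected from the pmap and reclaimed, unless it is busy,
+ * cleaning, being laundered, or dirty/precious, in which case it
+ * is reactivated and VM_PAGE_NULL is returned.
+ */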
+vm_page_t
+vm_page_grab_secluded(void)
+{
+ vm_page_t mem;
+ vm_object_t object;
+ int refmod_state;
+
+ if (vm_page_secluded_count == 0) {
+ /* no secluded pages to grab... */
+ return VM_PAGE_NULL;
+ }
+
+ /* secluded queue is protected by the VM page queue lock */
+ vm_page_lock_queues();
+
+ if (vm_page_secluded_count == 0) {
+ /* no secluded pages to grab... */
+ vm_page_unlock_queues();
+ return VM_PAGE_NULL;
}
+
+#if 00
+ /* can we grab from the secluded queue? */
+ if (vm_page_secluded_count > vm_page_secluded_target ||
+ (vm_page_secluded_count > 0 &&
+ task_can_use_secluded_mem(current_task()))) {
+ /* OK */
+ } else {
+ /* can't grab from secluded queue... */
+ vm_page_unlock_queues();
+ return VM_PAGE_NULL;
}
#endif
-// dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 4); /* (TEST/DEBUG) */
+ /* we can grab a page from the secluded queue! */
+ assert((vm_page_secluded_count_free +
+ vm_page_secluded_count_inuse) ==
+ vm_page_secluded_count);
+ if (current_task()->task_can_use_secluded_mem) {
+ assert(num_tasks_can_use_secluded_mem > 0);
+ }
+ assert(!vm_page_queue_empty(&vm_page_queue_secluded));
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+ mem = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
+ assert(mem->vm_page_q_state == VM_PAGE_ON_SECLUDED_Q);
+ vm_page_queues_remove(mem, TRUE);
+
+ object = VM_PAGE_OBJECT(mem);
+
+ assert(!mem->fictitious);
+ assert(!VM_PAGE_WIRED(mem));
+ if (object == VM_OBJECT_NULL) {
+ /* free for grab! */
+ vm_page_unlock_queues();
+ vm_page_secluded.grab_success_free++;
+
+ assert(mem->busy);
+ assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
+ assert(mem->pageq.next == 0);
+ assert(mem->pageq.prev == 0);
+ assert(mem->listq.next == 0);
+ assert(mem->listq.prev == 0);
+#if CONFIG_BACKGROUND_QUEUE
+ assert(mem->vm_page_on_backgroundq == 0);
+ assert(mem->vm_page_backgroundq.next == 0);
+ assert(mem->vm_page_backgroundq.prev == 0);
+#endif /* CONFIG_BACKGROUND_QUEUE */
+ return mem;
+ }
+
+ assert(!object->internal);
+// vm_page_pageable_external_count--;
+
+ if (!vm_object_lock_try(object)) {
+// printf("SECLUDED: page %p: object %p locked\n", mem, object);
+ vm_page_secluded.grab_failure_locked++;
+ reactivate_secluded_page:
+ vm_page_activate(mem);
+ vm_page_unlock_queues();
+ return VM_PAGE_NULL;
+ }
+ if (mem->busy ||
+ mem->cleaning ||
+ mem->laundry) {
+ /* can't steal page in this state... */
+ vm_object_unlock(object);
+ vm_page_secluded.grab_failure_state++;
+ goto reactivate_secluded_page;
+ }
+
+ mem->busy = TRUE;
+ refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
+ if (refmod_state & VM_MEM_REFERENCED) {
+ mem->reference = TRUE;
+ }
+ if (refmod_state & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(mem, FALSE);
+ }
+ if (mem->dirty || mem->precious) {
+ /* can't grab a dirty page; re-activate */
+// printf("SECLUDED: dirty page %p\n", mem);
+ PAGE_WAKEUP_DONE(mem);
+ vm_page_secluded.grab_failure_dirty++;
+ vm_object_unlock(object);
+ goto reactivate_secluded_page;
+ }
+ if (mem->reference) {
+ /* it's been used but we do need to grab a page... */
+ }
+
+ vm_page_unlock_queues();
+
+ /* finish what vm_page_free() would have done... */
+ vm_page_free_prepare_object(mem, TRUE);
+ vm_object_unlock(object);
+ object = VM_OBJECT_NULL;
+ if (vm_page_free_verify) {
+ assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem)));
+ }
+ pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
+ vm_page_secluded.grab_success_other++;
+
+ assert(mem->busy);
+ assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
+ assert(mem->pageq.next == 0);
+ assert(mem->pageq.prev == 0);
+ assert(mem->listq.next == 0);
+ assert(mem->listq.prev == 0);
+#if CONFIG_BACKGROUND_QUEUE
+ assert(mem->vm_page_on_backgroundq == 0);
+ assert(mem->vm_page_backgroundq.next == 0);
+ assert(mem->vm_page_backgroundq.prev == 0);
+#endif /* CONFIG_BACKGROUND_QUEUE */
return mem;
}
+#endif /* CONFIG_SECLUDED_MEMORY */
/*
* vm_page_release:
void
vm_page_release(
- register vm_page_t mem)
+ vm_page_t mem,
+ boolean_t page_queues_locked)
{
unsigned int color;
int need_wakeup = 0;
int need_priv_wakeup = 0;
-#if 0
- unsigned int pindex;
- phys_entry *physent;
+#if CONFIG_SECLUDED_MEMORY
+ int need_secluded_wakeup = 0;
+#endif /* CONFIG_SECLUDED_MEMORY */
- physent = mapping_phys_lookup(mem->phys_page, &pindex); /* (BRINGUP) */
- if(physent->ppLink & ppN) { /* (BRINGUP) */
- panic("vm_page_release: already released - %08X %08X\n", mem, mem->phys_page);
+ if (page_queues_locked) {
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+ } else {
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
}
- physent->ppLink = physent->ppLink | ppN; /* (BRINGUP) */
-#endif
+
assert(!mem->private && !mem->fictitious);
if (vm_page_free_verify) {
- assert(pmap_verify_free(mem->phys_page));
+ assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem)));
}
-// dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 5); /* (TEST/DEBUG) */
+// dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 5); /* (TEST/DEBUG) */
+ pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
lck_mtx_lock_spin(&vm_page_queue_free_lock);
-#if DEBUG
- if (mem->free)
- panic("vm_page_release");
-#endif
+
+ assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
assert(mem->busy);
assert(!mem->laundry);
- assert(mem->object == VM_OBJECT_NULL);
- assert(mem->pageq.next == NULL &&
- mem->pageq.prev == NULL);
- assert(mem->listq.next == NULL &&
- mem->listq.prev == NULL);
-
- if ((mem->lopage || vm_lopage_refill == TRUE) &&
+ assert(mem->vm_page_object == 0);
+ assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
+ assert(mem->listq.next == 0 && mem->listq.prev == 0);
+#if CONFIG_BACKGROUND_QUEUE
+ assert(mem->vm_page_backgroundq.next == 0 &&
+ mem->vm_page_backgroundq.prev == 0 &&
+ mem->vm_page_on_backgroundq == FALSE);
+#endif
+ if ((mem->lopage == TRUE || vm_lopage_refill == TRUE) &&
vm_lopage_free_count < vm_lopage_free_limit &&
- mem->phys_page < max_valid_low_ppnum) {
+ VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
/*
* this exists to support hardware controllers
* incapable of generating DMAs with more than 32 bits
* of address on platforms with physical memory > 4G...
*/
- queue_enter_first(&vm_lopage_queue_free,
- mem,
- vm_page_t,
- pageq);
+ vm_page_queue_enter_first(&vm_lopage_queue_free,
+ mem,
+ vm_page_t,
+ pageq);
vm_lopage_free_count++;
if (vm_lopage_free_count >= vm_lopage_free_limit)
vm_lopage_refill = FALSE;
+ mem->vm_page_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
mem->lopage = TRUE;
- } else {
- mem->lopage = FALSE;
- mem->free = TRUE;
-
- color = mem->phys_page & vm_color_mask;
- queue_enter_first(&vm_page_queue_free[color],
- mem,
- vm_page_t,
- pageq);
- vm_page_free_count++;
+#if CONFIG_SECLUDED_MEMORY
+ } else if (vm_page_free_count > vm_page_free_reserved &&
+ vm_page_secluded_count < vm_page_secluded_target &&
+ num_tasks_can_use_secluded_mem == 0) {
+ /*
+ * XXX FBDP TODO: also avoid refilling secluded queue
+ * when some IOKit objects are already grabbing from it...
+ */
+ if (!page_queues_locked) {
+ if (!vm_page_trylock_queues()) {
+ /* take locks in right order */
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+ vm_page_lock_queues();
+ lck_mtx_lock_spin(&vm_page_queue_free_lock);
+ }
+ }
+ mem->lopage = FALSE;
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+ vm_page_queue_enter_first(&vm_page_queue_secluded,
+ mem,
+ vm_page_t,
+ pageq);
+ mem->vm_page_q_state = VM_PAGE_ON_SECLUDED_Q;
+ vm_page_secluded_count++;
+ vm_page_secluded_count_free++;
+ if (!page_queues_locked) {
+ vm_page_unlock_queues();
+ }
+ LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_OWNED);
+ if (vm_page_free_wanted_secluded > 0) {
+ vm_page_free_wanted_secluded--;
+ need_secluded_wakeup = 1;
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+ } else {
+ mem->lopage = FALSE;
+ mem->vm_page_q_state = VM_PAGE_ON_FREE_Q;
+
+ color = VM_PAGE_GET_COLOR(mem);
+#if defined(__x86_64__)
+ vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead,
+ mem,
+ vm_page_t,
+ pageq);
+#else
+ vm_page_queue_enter(&vm_page_queue_free[color].qhead,
+ mem,
+ vm_page_t,
+ pageq);
+#endif
+ vm_page_free_count++;
/*
* Check if we should wake up someone waiting for page.
* But don't bother waking them unless they can allocate.
if (vm_page_free_wanted_privileged > 0) {
vm_page_free_wanted_privileged--;
need_priv_wakeup = 1;
+#if CONFIG_SECLUDED_MEMORY
+ } else if (vm_page_free_wanted_secluded > 0 &&
+ vm_page_free_count > vm_page_free_reserved) {
+ vm_page_free_wanted_secluded--;
+ need_secluded_wakeup = 1;
+#endif /* CONFIG_SECLUDED_MEMORY */
} else if (vm_page_free_wanted > 0 &&
vm_page_free_count > vm_page_free_reserved) {
vm_page_free_wanted--;
if (need_priv_wakeup)
thread_wakeup_one((event_t) &vm_page_free_wanted_privileged);
+#if CONFIG_SECLUDED_MEMORY
+ else if (need_secluded_wakeup)
+ thread_wakeup_one((event_t) &vm_page_free_wanted_secluded);
+#endif /* CONFIG_SECLUDED_MEMORY */
else if (need_wakeup)
thread_wakeup_one((event_t) &vm_page_free_count);
-#if CONFIG_EMBEDDED
- {
- int percent_avail;
+ VM_CHECK_MEMORYSTATUS;
+}
- /*
- * Decide if we need to poke the memorystatus notification thread.
- * Locking is not a big issue, as only a single thread delivers these.
- */
- percent_avail =
- (vm_page_active_count + vm_page_inactive_count +
- vm_page_speculative_count + vm_page_free_count +
- (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
- atop_64(max_mem);
- if (percent_avail >= (kern_memorystatus_level + 5)) {
- kern_memorystatus_level = percent_avail;
- thread_wakeup((event_t)&kern_memorystatus_wakeup);
- }
+/*
+ * This version of vm_page_release() is used only at startup
+ * when we are single-threaded and pages are being released
+ * for the first time. Hence, no locking is needed and redundant
+ * checks are skipped.
+ * Note: VM_CHECK_MEMORYSTATUS is invoked by the caller.
+ */
+void
+vm_page_release_startup(
+ vm_page_t mem)
+{
+ vm_page_queue_t queue_free;
+
+ if (vm_lopage_free_count < vm_lopage_free_limit &&
+ VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
+ mem->lopage = TRUE;
+ mem->vm_page_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
+ vm_lopage_free_count++;
+ queue_free = &vm_lopage_queue_free;
+#if CONFIG_SECLUDED_MEMORY
+ } else if (vm_page_secluded_count < vm_page_secluded_target) {
+ mem->lopage = FALSE;
+ mem->vm_page_q_state = VM_PAGE_ON_SECLUDED_Q;
+ vm_page_secluded_count++;
+ vm_page_secluded_count_free++;
+ queue_free = &vm_page_queue_secluded;
+#endif /* CONFIG_SECLUDED_MEMORY */
+ } else {
+ mem->lopage = FALSE;
+ mem->vm_page_q_state = VM_PAGE_ON_FREE_Q;
+ vm_page_free_count++;
+ queue_free = &vm_page_queue_free[VM_PAGE_GET_COLOR(mem)].qhead;
}
+ if (mem->vm_page_q_state == VM_PAGE_ON_FREE_Q) {
+#if defined(__x86_64__)
+ vm_page_queue_enter_clump(queue_free, mem, vm_page_t, pageq);
+#else
+ vm_page_queue_enter(queue_free, mem, vm_page_t, pageq);
#endif
+ } else
+ vm_page_queue_enter_first(queue_free, mem, vm_page_t, pageq);
}
/*
lck_mtx_unlock(&vm_page_queue_free_lock);
return TRUE;
}
- if (vm_page_free_count < vm_page_free_target) {
- if (is_privileged) {
- if (vm_page_free_wanted_privileged++ == 0)
- need_wakeup = 1;
- wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, interruptible);
- } else {
- if (vm_page_free_wanted++ == 0)
- need_wakeup = 1;
- wait_result = assert_wait((event_t)&vm_page_free_count, interruptible);
- }
+ if (vm_page_free_count >= vm_page_free_target) {
lck_mtx_unlock(&vm_page_queue_free_lock);
- counter(c_vm_page_wait_block++);
-
- if (need_wakeup)
- thread_wakeup((event_t)&vm_page_free_wanted);
-
- if (wait_result == THREAD_WAITING)
- wait_result = thread_block(THREAD_CONTINUE_NULL);
+ return TRUE;
+ }
- return(wait_result == THREAD_AWAKENED);
+ if (is_privileged) {
+ if (vm_page_free_wanted_privileged++ == 0)
+ need_wakeup = 1;
+ wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, interruptible);
+#if CONFIG_SECLUDED_MEMORY
+ } else if (secluded_for_apps &&
+ task_can_use_secluded_mem(current_task())) {
+#if 00
+ /* XXX FBDP: need pageq lock for this... */
+ /* XXX FBDP: might wait even if pages available, */
+ /* XXX FBDP: hopefully not for too long... */
+ if (vm_page_secluded_count > 0) {
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+ return TRUE;
+ }
+#endif
+ if (vm_page_free_wanted_secluded++ == 0) {
+ need_wakeup = 1;
+ }
+ wait_result = assert_wait(
+ (event_t)&vm_page_free_wanted_secluded,
+ interruptible);
+#endif /* CONFIG_SECLUDED_MEMORY */
} else {
- lck_mtx_unlock(&vm_page_queue_free_lock);
- return TRUE;
+ if (vm_page_free_wanted++ == 0)
+ need_wakeup = 1;
+ wait_result = assert_wait((event_t)&vm_page_free_count,
+ interruptible);
+ }
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+ counter(c_vm_page_wait_block++);
+
+ if (need_wakeup)
+ thread_wakeup((event_t)&vm_page_free_wanted);
+
+ if (wait_result == THREAD_WAITING) {
+ VM_DEBUG_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
+ vm_page_free_wanted_privileged,
+ vm_page_free_wanted,
+#if CONFIG_SECLUDED_MEMORY
+ vm_page_free_wanted_secluded,
+#else /* CONFIG_SECLUDED_MEMORY */
+ 0,
+#endif /* CONFIG_SECLUDED_MEMORY */
+ 0);
+ wait_result = thread_block(THREAD_CONTINUE_NULL);
+ VM_DEBUG_EVENT(vm_page_wait_block,
+ VM_PAGE_WAIT_BLOCK, DBG_FUNC_END, 0, 0, 0, 0);
}
+
+ return (wait_result == THREAD_AWAKENED);
}
/*
vm_object_t object,
vm_object_offset_t offset)
{
- register vm_page_t mem;
-
- vm_object_lock_assert_exclusive(object);
- mem = vm_page_grab();
- if (mem == VM_PAGE_NULL)
- return VM_PAGE_NULL;
-
- vm_page_insert(mem, object, offset);
-
- return(mem);
-}
-
-vm_page_t
-vm_page_alloclo(
- vm_object_t object,
- vm_object_offset_t offset)
-{
- register vm_page_t mem;
+ vm_page_t mem;
+ int grab_options;
vm_object_lock_assert_exclusive(object);
- mem = vm_page_grablo();
+ grab_options = 0;
+#if CONFIG_SECLUDED_MEMORY
+ if (object->can_grab_secluded) {
+ grab_options |= VM_PAGE_GRAB_SECLUDED;
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+ mem = vm_page_grab_options(grab_options);
if (mem == VM_PAGE_NULL)
return VM_PAGE_NULL;
return(mem);
}
-
/*
* vm_page_alloc_guard:
*
vm_object_t object,
vm_object_offset_t offset)
{
- register vm_page_t mem;
+ vm_page_t mem;
vm_object_lock_assert_exclusive(object);
mem = vm_page_grab_guard();
counter(unsigned int c_laundry_pages_freed = 0;)
/*
- * vm_page_free:
+ * vm_page_free_prepare:
*
- * Returns the given page to the free list,
- * disassociating it with any VM object.
+ * Removes page from any queue it may be on
+ * and disassociates it from its VM object.
*
* Object and page queues must be locked prior to entry.
*/
static void
vm_page_free_prepare(
- register vm_page_t mem)
+ vm_page_t mem)
{
vm_page_free_prepare_queues(mem);
vm_page_free_prepare_object(mem, TRUE);
vm_page_free_prepare_queues(
vm_page_t mem)
{
+ vm_object_t m_object;
+
VM_PAGE_CHECK(mem);
- assert(!mem->free);
+
+ assert(mem->vm_page_q_state != VM_PAGE_ON_FREE_Q);
assert(!mem->cleaning);
- assert(!mem->pageout);
-#if DEBUG
- lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
- if (mem->free)
- panic("vm_page_free: freeing page on free list\n");
-#endif
- if (mem->object) {
- vm_object_lock_assert_exclusive(mem->object);
- }
+ m_object = VM_PAGE_OBJECT(mem);
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+ if (m_object) {
+ vm_object_lock_assert_exclusive(m_object);
+ }
if (mem->laundry) {
/*
* We may have to free a page while it's being laundered
* if we lost its pager (due to a forced unmount, for example).
- * We need to call vm_pageout_throttle_up() before removing
- * the page from its VM object, so that we can find out on
- * which pageout queue the page is on.
+ * We need to call vm_pageout_steal_laundry() before removing
+ * the page from its VM object, so that we can remove it
+ * from its pageout queue and adjust the laundry accounting
*/
- vm_pageout_throttle_up(mem);
+ vm_pageout_steal_laundry(mem, TRUE);
counter(++c_laundry_pages_freed);
}
- VM_PAGE_QUEUES_REMOVE(mem); /* clears local/active/inactive/throttled/speculative */
+
+ vm_page_queues_remove(mem, TRUE);
if (VM_PAGE_WIRED(mem)) {
- if (mem->object) {
- assert(mem->object->wired_page_count > 0);
- mem->object->wired_page_count--;
- assert(mem->object->resident_page_count >=
- mem->object->wired_page_count);
+ assert(mem->wire_count > 0);
+
+ if (m_object) {
+
+ VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
+ VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
+ VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
+
+ assert(m_object->resident_page_count >=
+ m_object->wired_page_count);
+
+ if (m_object->purgable == VM_PURGABLE_VOLATILE) {
+ OSAddAtomic(+1, &vm_page_purgeable_count);
+ assert(vm_page_purgeable_wired_count > 0);
+ OSAddAtomic(-1, &vm_page_purgeable_wired_count);
+ }
+ if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
+ m_object->purgable == VM_PURGABLE_EMPTY) &&
+ m_object->vo_purgeable_owner != TASK_NULL) {
+ task_t owner;
+
+ owner = m_object->vo_purgeable_owner;
+ /*
+ * While wired, this page was accounted
+ * as "non-volatile" but it should now
+ * be accounted as "volatile".
+ */
+ /* one less "non-volatile"... */
+ ledger_debit(owner->ledger,
+ task_ledgers.purgeable_nonvolatile,
+ PAGE_SIZE);
+ /* ... and "phys_footprint" */
+ ledger_debit(owner->ledger,
+ task_ledgers.phys_footprint,
+ PAGE_SIZE);
+ /* one more "volatile" */
+ ledger_credit(owner->ledger,
+ task_ledgers.purgeable_volatile,
+ PAGE_SIZE);
+ }
}
if (!mem->private && !mem->fictitious)
vm_page_wire_count--;
+
+ mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;
mem->wire_count = 0;
assert(!mem->gobbled);
} else if (mem->gobbled) {
vm_page_t mem,
boolean_t remove_from_hash)
{
- if (mem->object) {
- vm_object_lock_assert_exclusive(mem->object);
- }
-
if (mem->tabled)
vm_page_remove(mem, remove_from_hash); /* clears tabled, object, offset */
if (mem->private) {
mem->private = FALSE;
mem->fictitious = TRUE;
- mem->phys_page = vm_page_fictitious_addr;
+ VM_PAGE_SET_PHYS_PAGE(mem, vm_page_fictitious_addr);
}
- if (mem->fictitious) {
- /* Some of these may be unnecessary */
- mem->gobbled = FALSE;
- mem->busy = TRUE;
- mem->absent = FALSE;
- mem->error = FALSE;
- mem->dirty = FALSE;
- mem->precious = FALSE;
- mem->reference = FALSE;
- mem->encrypted = FALSE;
- mem->encrypted_cleaning = FALSE;
- mem->pmapped = FALSE;
- mem->wpmapped = FALSE;
- mem->reusable = FALSE;
- } else {
- if (mem->zero_fill == TRUE)
- VM_ZF_COUNT_DECR();
- vm_page_init(mem, mem->phys_page, mem->lopage);
+ if ( !mem->fictitious) {
+ assert(mem->pageq.next == 0);
+ assert(mem->pageq.prev == 0);
+ assert(mem->listq.next == 0);
+ assert(mem->listq.prev == 0);
+#if CONFIG_BACKGROUND_QUEUE
+ assert(mem->vm_page_backgroundq.next == 0);
+ assert(mem->vm_page_backgroundq.prev == 0);
+#endif /* CONFIG_BACKGROUND_QUEUE */
+ assert(mem->next_m == 0);
+ vm_page_init(mem, VM_PAGE_GET_PHYS_PAGE(mem), mem->lopage);
}
}
+/*
+ * vm_page_free:
+ *
+ * Returns the given page to the free list,
+ * disassociating it with any VM object.
+ *
+ * Object and page queues must be locked prior to entry.
+ */
void
vm_page_free(
vm_page_t mem)
{
vm_page_free_prepare(mem);
+
if (mem->fictitious) {
vm_page_release_fictitious(mem);
} else {
- vm_page_release(mem);
+ vm_page_release(mem,
+ TRUE); /* page queues are locked */
}
}
if (mem->fictitious) {
vm_page_release_fictitious(mem);
} else {
- vm_page_release(mem);
+ vm_page_release(mem, FALSE); /* page queues are not locked */
}
}
+
/*
* Free a list of pages. The list can be up to several hundred pages,
* as blocked up by vm_pageout_scan().
* The big win is not having to take the free list lock once
- * per page. We sort the incoming pages into n lists, one for
- * each color.
+ * per page.
+ *
+ * The VM page queues lock (vm_page_queue_lock) should NOT be held.
+ * The VM page free queues lock (vm_page_queue_free_lock) should NOT be held.
*/
void
vm_page_free_list(
- vm_page_t mem,
+ vm_page_t freeq,
boolean_t prepare_object)
{
+ vm_page_t mem;
vm_page_t nxt;
- int pg_count = 0;
- int color;
- int inuse_list_head = -1;
+ vm_page_t local_freeq;
+ int pg_count;
- queue_head_t free_list[MAX_COLORS];
- int inuse[MAX_COLORS];
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_NOTOWNED);
- for (color = 0; color < (signed) vm_colors; color++) {
- queue_init(&free_list[color]);
- }
-
- while (mem) {
- assert(!mem->inactive);
- assert(!mem->active);
- assert(!mem->throttled);
- assert(!mem->free);
- assert(!mem->speculative);
- assert(!VM_PAGE_WIRED(mem));
- assert(mem->pageq.prev == NULL);
-
- nxt = (vm_page_t)(mem->pageq.next);
-
- if (prepare_object == TRUE)
- vm_page_free_prepare_object(mem, TRUE);
+ while (freeq) {
- if (vm_page_free_verify && !mem->fictitious && !mem->private) {
- assert(pmap_verify_free(mem->phys_page));
- }
- assert(mem->busy);
+ pg_count = 0;
+ local_freeq = VM_PAGE_NULL;
+ mem = freeq;
- if (!mem->fictitious) {
- if ((mem->lopage == TRUE || vm_lopage_refill == TRUE) &&
- vm_lopage_free_count < vm_lopage_free_limit &&
- mem->phys_page < max_valid_low_ppnum) {
- mem->pageq.next = NULL;
- vm_page_release(mem);
- } else {
+ /*
+ * Break up the processing into smaller chunks so
+ * that we can 'pipeline' the pages onto the
+ * free list without introducing too much
+ * contention on the global free queue lock.
+ */
+ while (mem && pg_count < 64) {
+
+ assert((mem->vm_page_q_state == VM_PAGE_NOT_ON_Q) ||
+ (mem->vm_page_q_state == VM_PAGE_IS_WIRED));
+#if CONFIG_BACKGROUND_QUEUE
+ assert(mem->vm_page_backgroundq.next == 0 &&
+ mem->vm_page_backgroundq.prev == 0 &&
+ mem->vm_page_on_backgroundq == FALSE);
+#endif
+ nxt = mem->snext;
+ mem->snext = NULL;
+ assert(mem->pageq.prev == 0);
- /*
- * IMPORTANT: we can't set the page "free" here
- * because that would make the page eligible for
- * a physically-contiguous allocation (see
- * vm_page_find_contiguous()) right away (we don't
- * hold the vm_page_queue_free lock). That would
- * cause trouble because the page is not actually
- * in the free queue yet...
- */
- color = mem->phys_page & vm_color_mask;
- if (queue_empty(&free_list[color])) {
- inuse[color] = inuse_list_head;
- inuse_list_head = color;
+ if (vm_page_free_verify && !mem->fictitious && !mem->private) {
+ assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem)));
+ }
+ if (prepare_object == TRUE)
+ vm_page_free_prepare_object(mem, TRUE);
+
+ if (!mem->fictitious) {
+ assert(mem->busy);
+
+ if ((mem->lopage == TRUE || vm_lopage_refill == TRUE) &&
+ vm_lopage_free_count < vm_lopage_free_limit &&
+ VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
+ vm_page_release(mem, FALSE); /* page queues are not locked */
+#if CONFIG_SECLUDED_MEMORY
+ } else if (vm_page_secluded_count < vm_page_secluded_target &&
+ num_tasks_can_use_secluded_mem == 0) {
+ vm_page_release(mem,
+ FALSE); /* page queues are not locked */
+#endif /* CONFIG_SECLUDED_MEMORY */
+ } else {
+ /*
+ * IMPORTANT: we can't set the page "free" here
+ * because that would make the page eligible for
+ * a physically-contiguous allocation (see
+ * vm_page_find_contiguous()) right away (we don't
+ * hold the vm_page_queue_free lock). That would
+ * cause trouble because the page is not actually
+ * in the free queue yet...
+ */
+ mem->snext = local_freeq;
+ local_freeq = mem;
+ pg_count++;
+
+ pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
}
- queue_enter_first(&free_list[color],
- mem,
- vm_page_t,
- pageq);
- pg_count++;
+ } else {
+ assert(VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_fictitious_addr ||
+ VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_guard_addr);
+ vm_page_release_fictitious(mem);
}
- } else {
- assert(mem->phys_page == vm_page_fictitious_addr ||
- mem->phys_page == vm_page_guard_addr);
- vm_page_release_fictitious(mem);
+ mem = nxt;
}
- mem = nxt;
- }
- if (pg_count) {
- unsigned int avail_free_count;
- unsigned int need_wakeup = 0;
- unsigned int need_priv_wakeup = 0;
+ freeq = mem;
+
+ if ( (mem = local_freeq) ) {
+ unsigned int avail_free_count;
+ unsigned int need_wakeup = 0;
+ unsigned int need_priv_wakeup = 0;
+#if CONFIG_SECLUDED_MEMORY
+ unsigned int need_wakeup_secluded = 0;
+#endif /* CONFIG_SECLUDED_MEMORY */
- lck_mtx_lock_spin(&vm_page_queue_free_lock);
+ lck_mtx_lock_spin(&vm_page_queue_free_lock);
- color = inuse_list_head;
-
- while( color != -1 ) {
- vm_page_t first, last;
- vm_page_t first_free;
+ while (mem) {
+ int color;
- /*
- * Now that we hold the vm_page_queue_free lock,
- * it's safe to mark all pages in our local queue
- * as "free"...
- */
- queue_iterate(&free_list[color],
- mem,
- vm_page_t,
- pageq) {
- assert(!mem->free);
+ nxt = mem->snext;
+
+ assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
assert(mem->busy);
- mem->free = TRUE;
+ mem->lopage = FALSE;
+ mem->vm_page_q_state = VM_PAGE_ON_FREE_Q;
+
+ color = VM_PAGE_GET_COLOR(mem);
+#if defined(__x86_64__)
+ vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead,
+ mem,
+ vm_page_t,
+ pageq);
+#else
+ vm_page_queue_enter(&vm_page_queue_free[color].qhead,
+ mem,
+ vm_page_t,
+ pageq);
+#endif
+ mem = nxt;
}
+ vm_page_free_count += pg_count;
+ avail_free_count = vm_page_free_count;
- /*
- * ... and insert our local queue at the head of
- * the global free queue.
- */
- first = (vm_page_t) queue_first(&free_list[color]);
- last = (vm_page_t) queue_last(&free_list[color]);
- first_free = (vm_page_t) queue_first(&vm_page_queue_free[color]);
- if (queue_empty(&vm_page_queue_free[color])) {
- queue_last(&vm_page_queue_free[color]) =
- (queue_entry_t) last;
- } else {
- queue_prev(&first_free->pageq) =
- (queue_entry_t) last;
+ if (vm_page_free_wanted_privileged > 0 && avail_free_count > 0) {
+
+ if (avail_free_count < vm_page_free_wanted_privileged) {
+ need_priv_wakeup = avail_free_count;
+ vm_page_free_wanted_privileged -= avail_free_count;
+ avail_free_count = 0;
+ } else {
+ need_priv_wakeup = vm_page_free_wanted_privileged;
+ avail_free_count -= vm_page_free_wanted_privileged;
+ vm_page_free_wanted_privileged = 0;
+ }
}
- queue_first(&vm_page_queue_free[color]) =
- (queue_entry_t) first;
- queue_prev(&first->pageq) =
- (queue_entry_t) &vm_page_queue_free[color];
- queue_next(&last->pageq) =
- (queue_entry_t) first_free;
-
- /* next color */
- color = inuse[color];
- }
-
- vm_page_free_count += pg_count;
- avail_free_count = vm_page_free_count;
-
- if (vm_page_free_wanted_privileged > 0 &&
- avail_free_count > 0) {
- if (avail_free_count < vm_page_free_wanted_privileged) {
- need_priv_wakeup = avail_free_count;
- vm_page_free_wanted_privileged -=
- avail_free_count;
- avail_free_count = 0;
- } else {
- need_priv_wakeup = vm_page_free_wanted_privileged;
- vm_page_free_wanted_privileged = 0;
- avail_free_count -=
- vm_page_free_wanted_privileged;
+#if CONFIG_SECLUDED_MEMORY
+ if (vm_page_free_wanted_secluded > 0 &&
+ avail_free_count > vm_page_free_reserved) {
+ unsigned int available_pages;
+ available_pages = (avail_free_count -
+ vm_page_free_reserved);
+ if (available_pages <
+ vm_page_free_wanted_secluded) {
+ need_wakeup_secluded = available_pages;
+ vm_page_free_wanted_secluded -=
+ available_pages;
+ avail_free_count -= available_pages;
+ } else {
+ need_wakeup_secluded =
+ vm_page_free_wanted_secluded;
+ avail_free_count -=
+ vm_page_free_wanted_secluded;
+ vm_page_free_wanted_secluded = 0;
+ }
}
- }
+#endif /* CONFIG_SECLUDED_MEMORY */
+ if (vm_page_free_wanted > 0 && avail_free_count > vm_page_free_reserved) {
+ unsigned int available_pages;
- if (vm_page_free_wanted > 0 &&
- avail_free_count > vm_page_free_reserved) {
- unsigned int available_pages;
+ available_pages = avail_free_count - vm_page_free_reserved;
- available_pages = (avail_free_count -
- vm_page_free_reserved);
+ if (available_pages >= vm_page_free_wanted) {
+ need_wakeup = vm_page_free_wanted;
+ vm_page_free_wanted = 0;
+ } else {
+ need_wakeup = available_pages;
+ vm_page_free_wanted -= available_pages;
+ }
+ }
+ lck_mtx_unlock(&vm_page_queue_free_lock);
- if (available_pages >= vm_page_free_wanted) {
- need_wakeup = vm_page_free_wanted;
- vm_page_free_wanted = 0;
+ if (need_priv_wakeup != 0) {
+ /*
+ * There shouldn't be that many VM-privileged threads,
+ * so let's wake them all up, even if we don't quite
+ * have enough pages to satisfy them all.
+ */
+ thread_wakeup((event_t)&vm_page_free_wanted_privileged);
+ }
+#if CONFIG_SECLUDED_MEMORY
+ if (need_wakeup_secluded != 0 &&
+ vm_page_free_wanted_secluded == 0) {
+ thread_wakeup((event_t)
+ &vm_page_free_wanted_secluded);
} else {
- need_wakeup = available_pages;
- vm_page_free_wanted -= available_pages;
+ for (;
+ need_wakeup_secluded != 0;
+ need_wakeup_secluded--) {
+ thread_wakeup_one(
+ (event_t)
+ &vm_page_free_wanted_secluded);
+ }
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+ if (need_wakeup != 0 && vm_page_free_wanted == 0) {
+ /*
+ * We don't expect to have any more waiters
+ * after this, so let's wake them all up at
+ * once.
+ */
+ thread_wakeup((event_t) &vm_page_free_count);
+ } else for (; need_wakeup != 0; need_wakeup--) {
+ /*
+ * Wake up one waiter per page we just released.
+ */
+ thread_wakeup_one((event_t) &vm_page_free_count);
}
- }
- lck_mtx_unlock(&vm_page_queue_free_lock);
-
- if (need_priv_wakeup != 0) {
- /*
- * There shouldn't be that many VM-privileged threads,
- * so let's wake them all up, even if we don't quite
- * have enough pages to satisfy them all.
- */
- thread_wakeup((event_t)&vm_page_free_wanted_privileged);
- }
- if (need_wakeup != 0 && vm_page_free_wanted == 0) {
- /*
- * We don't expect to have any more waiters
- * after this, so let's wake them all up at
- * once.
- */
- thread_wakeup((event_t) &vm_page_free_count);
- } else for (; need_wakeup != 0; need_wakeup--) {
- /*
- * Wake up one waiter per page we just released.
- */
- thread_wakeup_one((event_t) &vm_page_free_count);
- }
-#if CONFIG_EMBEDDED
- {
- int percent_avail;
- /*
- * Decide if we need to poke the memorystatus notification thread.
- */
- percent_avail =
- (vm_page_active_count + vm_page_inactive_count +
- vm_page_speculative_count + vm_page_free_count +
- (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
- atop_64(max_mem);
- if (percent_avail >= (kern_memorystatus_level + 5)) {
- kern_memorystatus_level = percent_avail;
- thread_wakeup((event_t)&kern_memorystatus_wakeup);
+ VM_CHECK_MEMORYSTATUS;
}
- }
-#endif
}
}
*
* The page's object and the page queues must be locked.
*/
+
+
void
vm_page_wire(
- register vm_page_t mem)
+ vm_page_t mem,
+ vm_tag_t tag,
+ boolean_t check_memorystatus)
{
+ vm_object_t m_object;
-// dbgLog(current_thread(), mem->offset, mem->object, 1); /* (TEST/DEBUG) */
+ m_object = VM_PAGE_OBJECT(mem);
+
+// dbgLog(current_thread(), mem->offset, m_object, 1); /* (TEST/DEBUG) */
VM_PAGE_CHECK(mem);
- if (mem->object) {
- vm_object_lock_assert_exclusive(mem->object);
+ if (m_object) {
+ vm_object_lock_assert_exclusive(m_object);
} else {
/*
* In theory, the page should be in an object before it
* that page and update it at the same time.
*/
}
-#if DEBUG
- lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
if ( !VM_PAGE_WIRED(mem)) {
- VM_PAGE_QUEUES_REMOVE(mem);
- if (mem->object) {
- mem->object->wired_page_count++;
- assert(mem->object->resident_page_count >=
- mem->object->wired_page_count);
- if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
+ if (mem->laundry)
+ vm_pageout_steal_laundry(mem, TRUE);
+
+ vm_page_queues_remove(mem, TRUE);
+
+ assert(mem->wire_count == 0);
+ mem->vm_page_q_state = VM_PAGE_IS_WIRED;
+
+ if (m_object) {
+
+ VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
+ VM_OBJECT_WIRED_PAGE_ADD(m_object, mem);
+ VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, tag);
+
+ assert(m_object->resident_page_count >=
+ m_object->wired_page_count);
+ if (m_object->purgable == VM_PURGABLE_VOLATILE) {
assert(vm_page_purgeable_count > 0);
OSAddAtomic(-1, &vm_page_purgeable_count);
OSAddAtomic(1, &vm_page_purgeable_wired_count);
}
- if (mem->object->all_reusable) {
+ if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
+ m_object->purgable == VM_PURGABLE_EMPTY) &&
+ m_object->vo_purgeable_owner != TASK_NULL) {
+ task_t owner;
+
+ owner = m_object->vo_purgeable_owner;
+ /* less volatile bytes */
+ ledger_debit(owner->ledger,
+ task_ledgers.purgeable_volatile,
+ PAGE_SIZE);
+ /* more not-quite-volatile bytes */
+ ledger_credit(owner->ledger,
+ task_ledgers.purgeable_nonvolatile,
+ PAGE_SIZE);
+ /* more footprint */
+ ledger_credit(owner->ledger,
+ task_ledgers.phys_footprint,
+ PAGE_SIZE);
+ }
+ if (m_object->all_reusable) {
/*
* Wired pages are not counted as "re-usable"
* in "all_reusable" VM objects, so nothing
* wired, so adjust its state and the
* accounting.
*/
- vm_object_reuse_pages(mem->object,
+ vm_object_reuse_pages(m_object,
mem->offset,
mem->offset+PAGE_SIZE_64,
FALSE);
if (mem->gobbled)
vm_page_gobble_count--;
mem->gobbled = FALSE;
- if (mem->zero_fill == TRUE) {
- mem->zero_fill = FALSE;
- VM_ZF_COUNT_DECR();
- }
-#if CONFIG_EMBEDDED
- {
- int percent_avail;
- /*
- * Decide if we need to poke the memorystatus notification thread.
- */
- percent_avail =
- (vm_page_active_count + vm_page_inactive_count +
- vm_page_speculative_count + vm_page_free_count +
- (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
- atop_64(max_mem);
- if (percent_avail <= (kern_memorystatus_level - 5)) {
- kern_memorystatus_level = percent_avail;
- thread_wakeup((event_t)&kern_memorystatus_wakeup);
+ if (check_memorystatus == TRUE) {
+ VM_CHECK_MEMORYSTATUS;
}
- }
-#endif
- /*
- * ENCRYPTED SWAP:
- * The page could be encrypted, but
- * We don't have to decrypt it here
- * because we don't guarantee that the
- * data is actually valid at this point.
- * The page will get decrypted in
- * vm_fault_wire() if needed.
- */
}
assert(!mem->gobbled);
+ assert(mem->vm_page_q_state == VM_PAGE_IS_WIRED);
mem->wire_count++;
+ if (__improbable(mem->wire_count == 0)) {
+ panic("vm_page_wire(%p): wire_count overflow", mem);
+ }
VM_PAGE_CHECK(mem);
}
-/*
- * vm_page_gobble:
- *
- * Mark this page as consumed by the vm/ipc/xmm subsystems.
- *
- * Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
- */
-void
-vm_page_gobble(
- register vm_page_t mem)
-{
- vm_page_lockspin_queues();
- VM_PAGE_CHECK(mem);
-
- assert(!mem->gobbled);
- assert( !VM_PAGE_WIRED(mem));
-
- if (!mem->gobbled && !VM_PAGE_WIRED(mem)) {
- if (!mem->private && !mem->fictitious)
- vm_page_wire_count++;
- }
- vm_page_gobble_count++;
- mem->gobbled = TRUE;
- vm_page_unlock_queues();
-}
-
/*
* vm_page_unwire:
*
vm_page_t mem,
boolean_t queueit)
{
+ vm_object_t m_object;
+
+ m_object = VM_PAGE_OBJECT(mem);
-// dbgLog(current_thread(), mem->offset, mem->object, 0); /* (TEST/DEBUG) */
+// dbgLog(current_thread(), mem->offset, m_object, 0); /* (TEST/DEBUG) */
VM_PAGE_CHECK(mem);
assert(VM_PAGE_WIRED(mem));
- assert(mem->object != VM_OBJECT_NULL);
-#if DEBUG
- vm_object_lock_assert_exclusive(mem->object);
- lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
+ assert(mem->wire_count > 0);
+ assert(!mem->gobbled);
+ assert(m_object != VM_OBJECT_NULL);
+ vm_object_lock_assert_exclusive(m_object);
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
if (--mem->wire_count == 0) {
- assert(!mem->private && !mem->fictitious);
- vm_page_wire_count--;
- assert(mem->object->wired_page_count > 0);
- mem->object->wired_page_count--;
- assert(mem->object->resident_page_count >=
- mem->object->wired_page_count);
- if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
+
+ mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;
+
+ VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
+ VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
+ VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
+ if (!mem->private && !mem->fictitious) {
+ vm_page_wire_count--;
+ }
+
+ assert(m_object->resident_page_count >=
+ m_object->wired_page_count);
+ if (m_object->purgable == VM_PURGABLE_VOLATILE) {
OSAddAtomic(+1, &vm_page_purgeable_count);
assert(vm_page_purgeable_wired_count > 0);
OSAddAtomic(-1, &vm_page_purgeable_wired_count);
}
- assert(!mem->laundry);
- assert(mem->object != kernel_object);
- assert(mem->pageq.next == NULL && mem->pageq.prev == NULL);
+ if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
+ m_object->purgable == VM_PURGABLE_EMPTY) &&
+ m_object->vo_purgeable_owner != TASK_NULL) {
+ task_t owner;
+
+ owner = m_object->vo_purgeable_owner;
+ /* more volatile bytes */
+ ledger_credit(owner->ledger,
+ task_ledgers.purgeable_volatile,
+ PAGE_SIZE);
+ /* less not-quite-volatile bytes */
+ ledger_debit(owner->ledger,
+ task_ledgers.purgeable_nonvolatile,
+ PAGE_SIZE);
+ /* less footprint */
+ ledger_debit(owner->ledger,
+ task_ledgers.phys_footprint,
+ PAGE_SIZE);
+ }
+ assert(m_object != kernel_object);
+ assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
if (queueit == TRUE) {
- if (mem->object->purgable == VM_PURGABLE_EMPTY) {
+ if (m_object->purgable == VM_PURGABLE_EMPTY) {
vm_page_deactivate(mem);
} else {
vm_page_activate(mem);
}
}
-#if CONFIG_EMBEDDED
- {
- int percent_avail;
- /*
- * Decide if we need to poke the memorystatus notification thread.
- */
- percent_avail =
- (vm_page_active_count + vm_page_inactive_count +
- vm_page_speculative_count + vm_page_free_count +
- (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
- atop_64(max_mem);
- if (percent_avail >= (kern_memorystatus_level + 5)) {
- kern_memorystatus_level = percent_avail;
- thread_wakeup((event_t)&kern_memorystatus_wakeup);
- }
- }
-#endif
+ VM_CHECK_MEMORYSTATUS;
+
}
VM_PAGE_CHECK(mem);
}
vm_page_t m,
boolean_t clear_hw_reference)
{
+ vm_object_t m_object;
+
+ m_object = VM_PAGE_OBJECT(m);
VM_PAGE_CHECK(m);
- assert(m->object != kernel_object);
- assert(m->phys_page != vm_page_guard_addr);
+ assert(m_object != kernel_object);
+ assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
-// dbgLog(m->phys_page, vm_page_free_count, vm_page_wire_count, 6); /* (TEST/DEBUG) */
-#if DEBUG
- lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
+// dbgLog(VM_PAGE_GET_PHYS_PAGE(m), vm_page_free_count, vm_page_wire_count, 6); /* (TEST/DEBUG) */
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
/*
* This page is no longer very interesting. If it was
* interesting (active or inactive/referenced), then we
* inactive queue. Note wired pages should not have
* their reference bit cleared.
*/
-
- if (m->absent && !m->unusual)
- panic("vm_page_deactivate: %p absent", m);
+ assert ( !(m->absent && !m->unusual));
if (m->gobbled) { /* can this happen? */
assert( !VM_PAGE_WIRED(m));
vm_page_gobble_count--;
m->gobbled = FALSE;
}
- if (m->private || (VM_PAGE_WIRED(m)))
- return;
-
- if (!m->fictitious && !m->absent && clear_hw_reference == TRUE)
- pmap_clear_reference(m->phys_page);
+ /*
+ * If this page is currently on the pageout queue, we can't do the
+ * vm_page_queues_remove (which doesn't handle the pageout queue case),
+ * and we can't remove it manually either, since we would need the object
+ * lock (which is not required here) to decrement the activity_in_progress
+ * reference held on the object while the page is on the pageout queue...
+ * just let the normal laundry processing proceed.
+ */
+ if (m->laundry || m->private || m->fictitious ||
+ (m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
+ (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
+ VM_PAGE_WIRED(m)) {
+ return;
+ }
+ if (!m->absent && clear_hw_reference == TRUE)
+ pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
m->reference = FALSE;
m->no_cache = FALSE;
- if (!m->inactive) {
- VM_PAGE_QUEUES_REMOVE(m);
-
- assert(!m->laundry);
- assert(m->pageq.next == NULL && m->pageq.prev == NULL);
-
- if (!IP_VALID(memory_manager_default) &&
- m->dirty && m->object->internal &&
- (m->object->purgable == VM_PURGABLE_DENY ||
- m->object->purgable == VM_PURGABLE_NONVOLATILE ||
- m->object->purgable == VM_PURGABLE_VOLATILE)) {
- queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
- m->throttled = TRUE;
+ if ( !VM_PAGE_INACTIVE(m)) {
+ vm_page_queues_remove(m, FALSE);
+
+ if (!VM_DYNAMIC_PAGING_ENABLED() &&
+ m->dirty && m_object->internal &&
+ (m_object->purgable == VM_PURGABLE_DENY ||
+ m_object->purgable == VM_PURGABLE_NONVOLATILE ||
+ m_object->purgable == VM_PURGABLE_VOLATILE)) {
+ vm_page_check_pageable_safe(m);
+ vm_page_queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
+ m->vm_page_q_state = VM_PAGE_ON_THROTTLED_Q;
vm_page_throttled_count++;
} else {
- if (!m->fictitious && m->object->named && m->object->ref_count == 1) {
+ if (m_object->named && m_object->ref_count == 1) {
vm_page_speculate(m, FALSE);
#if DEVELOPMENT || DEBUG
vm_page_speculative_recreated++;
#endif
- return;
} else {
- if (m->zero_fill) {
- queue_enter(&vm_page_queue_zf, m, vm_page_t, pageq);
- vm_zf_queue_count++;
- } else {
- queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
- }
- }
- m->inactive = TRUE;
- if (!m->fictitious) {
- vm_page_inactive_count++;
- token_new_pagecount++;
+ vm_page_enqueue_inactive(m, FALSE);
}
}
}
}
/*
- * vm_page_activate:
+ * vm_page_enqueue_cleaned
*
- * Put the specified page on the active list (if appropriate).
+ * Put the page on the cleaned queue, mark it cleaned, etc.
+ * Being on the cleaned queue (vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
+ * does ** NOT ** guarantee that the page is clean!
*
- * The page queues must be locked.
+ * Call with the queues lock held.
*/
-void
-vm_page_activate(
- register vm_page_t m)
+void vm_page_enqueue_cleaned(vm_page_t m)
{
- VM_PAGE_CHECK(m);
-#ifdef FIXME_4778297
- assert(m->object != kernel_object);
-#endif
- assert(m->phys_page != vm_page_guard_addr);
-#if DEBUG
- lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+ vm_object_t m_object;
+
+ m_object = VM_PAGE_OBJECT(m);
+
+ assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+ assert( !(m->absent && !m->unusual));
+
+ if (VM_PAGE_WIRED(m)) {
+ return;
+ }
+
+ if (m->gobbled) {
+ if (!m->private && !m->fictitious)
+ vm_page_wire_count--;
+ vm_page_gobble_count--;
+ m->gobbled = FALSE;
+ }
+ /*
+ * if this page is currently on the pageout queue, we can't do the
+ * vm_page_queues_remove (which doesn't handle the pageout queue case)
+ * and we can't remove it manually since we would need the object lock
+ * (which is not required here) to decrement the activity_in_progress
+ * reference which is held on the object while the page is in the pageout queue...
+ * just let the normal laundry processing proceed
+ */
+ if (m->laundry || m->private || m->fictitious ||
+ (m->vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
+ (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
+ return;
+ }
+ vm_page_queues_remove(m, FALSE);
+
+ vm_page_check_pageable_safe(m);
+ vm_page_queue_enter(&vm_page_queue_cleaned, m, vm_page_t, pageq);
+ m->vm_page_q_state = VM_PAGE_ON_INACTIVE_CLEANED_Q;
+ vm_page_cleaned_count++;
+
+ vm_page_inactive_count++;
+ if (m_object->internal) {
+ vm_page_pageable_internal_count++;
+ } else {
+ vm_page_pageable_external_count++;
+ }
+#if CONFIG_BACKGROUND_QUEUE
+ if (m->vm_page_in_background)
+ vm_page_add_to_backgroundq(m, TRUE);
#endif
+ vm_pageout_enqueued_cleaned++;
+}
+
+/*
+ * vm_page_activate:
+ *
+ * Put the specified page on the active list (if appropriate).
+ *
+ * The page queues must be locked.
+ */
+
+void
+vm_page_activate(
+ vm_page_t m)
+{
+ vm_object_t m_object;
+
+ m_object = VM_PAGE_OBJECT(m);
- if (m->absent && !m->unusual)
- panic("vm_page_activate: %p absent", m);
+ VM_PAGE_CHECK(m);
+#ifdef FIXME_4778297
+ assert(m_object != kernel_object);
+#endif
+ assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+ assert( !(m->absent && !m->unusual));
if (m->gobbled) {
assert( !VM_PAGE_WIRED(m));
vm_page_gobble_count--;
m->gobbled = FALSE;
}
- if (m->private)
+ /*
+ * if this page is currently on the pageout queue, we can't do the
+ * vm_page_queues_remove (which doesn't handle the pageout queue case)
+ * and we can't remove it manually since we would need the object lock
+ * (which is not required here) to decrement the activity_in_progress
+ * reference which is held on the object while the page is in the pageout queue...
+ * just let the normal laundry processing proceed
+ */
+ if (m->laundry || m->private || m->fictitious ||
+ (m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
+ (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q))
return;
#if DEBUG
- if (m->active)
+ if (m->vm_page_q_state == VM_PAGE_ON_ACTIVE_Q)
panic("vm_page_activate: already active");
#endif
- if (m->speculative) {
+ if (m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL);
}
-
- VM_PAGE_QUEUES_REMOVE(m);
+
+ vm_page_queues_remove(m, FALSE);
if ( !VM_PAGE_WIRED(m)) {
- assert(!m->laundry);
- assert(m->pageq.next == NULL && m->pageq.prev == NULL);
- if (!IP_VALID(memory_manager_default) &&
- !m->fictitious && m->dirty && m->object->internal &&
- (m->object->purgable == VM_PURGABLE_DENY ||
- m->object->purgable == VM_PURGABLE_NONVOLATILE ||
- m->object->purgable == VM_PURGABLE_VOLATILE)) {
- queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
- m->throttled = TRUE;
+ vm_page_check_pageable_safe(m);
+ if (!VM_DYNAMIC_PAGING_ENABLED() &&
+ m->dirty && m_object->internal &&
+ (m_object->purgable == VM_PURGABLE_DENY ||
+ m_object->purgable == VM_PURGABLE_NONVOLATILE ||
+ m_object->purgable == VM_PURGABLE_VOLATILE)) {
+ vm_page_queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
+ m->vm_page_q_state = VM_PAGE_ON_THROTTLED_Q;
vm_page_throttled_count++;
} else {
- queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
- m->active = TRUE;
- if (!m->fictitious)
- vm_page_active_count++;
+#if CONFIG_SECLUDED_MEMORY
+ if (secluded_for_filecache &&
+ vm_page_secluded_target != 0 &&
+ num_tasks_can_use_secluded_mem == 0 &&
+ m_object->eligible_for_secluded) {
+ vm_page_queue_enter(&vm_page_queue_secluded, m,
+ vm_page_t, pageq);
+ m->vm_page_q_state = VM_PAGE_ON_SECLUDED_Q;
+ vm_page_secluded_count++;
+ vm_page_secluded_count_inuse++;
+ assert(!m_object->internal);
+// vm_page_pageable_external_count++;
+ } else
+#endif /* CONFIG_SECLUDED_MEMORY */
+ vm_page_enqueue_active(m, FALSE);
}
m->reference = TRUE;
m->no_cache = FALSE;
boolean_t new)
{
struct vm_speculative_age_q *aq;
+ vm_object_t m_object;
+
+ m_object = VM_PAGE_OBJECT(m);
VM_PAGE_CHECK(m);
- assert(m->object != kernel_object);
- assert(m->phys_page != vm_page_guard_addr);
-#if DEBUG
- lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
+ vm_page_check_pageable_safe(m);
- if (m->absent && !m->unusual)
- panic("vm_page_speculate: %p absent", m);
+ assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+ assert( !(m->absent && !m->unusual));
+ assert(m_object->internal == FALSE);
+
+ /*
+ * if this page is currently on the pageout queue, we can't do the
+ * vm_page_queues_remove (which doesn't handle the pageout queue case)
+ * and we can't remove it manually since we would need the object lock
+ * (which is not required here) to decrement the activity_in_progress
+ * reference which is held on the object while the page is in the pageout queue...
+ * just let the normal laundry processing proceed
+ */
+ if (m->laundry || m->private || m->fictitious ||
+ (m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
+ (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q))
+ return;
- VM_PAGE_QUEUES_REMOVE(m);
+ vm_page_queues_remove(m, FALSE);
if ( !VM_PAGE_WIRED(m)) {
mach_timespec_t ts;
/*
* set the timer to begin a new group
*/
- aq->age_ts.tv_sec = VM_PAGE_SPECULATIVE_Q_AGE_MS / 1000;
- aq->age_ts.tv_nsec = (VM_PAGE_SPECULATIVE_Q_AGE_MS % 1000) * 1000 * NSEC_PER_USEC;
+ aq->age_ts.tv_sec = vm_page_speculative_q_age_ms / 1000;
+ aq->age_ts.tv_nsec = (vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
} else {
}
aq = &vm_page_queue_speculative[speculative_age_index];
- if (!queue_empty(&aq->age_q))
+ if (!vm_page_queue_empty(&aq->age_q))
vm_page_speculate_ageit(aq);
- aq->age_ts.tv_sec = VM_PAGE_SPECULATIVE_Q_AGE_MS / 1000;
- aq->age_ts.tv_nsec = (VM_PAGE_SPECULATIVE_Q_AGE_MS % 1000) * 1000 * NSEC_PER_USEC;
+ aq->age_ts.tv_sec = vm_page_speculative_q_age_ms / 1000;
+ aq->age_ts.tv_nsec = (vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
}
}
- enqueue_tail(&aq->age_q, &m->pageq);
- m->speculative = TRUE;
+ vm_page_enqueue_tail(&aq->age_q, &m->pageq);
+ m->vm_page_q_state = VM_PAGE_ON_SPECULATIVE_Q;
vm_page_speculative_count++;
+ vm_page_pageable_external_count++;
if (new == TRUE) {
- m->object->pages_created++;
+ vm_object_lock_assert_exclusive(m_object);
+
+ m_object->pages_created++;
#if DEVELOPMENT || DEBUG
vm_page_speculative_created++;
#endif
sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
- if (queue_empty(&sq->age_q)) {
+ if (vm_page_queue_empty(&sq->age_q)) {
sq->age_q.next = aq->age_q.next;
sq->age_q.prev = aq->age_q.prev;
- t = (vm_page_t)sq->age_q.next;
- t->pageq.prev = &sq->age_q;
+ t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.next);
+ t->pageq.prev = VM_PAGE_PACK_PTR(&sq->age_q);
- t = (vm_page_t)sq->age_q.prev;
- t->pageq.next = &sq->age_q;
+ t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
+ t->pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
} else {
- t = (vm_page_t)sq->age_q.prev;
+ t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
t->pageq.next = aq->age_q.next;
- t = (vm_page_t)aq->age_q.next;
+ t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.next);
t->pageq.prev = sq->age_q.prev;
- t = (vm_page_t)aq->age_q.prev;
- t->pageq.next = &sq->age_q;
+ t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.prev);
+ t->pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
sq->age_q.prev = aq->age_q.prev;
}
- queue_init(&aq->age_q);
+ vm_page_queue_init(&aq->age_q);
}
vm_page_t m)
{
VM_PAGE_CHECK(m);
- assert(m->object != kernel_object);
- assert(m->phys_page != vm_page_guard_addr);
-
-#if DEBUG
- lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
- if (m->active || m->reference)
- return;
+ assert(VM_PAGE_OBJECT(m) != kernel_object);
+ assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
- if (m->private || (VM_PAGE_WIRED(m)))
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+ /*
+ * if this page is currently on the pageout queue, we can't do the
+ * vm_page_queues_remove (which doesn't handle the pageout queue case)
+ * and we can't remove it manually since we would need the object lock
+ * (which is not required here) to decrement the activity_in_progress
+ * reference which is held on the object while the page is in the pageout queue...
+ * just let the normal laundry processing proceed
+ */
+ if (m->laundry || m->private ||
+ (m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
+ (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
+ VM_PAGE_WIRED(m))
return;
m->no_cache = FALSE;
- VM_PAGE_QUEUES_REMOVE(m);
-
- assert(!m->laundry);
- assert(m->pageq.next == NULL && m->pageq.prev == NULL);
+ vm_page_queues_remove(m, FALSE);
- queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
- m->inactive = TRUE;
-
- vm_page_inactive_count++;
- token_new_pagecount++;
+ vm_page_enqueue_inactive(m, FALSE);
}
vm_page_t first_active;
vm_page_t m;
int extra_active_count;
+ int extra_internal_count, extra_external_count;
+ vm_object_t m_object;
+
+ if (!VM_DYNAMIC_PAGING_ENABLED())
+ return;
extra_active_count = 0;
+ extra_internal_count = 0;
+ extra_external_count = 0;
vm_page_lock_queues();
- if (! queue_empty(&vm_page_queue_throttled)) {
+ if (! vm_page_queue_empty(&vm_page_queue_throttled)) {
/*
* Switch "throttled" pages to "active".
*/
- queue_iterate(&vm_page_queue_throttled, m, vm_page_t, pageq) {
+ vm_page_queue_iterate(&vm_page_queue_throttled, m, vm_page_t, pageq) {
VM_PAGE_CHECK(m);
- assert(m->throttled);
- assert(!m->active);
- assert(!m->inactive);
- assert(!m->speculative);
- assert(!VM_PAGE_WIRED(m));
- if (!m->fictitious) {
- extra_active_count++;
+ assert(m->vm_page_q_state == VM_PAGE_ON_THROTTLED_Q);
+
+ m_object = VM_PAGE_OBJECT(m);
+
+ extra_active_count++;
+ if (m_object->internal) {
+ extra_internal_count++;
+ } else {
+ extra_external_count++;
}
- m->throttled = FALSE;
- m->active = TRUE;
+
+ m->vm_page_q_state = VM_PAGE_ON_ACTIVE_Q;
VM_PAGE_CHECK(m);
+#if CONFIG_BACKGROUND_QUEUE
+ if (m->vm_page_in_background)
+ vm_page_add_to_backgroundq(m, FALSE);
+#endif
}
/*
* get re-evaluated by the LRU algorithm first, since they've been
* completely out of it until now.
*/
- first_throttled = (vm_page_t) queue_first(&vm_page_queue_throttled);
- last_throttled = (vm_page_t) queue_last(&vm_page_queue_throttled);
- first_active = (vm_page_t) queue_first(&vm_page_queue_active);
- if (queue_empty(&vm_page_queue_active)) {
- queue_last(&vm_page_queue_active) = (queue_entry_t) last_throttled;
+ first_throttled = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
+ last_throttled = (vm_page_t) vm_page_queue_last(&vm_page_queue_throttled);
+ first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
+ if (vm_page_queue_empty(&vm_page_queue_active)) {
+ vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
} else {
- queue_prev(&first_active->pageq) = (queue_entry_t) last_throttled;
+ first_active->pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
}
- queue_first(&vm_page_queue_active) = (queue_entry_t) first_throttled;
- queue_prev(&first_throttled->pageq) = (queue_entry_t) &vm_page_queue_active;
- queue_next(&last_throttled->pageq) = (queue_entry_t) first_active;
+ vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_throttled);
+ first_throttled->pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
+ last_throttled->pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
#if DEBUG
printf("reactivated %d throttled pages\n", vm_page_throttled_count);
#endif
- queue_init(&vm_page_queue_throttled);
+ vm_page_queue_init(&vm_page_queue_throttled);
/*
* Adjust the global page counts.
*/
vm_page_active_count += extra_active_count;
+ vm_page_pageable_internal_count += extra_internal_count;
+ vm_page_pageable_external_count += extra_external_count;
vm_page_throttled_count = 0;
}
assert(vm_page_throttled_count == 0);
- assert(queue_empty(&vm_page_queue_throttled));
+ assert(vm_page_queue_empty(&vm_page_queue_throttled));
vm_page_unlock_queues();
}
/*
* Switch "local" pages to "active".
*/
- assert(!queue_empty(&lq->vpl_queue));
+ assert(!vm_page_queue_empty(&lq->vpl_queue));
- queue_iterate(&lq->vpl_queue, m, vm_page_t, pageq) {
+ vm_page_queue_iterate(&lq->vpl_queue, m, vm_page_t, pageq) {
VM_PAGE_CHECK(m);
- assert(m->local);
- assert(!m->active);
- assert(!m->inactive);
- assert(!m->speculative);
- assert(!VM_PAGE_WIRED(m));
- assert(!m->throttled);
+ vm_page_check_pageable_safe(m);
+ assert(m->vm_page_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q);
assert(!m->fictitious);
if (m->local_id != lid)
panic("vm_page_reactivate_local: found vm_page_t(%p) with wrong cpuid", m);
m->local_id = 0;
- m->local = FALSE;
- m->active = TRUE;
+ m->vm_page_q_state = VM_PAGE_ON_ACTIVE_Q;
VM_PAGE_CHECK(m);
-
+#if CONFIG_BACKGROUND_QUEUE
+ if (m->vm_page_in_background)
+ vm_page_add_to_backgroundq(m, FALSE);
+#endif
count++;
}
if (count != lq->vpl_count)
/*
* Transfer the entire local queue to a regular LRU page queues.
*/
- first_local = (vm_page_t) queue_first(&lq->vpl_queue);
- last_local = (vm_page_t) queue_last(&lq->vpl_queue);
- first_active = (vm_page_t) queue_first(&vm_page_queue_active);
+ first_local = (vm_page_t) vm_page_queue_first(&lq->vpl_queue);
+ last_local = (vm_page_t) vm_page_queue_last(&lq->vpl_queue);
+ first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
- if (queue_empty(&vm_page_queue_active)) {
- queue_last(&vm_page_queue_active) = (queue_entry_t) last_local;
+ if (vm_page_queue_empty(&vm_page_queue_active)) {
+ vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
} else {
- queue_prev(&first_active->pageq) = (queue_entry_t) last_local;
+ first_active->pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
}
- queue_first(&vm_page_queue_active) = (queue_entry_t) first_local;
- queue_prev(&first_local->pageq) = (queue_entry_t) &vm_page_queue_active;
- queue_next(&last_local->pageq) = (queue_entry_t) first_active;
+ vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
+ first_local->pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
+ last_local->pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
- queue_init(&lq->vpl_queue);
+ vm_page_queue_init(&lq->vpl_queue);
/*
* Adjust the global page counts.
*/
vm_page_active_count += lq->vpl_count;
+ vm_page_pageable_internal_count += lq->vpl_internal_count;
+ vm_page_pageable_external_count += lq->vpl_external_count;
lq->vpl_count = 0;
+ lq->vpl_internal_count = 0;
+ lq->vpl_external_count = 0;
}
- assert(queue_empty(&lq->vpl_queue));
+ assert(vm_page_queue_empty(&lq->vpl_queue));
if (nolocks == FALSE) {
VPL_UNLOCK(&lq->vpl_lock);
*
* Zero-fill a part of the page.
*/
+#define PMAP_ZERO_PART_PAGE_IMPLEMENTED
void
vm_page_part_zero_fill(
vm_page_t m,
vm_offset_t m_pa,
vm_size_t len)
{
- vm_page_t tmp;
+#if 0
+ /*
+ * we don't hold the page queue lock
+ * so this check isn't safe to make
+ */
VM_PAGE_CHECK(m);
+#endif
+
#ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
- pmap_zero_part_page(m->phys_page, m_pa, len);
+ pmap_zero_part_page(VM_PAGE_GET_PHYS_PAGE(m), m_pa, len);
#else
+ vm_page_t tmp;
while (1) {
tmp = vm_page_grab();
if (tmp == VM_PAGE_NULL) {
vm_page_t m)
{
XPR(XPR_VM_PAGE,
- "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
- m->object, m->offset, m, 0,0);
-
+ "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
+ VM_PAGE_OBJECT(m), m->offset, m, 0,0);
+#if 0
+ /*
+ * we don't hold the page queue lock
+ * so this check isn't safe to make
+ */
VM_PAGE_CHECK(m);
+#endif
-// dbgTrace(0xAEAEAEAE, m->phys_page, 0); /* (BRINGUP) */
- pmap_zero_page(m->phys_page);
+// dbgTrace(0xAEAEAEAE, VM_PAGE_GET_PHYS_PAGE(m), 0); /* (BRINGUP) */
+ pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
}
/*
vm_offset_t dst_pa,
vm_size_t len)
{
+#if 0
+ /*
+ * we don't hold the page queue lock
+ * so this check isn't safe to make
+ */
VM_PAGE_CHECK(src_m);
VM_PAGE_CHECK(dst_m);
-
- pmap_copy_part_page(src_m->phys_page, src_pa,
- dst_m->phys_page, dst_pa, len);
+#endif
+ pmap_copy_part_page(VM_PAGE_GET_PHYS_PAGE(src_m), src_pa,
+ VM_PAGE_GET_PHYS_PAGE(dst_m), dst_pa, len);
}
/*
* vm_page_copy:
*
* Copy one page to another
- *
- * ENCRYPTED SWAP:
- * The source page should not be encrypted. The caller should
- * make sure the page is decrypted first, if necessary.
*/
int vm_page_copy_cs_validations = 0;
vm_page_t src_m,
vm_page_t dest_m)
{
- XPR(XPR_VM_PAGE,
- "vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n",
- src_m->object, src_m->offset,
- dest_m->object, dest_m->offset,
- 0);
+ vm_object_t src_m_object;
- VM_PAGE_CHECK(src_m);
- VM_PAGE_CHECK(dest_m);
+ src_m_object = VM_PAGE_OBJECT(src_m);
+ XPR(XPR_VM_PAGE,
+ "vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n",
+ src_m_object, src_m->offset,
+ VM_PAGE_OBJECT(dest_m), dest_m->offset,
+ 0);
+#if 0
/*
- * ENCRYPTED SWAP:
- * The source page should not be encrypted at this point.
- * The destination page will therefore not contain encrypted
- * data after the copy.
+ * we don't hold the page queue lock
+ * so this check isn't safe to make
*/
- if (src_m->encrypted) {
- panic("vm_page_copy: source page %p is encrypted\n", src_m);
- }
- dest_m->encrypted = FALSE;
+ VM_PAGE_CHECK(src_m);
+ VM_PAGE_CHECK(dest_m);
+#endif
+ vm_object_lock_assert_held(src_m_object);
- if (src_m->object != VM_OBJECT_NULL &&
- src_m->object->code_signed) {
+ if (src_m_object != VM_OBJECT_NULL &&
+ src_m_object->code_signed) {
/*
* We're copying a page from a code-signed object.
* Whoever ends up mapping the copy page might care about
*/
vm_page_copy_cs_validations++;
vm_page_validate_cs(src_m);
+#if DEVELOPMENT || DEBUG
+ DTRACE_VM4(codesigned_copy,
+ vm_object_t, src_m_object,
+ vm_object_offset_t, src_m->offset,
+ int, src_m->cs_validated,
+ int, src_m->cs_tainted);
+#endif /* DEVELOPMENT || DEBUG */
+
+ }
+
+ if (vm_page_is_slideable(src_m)) {
+ boolean_t was_busy = src_m->busy;
+ src_m->busy = TRUE;
+ (void) vm_page_slide(src_m, 0);
+ assert(src_m->busy);
+ if (!was_busy) {
+ PAGE_WAKEUP_DONE(src_m);
+ }
}
+
/*
* Propagate the cs_tainted bit to the copy page. Do not propagate
* the cs_validated bit.
if (dest_m->cs_tainted) {
vm_page_copy_cs_tainted++;
}
-
- pmap_copy_page(src_m->phys_page, dest_m->phys_page);
+ dest_m->slid = src_m->slid;
+ dest_m->error = src_m->error; /* sliding src_m might have failed... */
+ pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(src_m), VM_PAGE_GET_PHYS_PAGE(dest_m));
}
#if MACH_ASSERT
vm_page_t p)
{
printf("vm_page %p: \n", p);
- printf(" pageq: next=%p prev=%p\n", p->pageq.next, p->pageq.prev);
- printf(" listq: next=%p prev=%p\n", p->listq.next, p->listq.prev);
- printf(" next=%p\n", p->next);
- printf(" object=%p offset=0x%llx\n", p->object, p->offset);
+ printf(" pageq: next=%p prev=%p\n",
+ (vm_page_t)VM_PAGE_UNPACK_PTR(p->pageq.next),
+ (vm_page_t)VM_PAGE_UNPACK_PTR(p->pageq.prev));
+ printf(" listq: next=%p prev=%p\n",
+ (vm_page_t)(VM_PAGE_UNPACK_PTR(p->listq.next)),
+ (vm_page_t)(VM_PAGE_UNPACK_PTR(p->listq.prev)));
+ printf(" next=%p\n", (vm_page_t)(VM_PAGE_UNPACK_PTR(p->next_m)));
+ printf(" object=%p offset=0x%llx\n",VM_PAGE_OBJECT(p), p->offset);
printf(" wire_count=%u\n", p->wire_count);
+ printf(" q_state=%u\n", p->vm_page_q_state);
- printf(" %slocal, %sinactive, %sactive, %spageout_queue, %sspeculative, %slaundry\n",
- (p->local ? "" : "!"),
- (p->inactive ? "" : "!"),
- (p->active ? "" : "!"),
- (p->pageout_queue ? "" : "!"),
- (p->speculative ? "" : "!"),
- (p->laundry ? "" : "!"));
- printf(" %sfree, %sref, %sgobbled, %sprivate, %sthrottled\n",
- (p->free ? "" : "!"),
+ printf(" %slaundry, %sref, %sgobbled, %sprivate\n",
+ (p->laundry ? "" : "!"),
(p->reference ? "" : "!"),
(p->gobbled ? "" : "!"),
- (p->private ? "" : "!"),
- (p->throttled ? "" : "!"));
+ (p->private ? "" : "!"));
printf(" %sbusy, %swanted, %stabled, %sfictitious, %spmapped, %swpmapped\n",
- (p->busy ? "" : "!"),
- (p->wanted ? "" : "!"),
- (p->tabled ? "" : "!"),
- (p->fictitious ? "" : "!"),
- (p->pmapped ? "" : "!"),
- (p->wpmapped ? "" : "!"));
- printf(" %spageout, %sabsent, %serror, %sdirty, %scleaning, %sprecious, %sclustered\n",
- (p->pageout ? "" : "!"),
+ (p->busy ? "" : "!"),
+ (p->wanted ? "" : "!"),
+ (p->tabled ? "" : "!"),
+ (p->fictitious ? "" : "!"),
+ (p->pmapped ? "" : "!"),
+ (p->wpmapped ? "" : "!"));
+ printf(" %sfree_when_done, %sabsent, %serror, %sdirty, %scleaning, %sprecious, %sclustered\n",
+ (p->free_when_done ? "" : "!"),
(p->absent ? "" : "!"),
(p->error ? "" : "!"),
(p->dirty ? "" : "!"),
(p->cleaning ? "" : "!"),
(p->precious ? "" : "!"),
(p->clustered ? "" : "!"));
- printf(" %soverwriting, %srestart, %sunusual, %sencrypted, %sencrypted_cleaning\n",
+ printf(" %soverwriting, %srestart, %sunusual\n",
(p->overwriting ? "" : "!"),
(p->restart ? "" : "!"),
- (p->unusual ? "" : "!"),
- (p->encrypted ? "" : "!"),
- (p->encrypted_cleaning ? "" : "!"));
- printf(" %slist_req_pending, %sdump_cleaning, %scs_validated, %scs_tainted, %sno_cache\n",
- (p->list_req_pending ? "" : "!"),
- (p->dump_cleaning ? "" : "!"),
+ (p->unusual ? "" : "!"));
+ printf(" %scs_validated, %scs_tainted, %scs_nx, %sno_cache\n",
(p->cs_validated ? "" : "!"),
(p->cs_tainted ? "" : "!"),
+ (p->cs_nx ? "" : "!"),
(p->no_cache ? "" : "!"));
- printf(" %szero_fill\n",
- (p->zero_fill ? "" : "!"));
- printf("phys_page=0x%x\n", p->phys_page);
+ printf("phys_page=0x%x\n", VM_PAGE_GET_PHYS_PAGE(p));
}
/*
vm_page_t pages,
unsigned int npages)
{
- register vm_page_t m;
+ vm_page_t m;
unsigned int page_count;
vm_offset_t prev_addr;
- prev_addr = pages->phys_page;
+ prev_addr = VM_PAGE_GET_PHYS_PAGE(pages);
page_count = 1;
for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
- if (m->phys_page != prev_addr + 1) {
+ if (VM_PAGE_GET_PHYS_PAGE(m) != prev_addr + 1) {
printf("m %p prev_addr 0x%lx, current addr 0x%x\n",
- m, (long)prev_addr, m->phys_page);
- printf("pages %p page_count %d\n", pages, page_count);
+ m, (long)prev_addr, VM_PAGE_GET_PHYS_PAGE(m));
+ printf("pages %p page_count %d npages %d\n", pages, page_count, npages);
panic("vm_page_verify_contiguous: not contiguous!");
}
- prev_addr = m->phys_page;
+ prev_addr = VM_PAGE_GET_PHYS_PAGE(m);
++page_count;
}
if (page_count != npages) {
/*
* Check the free lists for proper length etc.
*/
+static boolean_t vm_page_verify_this_free_list_enabled = FALSE;
static unsigned int
vm_page_verify_free_list(
- queue_head_t *vm_page_queue,
+ vm_page_queue_head_t *vm_page_queue,
unsigned int color,
vm_page_t look_for_page,
boolean_t expect_page)
vm_page_t prev_m;
boolean_t found_page;
+ if (! vm_page_verify_this_free_list_enabled)
+ return 0;
+
found_page = FALSE;
npages = 0;
- prev_m = (vm_page_t) vm_page_queue;
- queue_iterate(vm_page_queue,
- m,
- vm_page_t,
- pageq) {
+ prev_m = (vm_page_t)((uintptr_t)vm_page_queue);
+
+ vm_page_queue_iterate(vm_page_queue,
+ m,
+ vm_page_t,
+ pageq) {
+
if (m == look_for_page) {
found_page = TRUE;
}
- if ((vm_page_t) m->pageq.prev != prev_m)
+ if ((vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.prev) != prev_m)
panic("vm_page_verify_free_list(color=%u, npages=%u): page %p corrupted prev ptr %p instead of %p\n",
- color, npages, m, m->pageq.prev, prev_m);
- if ( ! m->free )
- panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not free\n",
- color, npages, m);
+ color, npages, m, (vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.prev), prev_m);
if ( ! m->busy )
panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not busy\n",
color, npages, m);
- if ( color != (unsigned int) -1 && (m->phys_page & vm_color_mask) != color)
- panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u\n",
- color, npages, m, m->phys_page & vm_color_mask, color);
+ if (color != (unsigned int) -1) {
+ if (VM_PAGE_GET_COLOR(m) != color)
+ panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u\n",
+ color, npages, m, VM_PAGE_GET_COLOR(m), color);
+ if (m->vm_page_q_state != VM_PAGE_ON_FREE_Q)
+ panic("vm_page_verify_free_list(color=%u, npages=%u): page %p - expecting q_state == VM_PAGE_ON_FREE_Q, found %d\n",
+ color, npages, m, m->vm_page_q_state);
+ } else {
+ if (m->vm_page_q_state != VM_PAGE_ON_FREE_LOCAL_Q)
+ panic("vm_page_verify_free_list(npages=%u): local page %p - expecting q_state == VM_PAGE_ON_FREE_LOCAL_Q, found %d\n",
+ npages, m, m->vm_page_q_state);
+ }
++npages;
prev_m = m;
}
if (expect_page && !found_page) {
printf("vm_page_verify_free_list(color=%u, npages=%u): page %p not found phys=%u\n",
- color, npages, look_for_page, look_for_page->phys_page);
+ color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
_vm_page_print(look_for_page);
for (other_color = 0;
other_color < vm_colors;
other_color++) {
if (other_color == color)
continue;
- vm_page_verify_free_list(&vm_page_queue_free[other_color],
- other_color, look_for_page, FALSE);
+ vm_page_verify_free_list(&vm_page_queue_free[other_color].qhead,
+ other_color, look_for_page, FALSE);
}
- if (color != (unsigned int) -1) {
+ if (color == (unsigned int) -1) {
vm_page_verify_free_list(&vm_lopage_queue_free,
(unsigned int) -1, look_for_page, FALSE);
}
-
panic("vm_page_verify_free_list(color=%u)\n", color);
}
if (!expect_page && found_page) {
printf("vm_page_verify_free_list(color=%u, npages=%u): page %p found phys=%u\n",
- color, npages, look_for_page, look_for_page->phys_page);
+ color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
}
}
return npages;
}
-static boolean_t vm_page_verify_free_lists_enabled = FALSE;
+static boolean_t vm_page_verify_all_free_lists_enabled = FALSE;
static void
vm_page_verify_free_lists( void )
{
unsigned int color, npages, nlopages;
+ boolean_t toggle = TRUE;
- if (! vm_page_verify_free_lists_enabled)
+ if (! vm_page_verify_all_free_lists_enabled)
return;
npages = 0;
lck_mtx_lock(&vm_page_queue_free_lock);
+
+ if (vm_page_verify_this_free_list_enabled == TRUE) {
+ /*
+ * This variable has been set globally for extra checking of
+ * each free list Q. Since we didn't set it, we don't own it
+ * and we shouldn't toggle it.
+ */
+ toggle = FALSE;
+ }
- for( color = 0; color < vm_colors; color++ ) {
- npages += vm_page_verify_free_list(&vm_page_queue_free[color],
- color, VM_PAGE_NULL, FALSE);
+ if (toggle == TRUE) {
+ vm_page_verify_this_free_list_enabled = TRUE;
}
+ for( color = 0; color < vm_colors; color++ ) {
+ npages += vm_page_verify_free_list(&vm_page_queue_free[color].qhead,
+ color, VM_PAGE_NULL, FALSE);
+ }
nlopages = vm_page_verify_free_list(&vm_lopage_queue_free,
(unsigned int) -1,
VM_PAGE_NULL, FALSE);
panic("vm_page_verify_free_lists: "
"npages %u free_count %d nlopages %u lo_free_count %u",
npages, vm_page_free_count, nlopages, vm_lopage_free_count);
+
+ if (toggle == TRUE) {
+ vm_page_verify_this_free_list_enabled = FALSE;
+ }
+
lck_mtx_unlock(&vm_page_queue_free_lock);
}
-void
-vm_page_queues_assert(
- vm_page_t mem,
- int val)
+#endif /* MACH_ASSERT */
+
+
+
+#if __arm64__
+/*
+ * 1 or more clients (currently only SEP) ask for a large contiguous chunk of memory
+ * after the system has 'aged'. To ensure that other allocation requests don't mess
+ * with the chances of that request being satisfied, we pre-allocate a single contiguous
+ * 10MB buffer and hand it out to the first request of >= 4MB.
+ */
+
+kern_return_t cpm_preallocate_early(void);
+
+vm_page_t cpm_preallocated_pages_list = NULL;
+boolean_t preallocated_buffer_available = FALSE;
+
+#define PREALLOCATED_CONTIG_BUFFER_PAGES_COUNT ((10 * 1024 * 1024) / PAGE_SIZE_64) /* 10 MB */
+#define MIN_CONTIG_PAGES_REQUEST_FOR_PREALLOCATED_BUFFER ((4 * 1024 * 1024) / PAGE_SIZE_64) /* 4 MB */
+
+kern_return_t
+cpm_preallocate_early(void)
{
- if (mem->free + mem->active + mem->inactive + mem->speculative +
- mem->throttled + mem->pageout_queue > (val)) {
- _vm_page_print(mem);
- panic("vm_page_queues_assert(%p, %d)\n", mem, val);
- }
- if (VM_PAGE_WIRED(mem)) {
- assert(!mem->active);
- assert(!mem->inactive);
- assert(!mem->speculative);
- assert(!mem->throttled);
+
+ kern_return_t kr = KERN_SUCCESS;
+ vm_map_size_t prealloc_size = (PREALLOCATED_CONTIG_BUFFER_PAGES_COUNT * PAGE_SIZE_64);
+
+ printf("cpm_preallocate_early called to preallocate contiguous buffer of %llu pages\n", PREALLOCATED_CONTIG_BUFFER_PAGES_COUNT);
+
+ kr = cpm_allocate(CAST_DOWN(vm_size_t, prealloc_size), &cpm_preallocated_pages_list, 0, 0, TRUE, 0);
+
+ if (kr != KERN_SUCCESS) {
+ printf("cpm_allocate for preallocated contig buffer failed with %d.\n", kr);
+ } else {
+ preallocated_buffer_available = TRUE;
}
+
+ return kr;
}
-#endif /* MACH_ASSERT */
+#endif /* __arm64__ */
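
/*
 * Minimal sketch of a boot-time call site for the preallocation above; the
 * real caller is not part of this hunk and the hook name below is purely
 * illustrative. The only requirement implied by the code above is that
 * cpm_preallocate_early() runs once, early, before physical memory has
 * fragmented, and that failure is tolerable (later large requests simply
 * fall back to the normal contiguous scan).
 */
#if __arm64__
static void
cpm_reserve_on_boot_sketch(void)
{
	kern_return_t kr;

	kr = cpm_preallocate_early();

	if (kr != KERN_SUCCESS) {
		/* non-fatal: preallocated_buffer_available simply stays FALSE */
		printf("cpm_reserve_on_boot_sketch: no contiguous buffer reserved (%d)\n", kr);
	}
}
#endif /* __arm64__ */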
+extern boolean_t (* volatile consider_buffer_cache_collect)(int);
+
/*
* CONTIGUOUS PAGE ALLOCATION
*
int flags)
{
vm_page_t m = NULL;
- ppnum_t prevcontaddr;
- ppnum_t start_pnum;
- unsigned int npages, considered, scanned;
- unsigned int page_idx, start_idx, last_idx, orig_last_idx;
+ ppnum_t prevcontaddr = 0;
+ ppnum_t start_pnum = 0;
+ unsigned int npages = 0, considered = 0, scanned = 0;
+ unsigned int page_idx = 0, start_idx = 0, last_idx = 0, orig_last_idx = 0;
unsigned int idx_last_contig_page_found = 0;
- int free_considered, free_available;
- int substitute_needed;
- boolean_t wrapped;
+ int free_considered = 0, free_available = 0;
+ int substitute_needed = 0;
+ boolean_t wrapped, zone_gc_called = FALSE;
+ kern_return_t kr;
#if DEBUG
- clock_sec_t tv_start_sec, tv_end_sec;
- clock_usec_t tv_start_usec, tv_end_usec;
+ clock_sec_t tv_start_sec = 0, tv_end_sec = 0;
+ clock_usec_t tv_start_usec = 0, tv_end_usec = 0;
#endif
-#if MACH_ASSERT
+
int yielded = 0;
int dumped_run = 0;
int stolen_pages = 0;
-#endif
+ int compressed_pages = 0;
+
if (contig_pages == 0)
return VM_PAGE_NULL;
+full_scan_again:
+
#if MACH_ASSERT
vm_page_verify_free_lists();
#endif
#if DEBUG
clock_get_system_microtime(&tv_start_sec, &tv_start_usec);
#endif
+ PAGE_REPLACEMENT_ALLOWED(TRUE);
+
vm_page_lock_queues();
+
+#if __arm64__
+ if (preallocated_buffer_available) {
+
+ if ((contig_pages >= MIN_CONTIG_PAGES_REQUEST_FOR_PREALLOCATED_BUFFER) && (contig_pages <= PREALLOCATED_CONTIG_BUFFER_PAGES_COUNT)) {
+
+ m = cpm_preallocated_pages_list;
+
+ start_idx = (unsigned int) (m - &vm_pages[0]);
+
+ if (wire == FALSE) {
+
+ last_idx = start_idx;
+
+ for(npages = 0; npages < contig_pages; npages++, last_idx++) {
+
+ assert(vm_pages[last_idx].gobbled == FALSE);
+
+ vm_pages[last_idx].gobbled = TRUE;
+ vm_page_gobble_count++;
+
+ assert(1 == vm_pages[last_idx].wire_count);
+ /*
+ * Gobbled pages are counted as wired pages. So no need to drop
+ * the global wired page count. Just the page's wire count is fine.
+ */
+ vm_pages[last_idx].wire_count--;
+ vm_pages[last_idx].vm_page_q_state = VM_PAGE_NOT_ON_Q;
+ }
+
+ }
+
+ last_idx = start_idx + contig_pages - 1;
+
+ vm_pages[last_idx].snext = NULL;
+
+ printf("Using preallocated buffer: Requested size (pages):%d... index range: %d-%d...freeing %llu pages\n", contig_pages, start_idx, last_idx, PREALLOCATED_CONTIG_BUFFER_PAGES_COUNT - contig_pages);
+
+ last_idx += 1;
+ for(npages = contig_pages; npages < PREALLOCATED_CONTIG_BUFFER_PAGES_COUNT; npages++, last_idx++) {
+
+ VM_PAGE_ZERO_PAGEQ_ENTRY(&vm_pages[last_idx]);
+ vm_page_free(&vm_pages[last_idx]);
+ }
+
+ cpm_preallocated_pages_list = NULL;
+ preallocated_buffer_available = FALSE;
+
+ goto done_scanning;
+ }
+ }
+#endif /* __arm64__ */
+
lck_mtx_lock(&vm_page_queue_free_lock);
RESET_STATE_OF_RUN();
assert(!m->fictitious);
assert(!m->private);
- if (max_pnum && m->phys_page > max_pnum) {
+ if (max_pnum && VM_PAGE_GET_PHYS_PAGE(m) > max_pnum) {
/* no more low pages... */
break;
}
- if (!npages && ((m->phys_page & pnum_mask) != 0)) {
+ if (!npages && ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0)) {
/*
* not aligned
*/
RESET_STATE_OF_RUN();
} else if (VM_PAGE_WIRED(m) || m->gobbled ||
- m->encrypted || m->encrypted_cleaning || m->cs_validated || m->cs_tainted ||
- m->error || m->absent || m->pageout_queue || m->laundry || m->wanted || m->precious ||
- m->cleaning || m->overwriting || m->restart || m->unusual || m->list_req_pending ||
- m->pageout) {
+ m->laundry || m->wanted ||
+ m->cleaning || m->overwriting || m->free_when_done) {
/*
* page is in a transient state
* or a state we don't want to deal
*/
RESET_STATE_OF_RUN();
- } else if (!m->free && !m->active && !m->inactive && !m->speculative && !m->throttled) {
+ } else if ((m->vm_page_q_state == VM_PAGE_NOT_ON_Q) ||
+ (m->vm_page_q_state == VM_PAGE_ON_FREE_LOCAL_Q) ||
+ (m->vm_page_q_state == VM_PAGE_ON_FREE_LOPAGE_Q) ||
+ (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
/*
- * page needs to be on one of our queues
+ * page needs to be on one of our queues (other than the pageout or special free queues)
+ * or it needs to belong to the compressor pool (which is now indicated
+ * by vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR and falls out
+ * from the check for VM_PAGE_NOT_ON_Q)
* in order for it to be stable behind the
* locks we hold at this point...
* if not, don't consider it which
*/
RESET_STATE_OF_RUN();
- } else if (!m->free && (!m->tabled || m->busy)) {
+ } else if ((m->vm_page_q_state != VM_PAGE_ON_FREE_Q) && (!m->tabled || m->busy)) {
/*
* pages on the free list are always 'busy'
* so we couldn't test for 'busy' in the check
RESET_STATE_OF_RUN();
} else {
- if (m->phys_page != prevcontaddr + 1) {
- if ((m->phys_page & pnum_mask) != 0) {
+ if (VM_PAGE_GET_PHYS_PAGE(m) != prevcontaddr + 1) {
+ if ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0) {
RESET_STATE_OF_RUN();
goto did_consider;
} else {
npages = 1;
start_idx = page_idx;
- start_pnum = m->phys_page;
+ start_pnum = VM_PAGE_GET_PHYS_PAGE(m);
}
} else {
npages++;
}
- prevcontaddr = m->phys_page;
+ prevcontaddr = VM_PAGE_GET_PHYS_PAGE(m);
VM_PAGE_CHECK(m);
- if (m->free) {
+ if (m->vm_page_q_state == VM_PAGE_ON_FREE_Q) {
free_considered++;
} else {
/*
* into a substitute page.
*/
#if VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
- if (m->pmapped || m->dirty) {
+ if (m->pmapped || m->dirty || m->precious) {
substitute_needed++;
}
#else
}
did_consider:
if (considered > MAX_CONSIDERED_BEFORE_YIELD && npages <= 1) {
-
+
+ PAGE_REPLACEMENT_ALLOWED(FALSE);
+
lck_mtx_unlock(&vm_page_queue_free_lock);
vm_page_unlock_queues();
mutex_pause(0);
+ PAGE_REPLACEMENT_ALLOWED(TRUE);
+
vm_page_lock_queues();
lck_mtx_lock(&vm_page_queue_free_lock);
*/
free_available = vm_page_free_count - vm_page_free_reserved;
considered = 0;
-#if MACH_ASSERT
+
yielded++;
-#endif
+
goto retry;
}
considered++;
m1 = &vm_pages[start_idx++];
#if !VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
- assert(m1->free);
+ assert(m1->vm_page_q_state == VM_PAGE_ON_FREE_Q);
#endif
- if (m1->free) {
+ if (m1->vm_page_q_state == VM_PAGE_ON_FREE_Q) {
unsigned int color;
- color = m1->phys_page & vm_color_mask;
+ color = VM_PAGE_GET_COLOR(m1);
#if MACH_ASSERT
- vm_page_verify_free_list(&vm_page_queue_free[color],
- color, m1, TRUE);
+ vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, m1, TRUE);
#endif
- queue_remove(&vm_page_queue_free[color],
- m1,
- vm_page_t,
- pageq);
- m1->pageq.next = NULL;
- m1->pageq.prev = NULL;
+ vm_page_queue_remove(&vm_page_queue_free[color].qhead,
+ m1,
+ vm_page_t,
+ pageq);
+
+ VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
#if MACH_ASSERT
- vm_page_verify_free_list(&vm_page_queue_free[color],
- color, VM_PAGE_NULL, FALSE);
+ vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, VM_PAGE_NULL, FALSE);
#endif
/*
* Clear the "free" bit so that this page
* does not get considered for another
* concurrent physically-contiguous allocation.
*/
- m1->free = FALSE;
+ m1->vm_page_q_state = VM_PAGE_NOT_ON_Q;
assert(m1->busy);
vm_page_free_count--;
}
}
- /*
- * adjust global freelist counts
- */
- if (vm_page_free_count < vm_page_free_count_minimum)
- vm_page_free_count_minimum = vm_page_free_count;
-
if( flags & KMA_LOMEM)
vm_page_lomem_find_contiguous_last_idx = page_idx;
else
*/
m1 = &vm_pages[cur_idx--];
- assert(!m1->free);
- if (m1->object == VM_OBJECT_NULL) {
+ if (m1->vm_page_object == 0) {
/*
* page has already been removed from
* the free list in the 1st pass
*/
+ assert(m1->vm_page_q_state == VM_PAGE_NOT_ON_Q);
assert(m1->offset == (vm_object_offset_t) -1);
assert(m1->busy);
assert(!m1->wanted);
assert(!m1->laundry);
} else {
vm_object_t object;
+ int refmod;
+ boolean_t disconnected, reusable;
if (abort_run == TRUE)
continue;
- object = m1->object;
+ assert(m1->vm_page_q_state != VM_PAGE_NOT_ON_Q);
+
+ object = VM_PAGE_OBJECT(m1);
if (object != locked_object) {
if (locked_object) {
}
if (locked_object == VM_OBJECT_NULL ||
(VM_PAGE_WIRED(m1) || m1->gobbled ||
- m1->encrypted || m1->encrypted_cleaning || m1->cs_validated || m1->cs_tainted ||
- m1->error || m1->absent || m1->pageout_queue || m1->laundry || m1->wanted || m1->precious ||
- m1->cleaning || m1->overwriting || m1->restart || m1->unusual || m1->list_req_pending || m1->busy)) {
+ m1->laundry || m1->wanted ||
+ m1->cleaning || m1->overwriting || m1->free_when_done || m1->busy) ||
+ (m1->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
if (locked_object) {
vm_object_unlock(locked_object);
abort_run = TRUE;
continue;
}
- if (m1->pmapped || m1->dirty) {
- int refmod;
+
+ disconnected = FALSE;
+ reusable = FALSE;
+
+ if ((m1->reusable ||
+ object->all_reusable) &&
+ (m1->vm_page_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q) &&
+ !m1->dirty &&
+ !m1->reference) {
+ /* reusable page... */
+ refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
+ disconnected = TRUE;
+ if (refmod == 0) {
+ /*
+ * ... not reused: can steal
+ * without relocating contents.
+ */
+ reusable = TRUE;
+ }
+ }
+
+ if ((m1->pmapped &&
+ ! reusable) ||
+ m1->dirty ||
+ m1->precious) {
vm_object_offset_t offset;
m2 = vm_page_grab();
abort_run = TRUE;
continue;
}
- if (m1->pmapped)
- refmod = pmap_disconnect(m1->phys_page);
- else
- refmod = 0;
- vm_page_copy(m1, m2);
-
- m2->reference = m1->reference;
- m2->dirty = m1->dirty;
+ if (! disconnected) {
+ if (m1->pmapped)
+ refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
+ else
+ refmod = 0;
+ }
+
+ /* copy the page's contents */
+ pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(m1), VM_PAGE_GET_PHYS_PAGE(m2));
+ /* copy the page's state */
+ assert(!VM_PAGE_WIRED(m1));
+ assert(m1->vm_page_q_state != VM_PAGE_ON_FREE_Q);
+ assert(m1->vm_page_q_state != VM_PAGE_ON_PAGEOUT_Q);
+ assert(!m1->laundry);
+ m2->reference = m1->reference;
+ assert(!m1->gobbled);
+ assert(!m1->private);
+ m2->no_cache = m1->no_cache;
+ m2->xpmapped = 0;
+ assert(!m1->busy);
+ assert(!m1->wanted);
+ assert(!m1->fictitious);
+ m2->pmapped = m1->pmapped; /* should flush cache ? */
+ m2->wpmapped = m1->wpmapped;
+ assert(!m1->free_when_done);
+ m2->absent = m1->absent;
+ m2->error = m1->error;
+ m2->dirty = m1->dirty;
+ assert(!m1->cleaning);
+ m2->precious = m1->precious;
+ m2->clustered = m1->clustered;
+ assert(!m1->overwriting);
+ m2->restart = m1->restart;
+ m2->unusual = m1->unusual;
+ m2->cs_validated = m1->cs_validated;
+ m2->cs_tainted = m1->cs_tainted;
+ m2->cs_nx = m1->cs_nx;
+
+ /*
+ * If m1 had really been reusable,
+ * we would have just stolen it, so
+ * let's not propagate its "reusable"
+ * bit and assert that m2 is not
+ * marked as "reusable".
+ */
+ // m2->reusable = m1->reusable;
+ assert(!m2->reusable);
+
+ // assert(!m1->lopage);
+ m2->slid = m1->slid;
+
+ if (m1->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR)
+ m2->vm_page_q_state = VM_PAGE_USED_BY_COMPRESSOR;
+
+ /*
+ * page may need to be flushed if
+ * it is marshalled into a UPL
+ * that is going to be used by a device
+ * that doesn't support coherency
+ */
+ m2->written_by_kernel = TRUE;
+
+ /*
+ * make sure we clear the ref/mod state
+ * from the pmap layer... else we risk
+ * inheriting state from the last time
+ * this page was used...
+ */
+ pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m2), VM_MEM_MODIFIED | VM_MEM_REFERENCED);
if (refmod & VM_MEM_REFERENCED)
m2->reference = TRUE;
- if (refmod & VM_MEM_MODIFIED)
- m2->dirty = TRUE;
+ if (refmod & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(m2, TRUE);
+ }
offset = m1->offset;
/*
vm_page_free_prepare(m1);
/*
- * make sure we clear the ref/mod state
- * from the pmap layer... else we risk
- * inheriting state from the last time
- * this page was used...
+ * now put the substitute page
+ * on the object
*/
- pmap_clear_refmod(m2->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
- /*
- * now put the substitute page on the object
- */
- vm_page_insert_internal(m2, locked_object, offset, TRUE, TRUE);
+ vm_page_insert_internal(m2, locked_object, offset, VM_KERN_MEMORY_NONE, TRUE, TRUE, FALSE, FALSE, NULL);
- if (m2->reference)
- vm_page_activate(m2);
- else
- vm_page_deactivate(m2);
+ if (m2->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
+ m2->pmapped = TRUE;
+ m2->wpmapped = TRUE;
+
+ PMAP_ENTER(kernel_pmap, m2->offset, m2,
+ VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE, kr);
+ assert(kr == KERN_SUCCESS);
+
+ compressed_pages++;
+
+ } else {
+ if (m2->reference)
+ vm_page_activate(m2);
+ else
+ vm_page_deactivate(m2);
+ }
PAGE_WAKEUP_DONE(m2);
} else {
+ assert(m1->vm_page_q_state != VM_PAGE_USED_BY_COMPRESSOR);
+
/*
* completely cleans up the state
* of the page so that it is ready
*/
vm_page_free_prepare(m1);
}
-#if MACH_ASSERT
+
stolen_pages++;
-#endif
+
}
- m1->pageq.next = (queue_entry_t) m;
- m1->pageq.prev = NULL;
+#if CONFIG_BACKGROUND_QUEUE
+ vm_page_assign_background_state(m1);
+#endif
+ VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
+ m1->snext = m;
m = m1;
}
if (locked_object) {
}
if (abort_run == TRUE) {
- if (m != VM_PAGE_NULL) {
- vm_page_free_list(m, FALSE);
- }
-#if MACH_ASSERT
- dumped_run++;
-#endif
/*
* want the index of the last
* page in this run that was
*/
page_idx = tmp_start_idx + 2;
if (page_idx >= vm_pages_count) {
- if (wrapped)
+ if (wrapped) {
+ if (m != VM_PAGE_NULL) {
+ vm_page_unlock_queues();
+ vm_page_free_list(m, FALSE);
+ vm_page_lock_queues();
+ m = VM_PAGE_NULL;
+ }
+ dumped_run++;
goto done_scanning;
+ }
page_idx = last_idx = 0;
wrapped = TRUE;
}
last_idx = page_idx;
+ if (m != VM_PAGE_NULL) {
+ vm_page_unlock_queues();
+ vm_page_free_list(m, FALSE);
+ vm_page_lock_queues();
+ m = VM_PAGE_NULL;
+ }
+ dumped_run++;
+
lck_mtx_lock(&vm_page_queue_free_lock);
/*
* reset our free page limit since we
for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
- if (wire == TRUE)
+ assert(m1->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ assert(m1->wire_count == 0);
+
+ if (wire == TRUE) {
m1->wire_count++;
- else
+ m1->vm_page_q_state = VM_PAGE_IS_WIRED;
+ } else
m1->gobbled = TRUE;
}
if (wire == FALSE)
assert(vm_page_verify_contiguous(m, npages));
}
done_scanning:
+ PAGE_REPLACEMENT_ALLOWED(FALSE);
+
vm_page_unlock_queues();
#if DEBUG
tv_end_sec -= 1000000;
}
if (vm_page_find_contig_debug) {
- printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds... started at %d... scanned %d pages... yielded %d times... dumped run %d times... stole %d pages\n",
- __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
- (long)tv_end_sec, tv_end_usec, orig_last_idx,
- scanned, yielded, dumped_run, stolen_pages);
+ printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds... started at %d... scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages\n",
+ __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
+ (long)tv_end_sec, tv_end_usec, orig_last_idx,
+ scanned, yielded, dumped_run, stolen_pages, compressed_pages);
}
#endif
#if MACH_ASSERT
vm_page_verify_free_lists();
#endif
- return m;
-}
+ if (m == NULL && zone_gc_called == FALSE) {
+ printf("%s(num=%d,low=%d): found %d pages at 0x%llx...scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages... wired count is %d\n",
+ __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
+ scanned, yielded, dumped_run, stolen_pages, compressed_pages, vm_page_wire_count);
-/*
- * Allocate a list of contiguous, wired pages.
- */
+ if (consider_buffer_cache_collect != NULL) {
+ (void)(*consider_buffer_cache_collect)(1);
+ }
+
+ consider_zone_gc(FALSE);
+
+ zone_gc_called = TRUE;
+
+ printf("vm_page_find_contiguous: zone_gc called... wired count is %d\n", vm_page_wire_count);
+ goto full_scan_again;
+ }
+
+ return m;
+}
+
+/*
+ * Allocate a list of contiguous, wired pages.
+ */
kern_return_t
cpm_allocate(
vm_size_t size,
vm_page_t pages;
unsigned int npages;
- if (size % page_size != 0)
+ if (size % PAGE_SIZE != 0)
return KERN_INVALID_ARGUMENT;
npages = (unsigned int) (size / PAGE_SIZE);
* determine need for wakeups
*/
if ((vm_page_free_count < vm_page_free_min) ||
- ((vm_page_free_count < vm_page_free_target) &&
- ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_min)))
- thread_wakeup((event_t) &vm_page_free_wanted);
+ ((vm_page_free_count < vm_page_free_target) &&
+ ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_min)))
+ thread_wakeup((event_t) &vm_page_free_wanted);
-#if CONFIG_EMBEDDED
- {
- int percent_avail;
-
- /*
- * Decide if we need to poke the memorystatus notification thread.
- */
- percent_avail =
- (vm_page_active_count + vm_page_inactive_count +
- vm_page_speculative_count + vm_page_free_count +
- (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
- atop_64(max_mem);
- if (percent_avail <= (kern_memorystatus_level - 5)) {
- kern_memorystatus_level = percent_avail;
- thread_wakeup((event_t)&kern_memorystatus_wakeup);
- }
- }
-#endif
+ VM_CHECK_MEMORYSTATUS;
+
/*
* The CPM pages should now be available and
* ordered by ascending physical address.
*list = pages;
return KERN_SUCCESS;
}
+
+
+unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT;
+
+/*
+ * when working on a 'run' of pages, it is necessary to hold
+ * the vm_page_queue_lock (a hot global lock) for certain operations
+ * on the page... however, the majority of the work can be done
+ * while merely holding the object lock... in fact there are certain
+ * collections of pages that don't require any work brokered by the
+ * vm_page_queue_lock... to mitigate the time spent behind the global
+ * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
+ * while doing all of the work that doesn't require the vm_page_queue_lock...
+ * then call vm_page_do_delayed_work to acquire the vm_page_queue_lock and do the
+ * necessary work for each page... we will grab the busy bit on the page
+ * if it's not already held so that vm_page_do_delayed_work can drop the object lock
+ * if it can't immediately take the vm_page_queue_lock in order to compete
+ * for the locks in the same order that vm_pageout_scan takes them.
+ * the operation names are modeled after the names of the routines that
+ * need to be called in order to make the changes very obvious in the
+ * original loop
+ */
+
+void
+vm_page_do_delayed_work(
+ vm_object_t object,
+ vm_tag_t tag,
+ struct vm_page_delayed_work *dwp,
+ int dw_count)
+{
+ int j;
+ vm_page_t m;
+ vm_page_t local_free_q = VM_PAGE_NULL;
+
+ /*
+ * pageout_scan takes the vm_page_lock_queues first
+ * then tries for the object lock... to avoid what
+ * is effectively a lock inversion, we'll go to the
+ * trouble of taking them in that same order... otherwise
+ * if this object contains the majority of the pages resident
+ * in the UBC (or a small set of large objects actively being
+ * worked on contain the majority of the pages), we could
+ * cause the pageout_scan thread to 'starve' in its attempt
+ * to find pages to move to the free queue, since it has to
+ * successfully acquire the object lock of any candidate page
+ * before it can steal/clean it.
+ */
+ if (!vm_page_trylockspin_queues()) {
+ vm_object_unlock(object);
+
+ vm_page_lockspin_queues();
+
+ for (j = 0; ; j++) {
+ if (!vm_object_lock_avoid(object) &&
+ _vm_object_lock_try(object))
+ break;
+ vm_page_unlock_queues();
+ mutex_pause(j);
+ vm_page_lockspin_queues();
+ }
+ }
+ for (j = 0; j < dw_count; j++, dwp++) {
+
+ m = dwp->dw_m;
+
+ if (dwp->dw_mask & DW_vm_pageout_throttle_up)
+ vm_pageout_throttle_up(m);
+#if CONFIG_PHANTOM_CACHE
+ if (dwp->dw_mask & DW_vm_phantom_cache_update)
+ vm_phantom_cache_update(m);
+#endif
+ if (dwp->dw_mask & DW_vm_page_wire)
+ vm_page_wire(m, tag, FALSE);
+ else if (dwp->dw_mask & DW_vm_page_unwire) {
+ boolean_t queueit;
+
+ queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE;
+
+ vm_page_unwire(m, queueit);
+ }
+ if (dwp->dw_mask & DW_vm_page_free) {
+ vm_page_free_prepare_queues(m);
+
+ assert(m->pageq.next == 0 && m->pageq.prev == 0);
+ /*
+ * Add this page to our list of reclaimed pages,
+ * to be freed later.
+ */
+ m->snext = local_free_q;
+ local_free_q = m;
+ } else {
+ if (dwp->dw_mask & DW_vm_page_deactivate_internal)
+ vm_page_deactivate_internal(m, FALSE);
+ else if (dwp->dw_mask & DW_vm_page_activate) {
+ if (m->vm_page_q_state != VM_PAGE_ON_ACTIVE_Q) {
+ vm_page_activate(m);
+ }
+ }
+ else if (dwp->dw_mask & DW_vm_page_speculate)
+ vm_page_speculate(m, TRUE);
+ else if (dwp->dw_mask & DW_enqueue_cleaned) {
+ /*
+ * if we didn't hold the object lock and did this,
+ * we might disconnect the page, then someone might
+ * soft fault it back in, then we would put it on the
+ * cleaned queue, and so we would have a referenced (maybe even dirty)
+ * page on that queue, which we don't want
+ */
+ int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
+
+ if ((refmod_state & VM_MEM_REFERENCED)) {
+ /*
+ * this page has been touched since it got cleaned; let's activate it
+ * if it hasn't already been
+ */
+ vm_pageout_enqueued_cleaned++;
+ vm_pageout_cleaned_reactivated++;
+ vm_pageout_cleaned_commit_reactivated++;
+
+ if (m->vm_page_q_state != VM_PAGE_ON_ACTIVE_Q)
+ vm_page_activate(m);
+ } else {
+ m->reference = FALSE;
+ vm_page_enqueue_cleaned(m);
+ }
+ }
+ else if (dwp->dw_mask & DW_vm_page_lru)
+ vm_page_lru(m);
+ else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) {
+ if (m->vm_page_q_state != VM_PAGE_ON_PAGEOUT_Q)
+ vm_page_queues_remove(m, TRUE);
+ }
+ if (dwp->dw_mask & DW_set_reference)
+ m->reference = TRUE;
+ else if (dwp->dw_mask & DW_clear_reference)
+ m->reference = FALSE;
+
+ if (dwp->dw_mask & DW_move_page) {
+ if (m->vm_page_q_state != VM_PAGE_ON_PAGEOUT_Q) {
+ vm_page_queues_remove(m, FALSE);
+
+ assert(VM_PAGE_OBJECT(m) != kernel_object);
+
+ vm_page_enqueue_inactive(m, FALSE);
+ }
+ }
+ if (dwp->dw_mask & DW_clear_busy)
+ m->busy = FALSE;
+
+ if (dwp->dw_mask & DW_PAGE_WAKEUP)
+ PAGE_WAKEUP(m);
+ }
+ }
+ vm_page_unlock_queues();
+
+ if (local_free_q)
+ vm_page_free_list(local_free_q, TRUE);
+ VM_CHECK_MEMORYSTATUS;
+
+}
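
/*
 * Sketch of the caller side of the 2 pass scheme described above. The real
 * callers use helper macros that are not part of this hunk, so this loop is
 * illustrative only: it relies just on the dw_m / dw_mask fields used above
 * and on vm_page_do_delayed_work(), and it assumes the caller already holds
 * the object lock and has a singly linked (snext) list of that object's
 * pages to process.
 */
static void
vm_page_delayed_work_example(
	vm_object_t	object,
	vm_tag_t	tag,
	vm_page_t	page_list)
{
	struct vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work	*dwp = &dw_array[0];
	int				dw_count = 0;
	vm_page_t			m;

	for (m = page_list; m != VM_PAGE_NULL; m = m->snext) {
		/*
		 * pass 1: do whatever only needs the object lock here,
		 * then just record the queue-lock work for this page
		 */
		dwp->dw_m = m;
		dwp->dw_mask = DW_vm_page_activate;
		dwp++;
		dw_count++;

		if (dw_count >= DEFAULT_DELAYED_WORK_LIMIT) {
			/*
			 * pass 2: take the page queue lock once for the
			 * whole batch instead of once per page
			 */
			vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
			dwp = &dw_array[0];
			dw_count = 0;
		}
	}
	if (dw_count)
		vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
}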
kern_return_t
vm_page_alloc_list(
return (KERN_RESOURCE_SHORTAGE);
}
- mem->pageq.next = (queue_entry_t) lo_page_list;
+ mem->snext = lo_page_list;
lo_page_list = mem;
}
*list = lo_page_list;
vm_page_t
vm_page_get_next(vm_page_t page)
{
- return ((vm_page_t) page->pageq.next);
+ return (page->snext);
}
vm_object_offset_t
ppnum_t
vm_page_get_phys_page(vm_page_t page)
{
- return (page->phys_page);
+ return (VM_PAGE_GET_PHYS_PAGE(page));
}
static vm_page_t hibernate_gobble_queue;
-extern boolean_t (* volatile consider_buffer_cache_collect)(int);
-
static int hibernate_drain_pageout_queue(struct vm_pageout_queue *);
-static int hibernate_flush_dirty_pages(void);
-static int hibernate_flush_queue(queue_head_t *, int);
-static void hibernate_dirty_page(vm_page_t);
+static int hibernate_flush_dirty_pages(int);
+static int hibernate_flush_queue(vm_page_queue_head_t *, int);
void hibernate_flush_wait(void);
void hibernate_mark_in_progress(void);
void hibernate_clear_in_progress(void);
+void hibernate_free_range(int, int);
+void hibernate_hash_insert_page(vm_page_t);
+uint32_t hibernate_mark_as_unneeded(addr64_t, addr64_t, hibernate_page_list_t *, hibernate_page_list_t *);
+void hibernate_rebuild_vm_structs(void);
+uint32_t hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *);
+ppnum_t hibernate_lookup_paddr(unsigned int);
struct hibernate_statistics {
int hibernate_considered;
int hibernate_skipped_cleaning;
int hibernate_skipped_transient;
int hibernate_skipped_precious;
+ int hibernate_skipped_external;
int hibernate_queue_nolock;
int hibernate_queue_paused;
int hibernate_throttled;
int cd_found_cleaning;
int cd_found_laundry;
int cd_found_dirty;
+ int cd_found_xpmapped;
+ int cd_skipped_xpmapped;
int cd_local_free;
int cd_total_free;
int cd_vm_page_wire_count;
+ int cd_vm_struct_pages_unneeded;
int cd_pages;
int cd_discarded;
int cd_count_wire;
} hibernate_stats;
+/*
+ * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
+ * so that we don't overrun the estimated image size, which would
+ * result in a hibernation failure.
+ */
+#define HIBERNATE_XPMAPPED_LIMIT 40000
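
/*
 * Sketch of how the limit above is assumed to be applied while building the
 * image: xpmapped pages are accepted until HIBERNATE_XPMAPPED_LIMIT of them
 * have been counted, after which further ones are skipped and accounted in
 * cd_skipped_xpmapped. The code that actually consults the limit is not part
 * of this hunk; this helper is illustrative only.
 */
static boolean_t
hibernate_xpmapped_allowed_sketch(void)
{
	if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
		hibernate_stats.cd_found_xpmapped++;
		return (TRUE);
	}
	hibernate_stats.cd_skipped_xpmapped++;
	return (FALSE);
}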
+
static int
hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
vm_page_lock_queues();
- while (q->pgo_laundry) {
+ while ( !vm_page_queue_empty(&q->pgo_pending) ) {
q->pgo_draining = TRUE;
wait_result = thread_block(THREAD_CONTINUE_NULL);
- if (wait_result == THREAD_TIMED_OUT) {
+ if (wait_result == THREAD_TIMED_OUT && !vm_page_queue_empty(&q->pgo_pending)) {
hibernate_stats.hibernate_drain_timeout++;
+
+ if (q == &vm_pageout_queue_external)
+ return (0);
+
return (1);
}
vm_page_lock_queues();
return (0);
}
-static void
-hibernate_dirty_page(vm_page_t m)
-{
- vm_object_t object = m->object;
- struct vm_pageout_queue *q;
-
-#if DEBUG
- lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
- vm_object_lock_assert_exclusive(object);
-
- /*
- * protect the object from collapse -
- * locking in the object's paging_offset.
- */
- vm_object_paging_begin(object);
-
- m->list_req_pending = TRUE;
- m->cleaning = TRUE;
- m->busy = TRUE;
-
- if (object->internal == TRUE)
- q = &vm_pageout_queue_internal;
- else
- q = &vm_pageout_queue_external;
-
- /*
- * pgo_laundry count is tied to the laundry bit
- */
- m->laundry = TRUE;
- q->pgo_laundry++;
- m->pageout_queue = TRUE;
- queue_enter(&q->pgo_pending, m, vm_page_t, pageq);
-
- if (q->pgo_idle == TRUE) {
- q->pgo_idle = FALSE;
- thread_wakeup((event_t) &q->pgo_pending);
- }
-}
+boolean_t hibernate_skip_external = FALSE;
static int
-hibernate_flush_queue(queue_head_t *q, int qcount)
+hibernate_flush_queue(vm_page_queue_head_t *q, int qcount)
{
vm_page_t m;
vm_object_t l_object = NULL;
struct vm_pageout_queue *eq;
struct vm_pageout_queue *tq;
+ KDBG(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START,
+ VM_KERNEL_UNSLIDE_OR_PERM(q), qcount);
- KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START, q, qcount, 0, 0, 0);
-
iq = &vm_pageout_queue_internal;
eq = &vm_pageout_queue_external;
vm_page_lock_queues();
- while (qcount && !queue_empty(q)) {
+ while (qcount && !vm_page_queue_empty(q)) {
if (current_run++ == 1000) {
if (hibernate_should_abort()) {
current_run = 0;
}
- m = (vm_page_t) queue_first(q);
- m_object = m->object;
+ m = (vm_page_t) vm_page_queue_first(q);
+ m_object = VM_PAGE_OBJECT(m);
/*
* check to see if we currently are working
goto reenter_pg_on_q;
}
- vm_pageout_scan_wants_object = m_object;
vm_page_unlock_queues();
mutex_pause(try_failed_count++);
continue;
} else {
l_object = m_object;
- vm_pageout_scan_wants_object = VM_OBJECT_NULL;
}
}
- if ( !m_object->alive || m->encrypted_cleaning || m->cleaning || m->busy || m->absent || m->error) {
+ if ( !m_object->alive || m->cleaning || m->laundry || m->busy || m->absent || m->error) {
/*
* page is not to be cleaned
* put it back on the head of its queue
goto reenter_pg_on_q;
}
- if ( !m_object->pager_initialized && m_object->pager_created)
- goto reenter_pg_on_q;
-
if (m_object->copy == VM_OBJECT_NULL) {
if (m_object->purgable == VM_PURGABLE_VOLATILE || m_object->purgable == VM_PURGABLE_EMPTY) {
/*
}
}
if ( !m->dirty && m->pmapped) {
- refmod_state = pmap_get_refmod(m->phys_page);
+ refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
- if ((refmod_state & VM_MEM_MODIFIED))
- m->dirty = TRUE;
+ if ((refmod_state & VM_MEM_MODIFIED)) {
+ SET_PAGE_DIRTY(m, FALSE);
+ }
} else
refmod_state = 0;
goto reenter_pg_on_q;
}
+
+ if (hibernate_skip_external == TRUE && !m_object->internal) {
+
+ hibernate_stats.hibernate_skipped_external++;
+
+ goto reenter_pg_on_q;
+ }
tq = NULL;
if (m_object->internal) {
vm_object_unlock(l_object);
l_object = NULL;
}
- vm_pageout_scan_wants_object = VM_OBJECT_NULL;
-
- tq->pgo_throttled = TRUE;
while (retval == 0) {
+ tq->pgo_throttled = TRUE;
+
assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000*NSEC_PER_USEC);
- vm_page_unlock_queues();
+ vm_page_unlock_queues();
- wait_result = thread_block(THREAD_CONTINUE_NULL);
+ wait_result = thread_block(THREAD_CONTINUE_NULL);
vm_page_lock_queues();
+ if (wait_result != THREAD_TIMED_OUT)
+ break;
+ if (!VM_PAGE_Q_THROTTLED(tq))
+ break;
+
if (hibernate_should_abort())
retval = 1;
- if (wait_result != THREAD_TIMED_OUT)
- break;
-
if (--wait_count == 0) {
- hibernate_stats.hibernate_throttle_timeout++;
- retval = 1;
- }
+
+ hibernate_stats.hibernate_throttle_timeout++;
+
+ if (tq == eq) {
+ hibernate_skip_external = TRUE;
+ break;
+ }
+ retval = 1;
+ }
}
if (retval)
break;
continue;
}
- VM_PAGE_QUEUES_REMOVE(m);
+ /*
+ * we've already factored out pages in the laundry which
+ * means this page can't be on the pageout queue so it's
+ * safe to do the vm_page_queues_remove
+ */
+ vm_page_queues_remove(m, TRUE);
- hibernate_dirty_page(m);
+ if (m_object->internal == TRUE)
+ pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m), PMAP_OPTIONS_COMPRESSOR, NULL);
+
+ vm_pageout_cluster(m);
hibernate_stats.hibernate_found_dirty++;
goto next_pg;
reenter_pg_on_q:
- queue_remove(q, m, vm_page_t, pageq);
- queue_enter(q, m, vm_page_t, pageq);
+ vm_page_queue_remove(q, m, vm_page_t, pageq);
+ vm_page_queue_enter(q, m, vm_page_t, pageq);
hibernate_stats.hibernate_reentered_on_q++;
next_pg:
vm_object_unlock(l_object);
l_object = NULL;
}
- vm_pageout_scan_wants_object = VM_OBJECT_NULL;
vm_page_unlock_queues();
static int
-hibernate_flush_dirty_pages()
+hibernate_flush_dirty_pages(int pass)
{
struct vm_speculative_age_q *aq;
uint32_t i;
- bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
-
if (vm_page_local_q) {
for (i = 0; i < vm_page_local_q_count; i++)
vm_page_reactivate_local(i, TRUE, FALSE);
aq = &vm_page_queue_speculative[i];
- if (queue_empty(&aq->age_q))
+ if (vm_page_queue_empty(&aq->age_q))
continue;
qcount = 0;
vm_page_lockspin_queues();
- queue_iterate(&aq->age_q,
+ vm_page_queue_iterate(&aq->age_q,
m,
vm_page_t,
pageq)
return (1);
}
}
- if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count))
+ if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_page_anonymous_count - vm_page_cleaned_count))
return (1);
- if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_zf_queue_count))
+ /* XXX FBDP TODO: flush secluded queue */
+ if (hibernate_flush_queue(&vm_page_queue_anonymous, vm_page_anonymous_count))
return (1);
- if (hibernate_flush_queue(&vm_page_queue_zf, vm_zf_queue_count))
+ if (hibernate_flush_queue(&vm_page_queue_cleaned, vm_page_cleaned_count))
return (1);
-
if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal))
return (1);
- return (hibernate_drain_pageout_queue(&vm_pageout_queue_external));
+
+ if (pass == 1)
+ vm_compressor_record_warmup_start();
+
+ if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count)) {
+ if (pass == 1)
+ vm_compressor_record_warmup_end();
+ return (1);
+ }
+ if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
+ if (pass == 1)
+ vm_compressor_record_warmup_end();
+ return (1);
+ }
+ if (pass == 1)
+ vm_compressor_record_warmup_end();
+
+ if (hibernate_skip_external == FALSE && hibernate_drain_pageout_queue(&vm_pageout_queue_external))
+ return (1);
+
+ return (0);
}
-extern void IOSleep(unsigned int);
-extern int sync_internal(void);
+void
+hibernate_reset_stats()
+{
+ bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
+}
+
int
hibernate_flush_memory()
{
int retval;
+ assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
+
KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_START, vm_page_free_count, 0, 0, 0, 0);
- IOSleep(2 * 1000);
+ hibernate_cleaning_in_progress = TRUE;
+ hibernate_skip_external = FALSE;
+
+ if ((retval = hibernate_flush_dirty_pages(1)) == 0) {
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
- KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_NONE, vm_page_free_count, 0, 0, 0, 0);
+ vm_compressor_flush();
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
- if ((retval = hibernate_flush_dirty_pages()) == 0) {
if (consider_buffer_cache_collect != NULL) {
+ unsigned int orig_wire_count;
- KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, vm_page_wire_count, 0, 0, 0, 0);
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0);
+ orig_wire_count = vm_page_wire_count;
- sync_internal();
(void)(*consider_buffer_cache_collect)(1);
- consider_zone_gc(1);
+ consider_zone_gc(FALSE);
+
+ HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count - vm_page_wire_count);
- KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, vm_page_wire_count, 0, 0, 0, 0);
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, orig_wire_count - vm_page_wire_count, 0, 0, 0, 0);
}
}
+ hibernate_cleaning_in_progress = FALSE;
+
KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_END, vm_page_free_count, hibernate_stats.hibernate_found_dirty, retval, 0, 0);
+ if (retval)
+ HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT);
+
+
HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n",
hibernate_stats.hibernate_considered,
hibernate_stats.hibernate_reentered_on_q,
hibernate_stats.hibernate_found_dirty);
- HIBPRINT(" skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) queue_nolock(%d)\n",
+ HIBPRINT(" skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n",
hibernate_stats.hibernate_skipped_cleaning,
hibernate_stats.hibernate_skipped_transient,
hibernate_stats.hibernate_skipped_precious,
+ hibernate_stats.hibernate_skipped_external,
hibernate_stats.hibernate_queue_nolock);
HIBPRINT(" queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n",
hibernate_stats.hibernate_queue_paused,
return (retval);
}
+
static void
hibernate_page_list_zero(hibernate_page_list_t *list)
{
}
}
-void
-hibernate_gobble_pages(uint32_t gobble_count, uint32_t free_page_time)
-{
- uint32_t i;
- vm_page_t m;
- uint64_t start, end, timeout, nsec;
- clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout);
- clock_get_uptime(&start);
-
- for (i = 0; i < gobble_count; i++)
- {
- while (VM_PAGE_NULL == (m = vm_page_grab()))
- {
- clock_get_uptime(&end);
- if (end >= timeout)
- break;
- VM_PAGE_WAIT();
- }
- if (!m)
- break;
- m->busy = FALSE;
- vm_page_gobble(m);
-
- m->pageq.next = (queue_entry_t) hibernate_gobble_queue;
- hibernate_gobble_queue = m;
- }
-
- clock_get_uptime(&end);
- absolutetime_to_nanoseconds(end - start, &nsec);
- HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
-}
-
void
hibernate_free_gobble_pages(void)
{
m = (vm_page_t) hibernate_gobble_queue;
while(m)
{
- next = (vm_page_t) m->pageq.next;
+ next = m->snext;
vm_page_free(m);
count++;
m = next;
}
static boolean_t
-hibernate_consider_discard(vm_page_t m)
+hibernate_consider_discard(vm_page_t m, boolean_t preflight)
{
vm_object_t object = NULL;
int refmod_state;
if (m->private)
panic("hibernate_consider_discard: private");
- if (!vm_object_lock_try(m->object)) {
- hibernate_stats.cd_lock_failed++;
+ object = VM_PAGE_OBJECT(m);
+
+ if (!vm_object_lock_try(object)) {
+ object = NULL;
+ if (!preflight) hibernate_stats.cd_lock_failed++;
break;
}
- object = m->object;
-
if (VM_PAGE_WIRED(m)) {
- hibernate_stats.cd_found_wired++;
+ if (!preflight) hibernate_stats.cd_found_wired++;
break;
}
if (m->precious) {
- hibernate_stats.cd_found_precious++;
+ if (!preflight) hibernate_stats.cd_found_precious++;
break;
}
if (m->busy || !object->alive) {
/*
* Somebody is playing with this page.
*/
- hibernate_stats.cd_found_busy++;
- break;
+ if (!preflight) hibernate_stats.cd_found_busy++;
+ break;
}
if (m->absent || m->unusual || m->error) {
/*
 * If it's unusual in any way, ignore it
*/
- hibernate_stats.cd_found_unusual++;
+ if (!preflight) hibernate_stats.cd_found_unusual++;
break;
}
if (m->cleaning) {
- hibernate_stats.cd_found_cleaning++;
+ if (!preflight) hibernate_stats.cd_found_cleaning++;
break;
}
- if (m->laundry || m->list_req_pending) {
- hibernate_stats.cd_found_laundry++;
+ if (m->laundry) {
+ if (!preflight) hibernate_stats.cd_found_laundry++;
break;
}
if (!m->dirty)
{
- refmod_state = pmap_get_refmod(m->phys_page);
+ refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
if (refmod_state & VM_MEM_REFERENCED)
m->reference = TRUE;
- if (refmod_state & VM_MEM_MODIFIED)
- m->dirty = TRUE;
+ if (refmod_state & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(m, FALSE);
+ }
}
/*
|| (VM_PURGABLE_VOLATILE == object->purgable)
|| (VM_PURGABLE_EMPTY == object->purgable);
- if (discard == FALSE)
- hibernate_stats.cd_found_dirty++;
+
+ if (discard == FALSE) {
+ if (!preflight)
+ hibernate_stats.cd_found_dirty++;
+ } else if (m->xpmapped && m->reference && !object->internal) {
+ if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
+ if (!preflight)
+ hibernate_stats.cd_found_xpmapped++;
+ discard = FALSE;
+ } else {
+ if (!preflight)
+ hibernate_stats.cd_skipped_xpmapped++;
+ }
+ }
}
while (FALSE);
static void
hibernate_discard_page(vm_page_t m)
{
+ vm_object_t m_object;
+
if (m->absent || m->unusual || m->error)
/*
 * If it's unusual in any way, ignore
*/
return;
+ m_object = VM_PAGE_OBJECT(m);
+
+#if MACH_ASSERT || DEBUG
+ if (!vm_object_lock_try(m_object))
+ panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
+#else
+ /* No need to lock page queue for token delete, hibernate_vm_unlock()
+ makes sure these locks are uncontended before sleep */
+#endif /* MACH_ASSERT || DEBUG */
+
if (m->pmapped == TRUE)
{
- __unused int refmod_state = pmap_disconnect(m->phys_page);
+ __unused int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
}
if (m->laundry)
if (m->fictitious)
panic("hibernate_discard_page(%p) fictitious", m);
- if (VM_PURGABLE_VOLATILE == m->object->purgable)
+ if (VM_PURGABLE_VOLATILE == m_object->purgable)
{
/* object should be on a queue */
- assert((m->object->objq.next != NULL) && (m->object->objq.prev != NULL));
- purgeable_q_t old_queue = vm_purgeable_object_remove(m->object);
+ assert((m_object->objq.next != NULL) && (m_object->objq.prev != NULL));
+ purgeable_q_t old_queue = vm_purgeable_object_remove(m_object);
assert(old_queue);
- /* No need to lock page queue for token delete, hibernate_vm_unlock()
- makes sure these locks are uncontended before sleep */
- vm_purgeable_token_delete_first(old_queue);
- m->object->purgable = VM_PURGABLE_EMPTY;
+ if (m_object->purgeable_when_ripe) {
+ vm_purgeable_token_delete_first(old_queue);
+ }
+ vm_object_lock_assert_exclusive(m_object);
+ m_object->purgable = VM_PURGABLE_EMPTY;
+
+ /*
+ * Purgeable ledgers: pages of VOLATILE and EMPTY objects are
+ * accounted in the "volatile" ledger, so no change here.
+ * We have to update vm_page_purgeable_count, though, since we're
+ * effectively purging this object.
+ */
+ unsigned int delta;
+ assert(m_object->resident_page_count >= m_object->wired_page_count);
+ delta = (m_object->resident_page_count - m_object->wired_page_count);
+ assert(vm_page_purgeable_count >= delta);
+ assert(delta > 0);
+ OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count);
}
vm_page_free(m);
+
+#if MACH_ASSERT || DEBUG
+ vm_object_unlock(m_object);
+#endif /* MACH_ASSERT || DEBUG */
+}
+
+/*
+ Grab locks for hibernate_page_list_setall()
+*/
+void
+hibernate_vm_lock_queues(void)
+{
+ vm_object_lock(compressor_object);
+ vm_page_lock_queues();
+ lck_mtx_lock(&vm_page_queue_free_lock);
+ lck_mtx_lock(&vm_purgeable_queue_lock);
+
+ if (vm_page_local_q) {
+ uint32_t i;
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+ VPL_LOCK(&lq->vpl_lock);
+ }
+ }
+}
+
+void
+hibernate_vm_unlock_queues(void)
+{
+ if (vm_page_local_q) {
+ uint32_t i;
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+ VPL_UNLOCK(&lq->vpl_lock);
+ }
+ }
+ lck_mtx_unlock(&vm_purgeable_queue_lock);
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+ vm_page_unlock_queues();
+ vm_object_unlock(compressor_object);
}
/*
void
hibernate_page_list_setall(hibernate_page_list_t * page_list,
hibernate_page_list_t * page_list_wired,
+ hibernate_page_list_t * page_list_pal,
+ boolean_t preflight,
+ boolean_t will_discard,
uint32_t * pagesOut)
{
uint64_t start, end, nsec;
vm_page_t m;
+ vm_page_t next;
uint32_t pages = page_list->page_count;
- uint32_t count_zf = 0, count_throttled = 0;
- uint32_t count_inactive = 0, count_active = 0, count_speculative = 0;
+ uint32_t count_anonymous = 0, count_throttled = 0, count_compressor = 0;
+ uint32_t count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0;
uint32_t count_wire = pages;
uint32_t count_discard_active = 0;
uint32_t count_discard_inactive = 0;
+ uint32_t count_discard_cleaned = 0;
uint32_t count_discard_purgeable = 0;
uint32_t count_discard_speculative = 0;
+ uint32_t count_discard_vm_struct_pages = 0;
uint32_t i;
uint32_t bank;
hibernate_bitmap_t * bitmap;
hibernate_bitmap_t * bitmap_wired;
+ boolean_t discard_all;
+ boolean_t discard;
+
+ HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight);
+
+ if (preflight) {
+ page_list = NULL;
+ page_list_wired = NULL;
+ page_list_pal = NULL;
+ discard_all = FALSE;
+ } else {
+ discard_all = will_discard;
+ }
+#if MACH_ASSERT || DEBUG
+ if (!preflight)
+ {
+ assert(hibernate_vm_locks_are_safe());
+ vm_page_lock_queues();
+ if (vm_page_local_q) {
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+ VPL_LOCK(&lq->vpl_lock);
+ }
+ }
+ }
+#endif /* MACH_ASSERT || DEBUG */
- HIBLOG("hibernate_page_list_setall start %p, %p\n", page_list, page_list_wired);
KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0);
clock_get_uptime(&start);
- hibernate_page_list_zero(page_list);
- hibernate_page_list_zero(page_list_wired);
-
- hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
- hibernate_stats.cd_pages = pages;
+ if (!preflight) {
+ hibernate_page_list_zero(page_list);
+ hibernate_page_list_zero(page_list_wired);
+ hibernate_page_list_zero(page_list_pal);
+
+ hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
+ hibernate_stats.cd_pages = pages;
+ }
if (vm_page_local_q) {
for (i = 0; i < vm_page_local_q_count; i++)
- vm_page_reactivate_local(i, TRUE, TRUE);
+ vm_page_reactivate_local(i, TRUE, !preflight);
+ }
+
+ if (preflight) {
+ vm_object_lock(compressor_object);
+ vm_page_lock_queues();
+ lck_mtx_lock(&vm_page_queue_free_lock);
}
m = (vm_page_t) hibernate_gobble_queue;
- while(m)
+ while (m)
{
pages--;
count_wire--;
- hibernate_page_bitset(page_list, TRUE, m->phys_page);
- hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
- m = (vm_page_t) m->pageq.next;
+ if (!preflight) {
+ hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ }
+ m = m->snext;
}
-#ifndef PPC
- for( i = 0; i < real_ncpus; i++ )
+
+ if (!preflight) for( i = 0; i < real_ncpus; i++ )
{
if (cpu_data_ptr[i] && cpu_data_ptr[i]->cpu_processor)
{
- for (m = PROCESSOR_DATA(cpu_data_ptr[i]->cpu_processor, free_pages); m; m = (vm_page_t)m->pageq.next)
+ for (m = PROCESSOR_DATA(cpu_data_ptr[i]->cpu_processor, free_pages); m; m = m->snext)
{
+ assert(m->vm_page_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
+
pages--;
count_wire--;
- hibernate_page_bitset(page_list, TRUE, m->phys_page);
- hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+ hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
hibernate_stats.cd_local_free++;
hibernate_stats.cd_total_free++;
}
}
}
-#endif
+
for( i = 0; i < vm_colors; i++ )
{
- queue_iterate(&vm_page_queue_free[i],
- m,
- vm_page_t,
- pageq)
+ vm_page_queue_iterate(&vm_page_queue_free[i].qhead,
+ m,
+ vm_page_t,
+ pageq)
{
+ assert(m->vm_page_q_state == VM_PAGE_ON_FREE_Q);
+
pages--;
count_wire--;
- hibernate_page_bitset(page_list, TRUE, m->phys_page);
- hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
-
- hibernate_stats.cd_total_free++;
+ if (!preflight) {
+ hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+
+ hibernate_stats.cd_total_free++;
+ }
}
}
- queue_iterate(&vm_lopage_queue_free,
- m,
- vm_page_t,
- pageq)
+ vm_page_queue_iterate(&vm_lopage_queue_free,
+ m,
+ vm_page_t,
+ pageq)
{
+ assert(m->vm_page_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
+
pages--;
count_wire--;
- hibernate_page_bitset(page_list, TRUE, m->phys_page);
- hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
-
- hibernate_stats.cd_total_free++;
+ if (!preflight) {
+ hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+
+ hibernate_stats.cd_total_free++;
+ }
}
- queue_iterate( &vm_page_queue_throttled,
- m,
- vm_page_t,
- pageq )
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
+ while (m && !vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t)m))
{
+ assert(m->vm_page_q_state == VM_PAGE_ON_THROTTLED_Q);
+
+ next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.next);
+ discard = FALSE;
if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
- && hibernate_consider_discard(m))
+ && hibernate_consider_discard(m, preflight))
{
- hibernate_page_bitset(page_list, TRUE, m->phys_page);
+ if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
count_discard_inactive++;
+ discard = discard_all;
}
else
count_throttled++;
count_wire--;
- hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+
+ if (discard) hibernate_discard_page(m);
+ m = next;
}
- queue_iterate( &vm_page_queue_zf,
- m,
- vm_page_t,
- pageq )
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
+ while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m))
{
+ assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
+
+ next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.next);
+ discard = FALSE;
if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
- && hibernate_consider_discard(m))
+ && hibernate_consider_discard(m, preflight))
{
- hibernate_page_bitset(page_list, TRUE, m->phys_page);
+ if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
if (m->dirty)
count_discard_purgeable++;
else
count_discard_inactive++;
+ discard = discard_all;
}
else
- count_zf++;
+ count_anonymous++;
count_wire--;
- hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ if (discard) hibernate_discard_page(m);
+ m = next;
}
- queue_iterate( &vm_page_queue_inactive,
- m,
- vm_page_t,
- pageq )
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
+ while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m))
{
+ assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
+
+ next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.next);
+ discard = FALSE;
if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
- && hibernate_consider_discard(m))
+ && hibernate_consider_discard(m, preflight))
{
- hibernate_page_bitset(page_list, TRUE, m->phys_page);
+ if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
if (m->dirty)
count_discard_purgeable++;
else
- count_discard_inactive++;
+ count_discard_cleaned++;
+ discard = discard_all;
}
else
- count_inactive++;
+ count_cleaned++;
count_wire--;
- hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ if (discard) hibernate_discard_page(m);
+ m = next;
}
- for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ )
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
+ while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m))
{
- queue_iterate(&vm_page_queue_speculative[i].age_q,
- m,
- vm_page_t,
- pageq)
- {
- if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
- && hibernate_consider_discard(m))
- {
- hibernate_page_bitset(page_list, TRUE, m->phys_page);
- count_discard_speculative++;
- }
- else
- count_speculative++;
- count_wire--;
- hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
- }
- }
+ assert(m->vm_page_q_state == VM_PAGE_ON_ACTIVE_Q);
- queue_iterate( &vm_page_queue_active,
- m,
- vm_page_t,
- pageq )
- {
+ next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.next);
+ discard = FALSE;
if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
- && hibernate_consider_discard(m))
+ && hibernate_consider_discard(m, preflight))
{
- hibernate_page_bitset(page_list, TRUE, m->phys_page);
+ if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
if (m->dirty)
count_discard_purgeable++;
else
count_discard_active++;
+ discard = discard_all;
}
else
count_active++;
count_wire--;
- hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ if (discard) hibernate_discard_page(m);
+ m = next;
+ }
+
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
+ while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m))
+ {
+ assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
+
+ next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.next);
+ discard = FALSE;
+ if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
+ && hibernate_consider_discard(m, preflight))
+ {
+ if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ if (m->dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_inactive++;
+ discard = discard_all;
+ }
+ else
+ count_inactive++;
+ count_wire--;
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ if (discard) hibernate_discard_page(m);
+ m = next;
}
+ /* XXX FBDP TODO: secluded queue */
- // pull wired from hibernate_bitmap
+ for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ )
+ {
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
+ while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m))
+ {
+ assert(m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q);
+
+ next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->pageq.next);
+ discard = FALSE;
+ if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
+ && hibernate_consider_discard(m, preflight))
+ {
+ if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ count_discard_speculative++;
+ discard = discard_all;
+ }
+ else
+ count_speculative++;
+ count_wire--;
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ if (discard) hibernate_discard_page(m);
+ m = next;
+ }
+ }
- bitmap = &page_list->bank_bitmap[0];
- bitmap_wired = &page_list_wired->bank_bitmap[0];
- for (bank = 0; bank < page_list->bank_count; bank++)
+ vm_page_queue_iterate(&compressor_object->memq, m, vm_page_t, listq)
{
- for (i = 0; i < bitmap->bitmapwords; i++)
- bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
- bitmap = (hibernate_bitmap_t *) &bitmap->bitmap [bitmap->bitmapwords];
- bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
+ assert(m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR);
+
+ count_compressor++;
+ count_wire--;
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ }
+
+ if (preflight == FALSE && discard_all == TRUE) {
+ KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START);
+
+ HIBLOG("hibernate_teardown started\n");
+ count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired);
+ HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages);
+
+ pages -= count_discard_vm_struct_pages;
+ count_wire -= count_discard_vm_struct_pages;
+
+ hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages;
+
+ KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_END);
+ }
+
+ if (!preflight) {
+ // pull wired from hibernate_bitmap
+ bitmap = &page_list->bank_bitmap[0];
+ bitmap_wired = &page_list_wired->bank_bitmap[0];
+ for (bank = 0; bank < page_list->bank_count; bank++)
+ {
+ for (i = 0; i < bitmap->bitmapwords; i++)
+ bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
+ bitmap = (hibernate_bitmap_t *) &bitmap->bitmap [bitmap->bitmapwords];
+ bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
+ }
}
// machine dependent adjustments
- hibernate_page_list_setall_machine(page_list, page_list_wired, &pages);
+ hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages);
- hibernate_stats.cd_count_wire = count_wire;
- hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable + count_discard_speculative;
+ if (!preflight) {
+ hibernate_stats.cd_count_wire = count_wire;
+ hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable +
+ count_discard_speculative + count_discard_cleaned + count_discard_vm_struct_pages;
+ }
clock_get_uptime(&end);
absolutetime_to_nanoseconds(end - start, &nsec);
HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
- HIBLOG("pages %d, wire %d, act %d, inact %d, spec %d, zf %d, throt %d, could discard act %d inact %d purgeable %d spec %d\n",
- pages, count_wire, count_active, count_inactive, count_speculative, count_zf, count_throttled,
- count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative);
+ HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n %s discard act %d inact %d purgeable %d spec %d cleaned %d\n",
+ pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped,
+ discard_all ? "did" : "could",
+ count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
+
+ if (hibernate_stats.cd_skipped_xpmapped)
+ HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);
+
+ *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned;
+
+ if (preflight && will_discard) *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
+
+#if MACH_ASSERT || DEBUG
+ if (!preflight)
+ {
+ if (vm_page_local_q) {
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+ VPL_UNLOCK(&lq->vpl_lock);
+ }
+ }
+ vm_page_unlock_queues();
+ }
+#endif /* MACH_ASSERT || DEBUG */
- *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative;
+ if (preflight) {
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+ vm_page_unlock_queues();
+ vm_object_unlock(compressor_object);
+ }
KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0);
}
uint32_t count_discard_active = 0;
uint32_t count_discard_inactive = 0;
uint32_t count_discard_purgeable = 0;
+ uint32_t count_discard_cleaned = 0;
uint32_t count_discard_speculative = 0;
+
+#if MACH_ASSERT || DEBUG
+ vm_page_lock_queues();
+ if (vm_page_local_q) {
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+ VPL_LOCK(&lq->vpl_lock);
+ }
+ }
+#endif /* MACH_ASSERT || DEBUG */
+
clock_get_uptime(&start);
- m = (vm_page_t) queue_first(&vm_page_queue_zf);
- while (m && !queue_end(&vm_page_queue_zf, (queue_entry_t)m))
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
+ while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m))
{
- next = (vm_page_t) m->pageq.next;
- if (hibernate_page_bittst(page_list, m->phys_page))
+ assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
+
+ next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->pageq.next);
+ if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m)))
{
if (m->dirty)
count_discard_purgeable++;
for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ )
{
- m = (vm_page_t) queue_first(&vm_page_queue_speculative[i].age_q);
- while (m && !queue_end(&vm_page_queue_speculative[i].age_q, (queue_entry_t)m))
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
+ while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m))
{
- next = (vm_page_t) m->pageq.next;
- if (hibernate_page_bittst(page_list, m->phys_page))
+ assert(m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q);
+
+ next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->pageq.next);
+ if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m)))
{
count_discard_speculative++;
hibernate_discard_page(m);
}
}
- m = (vm_page_t) queue_first(&vm_page_queue_inactive);
- while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
+ while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m))
{
- next = (vm_page_t) m->pageq.next;
- if (hibernate_page_bittst(page_list, m->phys_page))
+ assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
+
+ next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->pageq.next);
+ if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m)))
{
if (m->dirty)
count_discard_purgeable++;
}
m = next;
}
+ /* XXX FBDP TODO: secluded queue */
- m = (vm_page_t) queue_first(&vm_page_queue_active);
- while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
+ while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m))
{
- next = (vm_page_t) m->pageq.next;
- if (hibernate_page_bittst(page_list, m->phys_page))
+ assert(m->vm_page_q_state == VM_PAGE_ON_ACTIVE_Q);
+
+ next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->pageq.next);
+ if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m)))
{
if (m->dirty)
count_discard_purgeable++;
m = next;
}
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
+ while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m))
+ {
+ assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
+
+ next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->pageq.next);
+ if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m)))
+ {
+ if (m->dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_cleaned++;
+ hibernate_discard_page(m);
+ }
+ m = next;
+ }
+
+#if MACH_ASSERT || DEBUG
+ if (vm_page_local_q) {
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+ VPL_UNLOCK(&lq->vpl_lock);
+ }
+ }
+ vm_page_unlock_queues();
+#endif /* MACH_ASSERT || DEBUG */
+
clock_get_uptime(&end);
absolutetime_to_nanoseconds(end - start, &nsec);
- HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d\n",
+ HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n",
nsec / 1000000ULL,
- count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative);
+ count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
+}
+
+boolean_t hibernate_paddr_map_inited = FALSE;
+unsigned int hibernate_teardown_last_valid_compact_indx = -1;
+vm_page_t hibernate_rebuild_hash_list = NULL;
+
+unsigned int hibernate_teardown_found_tabled_pages = 0;
+unsigned int hibernate_teardown_found_created_pages = 0;
+unsigned int hibernate_teardown_found_free_pages = 0;
+unsigned int hibernate_teardown_vm_page_free_count;
+
+
+struct ppnum_mapping {
+ struct ppnum_mapping *ppnm_next;
+ ppnum_t ppnm_base_paddr;
+ unsigned int ppnm_sindx;
+ unsigned int ppnm_eindx;
+};
+
+struct ppnum_mapping *ppnm_head;
+struct ppnum_mapping *ppnm_last_found = NULL;
+
+
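+/*
+ * build a list of ppnum_mapping entries, one per physically contiguous run of
+ * pages backing the vm_pages[] array, so that hibernate_lookup_paddr() can
+ * translate a vm_pages[] index into a physical page number after the array
+ * has been compacted by hibernate_teardown_vm_structs()
+ */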
+void
+hibernate_create_paddr_map()
+{
+ unsigned int i;
+ ppnum_t next_ppnum_in_run = 0;
+ struct ppnum_mapping *ppnm = NULL;
+
+ if (hibernate_paddr_map_inited == FALSE) {
+
+ for (i = 0; i < vm_pages_count; i++) {
+
+ if (ppnm)
+ ppnm->ppnm_eindx = i;
+
+ if (ppnm == NULL || VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) != next_ppnum_in_run) {
+
+ ppnm = kalloc(sizeof(struct ppnum_mapping));
+
+ ppnm->ppnm_next = ppnm_head;
+ ppnm_head = ppnm;
+
+ ppnm->ppnm_sindx = i;
+ ppnm->ppnm_base_paddr = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]);
+ }
+ next_ppnum_in_run = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) + 1;
+ }
+ ppnm->ppnm_eindx++;
+
+ hibernate_paddr_map_inited = TRUE;
+ }
+}
+
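+/*
+ * translate a vm_pages[] index into its physical page number using the
+ * ppnum_mapping runs built by hibernate_create_paddr_map()... the most
+ * recently matched run is cached in ppnm_last_found to speed up sequential lookups
+ */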
+ppnum_t
+hibernate_lookup_paddr(unsigned int indx)
+{
+ struct ppnum_mapping *ppnm = NULL;
+
+ ppnm = ppnm_last_found;
+
+ if (ppnm) {
+ if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx)
+ goto done;
+ }
+ for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {
+
+ if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
+ ppnm_last_found = ppnm;
+ break;
+ }
+ }
+ if (ppnm == NULL)
+ panic("hibernate_lookup_paddr of %d failed\n", indx);
+done:
+ return (ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx));
+}
+
+
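+/*
+ * mark every page wholly contained in the kernel virtual range [saddr, eaddr)
+ * in both hibernation bitmaps so it is treated as not needing to be written
+ * into the image... returns the number of pages marked
+ */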
+uint32_t
+hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
+{
+ addr64_t saddr_aligned;
+ addr64_t eaddr_aligned;
+ addr64_t addr;
+ ppnum_t paddr;
+ unsigned int mark_as_unneeded_pages = 0;
+
+ saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
+ eaddr_aligned = eaddr & ~PAGE_MASK_64;
+
+ for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {
+
+ paddr = pmap_find_phys(kernel_pmap, addr);
+
+ assert(paddr);
+
+ hibernate_page_bitset(page_list, TRUE, paddr);
+ hibernate_page_bitset(page_list_wired, TRUE, paddr);
+
+ mark_as_unneeded_pages++;
+ }
+ return (mark_as_unneeded_pages);
+}
+
+
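+/*
+ * re-insert a previously hashed page at the head of its object/offset hash
+ * bucket... used while rebuilding the hash table after wake from hibernation
+ */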
+void
+hibernate_hash_insert_page(vm_page_t mem)
+{
+ vm_page_bucket_t *bucket;
+ int hash_id;
+ vm_object_t m_object;
+
+ m_object = VM_PAGE_OBJECT(mem);
+
+ assert(mem->hashed);
+ assert(m_object);
+ assert(mem->offset != (vm_object_offset_t) -1);
+
+ /*
+ * Insert it into the object_object/offset hash table
+ */
+ hash_id = vm_page_hash(m_object, mem->offset);
+ bucket = &vm_page_buckets[hash_id];
+
+ mem->next_m = bucket->page_list;
+ bucket->page_list = VM_PAGE_PACK_PTR(mem);
+}
+
+
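+/*
+ * reinitialize vm_pages[sindx..eindx) as free pages and return them to the
+ * free queues... these are the 'holes' left behind when the teardown code
+ * compacted the vm_pages[] array before hibernation
+ */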
+void
+hibernate_free_range(int sindx, int eindx)
+{
+ vm_page_t mem;
+ unsigned int color;
+
+ while (sindx < eindx) {
+ mem = &vm_pages[sindx];
+
+ vm_page_init(mem, hibernate_lookup_paddr(sindx), FALSE);
+
+ mem->lopage = FALSE;
+ mem->vm_page_q_state = VM_PAGE_ON_FREE_Q;
+
+ color = VM_PAGE_GET_COLOR(mem);
+#if defined(__x86_64__)
+ vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead,
+ mem,
+ vm_page_t,
+ pageq);
+#else
+ vm_page_queue_enter(&vm_page_queue_free[color].qhead,
+ mem,
+ vm_page_t,
+ pageq);
+#endif
+ vm_page_free_count++;
+
+ sindx++;
+ }
+}
+
+
+extern void hibernate_rebuild_pmap_structs(void);
+
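+/*
+ * undo hibernate_teardown_vm_structs() after wake... move each compacted
+ * vm_page_t back to its original slot, re-free the holes, and rebuild the
+ * object/offset hash from scratch
+ */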
+void
+hibernate_rebuild_vm_structs(void)
+{
+ int i, cindx, sindx, eindx;
+ vm_page_t mem, tmem, mem_next;
+ AbsoluteTime startTime, endTime;
+ uint64_t nsec;
+
+ if (hibernate_rebuild_needed == FALSE)
+ return;
+
+ KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START);
+ HIBLOG("hibernate_rebuild started\n");
+
+ clock_get_uptime(&startTime);
+
+ hibernate_rebuild_pmap_structs();
+
+ bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t));
+ eindx = vm_pages_count;
+
+ /*
+ * Mark all the vm_pages[] that have not been initialized yet as being
+	 * transient. This is needed to ensure that the buddy page search is correct.
+	 * Without this, random data in these vm_pages[] can trip up the buddy search.
+ */
+ for (i = hibernate_teardown_last_valid_compact_indx+1; i < eindx; ++i)
+ vm_pages[i].vm_page_q_state = VM_PAGE_NOT_ON_Q;
+
+ for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
+
+ mem = &vm_pages[cindx];
+ assert(mem->vm_page_q_state != VM_PAGE_ON_FREE_Q);
+ /*
+ * hibernate_teardown_vm_structs leaves the location where
+		 * this vm_page_t must be located in "next_m".
+ */
+ tmem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->next_m));
+ mem->next_m = VM_PAGE_PACK_PTR(NULL);
+
+ sindx = (int)(tmem - &vm_pages[0]);
+
+ if (mem != tmem) {
+ /*
+ * this vm_page_t was moved by hibernate_teardown_vm_structs,
+ * so move it back to its real location
+ */
+ *tmem = *mem;
+ mem = tmem;
+ }
+ if (mem->hashed)
+ hibernate_hash_insert_page(mem);
+ /*
+ * the 'hole' between this vm_page_t and the previous
+ * vm_page_t we moved needs to be initialized as
+ * a range of free vm_page_t's
+ */
+ hibernate_free_range(sindx + 1, eindx);
+
+ eindx = sindx;
+ }
+ if (sindx)
+ hibernate_free_range(0, sindx);
+
+ assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);
+
+ /*
+ * process the list of vm_page_t's that were entered in the hash,
+	 * but were not located in the vm_pages array... these are
+ * vm_page_t's that were created on the fly (i.e. fictitious)
+ */
+ for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
+ mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->next_m));
+
+ mem->next_m = 0;
+ hibernate_hash_insert_page(mem);
+ }
+ hibernate_rebuild_hash_list = NULL;
+
+ clock_get_uptime(&endTime);
+ SUB_ABSOLUTETIME(&endTime, &startTime);
+ absolutetime_to_nanoseconds(endTime, &nsec);
+
+ HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL);
+
+ hibernate_rebuild_needed = FALSE;
+
+ KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END);
+}
+
+
+extern void hibernate_teardown_pmap_structs(addr64_t *, addr64_t *);
+
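+/*
+ * shrink the hibernation image by compacting the in-use entries of vm_pages[]
+ * toward the front of the array and marking the hash buckets, the tail of
+ * vm_pages[] and unneeded pmap data as pages that don't need to be saved...
+ * returns the total number of pages marked as unneeded
+ */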
+uint32_t
+hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
+{
+ unsigned int i;
+ unsigned int compact_target_indx;
+ vm_page_t mem, mem_next;
+ vm_page_bucket_t *bucket;
+ unsigned int mark_as_unneeded_pages = 0;
+ unsigned int unneeded_vm_page_bucket_pages = 0;
+ unsigned int unneeded_vm_pages_pages = 0;
+ unsigned int unneeded_pmap_pages = 0;
+ addr64_t start_of_unneeded = 0;
+ addr64_t end_of_unneeded = 0;
+
+
+ if (hibernate_should_abort())
+ return (0);
+
+ hibernate_rebuild_needed = TRUE;
+
+ HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n",
+ vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count,
+ vm_page_cleaned_count, compressor_object->resident_page_count);
+
+ for (i = 0; i < vm_page_bucket_count; i++) {
+
+ bucket = &vm_page_buckets[i];
+
+ for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)); mem != VM_PAGE_NULL; mem = mem_next) {
+ assert(mem->hashed);
+
+ mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->next_m));
+
+ if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) {
+ mem->next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list);
+ hibernate_rebuild_hash_list = mem;
+ }
+ }
+ }
+ unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0], (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired);
+ mark_as_unneeded_pages += unneeded_vm_page_bucket_pages;
+
+ hibernate_teardown_vm_page_free_count = vm_page_free_count;
+
+ compact_target_indx = 0;
+
+ for (i = 0; i < vm_pages_count; i++) {
+
+ mem = &vm_pages[i];
+
+ if (mem->vm_page_q_state == VM_PAGE_ON_FREE_Q) {
+ unsigned int color;
+
+ assert(mem->busy);
+ assert(!mem->lopage);
+
+ color = VM_PAGE_GET_COLOR(mem);
+
+ vm_page_queue_remove(&vm_page_queue_free[color].qhead,
+ mem,
+ vm_page_t,
+ pageq);
+
+ VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
+
+ vm_page_free_count--;
+
+ hibernate_teardown_found_free_pages++;
+
+ if (vm_pages[compact_target_indx].vm_page_q_state != VM_PAGE_ON_FREE_Q)
+ compact_target_indx = i;
+ } else {
+ /*
+ * record this vm_page_t's original location
+ * we need this even if it doesn't get moved
+ * as an indicator to the rebuild function that
+ * we don't have to move it
+ */
+ mem->next_m = VM_PAGE_PACK_PTR(mem);
+
+ if (vm_pages[compact_target_indx].vm_page_q_state == VM_PAGE_ON_FREE_Q) {
+ /*
+ * we've got a hole to fill, so
+				 * move this vm_page_t to its new home
+ */
+ vm_pages[compact_target_indx] = *mem;
+ mem->vm_page_q_state = VM_PAGE_ON_FREE_Q;
+
+ hibernate_teardown_last_valid_compact_indx = compact_target_indx;
+ compact_target_indx++;
+ } else
+ hibernate_teardown_last_valid_compact_indx = i;
+ }
+ }
+ unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx+1],
+ (addr64_t)&vm_pages[vm_pages_count-1], page_list, page_list_wired);
+ mark_as_unneeded_pages += unneeded_vm_pages_pages;
+
+ hibernate_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded);
+
+ if (start_of_unneeded) {
+ unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired);
+ mark_as_unneeded_pages += unneeded_pmap_pages;
+ }
+ HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages);
+
+ return (mark_as_unneeded_pages);
}
+
#endif /* HIBERNATION */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
lck_spin_lock(bucket_lock);
- for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
+ for (m = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
+ m != VM_PAGE_NULL;
+ m = (vm_page_t)(VM_PAGE_UNPACK_PTR(m->next_m)))
bucket_count++;
lck_spin_unlock(bucket_lock);
}
#endif /* MACH_VM_DEBUG */
-#include <mach_kdb.h>
-#if MACH_KDB
-
-#include <ddb/db_output.h>
-#include <vm/vm_print.h>
-#define printf kdbprintf
-
-/*
- * Routine: vm_page_print [exported]
- */
+#if VM_PAGE_BUCKETS_CHECK
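+/*
+ * walk every bucket of the object/offset hash and verify that each page found
+ * there is marked 'hashed' and actually hashes back to the bucket it is on
+ */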
void
-vm_page_print(
- db_addr_t db_addr)
+vm_page_buckets_check(void)
{
- vm_page_t p;
-
- p = (vm_page_t) (long) db_addr;
-
- iprintf("page 0x%x\n", p);
-
- db_indent += 2;
-
- iprintf("object=0x%x", p->object);
- printf(", offset=0x%x", p->offset);
- printf(", wire_count=%d", p->wire_count);
-
- iprintf("%slocal, %sinactive, %sactive, %sthrottled, %sgobbled, %slaundry, %sfree, %sref, %sencrypted\n",
- (p->local ? "" : "!"),
- (p->inactive ? "" : "!"),
- (p->active ? "" : "!"),
- (p->throttled ? "" : "!"),
- (p->gobbled ? "" : "!"),
- (p->laundry ? "" : "!"),
- (p->free ? "" : "!"),
- (p->reference ? "" : "!"),
- (p->encrypted ? "" : "!"));
- iprintf("%sbusy, %swanted, %stabled, %sfictitious, %sprivate, %sprecious\n",
- (p->busy ? "" : "!"),
- (p->wanted ? "" : "!"),
- (p->tabled ? "" : "!"),
- (p->fictitious ? "" : "!"),
- (p->private ? "" : "!"),
- (p->precious ? "" : "!"));
- iprintf("%sabsent, %serror, %sdirty, %scleaning, %spageout, %sclustered\n",
- (p->absent ? "" : "!"),
- (p->error ? "" : "!"),
- (p->dirty ? "" : "!"),
- (p->cleaning ? "" : "!"),
- (p->pageout ? "" : "!"),
- (p->clustered ? "" : "!"));
- iprintf("%soverwriting, %srestart, %sunusual\n",
- (p->overwriting ? "" : "!"),
- (p->restart ? "" : "!"),
- (p->unusual ? "" : "!"));
-
- iprintf("phys_page=0x%x", p->phys_page);
-
- db_indent -= 2;
+ unsigned int i;
+ vm_page_t p;
+ unsigned int p_hash;
+ vm_page_bucket_t *bucket;
+ lck_spin_t *bucket_lock;
+
+ if (!vm_page_buckets_check_ready) {
+ return;
+ }
+
+#if HIBERNATION
+ if (hibernate_rebuild_needed ||
+ hibernate_rebuild_hash_list) {
+ panic("BUCKET_CHECK: hibernation in progress: "
+ "rebuild_needed=%d rebuild_hash_list=%p\n",
+ hibernate_rebuild_needed,
+ hibernate_rebuild_hash_list);
+ }
+#endif /* HIBERNATION */
+
+#if VM_PAGE_FAKE_BUCKETS
+ char *cp;
+ for (cp = (char *) vm_page_fake_buckets_start;
+ cp < (char *) vm_page_fake_buckets_end;
+ cp++) {
+ if (*cp != 0x5a) {
+ panic("BUCKET_CHECK: corruption at %p in fake buckets "
+ "[0x%llx:0x%llx]\n",
+ cp,
+ (uint64_t) vm_page_fake_buckets_start,
+ (uint64_t) vm_page_fake_buckets_end);
+ }
+ }
+#endif /* VM_PAGE_FAKE_BUCKETS */
+
+ for (i = 0; i < vm_page_bucket_count; i++) {
+ vm_object_t p_object;
+
+ bucket = &vm_page_buckets[i];
+ if (!bucket->page_list) {
+ continue;
+ }
+
+ bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
+ lck_spin_lock(bucket_lock);
+ p = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
+
+ while (p != VM_PAGE_NULL) {
+ p_object = VM_PAGE_OBJECT(p);
+
+			/* compute the hash up front so the panic below reports a valid value */
+			p_hash = vm_page_hash(p_object, p->offset);
+
+			if (!p->hashed) {
+				panic("BUCKET_CHECK: page %p (%p,0x%llx) "
+				      "hash %d in bucket %d at %p "
+				      "is not hashed\n",
+				      p, p_object, p->offset,
+				      p_hash, i, bucket);
+			}
+ if (p_hash != i) {
+ panic("BUCKET_CHECK: corruption in bucket %d "
+ "at %p: page %p object %p offset 0x%llx "
+ "hash %d\n",
+ i, bucket, p, p_object, p->offset,
+ p_hash);
+ }
+ p = (vm_page_t)(VM_PAGE_UNPACK_PTR(p->next_m));
+ }
+ lck_spin_unlock(bucket_lock);
+ }
+
+// printf("BUCKET_CHECK: checked buckets\n");
+}
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
+/*
+ * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
+ * local queues if they exist... it's the only spot in the system where we add pages
+ * to those queues... once on those queues, those pages can only move to one of the
+ * global page queues or the free queues... they NEVER move from local q to local q.
+ * the 'local' state is stable when vm_page_queues_remove is called since we're behind
+ * the global vm_page_queue_lock at this point... we still need to take the local lock
+ * in case this operation is being run on a different CPU than the local queue's identity,
+ * but we don't have to worry about the page moving to a global queue or becoming wired
+ * while we're grabbing the local lock since those operations would require the global
+ * vm_page_queue_lock to be held, and we already own it.
+ *
+ * this is why it's safe to utilize the wire_count field in the vm_page_t as the local_id...
+ * 'wired' and local are ALWAYS mutually exclusive conditions.
+ */
+
+#if CONFIG_BACKGROUND_QUEUE
+void
+vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_backgroundq)
+#else
+void
+vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq)
+#endif
+{
+ boolean_t was_pageable = TRUE;
+ vm_object_t m_object;
+
+ m_object = VM_PAGE_OBJECT(mem);
+
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+
+ if (mem->vm_page_q_state == VM_PAGE_NOT_ON_Q)
+ {
+ assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
+#if CONFIG_BACKGROUND_QUEUE
+ if (remove_from_backgroundq == TRUE) {
+ vm_page_remove_from_backgroundq(mem);
+ }
+ if (mem->vm_page_on_backgroundq) {
+ assert(mem->vm_page_backgroundq.next != 0);
+ assert(mem->vm_page_backgroundq.prev != 0);
+ } else {
+ assert(mem->vm_page_backgroundq.next == 0);
+ assert(mem->vm_page_backgroundq.prev == 0);
+ }
+#endif /* CONFIG_BACKGROUND_QUEUE */
+ return;
+ }
+
+ if (mem->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR)
+ {
+ assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
+#if CONFIG_BACKGROUND_QUEUE
+ assert(mem->vm_page_backgroundq.next == 0 &&
+ mem->vm_page_backgroundq.prev == 0 &&
+ mem->vm_page_on_backgroundq == FALSE);
+#endif
+ return;
+ }
+ if (mem->vm_page_q_state == VM_PAGE_IS_WIRED) {
+ /*
+ * might put these guys on a list for debugging purposes
+ * if we do, we'll need to remove this assert
+ */
+ assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
+#if CONFIG_BACKGROUND_QUEUE
+ assert(mem->vm_page_backgroundq.next == 0 &&
+ mem->vm_page_backgroundq.prev == 0 &&
+ mem->vm_page_on_backgroundq == FALSE);
+#endif
+ return;
+ }
+
+ assert(m_object != compressor_object);
+ assert(m_object != kernel_object);
+ assert(m_object != vm_submap_object);
+ assert(!mem->fictitious);
+
+ switch(mem->vm_page_q_state) {
+
+ case VM_PAGE_ON_ACTIVE_LOCAL_Q:
+ {
+ struct vpl *lq;
+
+ lq = &vm_page_local_q[mem->local_id].vpl_un.vpl;
+ VPL_LOCK(&lq->vpl_lock);
+ vm_page_queue_remove(&lq->vpl_queue,
+ mem, vm_page_t, pageq);
+ mem->local_id = 0;
+ lq->vpl_count--;
+ if (m_object->internal) {
+ lq->vpl_internal_count--;
+ } else {
+ lq->vpl_external_count--;
+ }
+ VPL_UNLOCK(&lq->vpl_lock);
+ was_pageable = FALSE;
+ break;
+ }
+ case VM_PAGE_ON_ACTIVE_Q:
+ {
+ vm_page_queue_remove(&vm_page_queue_active,
+ mem, vm_page_t, pageq);
+ vm_page_active_count--;
+ break;
+ }
+
+ case VM_PAGE_ON_INACTIVE_INTERNAL_Q:
+ {
+ assert(m_object->internal == TRUE);
+
+ vm_page_inactive_count--;
+ vm_page_queue_remove(&vm_page_queue_anonymous,
+ mem, vm_page_t, pageq);
+ vm_page_anonymous_count--;
+ vm_purgeable_q_advance_all();
+ break;
+ }
+
+ case VM_PAGE_ON_INACTIVE_EXTERNAL_Q:
+ {
+ assert(m_object->internal == FALSE);
+
+ vm_page_inactive_count--;
+ vm_page_queue_remove(&vm_page_queue_inactive,
+ mem, vm_page_t, pageq);
+ vm_purgeable_q_advance_all();
+ break;
+ }
+
+ case VM_PAGE_ON_INACTIVE_CLEANED_Q:
+ {
+ assert(m_object->internal == FALSE);
+
+ vm_page_inactive_count--;
+ vm_page_queue_remove(&vm_page_queue_cleaned,
+ mem, vm_page_t, pageq);
+ vm_page_cleaned_count--;
+ break;
+ }
+
+ case VM_PAGE_ON_THROTTLED_Q:
+ {
+ assert(m_object->internal == TRUE);
+
+ vm_page_queue_remove(&vm_page_queue_throttled,
+ mem, vm_page_t, pageq);
+ vm_page_throttled_count--;
+ was_pageable = FALSE;
+ break;
+ }
+
+ case VM_PAGE_ON_SPECULATIVE_Q:
+ {
+ assert(m_object->internal == FALSE);
+
+ vm_page_remque(&mem->pageq);
+ vm_page_speculative_count--;
+ break;
+ }
+
+#if CONFIG_SECLUDED_MEMORY
+ case VM_PAGE_ON_SECLUDED_Q:
+ {
+ vm_page_queue_remove(&vm_page_queue_secluded,
+ mem, vm_page_t, pageq);
+ vm_page_secluded_count--;
+ if (m_object == VM_OBJECT_NULL) {
+ vm_page_secluded_count_free--;
+ was_pageable = FALSE;
+ } else {
+ assert(!m_object->internal);
+ vm_page_secluded_count_inuse--;
+ was_pageable = FALSE;
+// was_pageable = TRUE;
+ }
+ break;
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+ default:
+ {
+ /*
+ * if (mem->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q)
+ * NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue...
+		 *	the caller is responsible for determining if the page is on that queue, and if so, must
+ * either first remove it (it needs both the page queues lock and the object lock to do
+ * this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove
+ *
+ * we also don't expect to encounter VM_PAGE_ON_FREE_Q, VM_PAGE_ON_FREE_LOCAL_Q, VM_PAGE_ON_FREE_LOPAGE_Q
+ * or any of the undefined states
+ */
+ panic("vm_page_queues_remove - bad page q_state (%p, %d)\n", mem, mem->vm_page_q_state);
+ break;
+ }
+
+ }
+ VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
+ mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;
+
+#if CONFIG_BACKGROUND_QUEUE
+ if (remove_from_backgroundq == TRUE)
+ vm_page_remove_from_backgroundq(mem);
+#endif
+ if (was_pageable) {
+ if (m_object->internal) {
+ vm_page_pageable_internal_count--;
+ } else {
+ vm_page_pageable_external_count--;
+ }
+ }
+}
+
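+/*
+ * remove a page from its object's resident page list (memq), fixing up the
+ * object's memq_hint if it currently points at this page
+ */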
+void
+vm_page_remove_internal(vm_page_t page)
+{
+ vm_object_t __object = VM_PAGE_OBJECT(page);
+ if (page == __object->memq_hint) {
+ vm_page_t __new_hint;
+ vm_page_queue_entry_t __qe;
+ __qe = (vm_page_queue_entry_t)vm_page_queue_next(&page->listq);
+ if (vm_page_queue_end(&__object->memq, __qe)) {
+ __qe = (vm_page_queue_entry_t)vm_page_queue_prev(&page->listq);
+ if (vm_page_queue_end(&__object->memq, __qe)) {
+ __qe = NULL;
+ }
+ }
+ __new_hint = (vm_page_t)((uintptr_t) __qe);
+ __object->memq_hint = __new_hint;
+ }
+ vm_page_queue_remove(&__object->memq, page, vm_page_t, listq);
+#if CONFIG_SECLUDED_MEMORY
+ if (__object->eligible_for_secluded) {
+ vm_page_secluded.eligible_for_secluded--;
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+}
+
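+/*
+ * place a page that is currently on no queue onto the appropriate inactive
+ * queue... anonymous (internal) pages go on vm_page_queue_anonymous, file-backed
+ * pages on vm_page_queue_inactive... 'first' selects head vs tail insertion
+ */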
+void
+vm_page_enqueue_inactive(vm_page_t mem, boolean_t first)
+{
+ vm_object_t m_object;
+
+ m_object = VM_PAGE_OBJECT(mem);
+
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+ assert(!mem->fictitious);
+ assert(!mem->laundry);
+ assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ vm_page_check_pageable_safe(mem);
+
+ if (m_object->internal) {
+ mem->vm_page_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
+
+ if (first == TRUE)
+ vm_page_queue_enter_first(&vm_page_queue_anonymous, mem, vm_page_t, pageq);
+ else
+ vm_page_queue_enter(&vm_page_queue_anonymous, mem, vm_page_t, pageq);
+
+ vm_page_anonymous_count++;
+ vm_page_pageable_internal_count++;
+ } else {
+ mem->vm_page_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
+
+ if (first == TRUE)
+ vm_page_queue_enter_first(&vm_page_queue_inactive, mem, vm_page_t, pageq);
+ else
+ vm_page_queue_enter(&vm_page_queue_inactive, mem, vm_page_t, pageq);
+
+ vm_page_pageable_external_count++;
+ }
+ vm_page_inactive_count++;
+ token_new_pagecount++;
+
+#if CONFIG_BACKGROUND_QUEUE
+ if (mem->vm_page_in_background)
+ vm_page_add_to_backgroundq(mem, FALSE);
+#endif
+}
+
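+/*
+ * place a page that is currently on no queue onto the active queue...
+ * 'first' selects head vs tail insertion
+ */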
+void
+vm_page_enqueue_active(vm_page_t mem, boolean_t first)
+{
+ vm_object_t m_object;
+
+ m_object = VM_PAGE_OBJECT(mem);
+
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+ assert(!mem->fictitious);
+ assert(!mem->laundry);
+ assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ vm_page_check_pageable_safe(mem);
+
+ mem->vm_page_q_state = VM_PAGE_ON_ACTIVE_Q;
+ if (first == TRUE)
+ vm_page_queue_enter_first(&vm_page_queue_active, mem, vm_page_t, pageq);
+ else
+ vm_page_queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
+ vm_page_active_count++;
+
+ if (m_object->internal) {
+ vm_page_pageable_internal_count++;
+ } else {
+ vm_page_pageable_external_count++;
+ }
+
+#if CONFIG_BACKGROUND_QUEUE
+ if (mem->vm_page_in_background)
+ vm_page_add_to_backgroundq(mem, FALSE);
+#endif
+}
+
+/*
+ * Pages from special kernel objects shouldn't
+ * be placed on pageable queues.
+ */
+void
+vm_page_check_pageable_safe(vm_page_t page)
+{
+ vm_object_t page_object;
+
+ page_object = VM_PAGE_OBJECT(page);
+
+ if (page_object == kernel_object) {
+		panic("vm_page_check_pageable_safe: trying to add page " \
+ "from kernel object (%p) to pageable queue", kernel_object);
+ }
+
+ if (page_object == compressor_object) {
+		panic("vm_page_check_pageable_safe: trying to add page " \
+ "from compressor object (%p) to pageable queue", compressor_object);
+ }
+
+ if (page_object == vm_submap_object) {
+		panic("vm_page_check_pageable_safe: trying to add page " \
+ "from submap object (%p) to pageable queue", vm_submap_object);
+ }
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * wired page diagnose
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include <libkern/OSKextLibPrivate.h>
+
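+/*
+ * An allocation site is laid out as the vm_allocation_site structure,
+ * followed by its subtotals array, followed by its name string:
+ * KA_SIZE() computes that footprint, KA_NAME() locates the name, and
+ * KA_NAME_LEN() recovers the stored name length from the flags field.
+ */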
+#define KA_SIZE(namelen, subtotalscount) \
+ (sizeof(struct vm_allocation_site) + (namelen) + 1 + ((subtotalscount) * sizeof(struct vm_allocation_total)))
+
+#define KA_NAME(alloc) \
+ ((char *)(&(alloc)->subtotals[(alloc)->subtotalscount]))
+
+#define KA_NAME_LEN(alloc) \
+ (VM_TAG_NAME_LEN_MAX & ((alloc)->flags >> VM_TAG_NAME_LEN_SHIFT))
+
+vm_tag_t
+vm_tag_bt(void)
+{
+ uintptr_t* frameptr;
+ uintptr_t* frameptr_next;
+ uintptr_t retaddr;
+ uintptr_t kstackb, kstackt;
+ const vm_allocation_site_t * site;
+ thread_t cthread;
+ kern_allocation_name_t name;
+
+ cthread = current_thread();
+ if (__improbable(cthread == NULL)) return VM_KERN_MEMORY_OSFMK;
+
+ if ((name = thread_get_kernel_state(cthread)->allocation_name))
+ {
+ if (!name->tag) vm_tag_alloc(name);
+ return name->tag;
+ }
+
+ kstackb = cthread->kernel_stack;
+ kstackt = kstackb + kernel_stack_size;
+
+ /* Load stack frame pointer (EBP on x86) into frameptr */
+ frameptr = __builtin_frame_address(0);
+ site = NULL;
+ while (frameptr != NULL)
+ {
+ /* Verify thread stack bounds */
+ if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) break;
+
+ /* The caller's saved frame pointer is stored at the base of the current frame */
+ frameptr_next = (uintptr_t*) *frameptr;
+
+ /* Pull return address from one spot above the frame pointer */
+ retaddr = *(frameptr + 1);
+
+
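+ /* a return address outside the kernel's own text is presumably a kext caller; ask OSKext for its allocation site */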
+ if ((retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top))
+ {
+ site = OSKextGetAllocationSiteForCaller(retaddr);
+ break;
+ }
+ frameptr = frameptr_next;
+ }
+
+ return (site ? site->tag : VM_KERN_MEMORY_NONE);
+}
+
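+/*
+ * Bitmap of tag values that are currently free for dynamic assignment;
+ * bits are handed out from the most-significant end of each word.
+ */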
+static uint64_t free_tag_bits[VM_MAX_TAG_VALUE/64];
+
+void
+vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP)
+{
+ vm_tag_t tag;
+ uint64_t avail;
+ uint32_t idx;
+ vm_allocation_site_t * prev;
+
+ if (site->tag) return;
+
+ idx = 0;
+ while (TRUE)
+ {
+ avail = free_tag_bits[idx];
+ if (avail)
+ {
+ tag = __builtin_clzll(avail);
+ avail &= ~(1ULL << (63 - tag));
+ free_tag_bits[idx] = avail;
+ tag += (idx << 6);
+ break;
+ }
+ idx++;
+ if (idx >= ARRAY_COUNT(free_tag_bits))
+ {
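+ /*
+ * No free bits remain: try to reclaim the tag of a named site that
+ * appears no longer in use (zero total, single remaining reference).
+ */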
+ for (idx = 0; idx < ARRAY_COUNT(vm_allocation_sites); idx++)
+ {
+ prev = vm_allocation_sites[idx];
+ if (!prev) continue;
+ if (!KA_NAME_LEN(prev)) continue;
+ if (!prev->tag) continue;
+ if (prev->total) continue;
+ if (1 != prev->refcount) continue;
+
+ assert(idx == prev->tag);
+ tag = idx;
+ prev->tag = VM_KERN_MEMORY_NONE;
+ *releasesiteP = prev;
+ break;
+ }
+ if (idx >= ARRAY_COUNT(vm_allocation_sites))
+ {
+ tag = VM_KERN_MEMORY_ANY;
+ }
+ break;
+ }
+ }
+ site->tag = tag;
+
+ OSAddAtomic16(1, &site->refcount);
+
+ if (VM_KERN_MEMORY_ANY != tag) vm_allocation_sites[tag] = site;
+
+ if (tag > vm_allocation_tag_highest) vm_allocation_tag_highest = tag;
+}
+
+static void
+vm_tag_free_locked(vm_tag_t tag)
+{
+ uint64_t avail;
+ uint32_t idx;
+ uint64_t bit;
+
+ if (VM_KERN_MEMORY_ANY == tag) return;
+
+ idx = (tag >> 6);
+ avail = free_tag_bits[idx];
+ tag &= 63;
+ bit = (1ULL << (63 - tag));
+ assert(!(avail & bit));
+ free_tag_bits[idx] = (avail | bit);
+}
+
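+/*
+ * Mark every dynamically assignable tag as free, leaving the reserved
+ * VM_KERN_MEMORY_ANY value out of the bitmap.
+ */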
+static void
+vm_tag_init(void)
+{
+ vm_tag_t tag;
+ for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++)
+ {
+ vm_tag_free_locked(tag);
+ }
+
+ for (tag = VM_KERN_MEMORY_ANY + 1; tag < VM_MAX_TAG_VALUE; tag++)
+ {
+ vm_tag_free_locked(tag);
+ }
+}
+
+vm_tag_t
+vm_tag_alloc(vm_allocation_site_t * site)
+{
+ vm_tag_t tag;
+ vm_allocation_site_t * releasesite;
+
+ if (VM_TAG_BT & site->flags)
+ {
+ tag = vm_tag_bt();
+ if (VM_KERN_MEMORY_NONE != tag) return (tag);
+ }
+
+ if (!site->tag)
+ {
+ releasesite = NULL;
+ lck_spin_lock(&vm_allocation_sites_lock);
+ vm_tag_alloc_locked(site, &releasesite);
+ lck_spin_unlock(&vm_allocation_sites_lock);
+ if (releasesite) kern_allocation_name_release(releasesite);
+ }
+
+ return (site->tag);
+}
+
+void
+vm_tag_update_size(vm_tag_t tag, int64_t delta)
+{
+ vm_allocation_site_t * allocation;
+ uint64_t prior;
+
+ assert(VM_KERN_MEMORY_NONE != tag);
+ assert(tag < VM_MAX_TAG_VALUE);
+
+ allocation = vm_allocation_sites[tag];
+ assert(allocation);
+
+ if (delta < 0) {
+ assertf(allocation->total >= ((uint64_t)-delta), "tag %d, site %p", tag, allocation);
+ }
+ prior = OSAddAtomic64(delta, &allocation->total);
+
+#if DEBUG || DEVELOPMENT
+
+ uint64_t new, peak;
+ new = prior + delta;
+ do
+ {
+ peak = allocation->peak;
+ if (new <= peak) break;
+ }
+ while (!OSCompareAndSwap64(peak, new, &allocation->peak));
+
+#endif /* DEBUG || DEVELOPMENT */
+
+ if (tag < VM_KERN_MEMORY_FIRST_DYNAMIC) return;
+
+ if (!prior && !allocation->tag) vm_tag_alloc(allocation);
+}
+
+void
+kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta)
+{
+ uint64_t prior;
+
+ if (delta < 0) {
+ assertf(allocation->total >= ((uint64_t)-delta), "name %p", allocation);
+ }
+ prior = OSAddAtomic64(delta, &allocation->total);
+
+#if DEBUG || DEVELOPMENT
+
+ uint64_t new, peak;
+ new = prior + delta;
+ do
+ {
+ peak = allocation->peak;
+ if (new <= peak) break;
+ }
+ while (!OSCompareAndSwap64(peak, new, &allocation->peak));
+
+#endif /* DEBUG || DEVELOPMENT */
+
+ if (!prior && !allocation->tag) vm_tag_alloc(allocation);
+}
+
+#if VM_MAX_TAG_ZONES
+
+void
+vm_allocation_zones_init(void)
+{
+ kern_return_t ret;
+ vm_offset_t addr;
+ vm_size_t size;
+
+ size = VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t **)
+ + 2 * VM_MAX_TAG_ZONES * sizeof(vm_allocation_zone_total_t);
+
+ ret = kernel_memory_allocate(kernel_map,
+ &addr, round_page(size), 0,
+ KMA_ZERO, VM_KERN_MEMORY_DIAG);
+ assert(KERN_SUCCESS == ret);
+
+ vm_allocation_zone_totals = (vm_allocation_zone_total_t **) addr;
+ addr += VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t **);
+
+ // prepopulate VM_KERN_MEMORY_DIAG & VM_KERN_MEMORY_KALLOC so allocations
+ // in vm_tag_update_zone_size() won't recurse
+ vm_allocation_zone_totals[VM_KERN_MEMORY_DIAG] = (vm_allocation_zone_total_t *) addr;
+ addr += VM_MAX_TAG_ZONES * sizeof(vm_allocation_zone_total_t);
+ vm_allocation_zone_totals[VM_KERN_MEMORY_KALLOC] = (vm_allocation_zone_total_t *) addr;
+}
+
+void
+vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx)
+{
+ vm_allocation_zone_total_t * zone;
+
+ assert(VM_KERN_MEMORY_NONE != tag);
+ assert(tag < VM_MAX_TAG_VALUE);
+
+ if (zidx >= VM_MAX_TAG_ZONES) return;
+
+ zone = vm_allocation_zone_totals[tag];
+ if (!zone)
+ {
+ zone = kalloc_tag(VM_MAX_TAG_ZONES * sizeof(*zone), VM_KERN_MEMORY_DIAG);
+ if (!zone) return;
+ bzero(zone, VM_MAX_TAG_ZONES * sizeof(*zone));
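+ /* another thread may have installed a per-zone array first; free ours if the swap loses */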
+ if (!OSCompareAndSwapPtr(NULL, zone, &vm_allocation_zone_totals[tag]))
+ {
+ kfree(zone, VM_MAX_TAG_ZONES * sizeof(*zone));
+ }
+ }
+}
+
+void
+vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, int64_t delta, int64_t dwaste)
+{
+ vm_allocation_zone_total_t * zone;
+ uint32_t new;
+
+ assert(VM_KERN_MEMORY_NONE != tag);
+ assert(tag < VM_MAX_TAG_VALUE);
+
+ if (zidx >= VM_MAX_TAG_ZONES) return;
+
+ zone = vm_allocation_zone_totals[tag];
+ assert(zone);
+ zone += zidx;
+
+ /* the zone is locked */
+ if (delta < 0)
+ {
+ assertf(zone->total >= ((uint64_t)-delta), "zidx %d, tag %d, %p", zidx, tag, zone);
+ zone->total += delta;
+ }
+ else
+ {
+ zone->total += delta;
+ if (zone->total > zone->peak) zone->peak = zone->total;
+ if (dwaste)
+ {
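+ /*
+ * Maintain a decaying sum of per-allocation waste: wastediv counts
+ * samples until it saturates at 65536, after which older waste is
+ * aged out before the new amount is accumulated.
+ */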
+ new = zone->waste;
+ if (zone->wastediv < 65536) zone->wastediv++;
+ else new -= (new >> 16);
+ __assert_only bool ov = os_add_overflow(new, dwaste, &new);
+ assert(!ov);
+ zone->waste = new;
+ }
+ }
+}
+
+#endif /* VM_MAX_TAG_ZONES */
+
+void
+kern_allocation_update_subtotal(kern_allocation_name_t allocation, uint32_t subtag, int64_t delta)
+{
+ kern_allocation_name_t other;
+ struct vm_allocation_total * total;
+ uint32_t subidx;
+
+ subidx = 0;
+ assert(VM_KERN_MEMORY_NONE != subtag);
+ for (; subidx < allocation->subtotalscount; subidx++)
+ {
+ if (VM_KERN_MEMORY_NONE == allocation->subtotals[subidx].tag)
+ {
+ allocation->subtotals[subidx].tag = subtag;
+ break;
+ }
+ if (subtag == allocation->subtotals[subidx].tag) break;
+ }
+ assert(subidx < allocation->subtotalscount);
+ if (subidx >= allocation->subtotalscount) return;
+
+ total = &allocation->subtotals[subidx];
+ other = vm_allocation_sites[subtag];
+ assert(other);
+
+ if (delta < 0)
+ {
+ assertf(total->total >= ((uint64_t)-delta), "name %p", allocation);
+ OSAddAtomic64(delta, &total->total);
+ assertf(other->mapped >= ((uint64_t)-delta), "other %p", other);
+ OSAddAtomic64(delta, &other->mapped);
+ }
+ else
+ {
+ OSAddAtomic64(delta, &other->mapped);
+ OSAddAtomic64(delta, &total->total);
+ }
+}
+
+const char *
+kern_allocation_get_name(kern_allocation_name_t allocation)
+{
+ return (KA_NAME(allocation));
+}
+
+kern_allocation_name_t
+kern_allocation_name_allocate(const char * name, uint32_t subtotalscount)
+{
+ uint32_t namelen;
+
+ namelen = (uint32_t) strnlen(name, MACH_MEMORY_INFO_NAME_MAX_LEN - 1);
+
+ kern_allocation_name_t allocation;
+ allocation = kalloc(KA_SIZE(namelen, subtotalscount));
+ bzero(allocation, KA_SIZE(namelen, subtotalscount));
+
+ allocation->refcount = 1;
+ allocation->subtotalscount = subtotalscount;
+ allocation->flags = (namelen << VM_TAG_NAME_LEN_SHIFT);
+ strlcpy(KA_NAME(allocation), name, namelen + 1);
+
+ return (allocation);
+}
+
+void
+kern_allocation_name_release(kern_allocation_name_t allocation)
+{
+ assert(allocation->refcount > 0);
+ if (1 == OSAddAtomic16(-1, &allocation->refcount))
+ {
+ kfree(allocation, KA_SIZE(KA_NAME_LEN(allocation), allocation->subtotalscount));
+ }
+}
+
+vm_tag_t
+kern_allocation_name_get_vm_tag(kern_allocation_name_t allocation)
+{
+ return (vm_tag_alloc(allocation));
+}
+
+static void
+vm_page_count_object(mach_memory_info_t * info, unsigned int __unused num_info, vm_object_t object)
+{
+ if (!object->wired_page_count) return;
+ if (object != kernel_object)
+ {
+ assert(object->wire_tag < num_info);
+ info[object->wire_tag].size += ptoa_64(object->wired_page_count);
+ }
+}
+
+typedef void (*vm_page_iterate_proc)(mach_memory_info_t * info,
+ unsigned int num_info, vm_object_t object);
+
+static void
+vm_page_iterate_purgeable_objects(mach_memory_info_t * info, unsigned int num_info,
+ vm_page_iterate_proc proc, purgeable_q_t queue,
+ int group)
+{
+ vm_object_t object;
+
+ for (object = (vm_object_t) queue_first(&queue->objq[group]);
+ !queue_end(&queue->objq[group], (queue_entry_t) object);
+ object = (vm_object_t) queue_next(&object->objq))
+ {
+ proc(info, num_info, object);
+ }
+}
+
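+/*
+ * Apply 'proc' to every wired object and to every object on the
+ * purgeable nonvolatile and volatile queues.
+ */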
+static void
+vm_page_iterate_objects(mach_memory_info_t * info, unsigned int num_info,
+ vm_page_iterate_proc proc)
+{
+ purgeable_q_t volatile_q;
+ queue_head_t * nonvolatile_q;
+ vm_object_t object;
+ int group;
+
+ lck_spin_lock(&vm_objects_wired_lock);
+ queue_iterate(&vm_objects_wired,
+ object,
+ vm_object_t,
+ objq)
+ {
+ proc(info, num_info, object);
+ }
+ lck_spin_unlock(&vm_objects_wired_lock);
+
+ lck_mtx_lock(&vm_purgeable_queue_lock);
+ nonvolatile_q = &purgeable_nonvolatile_queue;
+ for (object = (vm_object_t) queue_first(nonvolatile_q);
+ !queue_end(nonvolatile_q, (queue_entry_t) object);
+ object = (vm_object_t) queue_next(&object->objq))
+ {
+ proc(info, num_info, object);
+ }
+
+ volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
+ vm_page_iterate_purgeable_objects(info, num_info, proc, volatile_q, 0);
+
+ volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
+ for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
+ {
+ vm_page_iterate_purgeable_objects(info, num_info, proc, volatile_q, group);
+ }
+
+ volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
+ for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
+ {
+ vm_page_iterate_purgeable_objects(info, num_info, proc, volatile_q, group);
+ }
+ lck_mtx_unlock(&vm_purgeable_queue_lock);
+}
+
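+/*
+ * Populate the caller's mach_memory_info array from the allocation-site
+ * table: copy each site's totals, expand per-zone entries when zone
+ * tagging is enabled, and, for sites with subtotals, move the memory
+ * they account for from the underlying tags onto the named site.
+ */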
+static uint64_t
+process_account(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes, boolean_t iterated)
+{
+ size_t namelen;
+ unsigned int idx, count, nextinfo;
+ vm_allocation_site_t * site;
+ lck_spin_lock(&vm_allocation_sites_lock);
+
+ for (idx = 0; idx <= vm_allocation_tag_highest; idx++)
+ {
+ site = vm_allocation_sites[idx];
+ if (!site) continue;
+ info[idx].mapped = site->mapped;
+ info[idx].tag = site->tag;
+ if (!iterated)
+ {
+ info[idx].size = site->total;
+#if DEBUG || DEVELOPMENT
+ info[idx].peak = site->peak;
+#endif /* DEBUG || DEVELOPMENT */
+ }
+ else
+ {
+ if (!site->subtotalscount && (site->total != info[idx].size))
+ {
+ printf("tag mismatch[%d] 0x%qx, iter 0x%qx\n", idx, site->total, info[idx].size);
+ info[idx].size = site->total;
+ }
+ }
+ }
+
+ nextinfo = (vm_allocation_tag_highest + 1);
+ count = nextinfo;
+ if (count >= num_info) count = num_info;
+
+ for (idx = 0; idx < count; idx++)
+ {
+ site = vm_allocation_sites[idx];
+ if (!site) continue;
+ info[idx].flags |= VM_KERN_SITE_WIRED;
+ if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC)
+ {
+ info[idx].site = idx;
+ info[idx].flags |= VM_KERN_SITE_TAG;
+ if (VM_KERN_MEMORY_ZONE == idx)
+ {
+ info[idx].flags |= VM_KERN_SITE_HIDE;
+ info[idx].flags &= ~VM_KERN_SITE_WIRED;
+ info[idx].collectable_bytes = zones_collectable_bytes;
+ }
+ }
+ else if ((namelen = (VM_TAG_NAME_LEN_MAX & (site->flags >> VM_TAG_NAME_LEN_SHIFT))))
+ {
+ info[idx].site = 0;
+ info[idx].flags |= VM_KERN_SITE_NAMED;
+ if (namelen > sizeof(info[idx].name)) namelen = sizeof(info[idx].name);
+ strncpy(&info[idx].name[0], KA_NAME(site), namelen);
+ }
+ else if (VM_TAG_KMOD & site->flags)
+ {
+ info[idx].site = OSKextGetKmodIDForSite(site, NULL, 0);
+ info[idx].flags |= VM_KERN_SITE_KMOD;
+ }
+ else
+ {
+ info[idx].site = VM_KERNEL_UNSLIDE(site);
+ info[idx].flags |= VM_KERN_SITE_KERNEL;
+ }
+#if VM_MAX_TAG_ZONES
+ vm_allocation_zone_total_t * zone;
+ unsigned int zidx;
+ vm_size_t elem_size;
+
+ if (vm_allocation_zone_totals
+ && (zone = vm_allocation_zone_totals[idx])
+ && (nextinfo < num_info))
+ {
+ for (zidx = 0; zidx < VM_MAX_TAG_ZONES; zidx++)
+ {
+ if (!zone[zidx].peak) continue;
+ info[nextinfo] = info[idx];
+ info[nextinfo].zone = zone_index_from_tag_index(zidx, &elem_size);
+ info[nextinfo].flags &= ~VM_KERN_SITE_WIRED;
+ info[nextinfo].flags |= VM_KERN_SITE_ZONE;
+ info[nextinfo].size = zone[zidx].total;
+ info[nextinfo].peak = zone[zidx].peak;
+ info[nextinfo].mapped = 0;
+ if (zone[zidx].wastediv)
+ {
+ info[nextinfo].collectable_bytes = ((zone[zidx].waste * zone[zidx].total / elem_size) / zone[zidx].wastediv);
+ }
+ nextinfo++;
+ }
+ }
+#endif /* VM_MAX_TAG_ZONES */
+ if (site->subtotalscount)
+ {
+ uint64_t mapped, mapcost, take;
+ uint32_t sub;
+ vm_tag_t alloctag;
+
+ info[idx].size = site->total;
+ mapped = info[idx].size;
+ info[idx].mapped = mapped;
+ mapcost = 0;
+ for (sub = 0; sub < site->subtotalscount; sub++)
+ {
+ alloctag = site->subtotals[sub].tag;
+ assert(alloctag < num_info);
+ if (info[alloctag].name[0]) continue;
+ take = info[alloctag].mapped;
+ if (take > info[alloctag].size) take = info[alloctag].size;
+ if (take > mapped) take = mapped;
+ info[alloctag].mapped -= take;
+ info[alloctag].size -= take;
+ mapped -= take;
+ mapcost += take;
+ }
+ info[idx].size = mapcost;
+ }
+ }
+ lck_spin_unlock(&vm_allocation_sites_lock);
+
+ return (0);
+}
+
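+/*
+ * Estimate (with some slop) how many mach_memory_info entries
+ * vm_page_diagnose() will need for the current set of tags.
+ */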
+uint32_t
+vm_page_diagnose_estimate(void)
+{
+ vm_allocation_site_t * site;
+ uint32_t count;
+ uint32_t idx;
+
+ lck_spin_lock(&vm_allocation_sites_lock);
+ for (count = idx = 0; idx < VM_MAX_TAG_VALUE; idx++)
+ {
+ site = vm_allocation_sites[idx];
+ if (!site) continue;
+ count++;
+#if VM_MAX_TAG_ZONES
+ if (vm_allocation_zone_totals)
+ {
+ vm_allocation_zone_total_t * zone;
+ zone = vm_allocation_zone_totals[idx];
+ if (!zone) continue;
+ for (uint32_t zidx = 0; zidx < VM_MAX_TAG_ZONES; zidx++) if (zone[zidx].peak) count++;
+ }
+#endif
+ }
+ lck_spin_unlock(&vm_allocation_sites_lock);
+
+ /* some slop for new tags created */
+ count += 8;
+ count += VM_KERN_COUNTER_COUNT;
+
+ return (count);
+}
+
+
+kern_return_t
+vm_page_diagnose(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes)
+{
+ uint64_t wired_size;
+ uint64_t wired_managed_size;
+ uint64_t wired_reserved_size;
+ uint64_t booter_size;
+ boolean_t iterate;
+ mach_memory_info_t * counts;
+
+ bzero(info, num_info * sizeof(mach_memory_info_t));
+
+ if (!vm_page_wire_count_initial) return (KERN_ABORTED);
+
+#if CONFIG_EMBEDDED
+ wired_size = ptoa_64(vm_page_wire_count);
+ wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count);
+#else
+ wired_size = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count);
+ wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count);
+#endif
+ wired_managed_size = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial);
+
+ booter_size = ml_get_booter_memory_size();
+ wired_size += booter_size;
+
+ assert(num_info >= VM_KERN_COUNTER_COUNT);
+ num_info -= VM_KERN_COUNTER_COUNT;
+ counts = &info[num_info];
+
+#define SET_COUNT(xcount, xsize, xflags) \
+ counts[xcount].tag = VM_MAX_TAG_VALUE + xcount; \
+ counts[xcount].site = (xcount); \
+ counts[xcount].size = (xsize); \
+ counts[xcount].mapped = (xsize); \
+ counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags;
+
+ SET_COUNT(VM_KERN_COUNT_MANAGED, ptoa_64(vm_page_pages), 0);
+ SET_COUNT(VM_KERN_COUNT_WIRED, wired_size, 0);
+ SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED, wired_managed_size, 0);
+ SET_COUNT(VM_KERN_COUNT_RESERVED, wired_reserved_size, VM_KERN_SITE_WIRED);
+ SET_COUNT(VM_KERN_COUNT_STOLEN, ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED);
+ SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED);
+ SET_COUNT(VM_KERN_COUNT_WIRED_BOOT, ptoa_64(vm_page_wire_count_on_boot), 0);
+ SET_COUNT(VM_KERN_COUNT_BOOT_STOLEN, booter_size, VM_KERN_SITE_WIRED);
+
+#define SET_MAP(xcount, xsize, xfree, xlargest) \
+ counts[xcount].site = (xcount); \
+ counts[xcount].size = (xsize); \
+ counts[xcount].mapped = (xsize); \
+ counts[xcount].free = (xfree); \
+ counts[xcount].largest = (xlargest); \
+ counts[xcount].flags = VM_KERN_SITE_COUNTER;
+
+ vm_map_size_t map_size, map_free, map_largest;
+
+ vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest);
+ SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest);
+
+ vm_map_sizes(zone_map, &map_size, &map_free, &map_largest);
+ SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest);
+
+ vm_map_sizes(kalloc_map, &map_size, &map_free, &map_largest);
+ SET_MAP(VM_KERN_COUNT_MAP_KALLOC, map_size, map_free, map_largest);
+
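+ /*
+ * When tags are not actively updated at allocation time, rebuild the
+ * per-tag wired counts by walking the wired objects and the kernel
+ * maps (descending one level into submaps).
+ */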
+ iterate = !VM_TAG_ACTIVE_UPDATE;
+ if (iterate)
+ {
+ enum { kMaxKernelDepth = 1 };
+ vm_map_t maps [kMaxKernelDepth];
+ vm_map_entry_t entries[kMaxKernelDepth];
+ vm_map_t map;
+ vm_map_entry_t entry;
+ vm_object_offset_t offset;
+ vm_page_t page;
+ int stackIdx, count;
+
+ vm_page_iterate_objects(info, num_info, &vm_page_count_object);
+
+ map = kernel_map;
+ stackIdx = 0;
+ while (map)
+ {
+ vm_map_lock(map);
+ for (entry = map->hdr.links.next; map; entry = entry->links.next)
+ {
+ if (entry->is_sub_map)
+ {
+ assert(stackIdx < kMaxKernelDepth);
+ maps[stackIdx] = map;
+ entries[stackIdx] = entry;
+ stackIdx++;
+ map = VME_SUBMAP(entry);
+ entry = NULL;
+ break;
+ }
+ if (VME_OBJECT(entry) == kernel_object)
+ {
+ count = 0;
+ vm_object_lock(VME_OBJECT(entry));
+ for (offset = entry->links.start; offset < entry->links.end; offset += page_size)
+ {
+ page = vm_page_lookup(VME_OBJECT(entry), offset);
+ if (page && VM_PAGE_WIRED(page)) count++;
+ }
+ vm_object_unlock(VME_OBJECT(entry));
+
+ if (count)
+ {
+ assert(VME_ALIAS(entry) != VM_KERN_MEMORY_NONE);
+ assert(VME_ALIAS(entry) < num_info);
+ info[VME_ALIAS(entry)].size += ptoa_64(count);
+ }
+ }
+ while (map && (entry == vm_map_last_entry(map)))
+ {
+ vm_map_unlock(map);
+ if (!stackIdx) map = NULL;
+ else
+ {
+ --stackIdx;
+ map = maps[stackIdx];
+ entry = entries[stackIdx];
+ }
+ }
+ }
+ }
+ }
+
+ process_account(info, num_info, zones_collectable_bytes, iterate);
+
+ return (KERN_SUCCESS);
+}
+
+#if DEBUG || DEVELOPMENT
+
+kern_return_t
+vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size)
+{
+ kern_return_t ret;
+ vm_size_t zsize;
+ vm_map_t map;
+ vm_map_entry_t entry;
+
+ zsize = zone_element_info((void *) addr, tag);
+ if (zsize)
+ {
+ *zone_size = *size = zsize;
+ return (KERN_SUCCESS);
+ }
+
+ *zone_size = 0;
+ ret = KERN_INVALID_ADDRESS;
+ for (map = kernel_map; map; )
+ {
+ vm_map_lock(map);
+ if (!vm_map_lookup_entry(map, addr, &entry)) break;
+ if (entry->is_sub_map)
+ {
+ if (map != kernel_map) break;
+ map = VME_SUBMAP(entry);
+ continue;
+ }
+ if (entry->vme_start != addr) break;
+ *tag = VME_ALIAS(entry);
+ *size = (entry->vme_end - addr);
+ ret = KERN_SUCCESS;
+ break;
+ }
+ if (map != kernel_map) vm_map_unlock(map);
+ vm_map_unlock(kernel_map);
+
+ return (ret);
+}
+
+#endif /* DEBUG || DEVELOPMENT */
+
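+/*
+ * Return the kmod id (and optionally the name) of the kext backing
+ * 'tag', or 0 if the tag is not kext-owned.
+ */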
+uint32_t
+vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen)
+{
+ vm_allocation_site_t * site;
+ uint32_t kmodId;
+
+ kmodId = 0;
+ lck_spin_lock(&vm_allocation_sites_lock);
+ if ((site = vm_allocation_sites[tag]))
+ {
+ if (VM_TAG_KMOD & site->flags)
+ {
+ kmodId = OSKextGetKmodIDForSite(site, name, namelen);
+ }
+ }
+ lck_spin_unlock(&vm_allocation_sites_lock);
+
+ return (kmodId);
}
-#endif /* MACH_KDB */