#include <debug.h>
#include <mach_pagemap.h>
-#include <task_swapper.h>
#include <mach/mach_types.h>
#include <mach/memory_object.h>
* memory object (kernel_object) to avoid wasting data structures.
*/
static struct vm_object kernel_object_store VM_PAGE_PACKED_ALIGNED;
-vm_object_t kernel_object;
+SECURITY_READ_ONLY_LATE(vm_object_t) kernel_object = &kernel_object_store;
static struct vm_object compressor_object_store VM_PAGE_PACKED_ALIGNED;
-vm_object_t compressor_object = &compressor_object_store;
+SECURITY_READ_ONLY_LATE(vm_object_t) compressor_object = &compressor_object_store;
+
+/*
+ * This object holds all pages that have been retired due to errors like ECC.
+ * The system should never use the page or look at its contents. The offset
+ * in this object is the same as the page's physical address.
+ */
+static struct vm_object retired_pages_object_store VM_PAGE_PACKED_ALIGNED;
+SECURITY_READ_ONLY_LATE(vm_object_t) retired_pages_object = &retired_pages_object_store;
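+/*
+ * Illustrative sketch (editorial, not part of this change): given the rule
+ * above, a retirement path would be expected to file a bad page under its
+ * physical address, along the lines of:
+ *
+ *	offset = (vm_object_offset_t)VM_PAGE_GET_PHYS_PAGE(m) << PAGE_SHIFT;
+ *	vm_object_lock(retired_pages_object);
+ *	vm_page_insert(m, retired_pages_object, offset);
+ *	vm_object_unlock(retired_pages_object);
+ *
+ * VM_PAGE_GET_PHYS_PAGE() returns the page's physical page number, so
+ * shifting it back into a byte address yields the object offset.
+ */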
/*
* The submap object is used as a placeholder for vm_map_submap
* operations.  It is declared here because it must be initialized here.
*/
static struct vm_object vm_submap_object_store VM_PAGE_PACKED_ALIGNED;
+SECURITY_READ_ONLY_LATE(vm_object_t) vm_submap_object = &vm_submap_object_store;
+
/*
* Virtual memory objects are initialized from
.vo_size = 0,
.memq_hint = VM_PAGE_NULL,
.ref_count = 1,
-#if TASK_SWAPPER
- .res_count = 1,
-#endif /* TASK_SWAPPER */
.resident_page_count = 0,
.wired_page_count = 0,
.reusable_page_count = 0,
ZC_NOENCRYPT | ZC_ALIGNMENT_REQUIRED,
ZONE_ID_ANY, ^(zone_t z){
#if defined(__LP64__)
- zone_set_submap_idx(z, Z_SUBMAP_IDX_VA_RESTRICTED_MAP);
+ zone_set_submap_idx(z, Z_SUBMAP_IDX_VA_RESTRICTED);
#else
(void)z;
#endif
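	/*
	 * Editorial note (assumption, hedged): on LP64 the vm_object zone is
	 * presumably placed in the VA-restricted submap because vm_page
	 * structures keep packed (narrow) pointers to their owning object,
	 * which only works if every vm_object lives in a bounded virtual
	 * address range; see the VM_PAGE_PACKED_ALIGNED attribute on the
	 * statically allocated object stores above.
	 */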
* Initialize the "kernel object"
*/
- kernel_object = &kernel_object_store;
-
-/*
- * Note that in the following size specifications, we need to add 1 because
- * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
- */
-
- _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
- kernel_object);
-
- _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
- compressor_object);
+ /*
+ * Note that in the following size specifications, we need to add 1 because
+ * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
+ */
+ _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, kernel_object);
+ _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, compressor_object);
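	/*
	 * Worked example of the "+ 1" above (illustrative only): if the
	 * maximum kernel address were 0xFFFFFFFF, the last byte the object
	 * must cover sits at offset 0xFFFFFFFF, so the object's size has to
	 * be 0xFFFFFFFF + 1 = 0x100000000 bytes.
	 */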
kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
compressor_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
kernel_object->no_tag_update = TRUE;
+ /*
+ * The object to hold retired VM pages.
+ */
+ _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, retired_pages_object);
+ retired_pages_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+
/*
* Initialize the "submap object". Make it as large as the
* kernel object so that no limit is imposed on submap sizes.
*/
- vm_submap_object = &vm_submap_object_store;
- _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
- vm_submap_object);
+ _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, vm_submap_object);
vm_submap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
/*
return;
}
- if (object == kernel_object || object == compressor_object) {
+ if (object == kernel_object || object == compressor_object || object == retired_pages_object) {
vm_object_lock_shared(object);
OSAddAtomic(-1, &object->ref_count);
if (object->ref_count == 0) {
if (object == kernel_object) {
panic("vm_object_deallocate: losing kernel_object\n");
+ } else if (object == retired_pages_object) {
+ panic("vm_object_deallocate: losing retired_pages_object\n");
} else {
panic("vm_object_deallocate: losing compressor_object\n");
}
if ((object->ref_count > 1) || object->terminating) {
vm_object_lock_assert_exclusive(object);
object->ref_count--;
- vm_object_res_deallocate(object);
if (object->ref_count == 1 &&
object->shadow != VM_OBJECT_NULL) {
continue;
}
- VM_OBJ_RES_DECR(object); /* XXX ? */
/*
* Terminate this object. If it had a shadow,
* then deallocate it; otherwise, if we need
if ((p->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) && p->vmp_reference == TRUE) {
vm_page_activate(p);
- VM_STAT_INCR(reactivations);
+ counter_inc(&vm_statistics_reactivations);
vm_object_page_grab_reactivations++;
}
vm_page_unlock_queues();
vm_object_lock_assert_exclusive(object);
object->ref_count--;
assert(object->ref_count > 0);
- vm_object_res_deallocate(object);
vm_object_unlock(object);
return KERN_FAILURE;
}
object->pager = MEMORY_OBJECT_NULL;
if (pager != MEMORY_OBJECT_NULL) {
- memory_object_control_disable(object->pager_control);
+ memory_object_control_disable(&object->pager_control);
}
object->ref_count--;
-#if TASK_SWAPPER
- assert(object->res_count == 0);
-#endif /* TASK_SWAPPER */
-
assert(object->ref_count == 0);
/*
pmap_flush_context_init(&pmap_flush_context_storage);
}
- vm_page_lockspin_queues();
+ vm_page_lock_queues();
next = (vm_page_t)vm_page_queue_first(&object->memq);
loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
- vm_page_lockspin_queues();
+ vm_page_lock_queues();
}
if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) {
if (p->vmp_busy || p->vmp_cleaning) {
old_pager = object->pager;
object->pager = MEMORY_OBJECT_NULL;
if (old_pager != MEMORY_OBJECT_NULL) {
- memory_object_control_disable(object->pager_control);
+ memory_object_control_disable(&object->pager_control);
}
/*
assert(new_copy->ref_count > 0);
new_copy->ref_count++; /* for old_copy->shadow ref. */
-#if TASK_SWAPPER
- if (old_copy->res_count) {
- VM_OBJ_RES_INCR(new_copy);
- VM_OBJ_RES_DECR(src_object);
- }
-#endif
-
vm_object_unlock(old_copy); /* done with old_copy */
}
assert(source->copy_strategy != MEMORY_OBJECT_COPY_NONE); /* Purgeable objects shouldn't have shadow objects. */
+#if 00
+ /*
+ * The following optimization does not work in the context of submaps
+ * (the shared region, in particular).
+ * This object might have only 1 reference (in the submap) but that
+ * submap can itself be mapped multiple times, so the object is
+ * actually indirectly referenced more than once...
+ */
if (vm_object_shadow_check &&
source->vo_size == length &&
source->ref_count == 1) {
/* things changed while we were locking "source"... */
vm_object_unlock(source);
}
+#endif /* 00 */
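	/*
	 * Concrete example of the problem described above (editorial note):
	 * the shared region is a submap holding a single reference on
	 * "source", so source->ref_count == 1, yet that submap may be nested
	 * in the address space of many tasks.  Treating the lone reference as
	 * "nobody else can see this object" and reusing it directly for the
	 * copy would let modifications become visible to every task that maps
	 * the submap.
	 */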
/*
* *offset is the map entry's offset into the VM object and
object->paging_offset =
backing_object->paging_offset + backing_offset;
if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
- memory_object_control_collapse(object->pager_control,
+ memory_object_control_collapse(&object->pager_control,
object);
}
/* the backing_object has lost its pager: reset all fields */
vm_object_lock_assert_exclusive(object);
vm_object_lock_assert_exclusive(backing_object);
-#if TASK_SWAPPER
- /*
- * Do object reference in-line to
- * conditionally increment shadow's
- * residence count. If object is not
- * resident, leave residence count
- * on shadow alone.
- */
- if (backing_object->shadow != VM_OBJECT_NULL) {
- vm_object_lock(backing_object->shadow);
- vm_object_lock_assert_exclusive(backing_object->shadow);
- backing_object->shadow->ref_count++;
- if (object->res_count != 0) {
- vm_object_res_reference(backing_object->shadow);
- }
- vm_object_unlock(backing_object->shadow);
- }
-#else /* TASK_SWAPPER */
vm_object_reference(backing_object->shadow);
-#endif /* TASK_SWAPPER */
assert(!object->phys_contiguous);
assert(!backing_object->phys_contiguous);
(!backing_object->named && backing_object->ref_count > 1)) {
vm_object_lock_assert_exclusive(backing_object);
backing_object->ref_count--;
-#if TASK_SWAPPER
- if (object->res_count != 0) {
- vm_object_res_deallocate(backing_object);
- }
- assert(backing_object->ref_count > 0);
-#endif /* TASK_SWAPPER */
vm_object_unlock(backing_object);
} else {
/*
* the backing object.
*/
-#if TASK_SWAPPER
- if (object->res_count == 0) {
- /* XXX get a reference for the deallocate below */
- vm_object_res_reference(backing_object);
- }
-#endif /* TASK_SWAPPER */
/*
* vm_object_collapse (the caller of this function) is
* now called from contexts that may not guarantee that a
VM_PAGE_SET_PHYS_PAGE(m, base_page);
}
} else {
- while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL) {
- vm_page_more_fictitious();
- }
+ m = vm_page_grab_fictitious(TRUE);
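			/*
			 * Editorial note (assumption about the updated
			 * interface): the TRUE argument presumably asks
			 * vm_page_grab_fictitious() to wait until a fictitious
			 * page becomes available, which is why the explicit
			 * retry loop around vm_page_more_fictitious() is no
			 * longer needed.
			 */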
/*
* private normally requires lock_queues but since we
object->named = TRUE;
vm_object_lock_assert_exclusive(object);
object->ref_count++;
- vm_object_res_reference(object);
while (!object->pager_ready) {
vm_object_sleep(object,
VM_OBJECT_EVENT_PAGER_READY,
vm_object_deallocate(object);
return KERN_SUCCESS;
}
- VM_OBJ_RES_DECR(object);
shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
if (object->ref_count == 1) {
}
-#if TASK_SWAPPER
-/*
- * vm_object_res_deallocate
- *
- * (recursively) decrement residence counts on vm objects and their shadows.
- * Called from vm_object_deallocate and when swapping out an object.
- *
- * The object is locked, and remains locked throughout the function,
- * even as we iterate down the shadow chain. Locks on intermediate objects
- * will be dropped, but not the original object.
- *
- * NOTE: this function used to use recursion, rather than iteration.
- */
-
-__private_extern__ void
-vm_object_res_deallocate(
- vm_object_t object)
-{
- vm_object_t orig_object = object;
- /*
- * Object is locked so it can be called directly
- * from vm_object_deallocate. Original object is never
- * unlocked.
- */
- assert(object->res_count > 0);
- while (--object->res_count == 0) {
- assert(object->ref_count >= object->res_count);
- vm_object_deactivate_all_pages(object);
- /* iterate on shadow, if present */
- if (object->shadow != VM_OBJECT_NULL) {
- vm_object_t tmp_object = object->shadow;
- vm_object_lock(tmp_object);
- if (object != orig_object) {
- vm_object_unlock(object);
- }
- object = tmp_object;
- assert(object->res_count > 0);
- } else {
- break;
- }
- }
- if (object != orig_object) {
- vm_object_unlock(object);
- }
-}
-
-/*
- * vm_object_res_reference
- *
- * Internal function to increment residence count on a vm object
- * and its shadows. It is called only from vm_object_reference, and
- * when swapping in a vm object, via vm_map_swap.
- *
- * The object is locked, and remains locked throughout the function,
- * even as we iterate down the shadow chain. Locks on intermediate objects
- * will be dropped, but not the original object.
- *
- * NOTE: this function used to use recursion, rather than iteration.
- */
-
-__private_extern__ void
-vm_object_res_reference(
- vm_object_t object)
-{
- vm_object_t orig_object = object;
- /*
- * Object is locked, so this can be called directly
- * from vm_object_reference. This lock is never released.
- */
- while ((++object->res_count == 1) &&
- (object->shadow != VM_OBJECT_NULL)) {
- vm_object_t tmp_object = object->shadow;
-
- assert(object->ref_count >= object->res_count);
- vm_object_lock(tmp_object);
- if (object != orig_object) {
- vm_object_unlock(object);
- }
- object = tmp_object;
- }
- if (object != orig_object) {
- vm_object_unlock(object);
- }
- assert(orig_object->ref_count >= orig_object->res_count);
-}
-#endif /* TASK_SWAPPER */
-
/*
* vm_object_reference:
*
/* "ref_count" refers to the object not its contents */
assert(object1->ref_count >= 1);
assert(object2->ref_count >= 1);
-#if TASK_SWAPPER
- /* "res_count" refers to the object not its contents */
-#endif
/* "resident_page_count" was updated above when transposing pages */
/* "wired_page_count" was updated above when transposing pages */
#if !VM_TAG_ACTIVE_UPDATE
__TRANSPOSE_FIELD(pager_control);
/* update the memory_objects' pointers back to the VM objects */
if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
- memory_object_control_collapse(object1->pager_control,
+ memory_object_control_collapse(&object1->pager_control,
object1);
}
if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
- memory_object_control_collapse(object2->pager_control,
+ memory_object_control_collapse(&object2->pager_control,
object2);
}
__TRANSPOSE_FIELD(copy_strategy);
* that could give us non-page-size aligned values if we start out with values that
* are odd multiples of PAGE_SIZE.
*/
-#if CONFIG_EMBEDDED
+#if !XNU_TARGET_OS_OSX
unsigned int preheat_max_bytes = (1024 * 512);
-#else /* CONFIG_EMBEDDED */
+#else /* !XNU_TARGET_OS_OSX */
unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES;
-#endif /* CONFIG_EMBEDDED */
+#endif /* !XNU_TARGET_OS_OSX */
unsigned int preheat_min_bytes = (1024 * 32);
min_ph_size = round_page(preheat_min_bytes);
max_ph_size = round_page(preheat_max_bytes);
-#if !CONFIG_EMBEDDED
+#if XNU_TARGET_OS_OSX
if (isSSD) {
min_ph_size /= 2;
max_ph_size /= 8;
max_ph_size = trunc_page(max_ph_size);
}
}
-#endif /* !CONFIG_EMBEDDED */
+#endif /* XNU_TARGET_OS_OSX */
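	/*
	 * Illustrative arithmetic for the alignment concern noted above: with
	 * 16KB pages, a max_ph_size of 5 pages (80KB) divided by 8 would give
	 * 10KB, which is not a whole number of pages; the round_page()/
	 * trunc_page() adjustments bring the results back to page multiples.
	 */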
if (min_ph_size < PAGE_SIZE) {
min_ph_size = PAGE_SIZE;
vm_object_unlock(object);
}
- if (__improbable(task->task_volatile_objects != 0 ||
- task->task_nonvolatile_objects != 0 ||
- task->task_owned_objects != 0)) {
+ if (__improbable(task->task_owned_objects != 0)) {
panic("%s(%p): volatile=%d nonvolatile=%d owned=%d q=%p q_first=%p q_last=%p",
__FUNCTION__,
task,