diff --git a/osfmk/vm/vm_object.c b/osfmk/vm/vm_object.c
index b7a718aea16d8e709529455e430453ba88cf43fd..178a195525ad1acf31aab41c123cffe836756985 100644
--- a/osfmk/vm/vm_object.c
+++ b/osfmk/vm/vm_object.c
@@ -64,7 +64,6 @@
 
 #include <debug.h>
 #include <mach_pagemap.h>
-#include <task_swapper.h>
 
 #include <mach/mach_types.h>
 #include <mach/memory_object.h>
@@ -244,10 +243,18 @@ SECURITY_READ_ONLY_LATE(zone_t) vm_object_zone; /* vm backing store zone */
  *     memory object (kernel_object) to avoid wasting data structures.
  */
 static struct vm_object                 kernel_object_store VM_PAGE_PACKED_ALIGNED;
-vm_object_t                             kernel_object;
+SECURITY_READ_ONLY_LATE(vm_object_t)    kernel_object = &kernel_object_store;
 
 static struct vm_object                 compressor_object_store VM_PAGE_PACKED_ALIGNED;
-vm_object_t                             compressor_object = &compressor_object_store;
+SECURITY_READ_ONLY_LATE(vm_object_t)    compressor_object = &compressor_object_store;
+
+/*
+ * This object holds all pages that have been retired due to errors like ECC.
+ * The system should never use the page or look at its contents. The offset
+ * in this object is the same as the page's physical address.
+ */
+static struct vm_object                 retired_pages_object_store VM_PAGE_PACKED_ALIGNED;
+SECURITY_READ_ONLY_LATE(vm_object_t)    retired_pages_object = &retired_pages_object_store;
 
 /*
  *     The submap object is used as a placeholder for vm_map_submap
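
The comment above pins down the invariant worth remembering here: a retired page lives in retired_pages_object at the offset equal to its physical address, so nothing can fault it back in and the frame is recoverable from the offset alone. A minimal sketch of an insertion honoring that invariant; the helper is hypothetical and not part of this diff, while vm_page_insert(), ptoa_64() and VM_PAGE_GET_PHYS_PAGE() are existing xnu primitives:

        /*
         * Hypothetical sketch: park a page whose frame failed (e.g. ECC)
         * in retired_pages_object, with offset == physical address.
         */
        static void
        retire_page_sketch(vm_page_t m)
        {
                vm_object_offset_t offset;

                /* the physical address doubles as the object offset */
                offset = ptoa_64(VM_PAGE_GET_PHYS_PAGE(m));

                vm_object_lock(retired_pages_object);
                vm_page_insert(m, retired_pages_object, offset);
                vm_object_unlock(retired_pages_object);
        }
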
@@ -256,6 +263,8 @@ vm_object_t                             compressor_object = &compressor_object_s
  *     here because it must be initialized here.
  */
 static struct vm_object                 vm_submap_object_store VM_PAGE_PACKED_ALIGNED;
+SECURITY_READ_ONLY_LATE(vm_object_t)    vm_submap_object = &vm_submap_object_store;
+
 
 /*
  *     Virtual memory objects are initialized from
@@ -279,9 +288,6 @@ static const struct vm_object vm_object_template = {
        .vo_size = 0,
        .memq_hint = VM_PAGE_NULL,
        .ref_count = 1,
-#if     TASK_SWAPPER
-       .res_count = 1,
-#endif  /* TASK_SWAPPER */
        .resident_page_count = 0,
        .wired_page_count = 0,
        .reusable_page_count = 0,
@@ -554,7 +560,7 @@ vm_object_bootstrap(void)
            ZC_NOENCRYPT | ZC_ALIGNMENT_REQUIRED,
            ZONE_ID_ANY, ^(zone_t z){
 #if defined(__LP64__)
-               zone_set_submap_idx(z, Z_SUBMAP_IDX_VA_RESTRICTED_MAP);
+               zone_set_submap_idx(z, Z_SUBMAP_IDX_VA_RESTRICTED);
 #else
                (void)z;
 #endif
@@ -568,30 +574,28 @@ vm_object_bootstrap(void)
         *      Initialize the "kernel object"
         */
 
-       kernel_object = &kernel_object_store;
-
-/*
- *     Note that in the following size specifications, we need to add 1 because
- *     VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
- */
-
-       _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
-           kernel_object);
-
-       _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
-           compressor_object);
+       /*
+        * Note that in the following size specifications, we need to add 1 because
+        * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
+        */
+       _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, kernel_object);
+       _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, compressor_object);
        kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
        compressor_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
        kernel_object->no_tag_update = TRUE;
 
+       /*
+        * The object to hold retired VM pages.
+        */
+       _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, retired_pages_object);
+       retired_pages_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+
        /*
         *      Initialize the "submap object".  Make it as large as the
         *      kernel object so that no limit is imposed on submap sizes.
         */
 
-       vm_submap_object = &vm_submap_object_store;
-       _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
-           vm_submap_object);
+       _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, vm_submap_object);
        vm_submap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
 
        /*
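
The relocated comment is the one subtle point in this hunk: VM_MAX_KERNEL_ADDRESS (vm_last_addr) is the last valid address, not a size, so the object covering [0, VM_MAX_KERNEL_ADDRESS] must be one byte larger than the address itself. A self-contained toy illustration of the off-by-one:

        #include <assert.h>
        #include <stdint.h>

        int
        main(void)
        {
                uint64_t max_addr = 0xFFFULL;     /* last valid byte address      */
                uint64_t size     = max_addr + 1; /* bytes covered: 0 .. max_addr */

                assert(size == 0x1000ULL);        /* exactly one 4 KiB page       */
                return 0;
        }
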
@@ -666,7 +670,7 @@ vm_object_deallocate(
                return;
        }
 
-       if (object == kernel_object || object == compressor_object) {
+       if (object == kernel_object || object == compressor_object || object == retired_pages_object) {
                vm_object_lock_shared(object);
 
                OSAddAtomic(-1, &object->ref_count);
@@ -674,6 +678,8 @@ vm_object_deallocate(
                if (object->ref_count == 0) {
                        if (object == kernel_object) {
                                panic("vm_object_deallocate: losing kernel_object\n");
+                       } else if (object == retired_pages_object) {
+                               panic("vm_object_deallocate: losing retired_pages_object\n");
                        } else {
                                panic("vm_object_deallocate: losing compressor_object\n");
                        }
@@ -805,7 +811,6 @@ vm_object_deallocate(
                if ((object->ref_count > 1) || object->terminating) {
                        vm_object_lock_assert_exclusive(object);
                        object->ref_count--;
-                       vm_object_res_deallocate(object);
 
                        if (object->ref_count == 1 &&
                            object->shadow != VM_OBJECT_NULL) {
@@ -847,7 +852,6 @@ vm_object_deallocate(
                        continue;
                }
 
-               VM_OBJ_RES_DECR(object);        /* XXX ? */
                /*
                 *      Terminate this object. If it had a shadow,
                 *      then deallocate it; otherwise, if we need
@@ -928,7 +932,7 @@ vm_object_page_grab(
                        if ((p->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) && p->vmp_reference == TRUE) {
                                vm_page_activate(p);
 
-                               VM_STAT_INCR(reactivations);
+                               counter_inc(&vm_statistics_reactivations);
                                vm_object_page_grab_reactivations++;
                        }
                        vm_page_unlock_queues();
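
VM_STAT_INCR(reactivations) becomes counter_inc(&vm_statistics_reactivations): the statistic moves to xnu's scalable per-CPU counter API. A sketch of the usage pattern, assuming the <kern/counter.h> interface of contemporary xnu (the declaration and read shown here are for context and are not taken from this file):

        #include <kern/counter.h>

        SCALABLE_COUNTER_DECLARE(vm_statistics_reactivations);

        static void
        note_reactivation(void)
        {
                /* per-CPU increment: no shared cache line, no atomic RMW */
                counter_inc(&vm_statistics_reactivations);
        }

        static uint64_t
        read_reactivations(void)
        {
                /* readers pay instead: sum the per-CPU values */
                return counter_load(&vm_statistics_reactivations);
        }
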
@@ -1297,7 +1301,6 @@ vm_object_terminate(
                vm_object_lock_assert_exclusive(object);
                object->ref_count--;
                assert(object->ref_count > 0);
-               vm_object_res_deallocate(object);
                vm_object_unlock(object);
                return KERN_FAILURE;
        }
@@ -1443,14 +1446,10 @@ vm_object_reap(
        object->pager = MEMORY_OBJECT_NULL;
 
        if (pager != MEMORY_OBJECT_NULL) {
-               memory_object_control_disable(object->pager_control);
+               memory_object_control_disable(&object->pager_control);
        }
 
        object->ref_count--;
-#if     TASK_SWAPPER
-       assert(object->res_count == 0);
-#endif  /* TASK_SWAPPER */
-
        assert(object->ref_count == 0);
 
        /*
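
memory_object_control_disable() now takes the address of the object's pager_control field rather than its value; the same change recurs in vm_object_destroy() and vm_object_transpose() below. A sketch of what the by-reference form enables, under the assumption that the callee both detaches the control and may clear the caller's field in one place; the real body lives in osfmk/vm/memory_object.c and may differ:

        void
        memory_object_control_disable(memory_object_control_t *control)
        {
                assert(*control != MEMORY_OBJECT_CONTROL_NULL);

                /* detach the control from its VM object ... */
                (*control)->moc_object = VM_OBJECT_NULL;

                /*
                 * ... and clear the caller's field too (assumed; only the
                 * new by-reference signature makes this possible at all)
                 */
                *control = MEMORY_OBJECT_CONTROL_NULL;
        }
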
@@ -1646,7 +1645,7 @@ restart_after_sleep:
                pmap_flush_context_init(&pmap_flush_context_storage);
        }
 
-       vm_page_lockspin_queues();
+       vm_page_lock_queues();
 
        next = (vm_page_t)vm_page_queue_first(&object->memq);
 
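
This reap path (restart_after_sleep is in vm_object_reap_pages) now takes the page-queues lock in its blocking form rather than spin mode; the same change appears in the next hunk. Both variants acquire the same mutex; paraphrasing their definitions from osfmk/vm/vm_page.h, with the caveat that a spin-mode acquisition must not be held across anything that can block, which this path can now do:

        /* paraphrased from osfmk/vm/vm_page.h */
        #define vm_page_lock_queues()      lck_mtx_lock(&vm_page_queue_lock)
        #define vm_page_lockspin_queues()  lck_mtx_lock_spin(&vm_page_queue_lock)
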
@@ -1675,7 +1674,7 @@ restart_after_sleep:
 
                        loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
 
-                       vm_page_lockspin_queues();
+                       vm_page_lock_queues();
                }
                if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) {
                        if (p->vmp_busy || p->vmp_cleaning) {
@@ -1974,7 +1973,7 @@ vm_object_destroy(
        old_pager = object->pager;
        object->pager = MEMORY_OBJECT_NULL;
        if (old_pager != MEMORY_OBJECT_NULL) {
-               memory_object_control_disable(object->pager_control);
+               memory_object_control_disable(&object->pager_control);
        }
 
        /*
@@ -3736,13 +3735,6 @@ Retry:
                assert(new_copy->ref_count > 0);
                new_copy->ref_count++;          /* for old_copy->shadow ref. */
 
-#if TASK_SWAPPER
-               if (old_copy->res_count) {
-                       VM_OBJ_RES_INCR(new_copy);
-                       VM_OBJ_RES_DECR(src_object);
-               }
-#endif
-
                vm_object_unlock(old_copy);     /* done with old_copy */
        }
 
@@ -3926,6 +3918,14 @@ vm_object_shadow(
 
        assert(source->copy_strategy != MEMORY_OBJECT_COPY_NONE); /* Purgeable objects shouldn't have shadow objects. */
 
+#if 00
+       /*
+        * The following optimization does not work in the context of submaps
+        * (the shared region, in particular).
+        * This object might have only 1 reference (in the submap) but that
+        * submap can itself be mapped multiple times, so the object is
+        * actually indirectly referenced more than once...
+        */
        if (vm_object_shadow_check &&
            source->vo_size == length &&
            source->ref_count == 1) {
@@ -3951,6 +3951,7 @@ vm_object_shadow(
                /* things changed while we were locking "source"... */
                vm_object_unlock(source);
        }
+#endif /* 00 */
 
        /*
         * *offset is the map entry's offset into the VM object and
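
The comment inside the newly disabled block compresses the whole bug into three lines; spelled out as a concrete scenario (illustrative, not code from xnu):

        /*
         * submap S contains one entry mapping object O, so O->ref_count == 1.
         * Maps A and B each map S (the shared region is the usual case), so
         * faults from either A or B reach O through S.
         *
         * The optimization read "O->ref_count == 1" as "this mapping is O's
         * only user" and skipped allocating a shadow object. With submaps
         * that inference is wrong: O is indirectly referenced once per
         * mapping of S, and reusing O directly would break copy-on-write
         * isolation between A and B.
         */
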
@@ -4489,7 +4490,7 @@ vm_object_do_collapse(
                object->paging_offset =
                    backing_object->paging_offset + backing_offset;
                if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
-                       memory_object_control_collapse(object->pager_control,
+                       memory_object_control_collapse(&object->pager_control,
                            object);
                }
                /* the backing_object has lost its pager: reset all fields */
@@ -4581,26 +4582,7 @@ vm_object_do_bypass(
        vm_object_lock_assert_exclusive(object);
        vm_object_lock_assert_exclusive(backing_object);
 
-#if     TASK_SWAPPER
-       /*
-        *      Do object reference in-line to
-        *      conditionally increment shadow's
-        *      residence count.  If object is not
-        *      resident, leave residence count
-        *      on shadow alone.
-        */
-       if (backing_object->shadow != VM_OBJECT_NULL) {
-               vm_object_lock(backing_object->shadow);
-               vm_object_lock_assert_exclusive(backing_object->shadow);
-               backing_object->shadow->ref_count++;
-               if (object->res_count != 0) {
-                       vm_object_res_reference(backing_object->shadow);
-               }
-               vm_object_unlock(backing_object->shadow);
-       }
-#else   /* TASK_SWAPPER */
        vm_object_reference(backing_object->shadow);
-#endif  /* TASK_SWAPPER */
 
        assert(!object->phys_contiguous);
        assert(!backing_object->phys_contiguous);
@@ -4654,12 +4636,6 @@ vm_object_do_bypass(
            (!backing_object->named && backing_object->ref_count > 1)) {
                vm_object_lock_assert_exclusive(backing_object);
                backing_object->ref_count--;
-#if     TASK_SWAPPER
-               if (object->res_count != 0) {
-                       vm_object_res_deallocate(backing_object);
-               }
-               assert(backing_object->ref_count > 0);
-#endif  /* TASK_SWAPPER */
                vm_object_unlock(backing_object);
        } else {
                /*
@@ -4667,12 +4643,6 @@ vm_object_do_bypass(
                 *      the backing object.
                 */
 
-#if     TASK_SWAPPER
-               if (object->res_count == 0) {
-                       /* XXX get a reference for the deallocate below */
-                       vm_object_res_reference(backing_object);
-               }
-#endif  /* TASK_SWAPPER */
                /*
                 * vm_object_collapse (the caller of this function) is
                 * now called from contexts that may not guarantee that a
@@ -5373,9 +5343,7 @@ vm_object_populate_with_private(
                                        VM_PAGE_SET_PHYS_PAGE(m, base_page);
                                }
                        } else {
-                               while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL) {
-                                       vm_page_more_fictitious();
-                               }
+                               m = vm_page_grab_fictitious(TRUE);
 
                                /*
                                 * private normally requires lock_queues but since we
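
The open-coded retry loop around vm_page_more_fictitious() moves into the callee behind a new boolean. A sketch of the folded-in behavior, assuming the flag means "may block until a fictitious page is available"; the zone-grab helper named here is hypothetical:

        vm_page_t
        vm_page_grab_fictitious(boolean_t canwait)
        {
                vm_page_t m;

                /* hypothetical helper standing in for the real zone grab */
                while ((m = fictitious_zone_grab()) == VM_PAGE_NULL) {
                        if (!canwait) {
                                return VM_PAGE_NULL;
                        }
                        vm_page_more_fictitious();   /* replenish; may block */
                }
                return m;
        }
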
@@ -5496,7 +5464,6 @@ restart:
        object->named = TRUE;
        vm_object_lock_assert_exclusive(object);
        object->ref_count++;
-       vm_object_res_reference(object);
        while (!object->pager_ready) {
                vm_object_sleep(object,
                    VM_OBJECT_EVENT_PAGER_READY,
@@ -5579,7 +5546,6 @@ vm_object_release_name(
                        vm_object_deallocate(object);
                        return KERN_SUCCESS;
                }
-               VM_OBJ_RES_DECR(object);
                shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
 
                if (object->ref_count == 1) {
@@ -6287,93 +6253,6 @@ out:
 }
 
 
-#if     TASK_SWAPPER
-/*
- * vm_object_res_deallocate
- *
- * (recursively) decrement residence counts on vm objects and their shadows.
- * Called from vm_object_deallocate and when swapping out an object.
- *
- * The object is locked, and remains locked throughout the function,
- * even as we iterate down the shadow chain.  Locks on intermediate objects
- * will be dropped, but not the original object.
- *
- * NOTE: this function used to use recursion, rather than iteration.
- */
-
-__private_extern__ void
-vm_object_res_deallocate(
-       vm_object_t     object)
-{
-       vm_object_t orig_object = object;
-       /*
-        * Object is locked so it can be called directly
-        * from vm_object_deallocate.  Original object is never
-        * unlocked.
-        */
-       assert(object->res_count > 0);
-       while (--object->res_count == 0) {
-               assert(object->ref_count >= object->res_count);
-               vm_object_deactivate_all_pages(object);
-               /* iterate on shadow, if present */
-               if (object->shadow != VM_OBJECT_NULL) {
-                       vm_object_t tmp_object = object->shadow;
-                       vm_object_lock(tmp_object);
-                       if (object != orig_object) {
-                               vm_object_unlock(object);
-                       }
-                       object = tmp_object;
-                       assert(object->res_count > 0);
-               } else {
-                       break;
-               }
-       }
-       if (object != orig_object) {
-               vm_object_unlock(object);
-       }
-}
-
-/*
- * vm_object_res_reference
- *
- * Internal function to increment residence count on a vm object
- * and its shadows.  It is called only from vm_object_reference, and
- * when swapping in a vm object, via vm_map_swap.
- *
- * The object is locked, and remains locked throughout the function,
- * even as we iterate down the shadow chain.  Locks on intermediate objects
- * will be dropped, but not the original object.
- *
- * NOTE: this function used to use recursion, rather than iteration.
- */
-
-__private_extern__ void
-vm_object_res_reference(
-       vm_object_t     object)
-{
-       vm_object_t orig_object = object;
-       /*
-        * Object is locked, so this can be called directly
-        * from vm_object_reference.  This lock is never released.
-        */
-       while ((++object->res_count == 1) &&
-           (object->shadow != VM_OBJECT_NULL)) {
-               vm_object_t tmp_object = object->shadow;
-
-               assert(object->ref_count >= object->res_count);
-               vm_object_lock(tmp_object);
-               if (object != orig_object) {
-                       vm_object_unlock(object);
-               }
-               object = tmp_object;
-       }
-       if (object != orig_object) {
-               vm_object_unlock(object);
-       }
-       assert(orig_object->ref_count >= orig_object->res_count);
-}
-#endif  /* TASK_SWAPPER */
-
 /*
  *     vm_object_reference:
  *
@@ -6576,9 +6455,6 @@ MACRO_END
        /* "ref_count" refers to the object not its contents */
        assert(object1->ref_count >= 1);
        assert(object2->ref_count >= 1);
-#if TASK_SWAPPER
-       /* "res_count" refers to the object not its contents */
-#endif
        /* "resident_page_count" was updated above when transposing pages */
        /* "wired_page_count" was updated above when transposing pages */
 #if !VM_TAG_ACTIVE_UPDATE
@@ -6597,11 +6473,11 @@ MACRO_END
        __TRANSPOSE_FIELD(pager_control);
        /* update the memory_objects' pointers back to the VM objects */
        if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
-               memory_object_control_collapse(object1->pager_control,
+               memory_object_control_collapse(&object1->pager_control,
                    object1);
        }
        if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
-               memory_object_control_collapse(object2->pager_control,
+               memory_object_control_collapse(&object2->pager_control,
                    object2);
        }
        __TRANSPOSE_FIELD(copy_strategy);
@@ -6754,11 +6630,11 @@ extern int speculative_reads_disabled;
  * that could give us non-page-size aligned values if we start out with values that
  * are odd multiples of PAGE_SIZE.
  */
-#if CONFIG_EMBEDDED
+#if !XNU_TARGET_OS_OSX
 unsigned int preheat_max_bytes = (1024 * 512);
-#else /* CONFIG_EMBEDDED */
+#else /* !XNU_TARGET_OS_OSX */
 unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES;
-#endif /* CONFIG_EMBEDDED */
+#endif /* !XNU_TARGET_OS_OSX */
 unsigned int preheat_min_bytes = (1024 * 32);
 
 
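
The comment above these constants is about alignment surviving arithmetic: preheat_min_bytes and preheat_max_bytes are only honored after rounding to page multiples, and vm_object_cluster_size() (below) keeps its candidate window between them. A sketch of that clamp, with names local to the sketch rather than taken from the function:

        static vm_size_t
        clamp_preheat_sketch(vm_size_t candidate, vm_size_t min_ph, vm_size_t max_ph)
        {
                /* stay on page multiples so later scaling cannot de-align us */
                vm_size_t size = round_page(candidate);

                if (size < min_ph) {
                        size = min_ph;
                } else if (size > max_ph) {
                        size = max_ph;
                }
                return size;
        }
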
@@ -6821,7 +6697,7 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
        min_ph_size = round_page(preheat_min_bytes);
        max_ph_size = round_page(preheat_max_bytes);
 
-#if !CONFIG_EMBEDDED
+#if XNU_TARGET_OS_OSX
        if (isSSD) {
                min_ph_size /= 2;
                max_ph_size /= 8;
@@ -6834,7 +6710,7 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
                        max_ph_size = trunc_page(max_ph_size);
                }
        }
-#endif /* !CONFIG_EMBEDDED */
+#endif /* XNU_TARGET_OS_OSX */
 
        if (min_ph_size < PAGE_SIZE) {
                min_ph_size = PAGE_SIZE;
@@ -8779,9 +8655,7 @@ again:
                vm_object_unlock(object);
        }
 
-       if (__improbable(task->task_volatile_objects != 0 ||
-           task->task_nonvolatile_objects != 0 ||
-           task->task_owned_objects != 0)) {
+       if (__improbable(task->task_owned_objects != 0)) {
                panic("%s(%p): volatile=%d nonvolatile=%d owned=%d q=%p q_first=%p q_last=%p",
                    __FUNCTION__,
                    task,