xnu-7195.101.1: osfmk/vm/vm_apple_protect.c
diff --git a/osfmk/vm/vm_apple_protect.c b/osfmk/vm/vm_apple_protect.c
index 416e90fa259aa7a7a3aff16145c3b554f8f9e528..f7fcaceb38c4d418139a7d5883d1654833439f0c 100644
--- a/osfmk/vm/vm_apple_protect.c
+++ b/osfmk/vm/vm_apple_protect.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2006-2020 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -59,7 +59,6 @@
 #include <vm/vm_protos.h>
 #include <vm/vm_kern.h>
 
-
 /*
  * APPLE PROTECT MEMORY PAGER
  *
@@ -113,6 +112,11 @@ kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
 kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
     vm_prot_t prot);
 kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);
+boolean_t apple_protect_pager_backing_object(
+       memory_object_t mem_obj,
+       memory_object_offset_t mem_obj_offset,
+       vm_object_t *backing_object,
+       vm_object_offset_t *backing_offset);
 
 #define CRYPT_INFO_DEBUG 0
 void crypt_info_reference(struct pager_crypt_info *crypt_info);
@@ -123,19 +127,20 @@ void crypt_info_deallocate(struct pager_crypt_info *crypt_info);
  * These routines are invoked by VM via the memory_object_*() interfaces.
  */
 const struct memory_object_pager_ops apple_protect_pager_ops = {
-       apple_protect_pager_reference,
-       apple_protect_pager_deallocate,
-       apple_protect_pager_init,
-       apple_protect_pager_terminate,
-       apple_protect_pager_data_request,
-       apple_protect_pager_data_return,
-       apple_protect_pager_data_initialize,
-       apple_protect_pager_data_unlock,
-       apple_protect_pager_synchronize,
-       apple_protect_pager_map,
-       apple_protect_pager_last_unmap,
-       NULL, /* data_reclaim */
-       "apple_protect"
+       .memory_object_reference = apple_protect_pager_reference,
+       .memory_object_deallocate = apple_protect_pager_deallocate,
+       .memory_object_init = apple_protect_pager_init,
+       .memory_object_terminate = apple_protect_pager_terminate,
+       .memory_object_data_request = apple_protect_pager_data_request,
+       .memory_object_data_return = apple_protect_pager_data_return,
+       .memory_object_data_initialize = apple_protect_pager_data_initialize,
+       .memory_object_data_unlock = apple_protect_pager_data_unlock,
+       .memory_object_synchronize = apple_protect_pager_synchronize,
+       .memory_object_map = apple_protect_pager_map,
+       .memory_object_last_unmap = apple_protect_pager_last_unmap,
+       .memory_object_data_reclaim = NULL,
+       .memory_object_backing_object = apple_protect_pager_backing_object,
+       .memory_object_pager_name = "apple_protect"
 };
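The ops table is now filled in with designated initializers, so any member that is not named, such as a hook added to memory_object_pager_ops later, is zero-filled rather than silently shifting every following field. A small stand-alone illustration of that C behaviour (the struct and names below are made up for the example, not from xnu):

    struct example_ops {
            void        (*reference)(void);
            int         (*backing_object)(void);   /* newer, optional hook */
            const char  *name;
    };

    static const struct example_ops example_ops = {
            .reference = NULL,
            .name      = "example",
            /* .backing_object not named: zero-initialized to NULL */
    };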
 
 /*
@@ -144,13 +149,18 @@ const struct memory_object_pager_ops apple_protect_pager_ops = {
  */
 typedef struct apple_protect_pager {
        /* mandatory generic header */
-       struct memory_object ap_pgr_hdr;
+       struct memory_object    ap_pgr_hdr;
 
        /* pager-specific data */
        queue_chain_t           pager_queue;    /* next & prev pagers */
-       struct os_refcnt        ref_count;      /* reference count */
-       boolean_t               is_ready;       /* is this pager ready ? */
-       boolean_t               is_mapped;      /* is this mem_obj mapped ? */
+#if MEMORY_OBJECT_HAS_REFCOUNT
+#define ap_pgr_hdr_ref          ap_pgr_hdr.mo_ref
+#else
+       os_ref_atomic_t         ap_pgr_hdr_ref;      /* reference count */
+#endif
+       bool                    is_ready;       /* is this pager ready ? */
+       bool                    is_mapped;      /* is this mem_obj mapped ? */
+       bool                    is_cached;      /* is this pager cached ? */
        vm_object_t             backing_object; /* VM obj w/ encrypted data */
        vm_object_offset_t      backing_offset;
        vm_object_offset_t      crypto_backing_offset; /* for key... */
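The reference count now lives either in the shared memory_object header (when MEMORY_OBJECT_HAS_REFCOUNT supplies mo_ref) or in a bare os_ref_atomic_t field, and every retain/release in the rest of the file goes through the os_ref_*_raw calls, which take the counter and an optional ref group explicitly. A minimal sketch of that API shape on a hypothetical structure, assuming <os/refcnt.h> declares the raw variants exactly as they are used here:

    #include <os/refcnt.h>

    struct example_pager {
            os_ref_atomic_t ref;    /* bare counter, no os_refcnt wrapper */
    };

    static void
    example_pager_init(struct example_pager *p)
    {
            /* start at 1: the creating caller's reference (no ref group) */
            os_ref_init_count_raw(&p->ref, NULL, 1);
    }

    static os_ref_count_t
    example_pager_release(struct example_pager *p)
    {
            /* caller holds the lock that serializes retains and releases;
             * the return value is the count left after this release */
            return os_ref_release_locked_raw(&p->ref, NULL);
    }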
@@ -164,28 +174,25 @@ typedef struct apple_protect_pager {
  * List of memory objects managed by this EMM.
  * The list is protected by the "apple_protect_pager_lock" lock.
  */
-int apple_protect_pager_count = 0;              /* number of pagers */
-int apple_protect_pager_count_mapped = 0;       /* number of unmapped pagers */
-queue_head_t apple_protect_pager_queue;
-decl_lck_mtx_data(, apple_protect_pager_lock)
+unsigned int apple_protect_pager_count = 0;        /* number of pagers */
+unsigned int apple_protect_pager_count_mapped = 0; /* number of mapped pagers */
+queue_head_t apple_protect_pager_queue = QUEUE_HEAD_INITIALIZER(apple_protect_pager_queue);
+LCK_GRP_DECLARE(apple_protect_pager_lck_grp, "apple_protect");
+LCK_MTX_DECLARE(apple_protect_pager_lock, &apple_protect_pager_lck_grp);
 
 /*
  * Maximum number of unmapped pagers we're willing to keep around.
  */
-int apple_protect_pager_cache_limit = 20;
+unsigned int apple_protect_pager_cache_limit = 20;
 
 /*
  * Statistics & counters.
  */
-int apple_protect_pager_count_max = 0;
-int apple_protect_pager_count_unmapped_max = 0;
-int apple_protect_pager_num_trim_max = 0;
-int apple_protect_pager_num_trim_total = 0;
-
+unsigned int apple_protect_pager_count_max = 0;
+unsigned int apple_protect_pager_count_unmapped_max = 0;
+unsigned int apple_protect_pager_num_trim_max = 0;
+unsigned int apple_protect_pager_num_trim_total = 0;
 
-lck_grp_t               apple_protect_pager_lck_grp;
-lck_grp_attr_t  apple_protect_pager_lck_grp_attr;
-lck_attr_t              apple_protect_pager_lck_attr;
 
 
 /* internal prototypes */
@@ -195,7 +202,8 @@ apple_protect_pager_t apple_protect_pager_create(
        vm_object_offset_t crypto_backing_offset,
        struct pager_crypt_info *crypt_info,
        vm_object_offset_t crypto_start,
-       vm_object_offset_t crypto_end);
+       vm_object_offset_t crypto_end,
+       boolean_t cache_pager);
 apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
 void apple_protect_pager_dequeue(apple_protect_pager_t pager);
 void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
@@ -220,17 +228,6 @@ int apple_protect_pagerdebug = 0;
 #define PAGER_DEBUG(LEVEL, A)
 #endif
 
-
-void
-apple_protect_pager_bootstrap(void)
-{
-       lck_grp_attr_setdefault(&apple_protect_pager_lck_grp_attr);
-       lck_grp_init(&apple_protect_pager_lck_grp, "apple_protect", &apple_protect_pager_lck_grp_attr);
-       lck_attr_setdefault(&apple_protect_pager_lck_attr);
-       lck_mtx_init(&apple_protect_pager_lock, &apple_protect_pager_lck_grp, &apple_protect_pager_lck_attr);
-       queue_init(&apple_protect_pager_queue);
-}
-
 /*
  * apple_protect_pager_init()
  *
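The removed bootstrap routine existed only to set up the queue, the lock group and the mutex at run time; those now come from the static initializers declared earlier (QUEUE_HEAD_INITIALIZER, LCK_GRP_DECLARE, LCK_MTX_DECLARE), so there is nothing left to call at startup. A minimal sketch of the same pattern for a hypothetical subsystem, assuming <kern/locks.h> and <kern/queue.h> provide these macros as they do in this file:

    #include <kern/locks.h>
    #include <kern/queue.h>

    /* lock group and mutex set up by static initialization;
     * no example_bootstrap() routine is needed */
    LCK_GRP_DECLARE(example_lck_grp, "example");
    LCK_MTX_DECLARE(example_lock, &example_lck_grp);

    /* list head that is already valid before any code runs */
    queue_head_t example_queue = QUEUE_HEAD_INITIALIZER(example_queue);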
@@ -383,7 +380,7 @@ apple_protect_pager_data_request(
 
        pager = apple_protect_pager_lookup(mem_obj);
        assert(pager->is_ready);
-       assert(os_ref_get_count(&pager->ref_count) > 1); /* pager is alive and mapped */
+       assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 1); /* pager is alive and mapped */
 
        PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));
 
@@ -410,7 +407,7 @@ apple_protect_pager_data_request(
                retval = kr;
                goto done;
        }
-       dst_object = mo_control->moc_object;
+       dst_object = memory_object_control_to_vm_object(mo_control);
        assert(dst_object != VM_OBJECT_NULL);
 
        /*
@@ -470,7 +467,7 @@ retry_src_fault:
                        if (vm_page_wait(interruptible)) {
                                goto retry_src_fault;
                        }
-               /* fall thru */
+                       OS_FALLTHROUGH;
                case VM_FAULT_INTERRUPTED:
                        retval = MACH_SEND_INTERRUPTED;
                        goto done;
@@ -478,7 +475,7 @@ retry_src_fault:
                        /* success but no VM page: fail */
                        vm_object_paging_end(src_top_object);
                        vm_object_unlock(src_top_object);
-               /*FALLTHROUGH*/
+                       OS_FALLTHROUGH;
                case VM_FAULT_MEMORY_ERROR:
                        /* the page is not there ! */
                        if (error_code) {
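Both fall-through comments in this switch become OS_FALLTHROUGH, which the compiler can verify, unlike a comment. A tiny usage sketch, assuming the macro is the fallthrough-attribute wrapper provided by xnu's <os/base.h> (the function is illustrative only):

    #include <os/base.h>

    static int
    example_classify(int value)
    {
            switch (value) {
            case 0:
                    /* intentionally handled exactly like case 1 */
                    OS_FALLTHROUGH;
            case 1:
                    return 1;
            default:
                    return 0;
            }
    }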
@@ -511,24 +508,13 @@ retry_src_fault:
                dst_pnum = (ppnum_t)
                    upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
                assert(dst_pnum != 0);
-#if __x86_64__
-               src_vaddr = (vm_map_offset_t)
-                   PHYSMAP_PTOV((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
-                       << PAGE_SHIFT);
-               dst_vaddr = (vm_map_offset_t)
-                   PHYSMAP_PTOV((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
 
-#elif __arm__ || __arm64__
                src_vaddr = (vm_map_offset_t)
                    phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
                        << PAGE_SHIFT);
                dst_vaddr = (vm_map_offset_t)
                    phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
-#else
-#error "vm_paging_map_object: no 1-to-1 kernel mapping of physical memory..."
-               src_vaddr = 0;
-               dst_vaddr = 0;
-#endif
+
                src_page_object = VM_PAGE_OBJECT(src_page);
 
                /*
@@ -536,7 +522,7 @@ retry_src_fault:
                 */
                if (src_page_object->code_signed) {
                        vm_page_validate_cs_mapped(
-                               src_page,
+                               src_page, PAGE_SIZE, 0,
                                (const void *) src_vaddr);
                }
                /*
@@ -728,7 +714,10 @@ done:
                        }
                } else {
                        boolean_t empty;
-                       upl_commit_range(upl, 0, upl->size,
+                       assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
+                           "upl %p offset 0x%llx size 0x%x",
+                           upl, upl->u_offset, upl->u_size);
+                       upl_commit_range(upl, 0, upl->u_size,
                            UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
                            upl_pl, pl_count, &empty);
                }
@@ -759,7 +748,7 @@ apple_protect_pager_reference(
        pager = apple_protect_pager_lookup(mem_obj);
 
        lck_mtx_lock(&apple_protect_pager_lock);
-       os_ref_retain_locked(&pager->ref_count);
+       os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
        lck_mtx_unlock(&apple_protect_pager_lock);
 }
 
@@ -840,7 +829,8 @@ apple_protect_pager_deallocate_internal(
        boolean_t               locked)
 {
        boolean_t       needs_trimming;
-       int             count_unmapped;
+       unsigned int    count_unmapped;
+       os_ref_count_t  ref_count;
 
        if (!locked) {
                lck_mtx_lock(&apple_protect_pager_lock);
@@ -856,7 +846,7 @@ apple_protect_pager_deallocate_internal(
        }
 
        /* drop a reference on this pager */
-       os_ref_count_t ref_count = os_ref_release_locked(&pager->ref_count);
+       ref_count = os_ref_release_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
 
        if (ref_count == 1) {
                /*
@@ -959,7 +949,7 @@ apple_protect_pager_map(
 
        lck_mtx_lock(&apple_protect_pager_lock);
        assert(pager->is_ready);
-       assert(os_ref_get_count(&pager->ref_count) > 0); /* pager is alive */
+       assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 0); /* pager is alive */
        if (pager->is_mapped == FALSE) {
                /*
                 * First mapping of this pager:  take an extra reference
@@ -967,7 +957,7 @@ apple_protect_pager_map(
                 * are removed.
                 */
                pager->is_mapped = TRUE;
-               os_ref_retain_locked(&pager->ref_count);
+               os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
                apple_protect_pager_count_mapped++;
        }
        lck_mtx_unlock(&apple_protect_pager_lock);
@@ -985,7 +975,7 @@ apple_protect_pager_last_unmap(
        memory_object_t         mem_obj)
 {
        apple_protect_pager_t   pager;
-       int                     count_unmapped;
+       unsigned int            count_unmapped;
 
        PAGER_DEBUG(PAGER_ALL,
            ("apple_protect_pager_last_unmap: %p\n", mem_obj));
@@ -1014,6 +1004,25 @@ apple_protect_pager_last_unmap(
        return KERN_SUCCESS;
 }
 
+boolean_t
+apple_protect_pager_backing_object(
+       memory_object_t mem_obj,
+       memory_object_offset_t offset,
+       vm_object_t *backing_object,
+       vm_object_offset_t *backing_offset)
+{
+       apple_protect_pager_t   pager;
+
+       PAGER_DEBUG(PAGER_ALL,
+           ("apple_protect_pager_backing_object: %p\n", mem_obj));
+
+       pager = apple_protect_pager_lookup(mem_obj);
+
+       *backing_object = pager->backing_object;
+       *backing_offset = pager->backing_offset + offset;
+
+       return TRUE;
+}
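The new hook simply reports the pager's backing VM object and translates the offset. Callers are expected to reach it through the ops table named in the struct above; a sketch of such a dispatch, assuming the VM-layer definitions used in this file are in scope (the wrapper name is illustrative, not an existing xnu function):

    static boolean_t
    example_get_backing_object(
            memory_object_t         mem_obj,
            memory_object_offset_t  offset,
            vm_object_t             *object_p,
            vm_object_offset_t      *offset_p)
    {
            /* not every pager implements the newer hook */
            if (mem_obj->mo_pager_ops->memory_object_backing_object == NULL) {
                    return FALSE;
            }
            return mem_obj->mo_pager_ops->memory_object_backing_object(
                    mem_obj, offset, object_p, offset_p);
    }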
 
 /*
  *
@@ -1026,7 +1035,7 @@ apple_protect_pager_lookup(
 
        assert(mem_obj->mo_pager_ops == &apple_protect_pager_ops);
        pager = (apple_protect_pager_t)(uintptr_t) mem_obj;
-       assert(os_ref_get_count(&pager->ref_count) > 0);
+       assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 0);
        return pager;
 }
 
@@ -1037,7 +1046,8 @@ apple_protect_pager_create(
        vm_object_offset_t      crypto_backing_offset,
        struct pager_crypt_info *crypt_info,
        vm_object_offset_t      crypto_start,
-       vm_object_offset_t      crypto_end)
+       vm_object_offset_t      crypto_end,
+       boolean_t               cache_pager)
 {
        apple_protect_pager_t   pager, pager2;
        memory_object_control_t control;
@@ -1061,8 +1071,16 @@ apple_protect_pager_create(
        pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
 
        pager->is_ready = FALSE;/* not ready until it has a "name" */
-       os_ref_init_count(&pager->ref_count, NULL, 2); /* existence reference (for the cache) and another for the caller */
+       /* one reference for the caller */
+       os_ref_init_count_raw(&pager->ap_pgr_hdr_ref, NULL, 1);
        pager->is_mapped = FALSE;
+       if (cache_pager) {
+               /* extra reference for the cache */
+               os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
+               pager->is_cached = true;
+       } else {
+               pager->is_cached = false;
+       }
        pager->backing_object = backing_object;
        pager->backing_offset = backing_offset;
        pager->crypto_backing_offset = crypto_backing_offset;
@@ -1164,6 +1182,8 @@ apple_protect_pager_create(
            &control);
        assert(kr == KERN_SUCCESS);
 
+       memory_object_mark_trusted(control);
+
        lck_mtx_lock(&apple_protect_pager_lock);
        /* the new pager is now ready to be used */
        pager->is_ready = TRUE;
@@ -1203,7 +1223,8 @@ apple_protect_pager_setup(
        vm_object_offset_t      crypto_backing_offset,
        struct pager_crypt_info *crypt_info,
        vm_object_offset_t      crypto_start,
-       vm_object_offset_t      crypto_end)
+       vm_object_offset_t      crypto_end,
+       boolean_t               cache_pager)
 {
        apple_protect_pager_t   pager;
        struct pager_crypt_info *old_crypt_info, *new_crypt_info;
@@ -1290,7 +1311,7 @@ apple_protect_pager_setup(
                        crypt_info_deallocate(old_crypt_info);
                        assert(old_crypt_info->crypt_refcnt > 0);
                        /* give extra reference on pager to the caller */
-                       os_ref_retain_locked(&pager->ref_count);
+                       os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
                        break;
                }
        }
@@ -1330,7 +1351,8 @@ apple_protect_pager_setup(
                                crypto_backing_offset,
                                new_crypt_info,
                                crypto_start,
-                               crypto_end);
+                               crypto_end,
+                               cache_pager);
                }
                if (pager == APPLE_PROTECT_PAGER_NULL) {
                        /* could not create a new pager */
@@ -1381,8 +1403,8 @@ apple_protect_pager_trim(void)
 {
        apple_protect_pager_t   pager, prev_pager;
        queue_head_t            trim_queue;
-       int                     num_trim;
-       int                     count_unmapped;
+       unsigned int            num_trim;
+       unsigned int            count_unmapped;
 
        lck_mtx_lock(&apple_protect_pager_lock);
 
@@ -1402,7 +1424,8 @@ apple_protect_pager_trim(void)
                prev_pager = (apple_protect_pager_t)
                    queue_prev(&pager->pager_queue);
 
-               if (os_ref_get_count(&pager->ref_count) == 2 &&
+               if (pager->is_cached &&
+                   os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) == 2 &&
                    pager->is_ready &&
                    !pager->is_mapped) {
                        /* this pager can be trimmed */
@@ -1436,6 +1459,8 @@ apple_protect_pager_trim(void)
                    pager,
                    apple_protect_pager_t,
                    pager_queue);
+               assert(pager->is_cached);
+               pager->is_cached = false;
                pager->pager_queue.next = NULL;
                pager->pager_queue.prev = NULL;
                /*
@@ -1443,7 +1468,8 @@ apple_protect_pager_trim(void)
                 * has already been dequeued, but we still need to remove
                 * a reference.
                 */
-               os_ref_count_t __assert_only count = os_ref_release_locked(&pager->ref_count);
+               os_ref_count_t __assert_only count;
+               count = os_ref_release_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
                assert(count == 1);
                apple_protect_pager_terminate_internal(pager);
        }
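A cached pager becomes reclaimable once it is fully set up, no longer mapped anywhere, and down to the two references a cached but otherwise idle pager keeps, which is exactly what the trim loop tests. Restated as a hypothetical predicate helper (not part of the file), using only state visible above and assuming the caller holds apple_protect_pager_lock as the trim loop does:

    static bool
    apple_protect_pager_is_trimmable(apple_protect_pager_t pager)
    {
            /* same condition as the trim loop: still in the cache,
             * ready, unmapped, and holding only its two idle references */
            return pager->is_cached &&
                os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) == 2 &&
                pager->is_ready &&
                !pager->is_mapped;
    }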