/*
- * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2006-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>
-
/*
* APPLE PROTECT MEMORY PAGER
*
kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
vm_prot_t prot);
kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);
+boolean_t apple_protect_pager_backing_object(
+ memory_object_t mem_obj,
+ memory_object_offset_t mem_obj_offset,
+ vm_object_t *backing_object,
+ vm_object_offset_t *backing_offset);
#define CRYPT_INFO_DEBUG 0
void crypt_info_reference(struct pager_crypt_info *crypt_info);
* These routines are invoked by VM via the memory_object_*() interfaces.
*/
const struct memory_object_pager_ops apple_protect_pager_ops = {
- apple_protect_pager_reference,
- apple_protect_pager_deallocate,
- apple_protect_pager_init,
- apple_protect_pager_terminate,
- apple_protect_pager_data_request,
- apple_protect_pager_data_return,
- apple_protect_pager_data_initialize,
- apple_protect_pager_data_unlock,
- apple_protect_pager_synchronize,
- apple_protect_pager_map,
- apple_protect_pager_last_unmap,
- NULL, /* data_reclaim */
- "apple_protect"
+ .memory_object_reference = apple_protect_pager_reference,
+ .memory_object_deallocate = apple_protect_pager_deallocate,
+ .memory_object_init = apple_protect_pager_init,
+ .memory_object_terminate = apple_protect_pager_terminate,
+ .memory_object_data_request = apple_protect_pager_data_request,
+ .memory_object_data_return = apple_protect_pager_data_return,
+ .memory_object_data_initialize = apple_protect_pager_data_initialize,
+ .memory_object_data_unlock = apple_protect_pager_data_unlock,
+ .memory_object_synchronize = apple_protect_pager_synchronize,
+ .memory_object_map = apple_protect_pager_map,
+ .memory_object_last_unmap = apple_protect_pager_last_unmap,
+ .memory_object_data_reclaim = NULL,
+ .memory_object_backing_object = apple_protect_pager_backing_object,
+ .memory_object_pager_name = "apple_protect"
};
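/*
 * Illustrative sketch (an assumption, not part of this change): the VM
 * layer reaches these routines only through the mo_pager_ops vtable of
 * the memory object, so a page-in request is dispatched roughly as:
 *
 *	kr = mem_obj->mo_pager_ops->memory_object_data_request(
 *		mem_obj, offset, length, protection_required, fault_info);
 *
 * Switching to designated initializers above therefore changes no
 * behavior: each hook is bound by field name, and data_reclaim stays
 * NULL as before.
 */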
/*
*/
typedef struct apple_protect_pager {
/* mandatory generic header */
- struct memory_object ap_pgr_hdr;
+ struct memory_object ap_pgr_hdr;
/* pager-specific data */
queue_chain_t pager_queue; /* next & prev pagers */
- struct os_refcnt ref_count; /* reference count */
- boolean_t is_ready; /* is this pager ready ? */
- boolean_t is_mapped; /* is this mem_obj mapped ? */
+#if MEMORY_OBJECT_HAS_REFCOUNT
+#define ap_pgr_hdr_ref ap_pgr_hdr.mo_ref
+#else
+ os_ref_atomic_t ap_pgr_hdr_ref; /* reference count */
+#endif
+ bool is_ready; /* is this pager ready? */
+ bool is_mapped; /* is this mem_obj mapped? */
+ bool is_cached; /* is this pager cached? */
vm_object_t backing_object; /* VM obj w/ encrypted data */
vm_object_offset_t backing_offset;
vm_object_offset_t crypto_backing_offset; /* for key... */
* List of memory objects managed by this EMM.
* The list is protected by the "apple_protect_pager_lock" lock.
*/
-int apple_protect_pager_count = 0; /* number of pagers */
-int apple_protect_pager_count_mapped = 0; /* number of unmapped pagers */
-queue_head_t apple_protect_pager_queue;
-decl_lck_mtx_data(, apple_protect_pager_lock)
+unsigned int apple_protect_pager_count = 0; /* number of pagers */
+unsigned int apple_protect_pager_count_mapped = 0; /* number of mapped pagers */
+queue_head_t apple_protect_pager_queue = QUEUE_HEAD_INITIALIZER(apple_protect_pager_queue);
+LCK_GRP_DECLARE(apple_protect_pager_lck_grp, "apple_protect");
+LCK_MTX_DECLARE(apple_protect_pager_lock, &apple_protect_pager_lck_grp);
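/*
 * Locking sketch: every walk or update of apple_protect_pager_queue in
 * this file is assumed to happen with the mutex held, e.g.:
 *
 *	lck_mtx_lock(&apple_protect_pager_lock);
 *	queue_iterate(&apple_protect_pager_queue, pager,
 *	    apple_protect_pager_t, pager_queue) {
 *		...
 *	}
 *	lck_mtx_unlock(&apple_protect_pager_lock);
 */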
/*
* Maximum number of unmapped pagers we're willing to keep around.
*/
-int apple_protect_pager_cache_limit = 20;
+unsigned int apple_protect_pager_cache_limit = 20;
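/*
 * Sketch of how the limit is applied (mirroring the deallocate path
 * further down): once too many cached pagers sit unmapped, the oldest
 * ones are trimmed:
 *
 *	count_unmapped = apple_protect_pager_count -
 *	    apple_protect_pager_count_mapped;
 *	if (count_unmapped > apple_protect_pager_cache_limit) {
 *		apple_protect_pager_trim();
 *	}
 */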
/*
* Statistics & counters.
*/
-int apple_protect_pager_count_max = 0;
-int apple_protect_pager_count_unmapped_max = 0;
-int apple_protect_pager_num_trim_max = 0;
-int apple_protect_pager_num_trim_total = 0;
-
+unsigned int apple_protect_pager_count_max = 0;
+unsigned int apple_protect_pager_count_unmapped_max = 0;
+unsigned int apple_protect_pager_num_trim_max = 0;
+unsigned int apple_protect_pager_num_trim_total = 0;
-lck_grp_t apple_protect_pager_lck_grp;
-lck_grp_attr_t apple_protect_pager_lck_grp_attr;
-lck_attr_t apple_protect_pager_lck_attr;
/* internal prototypes */
vm_object_offset_t crypto_backing_offset,
struct pager_crypt_info *crypt_info,
vm_object_offset_t crypto_start,
- vm_object_offset_t crypto_end);
+ vm_object_offset_t crypto_end,
+ boolean_t cache_pager);
apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
void apple_protect_pager_dequeue(apple_protect_pager_t pager);
void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
#define PAGER_DEBUG(LEVEL, A)
#endif
-
-void
-apple_protect_pager_bootstrap(void)
-{
- lck_grp_attr_setdefault(&apple_protect_pager_lck_grp_attr);
- lck_grp_init(&apple_protect_pager_lck_grp, "apple_protect", &apple_protect_pager_lck_grp_attr);
- lck_attr_setdefault(&apple_protect_pager_lck_attr);
- lck_mtx_init(&apple_protect_pager_lock, &apple_protect_pager_lck_grp, &apple_protect_pager_lck_attr);
- queue_init(&apple_protect_pager_queue);
-}
-
/*
* apple_protect_pager_init()
*
pager = apple_protect_pager_lookup(mem_obj);
assert(pager->is_ready);
- assert(os_ref_get_count(&pager->ref_count) > 1); /* pager is alive and mapped */
+ assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 1); /* pager is alive and mapped */
PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));
retval = kr;
goto done;
}
- dst_object = mo_control->moc_object;
+ dst_object = memory_object_control_to_vm_object(mo_control);
assert(dst_object != VM_OBJECT_NULL);
/*
if (vm_page_wait(interruptible)) {
goto retry_src_fault;
}
- /* fall thru */
+ OS_FALLTHROUGH;
case VM_FAULT_INTERRUPTED:
retval = MACH_SEND_INTERRUPTED;
goto done;
/* success but no VM page: fail */
vm_object_paging_end(src_top_object);
vm_object_unlock(src_top_object);
- /*FALLTHROUGH*/
+ OS_FALLTHROUGH;
case VM_FAULT_MEMORY_ERROR:
/* the page is not there! */
if (error_code) {
dst_pnum = (ppnum_t)
upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
assert(dst_pnum != 0);
-#if __x86_64__
- src_vaddr = (vm_map_offset_t)
- PHYSMAP_PTOV((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
- << PAGE_SHIFT);
- dst_vaddr = (vm_map_offset_t)
- PHYSMAP_PTOV((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
-#elif __arm__ || __arm64__
src_vaddr = (vm_map_offset_t)
phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
<< PAGE_SHIFT);
dst_vaddr = (vm_map_offset_t)
phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
-#else
-#error "vm_paging_map_object: no 1-to-1 kernel mapping of physical memory..."
- src_vaddr = 0;
- dst_vaddr = 0;
-#endif
+
src_page_object = VM_PAGE_OBJECT(src_page);
/*
*/
if (src_page_object->code_signed) {
vm_page_validate_cs_mapped(
- src_page,
+ src_page, PAGE_SIZE, 0,
(const void *) src_vaddr);
}
/*
}
} else {
boolean_t empty;
- upl_commit_range(upl, 0, upl->size,
+ assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
+ "upl %p offset 0x%llx size 0x%x",
+ upl, upl->u_offset, upl->u_size);
+ upl_commit_range(upl, 0, upl->u_size,
UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
upl_pl, pl_count, &empty);
}
pager = apple_protect_pager_lookup(mem_obj);
lck_mtx_lock(&apple_protect_pager_lock);
- os_ref_retain_locked(&pager->ref_count);
+ os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
lck_mtx_unlock(&apple_protect_pager_lock);
}
boolean_t locked)
{
boolean_t needs_trimming;
- int count_unmapped;
+ unsigned int count_unmapped;
+ os_ref_count_t ref_count;
if (!locked) {
lck_mtx_lock(&apple_protect_pager_lock);
}
/* drop a reference on this pager */
- os_ref_count_t ref_count = os_ref_release_locked(&pager->ref_count);
+ ref_count = os_ref_release_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
if (ref_count == 1) {
/*
lck_mtx_lock(&apple_protect_pager_lock);
assert(pager->is_ready);
- assert(os_ref_get_count(&pager->ref_count) > 0); /* pager is alive */
+ assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 0); /* pager is alive */
if (pager->is_mapped == FALSE) {
/*
* First mapping of this pager: take an extra reference
* are removed.
*/
pager->is_mapped = TRUE;
- os_ref_retain_locked(&pager->ref_count);
+ os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
apple_protect_pager_count_mapped++;
}
lck_mtx_unlock(&apple_protect_pager_lock);
memory_object_t mem_obj)
{
apple_protect_pager_t pager;
- int count_unmapped;
+ unsigned int count_unmapped;
PAGER_DEBUG(PAGER_ALL,
("apple_protect_pager_last_unmap: %p\n", mem_obj));
return KERN_SUCCESS;
}
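/*
 * Sketch of the elided body above: last_unmap pairs the extra reference
 * taken on the first mapping with a release once all mappings are gone:
 *
 *	lck_mtx_lock(&apple_protect_pager_lock);
 *	if (pager->is_mapped) {
 *		pager->is_mapped = FALSE;
 *		apple_protect_pager_count_mapped--;
 *		apple_protect_pager_deallocate_internal(pager, TRUE);
 *		... deallocate_internal() drops the lock ...
 *	}
 */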
+boolean_t
+apple_protect_pager_backing_object(
+ memory_object_t mem_obj,
+ memory_object_offset_t offset,
+ vm_object_t *backing_object,
+ vm_object_offset_t *backing_offset)
+{
+ apple_protect_pager_t pager;
+
+ PAGER_DEBUG(PAGER_ALL,
+ ("apple_protect_pager_backing_object: %p\n", mem_obj));
+
+ pager = apple_protect_pager_lookup(mem_obj);
+
+ *backing_object = pager->backing_object;
+ *backing_offset = pager->backing_offset + offset;
+
+ return TRUE;
+}
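/*
 * Hypothetical caller sketch (caller names assumed for illustration):
 * the VM layer can use this new hook to translate a pager-relative
 * offset into the VM object holding the encrypted bytes:
 *
 *	vm_object_t obj;
 *	vm_object_offset_t off;
 *	if (ops->memory_object_backing_object != NULL &&
 *	    ops->memory_object_backing_object(mem_obj, mem_obj_offset,
 *		&obj, &off)) {
 *		... obj/off now name the encrypted backing store ...
 *	}
 */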
/*
*
assert(mem_obj->mo_pager_ops == &apple_protect_pager_ops);
pager = (apple_protect_pager_t)(uintptr_t) mem_obj;
- assert(os_ref_get_count(&pager->ref_count) > 0);
+ assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 0);
return pager;
}
vm_object_offset_t crypto_backing_offset,
struct pager_crypt_info *crypt_info,
vm_object_offset_t crypto_start,
- vm_object_offset_t crypto_end)
+ vm_object_offset_t crypto_end,
+ boolean_t cache_pager)
{
apple_protect_pager_t pager, pager2;
memory_object_control_t control;
pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
pager->is_ready = FALSE; /* not ready until it has a "name" */
- os_ref_init_count(&pager->ref_count, NULL, 2); /* existence reference (for the cache) and another for the caller */
+ /* one reference for the caller */
+ os_ref_init_count_raw(&pager->ap_pgr_hdr_ref, NULL, 1);
pager->is_mapped = FALSE;
+ if (cache_pager) {
+ /* extra reference for the cache */
+ os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
+ pager->is_cached = true;
+ } else {
+ pager->is_cached = false;
+ }
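/*
 * Net effect (sketch): a cached pager starts life with two references,
 * the caller's plus the cache's existence reference, which is exactly
 * the count apple_protect_pager_trim() tests for below; an uncached
 * pager starts with one and dies on its first deallocate:
 *
 *	assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) ==
 *	    (cache_pager ? 2 : 1));
 */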
pager->backing_object = backing_object;
pager->backing_offset = backing_offset;
pager->crypto_backing_offset = crypto_backing_offset;
&control);
assert(kr == KERN_SUCCESS);
+ memory_object_mark_trusted(control);
+
lck_mtx_lock(&apple_protect_pager_lock);
/* the new pager is now ready to be used */
pager->is_ready = TRUE;
vm_object_offset_t crypto_backing_offset,
struct pager_crypt_info *crypt_info,
vm_object_offset_t crypto_start,
- vm_object_offset_t crypto_end)
+ vm_object_offset_t crypto_end,
+ boolean_t cache_pager)
{
apple_protect_pager_t pager;
struct pager_crypt_info *old_crypt_info, *new_crypt_info;
crypt_info_deallocate(old_crypt_info);
assert(old_crypt_info->crypt_refcnt > 0);
/* give extra reference on pager to the caller */
- os_ref_retain_locked(&pager->ref_count);
+ os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
break;
}
}
crypto_backing_offset,
new_crypt_info,
crypto_start,
- crypto_end);
+ crypto_end,
+ cache_pager);
}
if (pager == APPLE_PROTECT_PAGER_NULL) {
/* could not create a new pager */
{
apple_protect_pager_t pager, prev_pager;
queue_head_t trim_queue;
- int num_trim;
- int count_unmapped;
+ unsigned int num_trim;
+ unsigned int count_unmapped;
lck_mtx_lock(&apple_protect_pager_lock);
prev_pager = (apple_protect_pager_t)
queue_prev(&pager->pager_queue);
- if (os_ref_get_count(&pager->ref_count) == 2 &&
+ if (pager->is_cached &&
+ os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) == 2 &&
pager->is_ready &&
!pager->is_mapped) {
/* this pager can be trimmed */
pager,
apple_protect_pager_t,
pager_queue);
+ assert(pager->is_cached);
+ pager->is_cached = false;
pager->pager_queue.next = NULL;
pager->pager_queue.prev = NULL;
/*
* has already been dequeued, but we still need to remove
* a reference.
*/
- os_ref_count_t __assert_only count = os_ref_release_locked(&pager->ref_count);
+ os_ref_count_t __assert_only count;
+ count = os_ref_release_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
assert(count == 1);
apple_protect_pager_terminate_internal(pager);
}