X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/593a1d5fd87cdf5b46dd5fcb84467b432cea0f91..3e170ce000f1506b7b5d2c5c7faec85ceabb573d:/osfmk/vm/vm_apple_protect.c?ds=sidebyside diff --git a/osfmk/vm/vm_apple_protect.c b/osfmk/vm/vm_apple_protect.c index 54e618e40..81301fd11 100644 --- a/osfmk/vm/vm_apple_protect.c +++ b/osfmk/vm/vm_apple_protect.c @@ -84,16 +84,16 @@ void apple_protect_pager_reference(memory_object_t mem_obj); void apple_protect_pager_deallocate(memory_object_t mem_obj); kern_return_t apple_protect_pager_init(memory_object_t mem_obj, memory_object_control_t control, - vm_size_t pg_size); + memory_object_cluster_size_t pg_size); kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj); kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj, memory_object_offset_t offset, - vm_size_t length, + memory_object_cluster_size_t length, vm_prot_t protection_required, memory_object_fault_info_t fault_info); kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj, memory_object_offset_t offset, - vm_size_t data_cnt, + memory_object_cluster_size_t data_cnt, memory_object_offset_t *resid_offset, int *io_error, boolean_t dirty, @@ -101,19 +101,23 @@ kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj, int upl_flags); kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj, memory_object_offset_t offset, - vm_size_t data_cnt); + memory_object_cluster_size_t data_cnt); kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj, memory_object_offset_t offset, - vm_size_t size, + memory_object_size_t size, vm_prot_t desired_access); kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj, memory_object_offset_t offset, - vm_size_t length, + memory_object_size_t length, vm_sync_t sync_flags); kern_return_t apple_protect_pager_map(memory_object_t mem_obj, vm_prot_t prot); kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj); +#define CRYPT_INFO_DEBUG 0 +void crypt_info_reference(struct pager_crypt_info *crypt_info); +void crypt_info_deallocate(struct pager_crypt_info *crypt_info); + /* * Vector of VM operations for this EMM. * These routines are invoked by VM via the memory_object_*() interfaces. @@ -130,7 +134,8 @@ const struct memory_object_pager_ops apple_protect_pager_ops = { apple_protect_pager_synchronize, apple_protect_pager_map, apple_protect_pager_last_unmap, - "apple protect pager" + NULL, /* data_reclaim */ + "apple_protect" }; /* @@ -138,17 +143,22 @@ const struct memory_object_pager_ops apple_protect_pager_ops = { * the "apple protect" EMM. */ typedef struct apple_protect_pager { + struct ipc_object_header pager_header; /* fake ip_kotype() */ memory_object_pager_ops_t pager_ops; /* == &apple_protect_pager_ops */ - unsigned int pager_ikot; /* JMM: fake ip_kotype() */ queue_chain_t pager_queue; /* next & prev pagers */ unsigned int ref_count; /* reference count */ boolean_t is_ready; /* is this pager ready ? */ boolean_t is_mapped; /* is this mem_obj mapped ? */ memory_object_control_t pager_control; /* mem object control handle */ vm_object_t backing_object; /* VM obj w/ encrypted data */ - struct pager_crypt_info crypt; + vm_object_offset_t backing_offset; + vm_object_offset_t crypto_backing_offset; /* for key... 
*/ + vm_object_offset_t crypto_start; + vm_object_offset_t crypto_end; + struct pager_crypt_info *crypt_info; } *apple_protect_pager_t; #define APPLE_PROTECT_PAGER_NULL ((apple_protect_pager_t) NULL) +#define pager_ikot pager_header.io_bits /* * List of memory objects managed by this EMM. @@ -157,7 +167,7 @@ typedef struct apple_protect_pager { int apple_protect_pager_count = 0; /* number of pagers */ int apple_protect_pager_count_mapped = 0; /* number of unmapped pagers */ queue_head_t apple_protect_pager_queue; -decl_mutex_data(,apple_protect_pager_lock) +decl_lck_mtx_data(,apple_protect_pager_lock) /* * Maximum number of unmapped pagers we're willing to keep around. @@ -172,9 +182,20 @@ int apple_protect_pager_count_unmapped_max = 0; int apple_protect_pager_num_trim_max = 0; int apple_protect_pager_num_trim_total = 0; + +lck_grp_t apple_protect_pager_lck_grp; +lck_grp_attr_t apple_protect_pager_lck_grp_attr; +lck_attr_t apple_protect_pager_lck_attr; + + /* internal prototypes */ -apple_protect_pager_t apple_protect_pager_create(vm_object_t backing_object, - struct pager_crypt_info *crypt_info); +apple_protect_pager_t apple_protect_pager_create( + vm_object_t backing_object, + vm_object_offset_t backing_offset, + vm_object_offset_t crypto_backing_offset, + struct pager_crypt_info *crypt_info, + vm_object_offset_t crypto_start, + vm_object_offset_t crypto_end); apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj); void apple_protect_pager_dequeue(apple_protect_pager_t pager); void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager, @@ -203,7 +224,10 @@ int apple_protect_pagerdebug = 0; void apple_protect_pager_bootstrap(void) { - mutex_init(&apple_protect_pager_lock, 0); + lck_grp_attr_setdefault(&apple_protect_pager_lck_grp_attr); + lck_grp_init(&apple_protect_pager_lck_grp, "apple_protect", &apple_protect_pager_lck_grp_attr); + lck_attr_setdefault(&apple_protect_pager_lck_attr); + lck_mtx_init(&apple_protect_pager_lock, &apple_protect_pager_lck_grp, &apple_protect_pager_lck_attr); queue_init(&apple_protect_pager_queue); } @@ -219,7 +243,7 @@ apple_protect_pager_init( #if !DEBUG __unused #endif - vm_size_t pg_size) + memory_object_cluster_size_t pg_size) { apple_protect_pager_t pager; kern_return_t kr; @@ -268,7 +292,7 @@ kern_return_t apple_protect_pager_data_return( __unused memory_object_t mem_obj, __unused memory_object_offset_t offset, - __unused vm_size_t data_cnt, + __unused memory_object_cluster_size_t data_cnt, __unused memory_object_offset_t *resid_offset, __unused int *io_error, __unused boolean_t dirty, @@ -283,7 +307,7 @@ kern_return_t apple_protect_pager_data_initialize( __unused memory_object_t mem_obj, __unused memory_object_offset_t offset, - __unused vm_size_t data_cnt) + __unused memory_object_cluster_size_t data_cnt) { panic("apple_protect_pager_data_initialize: should never get called"); return KERN_FAILURE; @@ -293,7 +317,7 @@ kern_return_t apple_protect_pager_data_unlock( __unused memory_object_t mem_obj, __unused memory_object_offset_t offset, - __unused vm_size_t size, + __unused memory_object_size_t size, __unused vm_prot_t desired_access) { return KERN_FAILURE; @@ -304,11 +328,12 @@ apple_protect_pager_data_unlock( * * Handles page-in requests from VM. 
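+ *
+ * For each page present in the UPL: fault the encrypted source page
+ * in from the backing object, map the source and destination pages
+ * into the kernel, validate the source page against its object's
+ * code signature, then decrypt it into the destination page via
+ * crypt_info->page_decrypt().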
*/ +int apple_protect_pager_data_request_debug = 0; kern_return_t apple_protect_pager_data_request( memory_object_t mem_obj, memory_object_offset_t offset, - vm_size_t length, + memory_object_cluster_size_t length, #if !DEBUG __unused #endif @@ -320,28 +345,34 @@ apple_protect_pager_data_request( upl_t upl; int upl_flags; upl_size_t upl_size; - upl_page_info_t *upl_pl = NULL; + upl_page_info_t *upl_pl; unsigned int pl_count; vm_object_t src_object, dst_object; kern_return_t kr, retval; vm_map_offset_t kernel_mapping; vm_offset_t src_vaddr, dst_vaddr; vm_offset_t cur_offset; - vm_map_entry_t map_entry; + vm_offset_t offset_in_page; kern_return_t error_code; vm_prot_t prot; vm_page_t src_page, top_page; int interruptible; - vm_object_fault_info_t fault_info; + struct vm_object_fault_info fault_info; + int ret; PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required)); + retval = KERN_SUCCESS; src_object = VM_OBJECT_NULL; kernel_mapping = 0; upl = NULL; upl_pl = NULL; - fault_info = (vm_object_fault_info_t) mo_fault_info; - interruptible = fault_info->interruptible; + fault_info = *((struct vm_object_fault_info *) mo_fault_info); + fault_info.stealth = TRUE; + fault_info.io_sync = FALSE; + fault_info.mark_zf_absent = FALSE; + fault_info.batch_pmap_op = FALSE; + interruptible = fault_info.interruptible; pager = apple_protect_pager_lookup(mem_obj); assert(pager->is_ready); @@ -373,11 +404,18 @@ apple_protect_pager_data_request( assert(dst_object != VM_OBJECT_NULL); +#if __x86_64__ || __arm__ || __arm64__ + /* we'll use the 1-to-1 mapping of physical memory */ + src_vaddr = 0; + dst_vaddr = 0; +#else /* __x86_64__ || __arm__ || __arm64__ */ /* * Reserve 2 virtual pages in the kernel address space to map each * source and destination physical pages when it's their turn to * be processed. */ + vm_map_entry_t map_entry; + vm_object_reference(kernel_object); /* ref. 
for mapping */ kr = vm_map_find_space(kernel_map, &kernel_mapping, @@ -391,10 +429,11 @@ apple_protect_pager_data_request( goto done; } map_entry->object.vm_object = kernel_object; - map_entry->offset = kernel_mapping - VM_MIN_KERNEL_ADDRESS; + map_entry->offset = kernel_mapping; vm_map_unlock(kernel_map); src_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping); dst_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping + PAGE_SIZE_64); +#endif /* __x86_64__ || __arm__ || __arm64__ */ /* * We'll map the encrypted data in the kernel address space from the @@ -410,10 +449,12 @@ apple_protect_pager_data_request( */ upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl); pl_count = length / PAGE_SIZE; - for (cur_offset = 0; cur_offset < length; cur_offset += PAGE_SIZE) { + for (cur_offset = 0; + retval == KERN_SUCCESS && cur_offset < length; + cur_offset += PAGE_SIZE) { ppnum_t dst_pnum; - if (!upl_page_present(upl_pl, cur_offset / PAGE_SIZE)) { + if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) { /* this page is not in the UPL: skip it */ continue; } @@ -428,10 +469,12 @@ apple_protect_pager_data_request( vm_object_paging_begin(src_object); error_code = 0; prot = VM_PROT_READ; + src_page = VM_PAGE_NULL; kr = vm_fault_page(src_object, - offset + cur_offset, + pager->backing_offset + offset + cur_offset, VM_PROT_READ, FALSE, + FALSE, /* src_page not looked up */ &prot, &src_page, &top_page, @@ -439,7 +482,7 @@ apple_protect_pager_data_request( &error_code, FALSE, FALSE, - fault_info); + &fault_info); switch (kr) { case VM_FAULT_SUCCESS: break; @@ -453,6 +496,11 @@ apple_protect_pager_data_request( case VM_FAULT_INTERRUPTED: retval = MACH_SEND_INTERRUPTED; goto done; + case VM_FAULT_SUCCESS_NO_VM_PAGE: + /* success but no VM page: fail */ + vm_object_paging_end(src_object); + vm_object_unlock(src_object); + /*FALLTHROUGH*/ case VM_FAULT_MEMORY_ERROR: /* the page is not there ! */ if (error_code) { @@ -462,44 +510,71 @@ apple_protect_pager_data_request( } goto done; default: - retval = KERN_FAILURE; - goto done; + panic("apple_protect_pager_data_request: " + "vm_fault_page() unexpected error 0x%x\n", + kr); } assert(src_page != VM_PAGE_NULL); assert(src_page->busy); - + + if (!src_page->active && + !src_page->inactive && + !src_page->throttled) { + vm_page_lockspin_queues(); + if (!src_page->active && + !src_page->inactive && + !src_page->throttled) { + vm_page_deactivate(src_page); + } + vm_page_unlock_queues(); + } + /* * Establish an explicit mapping of the source * physical page. */ +#if __x86_64__ + src_vaddr = (vm_map_offset_t) + PHYSMAP_PTOV((pmap_paddr_t)src_page->phys_page + << PAGE_SHIFT); +#else pmap_enter(kernel_pmap, - kernel_mapping, + src_vaddr, src_page->phys_page, VM_PROT_READ, - src_object->wimg_bits & VM_WIMG_MASK, + VM_PROT_NONE, + 0, TRUE); +#endif /* * Establish an explicit pmap mapping of the destination * physical page. * We can't do a regular VM mapping because the VM page * is "busy". */ - dst_pnum = (addr64_t) - upl_phys_page(upl_pl, cur_offset / PAGE_SIZE); + dst_pnum = (ppnum_t) + upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE)); assert(dst_pnum != 0); +#if __x86_64__ + dst_vaddr = (vm_map_offset_t) + PHYSMAP_PTOV((pmap_paddr_t)dst_pnum << PAGE_SHIFT); +#else pmap_enter(kernel_pmap, - kernel_mapping + PAGE_SIZE_64, + dst_vaddr, dst_pnum, VM_PROT_READ | VM_PROT_WRITE, - dst_object->wimg_bits & VM_WIMG_MASK, + VM_PROT_NONE, + 0, TRUE); +#endif /* * Validate the original page... 
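+		 * ... against its object's code signature, if that object
+		 * is code-signed; the cs_validated/cs_tainted/cs_nx results
+		 * are copied into the UPL below, so they apply to the
+		 * decrypted destination page as well.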
*/ if (src_page->object->code_signed) { - vm_page_validate_cs_mapped(src_page, - (const void *) src_vaddr); + vm_page_validate_cs_mapped( + src_page, + (const void *) src_vaddr); } /* * ... and transfer the results to the destination page. @@ -508,15 +583,128 @@ apple_protect_pager_data_request( src_page->cs_validated); UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE, src_page->cs_tainted); + UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE, + src_page->cs_nx); + + /* + * page_decrypt() might access a mapped file, so let's release + * the object lock for the source page to avoid a potential + * deadlock. The source page is kept busy and we have a + * "paging_in_progress" reference on its object, so it's safe + * to unlock the object here. + */ + assert(src_page->busy); + assert(src_page->object->paging_in_progress > 0); + vm_object_unlock(src_page->object); /* * Decrypt the encrypted contents of the source page * into the destination page. */ - pager->crypt.page_decrypt((const void *) src_vaddr, - (void *) dst_vaddr, offset+cur_offset, - pager->crypt.crypt_ops); - + for (offset_in_page = 0; + offset_in_page < PAGE_SIZE; + offset_in_page += 4096) { + if (offset + cur_offset + offset_in_page < + pager->crypto_start || + offset + cur_offset + offset_in_page >= + pager->crypto_end) { + /* not encrypted: just copy */ + bcopy((const char *)(src_vaddr + + offset_in_page), + (char *)(dst_vaddr + offset_in_page), + 4096); + if (apple_protect_pager_data_request_debug) { + printf("apple_protect_data_request" + "(%p,0x%llx+0x%llx+0x%04llx): " + "out of crypto range " + "[0x%llx:0x%llx]: " + "COPY [0x%016llx 0x%016llx] " + "code_signed=%d " + "cs_validated=%d " + "cs_tainted=%d " + "cs_nx=%d\n", + pager, + offset, + (uint64_t) cur_offset, + (uint64_t) offset_in_page, + pager->crypto_start, + pager->crypto_end, + *(uint64_t *)(dst_vaddr+ + offset_in_page), + *(uint64_t *)(dst_vaddr+ + offset_in_page+8), + src_page->object->code_signed, + src_page->cs_validated, + src_page->cs_tainted, + src_page->cs_nx); + } + ret = 0; + continue; + } + ret = pager->crypt_info->page_decrypt( + (const void *)(src_vaddr + offset_in_page), + (void *)(dst_vaddr + offset_in_page), + ((pager->crypto_backing_offset - + pager->crypto_start) + /* XXX ? */ + offset + + cur_offset + + offset_in_page), + pager->crypt_info->crypt_ops); + if (apple_protect_pager_data_request_debug) { + printf("apple_protect_data_request" + "(%p,0x%llx+0x%llx+0x%04llx): " + "in crypto range [0x%llx:0x%llx]: " + "DECRYPT offset 0x%llx=" + "(0x%llx-0x%llx+0x%llx+0x%llx+0x%04llx)" + "[0x%016llx 0x%016llx] " + "code_signed=%d " + "cs_validated=%d " + "cs_tainted=%d " + "cs_nx=%d " + "ret=0x%x\n", + pager, + offset, + (uint64_t) cur_offset, + (uint64_t) offset_in_page, + pager->crypto_start, pager->crypto_end, + ((pager->crypto_backing_offset - + pager->crypto_start) + + offset + + cur_offset + + offset_in_page), + pager->crypto_backing_offset, + pager->crypto_start, + offset, + (uint64_t) cur_offset, + (uint64_t) offset_in_page, + *(uint64_t *)(dst_vaddr+offset_in_page), + *(uint64_t *)(dst_vaddr+offset_in_page+8), + src_page->object->code_signed, + src_page->cs_validated, + src_page->cs_tainted, + src_page->cs_nx, + ret); + } + if (ret) { + break; + } + } + if (ret) { + /* + * Decryption failed. Abort the fault. 
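+			 * (The "done" path below aborts the UPL, then turns
+			 * KERN_ABORTED back into KERN_SUCCESS after a short
+			 * delay, so the caller retries the fault instead of
+			 * failing it outright.)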
+ */ + retval = KERN_ABORTED; + } + + assert(src_page->busy); + assert(src_page->object->paging_in_progress > 0); + vm_object_lock(src_page->object); + +#if __x86_64__ || __arm__ || __arm64__ + /* we used the 1-to-1 mapping of physical memory */ + src_vaddr = 0; + dst_vaddr = 0; +#else /* __x86_64__ || __arm__ || __arm64__ */ /* * Remove the pmap mapping of the source and destination pages * in the kernel. @@ -524,6 +712,7 @@ apple_protect_pager_data_request( pmap_remove(kernel_pmap, (addr64_t) kernel_mapping, (addr64_t) (kernel_mapping + (2 * PAGE_SIZE_64))); +#endif /* __x86_64__ || __arm__ || __arm64__ */ /* * Cleanup the result of vm_fault_page() of the source page. @@ -542,7 +731,6 @@ apple_protect_pager_data_request( } } - retval = KERN_SUCCESS; done: if (upl != NULL) { /* clean up the UPL */ @@ -559,10 +747,37 @@ done: /* abort or commit the UPL */ if (retval != KERN_SUCCESS) { upl_abort(upl, 0); + if (retval == KERN_ABORTED) { + wait_result_t wait_result; + + /* + * We aborted the fault and did not provide + * any contents for the requested pages but + * the pages themselves are not invalid, so + * let's return success and let the caller + * retry the fault, in case it might succeed + * later (when the decryption code is up and + * running in the kernel, for example). + */ + retval = KERN_SUCCESS; + /* + * Wait a little bit first to avoid using + * too much CPU time retrying and failing + * the same fault over and over again. + */ + wait_result = assert_wait_timeout( + (event_t) apple_protect_pager_data_request, + THREAD_UNINT, + 10000, /* 10ms */ + NSEC_PER_USEC); + assert(wait_result == THREAD_WAITING); + wait_result = thread_block(THREAD_CONTINUE_NULL); + assert(wait_result == THREAD_TIMED_OUT); + } } else { boolean_t empty; upl_commit_range(upl, 0, upl->size, - UPL_COMMIT_CS_VALIDATED, + UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL, upl_pl, pl_count, &empty); } @@ -603,10 +818,10 @@ apple_protect_pager_reference( pager = apple_protect_pager_lookup(mem_obj); - mutex_lock(&apple_protect_pager_lock); + lck_mtx_lock(&apple_protect_pager_lock); assert(pager->ref_count > 0); pager->ref_count++; - mutex_unlock(&apple_protect_pager_lock); + lck_mtx_unlock(&apple_protect_pager_lock); } @@ -658,12 +873,18 @@ apple_protect_pager_terminate_internal( pager->backing_object = VM_OBJECT_NULL; } + /* one less pager using this "pager_crypt_info" */ +#if CRYPT_INFO_DEBUG + printf("CRYPT_INFO %s: deallocate %p ref %d\n", + __FUNCTION__, + pager->crypt_info, + pager->crypt_info->crypt_refcnt); +#endif /* CRYPT_INFO_DEBUG */ + crypt_info_deallocate(pager->crypt_info); + pager->crypt_info = NULL; + /* trigger the destruction of the memory object */ memory_object_destroy(pager->pager_control, 0); - - /* deallocate any crypt module data */ - if(pager->crypt.crypt_end) - pager->crypt.crypt_end(pager->crypt.crypt_ops); } /* @@ -683,7 +904,7 @@ apple_protect_pager_deallocate_internal( int count_unmapped; if (! locked) { - mutex_lock(&apple_protect_pager_lock); + lck_mtx_lock(&apple_protect_pager_lock); } count_unmapped = (apple_protect_pager_count - @@ -706,7 +927,7 @@ apple_protect_pager_deallocate_internal( */ apple_protect_pager_dequeue(pager); /* the pager is all ours: no need for the lock now */ - mutex_unlock(&apple_protect_pager_lock); + lck_mtx_unlock(&apple_protect_pager_lock); apple_protect_pager_terminate_internal(pager); } else if (pager->ref_count == 0) { /* @@ -714,7 +935,7 @@ apple_protect_pager_deallocate_internal( * been terminated. 
Do some final cleanup and release the
		 * pager structure.
		 */
-		mutex_unlock(&apple_protect_pager_lock);
+		lck_mtx_unlock(&apple_protect_pager_lock);
 		if (pager->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
 			memory_object_control_deallocate(pager->pager_control);
 			pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
 		}
 		kfree(pager, sizeof (*pager));
 		pager = APPLE_PROTECT_PAGER_NULL;
 	} else {
 		/* there are still plenty of references:   keep going... */
-		mutex_unlock(&apple_protect_pager_lock);
+		lck_mtx_unlock(&apple_protect_pager_lock);
 	}
 
 	if (needs_trimming) {
@@ -771,7 +992,7 @@ kern_return_t
 apple_protect_pager_synchronize(
 	memory_object_t		mem_obj,
 	memory_object_offset_t	offset,
-	vm_size_t		length,
+	memory_object_size_t	length,
 	__unused vm_sync_t	sync_flags)
 {
 	apple_protect_pager_t	pager;
@@ -790,8 +1011,8 @@ apple_protect_pager_synchronize(
  * apple_protect_pager_map()
  *
  * This allows VM to let us, the EMM, know that this memory object
- * is currently mapped one or more times. This is called by VM only the first
- * time the memory object gets mapped and we take one extra reference on the
+ * is currently mapped one or more times. This is called by VM each time
+ * the memory object gets mapped and we take one extra reference on the
  * memory object to account for all its mappings.
  */
 kern_return_t
@@ -805,7 +1026,7 @@ apple_protect_pager_map(
 
 	pager = apple_protect_pager_lookup(mem_obj);
 
-	mutex_lock(&apple_protect_pager_lock);
+	lck_mtx_lock(&apple_protect_pager_lock);
 	assert(pager->is_ready);
 	assert(pager->ref_count > 0); /* pager is alive */
 	if (pager->is_mapped == FALSE) {
@@ -818,7 +1039,7 @@ apple_protect_pager_map(
 		pager->ref_count++;
 		apple_protect_pager_count_mapped++;
 	}
-	mutex_unlock(&apple_protect_pager_lock);
+	lck_mtx_unlock(&apple_protect_pager_lock);
 
 	return KERN_SUCCESS;
 }
@@ -840,7 +1061,7 @@ apple_protect_pager_last_unmap(
 
 	pager = apple_protect_pager_lookup(mem_obj);
 
-	mutex_lock(&apple_protect_pager_lock);
+	lck_mtx_lock(&apple_protect_pager_lock);
 	if (pager->is_mapped) {
 		/*
 		 * All the mappings are gone, so let go of the one extra
@@ -856,7 +1077,7 @@ apple_protect_pager_last_unmap(
 		apple_protect_pager_deallocate_internal(pager, TRUE);
 		/* caution: deallocate_internal() released the lock ! */
 	} else {
-		mutex_unlock(&apple_protect_pager_lock);
+		lck_mtx_unlock(&apple_protect_pager_lock);
 	}
 	
 	return KERN_SUCCESS;
@@ -880,12 +1101,17 @@ apple_protect_pager_lookup(
 
 apple_protect_pager_t
 apple_protect_pager_create(
-	vm_object_t backing_object,
-	struct pager_crypt_info *crypt_info)
+	vm_object_t		backing_object,
+	vm_object_offset_t	backing_offset,
+	vm_object_offset_t	crypto_backing_offset,
+	struct pager_crypt_info	*crypt_info,
+	vm_object_offset_t	crypto_start,
+	vm_object_offset_t	crypto_end)
 {
 	apple_protect_pager_t	pager, pager2;
 	memory_object_control_t	control;
 	kern_return_t		kr;
+	struct pager_crypt_info	*old_crypt_info;
 
 	pager = (apple_protect_pager_t) kalloc(sizeof (*pager));
 	if (pager == APPLE_PROTECT_PAGER_NULL) {
@@ -896,38 +1122,94 @@ apple_protect_pager_create(
 	 * The vm_map call takes both named entry ports and raw memory
 	 * objects in the same parameter.  We need to make sure that
 	 * vm_map does not see this object as a named entry port.  So,
-	 * we reserve the second word in the object for a fake ip_kotype
+	 * we reserve the first word in the object for a fake ip_kotype
 	 * setting - that will tell vm_map to use it as a memory object.
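+	 * (That first word is the "pager_header" field at the top of the
+	 * structure; "pager_ikot" is #defined above as an alias for
+	 * pager_header.io_bits.)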
*/ pager->pager_ops = &apple_protect_pager_ops; pager->pager_ikot = IKOT_MEMORY_OBJECT; pager->is_ready = FALSE;/* not ready until it has a "name" */ - pager->ref_count = 2; /* existence + setup reference */ + pager->ref_count = 1; /* existence reference (for the cache) */ + pager->ref_count++; /* for the caller */ pager->is_mapped = FALSE; pager->pager_control = MEMORY_OBJECT_CONTROL_NULL; pager->backing_object = backing_object; - pager->crypt = *crypt_info; + pager->backing_offset = backing_offset; + pager->crypto_backing_offset = crypto_backing_offset; + pager->crypto_start = crypto_start; + pager->crypto_end = crypto_end; + pager->crypt_info = crypt_info; /* allocated by caller */ + +#if CRYPT_INFO_DEBUG + printf("CRYPT_INFO %s: crypt_info %p [%p,%p,%p,%d]\n", + __FUNCTION__, + crypt_info, + crypt_info->page_decrypt, + crypt_info->crypt_end, + crypt_info->crypt_ops, + crypt_info->crypt_refcnt); +#endif /* CRYPT_INFO_DEBUG */ vm_object_reference(backing_object); - mutex_lock(&apple_protect_pager_lock); + old_crypt_info = NULL; + + lck_mtx_lock(&apple_protect_pager_lock); /* see if anyone raced us to create a pager for the same object */ queue_iterate(&apple_protect_pager_queue, pager2, apple_protect_pager_t, pager_queue) { - if (pager2->backing_object == backing_object) { + if ((pager2->crypt_info->page_decrypt != + crypt_info->page_decrypt) || + (pager2->crypt_info->crypt_end != + crypt_info->crypt_end) || + (pager2->crypt_info->crypt_ops != + crypt_info->crypt_ops)) { + /* crypt_info contents do not match: next pager */ + continue; + } + + /* found a match for crypt_info ... */ + if (old_crypt_info) { + /* ... already switched to that crypt_info */ + assert(old_crypt_info == pager2->crypt_info); + } else if (pager2->crypt_info != crypt_info) { + /* ... switch to that pager's crypt_info */ +#if CRYPT_INFO_DEBUG + printf("CRYPT_INFO %s: reference %p ref %d " + "(create match)\n", + __FUNCTION__, + pager2->crypt_info, + pager2->crypt_info->crypt_refcnt); +#endif /* CRYPT_INFO_DEBUG */ + old_crypt_info = pager2->crypt_info; + crypt_info_reference(old_crypt_info); + pager->crypt_info = old_crypt_info; + } + + if (pager2->backing_object == backing_object && + pager2->backing_offset == backing_offset && + pager2->crypto_backing_offset == crypto_backing_offset && + pager2->crypto_start == crypto_start && + pager2->crypto_end == crypto_end) { + /* full match: use that pager */ break; } } if (! queue_end(&apple_protect_pager_queue, (queue_entry_t) pager2)) { - /* while we hold the lock, transfer our setup ref to winner */ - pager2->ref_count++; /* we lost the race, down with the loser... */ - mutex_unlock(&apple_protect_pager_lock); + lck_mtx_unlock(&apple_protect_pager_lock); vm_object_deallocate(pager->backing_object); pager->backing_object = VM_OBJECT_NULL; +#if CRYPT_INFO_DEBUG + printf("CRYPT_INFO %s: %p ref %d (create pager match)\n", + __FUNCTION__, + pager->crypt_info, + pager->crypt_info->crypt_refcnt); +#endif /* CRYPT_INFO_DEBUG */ + crypt_info_deallocate(pager->crypt_info); + pager->crypt_info = NULL; kfree(pager, sizeof (*pager)); /* ... 
and go with the winner */ pager = pager2; @@ -944,21 +1226,35 @@ apple_protect_pager_create( if (apple_protect_pager_count > apple_protect_pager_count_max) { apple_protect_pager_count_max = apple_protect_pager_count; } - mutex_unlock(&apple_protect_pager_lock); + lck_mtx_unlock(&apple_protect_pager_lock); kr = memory_object_create_named((memory_object_t) pager, 0, &control); assert(kr == KERN_SUCCESS); - mutex_lock(&apple_protect_pager_lock); + lck_mtx_lock(&apple_protect_pager_lock); /* the new pager is now ready to be used */ pager->is_ready = TRUE; - mutex_unlock(&apple_protect_pager_lock); + lck_mtx_unlock(&apple_protect_pager_lock); /* wakeup anyone waiting for this pager to be ready */ thread_wakeup(&pager->is_ready); + if (old_crypt_info != NULL && + old_crypt_info != crypt_info) { + /* we re-used an old crypt_info instead of using our new one */ +#if CRYPT_INFO_DEBUG + printf("CRYPT_INFO %s: deallocate %p ref %d " + "(create used old)\n", + __FUNCTION__, + crypt_info, + crypt_info->crypt_refcnt); +#endif /* CRYPT_INFO_DEBUG */ + crypt_info_deallocate(crypt_info); + crypt_info = NULL; + } + return pager; } @@ -971,52 +1267,181 @@ apple_protect_pager_create( */ memory_object_t apple_protect_pager_setup( - vm_object_t backing_object, - struct pager_crypt_info *crypt_info) + vm_object_t backing_object, + vm_object_offset_t backing_offset, + vm_object_offset_t crypto_backing_offset, + struct pager_crypt_info *crypt_info, + vm_object_offset_t crypto_start, + vm_object_offset_t crypto_end) { apple_protect_pager_t pager; + struct pager_crypt_info *old_crypt_info, *new_crypt_info; + +#if CRYPT_INFO_DEBUG + printf("CRYPT_INFO %s: crypt_info=%p [%p,%p,%p,%d]\n", + __FUNCTION__, + crypt_info, + crypt_info->page_decrypt, + crypt_info->crypt_end, + crypt_info->crypt_ops, + crypt_info->crypt_refcnt); +#endif /* CRYPT_INFO_DEBUG */ - mutex_lock(&apple_protect_pager_lock); + old_crypt_info = NULL; + + lck_mtx_lock(&apple_protect_pager_lock); queue_iterate(&apple_protect_pager_queue, pager, apple_protect_pager_t, pager_queue) { - if (pager->backing_object == backing_object) { - /* For the same object we must always use the same protection options */ - if (!((pager->crypt.page_decrypt == crypt_info->page_decrypt) && - (pager->crypt.crypt_ops == crypt_info->crypt_ops) )) { - mutex_unlock(&apple_protect_pager_lock); - return MEMORY_OBJECT_NULL; - } + if ((pager->crypt_info->page_decrypt != + crypt_info->page_decrypt) || + (pager->crypt_info->crypt_end != + crypt_info->crypt_end) || + (pager->crypt_info->crypt_ops != + crypt_info->crypt_ops)) { + /* no match for "crypt_info": next pager */ + continue; + } + /* found a match for crypt_info ... */ + if (old_crypt_info) { + /* ... already switched to that crypt_info */ + assert(old_crypt_info == pager->crypt_info); + } else { + /* ... 
switch to that pager's crypt_info */ + old_crypt_info = pager->crypt_info; +#if CRYPT_INFO_DEBUG + printf("CRYPT_INFO %s: " + "switching crypt_info from %p [%p,%p,%p,%d] " + "to %p [%p,%p,%p,%d] from pager %p\n", + __FUNCTION__, + crypt_info, + crypt_info->page_decrypt, + crypt_info->crypt_end, + crypt_info->crypt_ops, + crypt_info->crypt_refcnt, + old_crypt_info, + old_crypt_info->page_decrypt, + old_crypt_info->crypt_end, + old_crypt_info->crypt_ops, + old_crypt_info->crypt_refcnt, + pager); + printf("CRYPT_INFO %s: %p ref %d (setup match)\n", + __FUNCTION__, + pager->crypt_info, + pager->crypt_info->crypt_refcnt); +#endif /* CRYPT_INFO_DEBUG */ + crypt_info_reference(pager->crypt_info); + } + + if (pager->backing_object == backing_object && + pager->backing_offset == backing_offset && + pager->crypto_backing_offset == crypto_backing_offset && + pager->crypto_start == crypto_start && + pager->crypto_end == crypto_end) { + /* full match: use that pager! */ + assert(old_crypt_info == pager->crypt_info); + assert(old_crypt_info->crypt_refcnt > 1); +#if CRYPT_INFO_DEBUG + printf("CRYPT_INFO %s: " + "pager match with %p crypt_info %p\n", + __FUNCTION__, + pager, + pager->crypt_info); + printf("CRYPT_INFO %s: deallocate %p ref %d " + "(pager match)\n", + __FUNCTION__, + old_crypt_info, + old_crypt_info->crypt_refcnt); +#endif /* CRYPT_INFO_DEBUG */ + /* release the extra ref on crypt_info we got above */ + crypt_info_deallocate(old_crypt_info); + assert(old_crypt_info->crypt_refcnt > 0); + /* give extra reference on pager to the caller */ + assert(pager->ref_count > 0); + pager->ref_count++; break; } } if (queue_end(&apple_protect_pager_queue, (queue_entry_t) pager)) { + lck_mtx_unlock(&apple_protect_pager_lock); /* no existing pager for this backing object */ pager = APPLE_PROTECT_PAGER_NULL; - } else { - /* make sure pager doesn't disappear */ - pager->ref_count++; - } - - mutex_unlock(&apple_protect_pager_lock); - - if (pager == APPLE_PROTECT_PAGER_NULL) { - pager = apple_protect_pager_create(backing_object, crypt_info); + if (old_crypt_info) { + /* use this old crypt_info for new pager */ + new_crypt_info = old_crypt_info; +#if CRYPT_INFO_DEBUG + printf("CRYPT_INFO %s: " + "will use old_crypt_info %p for new pager\n", + __FUNCTION__, + old_crypt_info); +#endif /* CRYPT_INFO_DEBUG */ + } else { + /* allocate a new crypt_info for new pager */ + new_crypt_info = kalloc(sizeof (*new_crypt_info)); + *new_crypt_info = *crypt_info; + new_crypt_info->crypt_refcnt = 1; +#if CRYPT_INFO_DEBUG + printf("CRYPT_INFO %s: " + "will use new_crypt_info %p for new pager\n", + __FUNCTION__, + new_crypt_info); +#endif /* CRYPT_INFO_DEBUG */ + } + if (new_crypt_info == NULL) { + /* can't create new pager without a crypt_info */ + } else { + /* create new pager */ + pager = apple_protect_pager_create( + backing_object, + backing_offset, + crypto_backing_offset, + new_crypt_info, + crypto_start, + crypto_end); + } if (pager == APPLE_PROTECT_PAGER_NULL) { + /* could not create a new pager */ + if (new_crypt_info == old_crypt_info) { + /* release extra reference on old_crypt_info */ +#if CRYPT_INFO_DEBUG + printf("CRYPT_INFO %s: deallocate %p ref %d " + "(create fail old_crypt_info)\n", + __FUNCTION__, + old_crypt_info, + old_crypt_info->crypt_refcnt); +#endif /* CRYPT_INFO_DEBUG */ + crypt_info_deallocate(old_crypt_info); + old_crypt_info = NULL; + } else { + /* release unused new_crypt_info */ + assert(new_crypt_info->crypt_refcnt == 1); +#if CRYPT_INFO_DEBUG + printf("CRYPT_INFO %s: deallocate %p ref %d " + 
"(create fail new_crypt_info)\n", + __FUNCTION__, + new_crypt_info, + new_crypt_info->crypt_refcnt); +#endif /* CRYPT_INFO_DEBUG */ + crypt_info_deallocate(new_crypt_info); + new_crypt_info = NULL; + } return MEMORY_OBJECT_NULL; } + lck_mtx_lock(&apple_protect_pager_lock); + } else { + assert(old_crypt_info == pager->crypt_info); } - mutex_lock(&apple_protect_pager_lock); while (!pager->is_ready) { - thread_sleep_mutex(&pager->is_ready, - &apple_protect_pager_lock, - THREAD_UNINT); + lck_mtx_sleep(&apple_protect_pager_lock, + LCK_SLEEP_DEFAULT, + &pager->is_ready, + THREAD_UNINT); } - mutex_unlock(&apple_protect_pager_lock); + lck_mtx_unlock(&apple_protect_pager_lock); return (memory_object_t) pager; } @@ -1029,7 +1454,7 @@ apple_protect_pager_trim(void) int num_trim; int count_unmapped; - mutex_lock(&apple_protect_pager_lock); + lck_mtx_lock(&apple_protect_pager_lock); /* * We have too many pagers, try and trim some unused ones, @@ -1073,7 +1498,7 @@ apple_protect_pager_trim(void) } apple_protect_pager_num_trim_total += num_trim; - mutex_unlock(&apple_protect_pager_lock); + lck_mtx_unlock(&apple_protect_pager_lock); /* terminate the trimmed pagers */ while (!queue_empty(&trim_queue)) { @@ -1093,3 +1518,47 @@ apple_protect_pager_trim(void) apple_protect_pager_terminate_internal(pager); } } + + +void +crypt_info_reference( + struct pager_crypt_info *crypt_info) +{ + assert(crypt_info->crypt_refcnt != 0); +#if CRYPT_INFO_DEBUG + printf("CRYPT_INFO %s: %p ref %d -> %d\n", + __FUNCTION__, + crypt_info, + crypt_info->crypt_refcnt, + crypt_info->crypt_refcnt + 1); +#endif /* CRYPT_INFO_DEBUG */ + OSAddAtomic(+1, &crypt_info->crypt_refcnt); +} + +void +crypt_info_deallocate( + struct pager_crypt_info *crypt_info) +{ +#if CRYPT_INFO_DEBUG + printf("CRYPT_INFO %s: %p ref %d -> %d\n", + __FUNCTION__, + crypt_info, + crypt_info->crypt_refcnt, + crypt_info->crypt_refcnt - 1); +#endif /* CRYPT_INFO_DEBUG */ + OSAddAtomic(-1, &crypt_info->crypt_refcnt); + if (crypt_info->crypt_refcnt == 0) { + /* deallocate any crypt module data */ + if (crypt_info->crypt_end) { + crypt_info->crypt_end(crypt_info->crypt_ops); + crypt_info->crypt_end = NULL; + } +#if CRYPT_INFO_DEBUG + printf("CRYPT_INFO %s: freeing %p\n", + __FUNCTION__, + crypt_info); +#endif /* CRYPT_INFO_DEBUG */ + kfree(crypt_info, sizeof (*crypt_info)); + crypt_info = NULL; + } +}