diff --git a/osfmk/vm/vm_apple_protect.c b/osfmk/vm/vm_apple_protect.c
index 7cb6b93cf..707c3e695 100644
--- a/osfmk/vm/vm_apple_protect.c
+++ b/osfmk/vm/vm_apple_protect.c
@@ -45,19 +45,18 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
-#include 
-#include 
-
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
+#include 
 
 /*
@@ -114,6 +113,10 @@ kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
 					  vm_prot_t prot);
 kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);
 
+#define CRYPT_INFO_DEBUG 0
+void crypt_info_reference(struct pager_crypt_info *crypt_info);
+void crypt_info_deallocate(struct pager_crypt_info *crypt_info);
+
 /*
  * Vector of VM operations for this EMM.
  * These routines are invoked by VM via the memory_object_*() interfaces.
@@ -131,7 +134,7 @@ const struct memory_object_pager_ops apple_protect_pager_ops = {
 	apple_protect_pager_map,
 	apple_protect_pager_last_unmap,
 	NULL, /* data_reclaim */
-	"apple protect pager"
+	"apple_protect"
 };
 
 /*
@@ -139,18 +142,22 @@ const struct memory_object_pager_ops apple_protect_pager_ops = {
  * the "apple protect" EMM.
  */
 typedef struct apple_protect_pager {
-	struct ipc_object_header	pager_header;	/* fake ip_kotype() */
-	memory_object_pager_ops_t pager_ops; /* == &apple_protect_pager_ops */
+	/* mandatory generic header */
+	struct memory_object	ap_pgr_hdr;
+
+	/* pager-specific data */
 	queue_chain_t		pager_queue;	/* next & prev pagers */
 	unsigned int		ref_count;	/* reference count */
 	boolean_t		is_ready;	/* is this pager ready ? */
 	boolean_t		is_mapped;	/* is this mem_obj mapped ? */
-	memory_object_control_t pager_control;	/* mem object control handle */
 	vm_object_t		backing_object; /* VM obj w/ encrypted data */
-	struct pager_crypt_info crypt;
+	vm_object_offset_t	backing_offset;
+	vm_object_offset_t	crypto_backing_offset; /* for key... */
+	vm_object_offset_t	crypto_start;
+	vm_object_offset_t	crypto_end;
+	struct pager_crypt_info *crypt_info;
 } *apple_protect_pager_t;
 #define	APPLE_PROTECT_PAGER_NULL	((apple_protect_pager_t) NULL)
-#define pager_ikot pager_header.io_bits
 
 /*
  * List of memory objects managed by this EMM.
@@ -164,7 +171,7 @@ decl_lck_mtx_data(,apple_protect_pager_lock)
 /*
  * Maximum number of unmapped pagers we're willing to keep around.
  */
-int apple_protect_pager_cache_limit = 10;
+int apple_protect_pager_cache_limit = 20;
 
 /*
  * Statistics & counters.
@@ -181,8 +188,13 @@ lck_attr_t		apple_protect_pager_lck_attr;
 
 /* internal prototypes */
-apple_protect_pager_t apple_protect_pager_create(vm_object_t backing_object,
-						 struct pager_crypt_info *crypt_info);
+apple_protect_pager_t apple_protect_pager_create(
+	vm_object_t backing_object,
+	vm_object_offset_t backing_offset,
+	vm_object_offset_t crypto_backing_offset,
+	struct pager_crypt_info *crypt_info,
+	vm_object_offset_t crypto_start,
+	vm_object_offset_t crypto_end);
 apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
 void apple_protect_pager_dequeue(apple_protect_pager_t pager);
 void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
@@ -247,7 +259,7 @@ apple_protect_pager_init(
 
 	memory_object_control_reference(control);
 
-	pager->pager_control = control;
+	pager->ap_pgr_hdr.mo_control = control;
 
 	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
 	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
@@ -264,6 +276,12 @@ apple_protect_pager_init(
 		panic("apple_protect_pager_init: "
 		      "memory_object_change_attributes() failed");
 
+#if CONFIG_SECLUDED_MEMORY
+	if (secluded_for_filecache) {
+		memory_object_mark_eligible_for_secluded(control, TRUE);
+	}
+#endif /* CONFIG_SECLUDED_MEMORY */
+
 	return KERN_SUCCESS;
 }
 
@@ -315,6 +333,7 @@ apple_protect_pager_data_unlock(
  *
  * Handles page-in requests from VM.
  */
+int apple_protect_pager_data_request_debug = 0;
 kern_return_t
 apple_protect_pager_data_request(
 	memory_object_t		mem_obj,
@@ -333,13 +352,12 @@ apple_protect_pager_data_request(
 	upl_size_t		upl_size;
 	upl_page_info_t		*upl_pl;
 	unsigned int		pl_count;
-	vm_object_t		src_object, dst_object;
+	vm_object_t		src_top_object, src_page_object, dst_object;
 	kern_return_t		kr, retval;
 	vm_map_offset_t		kernel_mapping;
 	vm_offset_t		src_vaddr, dst_vaddr;
 	vm_offset_t		cur_offset;
 	vm_offset_t		offset_in_page;
-	vm_map_entry_t		map_entry;
 	kern_return_t		error_code;
 	vm_prot_t		prot;
 	vm_page_t		src_page, top_page;
@@ -350,7 +368,8 @@ apple_protect_pager_data_request(
 	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));
 
 	retval = KERN_SUCCESS;
-	src_object = VM_OBJECT_NULL;
+	src_top_object = VM_OBJECT_NULL;
+	src_page_object = VM_OBJECT_NULL;
 	kernel_mapping = 0;
 	upl = NULL;
 	upl_pl = NULL;
@@ -370,7 +389,7 @@ apple_protect_pager_data_request(
 	/*
 	 * Gather in a UPL all the VM pages requested by VM.
 	 */
-	mo_control = pager->pager_control;
+	mo_control = pager->ap_pgr_hdr.mo_control;
 
 	upl_size = length;
 	upl_flags =
@@ -382,7 +401,7 @@ apple_protect_pager_data_request(
 	pl_count = 0;
 	kr = memory_object_upl_request(mo_control,
 				       offset, upl_size,
-				       &upl, NULL, NULL, upl_flags);
+				       &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
 	if (kr != KERN_SUCCESS) {
 		retval = kr;
 		goto done;
@@ -391,17 +410,25 @@
 	assert(dst_object != VM_OBJECT_NULL);
 
+#if __x86_64__ || __arm__ || __arm64__
+	/* we'll use the 1-to-1 mapping of physical memory */
+	src_vaddr = 0;
+	dst_vaddr = 0;
+#else /* __x86_64__ || __arm__ || __arm64__ */
 	/*
 	 * Reserve 2 virtual pages in the kernel address space to map each
 	 * source and destination physical pages when it's their turn to
 	 * be processed.
 	 */
+	vm_map_entry_t		map_entry;
+
 	vm_object_reference(kernel_object);	/* ref. for mapping */
 	kr = vm_map_find_space(kernel_map,
 			       &kernel_mapping,
 			       2 * PAGE_SIZE_64,
 			       0,
 			       0,
+			       VM_MAP_KERNEL_FLAGS_NONE,
 			       &map_entry);
 	if (kr != KERN_SUCCESS) {
 		vm_object_deallocate(kernel_object);
@@ -413,15 +440,16 @@
 	vm_map_unlock(kernel_map);
 	src_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping);
 	dst_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping + PAGE_SIZE_64);
+#endif /* __x86_64__ || __arm__ || __arm64__ */
 
 	/*
 	 * We'll map the encrypted data in the kernel address space from the
 	 * backing VM object (itself backed by the encrypted file via
 	 * the vnode pager).
 	 */
-	src_object = pager->backing_object;
-	assert(src_object != VM_OBJECT_NULL);
-	vm_object_reference(src_object); /* to keep the source object alive */
+	src_top_object = pager->backing_object;
+	assert(src_top_object != VM_OBJECT_NULL);
+	vm_object_reference(src_top_object); /* keep the source object alive */
 
 	/*
 	 * Fill in the contents of the pages requested by VM.
@@ -441,16 +469,16 @@
 		/*
 		 * Map the source (encrypted) page in the kernel's
 		 * virtual address space.
-		 * We already hold a reference on the src_object.
+		 * We already hold a reference on the src_top_object.
 		 */
 	retry_src_fault:
-		vm_object_lock(src_object);
-		vm_object_paging_begin(src_object);
+		vm_object_lock(src_top_object);
+		vm_object_paging_begin(src_top_object);
 		error_code = 0;
 		prot = VM_PROT_READ;
 		src_page = VM_PAGE_NULL;
-		kr = vm_fault_page(src_object,
-				   offset + cur_offset,
+		kr = vm_fault_page(src_top_object,
+				   pager->backing_offset + offset + cur_offset,
 				   VM_PROT_READ,
 				   FALSE,
 				   FALSE, /* src_page not looked up */
@@ -477,8 +505,8 @@
 			goto done;
 		case VM_FAULT_SUCCESS_NO_VM_PAGE:
 			/* success but no VM page: fail */
-			vm_object_paging_end(src_object);
-			vm_object_unlock(src_object);
+			vm_object_paging_end(src_top_object);
+			vm_object_unlock(src_top_object);
 			/*FALLTHROUGH*/
 		case VM_FAULT_MEMORY_ERROR:
 			/* the page is not there ! */
@@ -496,29 +524,39 @@
 		assert(src_page != VM_PAGE_NULL);
 		assert(src_page->busy);
 
-		if (!src_page->active &&
-		    !src_page->inactive &&
-		    !src_page->throttled) {
+		if (( !VM_PAGE_NON_SPECULATIVE_PAGEABLE(src_page))) {
+
 			vm_page_lockspin_queues();
-			if (!src_page->active &&
-			    !src_page->inactive &&
-			    !src_page->throttled) {
+
+			if (( !VM_PAGE_NON_SPECULATIVE_PAGEABLE(src_page))) {
 				vm_page_deactivate(src_page);
 			}
 			vm_page_unlock_queues();
 		}
-
+
 		/*
 		 * Establish an explicit mapping of the source
 		 * physical page.
 		 */
-		pmap_enter(kernel_pmap,
-			   kernel_mapping,
-			   src_page->phys_page,
-			   VM_PROT_READ,
-			   VM_PROT_NONE,
-			   0,
-			   TRUE);
+#if __x86_64__
+		src_vaddr = (vm_map_offset_t)
+			PHYSMAP_PTOV((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
+				     << PAGE_SHIFT);
+#elif __arm__ || __arm64__
+		src_vaddr = (vm_map_offset_t)
+			phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
+				 << PAGE_SHIFT);
+#else
+		kr = pmap_enter(kernel_pmap,
+				src_vaddr,
+				VM_PAGE_GET_PHYS_PAGE(src_page),
+				VM_PROT_READ,
+				VM_PROT_NONE,
+				0,
+				TRUE);
+
+		assert(kr == KERN_SUCCESS);
+#endif
+
 		/*
 		 * Establish an explicit pmap mapping of the destination
 		 * physical page.
@@ -528,13 +566,53 @@
 		dst_pnum = (ppnum_t)
 			upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
 		assert(dst_pnum != 0);
-		pmap_enter(kernel_pmap,
-			   kernel_mapping + PAGE_SIZE_64,
-			   dst_pnum,
-			   VM_PROT_READ | VM_PROT_WRITE,
-			   VM_PROT_NONE,
-			   0,
-			   TRUE);
+#if __x86_64__
+		dst_vaddr = (vm_map_offset_t)
+			PHYSMAP_PTOV((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
+#elif __arm__ || __arm64__
+		dst_vaddr = (vm_map_offset_t)
+			phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
+#else
+		kr = pmap_enter(kernel_pmap,
+				dst_vaddr,
+				dst_pnum,
+				VM_PROT_READ | VM_PROT_WRITE,
+				VM_PROT_NONE,
+				0,
+				TRUE);
+
+		assert(kr == KERN_SUCCESS);
+#endif
+
+		src_page_object = VM_PAGE_OBJECT(src_page);
+
+		/*
+		 * Validate the original page...
+		 */
+		if (src_page_object->code_signed) {
+			vm_page_validate_cs_mapped(
+				src_page,
+				(const void *) src_vaddr);
+		}
+		/*
+		 * ... and transfer the results to the destination page.
+		 */
+		UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
+				     src_page->cs_validated);
+		UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
+				   src_page->cs_tainted);
+		UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
+			      src_page->cs_nx);
+
+		/*
+		 * page_decrypt() might access a mapped file, so let's release
+		 * the object lock for the source page to avoid a potential
+		 * deadlock.  The source page is kept busy and we have a
+		 * "paging_in_progress" reference on its object, so it's safe
+		 * to unlock the object here.
+		 */
+		assert(src_page->busy);
+		assert(src_page_object->paging_in_progress > 0);
+		vm_object_unlock(src_page_object);
 
 		/*
 		 * Decrypt the encrypted contents of the source page
@@ -543,16 +621,87 @@
 		for (offset_in_page = 0;
 		     offset_in_page < PAGE_SIZE;
 		     offset_in_page += 4096) {
-			ret = pager->crypt.page_decrypt((const void *)
-							(src_vaddr +
-							 offset_in_page),
-							(void *)
-							(dst_vaddr +
-							 offset_in_page),
-							(offset +
-							 cur_offset +
-							 offset_in_page),
-							pager->crypt.crypt_ops);
+			if (offset + cur_offset + offset_in_page <
+			    pager->crypto_start ||
+			    offset + cur_offset + offset_in_page >=
+			    pager->crypto_end) {
+				/* not encrypted: just copy */
+				bcopy((const char *)(src_vaddr +
+						     offset_in_page),
+				      (char *)(dst_vaddr + offset_in_page),
+				      4096);
+				if (apple_protect_pager_data_request_debug) {
+					printf("apple_protect_data_request"
+					       "(%p,0x%llx+0x%llx+0x%04llx): "
+					       "out of crypto range "
+					       "[0x%llx:0x%llx]: "
+					       "COPY [0x%016llx 0x%016llx] "
+					       "code_signed=%d "
+					       "cs_validated=%d "
+					       "cs_tainted=%d "
+					       "cs_nx=%d\n",
+					       pager,
+					       offset,
+					       (uint64_t) cur_offset,
+					       (uint64_t) offset_in_page,
+					       pager->crypto_start,
+					       pager->crypto_end,
+					       *(uint64_t *)(dst_vaddr+
+							     offset_in_page),
+					       *(uint64_t *)(dst_vaddr+
+							     offset_in_page+8),
+					       src_page_object->code_signed,
+					       src_page->cs_validated,
+					       src_page->cs_tainted,
+					       src_page->cs_nx);
+				}
+				ret = 0;
+				continue;
+			}
+			ret = pager->crypt_info->page_decrypt(
+				(const void *)(src_vaddr + offset_in_page),
+				(void *)(dst_vaddr + offset_in_page),
+				((pager->crypto_backing_offset -
+				  pager->crypto_start) +	/* XXX ? */
+				 offset +
+				 cur_offset +
+				 offset_in_page),
+				pager->crypt_info->crypt_ops);
+			if (apple_protect_pager_data_request_debug) {
+				printf("apple_protect_data_request"
+				       "(%p,0x%llx+0x%llx+0x%04llx): "
+				       "in crypto range [0x%llx:0x%llx]: "
+				       "DECRYPT offset 0x%llx="
+				       "(0x%llx-0x%llx+0x%llx+0x%llx+0x%04llx)"
+				       "[0x%016llx 0x%016llx] "
+				       "code_signed=%d "
+				       "cs_validated=%d "
+				       "cs_tainted=%d "
+				       "cs_nx=%d "
+				       "ret=0x%x\n",
+				       pager,
+				       offset,
+				       (uint64_t) cur_offset,
+				       (uint64_t) offset_in_page,
+				       pager->crypto_start, pager->crypto_end,
+				       ((pager->crypto_backing_offset -
+					 pager->crypto_start) +
+					offset +
+					cur_offset +
+					offset_in_page),
+				       pager->crypto_backing_offset,
+				       pager->crypto_start,
+				       offset,
+				       (uint64_t) cur_offset,
+				       (uint64_t) offset_in_page,
+				       *(uint64_t *)(dst_vaddr+offset_in_page),
+				       *(uint64_t *)(dst_vaddr+offset_in_page+8),
+				       src_page_object->code_signed,
+				       src_page->cs_validated,
+				       src_page->cs_tainted,
+				       src_page->cs_nx,
+				       ret);
+			}
 			if (ret) {
 				break;
 			}
@@ -562,24 +711,18 @@
 			 * Decryption failed.  Abort the fault.
 			 */
 			retval = KERN_ABORTED;
-		} else {
-			/*
-			 * Validate the original page...
-			 */
-			if (src_page->object->code_signed) {
-				vm_page_validate_cs_mapped(
-					src_page,
-					(const void *) src_vaddr);
-			}
-			/*
-			 * ... and transfer the results to the destination page.
-			 */
-			UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
-					     src_page->cs_validated);
-			UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
-					   src_page->cs_tainted);
 		}
-
+
+		assert(VM_PAGE_OBJECT(src_page) == src_page_object);
+		assert(src_page->busy);
+		assert(src_page_object->paging_in_progress > 0);
+		vm_object_lock(src_page_object);
+
+#if __x86_64__ || __arm__ || __arm64__
+		/* we used the 1-to-1 mapping of physical memory */
+		src_vaddr = 0;
+		dst_vaddr = 0;
+#else /* __x86_64__ || __arm__ || __arm64__ */
 		/*
 		 * Remove the pmap mapping of the source and destination pages
 		 * in the kernel.
@@ -587,21 +730,43 @@
 		pmap_remove(kernel_pmap,
 			    (addr64_t) kernel_mapping,
 			    (addr64_t) (kernel_mapping +
 					(2 * PAGE_SIZE_64)));
+#endif /* __x86_64__ || __arm__ || __arm64__ */
 
 		/*
 		 * Cleanup the result of vm_fault_page() of the source page.
 		 */
-		PAGE_WAKEUP_DONE(src_page);
-		vm_object_paging_end(src_page->object);
-		vm_object_unlock(src_page->object);
+		if (retval == KERN_SUCCESS &&
+		    src_page->busy &&
+		    !VM_PAGE_WIRED(src_page) &&
+		    !src_page->dirty &&
+		    !src_page->precious &&
+		    !src_page->laundry &&
+		    !src_page->cleaning) {
+			int refmod_state;
+
+			refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(src_page));
+
+			if (refmod_state & VM_MEM_MODIFIED) {
+				SET_PAGE_DIRTY(src_page, FALSE);
+			}
+			if (!src_page->dirty) {
+				vm_page_free_unlocked(src_page, TRUE);
+				src_page = VM_PAGE_NULL;
+			} else {
+				PAGE_WAKEUP_DONE(src_page);
+			}
+		} else {
+			PAGE_WAKEUP_DONE(src_page);
+		}
+		src_page = VM_PAGE_NULL;
+		vm_object_paging_end(src_page_object);
+		vm_object_unlock(src_page_object);
 
 		if (top_page != VM_PAGE_NULL) {
-			vm_object_t	top_object;
-
-			top_object = top_page->object;
-			vm_object_lock(top_object);
+			assert(VM_PAGE_OBJECT(top_page) == src_top_object);
+			vm_object_lock(src_top_object);
 			VM_PAGE_FREE(top_page);
-			vm_object_paging_end(top_object);
-			vm_object_unlock(top_object);
+			vm_object_paging_end(src_top_object);
+			vm_object_unlock(src_top_object);
 		}
 	}
 
@@ -670,8 +835,8 @@ done:
 		src_vaddr = 0;
 		dst_vaddr = 0;
 	}
-	if (src_object != VM_OBJECT_NULL) {
-		vm_object_deallocate(src_object);
+	if (src_top_object != VM_OBJECT_NULL) {
+		vm_object_deallocate(src_top_object);
 	}
 
 	return retval;
@@ -746,13 +911,19 @@ apple_protect_pager_terminate_internal(
 		vm_object_deallocate(pager->backing_object);
 		pager->backing_object = VM_OBJECT_NULL;
 	}
-
-	/* deallocate any crypt module data */
-	if(pager->crypt.crypt_end)
-		pager->crypt.crypt_end(pager->crypt.crypt_ops);
+
+	/* one less pager using this "pager_crypt_info" */
+#if CRYPT_INFO_DEBUG
+	printf("CRYPT_INFO %s: deallocate %p ref %d\n",
+	       __FUNCTION__,
+	       pager->crypt_info,
+	       pager->crypt_info->crypt_refcnt);
+#endif /* CRYPT_INFO_DEBUG */
+	crypt_info_deallocate(pager->crypt_info);
+	pager->crypt_info = NULL;
 
 	/* trigger the destruction of the memory object */
-	memory_object_destroy(pager->pager_control, 0);
+	memory_object_destroy(pager->ap_pgr_hdr.mo_control, 0);
 }
 
 /*
@@ -804,9 +975,9 @@ apple_protect_pager_deallocate_internal(
 		 * pager structure.
 		 */
 		lck_mtx_unlock(&apple_protect_pager_lock);
-		if (pager->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
-			memory_object_control_deallocate(pager->pager_control);
-			pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
+		if (pager->ap_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
+			memory_object_control_deallocate(pager->ap_pgr_hdr.mo_control);
+			pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
 		}
 		kfree(pager, sizeof (*pager));
 		pager = APPLE_PROTECT_PAGER_NULL;
@@ -858,21 +1029,13 @@
  */
 kern_return_t
 apple_protect_pager_synchronize(
-	memory_object_t		mem_obj,
-	memory_object_offset_t	offset,
-	memory_object_size_t	length,
+	__unused memory_object_t	mem_obj,
+	__unused memory_object_offset_t	offset,
+	__unused memory_object_size_t	length,
 	__unused vm_sync_t		sync_flags)
 {
-	apple_protect_pager_t	pager;
-
-	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_synchronize: %p\n", mem_obj));
-
-	pager = apple_protect_pager_lookup(mem_obj);
-
-	memory_object_synchronize_completed(pager->pager_control,
-					    offset, length);
-
-	return KERN_SUCCESS;
+	panic("apple_protect_pager_synchronize: memory_object_synchronize no longer supported\n");
+	return KERN_FAILURE;
 }
 
 /*
@@ -961,20 +1124,25 @@ apple_protect_pager_lookup(
 {
 	apple_protect_pager_t	pager;
 
+	assert(mem_obj->mo_pager_ops == &apple_protect_pager_ops);
 	pager = (apple_protect_pager_t) mem_obj;
-	assert(pager->pager_ops == &apple_protect_pager_ops);
 	assert(pager->ref_count > 0);
 	return pager;
 }
 
 apple_protect_pager_t
 apple_protect_pager_create(
-	vm_object_t	backing_object,
-	struct pager_crypt_info *crypt_info)
+	vm_object_t		backing_object,
+	vm_object_offset_t	backing_offset,
+	vm_object_offset_t	crypto_backing_offset,
+	struct pager_crypt_info *crypt_info,
+	vm_object_offset_t	crypto_start,
+	vm_object_offset_t	crypto_end)
 {
 	apple_protect_pager_t	pager, pager2;
 	memory_object_control_t	control;
 	kern_return_t		kr;
+	struct pager_crypt_info *old_crypt_info;
 
 	pager = (apple_protect_pager_t) kalloc(sizeof (*pager));
 	if (pager == APPLE_PROTECT_PAGER_NULL) {
@@ -988,35 +1156,92 @@ apple_protect_pager_create(
 	 * we reserve the first word in the object for a fake ip_kotype
 	 * setting - that will tell vm_map to use it as a memory object.
 	 */
-	pager->pager_ops = &apple_protect_pager_ops;
-	pager->pager_ikot = IKOT_MEMORY_OBJECT;
+	pager->ap_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
+	pager->ap_pgr_hdr.mo_pager_ops = &apple_protect_pager_ops;
+	pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
+
 	pager->is_ready = FALSE;/* not ready until it has a "name" */
-	pager->ref_count = 2;	/* existence + setup reference */
+	pager->ref_count = 1;	/* existence reference (for the cache) */
+	pager->ref_count++;	/* for the caller */
 	pager->is_mapped = FALSE;
-	pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
 	pager->backing_object = backing_object;
-	pager->crypt = *crypt_info;
+	pager->backing_offset = backing_offset;
+	pager->crypto_backing_offset = crypto_backing_offset;
+	pager->crypto_start = crypto_start;
+	pager->crypto_end = crypto_end;
+	pager->crypt_info = crypt_info; /* allocated by caller */
+
+#if CRYPT_INFO_DEBUG
+	printf("CRYPT_INFO %s: crypt_info %p [%p,%p,%p,%d]\n",
+	       __FUNCTION__,
+	       crypt_info,
+	       crypt_info->page_decrypt,
+	       crypt_info->crypt_end,
+	       crypt_info->crypt_ops,
+	       crypt_info->crypt_refcnt);
+#endif /* CRYPT_INFO_DEBUG */
 
 	vm_object_reference(backing_object);
 
+	old_crypt_info = NULL;
+
 	lck_mtx_lock(&apple_protect_pager_lock);
 	/* see if anyone raced us to create a pager for the same object */
 	queue_iterate(&apple_protect_pager_queue,
 		      pager2,
 		      apple_protect_pager_t,
 		      pager_queue) {
-		if (pager2->backing_object == backing_object) {
+		if ((pager2->crypt_info->page_decrypt !=
+		     crypt_info->page_decrypt) ||
+		    (pager2->crypt_info->crypt_end !=
+		     crypt_info->crypt_end) ||
+		    (pager2->crypt_info->crypt_ops !=
+		     crypt_info->crypt_ops)) {
+			/* crypt_info contents do not match: next pager */
+			continue;
+		}
+
+		/* found a match for crypt_info ... */
+		if (old_crypt_info) {
+			/* ... already switched to that crypt_info */
+			assert(old_crypt_info == pager2->crypt_info);
+		} else if (pager2->crypt_info != crypt_info) {
+			/* ... switch to that pager's crypt_info */
+#if CRYPT_INFO_DEBUG
+			printf("CRYPT_INFO %s: reference %p ref %d "
+			       "(create match)\n",
+			       __FUNCTION__,
+			       pager2->crypt_info,
+			       pager2->crypt_info->crypt_refcnt);
+#endif /* CRYPT_INFO_DEBUG */
+			old_crypt_info = pager2->crypt_info;
+			crypt_info_reference(old_crypt_info);
+			pager->crypt_info = old_crypt_info;
+		}
+
+		if (pager2->backing_object == backing_object &&
+		    pager2->backing_offset == backing_offset &&
+		    pager2->crypto_backing_offset == crypto_backing_offset &&
+		    pager2->crypto_start == crypto_start &&
+		    pager2->crypto_end == crypto_end) {
+			/* full match: use that pager */
 			break;
 		}
 	}
 	if (! queue_end(&apple_protect_pager_queue,
 			(queue_entry_t) pager2)) {
-		/* while we hold the lock, transfer our setup ref to winner */
-		pager2->ref_count++;
 		/* we lost the race, down with the loser... */
 		lck_mtx_unlock(&apple_protect_pager_lock);
 		vm_object_deallocate(pager->backing_object);
 		pager->backing_object = VM_OBJECT_NULL;
+#if CRYPT_INFO_DEBUG
+		printf("CRYPT_INFO %s: %p ref %d (create pager match)\n",
+		       __FUNCTION__,
+		       pager->crypt_info,
+		       pager->crypt_info->crypt_refcnt);
+#endif /* CRYPT_INFO_DEBUG */
+		crypt_info_deallocate(pager->crypt_info);
+		pager->crypt_info = NULL;
 		kfree(pager, sizeof (*pager));
 		/* ... and go with the winner */
 		pager = pager2;
@@ -1048,6 +1273,20 @@ apple_protect_pager_create(
 	/* wakeup anyone waiting for this pager to be ready */
 	thread_wakeup(&pager->is_ready);
 
+	if (old_crypt_info != NULL &&
+	    old_crypt_info != crypt_info) {
+		/* we re-used an old crypt_info instead of using our new one */
+#if CRYPT_INFO_DEBUG
+		printf("CRYPT_INFO %s: deallocate %p ref %d "
+		       "(create used old)\n",
+		       __FUNCTION__,
+		       crypt_info,
+		       crypt_info->crypt_refcnt);
+#endif /* CRYPT_INFO_DEBUG */
+		crypt_info_deallocate(crypt_info);
+		crypt_info = NULL;
+	}
+
 	return pager;
 }
 
@@ -1060,10 +1299,27 @@ apple_protect_pager_create(
  */
 memory_object_t
 apple_protect_pager_setup(
-	vm_object_t	backing_object,
-	struct pager_crypt_info *crypt_info)
+	vm_object_t		backing_object,
+	vm_object_offset_t	backing_offset,
+	vm_object_offset_t	crypto_backing_offset,
+	struct pager_crypt_info *crypt_info,
+	vm_object_offset_t	crypto_start,
+	vm_object_offset_t	crypto_end)
 {
 	apple_protect_pager_t	pager;
+	struct pager_crypt_info *old_crypt_info, *new_crypt_info;
+
+#if CRYPT_INFO_DEBUG
+	printf("CRYPT_INFO %s: crypt_info=%p [%p,%p,%p,%d]\n",
+	       __FUNCTION__,
+	       crypt_info,
+	       crypt_info->page_decrypt,
+	       crypt_info->crypt_end,
+	       crypt_info->crypt_ops,
+	       crypt_info->crypt_refcnt);
+#endif /* CRYPT_INFO_DEBUG */
+
+	old_crypt_info = NULL;
 
 	lck_mtx_lock(&apple_protect_pager_lock);
@@ -1071,35 +1327,146 @@ apple_protect_pager_setup(
 		      pager,
 		      apple_protect_pager_t,
 		      pager_queue) {
-		if (pager->backing_object == backing_object) {
-			/* For the same object we must always use the same protection options */
-			if (!((pager->crypt.page_decrypt == crypt_info->page_decrypt) &&
-			      (pager->crypt.crypt_ops == crypt_info->crypt_ops) )) {
-				lck_mtx_unlock(&apple_protect_pager_lock);
-				return MEMORY_OBJECT_NULL;
-			}
+		if ((pager->crypt_info->page_decrypt !=
+		     crypt_info->page_decrypt) ||
+		    (pager->crypt_info->crypt_end !=
+		     crypt_info->crypt_end) ||
+		    (pager->crypt_info->crypt_ops !=
+		     crypt_info->crypt_ops)) {
+			/* no match for "crypt_info": next pager */
+			continue;
+		}
+		/* found a match for crypt_info ... */
+		if (old_crypt_info) {
+			/* ... already switched to that crypt_info */
+			assert(old_crypt_info == pager->crypt_info);
+		} else {
+			/* ... switch to that pager's crypt_info */
+			old_crypt_info = pager->crypt_info;
+#if CRYPT_INFO_DEBUG
+			printf("CRYPT_INFO %s: "
+			       "switching crypt_info from %p [%p,%p,%p,%d] "
+			       "to %p [%p,%p,%p,%d] from pager %p\n",
+			       __FUNCTION__,
+			       crypt_info,
+			       crypt_info->page_decrypt,
+			       crypt_info->crypt_end,
+			       crypt_info->crypt_ops,
+			       crypt_info->crypt_refcnt,
+			       old_crypt_info,
+			       old_crypt_info->page_decrypt,
+			       old_crypt_info->crypt_end,
+			       old_crypt_info->crypt_ops,
+			       old_crypt_info->crypt_refcnt,
+			       pager);
+			printf("CRYPT_INFO %s: %p ref %d (setup match)\n",
+			       __FUNCTION__,
+			       pager->crypt_info,
+			       pager->crypt_info->crypt_refcnt);
+#endif /* CRYPT_INFO_DEBUG */
+			crypt_info_reference(pager->crypt_info);
+		}
+
+		if (pager->backing_object == backing_object &&
+		    pager->backing_offset == backing_offset &&
+		    pager->crypto_backing_offset == crypto_backing_offset &&
+		    pager->crypto_start == crypto_start &&
+		    pager->crypto_end == crypto_end) {
+			/* full match: use that pager! */
+			assert(old_crypt_info == pager->crypt_info);
+			assert(old_crypt_info->crypt_refcnt > 1);
+#if CRYPT_INFO_DEBUG
+			printf("CRYPT_INFO %s: "
+			       "pager match with %p crypt_info %p\n",
+			       __FUNCTION__,
+			       pager,
+			       pager->crypt_info);
+			printf("CRYPT_INFO %s: deallocate %p ref %d "
+			       "(pager match)\n",
+			       __FUNCTION__,
+			       old_crypt_info,
+			       old_crypt_info->crypt_refcnt);
+#endif /* CRYPT_INFO_DEBUG */
+			/* release the extra ref on crypt_info we got above */
+			crypt_info_deallocate(old_crypt_info);
+			assert(old_crypt_info->crypt_refcnt > 0);
+			/* give extra reference on pager to the caller */
+			assert(pager->ref_count > 0);
+			pager->ref_count++;
 			break;
 		}
 	}
 	if (queue_end(&apple_protect_pager_queue,
 		      (queue_entry_t) pager)) {
+		lck_mtx_unlock(&apple_protect_pager_lock);
 		/* no existing pager for this backing object */
 		pager = APPLE_PROTECT_PAGER_NULL;
-	} else {
-		/* make sure pager doesn't disappear */
-		pager->ref_count++;
-	}
-
-	lck_mtx_unlock(&apple_protect_pager_lock);
-
-	if (pager == APPLE_PROTECT_PAGER_NULL) {
-		pager = apple_protect_pager_create(backing_object, crypt_info);
+		if (old_crypt_info) {
+			/* use this old crypt_info for new pager */
+			new_crypt_info = old_crypt_info;
+#if CRYPT_INFO_DEBUG
+			printf("CRYPT_INFO %s: "
+			       "will use old_crypt_info %p for new pager\n",
+			       __FUNCTION__,
+			       old_crypt_info);
+#endif /* CRYPT_INFO_DEBUG */
+		} else {
+			/* allocate a new crypt_info for new pager */
+			new_crypt_info = kalloc(sizeof (*new_crypt_info));
+			*new_crypt_info = *crypt_info;
+			new_crypt_info->crypt_refcnt = 1;
+#if CRYPT_INFO_DEBUG
+			printf("CRYPT_INFO %s: "
+			       "will use new_crypt_info %p for new pager\n",
+			       __FUNCTION__,
+			       new_crypt_info);
+#endif /* CRYPT_INFO_DEBUG */
+		}
+		if (new_crypt_info == NULL) {
+			/* can't create new pager without a crypt_info */
+		} else {
+			/* create new pager */
+			pager = apple_protect_pager_create(
+				backing_object,
+				backing_offset,
+				crypto_backing_offset,
+				new_crypt_info,
+				crypto_start,
+				crypto_end);
+		}
 		if (pager == APPLE_PROTECT_PAGER_NULL) {
+			/* could not create a new pager */
+			if (new_crypt_info == old_crypt_info) {
+				/* release extra reference on old_crypt_info */
+#if CRYPT_INFO_DEBUG
+				printf("CRYPT_INFO %s: deallocate %p ref %d "
+				       "(create fail old_crypt_info)\n",
+				       __FUNCTION__,
+				       old_crypt_info,
+				       old_crypt_info->crypt_refcnt);
+#endif /* CRYPT_INFO_DEBUG */
+				crypt_info_deallocate(old_crypt_info);
+				old_crypt_info = NULL;
+			} else {
+				/* release unused new_crypt_info */
+				assert(new_crypt_info->crypt_refcnt == 1);
+#if CRYPT_INFO_DEBUG
+				printf("CRYPT_INFO %s: deallocate %p ref %d "
+				       "(create fail new_crypt_info)\n",
+				       __FUNCTION__,
+				       new_crypt_info,
+				       new_crypt_info->crypt_refcnt);
+#endif /* CRYPT_INFO_DEBUG */
+				crypt_info_deallocate(new_crypt_info);
+				new_crypt_info = NULL;
+			}
 			return MEMORY_OBJECT_NULL;
 		}
+		lck_mtx_lock(&apple_protect_pager_lock);
+	} else {
+		assert(old_crypt_info == pager->crypt_info);
 	}
 
-	lck_mtx_lock(&apple_protect_pager_lock);
 	while (!pager->is_ready) {
 		lck_mtx_sleep(&apple_protect_pager_lock,
 			LCK_SLEEP_DEFAULT,
@@ -1183,3 +1550,47 @@ apple_protect_pager_trim(void)
 		apple_protect_pager_terminate_internal(pager);
 	}
 }
+
+
+void
+crypt_info_reference(
+	struct pager_crypt_info *crypt_info)
+{
+	assert(crypt_info->crypt_refcnt != 0);
+#if CRYPT_INFO_DEBUG
+	printf("CRYPT_INFO %s: %p ref %d -> %d\n",
+	       __FUNCTION__,
+	       crypt_info,
+	       crypt_info->crypt_refcnt,
+	       crypt_info->crypt_refcnt + 1);
+#endif /* CRYPT_INFO_DEBUG */
+	OSAddAtomic(+1, &crypt_info->crypt_refcnt);
+}
+
+void
+crypt_info_deallocate(
+	struct pager_crypt_info *crypt_info)
+{
+#if CRYPT_INFO_DEBUG
+	printf("CRYPT_INFO %s: %p ref %d -> %d\n",
+	       __FUNCTION__,
+	       crypt_info,
+	       crypt_info->crypt_refcnt,
+	       crypt_info->crypt_refcnt - 1);
+#endif /* CRYPT_INFO_DEBUG */
+	OSAddAtomic(-1, &crypt_info->crypt_refcnt);
+	if (crypt_info->crypt_refcnt == 0) {
+		/* deallocate any crypt module data */
+		if (crypt_info->crypt_end) {
+			crypt_info->crypt_end(crypt_info->crypt_ops);
+			crypt_info->crypt_end = NULL;
+		}
+#if CRYPT_INFO_DEBUG
+		printf("CRYPT_INFO %s: freeing %p\n",
+		       __FUNCTION__,
+		       crypt_info);
+#endif /* CRYPT_INFO_DEBUG */
+		kfree(crypt_info, sizeof (*crypt_info));
+		crypt_info = NULL;
+	}
+}
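
A few aspects of this change are easier to see outside the diff. First, the new page-in path confines decryption to the [crypto_start, crypto_end) window of the mapping: 4KB subpages outside that window are bcopy()'d through untouched, while subpages inside it are decrypted at a file-relative offset rebased by (crypto_backing_offset - crypto_start). The following stand-alone C sketch mirrors that dispatch; the names (range_pager, fill_subpage, xor_decrypt) are illustrative stand-ins, not xnu code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SUBPAGE 4096

/* Abbreviated, illustrative stand-in for the pager fields used above. */
struct range_pager {
	uint64_t crypto_start;          /* first mapped offset to decrypt */
	uint64_t crypto_end;            /* first mapped offset past the range */
	uint64_t crypto_backing_offset; /* file offset backing crypto_start */
	int (*page_decrypt)(const void *src, void *dst,
	    uint64_t file_offset, void *crypt_ops);
	void *crypt_ops;
};

/* Toy "crypter": XOR with a byte derived from the file offset. */
static int
xor_decrypt(const void *src, void *dst, uint64_t file_offset, void *crypt_ops)
{
	const uint8_t *s = src;
	uint8_t *d = dst;

	(void)crypt_ops;
	for (size_t i = 0; i < SUBPAGE; i++) {
		d[i] = s[i] ^ (uint8_t)(file_offset >> 12);
	}
	return 0;
}

/* Fill one 4KB subpage: copy outside the crypto range, decrypt inside it. */
static int
fill_subpage(const struct range_pager *pager, uint64_t mapped_offset,
    const void *src, void *dst)
{
	if (mapped_offset < pager->crypto_start ||
	    mapped_offset >= pager->crypto_end) {
		/* not encrypted: just copy, like the pager's bcopy() path */
		memcpy(dst, src, SUBPAGE);
		return 0;
	}
	/*
	 * Rebase the mapped offset to a file offset before decrypting,
	 * mirroring the pager's (crypto_backing_offset - crypto_start) +
	 * offset + cur_offset + offset_in_page expression.
	 */
	return pager->page_decrypt(src, dst,
	    (pager->crypto_backing_offset - pager->crypto_start) +
	    mapped_offset,
	    pager->crypt_ops);
}

int
main(void)
{
	static uint8_t src[SUBPAGE], dst[SUBPAGE];
	struct range_pager pager = {
		.crypto_start = 0x4000,
		.crypto_end = 0x8000,
		.crypto_backing_offset = 0x10000,
		.page_decrypt = xor_decrypt,
		.crypt_ops = NULL,
	};

	/* 0x2000 is outside [0x4000, 0x8000): copied through. */
	printf("copy path: %d\n", fill_subpage(&pager, 0x2000, src, dst));
	/* 0x5000 is inside: decrypted at file offset 0x11000. */
	printf("decrypt path: %d\n", fill_subpage(&pager, 0x5000, src, dst));
	return 0;
}

The 0x5000 subpage lands at file offset 0x10000 - 0x4000 + 0x5000 = 0x11000, the same arithmetic the pager's page_decrypt call (and its DECRYPT debug printf) performs.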
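
Second, apple_protect_pager_create() resolves creation races by allocating a candidate pager first and only then re-scanning the pager queue under apple_protect_pager_lock; a loser frees its candidate (and its crypt_info reference) and adopts the winner, which now matches on the full (backing_object, offsets, crypto range, crypt_info) key rather than on backing_object alone. A minimal user-space sketch of that pattern, with pthreads standing in for the kernel lock and a hypothetical two-field key:

#include <pthread.h>
#include <stdlib.h>

/*
 * Hypothetical pager record; the real key also covers the crypto range
 * and the crypt_info contents.
 */
struct pager {
	struct pager *next;
	const void *backing_object;
	unsigned long long backing_offset;
	int ref_count;
};

static pthread_mutex_t pager_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pager *pager_list;

/*
 * Allocate a candidate first, then re-check for a racing insert under
 * the lock: the loser frees its candidate and adopts the winner.
 */
static struct pager *
pager_setup(const void *backing_object, unsigned long long backing_offset)
{
	struct pager *candidate, *p;

	candidate = calloc(1, sizeof (*candidate));
	if (candidate == NULL) {
		return NULL;
	}
	candidate->backing_object = backing_object;
	candidate->backing_offset = backing_offset;
	candidate->ref_count = 2;	/* list reference + caller reference */

	pthread_mutex_lock(&pager_lock);
	for (p = pager_list; p != NULL; p = p->next) {
		if (p->backing_object == backing_object &&
		    p->backing_offset == backing_offset) {
			/* full match: we lost the race, go with the winner */
			p->ref_count++;		/* reference for our caller */
			pthread_mutex_unlock(&pager_lock);
			free(candidate);	/* down with the loser... */
			return p;
		}
	}
	/* no match: publish our candidate while still holding the lock */
	candidate->next = pager_list;
	pager_list = candidate;
	pthread_mutex_unlock(&pager_lock);
	return candidate;
}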
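
Third, the new crypt_info_reference()/crypt_info_deallocate() pair lets several pagers share one pager_crypt_info: the count starts at 1 for the allocating caller, each sharing pager takes a reference, and the last release runs the module's crypt_end() destructor before freeing the structure. The same lifecycle in portable C11, with stdatomic standing in for OSAddAtomic and an abbreviated struct:

#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

/* Abbreviated stand-in for xnu's struct pager_crypt_info. */
struct crypt_info {
	void (*crypt_end)(void *crypt_ops); /* module destructor, may be NULL */
	void *crypt_ops;                    /* opaque crypt module state */
	atomic_int crypt_refcnt;
};

static void
crypt_info_ref(struct crypt_info *ci)
{
	/* taking a reference requires already holding one */
	assert(atomic_load(&ci->crypt_refcnt) != 0);
	atomic_fetch_add(&ci->crypt_refcnt, 1);
}

static void
crypt_info_unref(struct crypt_info *ci)
{
	/* the previous value tells exactly one caller it dropped the last ref */
	if (atomic_fetch_sub(&ci->crypt_refcnt, 1) == 1) {
		if (ci->crypt_end != NULL) {
			/* deallocate any crypt module data */
			ci->crypt_end(ci->crypt_ops);
		}
		free(ci);
	}
}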
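One caveat about this sketch versus the kernel code: atomic_fetch_sub() (like OSAddAtomic()) returns the counter's previous value, so the sketch frees exactly when that value was 1. crypt_info_deallocate() instead re-reads crypt_refcnt after the decrement, which is equivalent only if two holders never drop their last references concurrently; presumably apple_protect_pager_lock and the pagers' own reference counting rule that case out.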