X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/4452a7af2eac33dbad800bcc91f2399d62c18f53..db6096698656d32db7df630594bd9617ee54f828:/osfmk/vm/vm_apple_protect.c
diff --git a/osfmk/vm/vm_apple_protect.c b/osfmk/vm/vm_apple_protect.c
index 41f70d9c9..a98fdbb3a 100644
--- a/osfmk/vm/vm_apple_protect.c
+++ b/osfmk/vm/vm_apple_protect.c
@@ -52,6 +52,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -83,15 +84,16 @@ void apple_protect_pager_reference(memory_object_t mem_obj);
 void apple_protect_pager_deallocate(memory_object_t mem_obj);
 kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
 				       memory_object_control_t control,
-				       vm_size_t pg_size);
+				       memory_object_cluster_size_t pg_size);
 kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
 kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
 					       memory_object_offset_t offset,
-					       vm_size_t length,
-					       vm_prot_t protection_required);
+					       memory_object_cluster_size_t length,
+					       vm_prot_t protection_required,
+					       memory_object_fault_info_t fault_info);
 kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
 					      memory_object_offset_t offset,
-					      vm_size_t data_cnt,
+					      memory_object_cluster_size_t data_cnt,
 					      memory_object_offset_t *resid_offset,
 					      int *io_error,
 					      boolean_t dirty,
@@ -99,16 +101,18 @@ kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
 					      int upl_flags);
 kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
 						  memory_object_offset_t offset,
-						  vm_size_t data_cnt);
+						  memory_object_cluster_size_t data_cnt);
 kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj,
 					      memory_object_offset_t offset,
-					      vm_size_t size,
+					      memory_object_size_t size,
 					      vm_prot_t desired_access);
 kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
 					      memory_object_offset_t offset,
-					      vm_size_t length,
+					      memory_object_size_t length,
 					      vm_sync_t sync_flags);
-kern_return_t apple_protect_pager_unmap(memory_object_t mem_obj);
+kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
+				      vm_prot_t prot);
+kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);
 
 /*
  * Vector of VM operations for this EMM.
@@ -124,7 +128,9 @@ const struct memory_object_pager_ops apple_protect_pager_ops = {
 	apple_protect_pager_data_initialize,
 	apple_protect_pager_data_unlock,
 	apple_protect_pager_synchronize,
-	apple_protect_pager_unmap,
+	apple_protect_pager_map,
+	apple_protect_pager_last_unmap,
+	NULL, /* data_reclaim */
 	"apple protect pager"
 };
 
@@ -133,16 +139,18 @@ const struct memory_object_pager_ops apple_protect_pager_ops = {
  * the "apple protect" EMM.
  */
typedef struct apple_protect_pager {
+	struct ipc_object_header pager_header;	/* fake ip_kotype() */
 	memory_object_pager_ops_t pager_ops;	/* == &apple_protect_pager_ops */
-	unsigned int		pager_ikot;	/* JMM: fake ip_kotype() */
 	queue_chain_t		pager_queue;	/* next & prev pagers */
 	unsigned int		ref_count;	/* reference count */
 	boolean_t		is_ready;	/* is this pager ready ? */
 	boolean_t		is_mapped;	/* is this mem_obj mapped ? */
 	memory_object_control_t pager_control;	/* mem object control handle */
 	vm_object_t		backing_object; /* VM obj w/ encrypted data */
+	struct pager_crypt_info crypt;
 } *apple_protect_pager_t;
 #define	APPLE_PROTECT_PAGER_NULL	((apple_protect_pager_t) NULL)
+#define pager_ikot pager_header.io_bits
 
 /*
  * List of memory objects managed by this EMM.
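Note on the new crypt field: the diff only shows how struct pager_crypt_info is consumed -- page_decrypt() is called once per page with the object offset, and crypt_end() is called when the pager is terminated. The stand-alone sketch below illustrates that contract with a trivial XOR provider; the struct layout and every name in it are assumptions for illustration, not the kernel's definitions.

/*
 * Illustrative sketch only: the crypt-provider contract as it appears
 * from the call sites later in this diff.  All names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SKETCH_PAGE_SIZE 4096

struct crypt_info_sketch {
	/* called once per page; non-zero return aborts the page-in */
	int  (*page_decrypt)(const void *src, void *dst,
			     unsigned long long offset, void *crypt_ops);
	/* called when the pager is terminated */
	void (*crypt_end)(void *crypt_ops);
	void *crypt_ops;		/* provider-private state */
};

static int
xor_page_decrypt(const void *src, void *dst,
		 unsigned long long offset, void *crypt_ops)
{
	const uint8_t *s = src;
	uint8_t *d = dst;
	uint8_t key = *(const uint8_t *)crypt_ops;

	(void)offset;			/* a real provider would use it */
	for (size_t i = 0; i < SKETCH_PAGE_SIZE; i++)
		d[i] = s[i] ^ key;	/* "decrypt" one page */
	return 0;
}

static void
xor_crypt_end(void *crypt_ops)
{
	free(crypt_ops);		/* release provider state */
}

int
main(void)
{
	static uint8_t src[SKETCH_PAGE_SIZE], dst[SKETCH_PAGE_SIZE];
	struct crypt_info_sketch ci;

	ci.crypt_ops = malloc(1);
	*(uint8_t *)ci.crypt_ops = 0x5a;
	ci.page_decrypt = xor_page_decrypt;
	ci.crypt_end = xor_crypt_end;

	src[0] = 0x5a;			/* "encrypted" byte */
	if (ci.page_decrypt(src, dst, 0, ci.crypt_ops) == 0)
		printf("dst[0] = 0x%02x\n", dst[0]);	/* prints 0x00 */
	ci.crypt_end(ci.crypt_ops);
	return 0;
}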
@@ -151,7 +159,7 @@ typedef struct apple_protect_pager {
 int apple_protect_pager_count = 0;		/* number of pagers */
 int apple_protect_pager_count_mapped = 0;	/* number of unmapped pagers */
 queue_head_t apple_protect_pager_queue;
-decl_mutex_data(,apple_protect_pager_lock)
+decl_lck_mtx_data(,apple_protect_pager_lock)
 
 /*
  * Maximum number of unmapped pagers we're willing to keep around.
@@ -166,8 +174,15 @@ int apple_protect_pager_count_unmapped_max = 0;
 int apple_protect_pager_num_trim_max = 0;
 int apple_protect_pager_num_trim_total = 0;
 
+
+lck_grp_t		apple_protect_pager_lck_grp;
+lck_grp_attr_t		apple_protect_pager_lck_grp_attr;
+lck_attr_t		apple_protect_pager_lck_attr;
+
+
 /* internal prototypes */
-apple_protect_pager_t apple_protect_pager_create(vm_object_t backing_object);
+apple_protect_pager_t apple_protect_pager_create(vm_object_t backing_object,
+						 struct pager_crypt_info *crypt_info);
 apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
 void apple_protect_pager_dequeue(apple_protect_pager_t pager);
 void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
@@ -196,7 +211,10 @@ int apple_protect_pagerdebug = 0;
 void
 apple_protect_pager_bootstrap(void)
 {
-	mutex_init(&apple_protect_pager_lock, 0);
+	lck_grp_attr_setdefault(&apple_protect_pager_lck_grp_attr);
+	lck_grp_init(&apple_protect_pager_lck_grp, "apple_protect", &apple_protect_pager_lck_grp_attr);
+	lck_attr_setdefault(&apple_protect_pager_lck_attr);
+	lck_mtx_init(&apple_protect_pager_lock, &apple_protect_pager_lck_grp, &apple_protect_pager_lck_attr);
 	queue_init(&apple_protect_pager_queue);
 }
 
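The rest of the diff repeatedly applies the mutex -> lck_mtx conversion introduced here. Collected in one place, the pattern looks like this (every call below is taken from hunks in this diff; a lck_mtx_t needs a lock group and attributes, both set up once at bootstrap):

/* one-time bring-up, as in apple_protect_pager_bootstrap() above */
lck_grp_attr_setdefault(&apple_protect_pager_lck_grp_attr);
lck_grp_init(&apple_protect_pager_lck_grp, "apple_protect",
	     &apple_protect_pager_lck_grp_attr);
lck_attr_setdefault(&apple_protect_pager_lck_attr);
lck_mtx_init(&apple_protect_pager_lock,
	     &apple_protect_pager_lck_grp,
	     &apple_protect_pager_lck_attr);

/* every mutex_lock()/mutex_unlock() pair then becomes: */
lck_mtx_lock(&apple_protect_pager_lock);
/* ... critical section ... */
lck_mtx_unlock(&apple_protect_pager_lock);

/* and thread_sleep_mutex() becomes lck_mtx_sleep(), as in the
 * apple_protect_pager_setup() hunk near the end of this diff: */
lck_mtx_sleep(&apple_protect_pager_lock, LCK_SLEEP_DEFAULT,
	      &pager->is_ready, THREAD_UNINT);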
@@ -212,7 +230,7 @@ apple_protect_pager_init(
 #if !DEBUG
 	__unused
 #endif
-	vm_size_t pg_size)
+	memory_object_cluster_size_t pg_size)
 {
 	apple_protect_pager_t	pager;
 	kern_return_t		kr;
@@ -261,7 +279,7 @@ kern_return_t
 apple_protect_pager_data_return(
 	__unused memory_object_t	mem_obj,
 	__unused memory_object_offset_t	offset,
-	__unused vm_size_t		data_cnt,
+	__unused memory_object_cluster_size_t	data_cnt,
 	__unused memory_object_offset_t	*resid_offset,
 	__unused int			*io_error,
 	__unused boolean_t		dirty,
@@ -276,7 +294,7 @@ kern_return_t
 apple_protect_pager_data_initialize(
 	__unused memory_object_t	mem_obj,
 	__unused memory_object_offset_t	offset,
-	__unused vm_size_t		data_cnt)
+	__unused memory_object_cluster_size_t	data_cnt)
 {
 	panic("apple_protect_pager_data_initialize: should never get called");
 	return KERN_FAILURE;
@@ -286,7 +304,7 @@ kern_return_t
 apple_protect_pager_data_unlock(
 	__unused memory_object_t	mem_obj,
 	__unused memory_object_offset_t	offset,
-	__unused vm_size_t		size,
+	__unused memory_object_size_t	size,
 	__unused vm_prot_t		desired_access)
 {
 	return KERN_FAILURE;
@@ -301,93 +319,52 @@ kern_return_t
 apple_protect_pager_data_request(
 	memory_object_t		mem_obj,
 	memory_object_offset_t	offset,
-	vm_size_t		length,
+	memory_object_cluster_size_t	length,
 #if !DEBUG
 	__unused
 #endif
-	vm_prot_t		protection_required)
+	vm_prot_t		protection_required,
+	memory_object_fault_info_t mo_fault_info)
 {
 	apple_protect_pager_t	pager;
 	memory_object_control_t	mo_control;
-	upl_t			upl = NULL;
+	upl_t			upl;
 	int			upl_flags;
 	upl_size_t		upl_size;
 	upl_page_info_t		*upl_pl;
+	unsigned int		pl_count;
 	vm_object_t		src_object, dst_object;
 	kern_return_t		kr, retval;
-	vm_map_offset_t		src_mapping = 0, dst_mapping = 0;
+	vm_map_offset_t		kernel_mapping;
 	vm_offset_t		src_vaddr, dst_vaddr;
 	vm_offset_t		cur_offset;
-	boolean_t		src_map_page_by_page;
 	vm_map_entry_t		map_entry;
+	kern_return_t		error_code;
+	vm_prot_t		prot;
+	vm_page_t		src_page, top_page;
+	int			interruptible;
+	struct vm_object_fault_info fault_info;
+	int			ret;
 
-	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %x, %llx, %llxx, %x\n", mem_obj, offset, length, protection_required));
+	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));
+
+	retval = KERN_SUCCESS;
+	src_object = VM_OBJECT_NULL;
+	kernel_mapping = 0;
+	upl = NULL;
+	upl_pl = NULL;
+	fault_info = *((struct vm_object_fault_info *) mo_fault_info);
+	fault_info.stealth = TRUE;
+	fault_info.io_sync = FALSE;
+	fault_info.mark_zf_absent = FALSE;
+	fault_info.batch_pmap_op = FALSE;
+	interruptible = fault_info.interruptible;
 
 	pager = apple_protect_pager_lookup(mem_obj);
 	assert(pager->is_ready);
 	assert(pager->ref_count > 1); /* pager is alive and mapped */
 
-	PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %x, %llx, %llx, %x, pager %x\n", mem_obj, offset, length, protection_required, pager));
-
-	/*
-	 * Map the encrypted data in the kernel address space from the
-	 * backing VM object (itself backed by the encrypted file via
-	 * the vnode pager).
-	 */
-	src_object = pager->backing_object;
-	assert(src_object != VM_OBJECT_NULL);
-	vm_object_reference(src_object); /* ref. for the mapping */
-	src_mapping = 0;
-	kr = vm_map_enter(kernel_map,
-			  &src_mapping,
-			  length,
-			  0,
-			  VM_FLAGS_ANYWHERE,
-			  src_object,
-			  offset,
-			  FALSE,
-			  VM_PROT_READ,
-			  VM_PROT_READ,
-			  VM_INHERIT_NONE);
-	switch (kr) {
-	case KERN_SUCCESS:
-		/* wire the memory to make sure it is available */
-		kr = vm_map_wire(kernel_map,
-				 src_mapping,
-				 src_mapping + length,
-				 VM_PROT_READ,
-				 FALSE);
-		if (kr != KERN_SUCCESS) {
-			/*
-			 * Wiring failed, so unmap source and fall back
-			 * to page by page mapping of the source.
-			 */
-			kr = vm_map_remove(kernel_map,
-					   src_mapping,
-					   src_mapping + length,
-					   VM_MAP_NO_FLAGS);
-			assert(kr == KERN_SUCCESS);
-			src_mapping = 0;
-			src_vaddr = 0;
-			src_map_page_by_page = TRUE;
-			break;
-		}
-		/* source region is now fully mapped and wired */
-		src_map_page_by_page = FALSE;
-		src_vaddr = CAST_DOWN(vm_offset_t, src_mapping);
-		break;
-	case KERN_NO_SPACE:
-		/* we couldn't map the entire source, so map it page by page */
-		src_map_page_by_page = TRUE;
-		/* release the reference for the failed mapping */
-		vm_object_deallocate(src_object);
-		break;
-	default:
-		vm_object_deallocate(src_object);
-		retval = kr;
-		goto done;
-	}
-
+	PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));
 
 	/*
	 * Gather in a UPL all the VM pages requested by VM.
@@ -401,6 +378,7 @@ apple_protect_pager_data_request(
 		UPL_NO_SYNC |
 		UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */
 		UPL_SET_INTERNAL;
+	pl_count = 0;
 	kr = memory_object_upl_request(mo_control,
 				       offset, upl_size,
 				       &upl, NULL, NULL, upl_flags);
@@ -408,18 +386,19 @@ apple_protect_pager_data_request(
 		retval = kr;
 		goto done;
 	}
+	dst_object = mo_control->moc_object;
+	assert(dst_object != VM_OBJECT_NULL);
+
 	/*
-	 * Reserve a virtual page in the kernel address space to map each
-	 * destination physical page when it's its turn to be filled.
+	 * Reserve 2 virtual pages in the kernel address space to map each
+	 * source and destination physical pages when it's their turn to
+	 * be processed.
 	 */
-	dst_object = mo_control->moc_object;
-	assert(dst_object != VM_OBJECT_NULL);
-	dst_mapping = 0;
 	vm_object_reference(kernel_object);	/* ref. for mapping */
 	kr = vm_map_find_space(kernel_map,
-			       &dst_mapping,
-			       PAGE_SIZE_64,
+			       &kernel_mapping,
+			       2 * PAGE_SIZE_64,
 			       0,
 			       0,
 			       &map_entry);
@@ -429,18 +408,31 @@
 		goto done;
 	}
 	map_entry->object.vm_object = kernel_object;
-	map_entry->offset = dst_mapping - VM_MIN_KERNEL_ADDRESS;
+	map_entry->offset = kernel_mapping;
 	vm_map_unlock(kernel_map);
-	dst_vaddr = CAST_DOWN(vm_offset_t, dst_mapping);
+	src_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping);
+	dst_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping + PAGE_SIZE_64);
+
+	/*
+	 * We'll map the encrypted data in the kernel address space from the
+	 * backing VM object (itself backed by the encrypted file via
+	 * the vnode pager).
+	 */
+	src_object = pager->backing_object;
+	assert(src_object != VM_OBJECT_NULL);
+	vm_object_reference(src_object); /* to keep the source object alive */
 
 	/*
	 * Fill in the contents of the pages requested by VM.
	 */
 	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
-	for (cur_offset = 0; cur_offset < length; cur_offset += PAGE_SIZE) {
+	pl_count = length / PAGE_SIZE;
+	for (cur_offset = 0;
+	     retval == KERN_SUCCESS && cur_offset < length;
+	     cur_offset += PAGE_SIZE) {
 		ppnum_t dst_pnum;
 
-		if (!upl_page_present(upl_pl, cur_offset / PAGE_SIZE)) {
+		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
 			/* this page is not in the UPL: skip it */
 			continue;
 		}
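The reworked data_request() path above replaces the old map-and-wire-the-whole-source approach with a fixed two-page kernel window: the source (encrypted) page is mapped at kernel_mapping and the destination at kernel_mapping + PAGE_SIZE, one pair at a time. A minimal user-space analogy of that bounce-buffer layout, assuming a hypothetical XOR transform in place of the real decryption:

/* cc -o bounce bounce.c */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);

	/*
	 * Reserve one contiguous VA range of two pages, analogous to
	 * vm_map_find_space(..., 2 * PAGE_SIZE_64, ...) above:
	 * page 0 stands in for the source (encrypted) mapping,
	 * page 1 for the destination (decrypted) mapping.
	 */
	unsigned char *va = mmap(NULL, 2 * pgsz, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANON, -1, 0);
	if (va == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	unsigned char *src = va;		/* "kernel_mapping" */
	unsigned char *dst = va + pgsz;		/* "kernel_mapping + PAGE_SIZE" */

	memset(src, 0xAA, pgsz);		/* pretend: encrypted bytes */
	for (size_t i = 0; i < pgsz; i++)
		dst[i] = src[i] ^ 0xFF;		/* pretend: page_decrypt() */

	printf("dst[0] = 0x%02x\n", dst[0]);	/* prints 0x55 */
	munmap(va, 2 * pgsz);
	return 0;
}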
@@ -448,109 +440,156 @@
 		/*
 		 * Map the source (encrypted) page in the kernel's
 		 * virtual address space.
+		 * We already hold a reference on the src_object.
 		 */
-		if (src_map_page_by_page) {
-			vm_object_reference(src_object); /* ref. for mapping */
-			kr = vm_map_enter(kernel_map,
-					  &src_mapping,
-					  PAGE_SIZE_64,
-					  0,
-					  VM_FLAGS_ANYWHERE,
-					  src_object,
-					  offset + cur_offset,
-					  FALSE,
-					  VM_PROT_READ,
-					  VM_PROT_READ,
-					  VM_INHERIT_NONE);
-			if (kr != KERN_SUCCESS) {
-				vm_object_deallocate(src_object);
-				retval = kr;
-				goto done;
+	retry_src_fault:
+		vm_object_lock(src_object);
+		vm_object_paging_begin(src_object);
+		error_code = 0;
+		prot = VM_PROT_READ;
+		kr = vm_fault_page(src_object,
+				   offset + cur_offset,
+				   VM_PROT_READ,
+				   FALSE,
+				   &prot,
+				   &src_page,
+				   &top_page,
+				   NULL,
+				   &error_code,
+				   FALSE,
+				   FALSE,
+				   &fault_info);
+		switch (kr) {
+		case VM_FAULT_SUCCESS:
+			break;
+		case VM_FAULT_RETRY:
+			goto retry_src_fault;
+		case VM_FAULT_MEMORY_SHORTAGE:
+			if (vm_page_wait(interruptible)) {
+				goto retry_src_fault;
 			}
-			kr = vm_map_wire(kernel_map,
-					 src_mapping,
-					 src_mapping + PAGE_SIZE_64,
-					 VM_PROT_READ,
-					 FALSE);
-			if (kr != KERN_SUCCESS) {
-				retval = kr;
-				kr = vm_map_remove(kernel_map,
-						   src_mapping,
-						   src_mapping + PAGE_SIZE_64,
-						   VM_MAP_NO_FLAGS);
-				assert(kr == KERN_SUCCESS);
-				src_mapping = 0;
-				src_vaddr = 0;
-				printf("apple_protect_pager_data_request: "
-				       "failed to resolve page fault for src "
-				       "object %p offset 0x%llx "
-				       "preempt %d error 0x%x\n",
-				       src_object, offset + cur_offset,
-				       get_preemption_level(), retval);
-				goto done;
+			/* fall thru */
+		case VM_FAULT_INTERRUPTED:
+			retval = MACH_SEND_INTERRUPTED;
+			goto done;
+		case VM_FAULT_SUCCESS_NO_VM_PAGE:
+			/* success but no VM page: fail */
+			vm_object_paging_end(src_object);
+			vm_object_unlock(src_object);
+			/*FALLTHROUGH*/
+		case VM_FAULT_MEMORY_ERROR:
+			/* the page is not there ! */
+			if (error_code) {
+				retval = error_code;
+			} else {
+				retval = KERN_MEMORY_ERROR;
 			}
-			src_vaddr = CAST_DOWN(vm_offset_t, src_mapping);
-		} else {
-			src_vaddr = src_mapping + cur_offset;
+			goto done;
+		default:
+			panic("apple_protect_pager_data_request: "
+			      "vm_fault_page() unexpected error 0x%x\n",
+			      kr);
 		}
-
+		assert(src_page != VM_PAGE_NULL);
+		assert(src_page->busy);
+
+		if (!src_page->active &&
+		    !src_page->inactive &&
+		    !src_page->throttled) {
+			vm_page_lockspin_queues();
+			if (!src_page->active &&
+			    !src_page->inactive &&
+			    !src_page->throttled) {
+				vm_page_deactivate(src_page);
+			}
+			vm_page_unlock_queues();
+		}
+
+		/*
+		 * Establish an explicit mapping of the source
+		 * physical page.
+		 */
+		pmap_enter(kernel_pmap,
+			   kernel_mapping,
+			   src_page->phys_page,
+			   VM_PROT_READ,
+			   VM_PROT_NONE,
+			   0,
+			   TRUE);
 		/*
 		 * Establish an explicit pmap mapping of the destination
 		 * physical page.
 		 * We can't do a regular VM mapping because the VM page
 		 * is "busy".
 		 */
-		dst_pnum = (addr64_t)
-			upl_phys_page(upl_pl, cur_offset / PAGE_SIZE);
+		dst_pnum = (ppnum_t)
+			upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
 		assert(dst_pnum != 0);
-		pmap_enter(kernel_pmap, dst_mapping, dst_pnum,
+		pmap_enter(kernel_pmap,
+			   kernel_mapping + PAGE_SIZE_64,
+			   dst_pnum,
 			   VM_PROT_READ | VM_PROT_WRITE,
-			   dst_object->wimg_bits & VM_WIMG_MASK,
-			   FALSE);
+			   VM_PROT_NONE,
+			   0,
+			   TRUE);
 
 		/*
 		 * Decrypt the encrypted contents of the source page
 		 * into the destination page.
 		 */
-		dsmos_page_transform((const void *) src_vaddr,
-				     (void *) dst_vaddr);
-
+		ret = pager->crypt.page_decrypt((const void *) src_vaddr,
+						(void *) dst_vaddr,
+						offset+cur_offset,
+						pager->crypt.crypt_ops);
+		if (ret) {
+			/*
+			 * Decryption failed.  Abort the fault.
+			 */
+			retval = KERN_ABORTED;
+		} else {
+			/*
+			 * Validate the original page...
+			 */
+			if (src_page->object->code_signed) {
+				vm_page_validate_cs_mapped(
+					src_page,
+					(const void *) src_vaddr);
+			}
+			/*
+			 * ... and transfer the results to the destination page.
+			 */
+			UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
+					     src_page->cs_validated);
+			UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
+					   src_page->cs_tainted);
+		}
+
 		/*
-		 * Remove the pmap mapping of the destination page
+		 * Remove the pmap mapping of the source and destination pages
 		 * in the kernel.
 		 */
 		pmap_remove(kernel_pmap,
-			    (addr64_t) dst_mapping,
-			    (addr64_t) (dst_mapping + PAGE_SIZE_64));
+			    (addr64_t) kernel_mapping,
+			    (addr64_t) (kernel_mapping + (2 * PAGE_SIZE_64)));
 
-		if (src_map_page_by_page) {
-			/*
-			 * Remove the wired kernel mapping of the source page.
-			 * This releases the extra reference we took on
-			 * src_object.
-			 */
-			kr = vm_map_remove(kernel_map,
-					   src_mapping,
-					   src_mapping + PAGE_SIZE_64,
-					   VM_MAP_REMOVE_KUNWIRE);
-			assert(kr == KERN_SUCCESS);
-			src_mapping = 0;
-			src_vaddr = 0;
+		/*
+		 * Cleanup the result of vm_fault_page() of the source page.
+		 */
+		PAGE_WAKEUP_DONE(src_page);
+		vm_object_paging_end(src_page->object);
+		vm_object_unlock(src_page->object);
+
+		if (top_page != VM_PAGE_NULL) {
+			vm_object_t top_object;
+
+			top_object = top_page->object;
+			vm_object_lock(top_object);
+			VM_PAGE_FREE(top_page);
+			vm_object_paging_end(top_object);
+			vm_object_unlock(top_object);
 		}
 	}
 
-	retval = KERN_SUCCESS;
 done:
-	if (src_mapping != 0) {
-		/* remove the wired mapping of the source pages */
-		kr = vm_map_remove(kernel_map,
-				   src_mapping,
-				   src_mapping + length,
-				   VM_MAP_REMOVE_KUNWIRE);
-		assert(kr == KERN_SUCCESS);
-		src_mapping = 0;
-		src_vaddr = 0;
-	}
 	if (upl != NULL) {
 		/* clean up the UPL */
@@ -566,24 +605,58 @@ done:
 
 		/* abort or commit the UPL */
 		if (retval != KERN_SUCCESS) {
 			upl_abort(upl, 0);
+			if (retval == KERN_ABORTED) {
+				wait_result_t	wait_result;
+
+				/*
+				 * We aborted the fault and did not provide
+				 * any contents for the requested pages but
+				 * the pages themselves are not invalid, so
+				 * let's return success and let the caller
+				 * retry the fault, in case it might succeed
+				 * later (when the decryption code is up and
+				 * running in the kernel, for example).
+				 */
+				retval = KERN_SUCCESS;
+				/*
+				 * Wait a little bit first to avoid using
+				 * too much CPU time retrying and failing
+				 * the same fault over and over again.
+				 */
+				wait_result = assert_wait_timeout(
+					(event_t) apple_protect_pager_data_request,
+					THREAD_UNINT,
+					10000,	/* 10ms */
+					NSEC_PER_USEC);
+				assert(wait_result == THREAD_WAITING);
+				wait_result = thread_block(THREAD_CONTINUE_NULL);
+				assert(wait_result == THREAD_TIMED_OUT);
+			}
 		} else {
-			upl_commit(upl, NULL, 0);
+			boolean_t	empty;
+			upl_commit_range(upl, 0, upl->size,
+					 UPL_COMMIT_CS_VALIDATED,
+					 upl_pl, pl_count, &empty);
 		}
 
 		/* and deallocate the UPL */
 		upl_deallocate(upl);
 		upl = NULL;
 	}
-	if (dst_mapping != 0) {
-		/* clean up the mapping of the destination pages */
+	if (kernel_mapping != 0) {
+		/* clean up the mapping of the source and destination pages */
 		kr = vm_map_remove(kernel_map,
-				   dst_mapping,
-				   dst_mapping + PAGE_SIZE_64,
+				   kernel_mapping,
+				   kernel_mapping + (2 * PAGE_SIZE_64),
 				   VM_MAP_NO_FLAGS);
 		assert(kr == KERN_SUCCESS);
-		dst_mapping = 0;
+		kernel_mapping = 0;
+		src_vaddr = 0;
 		dst_vaddr = 0;
 	}
+	if (src_object != VM_OBJECT_NULL) {
+		vm_object_deallocate(src_object);
+	}
 
 	return retval;
 }
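The KERN_ABORTED handling above turns a failed decryption into a "succeed now, let VM fault again later" outcome, with a 10ms sleep so a persistently failing fault cannot spin the CPU. A user-space sketch of the same retry-with-delay idea, with hypothetical names:

#include <stdbool.h>
#include <unistd.h>

#define RETRY_DELAY_US 10000	/* 10ms, matching the timeout above */

/*
 * try_fill is a hypothetical callback standing in for one attempt at
 * producing the page contents (e.g. one decryption pass).
 */
bool
fill_page_with_backoff(bool (*try_fill)(void *), void *arg, int max_attempts)
{
	for (int i = 0; i < max_attempts; i++) {
		if (try_fill(arg))
			return true;	/* contents produced */
		/*
		 * Transient failure: sleep briefly before retrying, like
		 * the assert_wait_timeout()/thread_block() pair above,
		 * instead of failing the same fault in a tight loop.
		 */
		usleep(RETRY_DELAY_US);
	}
	return false;			/* give up after max_attempts */
}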
@@ -603,10 +676,10 @@ apple_protect_pager_reference(
 
 	pager = apple_protect_pager_lookup(mem_obj);
 
-	mutex_lock(&apple_protect_pager_lock);
+	lck_mtx_lock(&apple_protect_pager_lock);
 	assert(pager->ref_count > 0);
 	pager->ref_count++;
-	mutex_unlock(&apple_protect_pager_lock);
+	lck_mtx_unlock(&apple_protect_pager_lock);
 }
 
 
@@ -657,6 +730,10 @@ apple_protect_pager_terminate_internal(
 		vm_object_deallocate(pager->backing_object);
 		pager->backing_object = VM_OBJECT_NULL;
 	}
+
+	/* deallocate any crypt module data */
+	if(pager->crypt.crypt_end)
+		pager->crypt.crypt_end(pager->crypt.crypt_ops);
 
 	/* trigger the destruction of the memory object */
 	memory_object_destroy(pager->pager_control, 0);
@@ -679,7 +756,7 @@ apple_protect_pager_deallocate_internal(
 	int	count_unmapped;
 
 	if (! locked) {
-		mutex_lock(&apple_protect_pager_lock);
+		lck_mtx_lock(&apple_protect_pager_lock);
 	}
 
 	count_unmapped = (apple_protect_pager_count -
@@ -697,12 +774,12 @@
 	if (pager->ref_count == 1) {
 		/*
 		 * Only the "named" reference is left, which means that
-		 * no one is realy holding on to this pager anymore.
+		 * no one is really holding on to this pager anymore.
 		 * Terminate it.
 		 */
 		apple_protect_pager_dequeue(pager);
 		/* the pager is all ours: no need for the lock now */
-		mutex_unlock(&apple_protect_pager_lock);
+		lck_mtx_unlock(&apple_protect_pager_lock);
 		apple_protect_pager_terminate_internal(pager);
 	} else if (pager->ref_count == 0) {
 		/*
@@ -710,7 +787,7 @@
 		 * been terminated.  Do some final cleanup and release the
 		 * pager structure.
 		 */
-		mutex_unlock(&apple_protect_pager_lock);
+		lck_mtx_unlock(&apple_protect_pager_lock);
 		if (pager->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
 			memory_object_control_deallocate(pager->pager_control);
 			pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
@@ -719,7 +796,7 @@
 		pager = APPLE_PROTECT_PAGER_NULL;
 	} else {
 		/* there are still plenty of references: keep going... */
-		mutex_unlock(&apple_protect_pager_lock);
+		lck_mtx_unlock(&apple_protect_pager_lock);
 	}
 
 	if (needs_trimming) {
@@ -740,7 +817,7 @@ apple_protect_pager_deallocate(
 {
 	apple_protect_pager_t	pager;
 
-	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %x\n", mem_obj));
+	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj));
 	pager = apple_protect_pager_lookup(mem_obj);
 	apple_protect_pager_deallocate_internal(pager, FALSE);
 }
@@ -755,7 +832,7 @@ apple_protect_pager_terminate(
 #endif
 	memory_object_t	mem_obj)
 {
-	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %x\n", mem_obj));
+	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj));
 
 	return KERN_SUCCESS;
 }
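The map/unmap hunks below change the mapping protocol: apple_protect_pager_map() is now called on every mapping but still takes only one extra reference, guarded by is_mapped, and apple_protect_pager_last_unmap() drops it once the final mapping disappears. A compact user-space sketch of that counting scheme, with hypothetical names:

#include <pthread.h>
#include <stdbool.h>

struct pager_sketch {
	pthread_mutex_t lock;
	unsigned int	ref_count;
	bool		is_mapped;
};

void
pager_map(struct pager_sketch *p)
{
	pthread_mutex_lock(&p->lock);
	if (!p->is_mapped) {
		p->is_mapped = true;
		p->ref_count++;	/* one shared ref covers all mappings */
	}
	pthread_mutex_unlock(&p->lock);
}

void
pager_last_unmap(struct pager_sketch *p)
{
	pthread_mutex_lock(&p->lock);
	if (p->is_mapped) {
		p->is_mapped = false;
		p->ref_count--;	/* reaching 0/1 may trigger teardown */
	}
	pthread_mutex_unlock(&p->lock);
}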
@@ -767,12 +844,12 @@ kern_return_t
 apple_protect_pager_synchronize(
 	memory_object_t		mem_obj,
 	memory_object_offset_t	offset,
-	vm_size_t		length,
+	memory_object_size_t	length,
 	__unused vm_sync_t	sync_flags)
 {
 	apple_protect_pager_t	pager;
 
-	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_synchronize: %x\n", mem_obj));
+	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_synchronize: %p\n", mem_obj));
 
 	pager = apple_protect_pager_lookup(mem_obj);
@@ -786,21 +863,22 @@ apple_protect_pager_synchronize(
  * apple_protect_pager_map()
  *
  * This allows VM to let us, the EMM, know that this memory object
- * is currently mapped one or more times.  This is called by VM only the first
- * time the memory object gets mapped and we take one extra reference on the
+ * is currently mapped one or more times.  This is called by VM each time
+ * the memory object gets mapped and we take one extra reference on the
  * memory object to account for all its mappings.
  */
-void
+kern_return_t
 apple_protect_pager_map(
-	memory_object_t	mem_obj)
+	memory_object_t	mem_obj,
+	__unused vm_prot_t	prot)
 {
 	apple_protect_pager_t	pager;
 
-	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %x\n", mem_obj));
+	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj));
 
 	pager = apple_protect_pager_lookup(mem_obj);
 
-	mutex_lock(&apple_protect_pager_lock);
+	lck_mtx_lock(&apple_protect_pager_lock);
 	assert(pager->is_ready);
 	assert(pager->ref_count > 0); /* pager is alive */
 	if (pager->is_mapped == FALSE) {
@@ -813,26 +891,29 @@ apple_protect_pager_map(
 		pager->ref_count++;
 		apple_protect_pager_count_mapped++;
 	}
-	mutex_unlock(&apple_protect_pager_lock);
+	lck_mtx_unlock(&apple_protect_pager_lock);
+
+	return KERN_SUCCESS;
 }
 
 /*
- * apple_protect_pager_unmap()
+ * apple_protect_pager_last_unmap()
  *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
 kern_return_t
-apple_protect_pager_unmap(
+apple_protect_pager_last_unmap(
 	memory_object_t	mem_obj)
 {
 	apple_protect_pager_t	pager;
 	int			count_unmapped;
 
-	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_unmap: %x\n", mem_obj));
+	PAGER_DEBUG(PAGER_ALL,
+		    ("apple_protect_pager_last_unmap: %p\n", mem_obj));
 
 	pager = apple_protect_pager_lookup(mem_obj);
 
-	mutex_lock(&apple_protect_pager_lock);
+	lck_mtx_lock(&apple_protect_pager_lock);
 	if (pager->is_mapped) {
 		/*
 		 * All the mappings are gone, so let go of the one extra
@@ -848,7 +929,7 @@ apple_protect_pager_last_unmap(
 		apple_protect_pager_deallocate_internal(pager, TRUE);
 		/* caution: deallocate_internal() released the lock ! */
 	} else {
-		mutex_unlock(&apple_protect_pager_lock);
+		lck_mtx_unlock(&apple_protect_pager_lock);
 	}
 
 	return KERN_SUCCESS;
@@ -872,7 +953,8 @@ apple_protect_pager_lookup(
 
 apple_protect_pager_t
 apple_protect_pager_create(
-	vm_object_t	backing_object)
+	vm_object_t	backing_object,
+	struct pager_crypt_info *crypt_info)
 {
 	apple_protect_pager_t	pager, pager2;
 	memory_object_control_t	control;
@@ -887,7 +969,7 @@ apple_protect_pager_create(
 	 * The vm_map call takes both named entry ports and raw memory
 	 * objects in the same parameter.  We need to make sure that
 	 * vm_map does not see this object as a named entry port.  So,
-	 * we reserve the second word in the object for a fake ip_kotype
+	 * we reserve the first word in the object for a fake ip_kotype
 	 * setting - that will tell vm_map to use it as a memory object.
 	 */
 	pager->pager_ops = &apple_protect_pager_ops;
@@ -897,9 +979,11 @@ apple_protect_pager_create(
 	pager->is_mapped = FALSE;
 	pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
 	pager->backing_object = backing_object;
+	pager->crypt = *crypt_info;
+
 	vm_object_reference(backing_object);
 
-	mutex_lock(&apple_protect_pager_lock);
+	lck_mtx_lock(&apple_protect_pager_lock);
 	/* see if anyone raced us to create a pager for the same object */
 	queue_iterate(&apple_protect_pager_queue,
 		      pager2,
@@ -914,7 +998,7 @@
 		/* while we hold the lock, transfer our setup ref to winner */
 		pager2->ref_count++;
 		/* we lost the race, down with the loser... */
-		mutex_unlock(&apple_protect_pager_lock);
+		lck_mtx_unlock(&apple_protect_pager_lock);
 		vm_object_deallocate(pager->backing_object);
 		pager->backing_object = VM_OBJECT_NULL;
 		kfree(pager, sizeof (*pager));
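apple_protect_pager_create() above resolves creation races optimistically: the new pager is built without holding the lock, and only the queue scan under the lock decides whether it gets published or discarded in favor of a racing winner. A user-space sketch of the same pattern, with hypothetical names:

#include <pthread.h>
#include <stdlib.h>

struct obj_sketch {
	struct obj_sketch *next;
	int		   key;		/* stands in for backing_object */
	unsigned int	   ref_count;
};

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj_sketch *registry;	/* singly-linked registry */

struct obj_sketch *
obj_create(int key)
{
	struct obj_sketch *obj, *winner;

	obj = malloc(sizeof (*obj));	/* expensive setup, lock not held */
	obj->key = key;
	obj->ref_count = 1;

	pthread_mutex_lock(&registry_lock);
	for (winner = registry; winner != NULL; winner = winner->next) {
		if (winner->key == key) {
			/* we lost the race: hand our ref to the winner... */
			winner->ref_count++;
			pthread_mutex_unlock(&registry_lock);
			free(obj);	/* ...and discard the loser */
			return winner;
		}
	}
	obj->next = registry;		/* we won: publish our object */
	registry = obj;
	pthread_mutex_unlock(&registry_lock);
	return obj;
}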
@@ -933,17 +1017,17 @@ apple_protect_pager_create(
 	if (apple_protect_pager_count > apple_protect_pager_count_max) {
 		apple_protect_pager_count_max = apple_protect_pager_count;
 	}
-	mutex_unlock(&apple_protect_pager_lock);
+	lck_mtx_unlock(&apple_protect_pager_lock);
 
 	kr = memory_object_create_named((memory_object_t) pager,
 					0,
 					&control);
 	assert(kr == KERN_SUCCESS);
 
-	mutex_lock(&apple_protect_pager_lock);
+	lck_mtx_lock(&apple_protect_pager_lock);
 	/* the new pager is now ready to be used */
 	pager->is_ready = TRUE;
-	mutex_unlock(&apple_protect_pager_lock);
+	lck_mtx_unlock(&apple_protect_pager_lock);
 
 	/* wakeup anyone waiting for this pager to be ready */
 	thread_wakeup(&pager->is_ready);
@@ -960,17 +1044,24 @@
  */
 memory_object_t
 apple_protect_pager_setup(
-	vm_object_t	backing_object)
+	vm_object_t	backing_object,
+	struct pager_crypt_info *crypt_info)
 {
 	apple_protect_pager_t	pager;
 
-	mutex_lock(&apple_protect_pager_lock);
+	lck_mtx_lock(&apple_protect_pager_lock);
 
 	queue_iterate(&apple_protect_pager_queue,
 		      pager,
 		      apple_protect_pager_t,
 		      pager_queue) {
 		if (pager->backing_object == backing_object) {
+			/* For the same object we must always use the same protection options */
+			if (!((pager->crypt.page_decrypt == crypt_info->page_decrypt) &&
+			      (pager->crypt.crypt_ops == crypt_info->crypt_ops) )) {
+				lck_mtx_unlock(&apple_protect_pager_lock);
+				return MEMORY_OBJECT_NULL;
+			}
 			break;
 		}
 	}
@@ -983,22 +1074,23 @@ apple_protect_pager_setup(
 		pager->ref_count++;
 	}
 
-	mutex_unlock(&apple_protect_pager_lock);
+	lck_mtx_unlock(&apple_protect_pager_lock);
 
 	if (pager == APPLE_PROTECT_PAGER_NULL) {
-		pager = apple_protect_pager_create(backing_object);
+		pager = apple_protect_pager_create(backing_object, crypt_info);
 		if (pager == APPLE_PROTECT_PAGER_NULL) {
 			return MEMORY_OBJECT_NULL;
 		}
 	}
 
-	mutex_lock(&apple_protect_pager_lock);
+	lck_mtx_lock(&apple_protect_pager_lock);
 	while (!pager->is_ready) {
-		thread_sleep_mutex(&pager->is_ready,
-				   &apple_protect_pager_lock,
-				   THREAD_UNINT);
+		lck_mtx_sleep(&apple_protect_pager_lock,
+			      LCK_SLEEP_DEFAULT,
+			      &pager->is_ready,
+			      THREAD_UNINT);
 	}
-	mutex_unlock(&apple_protect_pager_lock);
+	lck_mtx_unlock(&apple_protect_pager_lock);
 
 	return (memory_object_t) pager;
 }
@@ -1011,7 +1103,7 @@ apple_protect_pager_trim(void)
 	int	num_trim;
 	int	count_unmapped;
 
-	mutex_lock(&apple_protect_pager_lock);
+	lck_mtx_lock(&apple_protect_pager_lock);
 
 	/*
	 * We have too many pagers, try and trim some unused ones,
@@ -1055,7 +1147,7 @@
 	}
 	apple_protect_pager_num_trim_total += num_trim;
 
-	mutex_unlock(&apple_protect_pager_lock);
+	lck_mtx_unlock(&apple_protect_pager_lock);
 
 	/* terminate the trimmed pagers */
 	while (!queue_empty(&trim_queue)) {
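apple_protect_pager_trim() moves its victims onto a local trim_queue while holding the lock and only then, with the lock dropped, terminates them. A user-space sketch of that collect-then-work pattern, with hypothetical names (node_terminate() stands in for the slow teardown):

#include <pthread.h>
#include <stddef.h>

struct node_sketch {
	struct node_sketch *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node_sketch *live_list;

extern void node_terminate(struct node_sketch *n);	/* hypothetical */

void
trim_all(void)
{
	struct node_sketch *victims, *n;

	pthread_mutex_lock(&list_lock);
	victims = live_list;			/* grab the victims... */
	live_list = NULL;
	pthread_mutex_unlock(&list_lock);	/* ...then drop the lock */

	while ((n = victims) != NULL) {
		victims = n->next;
		node_terminate(n);		/* slow work, no lock held */
	}
}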