#include <kern/page_decrypt.h>
#include <kern/queue.h>
#include <kern/thread.h>
+#include <kern/ipc_kobject.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
-#include <default_pager/default_pager_types.h>
-#include <default_pager/default_pager_object_server.h>
-
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
+#include <vm/vm_kern.h>
/*
* the "apple protect" EMM.
*/
typedef struct apple_protect_pager {
- struct ipc_object_header pager_header; /* fake ip_kotype() */
- memory_object_pager_ops_t pager_ops; /* == &apple_protect_pager_ops */
+ /* mandatory generic header */
+ struct memory_object ap_pgr_hdr;
+
+ /* pager-specific data */
queue_chain_t pager_queue; /* next & prev pagers */
unsigned int ref_count; /* reference count */
boolean_t is_ready; /* is this pager ready ? */
boolean_t is_mapped; /* is this mem_obj mapped ? */
- memory_object_control_t pager_control; /* mem object control handle */
vm_object_t backing_object; /* VM obj w/ encrypted data */
vm_object_offset_t backing_offset;
vm_object_offset_t crypto_backing_offset; /* for key... */
struct pager_crypt_info *crypt_info;
} *apple_protect_pager_t;
#define APPLE_PROTECT_PAGER_NULL ((apple_protect_pager_t) NULL)
-#define pager_ikot pager_header.io_bits
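/*
 * Illustrative sketch, not part of this diff: because the generic
 * "struct memory_object" header is now the first field, the same pointer
 * doubles as a memory_object_t and an apple_protect_pager_t, and a lookup
 * reduces to a checked cast, roughly:
 *
 *	memory_object_t mo = (memory_object_t) pager;
 *	assert(mo->mo_pager_ops == &apple_protect_pager_ops);
 *	apple_protect_pager_t p = (apple_protect_pager_t) mo;
 *
 * (see apple_protect_pager_lookup() further down for the real version)
 */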
/*
* List of memory objects managed by this EMM.
memory_object_control_reference(control);
- pager->pager_control = control;
+ pager->ap_pgr_hdr.mo_control = control;
attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
panic("apple_protect_pager_init: "
"memory_object_change_attributes() failed");
+#if CONFIG_SECLUDED_MEMORY
+ if (secluded_for_filecache) {
+ memory_object_mark_eligible_for_secluded(control, TRUE);
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
return KERN_SUCCESS;
}
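/*
 * Sketch, not part of this diff: the apple_protect_pager_ops table that
 * ap_pgr_hdr.mo_pager_ops points at is what plugs these routines into the
 * generic EMM interface. The field order follows struct memory_object_pager_ops
 * in this XNU release; treat this as an approximation for orientation, not
 * a verbatim excerpt:
 *
 *	const struct memory_object_pager_ops apple_protect_pager_ops = {
 *		apple_protect_pager_reference,
 *		apple_protect_pager_deallocate,
 *		apple_protect_pager_init,
 *		apple_protect_pager_terminate,
 *		apple_protect_pager_data_request,
 *		apple_protect_pager_data_return,
 *		apple_protect_pager_data_initialize,
 *		apple_protect_pager_data_unlock,
 *		apple_protect_pager_synchronize,
 *		apple_protect_pager_map,
 *		apple_protect_pager_last_unmap,
 *		NULL,			// data_reclaim
 *		"apple_protect"
 *	};
 */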
upl_size_t upl_size;
upl_page_info_t *upl_pl;
unsigned int pl_count;
- vm_object_t src_object, dst_object;
+ vm_object_t src_top_object, src_page_object, dst_object;
kern_return_t kr, retval;
vm_map_offset_t kernel_mapping;
vm_offset_t src_vaddr, dst_vaddr;
PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));
retval = KERN_SUCCESS;
- src_object = VM_OBJECT_NULL;
+ src_top_object = VM_OBJECT_NULL;
+ src_page_object = VM_OBJECT_NULL;
kernel_mapping = 0;
upl = NULL;
upl_pl = NULL;
/*
* Gather in a UPL all the VM pages requested by VM.
*/
- mo_control = pager->pager_control;
+ mo_control = pager->ap_pgr_hdr.mo_control;
upl_size = length;
upl_flags =
pl_count = 0;
kr = memory_object_upl_request(mo_control,
offset, upl_size,
- &upl, NULL, NULL, upl_flags);
+ &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
if (kr != KERN_SUCCESS) {
retval = kr;
goto done;
2 * PAGE_SIZE_64,
0,
0,
+ VM_MAP_KERNEL_FLAGS_NONE,
&map_entry);
if (kr != KERN_SUCCESS) {
vm_object_deallocate(kernel_object);
* backing VM object (itself backed by the encrypted file via
* the vnode pager).
*/
- src_object = pager->backing_object;
- assert(src_object != VM_OBJECT_NULL);
- vm_object_reference(src_object); /* to keep the source object alive */
+ src_top_object = pager->backing_object;
+ assert(src_top_object != VM_OBJECT_NULL);
+ vm_object_reference(src_top_object); /* keep the source object alive */
/*
* Fill in the contents of the pages requested by VM.
/*
* Map the source (encrypted) page in the kernel's
* virtual address space.
- * We already hold a reference on the src_object.
+ * We already hold a reference on the src_top_object.
*/
retry_src_fault:
- vm_object_lock(src_object);
- vm_object_paging_begin(src_object);
+ vm_object_lock(src_top_object);
+ vm_object_paging_begin(src_top_object);
error_code = 0;
prot = VM_PROT_READ;
src_page = VM_PAGE_NULL;
- kr = vm_fault_page(src_object,
+ kr = vm_fault_page(src_top_object,
pager->backing_offset + offset + cur_offset,
VM_PROT_READ,
FALSE,
goto done;
case VM_FAULT_SUCCESS_NO_VM_PAGE:
/* success but no VM page: fail */
- vm_object_paging_end(src_object);
- vm_object_unlock(src_object);
+ vm_object_paging_end(src_top_object);
+ vm_object_unlock(src_top_object);
/*FALLTHROUGH*/
case VM_FAULT_MEMORY_ERROR:
/* the page is not there ! */
assert(src_page != VM_PAGE_NULL);
assert(src_page->busy);
- if (!src_page->active &&
- !src_page->inactive &&
- !src_page->throttled) {
+ if (!VM_PAGE_NON_SPECULATIVE_PAGEABLE(src_page)) {
+
vm_page_lockspin_queues();
- if (!src_page->active &&
- !src_page->inactive &&
- !src_page->throttled) {
+
+ if (!VM_PAGE_NON_SPECULATIVE_PAGEABLE(src_page)) {
vm_page_deactivate(src_page);
}
vm_page_unlock_queues();
*/
#if __x86_64__
src_vaddr = (vm_map_offset_t)
- PHYSMAP_PTOV((pmap_paddr_t)src_page->phys_page
+ PHYSMAP_PTOV((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
<< PAGE_SHIFT);
+#elif __arm__ || __arm64__
+ src_vaddr = (vm_map_offset_t)
+ phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
+ << PAGE_SHIFT);
#else
- pmap_enter(kernel_pmap,
- src_vaddr,
- src_page->phys_page,
- VM_PROT_READ,
- VM_PROT_NONE,
- 0,
- TRUE);
+ kr = pmap_enter(kernel_pmap,
+ src_vaddr,
+ VM_PAGE_GET_PHYS_PAGE(src_page),
+ VM_PROT_READ,
+ VM_PROT_NONE,
+ 0,
+ TRUE);
+
+ assert(kr == KERN_SUCCESS);
#endif
/*
* Establish an explicit pmap mapping of the destination
#if __x86_64__
dst_vaddr = (vm_map_offset_t)
PHYSMAP_PTOV((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
+#elif __arm__ || __arm64__
+ dst_vaddr = (vm_map_offset_t)
+ phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
#else
- pmap_enter(kernel_pmap,
- dst_vaddr,
- dst_pnum,
- VM_PROT_READ | VM_PROT_WRITE,
- VM_PROT_NONE,
- 0,
- TRUE);
+ kr = pmap_enter(kernel_pmap,
+ dst_vaddr,
+ dst_pnum,
+ VM_PROT_READ | VM_PROT_WRITE,
+ VM_PROT_NONE,
+ 0,
+ TRUE);
+
+ assert(kr == KERN_SUCCESS);
#endif
+ src_page_object = VM_PAGE_OBJECT(src_page);
/*
* Validate the original page...
*/
- if (src_page->object->code_signed) {
+ if (src_page_object->code_signed) {
vm_page_validate_cs_mapped(
src_page,
(const void *) src_vaddr);
* to unlock the object here.
*/
assert(src_page->busy);
- assert(src_page->object->paging_in_progress > 0);
- vm_object_unlock(src_page->object);
+ assert(src_page_object->paging_in_progress > 0);
+ vm_object_unlock(src_page_object);
/*
* Decrypt the encrypted contents of the source page
offset_in_page),
*(uint64_t *)(dst_vaddr+
offset_in_page+8),
- src_page->object->code_signed,
+ src_page_object->code_signed,
src_page->cs_validated,
src_page->cs_tainted,
src_page->cs_nx);
(uint64_t) offset_in_page,
*(uint64_t *)(dst_vaddr+offset_in_page),
*(uint64_t *)(dst_vaddr+offset_in_page+8),
- src_page->object->code_signed,
+ src_page_object->code_signed,
src_page->cs_validated,
src_page->cs_tainted,
src_page->cs_nx,
retval = KERN_ABORTED;
}
+ assert(VM_PAGE_OBJECT(src_page) == src_page_object);
assert(src_page->busy);
- assert(src_page->object->paging_in_progress > 0);
- vm_object_lock(src_page->object);
+ assert(src_page_object->paging_in_progress > 0);
+ vm_object_lock(src_page_object);
#if __x86_64__ || __arm__ || __arm64__
/* we used the 1-to-1 mapping of physical memory */
/*
* Cleanup the result of vm_fault_page() of the source page.
*/
- PAGE_WAKEUP_DONE(src_page);
- vm_object_paging_end(src_page->object);
- vm_object_unlock(src_page->object);
+ if (retval == KERN_SUCCESS &&
+ src_page->busy &&
+ !VM_PAGE_WIRED(src_page) &&
+ !src_page->dirty &&
+ !src_page->precious &&
+ !src_page->laundry &&
+ !src_page->cleaning) {
+ int refmod_state;
+
+ refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(src_page));
+
+ if (refmod_state & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(src_page, FALSE);
+ }
+ if (!src_page->dirty) {
+ vm_page_free_unlocked(src_page, TRUE);
+ src_page = VM_PAGE_NULL;
+ } else {
+ PAGE_WAKEUP_DONE(src_page);
+ }
+ } else {
+ PAGE_WAKEUP_DONE(src_page);
+ }
+ src_page = VM_PAGE_NULL;
+ vm_object_paging_end(src_page_object);
+ vm_object_unlock(src_page_object);
if (top_page != VM_PAGE_NULL) {
- vm_object_t top_object;
-
- top_object = top_page->object;
- vm_object_lock(top_object);
+ assert(VM_PAGE_OBJECT(top_page) == src_top_object);
+ vm_object_lock(src_top_object);
VM_PAGE_FREE(top_page);
- vm_object_paging_end(top_object);
- vm_object_unlock(top_object);
+ vm_object_paging_end(src_top_object);
+ vm_object_unlock(src_top_object);
}
}
src_vaddr = 0;
dst_vaddr = 0;
}
- if (src_object != VM_OBJECT_NULL) {
- vm_object_deallocate(src_object);
+ if (src_top_object != VM_OBJECT_NULL) {
+ vm_object_deallocate(src_top_object);
}
return retval;
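/*
 * Summary sketch of the copy loop above, not part of this diff: for each
 * page in the UPL, vm_fault_page() brings the encrypted source page in from
 * the backing object, source and destination get kernel virtual addresses
 * (via the physical map on x86_64/arm/arm64, or a transient pmap_enter()
 * elsewhere), the source is code-sign validated when its object is
 * code_signed, and the crypter registered at pager setup is invoked roughly
 * as
 *
 *	pager->crypt_info->page_decrypt(src_vaddr + offset_in_page,
 *	                                dst_vaddr + offset_in_page,
 *	                                <file offset of this chunk>,
 *	                                pager->crypt_info->crypt_ops);
 *
 * (the exact offset computation is elided here). The source page is then
 * released, and freed outright when it is clean and unwired, so the
 * ciphertext does not stay resident next to the decrypted copy.
 */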
pager->crypt_info = NULL;
/* trigger the destruction of the memory object */
- memory_object_destroy(pager->pager_control, 0);
+ memory_object_destroy(pager->ap_pgr_hdr.mo_control, 0);
}
/*
* pager structure.
*/
lck_mtx_unlock(&apple_protect_pager_lock);
- if (pager->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
- memory_object_control_deallocate(pager->pager_control);
- pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
+ if (pager->ap_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
+ memory_object_control_deallocate(pager->ap_pgr_hdr.mo_control);
+ pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
}
kfree(pager, sizeof (*pager));
pager = APPLE_PROTECT_PAGER_NULL;
*/
kern_return_t
apple_protect_pager_synchronize(
- memory_object_t mem_obj,
- memory_object_offset_t offset,
- memory_object_size_t length,
+ __unused memory_object_t mem_obj,
+ __unused memory_object_offset_t offset,
+ __unused memory_object_size_t length,
__unused vm_sync_t sync_flags)
{
- apple_protect_pager_t pager;
-
- PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_synchronize: %p\n", mem_obj));
-
- pager = apple_protect_pager_lookup(mem_obj);
-
- memory_object_synchronize_completed(pager->pager_control,
- offset, length);
-
- return KERN_SUCCESS;
+ panic("apple_protect_pager_synchronize: memory_object_synchronize no longer supported\n");
+ return KERN_FAILURE;
}
/*
{
apple_protect_pager_t pager;
+ assert(mem_obj->mo_pager_ops == &apple_protect_pager_ops);
pager = (apple_protect_pager_t) mem_obj;
- assert(pager->pager_ops == &apple_protect_pager_ops);
assert(pager->ref_count > 0);
return pager;
}
* we reserve the first word in the object for a fake ip_kotype
* setting - that will tell vm_map to use it as a memory object.
*/
- pager->pager_ops = &apple_protect_pager_ops;
- pager->pager_ikot = IKOT_MEMORY_OBJECT;
+ pager->ap_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
+ pager->ap_pgr_hdr.mo_pager_ops = &apple_protect_pager_ops;
+ pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
+
pager->is_ready = FALSE;/* not ready until it has a "name" */
pager->ref_count = 1; /* existence reference (for the cache) */
pager->ref_count++; /* for the caller */
pager->is_mapped = FALSE;
- pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
pager->backing_object = backing_object;
pager->backing_offset = backing_offset;
pager->crypto_backing_offset = crypto_backing_offset;