* the "apple protect" EMM.
*/
typedef struct apple_protect_pager {
- struct ipc_object_header pager_header; /* fake ip_kotype() */
- memory_object_pager_ops_t pager_ops; /* == &apple_protect_pager_ops */
+ /* mandatory generic header */
+ struct memory_object ap_pgr_hdr;
+
+ /* pager-specific data */
queue_chain_t pager_queue; /* next & prev pagers */
unsigned int ref_count; /* reference count */
boolean_t is_ready; /* is this pager ready ? */
boolean_t is_mapped; /* is this mem_obj mapped ? */
- memory_object_control_t pager_control; /* mem object control handle */
vm_object_t backing_object; /* VM obj w/ encrypted data */
vm_object_offset_t backing_offset;
vm_object_offset_t crypto_backing_offset; /* for key... */
struct pager_crypt_info *crypt_info;
} *apple_protect_pager_t;
#define APPLE_PROTECT_PAGER_NULL ((apple_protect_pager_t) NULL)
-#define pager_ikot pager_header.io_bits
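The three hand-rolled fields removed above (the fake ip_kotype word, the ops vector, and the control handle) map one-for-one onto the new generic header. For reference, a sketch of "struct memory_object" as declared in osfmk/mach/memory_object_types.h around this change (treat the exact field types as approximate):

	struct memory_object {
		mo_ipc_object_bits_t		mo_ikot;	/* fake ip_kotype(); must stay first */
		memory_object_pager_ops_t	mo_pager_ops;	/* pager's operations vector */
		memory_object_control_t		mo_control;	/* mem object control handle */
	};

With the header at offset 0, mo_ikot occupies the object's first word, so the old pager_ikot alias onto pager_header.io_bits (deleted above) is no longer needed.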
/*
* List of memory objects managed by this EMM.
memory_object_control_reference(control);
- pager->pager_control = control;
+ pager->ap_pgr_hdr.mo_control = control;
attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
/*
* Gather in a UPL all the VM pages requested by VM.
*/
- mo_control = pager->pager_control;
+ mo_control = pager->ap_pgr_hdr.mo_control;
upl_size = length;
upl_flags =
pl_count = 0;
kr = memory_object_upl_request(mo_control,
offset, upl_size,
- &upl, NULL, NULL, upl_flags);
+ &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
if (kr != KERN_SUCCESS) {
retval = kr;
goto done;
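The extra trailing argument is a kernel memory accounting tag: memory_object_upl_request() now takes a tag so the pages grabbed through the UPL can be attributed to a subsystem (VM_KERN_MEMORY_SECURITY here). A sketch of the updated prototype, reconstructed from this call site; the parameter names and the tag's exact type (int vs. vm_tag_t) are assumptions:

	extern kern_return_t memory_object_upl_request(
		memory_object_control_t	memory_control,
		memory_object_offset_t	offset,
		upl_size_t		size,
		upl_t			*upl,
		upl_page_info_array_t	page_list,
		unsigned int		*count,
		upl_control_flags_t	cntrl_flags,
		vm_tag_t		tag);	/* new: e.g. VM_KERN_MEMORY_SECURITY */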
2 * PAGE_SIZE_64,
0,
0,
+ VM_MAP_KERNEL_FLAGS_NONE,
&map_entry);
if (kr != KERN_SUCCESS) {
vm_object_deallocate(kernel_object);
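vm_map_find_space() similarly grew a vm_map_kernel_flags_t parameter; VM_MAP_KERNEL_FLAGS_NONE requests the default placement behavior. The shape implied by the call above (parameter names are assumptions):

	extern kern_return_t vm_map_find_space(
		vm_map_t		map,
		vm_map_offset_t		*address,
		vm_map_size_t		size,
		vm_map_offset_t		mask,
		int			flags,
		vm_map_kernel_flags_t	vmk_flags,
		vm_map_entry_t		*o_entry);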
src_vaddr = (vm_map_offset_t)
PHYSMAP_PTOV((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
<< PAGE_SHIFT);
+#elif __arm__ || __arm64__
+ src_vaddr = (vm_map_offset_t)
+ phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
+ << PAGE_SHIFT);
#else
- pmap_enter(kernel_pmap,
- src_vaddr,
- VM_PAGE_GET_PHYS_PAGE(src_page),
- VM_PROT_READ,
- VM_PROT_NONE,
- 0,
- TRUE);
+ kr = pmap_enter(kernel_pmap,
+ src_vaddr,
+ VM_PAGE_GET_PHYS_PAGE(src_page),
+ VM_PROT_READ,
+ VM_PROT_NONE,
+ 0,
+ TRUE);
+
+ assert(kr == KERN_SUCCESS);
#endif
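This hunk (and the matching one for the destination page below) tracks a signature change: pmap_enter() now returns kern_return_t instead of void, and these callers assert success because failing to map a wired kernel page at this point would be unrecoverable. The revised prototype, sketched from the call sites:

	extern kern_return_t pmap_enter(
		pmap_t		pmap,
		vm_map_offset_t	va,
		ppnum_t		pn,
		vm_prot_t	prot,
		vm_prot_t	fault_type,
		unsigned int	flags,
		boolean_t	wired);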
/*
* Establish an explicit pmap mapping of the destination
#if __x86_64__
dst_vaddr = (vm_map_offset_t)
PHYSMAP_PTOV((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
+#elif __arm__ || __arm64__
+ dst_vaddr = (vm_map_offset_t)
+ phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
#else
- pmap_enter(kernel_pmap,
- dst_vaddr,
- dst_pnum,
- VM_PROT_READ | VM_PROT_WRITE,
- VM_PROT_NONE,
- 0,
- TRUE);
+ kr = pmap_enter(kernel_pmap,
+ dst_vaddr,
+ dst_pnum,
+ VM_PROT_READ | VM_PROT_WRITE,
+ VM_PROT_NONE,
+ 0,
+ TRUE);
+
+ assert(kr == KERN_SUCCESS);
#endif
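Note the design on the #if side: x86_64's PHYSMAP_PTOV and (newly) ARM's phystokv() both compute a kernel-virtual address through the static physical aperture, so those architectures skip the transient pmap_enter()/assert sequence entirely for both the source and destination pages.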
src_page_object = VM_PAGE_OBJECT(src_page);
pager->crypt_info = NULL;
/* trigger the destruction of the memory object */
- memory_object_destroy(pager->pager_control, 0);
+ memory_object_destroy(pager->ap_pgr_hdr.mo_control, 0);
}
/*
* pager structure.
*/
lck_mtx_unlock(&apple_protect_pager_lock);
- if (pager->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
- memory_object_control_deallocate(pager->pager_control);
- pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
+ if (pager->ap_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
+ memory_object_control_deallocate(pager->ap_pgr_hdr.mo_control);
+ pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
}
kfree(pager, sizeof (*pager));
pager = APPLE_PROTECT_PAGER_NULL;
*/
kern_return_t
apple_protect_pager_synchronize(
- memory_object_t mem_obj,
- memory_object_offset_t offset,
- memory_object_size_t length,
+ __unused memory_object_t mem_obj,
+ __unused memory_object_offset_t offset,
+ __unused memory_object_size_t length,
__unused vm_sync_t sync_flags)
{
- apple_protect_pager_t pager;
-
- PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_synchronize: %p\n", mem_obj));
-
- pager = apple_protect_pager_lookup(mem_obj);
-
- memory_object_synchronize_completed(pager->pager_control,
- offset, length);
-
- return KERN_SUCCESS;
+ panic("apple_protect_pager_synchronize: memory_object_synchronize no longer supported\n");
+ return KERN_FAILURE;
}
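The memory_object_synchronize() interface, including the completion handshake this routine used to perform via memory_object_synchronize_completed(), was retired from the pager protocol in this release; the stub now panics if the message is ever received, and the unreachable KERN_FAILURE return only satisfies the compiler.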
/*
{
apple_protect_pager_t pager;
+ assert(mem_obj->mo_pager_ops == &apple_protect_pager_ops);
pager = (apple_protect_pager_t) mem_obj;
- assert(pager->pager_ops == &apple_protect_pager_ops);
assert(pager->ref_count > 0);
return pager;
}
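Because the generic header sits at offset 0 of every pager structure, the ops-vector check can now be made on the bare memory_object_t before the downcast, rather than after it as the deleted assert did. A hypothetical helper (not in the source) showing the same pattern:

	static inline boolean_t
	is_apple_protect_pager(memory_object_t mem_obj)
	{
		return (mem_obj != MEMORY_OBJECT_NULL &&
			mem_obj->mo_pager_ops == &apple_protect_pager_ops);
	}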
* we reserve the first word in the object for a fake ip_kotype
* setting - that will tell vm_map to use it as a memory object.
*/
- pager->pager_ops = &apple_protect_pager_ops;
- pager->pager_ikot = IKOT_MEMORY_OBJECT;
+ pager->ap_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
+ pager->ap_pgr_hdr.mo_pager_ops = &apple_protect_pager_ops;
+ pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
+
pager->is_ready = FALSE;/* not ready until it has a "name" */
pager->ref_count = 1; /* existence reference (for the cache) */
pager->ref_count++; /* for the caller */
pager->is_mapped = FALSE;
- pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
pager->backing_object = backing_object;
pager->backing_offset = backing_offset;
pager->crypto_backing_offset = crypto_backing_offset;
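The header initialization now happens in one block: the fake kotype first (so vm_map will treat the object as a memory object), then the ops vector that apple_protect_pager_lookup() validates against, and a null control handle that stays MEMORY_OBJECT_CONTROL_NULL until the init path stores the control port (see the memory_object_control_reference() hunk near the top).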