m->cs_validated = FALSE;
m->cs_tainted = FALSE;
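+ /* clear the new cs_nx bit together with the rest of the code-signing state */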
+ m->cs_nx = FALSE;
if (no_zero_fill == TRUE) {
my_fault = DBG_NZF_PAGE_FAULT;
}
#define page_immutable(m,prot) ((m)->cs_validated /*&& ((prot) & VM_PROT_EXECUTE)*/)
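+/* page_nx(m): TRUE when code signing has marked this page non-executable */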
+#define page_nx(m) ((m)->cs_nx)
map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) &&
(pmap == vm_map_pmap(current_thread()->map)));
return KERN_CODESIGN_ERROR;
}
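+ /* refuse to map a page executable if code signing flagged it NX */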
+ if (cs_enforcement_enabled && page_nx(m) && (prot & VM_PROT_EXECUTE)) {
+ if (cs_debug)
+ printf("page marked to be NX, not letting it be mapped EXEC\n");
+ return KERN_CODESIGN_ERROR;
+ }
+
/* A page could be tainted, or pose a risk of being tainted later.
* Check whether the receiving process wants it, and make it feel
* the consequences (that happens in cs_invalid_page()).
kern_return_t kr;
memory_object_t pager;
void *blobs;
- boolean_t validated, tainted;
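+ /* tainted becomes a CS_VALIDATE_* bit mask instead of a plain boolean */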
+ boolean_t validated;
+ unsigned tainted;
assert(page->busy);
vm_object_lock_assert_exclusive(page->object);
}
/* verify the SHA1 hash for this page */
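+ /* start with an empty taint mask; cs_validate_page() reports CS_VALIDATE_* bits into it */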
+ tainted = 0;
validated = cs_validate_page(blobs,
pager,
offset + object->paging_offset,
page->cs_validated = validated;
if (validated) {
- page->cs_tainted = tainted;
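+ /* pick the individual CS_VALIDATE_* bits out of the returned taint mask */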
+ page->cs_tainted = !!(tainted & CS_VALIDATE_TAINTED);
+ page->cs_nx = !!(tainted & CS_VALIDATE_NX);
}
}