boolean_t current_thread_aborted(void);
/* Forward declarations of internal routines. */
-extern kern_return_t vm_fault_wire_fast(
+static kern_return_t vm_fault_wire_fast(
vm_map_t map,
vm_map_offset_t va,
+ vm_prot_t prot,
vm_map_entry_t entry,
pmap_t pmap,
vm_map_offset_t pmap_addr,
ppnum_t *physpage_p);
-extern void vm_fault_continue(void);
-
-extern void vm_fault_copy_cleanup(
+static kern_return_t vm_fault_internal(
+ vm_map_t map,
+ vm_map_offset_t vaddr,
+ vm_prot_t caller_prot,
+ boolean_t change_wiring,
+ int interruptible,
+ pmap_t pmap,
+ vm_map_offset_t pmap_addr,
+ ppnum_t *physpage_p);
+
+static void vm_fault_copy_cleanup(
vm_page_t page,
vm_page_t top_page);
-extern void vm_fault_copy_dst_cleanup(
+static void vm_fault_copy_dst_cleanup(
vm_page_t page);
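+/*
+ * Note: vm_fault_wire_fast() and vm_fault_internal() are file-local
+ * ("static") in this version and take the caller's protection, which can
+ * also carry a VM memory tag (recovered via VM_PROT_MEMORY_TAG() in the
+ * wiring paths below); the old vm_fault_continue() declaration is gone.
+ */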
#if VM_FAULT_CLASSIFY
void vm_pre_fault(vm_map_offset_t);
+extern int not_in_kdp;
+extern char *kdp_compressor_decompressed_page;
+extern addr64_t kdp_compressor_decompressed_page_paddr;
+extern ppnum_t kdp_compressor_decompressed_page_ppnum;
+
/*
* Routine: vm_fault_init
* Purpose:
PE_get_default("kern.vm_compressor", &vm_compressor_mode, sizeof(vm_compressor_mode));
}
PE_parse_boot_argn("vm_compressor_threads", &vm_compressor_thread_count, sizeof (vm_compressor_thread_count));
+
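+ /*
+ * An explicit "vm_compressor_immediate" boot-arg (typically supplied via
+ * nvram boot-args) or the "kern.vm_compressor_immediate" platform default
+ * sets vm_compressor_immediate_preferred; the *_override flag records
+ * that an explicit value was provided at all.
+ */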
+ if (PE_parse_boot_argn("vm_compressor_immediate", &vm_compressor_temp, sizeof (vm_compressor_temp)))
+ vm_compressor_immediate_preferred_override = TRUE;
+ else {
+ if (PE_get_default("kern.vm_compressor_immediate", &vm_compressor_temp, sizeof(vm_compressor_temp)))
+ vm_compressor_immediate_preferred_override = TRUE;
+ }
+ if (vm_compressor_immediate_preferred_override == TRUE) {
+ if (vm_compressor_temp)
+ vm_compressor_immediate_preferred = TRUE;
+ else
+ vm_compressor_immediate_preferred = FALSE;
+ }
printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode);
}
#if (DEVELOPMENT || DEBUG)
uint32_t vm_page_creation_throttled_hard = 0;
uint32_t vm_page_creation_throttled_soft = 0;
+uint64_t vm_page_creation_throttle_avoided = 0;
#endif /* DEVELOPMENT || DEBUG */
static int
if ((vm_page_free_count < vm_page_throttle_limit || ((COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) && SWAPPER_NEEDS_TO_UNTHROTTLE())) &&
thread->t_page_creation_count > (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC)) {
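+ /*
+ * Even when this thread has exceeded its page-creation rate, skip the
+ * throttle if no thread is waiting for free pages; the counter under
+ * (DEVELOPMENT || DEBUG) records how often the throttle was avoided.
+ */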
+ if (vm_page_free_wanted == 0 && vm_page_free_wanted_privileged == 0) {
+#if (DEVELOPMENT || DEBUG)
+ OSAddAtomic64(1, &vm_page_creation_throttle_avoided);
+#endif
+ goto no_throttle;
+ }
clock_get_system_microtime(&tv_sec, &tv_usec);
elapsed_sec = tv_sec - thread->t_page_creation_time;
return (0);
}
+
/*
* check for various conditions that would
* prevent us from creating a ZF page...
return (VM_FAULT_RETRY);
}
}
- if (page_throttle == TRUE && (throttle_delay = vm_page_throttled(FALSE))) {
- /*
- * we're throttling zero-fills...
- * treat this as if we couldn't grab a page
- */
- if (m != VM_PAGE_NULL)
- VM_PAGE_FREE(m);
- vm_fault_cleanup(object, first_m);
+ if (page_throttle == TRUE) {
+ if ((throttle_delay = vm_page_throttled(FALSE))) {
+ /*
+ * we're throttling zero-fills...
+ * treat this as if we couldn't grab a page
+ */
+ if (m != VM_PAGE_NULL)
+ VM_PAGE_FREE(m);
+ vm_fault_cleanup(object, first_m);
- VM_DEBUG_EVENT(vmf_check_zfdelay, VMF_CHECK_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
+ VM_DEBUG_EVENT(vmf_check_zfdelay, VMF_CHECK_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
- delay(throttle_delay);
+ delay(throttle_delay);
- if (current_thread_aborted()) {
+ if (current_thread_aborted()) {
+ thread_interrupt_level(interruptible_state);
+ return VM_FAULT_INTERRUPTED;
+ }
thread_interrupt_level(interruptible_state);
- return VM_FAULT_INTERRUPTED;
- }
- thread_interrupt_level(interruptible_state);
- return (VM_FAULT_MEMORY_SHORTAGE);
+ return (VM_FAULT_MEMORY_SHORTAGE);
+ }
}
return (VM_FAULT_SUCCESS);
}
*/
assert(!m->pageout_queue);
- VM_PAGE_QUEUES_REMOVE(m);
-
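+ /*
+ * vm_page_queues_remove() replaces the VM_PAGE_QUEUES_REMOVE() macro,
+ * and vm_page_check_pageable_safe() sanity-checks that this page may
+ * legally be placed on a pageable queue before it goes on the
+ * throttled queue.
+ */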
+ vm_page_queues_remove(m);
+ vm_page_check_pageable_safe(m);
queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
m->throttled = TRUE;
vm_page_throttled_count++;
vm_page_lockspin_queues();
assert(!m->pageout_queue);
- VM_PAGE_QUEUES_REMOVE(m);
+ vm_page_queues_remove(m);
vm_page_unlock_queues();
}
*/
vm_page_lockspin_queues();
if (m->speculative)
- VM_PAGE_QUEUES_REMOVE(m);
+ vm_page_queues_remove(m);
vm_page_unlock_queues();
}
}
if (fault_info && fault_info->batch_pmap_op == TRUE) {
- vm_page_insert_internal(m, object, offset, FALSE, TRUE, TRUE);
+ vm_page_insert_internal(m, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
} else {
vm_page_insert(m, object, offset);
}
m->absent = TRUE;
if (fault_info && fault_info->batch_pmap_op == TRUE) {
- vm_page_insert_internal(m, object, offset, FALSE, TRUE, TRUE);
+ vm_page_insert_internal(m, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
} else {
vm_page_insert(m, object, offset);
}
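+/*
+ * In both insertions above, vm_page_insert_internal() now takes a VM tag
+ * (VM_KERN_MEMORY_NONE here) and two extra trailing arguments (FALSE,
+ * NULL) alongside the original batch_pmap_op flag.
+ */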
pmap_t pmap,
vm_map_offset_t vaddr,
vm_prot_t prot,
- vm_prot_t fault_type,
+ vm_prot_t caller_prot,
boolean_t wired,
boolean_t change_wiring,
boolean_t no_cache,
boolean_t must_disconnect = 0;
boolean_t map_is_switched, map_is_switch_protected;
int cs_enforcement_enabled;
+ vm_prot_t fault_type;
+ fault_type = change_wiring ? VM_PROT_NONE : caller_prot;
+
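+ /*
+ * vm_fault_enter() now receives the caller's protection (caller_prot),
+ * which can carry a VM memory tag consumed by vm_page_wire() below via
+ * VM_PROT_MEMORY_TAG(); the effective fault type is derived here, and
+ * is VM_PROT_NONE for wiring-only changes.
+ */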
vm_object_lock_assert_held(m->object);
#if DEBUG
lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
* can be changed without the kernel noticing, therefore unsigned
* code can be created
*/
- if (m->cs_tainted ||
- ((cs_enforcement_enabled && !cs_bypass ) &&
- (/* The page is unsigned and wants to be executable */
- (!m->cs_validated && (prot & VM_PROT_EXECUTE)) ||
- /* The page should be immutable, but is in danger of being modified
+ if (!cs_bypass &&
+ (m->cs_tainted ||
+ (cs_enforcement_enabled &&
+ (/* The page is unsigned and wants to be executable */
+ (!m->cs_validated && (prot & VM_PROT_EXECUTE)) ||
+ /* The page should be immutable, but is in danger of being modified
* This is the case where we want policy from the code directory -
* is the page immutable or not? For now we have to assume that
* code pages will be immutable, data pages not.
*/
(page_immutable(m, prot) && ((prot & VM_PROT_WRITE) || m->wpmapped))
))
- )
+ ))
{
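+ /* We only get here when code signing is not bypassed for this mapping
+ * (!cs_bypass now guards the whole check above) and the page is either
+ * already tainted or, with CS enforcement enabled, unsigned but
+ * executable, or "immutable" yet writable or already written to.
+ */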
/* We will have a tainted page. Have to handle the special case
* of a switched map now. If the map is not switched, standard
pathname_len = 0;
filename_len = 0;
truncated_path = FALSE;
- if (file_object->pager == NULL) {
- /* no pager -> no file -> no pathname */
- pathname = (char *) "<nil>";
- } else {
+ /* no pager -> no file -> no pathname, use "<nil>" in that case */
+ if (file_object->pager != NULL) {
pathname = (char *)kalloc(__PATH_MAX * 2);
if (pathname) {
pathname[0] = '\0';
filename,
filename_len,
&truncated_path);
+ if (pathname) {
+ /* safety first... */
+ pathname[__PATH_MAX-1] = '\0';
+ filename[__PATH_MAX-1] = '\0';
+ }
vnode_pager_get_object_mtime(file_object->pager,
&mtime,
&cs_mtime);
"wpmapped:%d slid:%d)\n",
pid, procname, (addr64_t) vaddr,
file_offset,
- (pathname ? pathname : ""),
+ (pathname ? pathname : "<nil>"),
(truncated_path ? "/.../" : ""),
(truncated_path ? filename : ""),
cs_mtime.tv_sec, cs_mtime.tv_nsec,
if (wired) {
if (kr == KERN_SUCCESS) {
- vm_page_wire(m);
+ vm_page_wire(m, VM_PROT_MEMORY_TAG(caller_prot), TRUE);
}
} else {
vm_page_unwire(m, TRUE);
VPL_LOCK(&lq->vpl_lock);
+ vm_page_check_pageable_safe(m);
queue_enter(&lq->vpl_queue, m,
vm_page_t, pageq);
m->local = TRUE;
*/
if (!VM_PAGE_WIRED(m)) {
if (m->clean_queue) {
- VM_PAGE_QUEUES_REMOVE(m);
+ vm_page_queues_remove(m);
vm_pageout_cleaned_reactivated++;
vm_pageout_cleaned_fault_reactivated++;
NULL);
}
+
kern_return_t
vm_fault_internal(
vm_map_t map,
vm_map_offset_t vaddr,
- vm_prot_t fault_type,
+ vm_prot_t caller_prot,
boolean_t change_wiring,
int interruptible,
pmap_t caller_pmap,
boolean_t interruptible_state;
vm_map_t real_map = map;
vm_map_t original_map = map;
+ vm_prot_t fault_type;
vm_prot_t original_fault_type;
struct vm_object_fault_info fault_info;
boolean_t need_collapse = FALSE;
interruptible_state = thread_interrupt_level(interruptible);
+ fault_type = (change_wiring ? VM_PROT_NONE : caller_prot);
+
VM_STAT_INCR(faults);
current_task()->faults++;
original_fault_type = fault_type;
&fault_info,
&real_map);
+
if (kr != KERN_SUCCESS) {
vm_map_unlock_read(map);
goto done;
caller_pmap,
caller_pmap_addr,
prot,
- fault_type,
+ caller_prot,
wired,
change_wiring,
fault_info.no_cache,
pmap,
vaddr,
prot,
- fault_type,
+ caller_prot,
wired,
change_wiring,
fault_info.no_cache,
caller_pmap,
caller_pmap_addr,
prot,
- fault_type,
+ caller_prot,
wired,
change_wiring,
fault_info.no_cache,
pmap,
vaddr,
prot,
- fault_type,
+ caller_prot,
wired,
change_wiring,
fault_info.no_cache,
hdelta = entry->vme_end - laddr;
if (entry->is_sub_map) {
- laddr = (laddr - entry->vme_start)
- + entry->offset;
- vm_map_lock_read(entry->object.sub_map);
+ laddr = ((laddr - entry->vme_start)
+ + VME_OFFSET(entry));
+ vm_map_lock_read(VME_SUBMAP(entry));
if (map != real_map)
vm_map_unlock_read(map);
if (entry->use_pmap) {
vm_map_unlock_read(real_map);
- real_map = entry->object.sub_map;
+ real_map = VME_SUBMAP(entry);
}
- map = entry->object.sub_map;
+ map = VME_SUBMAP(entry);
} else {
break;
}
if (vm_map_lookup_entry(map, laddr, &entry) &&
- (entry->object.vm_object != NULL) &&
- (entry->object.vm_object == object)) {
+ (VME_OBJECT(entry) != NULL) &&
+ (VME_OBJECT(entry) == object)) {
- int superpage = (!object->pager_created && object->phys_contiguous)? VM_MEM_SUPERPAGE : 0;
+ int superpage;
+ if (!object->pager_created &&
+ object->phys_contiguous) {
+ superpage = VM_MEM_SUPERPAGE;
+ } else {
+ superpage = 0;
+ }
if (superpage && physpage_p) {
/* for vm_map_wire_and_extract() */
- *physpage_p = (ppnum_t) ((((vm_map_offset_t) entry->object.vm_object->vo_shadow_offset)
- + entry->offset
- + (laddr - entry->vme_start))
- >> PAGE_SHIFT);
+ *physpage_p = (ppnum_t)
+ ((((vm_map_offset_t)
+ object->vo_shadow_offset)
+ + VME_OFFSET(entry)
+ + (laddr - entry->vme_start))
+ >> PAGE_SHIFT);
}
if (caller_pmap) {
assert((uint32_t)((ldelta + hdelta) >> PAGE_SHIFT) == ((ldelta + hdelta) >> PAGE_SHIFT));
pmap_map_block(caller_pmap,
(addr64_t)(caller_pmap_addr - ldelta),
- (ppnum_t)((((vm_map_offset_t) (entry->object.vm_object->vo_shadow_offset)) +
- entry->offset + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT),
+ (ppnum_t)((((vm_map_offset_t) (VME_OBJECT(entry)->vo_shadow_offset)) +
+ VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT),
(uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot,
(VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
} else {
assert((uint32_t)((ldelta + hdelta) >> PAGE_SHIFT) == ((ldelta + hdelta) >> PAGE_SHIFT));
pmap_map_block(real_map->pmap,
(addr64_t)(vaddr - ldelta),
- (ppnum_t)((((vm_map_offset_t)(entry->object.vm_object->vo_shadow_offset)) +
- entry->offset + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT),
+ (ppnum_t)((((vm_map_offset_t)(VME_OBJECT(entry)->vo_shadow_offset)) +
+ VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT),
(uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot,
(VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
}
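+/*
+ * The superpage path above maps a physically contiguous, pager-less
+ * object in one pmap_map_block() call, computing the physical page from
+ * vo_shadow_offset + VME_OFFSET(entry); map-entry fields are read
+ * through the VME_OBJECT()/VME_OFFSET()/VME_SUBMAP() accessors rather
+ * than through the unions directly.
+ */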
vm_fault_wire(
vm_map_t map,
vm_map_entry_t entry,
+ vm_prot_t prot,
pmap_t pmap,
vm_map_offset_t pmap_addr,
ppnum_t *physpage_p)
assert(entry->in_transition);
- if ((entry->object.vm_object != NULL) &&
+ if ((VME_OBJECT(entry) != NULL) &&
!entry->is_sub_map &&
- entry->object.vm_object->phys_contiguous) {
+ VME_OBJECT(entry)->phys_contiguous) {
return KERN_SUCCESS;
}
*/
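+ /*
+ * Wire each page via the fast path first; on failure, fall back to
+ * vm_fault_internal().  Both now receive the caller's protection (and
+ * with it the wire tag); the fallback previously passed VM_PROT_NONE.
+ */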
for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
- rc = vm_fault_wire_fast(map, va, entry, pmap,
+ rc = vm_fault_wire_fast(map, va, prot, entry, pmap,
pmap_addr + (va - entry->vme_start),
physpage_p);
if (rc != KERN_SUCCESS) {
- rc = vm_fault_internal(map, va, VM_PROT_NONE, TRUE,
+ rc = vm_fault_internal(map, va, prot, TRUE,
((pmap == kernel_pmap)
? THREAD_UNINT
: THREAD_ABORTSAFE),
vm_object_t object;
struct vm_object_fault_info fault_info;
- object = (entry->is_sub_map)
- ? VM_OBJECT_NULL : entry->object.vm_object;
+ object = (entry->is_sub_map) ? VM_OBJECT_NULL : VME_OBJECT(entry);
/*
* If it's marked phys_contiguous, then vm_fault_wire() didn't actually
fault_info.interruptible = THREAD_UNINT;
fault_info.behavior = entry->behavior;
- fault_info.user_tag = entry->alias;
+ fault_info.user_tag = VME_ALIAS(entry);
fault_info.pmap_options = 0;
if (entry->iokit_acct ||
(!entry->is_sub_map && !entry->use_pmap)) {
fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
}
- fault_info.lo_offset = entry->offset;
- fault_info.hi_offset = (entry->vme_end - entry->vme_start) + entry->offset;
+ fault_info.lo_offset = VME_OFFSET(entry);
+ fault_info.hi_offset = (entry->vme_end - entry->vme_start) + VME_OFFSET(entry);
fault_info.no_cache = entry->no_cache;
fault_info.stealth = TRUE;
fault_info.io_sync = FALSE;
result_page = VM_PAGE_NULL;
result = vm_fault_page(
object,
- entry->offset + (va - entry->vme_start),
+ (VME_OFFSET(entry) +
+ (va - entry->vme_start)),
VM_PROT_NONE, TRUE,
FALSE, /* page not looked up */
&prot, &result_page, &top_page,
* other than the common case will return KERN_FAILURE, and the caller
* is expected to call vm_fault().
*/
-kern_return_t
+static kern_return_t
vm_fault_wire_fast(
__unused vm_map_t map,
vm_map_offset_t va,
+ vm_prot_t caller_prot,
vm_map_entry_t entry,
pmap_t pmap,
vm_map_offset_t pmap_addr,
* Find the backing store object and offset into it.
*/
- object = entry->object.vm_object;
- offset = (va - entry->vme_start) + entry->offset;
+ object = VME_OBJECT(entry);
+ offset = (va - entry->vme_start) + VME_OFFSET(entry);
prot = entry->protection;
/*
*/
vm_page_lockspin_queues();
- vm_page_wire(m);
+ vm_page_wire(m, VM_PROT_MEMORY_TAG(caller_prot), TRUE);
vm_page_unlock_queues();
/*
FALSE,
FALSE,
FALSE,
- entry->alias,
+ VME_ALIAS(entry),
((entry->iokit_acct ||
(!entry->is_sub_map && !entry->use_pmap))
? PMAP_OPTIONS_ALT_ACCT
* Release a page used by vm_fault_copy.
*/
-void
+static void
vm_fault_copy_cleanup(
vm_page_t page,
vm_page_t top_page)
vm_fault_cleanup(object, top_page);
}
-void
+static void
vm_fault_copy_dst_cleanup(
vm_page_t page)
{
*/
vm_page_lockspin_queues();
- vm_page_wire(dst_page);
+ vm_page_wire(dst_page, VM_KERN_MEMORY_OSFMK, TRUE);
vm_page_unlock_queues();
PAGE_WAKEUP_DONE(dst_page);
vm_object_unlock(dst_page->object);
}
#endif /* VM_FAULT_CLASSIFY */
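+/*
+ * kdp_lightweight_fault(): debugger/stackshot address translation that
+ * must never block or fault.  The (currently "#if 0"-disabled) body walks
+ * the map entry and the object shadow chain, returning 0 if any needed
+ * lock is held exclusively or the page is busy/absent/in transition,
+ * the physical address of a resident page otherwise, and, for compressed
+ * data, the address of the preallocated kdp_compressor_decompressed_page
+ * after decompressing into it.  With the body compiled out, the routine
+ * currently just returns 0.
+ */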
+vm_offset_t
+kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr, uint32_t *fault_results)
+{
+#pragma unused(map, cur_target_addr, fault_results)
+
+ return 0;
+#if 0
+ vm_map_entry_t entry;
+ vm_object_t object;
+ vm_offset_t object_offset;
+ vm_page_t m;
+ int compressor_external_state, compressed_count_delta;
+ int compressor_flags = (C_DONT_BLOCK | C_KEEP | C_KDP);
+ int my_fault_type = VM_PROT_READ;
+ kern_return_t kr;
+
+
+ if (not_in_kdp) {
+ panic("kdp_lightweight_fault called from outside of debugger context");
+ }
+
+ assert(map != VM_MAP_NULL);
+
+ assert((cur_target_addr & PAGE_MASK) == 0);
+ if ((cur_target_addr & PAGE_MASK) != 0) {
+ return 0;
+ }
+
+ if (kdp_lck_rw_lock_is_acquired_exclusive(&map->lock)) {
+ return 0;
+ }
+
+ if (!vm_map_lookup_entry(map, cur_target_addr, &entry)) {
+ return 0;
+ }
+
+ if (entry->is_sub_map) {
+ return 0;
+ }
+
+ object = VME_OBJECT(entry);
+ if (object == VM_OBJECT_NULL) {
+ return 0;
+ }
+
+ object_offset = cur_target_addr - entry->vme_start + VME_OFFSET(entry);
+
+ while (TRUE) {
+ if (kdp_lck_rw_lock_is_acquired_exclusive(&object->Lock)) {
+ return 0;
+ }
+ if (object->pager_created && (object->paging_in_progress ||
+ object->activity_in_progress)) {
+ return 0;
+ }
+
+ m = kdp_vm_page_lookup(object, object_offset);
+
+ if (m != VM_PAGE_NULL) {
+
+ if ((object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
+ return 0;
+ }
+
+ if (m->laundry || m->busy || m->pageout || m->absent || m->error || m->cleaning ||
+ m->overwriting || m->restart || m->unusual) {
+ return 0;
+ }
+
+ assert(!m->private);
+ if (m->private) {
+ return 0;
+ }
+
+ assert(!m->fictitious);
+ if (m->fictitious) {
+ return 0;
+ }
+
+ assert(!m->encrypted);
+ if (m->encrypted) {
+ return 0;
+ }
+
+ assert(!m->encrypted_cleaning);
+ if (m->encrypted_cleaning) {
+ return 0;
+ }
+
+ assert(!m->compressor);
+ if (m->compressor) {
+ return 0;
+ }
+
+ if (fault_results) {
+ *fault_results |= kThreadFaultedBT;
+ }
+ return ptoa(m->phys_page);
+ }
+
+ compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
+
+ if (object->pager_created && MUST_ASK_PAGER(object, object_offset, compressor_external_state)) {
+ if (compressor_external_state == VM_EXTERNAL_STATE_EXISTS) {
+ kr = vm_compressor_pager_get(object->pager, (object_offset + object->paging_offset),
+ kdp_compressor_decompressed_page_ppnum, &my_fault_type,
+ compressor_flags, &compressed_count_delta);
+ if (kr == KERN_SUCCESS) {
+ if (fault_results) {
+ *fault_results |= kThreadDecompressedBT;
+ }
+ return kdp_compressor_decompressed_page_paddr;
+ } else {
+ return 0;
+ }
+ }
+ }
+
+ if (object->shadow == VM_OBJECT_NULL) {
+ return 0;
+ }
+
+ object_offset += object->vo_shadow_offset;
+ object = object->shadow;
+ }
+#endif /* 0 */
+}
+
+
+#define CODE_SIGNING_CHUNK_SIZE 4096
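+/*
+ * Code-signature validation now iterates over a page in
+ * CODE_SIGNING_CHUNK_SIZE (4 KB) chunks: a page is marked cs_validated
+ * only if every chunk validates, while any tainted chunk marks the
+ * whole page cs_tainted.
+ */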
void
vm_page_validate_cs_mapped(
vm_page_t page,
const void *kaddr)
{
vm_object_t object;
- vm_object_offset_t offset;
+ vm_object_offset_t offset, offset_in_page;
kern_return_t kr;
memory_object_t pager;
void *blobs;
boolean_t validated;
- unsigned tainted;
+ unsigned tainted;
+ int num_chunks, num_chunks_validated;
assert(page->busy);
vm_object_lock_assert_exclusive(page->object);
- if (!cs_validation) {
- return;
- }
-
if (page->wpmapped && !page->cs_tainted) {
/*
* This page was mapped for "write" access sometime in the
vm_cs_validated_dirtied++;
}
- if (page->cs_validated) {
+ if (page->cs_validated || page->cs_tainted) {
return;
}
}
/* verify the SHA1 hash for this page */
- tainted = 0;
- validated = cs_validate_page(blobs,
- pager,
- offset + object->paging_offset,
- (const void *)kaddr,
- &tainted);
-
- page->cs_validated = validated;
- if (validated) {
- page->cs_tainted = !!(tainted & CS_VALIDATE_TAINTED);
- page->cs_nx = !!(tainted & CS_VALIDATE_NX);
+ num_chunks_validated = 0;
+ for (offset_in_page = 0, num_chunks = 0;
+ offset_in_page < PAGE_SIZE_64;
+ offset_in_page += CODE_SIGNING_CHUNK_SIZE, num_chunks++) {
+ tainted = 0;
+ validated = cs_validate_page(blobs,
+ pager,
+ (object->paging_offset +
+ offset +
+ offset_in_page),
+ (const void *)((const char *)kaddr
+ + offset_in_page),
+ &tainted);
+ if (validated) {
+ num_chunks_validated++;
+ }
+ if (tainted & CS_VALIDATE_TAINTED) {
+ page->cs_tainted = TRUE;
+ }
+ if (tainted & CS_VALIDATE_NX) {
+ page->cs_nx = TRUE;
+ }
+ }
+ /* page is validated only if all its chunks are */
+ if (num_chunks_validated == num_chunks) {
+ page->cs_validated = TRUE;
}
}
vm_object_lock_assert_held(page->object);
- if (!cs_validation) {
- return;
- }
-
if (page->wpmapped && !page->cs_tainted) {
vm_object_lock_assert_exclusive(page->object);
vm_cs_validated_dirtied++;
}
- if (page->cs_validated) {
+ if (page->cs_validated || page->cs_tainted) {
return;
}
}
vm_object_paging_end(object);
}
+
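+/*
+ * Validate a single CODE_SIGNING_CHUNK_SIZE-aligned chunk of a busy,
+ * kernel-mapped page and report the result through *validated_p and
+ * *tainted_p; unlike vm_page_validate_cs_mapped(), this leaves the
+ * page's cs_* bits for the caller to update.
+ */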
+void
+vm_page_validate_cs_mapped_chunk(
+ vm_page_t page,
+ const void *kaddr,
+ vm_offset_t chunk_offset,
+ boolean_t *validated_p,
+ unsigned *tainted_p)
+{
+ vm_object_t object;
+ vm_object_offset_t offset, offset_in_page;
+ kern_return_t kr;
+ memory_object_t pager;
+ void *blobs;
+ boolean_t validated;
+ unsigned tainted;
+
+ *validated_p = FALSE;
+ *tainted_p = 0;
+
+ assert(page->busy);
+ vm_object_lock_assert_exclusive(page->object);
+
+ object = page->object;
+ assert(object->code_signed);
+ offset = page->offset;
+
+ if (!object->alive || object->terminating || object->pager == NULL) {
+ /*
+ * The object is terminating and we don't have its pager
+ * so we can't validate the data...
+ */
+ return;
+ }
+ /*
+ * Since we get here to validate a page that was brought in by
+ * the pager, we know that this pager is all set up and ready
+ * by now.
+ */
+ assert(!object->internal);
+ assert(object->pager != NULL);
+ assert(object->pager_ready);
+
+ pager = object->pager;
+ assert(object->paging_in_progress);
+ kr = vnode_pager_get_object_cs_blobs(pager, &blobs);
+ if (kr != KERN_SUCCESS) {
+ blobs = NULL;
+ }
+
+ /* verify the signature for this chunk */
+ offset_in_page = chunk_offset;
+ assert(offset_in_page < PAGE_SIZE);
+ assert((offset_in_page & (CODE_SIGNING_CHUNK_SIZE-1)) == 0);
+
+ tainted = 0;
+ validated = cs_validate_page(blobs,
+ pager,
+ (object->paging_offset +
+ offset +
+ offset_in_page),
+ (const void *)((const char *)kaddr
+ + offset_in_page),
+ &tainted);
+ if (validated) {
+ *validated_p = TRUE;
+ }
+ if (tainted) {
+ *tainted_p = tainted;
+ }
+}