SYSCTL_INT(_vm, OID_AUTO, cs_executable_create_upl, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_executable_create_upl, 0, "");
SYSCTL_INT(_vm, OID_AUTO, cs_executable_wire, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_executable_wire, 0, "");
+extern int apple_protect_pager_count;
+extern int apple_protect_pager_count_mapped;
+extern unsigned int apple_protect_pager_cache_limit;
+SYSCTL_INT(_vm, OID_AUTO, apple_protect_pager_count, CTLFLAG_RD | CTLFLAG_LOCKED, &apple_protect_pager_count, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, apple_protect_pager_count_mapped, CTLFLAG_RD | CTLFLAG_LOCKED, &apple_protect_pager_count_mapped, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, apple_protect_pager_cache_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &apple_protect_pager_cache_limit, 0, "");
+
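For context, and not part of the change itself: the read-only counters above (and the writable cache limit) can be queried from userspace with sysctlbyname(). A minimal sketch, assuming only the node names registered above; everything else is illustrative:

	#include <stdio.h>
	#include <sys/sysctl.h>

	int
	main(void)
	{
		int count = 0;
		unsigned int limit = 0;
		size_t len;

		/* Read the current number of apple_protect pagers. */
		len = sizeof(count);
		if (sysctlbyname("vm.apple_protect_pager_count", &count, &len, NULL, 0) == 0) {
			printf("apple_protect pagers: %d\n", count);
		}

		/* The cache limit is CTLFLAG_RW, so it can also be written the same way. */
		len = sizeof(limit);
		if (sysctlbyname("vm.apple_protect_pager_cache_limit", &limit, &len, NULL, 0) == 0) {
			printf("pager cache limit: %u\n", limit);
		}
		return 0;
	}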
#if DEVELOPMENT || DEBUG
extern int radar_20146450;
SYSCTL_INT(_vm, OID_AUTO, radar_20146450, CTLFLAG_RW | CTLFLAG_LOCKED, &radar_20146450, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_debug_events, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_debug_events, 0, "");
__attribute__((noinline)) int __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(
- mach_port_t task_access_port, int32_t calling_pid, uint32_t calling_gid, int32_t target_pid);
+ mach_port_t task_access_port, int32_t calling_pid, uint32_t calling_gid, int32_t target_pid, mach_task_flavor_t flavor);
/*
 * Sysctls related to data/stack execution. See osfmk/vm/vm_map.c
*/
__attribute__((noinline)) int
__KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(
- mach_port_t task_access_port, int32_t calling_pid, uint32_t calling_gid, int32_t target_pid)
+ mach_port_t task_access_port, int32_t calling_pid, uint32_t calling_gid, int32_t target_pid, mach_task_flavor_t flavor)
{
- return check_task_access(task_access_port, calling_pid, calling_gid, target_pid);
+ return check_task_access_with_flavor(task_access_port, calling_pid, calling_gid, target_pid, flavor);
}
/*
/* Always check if pid == 0 */
if (pid == 0) {
- (void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
+ (void) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));
AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
return KERN_FAILURE;
}
t1 = port_name_to_task(target_tport);
if (t1 == TASK_NULL) {
- (void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
+ (void) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));
AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
return KERN_FAILURE;
}
p = PROC_NULL;
#if CONFIG_MACF
- error = mac_proc_check_get_task(kauth_cred_get(), &pident);
+ error = mac_proc_check_get_task(kauth_cred_get(), &pident, TASK_FLAVOR_CONTROL);
if (error) {
error = KERN_FAILURE;
goto tfpout;
}
/* Call up to the task access server */
- error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);
+ error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport,
+ proc_selfpid(), kauth_getgid(), pid, TASK_FLAVOR_CONTROL);
if (error != MACH_MSG_SUCCESS) {
if (error == MACH_RCV_INTERRUPTED) {
/* Grant task port access */
extmod_statistics_incr_task_for_pid(task);
- sright = (void *) convert_task_to_port(task);
+
+ if (task == current_task()) {
+ /* return pinned self if current_task() so equality check with mach_task_self_ passes */
+ sright = (void *)convert_task_to_port_pinned(task);
+ } else {
+ sright = (void *)convert_task_to_port(task);
+ }
/* Check if the task has been corpsified */
if (is_corpsetask(task)) {
mach_port_name_t target_tport = args->target_tport;
int pid = args->pid;
user_addr_t task_addr = args->t;
- proc_t p = PROC_NULL;
- task_t t1;
- mach_port_name_t tret;
+ proc_t p = PROC_NULL;
+ task_t t1 = TASK_NULL;
+ mach_port_name_t tret = MACH_PORT_NULL;
void * sright;
int error = 0, refheld = 0;
kauth_cred_t target_cred;
t1 = port_name_to_task(target_tport);
if (t1 == TASK_NULL) {
- (void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
+ (void) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));
AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
return KERN_FAILURE;
}
proc_rele(p);
p = PROC_NULL;
#if CONFIG_MACF
- error = mac_proc_check_get_task_name(kauth_cred_get(), &pident);
+ error = mac_proc_check_get_task(kauth_cred_get(), &pident, TASK_FLAVOR_NAME);
if (error) {
task_deallocate(task);
goto noperm;
/* Disallow inspect port for kernel_task */
if (pid == 0) {
- (void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
+ (void) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));
return EPERM;
}
t1 = port_name_to_task(target_tport);
if (t1 == TASK_NULL) {
- (void) copyout((char *) &t1, task_addr, sizeof(mach_port_name_t));
+ (void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
return EINVAL;
}
proc_rele(proc);
proc = PROC_NULL;
- /*
- * For now, it performs the same set of permission checks as task_for_pid. This
- * will be addressed in rdar://problem/53478660
- */
#if CONFIG_MACF
- error = mac_proc_check_get_task(kauth_cred_get(), &pident);
+ error = mac_proc_check_get_task(kauth_cred_get(), &pident, TASK_FLAVOR_INSPECT);
if (error) {
error = EPERM;
goto tifpout;
/* Call up to the task access server */
- error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);
+ error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport,
+ proc_selfpid(), kauth_getgid(), pid, TASK_FLAVOR_INSPECT);
if (error != MACH_MSG_SUCCESS) {
if (error == MACH_RCV_INTERRUPTED) {
/* Disallow read port for kernel_task */
if (pid == 0) {
- (void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
+ (void) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));
return EPERM;
}
t1 = port_name_to_task(target_tport);
if (t1 == TASK_NULL) {
- (void) copyout((char *) &t1, task_addr, sizeof(mach_port_name_t));
+ (void) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));
return EINVAL;
}
proc_rele(proc);
proc = PROC_NULL;
- /*
- * For now, it performs the same set of permission checks as task_for_pid. This
- * will be addressed in rdar://problem/53478660
- */
#if CONFIG_MACF
- error = mac_proc_check_get_task(kauth_cred_get(), &pident);
+ error = mac_proc_check_get_task(kauth_cred_get(), &pident, TASK_FLAVOR_READ);
if (error) {
error = EPERM;
goto trfpout;
/* Call up to the task access server */
- error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);
+ error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport,
+ proc_selfpid(), kauth_getgid(), pid, TASK_FLAVOR_READ);
if (error != MACH_MSG_SUCCESS) {
if (error == MACH_RCV_INTERRUPTED) {
#endif
target = targetproc->task;
-#ifndef CONFIG_EMBEDDED
+#if XNU_TARGET_OS_OSX
if (target != TASK_NULL) {
/* If we aren't root and target's task access port is set... */
if (!kauth_cred_issuser(kauth_cred_get()) &&
}
/* Call up to the task access server */
- error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);
+ error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport,
+ proc_selfpid(), kauth_getgid(), pid, TASK_FLAVOR_CONTROL);
if (error != MACH_MSG_SUCCESS) {
if (error == MACH_RCV_INTERRUPTED) {
}
}
}
-#endif
+#endif /* XNU_TARGET_OS_OSX */
task_reference(target);
error = task_pidsuspend(target);
/* Always check if pid == 0 */
if (pid == 0) {
- (void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
+ (void) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));
AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
return KERN_FAILURE;
}
t1 = port_name_to_task(target_tport);
if (t1 == TASK_NULL) {
- (void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
+ (void) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));
AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
return KERN_FAILURE;
}
if (!IOTaskHasEntitlement(current_task(), DEBUG_PORT_ENTITLEMENT)) {
#if CONFIG_MACF
- error = mac_proc_check_get_task(kauth_cred_get(), &pident);
+ error = mac_proc_check_get_task(kauth_cred_get(), &pident, TASK_FLAVOR_CONTROL);
if (error) {
error = KERN_FAILURE;
goto tfpout;
/* Call up to the task access server */
- error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);
+ error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport,
+ proc_selfpid(), kauth_getgid(), pid, TASK_FLAVOR_CONTROL);
if (error != MACH_MSG_SUCCESS) {
if (error == MACH_RCV_INTERRUPTED) {
#endif
target = targetproc->task;
-#ifndef CONFIG_EMBEDDED
+#if XNU_TARGET_OS_OSX
if (target != TASK_NULL) {
/* If we aren't root and target's task access port is set... */
if (!kauth_cred_issuser(kauth_cred_get()) &&
}
/* Call up to the task access server */
- error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);
+ error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport,
+ proc_selfpid(), kauth_getgid(), pid, TASK_FLAVOR_CONTROL);
if (error != MACH_MSG_SUCCESS) {
if (error == MACH_RCV_INTERRUPTED) {
}
}
}
-#endif
+#endif /* XNU_TARGET_OS_OSX */
#if !XNU_TARGET_OS_OSX
#if SOCKETS
return error;
}
-#if CONFIG_EMBEDDED
+#if !XNU_TARGET_OS_OSX
/*
* Freeze the specified process (provided in args->pid), or find and freeze a PID.
* When a process is specified, this call is blocking, otherwise we wake up the
*ret = error;
return error;
}
-#endif /* CONFIG_EMBEDDED */
+#endif /* !XNU_TARGET_OS_OSX */
#if SOCKETS
int
* proc lock NOT held
* a reference on the proc has been held / shall be dropped by the caller.
*/
- LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED);
LCK_MTX_ASSERT(&p->p_mlock, LCK_MTX_ASSERT_NOTOWNED);
proc_fdlock(p);
mach_vm_offset_t start_address = 0;
int error = 0;
kern_return_t kr;
+ task_t task = current_task();
SHARED_REGION_TRACE_DEBUG(
("shared_region: %p [%d(%s)] -> check_np(0x%llx)\n",
(uint64_t)uap->start_address));
/* retrieve the current tasks's shared region */
- shared_region = vm_shared_region_get(current_task());
+ shared_region = vm_shared_region_get(task);
if (shared_region != NULL) {
/* retrieve address of its first mapping... */
- kr = vm_shared_region_start_address(shared_region, &start_address);
+ kr = vm_shared_region_start_address(shared_region, &start_address, task);
if (kr != KERN_SUCCESS) {
error = ENOMEM;
} else {
uint32_t mappings_count,
struct shared_file_mapping_slide_np *mappings,
struct _sr_file_mappings **sr_file_mappings,
- struct vm_shared_region **shared_region,
- struct vnode **scdir_vp)
+ struct vm_shared_region **shared_region_ptr,
+ struct vnode **scdir_vp,
+ struct vnode *rdir_vp)
{
int error = 0;
struct _sr_file_mappings *srfmp;
vm_prot_t maxprot = VM_PROT_ALL;
#endif
uint32_t i;
+ struct vm_shared_region *shared_region;
SHARED_REGION_TRACE_DEBUG(
("shared_region: %p [%d(%s)] -> map\n",
}
/* get the process's shared region (setup in vm_map_exec()) */
- *shared_region = vm_shared_region_trim_and_get(current_task());
- if (*shared_region == NULL) {
+ shared_region = vm_shared_region_trim_and_get(current_task());
+ *shared_region_ptr = shared_region;
+ if (shared_region == NULL) {
SHARED_REGION_TRACE_ERROR(
("shared_region: %p [%d(%s)] map(): "
"no shared region\n",
goto done;
}
+ /*
+ * Check the shared region matches the current root
+ * directory of this process. Deny the mapping to
+ * avoid tainting the shared region with something that
+ * doesn't quite belong into it.
+ */
+ struct vnode *sr_vnode = vm_shared_region_root_dir(shared_region);
+ if (sr_vnode != NULL ? rdir_vp != sr_vnode : rdir_vp != rootvnode) {
+ SHARED_REGION_TRACE_ERROR(
+ ("shared_region: map(%p) root_dir mismatch\n",
+ (void *)VM_KERNEL_ADDRPERM(current_thread())));
+ error = EPERM;
+ goto done;
+ }
+
+
for (srfmp = &(*sr_file_mappings)[0];
srfmp < &(*sr_file_mappings)[files_count];
srfmp++) {
#else /* CONFIG_CSR */
/* Devices without SIP/ROSP need to make sure that the shared cache is on the root volume. */
- struct vnode *root_vp = p->p_fd->fd_rdir;
- if (root_vp == NULL) {
- root_vp = rootvnode;
- }
- if (srfmp->vp->v_mount != root_vp->v_mount) {
+ assert(rdir_vp != NULL);
+ if (srfmp->vp->v_mount != rdir_vp->v_mount) {
SHARED_REGION_TRACE_ERROR(
("shared_region: %p [%d(%s)] map(%p:'%s'): "
"not on process's root volume\n",
}
done:
if (error != 0) {
- shared_region_map_and_slide_cleanup(p, files_count, *sr_file_mappings, *shared_region, *scdir_vp);
+ shared_region_map_and_slide_cleanup(p, files_count, *sr_file_mappings, shared_region, *scdir_vp);
*sr_file_mappings = NULL;
- *shared_region = NULL;
+ *shared_region_ptr = NULL;
*scdir_vp = NULL;
}
return error;
kern_return_t kr = KERN_SUCCESS;
struct _sr_file_mappings *sr_file_mappings = NULL;
struct vnode *scdir_vp = NULL;
+ struct vnode *rdir_vp = NULL;
struct vm_shared_region *shared_region = NULL;
+ /*
+ * Get a reference to the current proc's root dir.
+ * Need this to prevent racing with chroot.
+ */
+ proc_fdlock(p);
+ rdir_vp = p->p_fd->fd_rdir;
+ if (rdir_vp == NULL) {
+ rdir_vp = rootvnode;
+ }
+ assert(rdir_vp != NULL);
+ vnode_get(rdir_vp);
+ proc_fdunlock(p);
+
/*
* Turn files, mappings into sr_file_mappings and other setup.
*/
error = shared_region_map_and_slide_setup(p, files_count,
files, mappings_count, mappings,
- &sr_file_mappings, &shared_region, &scdir_vp);
+ &sr_file_mappings, &shared_region, &scdir_vp, rdir_vp);
if (error != 0) {
+ vnode_put(rdir_vp);
return error;
}
/* map the file(s) into that shared region's submap */
- kr = vm_shared_region_map_file(shared_region,
- (void *) p->p_fd->fd_rdir,
- files_count,
- sr_file_mappings);
+ kr = vm_shared_region_map_file(shared_region, files_count, sr_file_mappings);
if (kr != KERN_SUCCESS) {
SHARED_REGION_TRACE_ERROR(("shared_region: %p [%d(%s)] map(): "
"vm_shared_region_map_file() failed kr=0x%x\n",
OSBitAndAtomic(~((uint32_t)P_NOSHLIB), &p->p_flag);
}
+ vnode_put(rdir_vp);
shared_region_map_and_slide_cleanup(p, files_count, sr_file_mappings, shared_region, scdir_vp);
SHARED_REGION_TRACE_DEBUG(
* a max value. The kernel will choose a random value based on that, then use it
* for all shared regions.
*/
-#define SLIDE_AMOUNT_MASK ~PAGE_MASK
+#if defined (__x86_64__)
+#define SLIDE_AMOUNT_MASK ~FOURK_PAGE_MASK
+#else
+#define SLIDE_AMOUNT_MASK ~SIXTEENK_PAGE_MASK
+#endif
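A rough illustration of what the mask accomplishes: clearing the low bits keeps the chosen slide aligned to the platform's page granularity (4 KiB on x86_64, 16 KiB elsewhere). The mask values and the candidate below are assumed for the sketch; the kernel's actual random selection is not shown:

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed values for illustration: page size minus one. */
	#define FOURK_PAGE_MASK     0x0FFFULL   /* 4 KiB - 1 */
	#define SIXTEENK_PAGE_MASK  0x3FFFULL   /* 16 KiB - 1 */

	int
	main(void)
	{
		uint64_t candidate = 0x00ABCDEF;    /* pretend random value below the max */

		printf("4K-aligned slide:  0x%llx\n",
		    (unsigned long long)(candidate & ~FOURK_PAGE_MASK));
		printf("16K-aligned slide: 0x%llx\n",
		    (unsigned long long)(candidate & ~SIXTEENK_PAGE_MASK));
		return 0;
	}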
int
shared_region_map_and_slide_2_np(
}
mappings[m].sms_address += slide_amount;
if (mappings[m].sms_slide_size != 0) {
- mappings[i].sms_slide_start += slide_amount;
+ mappings[m].sms_slide_start += slide_amount;
}
}
}
SYSCTL_INT(_debug, OID_AUTO, vm_mixed_pagesize_supported, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED,
&vm_mixed_pagesize_supported, 0, "kernel support for mixed pagesize");
-
-extern uint64_t get_pages_grabbed_count(void);
-
-static int
-pages_grabbed SYSCTL_HANDLER_ARGS
-{
-#pragma unused(arg1, arg2, oidp)
- uint64_t value = get_pages_grabbed_count();
- return SYSCTL_OUT(req, &value, sizeof(value));
-}
-
-SYSCTL_PROC(_vm, OID_AUTO, pages_grabbed, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
- 0, 0, &pages_grabbed, "QU", "Total pages grabbed");
+SCALABLE_COUNTER_DECLARE(vm_page_grab_count);
+SYSCTL_SCALABLE_COUNTER(_vm, pages_grabbed, vm_page_grab_count, "Total pages grabbed");
SYSCTL_ULONG(_vm, OID_AUTO, pages_freed, CTLFLAG_RD | CTLFLAG_LOCKED,
&vm_pageout_vminfo.vm_page_pages_freed, "Total pages freed");
SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_no_buf,
CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_no_buf, "");
-#if PMAP_CS
-extern uint64_t vm_cs_defer_to_pmap_cs;
-extern uint64_t vm_cs_defer_to_pmap_cs_not;
-SYSCTL_QUAD(_vm, OID_AUTO, cs_defer_to_pmap_cs,
- CTLFLAG_RD | CTLFLAG_LOCKED, &vm_cs_defer_to_pmap_cs, "");
-SYSCTL_QUAD(_vm, OID_AUTO, cs_defer_to_pmap_cs_not,
- CTLFLAG_RD | CTLFLAG_LOCKED, &vm_cs_defer_to_pmap_cs_not, "");
-#endif /* PMAP_CS */
extern uint64_t shared_region_pager_copied;
extern uint64_t shared_region_pager_slid;
SYSCTL_INT(_vm, OID_AUTO, pmap_ledgers_panic_leeway, CTLFLAG_RW | CTLFLAG_LOCKED, &pmap_ledgers_panic_leeway, 0, "");
#endif /* MACH_ASSERT */
+
+extern uint64_t vm_map_lookup_locked_copy_slowly_count;
+extern uint64_t vm_map_lookup_locked_copy_slowly_size;
+extern uint64_t vm_map_lookup_locked_copy_slowly_max;
+extern uint64_t vm_map_lookup_locked_copy_slowly_restart;
+extern uint64_t vm_map_lookup_locked_copy_slowly_error;
+extern uint64_t vm_map_lookup_locked_copy_strategically_count;
+extern uint64_t vm_map_lookup_locked_copy_strategically_size;
+extern uint64_t vm_map_lookup_locked_copy_strategically_max;
+extern uint64_t vm_map_lookup_locked_copy_strategically_restart;
+extern uint64_t vm_map_lookup_locked_copy_strategically_error;
+extern uint64_t vm_map_lookup_locked_copy_shadow_count;
+extern uint64_t vm_map_lookup_locked_copy_shadow_size;
+extern uint64_t vm_map_lookup_locked_copy_shadow_max;
+SYSCTL_QUAD(_vm, OID_AUTO, map_lookup_locked_copy_slowly_count,
+ CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_lookup_locked_copy_slowly_count, "");
+SYSCTL_QUAD(_vm, OID_AUTO, map_lookup_locked_copy_slowly_size,
+ CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_lookup_locked_copy_slowly_size, "");
+SYSCTL_QUAD(_vm, OID_AUTO, map_lookup_locked_copy_slowly_max,
+ CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_lookup_locked_copy_slowly_max, "");
+SYSCTL_QUAD(_vm, OID_AUTO, map_lookup_locked_copy_slowly_restart,
+ CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_lookup_locked_copy_slowly_restart, "");
+SYSCTL_QUAD(_vm, OID_AUTO, map_lookup_locked_copy_slowly_error,
+ CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_lookup_locked_copy_slowly_error, "");
+SYSCTL_QUAD(_vm, OID_AUTO, map_lookup_locked_copy_strategically_count,
+ CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_lookup_locked_copy_strategically_count, "");
+SYSCTL_QUAD(_vm, OID_AUTO, map_lookup_locked_copy_strategically_size,
+ CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_lookup_locked_copy_strategically_size, "");
+SYSCTL_QUAD(_vm, OID_AUTO, map_lookup_locked_copy_strategically_max,
+ CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_lookup_locked_copy_strategically_max, "");
+SYSCTL_QUAD(_vm, OID_AUTO, map_lookup_locked_copy_strategically_restart,
+ CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_lookup_locked_copy_strategically_restart, "");
+SYSCTL_QUAD(_vm, OID_AUTO, map_lookup_locked_copy_strategically_error,
+ CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_lookup_locked_copy_strategically_error, "");
+SYSCTL_QUAD(_vm, OID_AUTO, map_lookup_locked_copy_shadow_count,
+ CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_lookup_locked_copy_shadow_count, "");
+SYSCTL_QUAD(_vm, OID_AUTO, map_lookup_locked_copy_shadow_size,
+ CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_lookup_locked_copy_shadow_size, "");
+SYSCTL_QUAD(_vm, OID_AUTO, map_lookup_locked_copy_shadow_max,
+ CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_lookup_locked_copy_shadow_max, "");
+
extern int vm_protect_privileged_from_untrusted;
SYSCTL_INT(_vm, OID_AUTO, protect_privileged_from_untrusted,
CTLFLAG_RW | CTLFLAG_LOCKED, &vm_protect_privileged_from_untrusted, 0, "");
CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_LOCKED,
0, 0, shared_region_pivot, "I", "");
-extern int vm_remap_old_path, vm_remap_new_path;
-SYSCTL_INT(_vm, OID_AUTO, remap_old_path,
- CTLFLAG_RD | CTLFLAG_LOCKED, &vm_remap_old_path, 0, "");
-SYSCTL_INT(_vm, OID_AUTO, remap_new_path,
- CTLFLAG_RD | CTLFLAG_LOCKED, &vm_remap_new_path, 0, "");
+/*
+ * sysctl to return the number of pages on retired_pages_object
+ */
+static int
+retired_pages_count SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+ extern uint32_t vm_retired_pages_count(void);
+ uint32_t value = vm_retired_pages_count();
+
+ return SYSCTL_OUT(req, &value, sizeof(value));
+}
+SYSCTL_PROC(_vm, OID_AUTO, retired_pages_count, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+ 0, 0, &retired_pages_count, "I", "");
+
+SYSCTL_INT(_vm, OID_AUTO, vmtc_total, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &vmtc_total, 0, "total text page corruptions detected");
+SYSCTL_INT(_vm, OID_AUTO, vmtc_undiagnosed, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &vmtc_undiagnosed, 0, "undiagnosed text page corruptions");
+SYSCTL_INT(_vm, OID_AUTO, vmtc_not_eligible, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &vmtc_not_eligible, 0, "text page corruptions not eligible for correction");
+SYSCTL_INT(_vm, OID_AUTO, vmtc_copyin_fail, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &vmtc_copyin_fail, 0, "undiagnosed text page corruptions due to copyin failure");
+SYSCTL_INT(_vm, OID_AUTO, vmtc_not_found, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &vmtc_not_found, 0, "text page corruptions but no diff found");
+SYSCTL_INT(_vm, OID_AUTO, vmtc_one_bit_flip, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &vmtc_one_bit_flip, 0, "text page corruptions that had a single bit flip");
+
+SYSCTL_INT(_vm, OID_AUTO, vmtc_1_byte, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &vmtc_byte_counts[0], 0, "text page corruptions with 1 changed byte");
+
+SYSCTL_INT(_vm, OID_AUTO, vmtc_2_byte, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &vmtc_byte_counts[1], 0, "text page corruptions with 2 changed bytes");
+
+SYSCTL_INT(_vm, OID_AUTO, vmtc_4_byte, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &vmtc_byte_counts[2], 0, "text page corruptions with 3 to 4 changed bytes");
+
+SYSCTL_INT(_vm, OID_AUTO, vmtc_8_byte, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &vmtc_byte_counts[3], 0, "text page corruptions with 5 to 8 changed bytes");
+
+SYSCTL_INT(_vm, OID_AUTO, vmtc_16_byte, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &vmtc_byte_counts[4], 0, "text page corruptions with 9 to 16 changed bytes");
+
+SYSCTL_INT(_vm, OID_AUTO, vmtc_32_byte, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &vmtc_byte_counts[5], 0, "text page corruptions with 17 to 32 changed bytes");
+
+SYSCTL_INT(_vm, OID_AUTO, vmtc_64_byte, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &vmtc_byte_counts[6], 0, "text page corruptions with 33 to 64 changed bytes");
+
+SYSCTL_INT(_vm, OID_AUTO, vmtc_128_byte, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &vmtc_byte_counts[7], 0, "text page corruptions with 65 to 128 changed bytes");
+
+SYSCTL_INT(_vm, OID_AUTO, vmtc_256_byte, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &vmtc_byte_counts[8], 0, "text page corruptions with >128 changed bytes");
+
+#if DEBUG || DEVELOPMENT
+/*
+ * A sysctl that can be used to corrupt a text page with an illegal instruction.
+ * Used for testing text page self healing.
+ */
+extern kern_return_t vm_corrupt_text_addr(uintptr_t);
+static int
+corrupt_text_addr(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ uint64_t value = 0;
+ int error = sysctl_handle_quad(oidp, &value, 0, req);
+ if (error || !req->newptr) {
+ return error;
+ }
+
+ if (vm_corrupt_text_addr((uintptr_t)value) == KERN_SUCCESS) {
+ return 0;
+ } else {
+ return EINVAL;
+ }
+}
+
+SYSCTL_PROC(_vm, OID_AUTO, corrupt_text_addr,
+ CTLTYPE_QUAD | CTLFLAG_WR | CTLFLAG_LOCKED | CTLFLAG_MASKED,
+ 0, 0, corrupt_text_addr, "-", "");
+#endif /* DEBUG || DEVELOPMENT */
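An illustrative userspace counterpart for such a test, assuming a DEVELOPMENT/DEBUG kernel that registers this node; the address below is a placeholder, and a real test would pass the address of a mapped text page:

	#include <stdint.h>
	#include <stdio.h>
	#include <sys/sysctl.h>

	int
	main(void)
	{
		/* Placeholder address; substitute the address of an executable page. */
		uint64_t addr = 0x100000000ULL;

		/* Write-only node: pass the new value, read nothing back. */
		if (sysctlbyname("vm.corrupt_text_addr", NULL, NULL, &addr, sizeof(addr)) != 0) {
			perror("sysctlbyname");
			return 1;
		}
		return 0;
	}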