X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/6d2010ae8f7a6078e10b361c6962983bab233e0f..8a3053a07cee346dca737a5670e546fd26a7c9d6:/bsd/vm/vm_unix.c?ds=sidebyside

diff --git a/bsd/vm/vm_unix.c b/bsd/vm/vm_unix.c
index 0190e70f7..1aa660399 100644
--- a/bsd/vm/vm_unix.c
+++ b/bsd/vm/vm_unix.c
@@ -77,6 +77,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -94,13 +95,10 @@
 #include
 
-#if CONFIG_FREEZE
 #include
-#endif
 
-int _shared_region_map( struct proc*, int, unsigned int, struct shared_file_mapping_np*, memory_object_control_t*, struct shared_file_mapping_np*);
-int _shared_region_slide(uint32_t, mach_vm_offset_t, mach_vm_size_t, mach_vm_offset_t, mach_vm_size_t, memory_object_control_t);
+int _shared_region_map_and_slide(struct proc*, int, unsigned int, struct shared_file_mapping_np*, uint32_t, user_addr_t, user_addr_t);
 int shared_region_copyin_mappings(struct proc*, user_addr_t, unsigned int, struct shared_file_mapping_np *);
 
 SYSCTL_INT(_vm, OID_AUTO, vm_debug_events, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_debug_events, 0, "");
@@ -179,9 +177,15 @@ useracc(
 	user_size_t	len,
 	int	prot)
 {
+	vm_map_t	map;
+
+	map = current_map();
 	return (vm_map_check_protection(
-		current_map(),
-		vm_map_trunc_page(addr), vm_map_round_page(addr+len),
+		map,
+		vm_map_trunc_page(addr,
+				  vm_map_page_mask(map)),
+		vm_map_round_page(addr+len,
+				  vm_map_page_mask(map)),
 		prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
 }
@@ -190,10 +194,17 @@ vslock(
 	user_addr_t	addr,
 	user_size_t	len)
 {
-	kern_return_t kret;
-	kret = vm_map_wire(current_map(), vm_map_trunc_page(addr),
-			vm_map_round_page(addr+len),
-			VM_PROT_READ | VM_PROT_WRITE ,FALSE);
+	kern_return_t	kret;
+	vm_map_t	map;
+
+	map = current_map();
+	kret = vm_map_wire(map,
+			   vm_map_trunc_page(addr,
+					     vm_map_page_mask(map)),
+			   vm_map_round_page(addr+len,
+					     vm_map_page_mask(map)),
+			   VM_PROT_READ | VM_PROT_WRITE,
+			   FALSE);
 
 	switch (kret) {
 	case KERN_SUCCESS:
@@ -220,14 +231,17 @@ vsunlock(
 	vm_map_offset_t	vaddr;
 	ppnum_t		paddr;
 #endif	/* FIXME ] */
-	kern_return_t kret;
+	kern_return_t	kret;
+	vm_map_t	map;
+
+	map = current_map();
 
 #if FIXME  /* [ */
 	if (dirtied) {
 		pmap = get_task_pmap(current_task());
-		for (vaddr = vm_map_trunc_page(addr);
-		     vaddr < vm_map_round_page(addr+len);
-		     vaddr += PAGE_SIZE) {
+		for (vaddr = vm_map_trunc_page(addr, PAGE_MASK);
+		     vaddr < vm_map_round_page(addr+len, PAGE_MASK);
+		     vaddr += PAGE_SIZE) {
 			paddr = pmap_extract(pmap, vaddr);
 			pg = PHYS_TO_VM_PAGE(paddr);
 			vm_page_set_modified(pg);
@@ -237,8 +251,12 @@ vsunlock(
 #ifdef	lint
 	dirtied++;
 #endif	/* lint */
-	kret = vm_map_unwire(current_map(), vm_map_trunc_page(addr),
-			vm_map_round_page(addr+len), FALSE);
+	kret = vm_map_unwire(map,
+			     vm_map_trunc_page(addr,
+					       vm_map_page_mask(map)),
+			     vm_map_round_page(addr+len,
+					       vm_map_page_mask(map)),
+			     FALSE);
 	switch (kret) {
 	case KERN_SUCCESS:
 		return (0);
@@ -474,7 +492,7 @@ task_for_pid_posix_check(proc_t target)
 	int allowed;
 
 	/* No task_for_pid on bad targets */
-	if (target == PROC_NULL || target->p_stat == SZOMB) {
+	if (target->p_stat == SZOMB) {
 		return FALSE;
 	}
@@ -573,9 +591,13 @@ task_for_pid(
 
 	p = proc_find(pid);
+	if (p == PROC_NULL) {
+		error = KERN_FAILURE;
+		goto tfpout;
+	}
+
 #if CONFIG_AUDIT
-	if (p != PROC_NULL)
-		AUDIT_ARG(process, p);
+	AUDIT_ARG(process, p);
 #endif
 
 	if (!(task_for_pid_posix_check(p))) {
@@ -745,13 +767,17 @@ pid_suspend(struct proc *p __unused, struct pid_suspend_args *args, int *ret)
 	}
 
 	targetproc = proc_find(pid);
+	if (targetproc == PROC_NULL) {
+		error = ESRCH;
+		goto out;
+	}
+
 	if (!task_for_pid_posix_check(targetproc)) {
 		error = EPERM;
 		goto out;
 	}
 
 	target = targetproc->task;
-#ifndef CONFIG_EMBEDDED
 	if (target != TASK_NULL) {
 		mach_port_t tfpport;
@@ -778,10 +804,9 @@ pid_suspend(struct proc *p __unused, struct pid_suspend_args *args, int *ret)
 			}
 		}
 	}
-#endif
 
 	task_reference(target);
-	error = task_suspend(target);
+	error = task_pidsuspend(target);
 	if (error) {
 		if (error == KERN_INVALID_ARGUMENT) {
 			error = EINVAL;
@@ -789,12 +814,14 @@ pid_suspend(struct proc *p __unused, struct pid_suspend_args *args, int *ret)
 			error = EPERM;
 		}
 	}
-	task_deallocate(target);
-
-#if CONFIG_FREEZE
-	kern_hibernation_on_pid_suspend(pid);
+#if CONFIG_MEMORYSTATUS
+	else {
+		memorystatus_on_suspend(targetproc);
+	}
 #endif
 
+	task_deallocate(target);
+
 out:
 	if (targetproc != PROC_NULL)
 		proc_rele(targetproc);
@@ -824,13 +851,17 @@ pid_resume(struct proc *p __unused, struct pid_resume_args *args, int *ret)
 	}
 
 	targetproc = proc_find(pid);
+	if (targetproc == PROC_NULL) {
+		error = ESRCH;
+		goto out;
+	}
+
 	if (!task_for_pid_posix_check(targetproc)) {
 		error = EPERM;
 		goto out;
 	}
 
 	target = targetproc->task;
-#ifndef CONFIG_EMBEDDED
 	if (target != TASK_NULL) {
 		mach_port_t tfpport;
@@ -857,144 +888,36 @@ pid_resume(struct proc *p __unused, struct pid_resume_args *args, int *ret)
 			}
 		}
 	}
-#endif
 
 	task_reference(target);
 
-#if CONFIG_FREEZE
-	kern_hibernation_on_pid_resume(pid, target);
+#if CONFIG_MEMORYSTATUS
+	memorystatus_on_resume(targetproc);
 #endif
 
-	error = task_resume(target);
+	error = task_pidresume(target);
 	if (error) {
 		if (error == KERN_INVALID_ARGUMENT) {
 			error = EINVAL;
 		} else {
-			error = EPERM;
+			if (error == KERN_MEMORY_ERROR) {
+				psignal(targetproc, SIGKILL);
+				error = EIO;
+			} else
+				error = EPERM;
 		}
 	}
+
 	task_deallocate(target);
 
 out:
 	if (targetproc != PROC_NULL)
 		proc_rele(targetproc);
 
-	*ret = error;
-	return error;
-
-	return 0;
-}
-
-#if CONFIG_EMBEDDED
-kern_return_t
-pid_hibernate(struct proc *p __unused, struct pid_hibernate_args *args, int *ret)
-{
-	int	error = 0;
-	proc_t	targetproc = PROC_NULL;
-	int	pid = args->pid;
-
-#ifndef CONFIG_FREEZE
-	#pragma unused(pid)
-#else
-
-#if CONFIG_MACF
-	error = mac_proc_check_suspend_resume(p, MAC_PROC_CHECK_HIBERNATE);
-	if (error) {
-		error = EPERM;
-		goto out;
-	}
-#endif
-
-	/*
-	 * The only accepted pid value here is currently -1, since we just kick off the hibernation thread
-	 * here - individual ids aren't required. However, it's intended that that this call is to change
-	 * in the future to initiate hibernation of individual processes. In anticipation, we'll obtain the
-	 * process handle for potentially valid values and call task_for_pid_posix_check(); this way, everything
-	 * is validated correctly and set for further refactoring. See for more details.
-	 */
-	if (pid >= 0) {
-		targetproc = proc_find(pid);
-		if (!task_for_pid_posix_check(targetproc)) {
-			error = EPERM;
-			goto out;
-		}
-	}
-
-	if (pid == -1) {
-		kern_hibernation_on_pid_hibernate(pid);
-	} else {
-		error = EPERM;
-	}
-
-out:
-
-#endif /* CONFIG_FREEZE */
-
-	if (targetproc != PROC_NULL)
-		proc_rele(targetproc);
+	*ret = error;
 	return error;
 }
 
-int
-pid_shutdown_sockets(struct proc *p __unused, struct pid_shutdown_sockets_args *args, int *ret)
-{
-	int		error = 0;
-	proc_t		targetproc = PROC_NULL;
-	struct filedesc	*fdp;
-	struct fileproc	*fp;
-	int		pid = args->pid;
-	int		level = args->level;
-	int		i;
-
-	if (level != SHUTDOWN_SOCKET_LEVEL_DISCONNECT_SVC &&
-		level != SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL)
-	{
-		error = EINVAL;
-		goto out;
-	}
-
-#if CONFIG_MACF
-	error = mac_proc_check_suspend_resume(p, MAC_PROC_CHECK_SHUTDOWN_SOCKETS);
-	if (error) {
-		error = EPERM;
-		goto out;
-	}
-#endif
-
-	targetproc = proc_find(pid);
-	if (!task_for_pid_posix_check(targetproc)) {
-		error = EPERM;
-		goto out;
-	}
-
-	proc_fdlock(targetproc);
-	fdp = targetproc->p_fd;
-
-	for (i = 0; i < fdp->fd_nfiles; i++) {
-		struct socket *sockp;
-
-		fp = fdp->fd_ofiles[i];
-		if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0 ||
-			fp->f_fglob->fg_type != DTYPE_SOCKET)
-		{
-			continue;
-		}
-
-		sockp = (struct socket *)fp->f_fglob->fg_data;
-
-		/* Call networking stack with socket and level */
-		(void) socket_defunct(targetproc, sockp, level);
-	}
-
-	proc_fdunlock(targetproc);
-
-out:
-	if (targetproc != PROC_NULL)
-		proc_rele(targetproc);
-	*ret = error;
-	return error;
-}
-#endif /* CONFIG_EMBEDDED */
 
 static int
 sysctl_settfp_policy(__unused struct sysctl_oid *oidp, void *arg1,
@@ -1007,7 +930,7 @@ sysctl_settfp_policy(__unused struct sysctl_oid *oidp, void *arg1,
 	if (error || req->newptr == USER_ADDR_NULL)
 		return(error);
 
-	if (!is_suser())
+	if (!kauth_cred_issuser(kauth_cred_get()))
 		return(EPERM);
 
 	if ((error = SYSCTL_IN(req, &new_value, sizeof(int)))) {
@@ -1075,7 +998,7 @@ shared_region_check_np(
 	__unused int			*retvalp)
 {
 	vm_shared_region_t	shared_region;
-	mach_vm_offset_t	start_address;
+	mach_vm_offset_t	start_address = 0;
 	int			error;
 	kern_return_t		kr;
@@ -1157,13 +1080,14 @@ shared_region_copyin_mappings(
  * requiring any further setup.
  */
 int
-_shared_region_map(
+_shared_region_map_and_slide(
 	struct proc				*p,
 	int					fd,
 	uint32_t				mappings_count,
 	struct shared_file_mapping_np		*mappings,
-	memory_object_control_t			*sr_file_control,
-	struct shared_file_mapping_np		*mapping_to_slide)
+	uint32_t				slide,
+	user_addr_t				slide_start,
+	user_addr_t				slide_size)
 {
 	int					error;
 	kern_return_t				kr;
@@ -1172,7 +1096,9 @@ _shared_region_map(
 	struct vnode_attr			va;
 	off_t					fs;
 	memory_object_size_t			file_size;
+#if CONFIG_MACF
 	vm_prot_t				maxprot = VM_PROT_ALL;
+#endif
 	memory_object_control_t			file_control;
 	struct vm_shared_region			*shared_region;
@@ -1195,12 +1121,12 @@ _shared_region_map(
 	}
 
 	/* make sure we're attempting to map a vnode */
-	if (fp->f_fglob->fg_type != DTYPE_VNODE) {
+	if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) {
 		SHARED_REGION_TRACE_ERROR(
 			("shared_region: %p [%d(%s)] map: "
 			 "fd=%d not a vnode (type=%d)\n",
 			 current_thread(), p->p_pid, p->p_comm,
-			 fd, fp->f_fglob->fg_type));
+			 fd, FILEGLOB_DTYPE(fp->f_fglob)));
 		error = EINVAL;
 		goto done;
 	}
@@ -1248,12 +1174,10 @@ _shared_region_map(
 #if CONFIG_PROTECT
 	/* check for content protection access */
 	{
-		void *cnode;
-		if ((cnode = cp_get_protected_cnode(vp)) != NULL) {
-			error = cp_handle_vnop(cnode, CP_READ_ACCESS | CP_WRITE_ACCESS);
-			if (error)
+		error = cp_handle_vnop(vp, CP_READ_ACCESS | CP_WRITE_ACCESS, 0);
+		if (error) {
 			goto done;
-		}
+		}
 	}
 #endif /* CONFIG_PROTECT */
@@ -1325,11 +1249,6 @@ _shared_region_map(
 		goto done;
 	}
 
-	if (sr_file_control != NULL) {
-		*sr_file_control = file_control;
-	}
-
-
 	/* get the process's shared region (setup in vm_map_exec()) */
 	shared_region = vm_shared_region_get(current_task());
@@ -1349,7 +1268,9 @@ _shared_region_map(
 				 file_control,
 				 file_size,
 				 (void *) p->p_fd->fd_rdir,
-				 mapping_to_slide);
+				 slide,
+				 slide_start,
+				 slide_size);
 	if (kr != KERN_SUCCESS) {
 		SHARED_REGION_TRACE_ERROR(
 			("shared_region: %p [%d(%s)] map(%p:'%s'): "
@@ -1422,80 +1343,36 @@ done:
 	return error;
 }
 
-int
-_shared_region_slide(uint32_t slide,
-		mach_vm_offset_t	entry_start_address,
-		mach_vm_size_t		entry_size,
-		mach_vm_offset_t	slide_start,
-		mach_vm_size_t		slide_size,
-		memory_object_control_t	sr_file_control)
-{
-	void *slide_info_entry = NULL;
-	int	error;
-
-	if((error = vm_shared_region_slide_init(slide_size, entry_start_address, entry_size, slide, sr_file_control))) {
-		printf("slide_info initialization failed with kr=%d\n", error);
-		goto done;
-	}
-
-	slide_info_entry = vm_shared_region_get_slide_info_entry();
-	if (slide_info_entry == NULL){
-		error = EFAULT;
-	} else {
-		error = copyin(slide_start,
-			       slide_info_entry,
-			       (vm_size_t)slide_size);
-	}
-	if (error) {
-		goto done;
-	}
-
-	if (vm_shared_region_slide_sanity_check() != KERN_SUCCESS) {
-		error = EFAULT;
-		printf("Sanity Check failed for slide_info\n");
-	} else {
-#if DEBUG
-		printf("Succesfully init slide_info with start_address: %p region_size: %ld slide_header_size: %ld\n",
-				(void*)(uintptr_t)entry_start_address,
-				(unsigned long)entry_size,
-				(unsigned long)slide_size);
-#endif
-	}
-done:
-	return error;
-}
-
 int
 shared_region_map_and_slide_np(
 	struct proc				*p,
 	struct shared_region_map_and_slide_np_args	*uap,
 	__unused int				*retvalp)
 {
-	struct shared_file_mapping_np	mapping_to_slide;
 	struct shared_file_mapping_np	*mappings;
-	unsigned int			mappings_count = uap->count;
-
-	memory_object_control_t		sr_file_control;
+	unsigned int			mappings_count = uap->count;
 	kern_return_t			kr = KERN_SUCCESS;
 	uint32_t			slide = uap->slide;
 
 #define SFM_MAX_STACK	8
 	struct shared_file_mapping_np	stack_mappings[SFM_MAX_STACK];
 
+	/* Is the process chrooted?? */
+	if (p->p_fd->fd_rdir != NULL) {
+		kr = EINVAL;
+		goto done;
+	}
+
 	if ((kr = vm_shared_region_sliding_valid(slide)) != KERN_SUCCESS) {
 		if (kr == KERN_INVALID_ARGUMENT) {
 			/*
 			 * This will happen if we request sliding again
 			 * with the same slide value that was used earlier
-			 * for the very first sliding. We continue through
-			 * to the mapping layer. This is so that we can be
-			 * absolutely certain that the same mappings have
-			 * been requested.
+			 * for the very first sliding.
 			 */
 			kr = KERN_SUCCESS;
-		} else {
-			goto done;
 		}
+		goto done;
 	}
 
 	if (mappings_count == 0) {
@@ -1522,23 +1399,13 @@ shared_region_map_and_slide_np(
 	}
 
 
-	kr = _shared_region_map(p, uap->fd, mappings_count, mappings, &sr_file_control, &mapping_to_slide);
+	kr = _shared_region_map_and_slide(p, uap->fd, mappings_count, mappings,
+					  slide,
+					  uap->slide_start, uap->slide_size);
 	if (kr != KERN_SUCCESS) {
 		return kr;
 	}
 
-	if (slide) {
-		kr = _shared_region_slide(slide,
-				mapping_to_slide.sfm_file_offset,
-				mapping_to_slide.sfm_size,
-				uap->slide_start,
-				uap->slide_size,
-				sr_file_control);
-		if (kr != KERN_SUCCESS) {
-			vm_shared_region_undo_mappings(NULL, 0, mappings, mappings_count);
-			return kr;
-		}
-	}
 done:
 	return kr;
 }
@@ -1577,6 +1444,10 @@ extern unsigned int	vm_page_purgeable_wired_count;
 SYSCTL_INT(_vm, OID_AUTO, page_purgeable_wired_count, CTLFLAG_RD | CTLFLAG_LOCKED,
 	   &vm_page_purgeable_wired_count, 0, "Wired purgeable page count");
 
+extern int madvise_free_debug;
+SYSCTL_INT(_vm, OID_AUTO, madvise_free_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
+	   &madvise_free_debug, 0, "zero-fill on madvise(MADV_FREE*)");
+
 SYSCTL_INT(_vm, OID_AUTO, page_reusable_count, CTLFLAG_RD | CTLFLAG_LOCKED,
 	   &vm_page_stats_reusable.reusable_count, 0, "Reusable page count");
 SYSCTL_QUAD(_vm, OID_AUTO, reusable_success, CTLFLAG_RD | CTLFLAG_LOCKED,
@@ -1601,7 +1472,69 @@ SYSCTL_QUAD(_vm, OID_AUTO, can_reuse_success, CTLFLAG_RD | CTLFLAG_LOCKED,
 	   &vm_page_stats_reusable.can_reuse_success, "");
 SYSCTL_QUAD(_vm, OID_AUTO, can_reuse_failure, CTLFLAG_RD | CTLFLAG_LOCKED,
 	   &vm_page_stats_reusable.can_reuse_failure, "");
+SYSCTL_QUAD(_vm, OID_AUTO, reusable_reclaimed, CTLFLAG_RD | CTLFLAG_LOCKED,
+	   &vm_page_stats_reusable.reusable_reclaimed, "");
+
+
+extern unsigned int vm_page_free_count, vm_page_speculative_count;
+SYSCTL_UINT(_vm, OID_AUTO, page_free_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_free_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_speculative_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_speculative_count, 0, "");
+
+extern unsigned int vm_page_cleaned_count;
+SYSCTL_UINT(_vm, OID_AUTO, page_cleaned_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_cleaned_count, 0, "Cleaned queue size");
+
+/* pageout counts */
+extern unsigned int vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external, vm_pageout_inactive_clean, vm_pageout_speculative_clean, vm_pageout_inactive_used;
+extern unsigned int vm_pageout_freed_from_inactive_clean, vm_pageout_freed_from_speculative;
+SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_dirty_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_inactive_dirty_internal, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_dirty_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_inactive_dirty_external, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_inactive_clean, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_speculative_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_speculative_clean, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_used, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_inactive_used, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_freed_from_inactive_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_freed_from_inactive_clean, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_freed_from_speculative, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_freed_from_speculative, 0, "");
+
+extern unsigned int vm_pageout_freed_from_cleaned;
+SYSCTL_UINT(_vm, OID_AUTO, pageout_freed_from_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_freed_from_cleaned, 0, "");
+
+/* counts of pages entering the cleaned queue */
+extern unsigned int vm_pageout_enqueued_cleaned, vm_pageout_enqueued_cleaned_from_inactive_clean, vm_pageout_enqueued_cleaned_from_inactive_dirty;
+SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_enqueued_cleaned, 0, ""); /* sum of next two */
+SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned_from_inactive_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_enqueued_cleaned_from_inactive_clean, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned_from_inactive_dirty, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_enqueued_cleaned_from_inactive_dirty, 0, "");
+
+/* counts of pages leaving the cleaned queue */
+extern unsigned int vm_pageout_cleaned_reclaimed, vm_pageout_cleaned_reactivated, vm_pageout_cleaned_reference_reactivated, vm_pageout_cleaned_volatile_reactivated, vm_pageout_cleaned_fault_reactivated, vm_pageout_cleaned_commit_reactivated, vm_pageout_cleaned_busy, vm_pageout_cleaned_nolock;
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_reclaimed, 0, "Cleaned pages reclaimed");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_reactivated, 0, "Cleaned pages reactivated"); /* sum of all reactivated AND busy and nolock (even though those actually get reDEactivated */
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_reference_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_reference_reactivated, 0, "Cleaned pages reference reactivated");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_volatile_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_volatile_reactivated, 0, "Cleaned pages volatile reactivated");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_fault_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_fault_reactivated, 0, "Cleaned pages fault reactivated");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_commit_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_commit_reactivated, 0, "Cleaned pages commit reactivated");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_busy, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_busy, 0, "Cleaned pages busy (deactivated)");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_nolock, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_nolock, 0, "Cleaned pages no-lock (deactivated)");
+
+#include
+#include
+
+void vm_pageout_io_throttle(void);
+void vm_pageout_io_throttle(void) {
+	struct uthread *uthread = get_bsdthread_info(current_thread());
+
+	/*
+	 * thread is marked as a low priority I/O type
+	 * and the I/O we issued while in this cleaning operation
+	 * collided with normal I/O operations... we'll
+	 * delay in order to mitigate the impact of this
+	 * task on the normal operation of the system
+	 */
+
+	if (uthread->uu_lowpri_window) {
+		throttle_lowpri_io(1);
+	}
+
+}
 
 int
 vm_pressure_monitor(
@@ -1639,3 +1572,77 @@ vm_pressure_monitor(
 	*retval = (int) pages_wanted;
 	return 0;
 }
+
+int
+kas_info(struct proc *p,
+	 struct kas_info_args *uap,
+	 int *retval __unused)
+{
+#ifdef SECURE_KERNEL
+	(void)p;
+	(void)uap;
+	return ENOTSUP;
+#else /* !SECURE_KERNEL */
+	int		selector = uap->selector;
+	user_addr_t	valuep = uap->value;
+	user_addr_t	sizep = uap->size;
+	user_size_t	size;
+	int		error;
+
+	if (!kauth_cred_issuser(kauth_cred_get())) {
+		return EPERM;
+	}
+
+#if CONFIG_MACF
+	error = mac_system_check_kas_info(kauth_cred_get(), selector);
+	if (error) {
+		return error;
+	}
+#endif
+
+	if (IS_64BIT_PROCESS(p)) {
+		user64_size_t size64;
+		error = copyin(sizep, &size64, sizeof(size64));
+		size = (user_size_t)size64;
+	} else {
+		user32_size_t size32;
+		error = copyin(sizep, &size32, sizeof(size32));
+		size = (user_size_t)size32;
+	}
+	if (error) {
+		return error;
+	}
+
+	switch (selector) {
+		case KAS_INFO_KERNEL_TEXT_SLIDE_SELECTOR:
+			{
+				uint64_t slide = vm_kernel_slide;
+
+				if (sizeof(slide) != size) {
+					return EINVAL;
+				}
+
+				if (IS_64BIT_PROCESS(p)) {
+					user64_size_t size64 = (user64_size_t)size;
+					error = copyout(&size64, sizep, sizeof(size64));
+				} else {
+					user32_size_t size32 = (user32_size_t)size;
+					error = copyout(&size32, sizep, sizeof(size32));
+				}
+				if (error) {
+					return error;
+				}
+
+				error = copyout(&slide, valuep, sizeof(slide));
+				if (error) {
+					return error;
+				}
+			}
+			break;
+		default:
+			return EINVAL;
+	}
+
+	return 0;
+#endif /* !SECURE_KERNEL */
+}
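For context (not part of the diff above): the final hunk adds the kas_info() syscall handler, which copies the kernel text slide out to a superuser caller after validating that the supplied size equals sizeof(uint64_t). A minimal user-space sketch of how it might be exercised is below. It assumes the private <sys/kas_info.h> header (declaring kas_info() and KAS_INFO_KERNEL_TEXT_SLIDE_SELECTOR) is visible to the build, and that the program runs as root; on SECURE_KERNEL builds the syscall returns ENOTSUP.

/*
 * Hypothetical usage sketch only -- not part of the diff.
 * Assumes the private <sys/kas_info.h> prototype:
 *     int kas_info(int selector, void *value, size_t *size);
 * Non-root callers get EPERM (see the kauth_cred_issuser() check above).
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/kas_info.h>

int
main(void)
{
	uint64_t slide = 0;
	size_t size = sizeof(slide);	/* handler rejects any other size with EINVAL */

	if (kas_info(KAS_INFO_KERNEL_TEXT_SLIDE_SELECTOR, &slide, &size) != 0) {
		fprintf(stderr, "kas_info: %s\n", strerror(errno));
		return 1;
	}
	printf("kernel text slide: 0x%llx\n", (unsigned long long)slide);
	return 0;
}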