X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/1c79356b52d46aa6b508fb032f5ae709b1f2897b..9bccf70c0258c7cac2dcb80011b2a964d884c552:/bsd/kern/kern_exec.c

diff --git a/bsd/kern/kern_exec.c b/bsd/kern/kern_exec.c
index 1db47973a..6eb6ebe8f 100644
--- a/bsd/kern/kern_exec.c
+++ b/bsd/kern/kern_exec.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  *
@@ -94,6 +94,7 @@
 #include 
 #include 
+#include 
 #include 
 #include 
@@ -103,6 +104,11 @@
 #include 
 #include 
 #include 
+#if KTRACE
+#include 
+#endif
+
+int app_profile = 0;
 
 extern vm_map_t bsd_pageable_map;
@@ -112,6 +118,8 @@ extern vm_map_t bsd_pageable_map;
 static int load_return_to_errno(load_return_t lrtn);
 
 int execve(struct proc *p, struct execve_args *uap, register_t *retval);
+static int execargs_alloc(vm_offset_t *addrp);
+static int execargs_free(vm_offset_t addr);
 
 int
 execv(p, args, retval)
@@ -155,7 +163,10 @@ execve(p, uap, retval)
 	load_return_t lret;
 	load_result_t load_result;
 	struct uthread	*uthread;
+	vm_map_t old_map;
+	vm_map_t map;
 	int i;
+	boolean_t new_shared_regions = FALSE;
 	union {
 		/* #! and name of interpreter */
 		char	ex_shell[SHSIZE];
@@ -170,26 +181,35 @@ execve(p, uap, retval)
 	int savedpathlen = 0;
 	vm_offset_t *execargsp;
 	char *cpnospace;
-	task_t tsk;
+	task_t task;
+	task_t new_task;
+	thread_act_t thr_act;
 	int numthreads;
+	int vfexec=0;
+	unsigned long arch_offset =0;
+	unsigned long arch_size = 0;
+	char *ws_cache_name = NULL;	/* used for pre-heat */
 
-	tsk = current_task();
-
+	task = current_task();
+	thr_act = current_act();
+	uthread = get_bsdthread_info(thr_act);
 
-	if(tsk != kernel_task) {
-		numthreads = get_task_numacts(tsk);
-		if (numthreads <= 0 )
-			return(EINVAL);
-		if (numthreads > 1) {
-			return(EOPNOTSUPP);
+	if (uthread->uu_flag & P_VFORK) {
+		vfexec = 1; /* Mark in exec */
+	} else {
+		if (task != kernel_task) {
+			numthreads = get_task_numacts(task);
+			if (numthreads <= 0 )
+				return(EINVAL);
+			if (numthreads > 1) {
+				return(EOPNOTSUPP);
+			}
 		}
 	}
 
-	ret = kmem_alloc_pageable(bsd_pageable_map, &execargs, NCARGS);
-	if (ret != KERN_SUCCESS)
-		return(ENOMEM);
-
-	uthread = get_bsdthread_info(current_act());
+	error = execargs_alloc(&execargs);
+	if (error)
+		return(error);
 
 	savedpath = execargs;
@@ -202,7 +222,7 @@ execve(p, uap, retval)
 	 * We have to do this before namei() because in case of
 	 * symbolic links, namei() would overwrite the original "path".
 	 * In case the last symbolic link resolved was a relative pathname
-	 * we would loose the original "path", which could be an
+	 * we would lose the original "path", which could be an
 	 * absolute pathname. This might be unacceptable for dyld.
 	 */
 	/* XXX We could optimize to avoid copyinstr in the namei() */
@@ -214,6 +234,22 @@ execve(p, uap, retval)
 	 * copyinstr will put in savedpathlen, the count of
 	 * characters (including NULL) in the path.
 	 */
+
+	if(app_profile != 0) {
+
+		/* grab the name of the file out of its path */
+		/* we will need this for lookup within the   */
+		/* name file */
+		ws_cache_name = savedpath + savedpathlen;
+		while (ws_cache_name[0] != '/') {
+			if(ws_cache_name == savedpath) {
+				ws_cache_name--;
+				break;
+			}
+			ws_cache_name--;
+		}
+		ws_cache_name++;
+	}
 
 	/* Save the name aside for future use */
 	execargsp = (vm_offset_t *)((char *)(execargs) + savedpathlen);
@@ -459,24 +495,64 @@ again:
 			goto bad;
 		}
 
-		/*
-		 * Load the Mach-O file.
-		 */
-		VOP_UNLOCK(vp, 0, p);
-		lret = load_machfile(vp, mach_header, fat_arch.offset,
-			fat_arch.size, &load_result);
+		arch_offset = fat_arch.offset;
+		arch_size = fat_arch.size;
 	} else {
 		/*
 		 * Load the Mach-O file.
 		 */
-		VOP_UNLOCK(vp, 0, p);
-		lret = load_machfile(vp, mach_header, 0,
-			(u_long)vattr.va_size, &load_result);
+		arch_offset = 0;
+		arch_size = (u_long)vattr.va_size;
 	}
 
+	if (vfexec) {
+		kern_return_t	result;
+
+		result = task_create_local(task, FALSE, FALSE, &new_task);
+		if (result != KERN_SUCCESS)
+			printf("execve: task_create failed. Code: 0x%x\n", result);
+		p->task = new_task;
+		set_bsdtask_info(new_task, p);
+		if (p->p_nice != 0)
+			resetpriority(p);
+		task = new_task;
+		map = get_task_map(new_task);
+		result = thread_create(new_task, &thr_act);
+		if (result != KERN_SUCCESS)
+			printf("execve: thread_create failed. Code: 0x%x\n", result);
+		uthread = get_bsdthread_info(thr_act);
+	} else {
+		map = VM_MAP_NULL;
+
+	}
+
+	/*
+	 * Load the Mach-O file.
+	 */
+	VOP_UNLOCK(vp, 0, p);
+	if(ws_cache_name) {
+		tws_handle_startup_file(task, cred->cr_uid,
+			ws_cache_name, vp, &new_shared_regions);
+	}
+	if (new_shared_regions) {
+		shared_region_mapping_t	new_shared_region;
+		shared_region_mapping_t	old_shared_region;
+
+		if (shared_file_create_system_region(&new_shared_region))
+			panic("couldn't create system_shared_region\n");
+
+		vm_get_shared_region(task, &old_shared_region);
+		vm_set_shared_region(task, new_shared_region);
+
+		shared_region_mapping_dealloc(old_shared_region);
+	}
+
+	lret = load_machfile(vp, mach_header, arch_offset,
+		arch_size, &load_result, thr_act, map);
+
 	if (lret != LOAD_SUCCESS) {
 		error = load_return_to_errno(lret);
-		goto bad;
+		goto badtoolate;
 	}
 
 	/* load_machfile() maps the vnode */
@@ -497,9 +573,10 @@ again:
 	 * root set it.
 	 */
 	if (p->p_tracep && !(p->p_traceflag & KTRFAC_ROOT)) {
-		vrele(p->p_tracep);
+		struct vnode *tvp = p->p_tracep;
 		p->p_tracep = NULL;
 		p->p_traceflag = 0;
+		vrele(tvp);
 	}
 #endif
 	if (origvattr.va_mode & VSUID)
@@ -546,34 +623,36 @@ again:
 	p->p_cred->p_svuid = p->p_ucred->cr_uid;
 	p->p_cred->p_svgid = p->p_ucred->cr_gid;
 
-	if (p->p_flag & P_TRACED) {
+	if (!vfexec && (p->p_flag & P_TRACED))
 		psignal(p, SIGTRAP);
-#ifdef BSD_USE_APC
-		thread_apc_set(current_act(), bsd_ast);
-#else
-		ast_on(AST_BSD);
-#endif
-	}
 
 	if (error) {
-		goto bad;
+		goto badtoolate;
 	}
 	VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY, p);
 	vput(vp);
 	vp = NULL;
 
 	if (load_result.unixproc &&
-		create_unix_stack(current_map(),
-			load_result.user_stack, p)) {
+		create_unix_stack(get_task_map(task),
+			load_result.user_stack, load_result.customstack, p)) {
 		error = load_return_to_errno(LOAD_NOSPACE);
-		goto bad;
+		goto badtoolate;
+	}
+
+	if (vfexec) {
+		uthread->uu_ar0 = (void *)get_user_regs(thr_act);
 	}
 
 	/*
 	 * Copy back arglist if necessary.
 	 */
 
+
 	ucp = p->user_stack;
+	if (vfexec) {
+		old_map = vm_map_switch(get_task_map(task));
+	}
 	if (load_result.unixproc) {
 		int	pathptr;
 
@@ -584,13 +663,16 @@ again:
 		 * the "path" at the begining of the execargs buffer.
 		 * copy it just before the string area.
 		 */
-		savedpathlen = (savedpathlen + NBPW-1) & ~(NBPW-1);
+		savedpathlen = (savedpathlen + NBPW-1) & ~(NBPW-1);
 		len = 0;
 		pathptr = ucp - savedpathlen;
 		error = copyoutstr(savedpath, (caddr_t)pathptr,
 					(unsigned)savedpathlen, &len);
-		if (error)
-			goto bad;
+		if (error) {
+			if (vfexec)
+				vm_map_switch(old_map);
+			goto badtoolate;
+		}
 
 		/* Save a NULL pointer below it */
 		(void) suword((caddr_t)(pathptr - NBPW), 0);
@@ -606,7 +688,11 @@ again:
 		 * and NBPW for the NULL after pointer to path.
 		 */
 		ap = ucp - na*NBPW - 3*NBPW - savedpathlen - 2*NBPW;
+#if defined(ppc)
+		thread_setuserstack(thr_act, ap);	/* Set the stack */
+#else
 		uthread->uu_ar0[SP] = ap;
+#endif
 		(void) suword((caddr_t)ap, na-ne);	/* argc */
 		nc = 0;
 		cc = 0;
@@ -641,11 +727,20 @@ again:
 	}
 
 	if (load_result.dynlinker) {
+#if defined(ppc)
+		ap = thread_adjuserstack(thr_act, -4);	/* Adjust the stack */
+#else
 		ap = uthread->uu_ar0[SP] -= 4;
+#endif
 		(void) suword((caddr_t)ap, load_result.mach_header);
 	}
 
-#if defined(i386) || defined(ppc)
+	if (vfexec) {
+		vm_map_switch(old_map);
+	}
+#if defined(ppc)
+	thread_setentrypoint(thr_act, load_result.entry_point);	/* Set the entry point */
+#elif defined(i386)
 	uthread->uu_ar0[PC] = load_result.entry_point;
 #else
 #error architecture not implemented!
@@ -657,7 +752,7 @@ again:
 	/*
 	 * Reset signal state.
 	 */
-	execsigs(p);
+	execsigs(p, thr_act);
 
 	/*
 	 * Close file descriptors
@@ -665,8 +760,10 @@ again:
 	 */
 	fdexec(p);
 	/* FIXME: Till vmspace inherit is fixed: */
-	if (p->vm_shm)
+	if (!vfexec && p->vm_shm)
 		shmexit(p);
+	/* Clean up the semaphores */
+	semexit(p);
 
 	/*
 	 * Remember file name for accounting.
@@ -684,8 +781,13 @@ again:
 
 		/* Collect the pathname for tracing */
 		kdbg_trace_string(p, &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);
-		KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_STRING, 2)) | DBG_FUNC_NONE,
-			dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
+
+		if (vfexec)
+			KERNEL_DEBUG_CONSTANT1((TRACEDBG_CODE(DBG_TRACE_STRING, 2)) | DBG_FUNC_NONE,
+				dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, getshuttle_thread(thr_act));
+		else
+			KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_STRING, 2)) | DBG_FUNC_NONE,
+				dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
 	}
 
 	/*
@@ -698,18 +800,29 @@ again:
 		wakeup((caddr_t)p->p_pptr);
 	}
 
+	if (vfexec && (p->p_flag & P_TRACED)) {
+		psignal_vfork(p, new_task, thr_act, SIGTRAP);
+	}
+
+badtoolate:
+	if (vfexec) {
+		task_deallocate(new_task);
+		act_deallocate(thr_act);
+		if (error)
+			error = 0;
+	}
bad:
 	FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI);
 	if (vp)
 		vput(vp);
bad1:
-#if FIXME /* [ */
 	if (execargs)
-		kmem_free_wakeup(bsd_pageable_map, execargs, NCARGS);
-#else /* FIXME ][ */
-	if (execargs)
-		kmem_free(bsd_pageable_map, execargs, NCARGS);
-#endif /* FIXME ] */
+		execargs_free(execargs);
+	if (!error && vfexec) {
+		vfork_return(current_act(), p->p_pptr, p, retval);
+		(void) thread_resume(thr_act);
+		return(0);
+	}
 	return(error);
 }
@@ -717,23 +830,22 @@ bad1:
 #define unix_stack_size(p)	(p->p_rlimit[RLIMIT_STACK].rlim_cur)
 
 kern_return_t
-create_unix_stack(map, user_stack, p)
+create_unix_stack(map, user_stack, customstack, p)
 	vm_map_t	map;
 	vm_offset_t	user_stack;
+	int		customstack;
 	struct proc	*p;
 {
 	vm_size_t	size;
 	vm_offset_t	addr;
 
 	p->user_stack = user_stack;
-	size = round_page(unix_stack_size(p));
-#if STACK_GROWTH_UP
-	/* stack always points to first address for stacks */
-	addr = user_stack;
-#else STACK_GROWTH_UP
-	addr = trunc_page(user_stack - size);
-#endif /* STACK_GROWTH_UP */
-	return (vm_allocate(map,&addr, size, FALSE));
+	if (!customstack) {
+		size = round_page(unix_stack_size(p));
+		addr = trunc_page(user_stack - size);
+		return (vm_allocate(map,&addr, size, FALSE));
+	} else
+		return(KERN_SUCCESS);
 }
 
 #include 
@@ -757,8 +869,6 @@ load_init_program(p)
 	register_t retval[2];
 	struct uthread * ut;
 
-	unix_master();
-
 	error = 0;
 
 	/* init_args are copied in string form directly from bootstrap */
@@ -851,8 +961,6 @@ load_init_program(p)
 
 		error = execve(p,&init_exec_args,retval);
 	} while (error);
-
-	unix_release();
 }
 
 /*
@@ -906,3 +1014,61 @@ check_exec_access(p, vp, vap)
 	return (0);
 }
 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+extern semaphore_t execve_semaphore;
+
+static int
+execargs_alloc(addrp)
+	vm_offset_t	*addrp;
+{
+	kern_return_t kret;
+
+	kret = semaphore_wait(execve_semaphore);
+	if (kret != KERN_SUCCESS)
+		switch (kret) {
+		default:
+			return (EINVAL);
+		case KERN_INVALID_ADDRESS:
+		case KERN_PROTECTION_FAILURE:
+			return (EACCES);
+		case KERN_ABORTED:
+		case KERN_OPERATION_TIMED_OUT:
+			return (EINTR);
+		}
+
+	kret = kmem_alloc_pageable(bsd_pageable_map, addrp, NCARGS);
+	if (kret != KERN_SUCCESS)
+		return (ENOMEM);
+
+	return (0);
+}
+
+static int
+execargs_free(addr)
+	vm_offset_t	addr;
+{
+	kern_return_t kret;
+
+	kmem_free(bsd_pageable_map, addr, NCARGS);
+
+	kret = semaphore_signal(execve_semaphore);
+	switch (kret) {
+	case KERN_INVALID_ADDRESS:
+	case KERN_PROTECTION_FAILURE:
+		return (EINVAL);
+	case KERN_ABORTED:
+	case KERN_OPERATION_TIMED_OUT:
+		return (EINTR);
+	case KERN_SUCCESS:
+		return(0);
+	default:
+		return (EINVAL);
+	}
+}
+
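The execargs_alloc()/execargs_free() pair added at the end of the diff replaces execve()'s direct kmem_alloc_pageable() call: every caller must first pass execve_semaphore, so only a bounded number of NCARGS-sized pageable argument buffers can be outstanding at once, and the buffer is returned to bsd_pageable_map before the semaphore is signalled again. The user-space sketch below illustrates the same gating pattern with POSIX semaphores; the names, the 256 KB buffer size, and the limit of 4 concurrent buffers are assumptions made for illustration, not values taken from the kernel sources.

/*
 * User-space sketch of the execargs_alloc()/execargs_free() pattern:
 * a counting semaphore bounds how many argument-sized buffers may be
 * live at once, so a burst of callers cannot exhaust the backing pool.
 * All names and limits here are illustrative assumptions.
 */
#include <errno.h>
#include <semaphore.h>
#include <stdlib.h>

#define ARG_BUF_BYTES   (256 * 1024)    /* stand-in for NCARGS */

static sem_t argbuf_sem;                /* stand-in for execve_semaphore */

static int
argbuf_alloc(void **bufp)
{
        if (sem_wait(&argbuf_sem) != 0)         /* blocks while the pool is full */
                return (errno == EINTR) ? EINTR : EINVAL;

        *bufp = malloc(ARG_BUF_BYTES);          /* stand-in for kmem_alloc_pageable() */
        if (*bufp == NULL) {
                sem_post(&argbuf_sem);          /* give the slot back on failure */
                return ENOMEM;
        }
        return 0;
}

static void
argbuf_free(void *buf)
{
        free(buf);                              /* stand-in for kmem_free() */
        sem_post(&argbuf_sem);                  /* release the slot for the next caller */
}

int
main(void)
{
        void *buf;

        sem_init(&argbuf_sem, 0, 4);            /* at most 4 buffers outstanding */

        if (argbuf_alloc(&buf) == 0) {
                /* ... build the argument block here ... */
                argbuf_free(buf);
        }

        sem_destroy(&argbuf_sem);
        return 0;
}

The error mapping in this sketch is deliberately simpler than the kernel routine's, which distinguishes KERN_ABORTED and KERN_OPERATION_TIMED_OUT (EINTR) from KERN_INVALID_ADDRESS and KERN_PROTECTION_FAILURE (EACCES) when semaphore_wait() fails.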