+ int error = 0;
+
+ do {
+ size_t len = 0;
+ if (imgp->ip_strspace <= 0) {
+ error = E2BIG;
+ break;
+ }
+ if (IS_UIO_SYS_SPACE(seg)) {
+ char *kstr = CAST_DOWN(char *,str); /* SAFE */
+ error = copystr(kstr, imgp->ip_strendp, imgp->ip_strspace, &len);
+ } else {
+ error = copyinstr(str, imgp->ip_strendp, imgp->ip_strspace,
+ &len);
+ }
+ imgp->ip_strendp += len;
+ imgp->ip_strspace -= len;
+ } while (error == ENAMETOOLONG);
+
+ return error;
+}
+
+/*
+ * exec_save_path
+ *
+ * To support new app package launching for Mac OS X, the dyld needs the
+ * first argument to execve() stored on the user stack.
+ *
+ * Save the executable path name at the top of the strings area and set
+ * the argument vector pointer to the location following that to indicate
+ * the start of the argument and environment tuples, setting the remaining
+ * string space count to the size of the string area minus the path length
+ * and a reserve for two pointers.
+ *
+ * Parameters:	struct image_params *	image parameter block
+ * char * path used to invoke program
+ * uio_seg segment where path is located
+ *
+ * Returns: int 0 Success
+ * !0 Failure: error number
+ * Implicit returns:
+ * (imgp->ip_strings) saved path
+ * (imgp->ip_strspace) space remaining in ip_strings
+ * (imgp->ip_argv) beginning of argument list
+ * (imgp->ip_strendp) start of remaining copy area
+ *
+ * Note:	We have to do this before the initial namei() since if the
+ *		path contains symbolic links, namei() will overwrite the
+ * original path buffer contents. If the last symbolic link
+ * resolved was a relative pathname, we would lose the original
+ * "path", which could be an absolute pathname. This might be
+ * unacceptable for dyld.
+ */
+static int
+exec_save_path(struct image_params *imgp, user_addr_t path, /*uio_seg*/int seg)
+{
+ int error;
+ size_t len;
+ char *kpath = CAST_DOWN(char *,path); /* SAFE */
+
+ imgp->ip_strendp = imgp->ip_strings;
+ imgp->ip_strspace = SIZE_IMG_STRSPACE;
+
+ len = MIN(MAXPATHLEN, imgp->ip_strspace);
+
+ switch( seg) {
+ case UIO_USERSPACE32:
+ case UIO_USERSPACE64: /* Same for copyin()... */
+ error = copyinstr(path, imgp->ip_strings, len, &len);
+ break;
+ case UIO_SYSSPACE32:
+ error = copystr(kpath, imgp->ip_strings, len, &len);
+ break;
+ default:
+ error = EFAULT;
+ break;
+ }
+
+ if (!error) {
+ imgp->ip_strendp += len;
+ imgp->ip_strspace -= len;
+ imgp->ip_argv = imgp->ip_strendp;
+ }
+
+ return(error);
+}
+
+
+
+/*
+ * exec_shell_imgact
+ *
+ * Image activator for interpreter scripts. If the image begins with the
+ * characters "#!", then it is an interpreter script. Verify that we are
+ * not already executing in Classic mode, and that the length of the script
+ * line indicating the interpreter is not in excess of the maximum allowed
+ * size. If this is the case, then break out the arguments, if any, which
+ * are separated by white space, and copy them into the argument save area
+ * as if they were provided on the command line before all other arguments.
+ * The line ends when we encounter a comment character ('#') or newline.
+ *
+ * Parameters; struct image_params * image parameter block
+ *
+ * Returns: -1 not an interpreter (keep looking)
+ * -3 Success: interpreter: relookup
+ * >0 Failure: interpreter: error number
+ *
+ * A return value other than -1 indicates subsequent image activators should
+ * not be given the opportunity to attempt to activate the image.
+ */
+static int
+exec_shell_imgact(struct image_params *imgp)
+{
+ char *vdata = imgp->ip_vdata;
+ char *ihp;
+ char *line_endp;
+ char *interp;
+
+ /*
+ * Make sure it's a shell script. If we've already redirected
+ * from an interpreted file once, don't do it again.
+ *
+ * Note: We disallow Classic, since the expectation is that we
+ * may run a Classic interpreter, but not an interpret a Classic
+ * image. This is consistent with historical behaviour.
+ */
+ if (vdata[0] != '#' ||
+ vdata[1] != '!' ||
+ (imgp->ip_flags & IMGPF_INTERPRET) != 0) {
+ return (-1);
+ }
+
+
+ imgp->ip_flags |= IMGPF_INTERPRET;
+
+ /* Check to see if SUGID scripts are permitted. If they aren't then
+ * clear the SUGID bits.
+ * imgp->ip_vattr is known to be valid.
+ */
+ if (sugid_scripts == 0) {
+ imgp->ip_origvattr->va_mode &= ~(VSUID | VSGID);
+ }
+
+ /* Find the nominal end of the interpreter line */
+ for( ihp = &vdata[2]; *ihp != '\n' && *ihp != '#'; ihp++) {
+ if (ihp >= &vdata[IMG_SHSIZE])
+ return (ENOEXEC);
+ }
+
+ line_endp = ihp;
+ ihp = &vdata[2];
+ /* Skip over leading spaces - until the interpreter name */
+ while ( ihp < line_endp && ((*ihp == ' ') || (*ihp == '\t')))
+ ihp++;
+
+ /*
+ * Find the last non-whitespace character before the end of line or
+ * the beginning of a comment; this is our new end of line.
+ */
+ for (;line_endp > ihp && ((*line_endp == ' ') || (*line_endp == '\t')); line_endp--)
+ continue;
+
+ /* Empty? */
+ if (line_endp == ihp)
+ return (ENOEXEC);
+
+ /* copy the interpreter name */
+ interp = imgp->ip_interp_name;
+ while ((ihp < line_endp) && (*ihp != ' ') && (*ihp != '\t'))
+ *interp++ = *ihp++;
+ *interp = '\0';
+
+ exec_save_path(imgp, CAST_USER_ADDR_T(imgp->ip_interp_name),
+ UIO_SYSSPACE32);
+
+ ihp = &vdata[2];
+ while (ihp < line_endp) {
+ /* Skip leading whitespace before each argument */
+ while ((*ihp == ' ') || (*ihp == '\t'))
+ ihp++;
+
+ if (ihp >= line_endp)
+ break;
+
+ /* We have an argument; copy it */
+ while ((ihp < line_endp) && (*ihp != ' ') && (*ihp != '\t')) {
+ *imgp->ip_strendp++ = *ihp++;
+ imgp->ip_strspace--;
+ }
+ *imgp->ip_strendp++ = 0;
+ imgp->ip_strspace--;
+ imgp->ip_argc++;
+ }
+
+ return (-3);
+}
+
+
+
+/*
+ * exec_fat_imgact
+ *
+ * Image activator for fat 1.0 binaries. If the binary is fat, then we
+ * need to select an image from it internally, and make that the image
+ * we are going to attempt to execute. At present, this consists of
+ * reloading the first page for the image with a first page from the
+ * offset location indicated by the fat header.
+ *
+ * Important: This image activator is byte order neutral.
+ *
+ * Note: If we find an encapsulated binary, we make no assertions
+ * about its validity; instead, we leave that up to a rescan
+ * for an activator to claim it, and, if it is claimed by one,
+ * that activator is responsible for determining validity.
+ */
+static int
+exec_fat_imgact(struct image_params *imgp)
+{
+ struct proc *p = vfs_context_proc(imgp->ip_vfs_context);
+ kauth_cred_t cred = p->p_ucred;
+ struct fat_header *fat_header = (struct fat_header *)imgp->ip_vdata;
+ struct fat_arch fat_arch;
+ int resid, error;
+ load_return_t lret;
+
+ /* Make sure it's a fat binary */
+ if ((fat_header->magic != FAT_MAGIC) &&
+ (fat_header->magic != FAT_CIGAM)) {
+ error = -1;
+ goto bad;
+ }
+
+ /* Look up our preferred architecture in the fat file. */
+ lret = fatfile_getarch_affinity(imgp->ip_vp,
+ (vm_offset_t)fat_header,
+ &fat_arch,
+ (p->p_flag & P_AFFINITY));
+ if (lret != LOAD_SUCCESS) {
+ error = load_return_to_errno(lret);
+ goto bad;
+ }
+
+ /* Read the Mach-O header out of it */
+ error = vn_rdwr(UIO_READ, imgp->ip_vp, imgp->ip_vdata,
+ PAGE_SIZE, fat_arch.offset,
+ UIO_SYSSPACE32, (IO_UNIT|IO_NODELOCKED),
+ cred, &resid, p);
+ if (error) {
+ goto bad;
+ }
+
+ /* Did we read a complete header? */
+ if (resid) {
+ error = EBADEXEC;
+ goto bad;
+ }
+
+ /* Success. Indicate we have identified an encapsulated binary */
+ error = -2;
+ imgp->ip_arch_offset = (user_size_t)fat_arch.offset;
+ imgp->ip_arch_size = (user_size_t)fat_arch.size;
+
+bad:
+ return (error);
+}
+
+/*
+ * exec_mach_imgact
+ *
+ * Image activator for mach-o 1.0 binaries.
+ *
+ * Important: This image activator is NOT byte order neutral.
+ */
+static int
+exec_mach_imgact(struct image_params *imgp)
+{
+ struct mach_header *mach_header = (struct mach_header *)imgp->ip_vdata;
+ kauth_cred_t cred = vfs_context_ucred(imgp->ip_vfs_context);
+ struct proc *p = vfs_context_proc(imgp->ip_vfs_context);
+ int error = 0;
+ int vfexec = 0;
+ task_t task;
+ task_t new_task;
+ thread_t thread;
+ struct uthread *uthread;
+ vm_map_t old_map = VM_MAP_NULL;
+ vm_map_t map;
+ boolean_t clean_regions = FALSE;
+ shared_region_mapping_t initial_region = NULL;
+ load_return_t lret;
+ load_result_t load_result;
+
+ /*
+ * make sure it's a Mach-O 1.0 or Mach-O 2.0 binary; the difference
+ * is a reserved field on the end, so for the most part, we can
+ * treat them as if they were identical.
+ */
+ if ((mach_header->magic != MH_MAGIC) &&
+ (mach_header->magic != MH_MAGIC_64)) {
+ error = -1;
+ goto bad;
+ }
+
+ task = current_task();
+ thread = current_thread();
+ uthread = get_bsdthread_info(thread);
+
+ if (uthread->uu_flag & UT_VFORK)
+ vfexec = 1; /* Mark in exec */
+
+ if ((mach_header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64)
+ imgp->ip_flags |= IMGPF_IS_64BIT;
+
+ if (!grade_binary(mach_header->cputype, mach_header->cpusubtype)) {
+ error = EBADARCH;
+ goto bad;
+ }
+
+ /*
+ * Copy in arguments/environment from the old process, if the
+ * vector is non-NULL (i.e. exec is not being called from
+ * load_init_program(), as a special case, at system startup).
+ */
+ if (imgp->ip_user_argv != 0LL) {
+ error = exec_extract_strings(imgp);
+ if (error)
+ goto bad;
+ }
+
+ /*
+ * Hack for binary compatability; put three NULs on the end of the
+ * string area, and round it up to the next word boundary. This
+ * ensures padding with NULs to the boundary.
+ */
+ imgp->ip_strendp[0] = 0;
+ imgp->ip_strendp[1] = 0;
+ imgp->ip_strendp[2] = 0;
+ imgp->ip_strendp += (((imgp->ip_strendp - imgp->ip_strings) + NBPW-1) & ~(NBPW-1));
+
+
+ if (vfexec) {
+ kern_return_t result;
+
+ result = task_create_internal(task, FALSE, &new_task);
+ if (result != KERN_SUCCESS)
+ printf("execve: task_create failed. Code: 0x%x\n", result);
+ p->task = new_task;
+ set_bsdtask_info(new_task, p);
+ if (p->p_nice != 0)
+ resetpriority(p);
+ map = get_task_map(new_task);
+ result = thread_create(new_task, &imgp->ip_vfork_thread);
+ if (result != KERN_SUCCESS)
+ printf("execve: thread_create failed. Code: 0x%x\n", result);
+ /* reset local idea of task, thread, uthread */
+ task = new_task;
+ thread = imgp->ip_vfork_thread;
+ uthread = get_bsdthread_info(thread);
+ } else {
+ map = VM_MAP_NULL;
+ }
+
+ /*
+ * We set these flags here; this is OK, since if we fail after
+ * this point, we have already destroyed the parent process anyway.
+ */
+ if (imgp->ip_flags & IMGPF_IS_64BIT) {
+ task_set_64bit(task, TRUE);
+ p->p_flag |= P_LP64;
+ } else {
+ task_set_64bit(task, FALSE);
+ p->p_flag &= ~P_LP64;
+ }
+
+ /*
+ * Load the Mach-O file.
+ */
+/* LP64 - remove following "if" statement after osfmk/vm/task_working_set.c */
+if((imgp->ip_flags & IMGPF_IS_64BIT) == 0)
+ if(imgp->ip_tws_cache_name) {
+ tws_handle_startup_file(task, kauth_cred_getuid(cred),
+ imgp->ip_tws_cache_name, imgp->ip_vp, &clean_regions);
+ }
+
+ vm_get_shared_region(task, &initial_region);
+
+
+ /*
+ * NOTE: An error after this point indicates we have potentially
+ * destroyed or overwrote some process state while attempting an
+ * execve() following a vfork(), which is an unrecoverable condition.
+ */
+
+ /*
+ * We reset the task to 64-bit (or not) here. It may have picked up
+ * a new map, and we need that to reflect its true 64-bit nature.
+ */
+ task_set_64bit(task,
+ ((imgp->ip_flags & IMGPF_IS_64BIT) == IMGPF_IS_64BIT));
+
+ /*
+ * Actually load the image file we previously decided to load.
+ */
+ lret = load_machfile(imgp, mach_header, thread, map, clean_regions, &load_result);
+
+ if (lret != LOAD_SUCCESS) {
+ error = load_return_to_errno(lret);
+ goto badtoolate;
+ }
+
+ /* load_machfile() maps the vnode */
+ (void)ubc_map(imgp->ip_vp, PROT_EXEC);
+
+ /*
+ * deal with set[ug]id.
+ */
+ error = exec_handle_sugid(imgp);
+
+ KNOTE(&p->p_klist, NOTE_EXEC);
+
+ if (!vfexec && (p->p_flag & P_TRACED))
+ psignal(p, SIGTRAP);
+
+ if (error) {
+ goto badtoolate;
+ }
+ vnode_put(imgp->ip_vp);
+ imgp->ip_vp = NULL;
+
+ if (load_result.unixproc &&
+ create_unix_stack(get_task_map(task),
+ load_result.user_stack, load_result.customstack, p)) {
+ error = load_return_to_errno(LOAD_NOSPACE);
+ goto badtoolate;
+ }
+
+ if (vfexec) {
+ uthread->uu_ar0 = (void *)get_user_regs(thread);
+ old_map = vm_map_switch(get_task_map(task));
+ }
+
+ if (load_result.unixproc) {
+ user_addr_t ap;
+
+ /*
+ * Copy the strings area out into the new process address
+ * space.
+ */
+ ap = p->user_stack;
+ error = exec_copyout_strings(imgp, &ap);
+ if (error) {
+ if (vfexec)
+ vm_map_switch(old_map);
+ goto badtoolate;
+ }
+ /* Set the stack */
+ thread_setuserstack(thread, ap);
+ }
+
+ if (load_result.dynlinker) {
+ uint64_t ap;
+
+ /* Adjust the stack */
+ if (imgp->ip_flags & IMGPF_IS_64BIT) {
+ ap = thread_adjuserstack(thread, -8);
+ (void)copyoutptr(load_result.mach_header, ap, 8);
+ } else {
+ ap = thread_adjuserstack(thread, -4);
+ (void)suword(ap, load_result.mach_header);
+ }
+ }
+
+ if (vfexec) {
+ vm_map_switch(old_map);
+ }
+ /* Set the entry point */
+ thread_setentrypoint(thread, load_result.entry_point);
+
+ /* Stop profiling */
+ stopprofclock(p);
+
+ /*
+ * Reset signal state.
+ */
+ execsigs(p, thread);
+
+ /*
+ * Close file descriptors
+ * which specify close-on-exec.
+ */
+ fdexec(p);
+
+ /*
+ * need to cancel async IO requests that can be cancelled and wait for those
+ * already active. MAY BLOCK!
+ */
+ _aio_exec( p );
+
+ /* FIXME: Till vmspace inherit is fixed: */
+ if (!vfexec && p->vm_shm)
+ shmexec(p);
+ /* Clean up the semaphores */
+ semexit(p);
+
+ /*
+ * Remember file name for accounting.
+ */
+ p->p_acflag &= ~AFORK;
+ /* If the translated name isn't NULL, then we want to use
+ * that translated name as the name we show as the "real" name.
+ * Otherwise, use the name passed into exec.
+ */
+ if (0 != imgp->ip_p_comm[0]) {
+ bcopy((caddr_t)imgp->ip_p_comm, (caddr_t)p->p_comm,
+ sizeof(p->p_comm));
+ } else {
+ if (imgp->ip_ndp->ni_cnd.cn_namelen > MAXCOMLEN)
+ imgp->ip_ndp->ni_cnd.cn_namelen = MAXCOMLEN;
+ bcopy((caddr_t)imgp->ip_ndp->ni_cnd.cn_nameptr, (caddr_t)p->p_comm,
+ (unsigned)imgp->ip_ndp->ni_cnd.cn_namelen);
+ p->p_comm[imgp->ip_ndp->ni_cnd.cn_namelen] = '\0';
+ }
+
+ {
+ /* This is for kdebug */
+ long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;
+
+ /* Collect the pathname for tracing */
+ kdbg_trace_string(p, &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);
+
+
+
+ if (vfexec)
+ {
+ KERNEL_DEBUG_CONSTANT1((TRACEDBG_CODE(DBG_TRACE_DATA, 2)) | DBG_FUNC_NONE,
+ p->p_pid ,0,0,0, (unsigned int)thread);
+ KERNEL_DEBUG_CONSTANT1((TRACEDBG_CODE(DBG_TRACE_STRING, 2)) | DBG_FUNC_NONE,
+ dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, (unsigned int)thread);
+ }
+ else
+ {
+ KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_DATA, 2)) | DBG_FUNC_NONE,
+ p->p_pid ,0,0,0,0);
+ KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_STRING, 2)) | DBG_FUNC_NONE,
+ dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
+ }
+ }
+
+ p->p_flag &= ~P_CLASSIC;
+
+ /*
+ * mark as execed, wakeup the process that vforked (if any) and tell
+ * it that it now has it's own resources back
+ */
+ p->p_flag |= P_EXEC;
+ if (p->p_pptr && (p->p_flag & P_PPWAIT)) {
+ p->p_flag &= ~P_PPWAIT;
+ wakeup((caddr_t)p->p_pptr);
+ }
+
+ if (vfexec && (p->p_flag & P_TRACED)) {
+ psignal_vfork(p, new_task, thread, SIGTRAP);
+ }
+
+badtoolate:
+ if (vfexec) {
+ task_deallocate(new_task);
+ thread_deallocate(thread);
+ if (error)
+ error = 0;
+ }
+
+bad:
+ return(error);