X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/d7e50217d7adf6e52786a38bcaa4cd698cb9a79e..89b3af67bb32e691275bf6fa803d1834b2284115:/bsd/kern/kern_exec.c?ds=sidebyside diff --git a/bsd/kern/kern_exec.c b/bsd/kern/kern_exec.c index 88b0a60c3..b486595ed 100644 --- a/bsd/kern/kern_exec.c +++ b/bsd/kern/kern_exec.c @@ -1,16 +1,19 @@ /* - * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER @@ -20,7 +23,7 @@ * Please see the License for the specific language governing rights and * limitations under the License. * - * @APPLE_LICENSE_HEADER_END@ + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ /* @@ -77,31 +80,53 @@ #include #include #include -#include +#include +#include #include -#include #include #include #include -#include -#include -#include +#include +#include +#include #include -#include +#include #include #include #include #include +#include +#include +#include /* shmexec() */ +#include /* ubc_map() */ +#include + +#include +#include +#include +#include +#include #include #include #include +#include +#include +#include #include +/* + * Mach things for which prototypes are unavailable from Mach headers + */ +void ipc_task_reset( + task_t task); + +extern struct savearea *get_user_regs(thread_t); + + #include #include - #include #include #include @@ -110,454 +135,1555 @@ #if KTRACE #include #endif +#include + + +/* + * SIZE_MAXPTR The maximum size of a user space pointer, in bytes + * SIZE_IMG_STRSPACE The available string space, minus two pointers; we + * define it interms of the maximum, since we don't + * know the pointer size going in, until after we've + * parsed the executable image. 
+ */ +#define SIZE_MAXPTR 8 /* 64 bits */ +#define SIZE_IMG_STRSPACE (NCARGS - 2 * SIZE_MAXPTR) int app_profile = 0; extern vm_map_t bsd_pageable_map; +extern struct fileops vnops; #define ROUND_PTR(type, addr) \ (type *)( ( (unsigned)(addr) + 16 - 1) \ & ~(16 - 1) ) +struct image_params; /* Forward */ +static int exec_copyout_strings(struct image_params *imgp, user_addr_t *stackp); static int load_return_to_errno(load_return_t lrtn); -int execve(struct proc *p, struct execve_args *uap, register_t *retval); -static int execargs_alloc(vm_offset_t *addrp); -static int execargs_free(vm_offset_t addr); +static int execargs_alloc(struct image_params *imgp); +static int execargs_free(struct image_params *imgp); +static int exec_check_permissions(struct image_params *imgp); +static int exec_extract_strings(struct image_params *imgp); +static int exec_handle_sugid(struct image_params *imgp); +static int sugid_scripts = 0; +SYSCTL_INT (_kern, OID_AUTO, sugid_scripts, CTLFLAG_RW, &sugid_scripts, 0, ""); +static kern_return_t create_unix_stack(vm_map_t map, user_addr_t user_stack, + int customstack, struct proc *p); +static int copyoutptr(user_addr_t ua, user_addr_t ptr, int ptr_size); + +/* XXX forward; should be in headers, but can't be for one reason or another */ +extern void vfork_return(thread_t th_act, + struct proc * p, + struct proc *p2, + register_t *retval); -int -execv(p, args, retval) - struct proc *p; - void *args; - int *retval; +/* + * exec_add_string + * + * Add the requested string to the string space area. + * + * Parameters; struct image_params * image parameter block + * user_addr_t string to add to strings area + * uio_seg segment where string is located + * + * Returns: 0 Success + * !0 Failure errno from copyinstr() + * + * Implicit returns: + * (imgp->ip_strendp) updated location of next add, if any + * (imgp->ip_strspace) updated byte count of space remaining + */ +static int +exec_add_string(struct image_params *imgp, user_addr_t str, /*uio_seg*/int seg) +{ + int error = 0; + + do { + size_t len = 0; + if (imgp->ip_strspace <= 0) { + error = E2BIG; + break; + } + if (IS_UIO_SYS_SPACE(seg)) { + char *kstr = CAST_DOWN(char *,str); /* SAFE */ + error = copystr(kstr, imgp->ip_strendp, imgp->ip_strspace, &len); + } else { + error = copyinstr(str, imgp->ip_strendp, imgp->ip_strspace, + &len); + } + imgp->ip_strendp += len; + imgp->ip_strspace -= len; + } while (error == ENAMETOOLONG); + + return error; +} + +/* + * exec_save_path + * + * To support new app package launching for Mac OS X, the dyld needs the + * first argument to execve() stored on the user stack. + * + * Save the executable path name at the top of the strings area and set + * the argument vector pointer to the location following that to indicate + * the start of the argument and environment tuples, setting the remaining + * string space count to the size of the string area minus the path length + * and a reserve for two pointers. + * + * Parameters; struct image_params * image parameter block + * char * path used to invoke program + * uio_seg segment where path is located + * + * Returns: int 0 Success + * !0 Failure: error number + * Implicit returns: + * (imgp->ip_strings) saved path + * (imgp->ip_strspace) space remaining in ip_strings + * (imgp->ip_argv) beginning of argument list + * (imgp->ip_strendp) start of remaining copy area + * + * Note: We have to do this before the initial namei() since in the + * path contains symbolic links, namei() will overwrite the + * original path buffer contents. 
If the last symbolic link + * resolved was a relative pathname, we would lose the original + * "path", which could be an absolute pathname. This might be + * unacceptable for dyld. + */ +static int +exec_save_path(struct image_params *imgp, user_addr_t path, /*uio_seg*/int seg) +{ + int error; + size_t len; + char *kpath = CAST_DOWN(char *,path); /* SAFE */ + + imgp->ip_strendp = imgp->ip_strings; + imgp->ip_strspace = SIZE_IMG_STRSPACE; + + len = MIN(MAXPATHLEN, imgp->ip_strspace); + + switch( seg) { + case UIO_USERSPACE32: + case UIO_USERSPACE64: /* Same for copyin()... */ + error = copyinstr(path, imgp->ip_strings, len, &len); + break; + case UIO_SYSSPACE32: + error = copystr(kpath, imgp->ip_strings, len, &len); + break; + default: + error = EFAULT; + break; + } + + if (!error) { + imgp->ip_strendp += len; + imgp->ip_strspace -= len; + imgp->ip_argv = imgp->ip_strendp; + } + + return(error); +} + +#ifdef IMGPF_POWERPC +/* + * exec_powerpc32_imgact + * + * Implicitly invoke the PowerPC handler for a byte-swapped image magic + * number. This may happen either as a result of an attempt to invoke a + * PowerPC image directly, or indirectly as the interpreter used in an + * interpreter script. + * + * Parameters; struct image_params * image parameter block + * + * Returns: -1 not an PowerPC image (keep looking) + * -3 Success: exec_archhandler_ppc: relookup + * >0 Failure: exec_archhandler_ppc: error number + * + * Note: This image activator does not handle the case of a direct + * invocation of the exec_archhandler_ppc, since in that case, the + * exec_archhandler_ppc itself is not a PowerPC binary; instead, + * binary image activators must recognize the exec_archhandler_ppc; + * This is managed in exec_check_permissions(). + * + * Note: This image activator is limited to 32 bit powerpc images; + * if support for 64 bit powerpc images is desired, it would + * be more in line with this design to write a separate 64 bit + * image activator. + */ +static int +exec_powerpc32_imgact(struct image_params *imgp) +{ + struct mach_header *mach_header = (struct mach_header *)imgp->ip_vdata; + int error; + size_t len = 0; + + /* + * Make sure it's a PowerPC binary. If we've already redirected + * from an interpreted file once, don't do it again. + */ + if (mach_header->magic != MH_CIGAM) + return (-1); + + /* If there is no exec_archhandler_ppc, we can't run it */ + if (exec_archhandler_ppc.path[0] == 0) + return (EBADARCH); + + /* + * The PowerPC flag will be set by the exec_check_permissions() + * call anyway; however, we set this flag here so that the relookup + * in execve() does not follow symbolic links, as a side effect. + */ + imgp->ip_flags |= IMGPF_POWERPC; + + /* impute an interpreter */ + error = copystr(exec_archhandler_ppc.path, imgp->ip_interp_name, + IMG_SHSIZE, &len); + if (error) + return (error); + + /* + * provide a replacement string for p->p_comm; we have to use an + * an alternate buffer for this, rather than replacing it directly, + * since the exec may fail and return to the parent. In that case, + * we would have erroneously changed the parent p->p_comm instead. + */ + strncpy(imgp->ip_p_comm, imgp->ip_ndp->ni_cnd.cn_nameptr, MAXCOMLEN); + imgp->ip_p_comm[MAXCOMLEN] = '\0'; + + return (-3); +} +#endif /* IMGPF_POWERPC */ + + +/* + * exec_shell_imgact + * + * Image activator for interpreter scripts. If the image begins with the + * characters "#!", then it is an interpreter script. 
Verify that we are + * not already executing in PowerPC mode, and that the length of the script + * line indicating the interpreter is not in excess of the maximum allowed + * size. If this is the case, then break out the arguments, if any, which + * are separated by white space, and copy them into the argument save area + * as if they were provided on the command line before all other arguments. + * The line ends when we encounter a comment character ('#') or newline. + * + * Parameters; struct image_params * image parameter block + * + * Returns: -1 not an interpreter (keep looking) + * -3 Success: interpreter: relookup + * >0 Failure: interpreter: error number + * + * A return value other than -1 indicates subsequent image activators should + * not be given the opportunity to attempt to activate the image. + */ +static int +exec_shell_imgact(struct image_params *imgp) +{ + char *vdata = imgp->ip_vdata; + char *ihp; + char *line_endp; + char *interp; + + /* + * Make sure it's a shell script. If we've already redirected + * from an interpreted file once, don't do it again. + * + * Note: We disallow PowerPC, since the expectation is that we + * may run a PowerPC interpreter, but not an interpret a PowerPC + * image. This is consistent with historical behaviour. + */ + if (vdata[0] != '#' || + vdata[1] != '!' || + (imgp->ip_flags & IMGPF_INTERPRET) != 0) { + return (-1); + } + +#ifdef IMGPF_POWERPC + if ((imgp->ip_flags & IMGPF_POWERPC) != 0) + return (EBADARCH); +#endif /* IMGPF_POWERPC */ + + imgp->ip_flags |= IMGPF_INTERPRET; + + /* Check to see if SUGID scripts are permitted. If they aren't then + * clear the SUGID bits. + * imgp->ip_vattr is known to be valid. + */ + if (sugid_scripts == 0) { + imgp->ip_origvattr->va_mode &= ~(VSUID | VSGID); + } + + /* Find the nominal end of the interpreter line */ + for( ihp = &vdata[2]; *ihp != '\n' && *ihp != '#'; ihp++) { + if (ihp >= &vdata[IMG_SHSIZE]) + return (ENOEXEC); + } + + line_endp = ihp; + ihp = &vdata[2]; + /* Skip over leading spaces - until the interpreter name */ + while ( ihp < line_endp && ((*ihp == ' ') || (*ihp == '\t'))) + ihp++; + + /* + * Find the last non-whitespace character before the end of line or + * the beginning of a comment; this is our new end of line. + */ + for (;line_endp > ihp && ((*line_endp == ' ') || (*line_endp == '\t')); line_endp--) + continue; + + /* Empty? */ + if (line_endp == ihp) + return (ENOEXEC); + + /* copy the interpreter name */ + interp = imgp->ip_interp_name; + while ((ihp < line_endp) && (*ihp != ' ') && (*ihp != '\t')) + *interp++ = *ihp++; + *interp = '\0'; + + exec_save_path(imgp, CAST_USER_ADDR_T(imgp->ip_interp_name), + UIO_SYSSPACE32); + + ihp = &vdata[2]; + while (ihp < line_endp) { + /* Skip leading whitespace before each argument */ + while ((*ihp == ' ') || (*ihp == '\t')) + ihp++; + + if (ihp >= line_endp) + break; + + /* We have an argument; copy it */ + while ((ihp < line_endp) && (*ihp != ' ') && (*ihp != '\t')) { + *imgp->ip_strendp++ = *ihp++; + imgp->ip_strspace--; + } + *imgp->ip_strendp++ = 0; + imgp->ip_strspace--; + imgp->ip_argc++; + } + + return (-3); +} + + + +/* + * exec_fat_imgact + * + * Image activator for fat 1.0 binaries. If the binary is fat, then we + * need to select an image from it internally, and make that the image + * we are going to attempt to execute. At present, this consists of + * reloading the first page for the image with a first page from the + * offset location indicated by the fat header. 
+ * + * Important: This image activator is byte order neutral. + * + * Note: If we find an encapsulated binary, we make no assertions + * about its validity; instead, we leave that up to a rescan + * for an activator to claim it, and, if it is claimed by one, + * that activator is responsible for determining validity. + */ +static int +exec_fat_imgact(struct image_params *imgp) +{ + struct proc *p = vfs_context_proc(imgp->ip_vfs_context); + kauth_cred_t cred = kauth_cred_proc_ref(p); + struct fat_header *fat_header = (struct fat_header *)imgp->ip_vdata; + struct fat_arch fat_arch; + int resid, error; + load_return_t lret; + + /* Make sure it's a fat binary */ + if ((fat_header->magic != FAT_MAGIC) && + (fat_header->magic != FAT_CIGAM)) { + error = -1; + goto bad; + } + + /* Look up our preferred architecture in the fat file. */ + lret = fatfile_getarch_affinity(imgp->ip_vp, + (vm_offset_t)fat_header, + &fat_arch, + (p->p_flag & P_AFFINITY)); + if (lret != LOAD_SUCCESS) { + error = load_return_to_errno(lret); + goto bad; + } + + /* Read the Mach-O header out of it */ + error = vn_rdwr(UIO_READ, imgp->ip_vp, imgp->ip_vdata, + PAGE_SIZE, fat_arch.offset, + UIO_SYSSPACE32, (IO_UNIT|IO_NODELOCKED), + cred, &resid, p); + if (error) { + goto bad; + } + + /* Did we read a complete header? */ + if (resid) { + error = EBADEXEC; + goto bad; + } + + /* Success. Indicate we have identified an encapsulated binary */ + error = -2; + imgp->ip_arch_offset = (user_size_t)fat_arch.offset; + imgp->ip_arch_size = (user_size_t)fat_arch.size; + +bad: + kauth_cred_unref(&cred); + return (error); +} + +/* + * exec_mach_imgact + * + * Image activator for mach-o 1.0 binaries. + * + * Important: This image activator is NOT byte order neutral. + */ +static int +exec_mach_imgact(struct image_params *imgp) { - ((struct execve_args *)args)->envp = NULL; - return (execve(p, args, retval)); + struct mach_header *mach_header = (struct mach_header *)imgp->ip_vdata; + kauth_cred_t cred = vfs_context_ucred(imgp->ip_vfs_context); + struct proc *p = vfs_context_proc(imgp->ip_vfs_context); + int error = 0; + int vfexec = 0; + task_t task; + task_t new_task; + thread_t thread; + struct uthread *uthread; + vm_map_t old_map = VM_MAP_NULL; + vm_map_t map; + boolean_t clean_regions = FALSE; + load_return_t lret; + load_result_t load_result; + shared_region_mapping_t shared_region, initial_region; +#ifdef IMGPF_POWERPC + int powerpcParent, powerpcImage; +#endif /* IMGPF_POWERPC */ + + /* + * make sure it's a Mach-O 1.0 or Mach-O 2.0 binary; the difference + * is a reserved field on the end, so for the most part, we can + * treat them as if they were identical. + */ + if ((mach_header->magic != MH_MAGIC) && + (mach_header->magic != MH_MAGIC_64)) { + error = -1; + goto bad; + } + + task = current_task(); + thread = current_thread(); + uthread = get_bsdthread_info(thread); + + if (uthread->uu_flag & UT_VFORK) + vfexec = 1; /* Mark in exec */ + + if ((mach_header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64) + imgp->ip_flags |= IMGPF_IS_64BIT; + + if (!grade_binary(mach_header->cputype, mach_header->cpusubtype)) { + error = EBADARCH; + goto bad; + } + + /* + * Copy in arguments/environment from the old process, if the + * vector is non-NULL (i.e. exec is not being called from + * load_init_program(), as a special case, at system startup). 
+ */ + if (imgp->ip_user_argv != 0LL) { + error = exec_extract_strings(imgp); + if (error) + goto bad; + } + + /* + * Hack for binary compatability; put three NULs on the end of the + * string area, and round it up to the next word boundary. This + * ensures padding with NULs to the boundary. + */ + imgp->ip_strendp[0] = 0; + imgp->ip_strendp[1] = 0; + imgp->ip_strendp[2] = 0; + imgp->ip_strendp += (((imgp->ip_strendp - imgp->ip_strings) + NBPW-1) & ~(NBPW-1)); + +#ifdef IMGPF_POWERPC + /* + * XXX + * + * Should be factored out; this is here because we might be getting + * invoked this way as the result of a shell script, and the check + * in exec_check_permissions() is not interior to the jump back up + * to the "encapsulated_binary:" label in execve(). + */ + if (imgp->ip_vattr->va_fsid == exec_archhandler_ppc.fsid && + imgp->ip_vattr->va_fileid == (uint64_t)((u_long)exec_archhandler_ppc.fileid)) { + imgp->ip_flags |= IMGPF_POWERPC; + } +#endif /* IMGPF_POWERPC */ + + if (vfexec) { + kern_return_t result; + + result = task_create_internal(task, FALSE, (imgp->ip_flags & IMGPF_IS_64BIT), &new_task); + if (result != KERN_SUCCESS) + printf("execve: task_create failed. Code: 0x%x\n", result); + p->task = new_task; + set_bsdtask_info(new_task, p); + if (p->p_nice != 0) + resetpriority(p); + map = get_task_map(new_task); + + if (imgp->ip_flags & IMGPF_IS_64BIT) + vm_map_set_64bit(map); + else + vm_map_set_32bit(map); + + result = thread_create(new_task, &imgp->ip_vfork_thread); + if (result != KERN_SUCCESS) + printf("execve: thread_create failed. Code: 0x%x\n", result); + /* reset local idea of task, thread, uthread */ + task = new_task; + thread = imgp->ip_vfork_thread; + uthread = get_bsdthread_info(thread); + } else { + map = VM_MAP_NULL; + } + + /* + * We set these flags here; this is OK, since if we fail after + * this point, we have already destroyed the parent process anyway. + */ + if (imgp->ip_flags & IMGPF_IS_64BIT) { + task_set_64bit(task, TRUE); + p->p_flag |= P_LP64; + } else { + task_set_64bit(task, FALSE); + p->p_flag &= ~P_LP64; + } + + /* + * Load the Mach-O file. + */ +/* LP64 - remove following "if" statement after osfmk/vm/task_working_set.c */ +if((imgp->ip_flags & IMGPF_IS_64BIT) == 0) + if(imgp->ip_tws_cache_name) { + tws_handle_startup_file(task, kauth_cred_getuid(cred), + imgp->ip_tws_cache_name, imgp->ip_vp, &clean_regions); + } + + vm_get_shared_region(task, &initial_region); + +#ifdef IMGPF_POWERPC + /* + * If we are transitioning to/from powerpc, then we need to do extra + * work here. + */ + powerpcParent = (p->p_flag & P_TRANSLATED) ? 1 : 0; + powerpcImage = (imgp->ip_flags & IMGPF_POWERPC) ? 1 : 0; + + if (powerpcParent ^ powerpcImage) { + cpu_type_t cpu = (powerpcImage ? CPU_TYPE_POWERPC : cpu_type()); + struct vnode *rootDir = p->p_fd->fd_rdir; + + shared_region = lookup_default_shared_region((int)rootDir, cpu); + if (shared_region == NULL) { + shared_region_mapping_t old_region; + shared_region_mapping_t new_region; + vm_get_shared_region(current_task(), &old_region); + /* grrrr... 
this sets current_task(), not task + * -- they're different (usually) + */ + shared_file_boot_time_init((int)rootDir,cpu); + if ( current_task() != task ) { + vm_get_shared_region(current_task(),&new_region); + vm_set_shared_region(task,new_region); + vm_set_shared_region(current_task(),old_region); + } + } else { + vm_set_shared_region(task, shared_region); + } + shared_region_mapping_dealloc(initial_region); + } else +#endif /* IMGPF_POWERPC */ + + { + struct shared_region_task_mappings map_info; + shared_region_mapping_t next; + + shared_region_mapping_info(initial_region, + &map_info.text_region, + &map_info.text_size, + &map_info.data_region, + &map_info.data_size, + &map_info.region_mappings, + &map_info.client_base, + &map_info.alternate_base, + &map_info.alternate_next, + &map_info.fs_base, + &map_info.system, + &map_info.flags, + &next); + if (map_info.flags & SHARED_REGION_STANDALONE) { + /* + * We were using a private shared region. + * Try and get back to a system-wide shared region + * with matching "fs_base" (for chroot) and "system" + * (for CPU type). + */ + shared_region = lookup_default_shared_region( + map_info.fs_base, + map_info.system); + if (shared_region == NULL) { + /* + * No system-wide default regions, stick to + * our private region... + */ + } else { + SHARED_REGION_TRACE( + SHARED_REGION_TRACE_INFO, + ("shared_region: %p [%d(%s)] " + "exec(\"%s\"): " + "moving from private %p[%x,%x,%x] " + "to default %p\n", + current_thread(), + p->p_pid, p->p_comm, + (imgp->ip_p_comm[0] ? + imgp->ip_p_comm : + imgp->ip_ndp->ni_cnd.cn_nameptr), + initial_region, + map_info.fs_base, + map_info.system, + map_info.flags, + shared_region)); + vm_set_shared_region(task, shared_region); + shared_region_mapping_dealloc(initial_region); + } + } + } + + /* + * NOTE: An error after this point indicates we have potentially + * destroyed or overwrote some process state while attempting an + * execve() following a vfork(), which is an unrecoverable condition. + */ + + /* + * We reset the task to 64-bit (or not) here. It may have picked up + * a new map, and we need that to reflect its true 64-bit nature. + */ + + task_set_64bit(task, + ((imgp->ip_flags & IMGPF_IS_64BIT) == IMGPF_IS_64BIT)); + + /* + * Actually load the image file we previously decided to load. + */ + lret = load_machfile(imgp, mach_header, thread, map, clean_regions, &load_result); + + if (lret != LOAD_SUCCESS) { + error = load_return_to_errno(lret); + goto badtoolate; + } + + /* load_machfile() maps the vnode */ + (void)ubc_map(imgp->ip_vp, PROT_EXEC); + + /* + * deal with set[ug]id. + */ + error = exec_handle_sugid(imgp); + + KNOTE(&p->p_klist, NOTE_EXEC); + + if (!vfexec && (p->p_flag & P_TRACED)) + psignal(p, SIGTRAP); + + if (error) { + goto badtoolate; + } + vnode_put(imgp->ip_vp); + imgp->ip_vp = NULL; + + if (load_result.unixproc && + create_unix_stack(get_task_map(task), + load_result.user_stack, load_result.customstack, p)) { + error = load_return_to_errno(LOAD_NOSPACE); + goto badtoolate; + } + + if (vfexec) { + old_map = vm_map_switch(get_task_map(task)); + } + + if (load_result.unixproc) { + user_addr_t ap; + + /* + * Copy the strings area out into the new process address + * space. 
+ */ + ap = p->user_stack; + error = exec_copyout_strings(imgp, &ap); + if (error) { + if (vfexec) + vm_map_switch(old_map); + goto badtoolate; + } + /* Set the stack */ + thread_setuserstack(thread, ap); + } + + if (load_result.dynlinker) { + uint64_t ap; + + /* Adjust the stack */ + if (imgp->ip_flags & IMGPF_IS_64BIT) { + ap = thread_adjuserstack(thread, -8); + error = copyoutptr(load_result.mach_header, ap, 8); + } else { + ap = thread_adjuserstack(thread, -4); + error = suword(ap, load_result.mach_header); + } + if (error) { + if (vfexec) + vm_map_switch(old_map); + goto badtoolate; + } + } + + if (vfexec) { + vm_map_switch(old_map); + } + /* Set the entry point */ + thread_setentrypoint(thread, load_result.entry_point); + + /* Stop profiling */ + stopprofclock(p); + + /* + * Reset signal state. + */ + execsigs(p, thread); + + /* + * Close file descriptors + * which specify close-on-exec. + */ + fdexec(p); + + /* + * need to cancel async IO requests that can be cancelled and wait for those + * already active. MAY BLOCK! + */ + _aio_exec( p ); + + /* FIXME: Till vmspace inherit is fixed: */ + if (!vfexec && p->vm_shm) + shmexec(p); + /* Clean up the semaphores */ + semexit(p); + + /* + * Remember file name for accounting. + */ + p->p_acflag &= ~AFORK; + /* If the translated name isn't NULL, then we want to use + * that translated name as the name we show as the "real" name. + * Otherwise, use the name passed into exec. + */ + if (0 != imgp->ip_p_comm[0]) { + bcopy((caddr_t)imgp->ip_p_comm, (caddr_t)p->p_comm, + sizeof(p->p_comm)); + } else { + if (imgp->ip_ndp->ni_cnd.cn_namelen > MAXCOMLEN) + imgp->ip_ndp->ni_cnd.cn_namelen = MAXCOMLEN; + bcopy((caddr_t)imgp->ip_ndp->ni_cnd.cn_nameptr, (caddr_t)p->p_comm, + (unsigned)imgp->ip_ndp->ni_cnd.cn_namelen); + p->p_comm[imgp->ip_ndp->ni_cnd.cn_namelen] = '\0'; + } + + if (kdebug_enable) { + long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4; + + /* + * Collect the pathname for tracing + */ + kdbg_trace_string(p, &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4); + + if (vfexec) + { + KERNEL_DEBUG_CONSTANT1((TRACEDBG_CODE(DBG_TRACE_DATA, 2)) | DBG_FUNC_NONE, + p->p_pid ,0,0,0, (unsigned int)thread); + KERNEL_DEBUG_CONSTANT1((TRACEDBG_CODE(DBG_TRACE_STRING, 2)) | DBG_FUNC_NONE, + dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, (unsigned int)thread); + } + else + { + KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_DATA, 2)) | DBG_FUNC_NONE, + p->p_pid ,0,0,0,0); + KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_STRING, 2)) | DBG_FUNC_NONE, + dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0); + } + } + +#ifdef IMGPF_POWERPC + /* + * Mark the process as powerpc or not. If powerpc, set the affinity + * flag, which will be used for grading binaries in future exec's + * from the process. + */ + if (((imgp->ip_flags & IMGPF_POWERPC) != 0)) + p->p_flag |= P_TRANSLATED; + else +#endif /* IMGPF_POWERPC */ + p->p_flag &= ~P_TRANSLATED; + p->p_flag &= ~P_AFFINITY; + + /* + * mark as execed, wakeup the process that vforked (if any) and tell + * it that it now has it's own resources back + */ + p->p_flag |= P_EXEC; + if (p->p_pptr && (p->p_flag & P_PPWAIT)) { + p->p_flag &= ~P_PPWAIT; + wakeup((caddr_t)p->p_pptr); + } + + if (vfexec && (p->p_flag & P_TRACED)) { + psignal_vfork(p, new_task, thread, SIGTRAP); + } + +badtoolate: + if (vfexec) { + task_deallocate(new_task); + thread_deallocate(thread); + if (error) + error = 0; + } + +bad: + return(error); } + + + +/* + * Our image activator table; this is the table of the image types we are + * capable of loading. 
We list them in order of preference to ensure the + * fastest image load speed. + * + * XXX hardcoded, for now; should use linker sets + */ +struct execsw { + int (*ex_imgact)(struct image_params *); + const char *ex_name; +} execsw[] = { + { exec_mach_imgact, "Mach-o Binary" }, + { exec_fat_imgact, "Fat Binary" }, +#ifdef IMGPF_POWERPC + { exec_powerpc32_imgact, "PowerPC binary" }, +#endif /* IMGPF_POWERPC */ + { exec_shell_imgact, "Interpreter Script" }, + { NULL, NULL} +}; + + +/* + * TODO: Dynamic linker header address on stack is copied via suword() + */ /* ARGSUSED */ int -execve(p, uap, retval) - register struct proc *p; - register struct execve_args *uap; - register_t *retval; +execve(struct proc *p, struct execve_args *uap, register_t *retval) { - register struct ucred *cred = p->p_ucred; - register struct filedesc *fdp = p->p_fd; - register nc; - register char *cp; - int na, ne, ucp, ap, cc; - unsigned len; - int indir; - char *sharg; - char *execnamep; - struct vnode *vp; - struct vattr vattr; - struct vattr origvattr; - vm_offset_t execargs; + kauth_cred_t cred = kauth_cred_proc_ref(p); + struct image_params image_params, *imgp; + struct vnode_attr va; + struct vnode_attr origva; struct nameidata nd; - struct ps_strings ps; -#define SHSIZE 512 - char cfarg[SHSIZE]; - boolean_t is_fat; - kern_return_t ret; - struct mach_header *mach_header; - struct fat_header *fat_header; - struct fat_arch fat_arch; - load_return_t lret; - load_result_t load_result; struct uthread *uthread; - vm_map_t old_map; - vm_map_t map; int i; - boolean_t clean_regions = FALSE; - union { - /* #! and name of interpreter */ - char ex_shell[SHSIZE]; - /* Mach-O executable */ - struct mach_header mach_header; - /* Fat executable */ - struct fat_header fat_header; - char pad[512]; - } exdata; int resid, error; - char *savedpath; - int savedpathlen = 0; - vm_offset_t *execargsp; - char *cpnospace; task_t task; - task_t new_task; - thread_act_t thr_act; int numthreads; int vfexec=0; - unsigned long arch_offset =0; - unsigned long arch_size = 0; - char *ws_cache_name = NULL; /* used for pre-heat */ + int once = 1; /* save SGUID-ness for interpreted files */ + char alt_p_comm[sizeof(p->p_comm)] = {0}; /* for PowerPC */ + int is_64 = IS_64BIT_PROCESS(p); + int seg = (is_64 ? UIO_USERSPACE64 : UIO_USERSPACE32); + struct vfs_context context; + + context.vc_proc = p; + context.vc_ucred = cred; /* XXX must NOT be kauth_cred_get() */ + + + imgp = &image_params; + + /* Initialize the common data in the image_params structure */ + bzero(imgp, sizeof(*imgp)); + imgp->ip_user_fname = uap->fname; + imgp->ip_user_argv = uap->argp; + imgp->ip_user_envv = uap->envp; + imgp->ip_vattr = &va; + imgp->ip_origvattr = &origva; + imgp->ip_vfs_context = &context; + imgp->ip_flags = (is_64 ? IMGPF_WAS_64BIT : IMGPF_NONE); + imgp->ip_tws_cache_name = NULL; + imgp->ip_p_comm = alt_p_comm; /* for PowerPC */ + + /* + * XXXAUDIT: Currently, we only audit the pathname of the binary. + * There may also be poor interaction with dyld. 
+ */ task = current_task(); - thr_act = current_act(); - uthread = get_bsdthread_info(thr_act); + uthread = get_bsdthread_info(current_thread()); - if (uthread->uu_flag & P_VFORK) { + if (uthread->uu_flag & UT_VFORK) { vfexec = 1; /* Mark in exec */ } else { if (task != kernel_task) { numthreads = get_task_numacts(task); - if (numthreads <= 0 ) + if (numthreads <= 0 ) { + kauth_cred_unref(&cred); return(EINVAL); + } if (numthreads > 1) { - return(EOPNOTSUPP); + kauth_cred_unref(&cred); + return(ENOTSUP); } } } - error = execargs_alloc(&execargs); - if (error) + error = execargs_alloc(imgp); + if (error) { + kauth_cred_unref(&cred); return(error); - - savedpath = execargs; - + } /* - * To support new app package launching for Mac OS X, the dyld - * needs the first argument to execve() stored on the user stack. - * Copyin the "path" at the begining of the "execargs" buffer - * allocated above. - * - * We have to do this before namei() because in case of - * symbolic links, namei() would overwrite the original "path". - * In case the last symbolic link resolved was a relative pathname - * we would lose the original "path", which could be an - * absolute pathname. This might be unacceptable for dyld. + * XXXAUDIT: Note: the double copyin introduces an audit + * race. To correct this race, we must use a single + * copyin(), e.g. by passing a flag to namei to indicate an + * external path buffer is being used. */ - /* XXX We could optimize to avoid copyinstr in the namei() */ - - error = copyinstr(uap->fname, savedpath, MAXPATHLEN, &savedpathlen); - if (error) - return (error); + error = exec_save_path(imgp, uap->fname, seg); + if (error) { + execargs_free(imgp); + kauth_cred_unref(&cred); + return(error); + } + /* - * copyinstr will put in savedpathlen, the count of - * characters (including NULL) in the path. 
+ * No app profiles under chroot */ - - if(app_profile != 0) { + if((p->p_fd->fd_rdir == NULLVP) && (app_profile != 0)) { /* grab the name of the file out of its path */ /* we will need this for lookup within the */ /* name file */ - ws_cache_name = savedpath + savedpathlen; - while (ws_cache_name[0] != '/') { - if(ws_cache_name == savedpath) { - ws_cache_name--; + /* Scan backwards for the first '/' or start of string */ + imgp->ip_tws_cache_name = imgp->ip_strendp; + while (imgp->ip_tws_cache_name[0] != '/') { + if(imgp->ip_tws_cache_name == imgp->ip_strings) { + imgp->ip_tws_cache_name--; break; } - ws_cache_name--; + imgp->ip_tws_cache_name--; } - ws_cache_name++; + imgp->ip_tws_cache_name++; } - - /* Save the name aside for future use */ - execargsp = (vm_offset_t *)((char *)(execargs) + savedpathlen); - - NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | SAVENAME, - UIO_USERSPACE, uap->fname, p); - if ((error = namei(&nd))) - goto bad1; - vp = nd.ni_vp; - VOP_LEASE(vp, p, p->p_ucred, LEASE_READ); - - if ((error = VOP_GETATTR(vp, &origvattr, p->p_ucred, p))) + NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, + seg, uap->fname, imgp->ip_vfs_context); + +again: + error = namei(&nd); + if (error) goto bad; + imgp->ip_ndp = &nd; /* successful namei(); call nameidone() later */ + imgp->ip_vp = nd.ni_vp; /* if set, need to vnode_put() at some point */ - /* Check mount point */ - if (vp->v_mount->mnt_flag & MNT_NOEXEC) { - error = EACCES; + error = exec_check_permissions(imgp); + if (error) goto bad; - } - indir = 0; - if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_flag & P_TRACED)) - origvattr.va_mode &= ~(VSUID | VSGID); - - *(&vattr) = *(&origvattr); + /* Copy; avoid invocation of an interpreter overwriting the original */ + if (once) { + once = 0; + origva = va; + } -again: - error = check_exec_access(p, vp, &vattr); + error = vn_rdwr(UIO_READ, imgp->ip_vp, imgp->ip_vdata, PAGE_SIZE, 0, + UIO_SYSSPACE32, IO_NODELOCKED, cred, &resid, p); if (error) goto bad; + +encapsulated_binary: + error = -1; + for(i = 0; error == -1 && execsw[i].ex_imgact != NULL; i++) { + + error = (*execsw[i].ex_imgact)(imgp); + + switch (error) { + /* case -1: not claimed: continue */ + case -2: /* Encapsulated binary */ + goto encapsulated_binary; + + case -3: /* Interpreter */ + vnode_put(imgp->ip_vp); + imgp->ip_vp = NULL; /* already put */ + nd.ni_cnd.cn_nameiop = LOOKUP; + nd.ni_cnd.cn_flags = (nd.ni_cnd.cn_flags & HASBUF) | + (FOLLOW | LOCKLEAF); + +#ifdef IMGPF_POWERPC + /* + * PowerPC does not follow symlinks because the + * code which sets exec_archhandler_ppc.fsid and + * exec_archhandler_ppc.fileid doesn't follow them. + */ + if (imgp->ip_flags & IMGPF_POWERPC) + nd.ni_cnd.cn_flags &= ~FOLLOW; +#endif /* IMGPF_POWERPC */ + + nd.ni_segflg = UIO_SYSSPACE32; + nd.ni_dirp = CAST_USER_ADDR_T(imgp->ip_interp_name); + goto again; - /* - * Read in first few bytes of file for segment sizes, magic number: - * 407 = plain executable - * 410 = RO text - * 413 = demand paged RO text - * Also an ASCII line beginning with #! is - * the file name of a ``shell'' and arguments may be prepended - * to the argument list if given here. - * - * SHELL NAMES ARE LIMITED IN LENGTH. - * - * ONLY ONE ARGUMENT MAY BE PASSED TO THE SHELL FROM - * THE ASCII LINE. + default: + break; + } + } + + /* call out to allow 3rd party notification of exec. + * Ignore result of kauth_authorize_fileop call. 
*/ + if (error == 0 && kauth_authorize_fileop_has_listeners()) { + kauth_authorize_fileop(vfs_context_ucred(&context), KAUTH_FILEOP_EXEC, + (uintptr_t)nd.ni_vp, 0); + } + + /* Image not claimed by any activator? */ + if (error == -1) + error = ENOEXEC; - exdata.ex_shell[0] = '\0'; /* for zero length files */ +bad: + kauth_cred_unref(&cred); + + if (imgp->ip_ndp) + nameidone(imgp->ip_ndp); + if (imgp->ip_vp) + vnode_put(imgp->ip_vp); + if (imgp->ip_strings) + execargs_free(imgp); + if (!error && vfexec) { + vfork_return(current_thread(), p->p_pptr, p, retval); + (void)thread_resume(imgp->ip_vfork_thread); + return(0); + } + return(error); +} - error = vn_rdwr(UIO_READ, vp, (caddr_t)&exdata, sizeof (exdata), 0, - UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p); - if (error) - goto bad; +static int +copyinptr(user_addr_t froma, user_addr_t *toptr, int ptr_size) +{ + int error; -#ifndef lint - if (resid > sizeof(exdata) - min(sizeof(exdata.mach_header), - sizeof(exdata.fat_header)) - && exdata.ex_shell[0] != '#') { - error = ENOEXEC; - goto bad; + if (ptr_size == 4) { + /* 64 bit value containing 32 bit address */ + unsigned int i; + + error = copyin(froma, &i, 4); + *toptr = CAST_USER_ADDR_T(i); /* SAFE */ + } else { + error = copyin(froma, toptr, 8); } -#endif /* lint */ - mach_header = &exdata.mach_header; - fat_header = &exdata.fat_header; - if (mach_header->magic == MH_MAGIC) - is_fat = FALSE; - else if (fat_header->magic == FAT_MAGIC || - fat_header->magic == FAT_CIGAM) - is_fat = TRUE; - else if (mach_header->magic == MH_CIGAM) { - error = EBADARCH; - goto bad; + return (error); +} + + +static int +copyoutptr(user_addr_t ua, user_addr_t ptr, int ptr_size) +{ + int error; + + if (ptr_size == 4) { + /* 64 bit value containing 32 bit address */ + unsigned int i = CAST_DOWN(unsigned int,ua); /* SAFE */ + + error = copyout(&i, ptr, 4); } else { - if (exdata.ex_shell[0] != '#' || - exdata.ex_shell[1] != '!' || - indir) { - error = ENOEXEC; - goto bad; - } - cp = &exdata.ex_shell[2]; /* skip "#!" */ - while (cp < &exdata.ex_shell[SHSIZE]) { - if (*cp == '\t') - *cp = ' '; - else if (*cp == '\n') { - *cp = '\0'; - break; - } - cp++; - } - if (*cp != '\0') { - error = ENOEXEC; - goto bad; - } - cp = &exdata.ex_shell[2]; - while (*cp == ' ') - cp++; - execnamep = cp; - while (*cp && *cp != ' ') - cp++; - cfarg[0] = '\0'; - cpnospace = cp; - if (*cp) { - *cp++ = '\0'; - while (*cp == ' ') - cp++; - if (*cp) - bcopy((caddr_t)cp, (caddr_t)cfarg, SHSIZE); - } + error = copyout(&ua, ptr, 8); + } + return (error); +} - /* - * Support for new app package launching for Mac OS X. - * We are about to retry the execve() by changing the path to the - * interpreter name. Need to re-initialize the savedpath and - * savedpathlen. +1 for NULL. - */ - savedpathlen = (cpnospace - execnamep + 1); - error = copystr(execnamep, savedpath, savedpathlen, &savedpathlen); - if (error) - goto bad; - /* Save the name aside for future use */ - execargsp = (vm_offset_t *)((char *)(execargs) + savedpathlen); - - indir = 1; - vput(vp); - nd.ni_cnd.cn_nameiop = LOOKUP; - nd.ni_cnd.cn_flags = (nd.ni_cnd.cn_flags & HASBUF) | - (FOLLOW | LOCKLEAF | SAVENAME); - nd.ni_segflg = UIO_SYSSPACE; - nd.ni_dirp = execnamep; - if ((error = namei(&nd))) - goto bad1; - vp = nd.ni_vp; - VOP_LEASE(vp, p, cred, LEASE_READ); - if ((error = VOP_GETATTR(vp, &vattr, p->p_ucred, p))) - goto bad; - goto again; +/* + * exec_copyout_strings + * + * Copy out the strings segment to user space. 
The strings segment is put + * on a preinitialized stack frame. + * + * Parameters: struct image_params * the image parameter block + * int * a pointer to the stack offset variable + * + * Returns: 0 Success + * !0 Faiure: errno + * + * Implicit returns: + * (*stackp) The stack offset, modified + * + * Note: The strings segment layout is backward, from the beginning + * of the top of the stack to consume the minimal amount of + * space possible; the returned stack pointer points to the + * end of the area consumed (stacks grow upward). + * + * argc is an int; arg[i] are pointers; env[i] are pointers; + * exec_path is a pointer; the 0's are (void *)NULL's + * + * The stack frame layout is: + * + * +-------------+ + * sp-> | argc | + * +-------------+ + * | arg[0] | + * +-------------+ + * : + * : + * +-------------+ + * | arg[argc-1] | + * +-------------+ + * | 0 | + * +-------------+ + * | env[0] | + * +-------------+ + * : + * : + * +-------------+ + * | env[n] | + * +-------------+ + * | 0 | + * +-------------+ + * | exec_path | In MacOS X PR2 Beaker2E the path passed to exec() is + * +-------------+ passed on the stack just after the trailing 0 of the + * | 0 | the envp[] array as a pointer to a string. + * +-------------+ + * | PATH AREA | + * +-------------+ + * | STRING AREA | + * : + * : + * | | <- p->user_stack + * +-------------+ + * + * Although technically a part of the STRING AREA, we treat the PATH AREA as + * a separate entity. This allows us to align the beginning of the PATH AREA + * to a pointer boundary so that the exec_path, env[i], and argv[i] pointers + * which preceed it on the stack are properly aligned. + * + * TODO: argc copied with suword(), which takes a 64 bit address + */ +static int +exec_copyout_strings(struct image_params *imgp, user_addr_t *stackp) +{ + struct proc *p = vfs_context_proc(imgp->ip_vfs_context); + int ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT) ? 8 : 4; + char *argv = imgp->ip_argv; /* modifiable copy of argv */ + user_addr_t string_area; /* *argv[], *env[] */ + user_addr_t path_area; /* package launch path */ + user_addr_t ptr_area; /* argv[], env[], exec_path */ + user_addr_t stack; + int stringc = imgp->ip_argc + imgp->ip_envc; + int len; + int error; + int strspace; + + stack = *stackp; + + unsigned patharea_len = imgp->ip_argv - imgp->ip_strings; + int envc_add = 0; + +#ifdef IMGPF_POWERPC + /* + * oah750 expects /usr/lib/dyld\0 as the start of the program name. + * It also expects to have a certain environment variable set to 0. + * 50 bytes for each to ensure we have enough space without having + * to count every byte. + */ + char *progname, *envvar; + char progname_str[] = "/usr/lib/dyld"; + char envvar_str[] = "OAH750_CFG_FU_STACK_SIZE=0"; + + if (imgp->ip_flags & IMGPF_POWERPC) { + progname = progname_str; + envvar = envvar_str; + patharea_len += strlen(progname) + strlen(envvar) + 2; + envc_add = 1; } +#endif /* IMGPF_POWERPC */ + /* + * Set up pointers to the beginning of the string area, the beginning + * of the path area, and the beginning of the pointer area (actually, + * the location of argc, an int, which may be smaller than a pointer, + * but we use ptr_size worth of space for it, for alignment). 
+ */ + string_area = stack - (((imgp->ip_strendp - imgp->ip_strings) + ptr_size-1) & ~(ptr_size-1)) - ptr_size; + path_area = string_area - ((patharea_len + ptr_size-1) & ~(ptr_size-1)); + ptr_area = path_area - ((imgp->ip_argc + imgp->ip_envc + 4 + envc_add) * ptr_size) - ptr_size /*argc*/; + + /* Return the initial stack address: the location of argc */ + *stackp = ptr_area; /* - * Collect arguments on "file" in swap space. + * Record the size of the arguments area so that sysctl_procargs() + * can return the argument area without having to parse the arguments. */ - na = 0; - ne = 0; - nc = 0; - cc = 0; + p->p_argc = imgp->ip_argc; + p->p_argslen = (int)(stack - path_area); + + /* * Support for new app package launching for Mac OS X allocates - * the "path" at the begining. - * execargs get allocated after that + * the "path" at the begining of the imgp->ip_strings buffer. + * copy it just before the string area. */ - cp = (char *) execargsp; /* running pointer for copy */ + len = 0; +#ifdef IMGPF_POWERPC + if (imgp->ip_flags & IMGPF_POWERPC) { + error = copyoutstr(progname, path_area, + patharea_len, + (size_t *)&len); + if (error) + goto bad; + error = copyoutstr(imgp->ip_strings, path_area + strlen(progname) + 1, + patharea_len, + (size_t *)&len); + } else +#endif /* IMGPF_POWERPC */ + error = copyoutstr(imgp->ip_strings, path_area, + patharea_len, + (size_t *)&len); + if (error) + goto bad; + + + /* Save a NULL pointer below it */ + (void)copyoutptr(0LL, path_area - ptr_size, ptr_size); + + /* Save the pointer to "path" just below it */ + (void)copyoutptr(path_area, path_area - 2*ptr_size, ptr_size); + /* - * size of execargs less sizeof "path", - * a pointer to "path" and a NULL poiter + * ptr_size for 2 NULL one each ofter arg[argc -1] and env[n] + * ptr_size for argc + * skip over saved path, ptr_size for pointer to path, + * and ptr_size for the NULL after pointer to path. */ - cc = NCARGS - savedpathlen - 2*NBPW; + + /* argc (int32, stored in a ptr_size area) */ + (void)suword(ptr_area, imgp->ip_argc); + ptr_area += sizeof(int); + /* pad to ptr_size, if 64 bit image, to ensure user stack alignment */ + if (imgp->ip_flags & IMGPF_IS_64BIT) { + (void)suword(ptr_area, 0); /* int, not long: ignored */ + ptr_area += sizeof(int); + } + + /* - * Copy arguments into file in argdev area. + * We use (string_area - path_area) here rather than the more + * intuitive (imgp->ip_argv - imgp->ip_strings) because we are + * interested in the length of the PATH_AREA in user space, + * rather than the actual length of the execution path, since + * it includes alignment padding of the PATH_AREA + STRING_AREA + * to a ptr_size boundary. 
*/ - if (uap->argp) for (;;) { - ap = NULL; - sharg = NULL; - if (indir && na == 0) { - sharg = nd.ni_cnd.cn_nameptr; - ap = (int)sharg; - uap->argp++; /* ignore argv[0] */ - } else if (indir && (na == 1 && cfarg[0])) { - sharg = cfarg; - ap = (int)sharg; - } else if (indir && (na == 1 || (na == 2 && cfarg[0]))) - ap = (int)uap->fname; - else if (uap->argp) { - ap = fuword((caddr_t)uap->argp); - uap->argp++; - } - if (ap == NULL && uap->envp) { - uap->argp = NULL; - if ((ap = fuword((caddr_t)uap->envp)) != NULL) - uap->envp++, ne++; + strspace = SIZE_IMG_STRSPACE - (string_area - path_area); + for (;;) { + if (stringc == imgp->ip_envc) { + /* argv[n] = NULL */ + (void)copyoutptr(0LL, ptr_area, ptr_size); + ptr_area += ptr_size; +#ifdef IMGPF_POWERPC + if (envc_add) { + (void)copyoutptr(string_area, ptr_area, ptr_size); + + do { + if (strspace <= 0) { + error = E2BIG; + break; + } + error = copyoutstr(envvar, string_area, + (unsigned)strspace, + (size_t *)&len); + string_area += len; + envvar += len; + strspace -= len; + } while (error == ENAMETOOLONG); + if (error == EFAULT || error == E2BIG) + break; + ptr_area += ptr_size; + } +#endif /* IMGPF_POWERPC */ } - if (ap == NULL) - break; - na++; - if (ap == -1) { - error = EFAULT; + if (--stringc < 0) break; - } + + /* pointer: argv[n]/env[n] */ + (void)copyoutptr(string_area, ptr_area, ptr_size); + + /* string : argv[n][]/env[n][] */ do { - if (nc >= (NCARGS - savedpathlen - 2*NBPW -1)) { + if (strspace <= 0) { error = E2BIG; break; } - if (sharg) { - error = copystr(sharg, cp, (unsigned)cc, &len); - sharg += len; - } else { - error = copyinstr((caddr_t)ap, cp, (unsigned)cc, - &len); - ap += len; - } - cp += len; - nc += len; - cc -= len; + error = copyoutstr(argv, string_area, + (unsigned)strspace, + (size_t *)&len); + string_area += len; + argv += len; + strspace -= len; } while (error == ENAMETOOLONG); - if (error) { - goto bad; - } + if (error == EFAULT || error == E2BIG) + break; /* bad stack - user's problem */ + ptr_area += ptr_size; } - nc = (nc + NBPW-1) & ~(NBPW-1); + /* env[n] = NULL */ + (void)copyoutptr(0LL, ptr_area, ptr_size); + +bad: + return(error); +} + + +/* + * exec_extract_strings + * + * Copy arguments and environment from user space into work area; we may + * have already copied some early arguments into the work area, and if + * so, any arguments opied in are appended to those already there. + * + * Parameters: struct image_params * the image parameter block + * + * Returns: 0 Success + * !0 Failure: errno + * + * Implicit returns; + * (imgp->ip_argc) Count of arguments, updated + * (imgp->ip_envc) Count of environment strings, updated + * + * + * Notes: The argument and environment vectors are user space pointers + * to arrays of user space pointers. + */ +static int +exec_extract_strings(struct image_params *imgp) +{ + int error = 0; + struct proc *p = vfs_context_proc(imgp->ip_vfs_context); + int seg = (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32); + int ptr_size = (imgp->ip_flags & IMGPF_WAS_64BIT) ? 8 : 4; + user_addr_t argv = imgp->ip_user_argv; + user_addr_t envv = imgp->ip_user_envv; + + /* Now, get rest of arguments */ /* - * If we have a fat file, find "our" executable. + * If we are running an interpreter, replace the av[0] that was + * passed to execve() with the fully qualified path name that was + * passed to execve() for interpreters which do not use the PATH + * to locate their script arguments. */ - if (is_fat) { - /* - * Look up our architecture in the fat file. 
- */ - lret = fatfile_getarch(vp, (vm_offset_t)fat_header, &fat_arch); - if (lret != LOAD_SUCCESS) { - error = load_return_to_errno(lret); + if((imgp->ip_flags & IMGPF_INTERPRET) != 0 && argv != 0LL) { + user_addr_t arg; + + error = copyinptr(argv, &arg, ptr_size); + if (error) goto bad; + if (arg != 0LL && arg != (user_addr_t)-1) { + argv += ptr_size; + error = exec_add_string(imgp, imgp->ip_user_fname, seg); + if (error) + goto bad; + imgp->ip_argc++; } - /* Read the Mach-O header out of it */ - error = vn_rdwr(UIO_READ, vp, (caddr_t)&exdata.mach_header, - sizeof (exdata.mach_header), - fat_arch.offset, - UIO_SYSSPACE, (IO_UNIT|IO_NODELOCKED), cred, &resid, p); + } - if (error) { + while (argv != 0LL) { + user_addr_t arg; + + error = copyinptr(argv, &arg, ptr_size); + if (error) goto bad; - } - /* Did we read a complete header? */ - if (resid) { - error = EBADEXEC; + argv += ptr_size; + if (arg == 0LL) { + break; + } else if (arg == (user_addr_t)-1) { + /* Um... why would it be -1? */ + error = EFAULT; goto bad; } + /* + * av[n...] = arg[n] + */ + error = exec_add_string(imgp, arg, seg); + if (error) + goto bad; + imgp->ip_argc++; + } - /* Is what we found a Mach-O executable */ - if (mach_header->magic != MH_MAGIC) { - error = ENOEXEC; + /* Now, get the environment */ + while (envv != 0LL) { + user_addr_t env; + + error = copyinptr(envv, &env, ptr_size); + if (error) goto bad; - } - arch_offset = fat_arch.offset; - arch_size = fat_arch.size; - } else { + envv += ptr_size; + if (env == 0LL) { + break; + } else if (env == (user_addr_t)-1) { + error = EFAULT; + goto bad; + } /* - * Load the Mach-O file. - */ - arch_offset = 0; - arch_size = (u_long)vattr.va_size; + * av[n...] = env[n] + */ + error = exec_add_string(imgp, env, seg); + if (error) + goto bad; + imgp->ip_envc++; } +bad: + return error; +} - if (vfexec) { - kern_return_t result; - result = task_create_local(task, FALSE, FALSE, &new_task); - if (result != KERN_SUCCESS) - printf("execve: task_create failed. Code: 0x%x\n", result); - p->task = new_task; - set_bsdtask_info(new_task, p); - if (p->p_nice != 0) - resetpriority(p); - task = new_task; - map = get_task_map(new_task); - result = thread_create(new_task, &thr_act); - if (result != KERN_SUCCESS) - printf("execve: thread_create failed. Code: 0x%x\n", result); - uthread = get_bsdthread_info(thr_act); - } else { - map = VM_MAP_NULL; +#define unix_stack_size(p) (p->p_rlimit[RLIMIT_STACK].rlim_cur) - } +static int +exec_check_permissions(struct image_params *imgp) +{ + struct vnode *vp = imgp->ip_vp; + struct vnode_attr *vap = imgp->ip_vattr; + struct proc *p = vfs_context_proc(imgp->ip_vfs_context); + int error; + kauth_action_t action; + + /* Only allow execution of regular files */ + if (!vnode_isreg(vp)) + return (EACCES); + + /* Get the file attributes that we will be using here and elsewhere */ + VATTR_INIT(vap); + VATTR_WANTED(vap, va_uid); + VATTR_WANTED(vap, va_gid); + VATTR_WANTED(vap, va_mode); + VATTR_WANTED(vap, va_fsid); + VATTR_WANTED(vap, va_fileid); + VATTR_WANTED(vap, va_data_size); + if ((error = vnode_getattr(vp, vap, imgp->ip_vfs_context)) != 0) + return (error); /* - * Load the Mach-O file. + * Ensure that at least one execute bit is on - otherwise root + * will always succeed, and we don't want to happen unless the + * file really is executable. 
*/ - VOP_UNLOCK(vp, 0, p); - if(ws_cache_name) { - tws_handle_startup_file(task, cred->cr_uid, - ws_cache_name, vp, &clean_regions); - } + if ((vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) + return (EACCES); - lret = load_machfile(vp, mach_header, arch_offset, - arch_size, &load_result, thr_act, map, clean_regions); + /* Disallow zero length files */ + if (vap->va_data_size == 0) + return (ENOEXEC); - if (lret != LOAD_SUCCESS) { - error = load_return_to_errno(lret); - goto badtoolate; - } + imgp->ip_arch_offset = (user_size_t)0; + imgp->ip_arch_size = vap->va_data_size; - /* load_machfile() maps the vnode */ - ubc_map(vp); + /* Disable setuid-ness for traced programs or if MNT_NOSUID */ + if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_flag & P_TRACED)) + vap->va_mode &= ~(VSUID | VSGID); + + /* Check for execute permission */ + action = KAUTH_VNODE_EXECUTE; + /* Traced images must also be readable */ + if (p->p_flag & P_TRACED) + action |= KAUTH_VNODE_READ_DATA; + if ((error = vnode_authorize(vp, NULL, action, imgp->ip_vfs_context)) != 0) + return (error); + /* Don't let it run if anyone had it open for writing */ + if (vp->v_writecount) + return (ETXTBSY); + +#ifdef IMGPF_POWERPC /* - * deal with set[ug]id. + * If the file we are about to attempt to load is the exec_handler_ppc, + * which is determined by matching the vattr fields against previously + * cached values, then we set the PowerPC environment flag. */ + if (vap->va_fsid == exec_archhandler_ppc.fsid && + vap->va_fileid == (uint64_t)((u_long)exec_archhandler_ppc.fileid)) { + imgp->ip_flags |= IMGPF_POWERPC; + } +#endif /* IMGPF_POWERPC */ + + /* XXX May want to indicate to underlying FS that vnode is open */ + + return (error); +} + +/* + * exec_handle_sugid + * + * Initially clear the P_SUGID in the process flags; if an SUGID process is + * exec'ing a non-SUGID image, then this is the point of no return. + * + * If the image being activated is SUGI, then replace the credential with a + * copy, disable tracing (unless the tracing process is root), reset the + * mach task port to revoke it, set the P_SUGID bit, + * + * If the saved user and group ID will be changing, then make sure it happens + * to a new credential, rather than a shared one. + * + * Set the security token (this is probably obsolete, given that the token + * should not technically be separate from the credential itself). 
+ * + * Parameters: struct image_params * the image parameter block + * + * Returns: void No failure indication + * + * Implicit returns: + * Potentially modified/replaced + * Potentially revoked + * P_SUGID bit potentially modified + * Potentially modified + */ +static int +exec_handle_sugid(struct image_params *imgp) +{ + kauth_cred_t cred = vfs_context_ucred(imgp->ip_vfs_context); + struct proc *p = vfs_context_proc(imgp->ip_vfs_context); + int i; + int error = 0; + static struct vnode *dev_null = NULLVP; + p->p_flag &= ~P_SUGID; - if (((origvattr.va_mode & VSUID) != 0 && - p->p_ucred->cr_uid != origvattr.va_uid) - || (origvattr.va_mode & VSGID) != 0 && - p->p_ucred->cr_gid != origvattr.va_gid) { - p->p_ucred = crcopy(cred); + + if (((imgp->ip_origvattr->va_mode & VSUID) != 0 && + kauth_cred_getuid(cred) != imgp->ip_origvattr->va_uid) || + ((imgp->ip_origvattr->va_mode & VSGID) != 0 && + cred->cr_gid != imgp->ip_origvattr->va_gid)) { #if KTRACE /* * If process is being ktraced, turn off - unless @@ -567,17 +1693,49 @@ again: struct vnode *tvp = p->p_tracep; p->p_tracep = NULL; p->p_traceflag = 0; - vrele(tvp); + vnode_rele(tvp); } #endif - if (origvattr.va_mode & VSUID) - p->p_ucred->cr_uid = origvattr.va_uid; - if (origvattr.va_mode & VSGID) - p->p_ucred->cr_gid = origvattr.va_gid; + /* + * Replace the credential with a copy of itself if euid or egid change. + */ + if (imgp->ip_origvattr->va_mode & VSUID) { + p->p_ucred = kauth_cred_seteuid(p->p_ucred, imgp->ip_origvattr->va_uid); + } + if (imgp->ip_origvattr->va_mode & VSGID) { + p->p_ucred = kauth_cred_setegid(p->p_ucred, imgp->ip_origvattr->va_gid); + } + + /* + * Have mach reset the task port. We don't want + * anyone who had the task port before a setuid + * exec to be able to access/control the task + * after. + */ + if (current_task() == p->task) + ipc_task_reset(p->task); - set_security_token(p); p->p_flag |= P_SUGID; + /* Cache the vnode for /dev/null the first time around */ + if (dev_null == NULLVP) { + struct nameidata nd1; + + NDINIT(&nd1, LOOKUP, FOLLOW, UIO_SYSSPACE32, + CAST_USER_ADDR_T("/dev/null"), + imgp->ip_vfs_context); + + if ((error = vn_open(&nd1, FREAD, 0)) == 0) { + dev_null = nd1.ni_vp; + /* + * vn_open returns with both a use_count + * and an io_count on the found vnode + * drop the io_count, but keep the use_count + */ + vnode_put(nd1.ni_vp); + } + } + /* Radar 2261856; setuid security hole fix */ /* Patch from OpenBSD: A. Ramesh */ /* @@ -587,261 +1745,77 @@ again: * descriptors in this range which has implied meaning * to libc. 
*/ - for (i = 0; i < 3; i++) { - extern struct fileops vnops; - struct nameidata nd1; - struct file *fp; - int indx; + if (dev_null != NULLVP) { + for (i = 0; i < 3; i++) { + struct fileproc *fp; + int indx; + + if (p->p_fd->fd_ofiles[i] != NULL) + continue; - if (p->p_fd->fd_ofiles[i] == NULL) { if ((error = falloc(p, &fp, &indx)) != 0) continue; - NDINIT(&nd1, LOOKUP, FOLLOW, UIO_SYSSPACE, - "/dev/null", p); - if ((error = vn_open(&nd1, FREAD, 0)) != 0) { - ffree(fp); - p->p_fd->fd_ofiles[indx] = NULL; + + if ((error = vnode_ref_ext(dev_null, FREAD)) != 0) { + fp_free(p, indx, fp); break; } - fp->f_flag = FREAD; - fp->f_type = DTYPE_VNODE; - fp->f_ops = &vnops; - fp->f_data = (caddr_t)nd1.ni_vp; - VOP_UNLOCK(nd1.ni_vp, 0, p); - } - } - } - p->p_cred->p_svuid = p->p_ucred->cr_uid; - p->p_cred->p_svgid = p->p_ucred->cr_gid; - - if (!vfexec && (p->p_flag & P_TRACED)) - psignal(p, SIGTRAP); - - if (error) { - goto badtoolate; - } - VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY, p); - vput(vp); - vp = NULL; - - if (load_result.unixproc && - create_unix_stack(get_task_map(task), - load_result.user_stack, load_result.customstack, p)) { - error = load_return_to_errno(LOAD_NOSPACE); - goto badtoolate; - } - - if (vfexec) { - uthread->uu_ar0 = (void *)get_user_regs(thr_act); - } - - /* - * Copy back arglist if necessary. - */ - - - ucp = p->user_stack; - if (vfexec) { - old_map = vm_map_switch(get_task_map(task)); - } - if (load_result.unixproc) { - int pathptr; - - ucp = ucp - nc - NBPW; /* begining of the STRING AREA */ - - /* - * Support for new app package launching for Mac OS X allocates - * the "path" at the begining of the execargs buffer. - * copy it just before the string area. - */ - savedpathlen = (savedpathlen + NBPW-1) & ~(NBPW-1); - len = 0; - pathptr = ucp - savedpathlen; - error = copyoutstr(savedpath, (caddr_t)pathptr, - (unsigned)savedpathlen, &len); - if (error) { - if (vfexec) - vm_map_switch(old_map); - goto badtoolate; - } - - /* Save a NULL pointer below it */ - (void) suword((caddr_t)(pathptr - NBPW), 0); - - /* Save the pointer to "path" just below it */ - (void) suword((caddr_t)(pathptr - 2*NBPW), pathptr); - /* - * na includes arg[] and env[]. - * NBPW for 2 NULL one each ofter arg[argc -1] and env[n] - * NBPW for argc - * skip over saved path, NBPW for pointer to path, - * and NBPW for the NULL after pointer to path. 
- */ - ap = ucp - na*NBPW - 3*NBPW - savedpathlen - 2*NBPW; -#if defined(ppc) - thread_setuserstack(thr_act, ap); /* Set the stack */ -#else - uthread->uu_ar0[SP] = ap; -#endif - (void) suword((caddr_t)ap, na-ne); /* argc */ - nc = 0; - cc = 0; - - cp = (char *) execargsp; - cc = NCARGS - savedpathlen - 2*NBPW; - ps.ps_argvstr = (char *)ucp; /* first argv string */ - ps.ps_nargvstr = na - ne; /* argc */ - for (;;) { - ap += NBPW; - if (na == ne) { - (void) suword((caddr_t)ap, 0); - ap += NBPW; - ps.ps_envstr = (char *)ucp; - ps.ps_nenvstr = ne; + fp->f_fglob->fg_flag = FREAD; + fp->f_fglob->fg_type = DTYPE_VNODE; + fp->f_fglob->fg_ops = &vnops; + fp->f_fglob->fg_data = (caddr_t)dev_null; + + proc_fdlock(p); + *fdflags(p, indx) &= ~UF_RESERVED; + fp_drop(p, indx, fp, 1); + proc_fdunlock(p); } - if (--na < 0) - break; - (void) suword((caddr_t)ap, ucp); - do { - error = copyoutstr(cp, (caddr_t)ucp, - (unsigned)cc, &len); - ucp += len; - cp += len; - nc += len; - cc -= len; - } while (error == ENAMETOOLONG); - if (error == EFAULT) - break; /* bad stack - user's problem */ + /* + * for now we need to drop the reference immediately + * since we don't have any mechanism in place to + * release it before starting to unmount "/dev" + * during a reboot/shutdown + */ + vnode_rele(dev_null); + dev_null = NULLVP; } - (void) suword((caddr_t)ap, 0); - } - - if (load_result.dynlinker) { -#if defined(ppc) - ap = thread_adjuserstack(thr_act, -4); /* Adjust the stack */ -#else - ap = uthread->uu_ar0[SP] -= 4; -#endif - (void) suword((caddr_t)ap, load_result.mach_header); - } - - if (vfexec) { - vm_map_switch(old_map); - } -#if defined(ppc) - thread_setentrypoint(thr_act, load_result.entry_point); /* Set the entry point */ -#elif defined(i386) - uthread->uu_ar0[PC] = load_result.entry_point; -#else -#error architecture not implemented! -#endif - - /* Stop profiling */ - stopprofclock(p); - - /* - * Reset signal state. - */ - execsigs(p, thr_act); - - /* - * Close file descriptors - * which specify close-on-exec. - */ - fdexec(p); - /* FIXME: Till vmspace inherit is fixed: */ - if (!vfexec && p->vm_shm) - shmexit(p); - /* Clean up the semaphores */ - semexit(p); - - /* - * Remember file name for accounting. - */ - p->p_acflag &= ~AFORK; - if (nd.ni_cnd.cn_namelen > MAXCOMLEN) - nd.ni_cnd.cn_namelen = MAXCOMLEN; - bcopy((caddr_t)nd.ni_cnd.cn_nameptr, (caddr_t)p->p_comm, - (unsigned)nd.ni_cnd.cn_namelen); - p->p_comm[nd.ni_cnd.cn_namelen] = '\0'; - - { - /* This is for kdebug */ - long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4; - - /* Collect the pathname for tracing */ - kdbg_trace_string(p, &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4); - - if (vfexec) - KERNEL_DEBUG_CONSTANT1((TRACEDBG_CODE(DBG_TRACE_STRING, 2)) | DBG_FUNC_NONE, - dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, getshuttle_thread(thr_act)); - else - KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_STRING, 2)) | DBG_FUNC_NONE, - dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0); } /* - * mark as execed, wakeup the process that vforked (if any) and tell - * it that it now has it's own resources back + * Implement the semantic where the effective user and group become + * the saved user and group in exec'ed programs. 
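	 *
	 * An illustrative sketch of the resulting behaviour, not part of this
	 * change: after uid 501 execs a setuid-root binary the process runs
	 * with ruid 501, euid 0 and svuid 0, so it may later do
	 *
	 *	seteuid(getuid());	-- drop to the real uid for risky work
	 *	seteuid(0);		-- regain privilege; allowed because svuid == 0
	 *
	 * which is exactly the window kauth_cred_setsvuidgid() below opens by
	 * copying the freshly set effective ids into the saved ids.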
*/ - p->p_flag |= P_EXEC; - if (p->p_pptr && (p->p_flag & P_PPWAIT)) { - p->p_flag &= ~P_PPWAIT; - wakeup((caddr_t)p->p_pptr); - } - - if (vfexec && (p->p_flag & P_TRACED)) { - psignal_vfork(p, new_task, thr_act, SIGTRAP); - } + p->p_ucred = kauth_cred_setsvuidgid(p->p_ucred, kauth_cred_getuid(p->p_ucred), p->p_ucred->cr_gid); + + /* XXX Obsolete; security token should not be separate from cred */ + set_security_token(p); -badtoolate: - if (vfexec) { - task_deallocate(new_task); - act_deallocate(thr_act); - if (error) - error = 0; - } -bad: - FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); - if (vp) - vput(vp); -bad1: - if (execargs) - execargs_free(execargs); - if (!error && vfexec) { - vfork_return(current_act(), p->p_pptr, p, retval); - (void) thread_resume(thr_act); - return(0); - } return(error); } - -#define unix_stack_size(p) (p->p_rlimit[RLIMIT_STACK].rlim_cur) - -kern_return_t -create_unix_stack(map, user_stack, customstack, p) - vm_map_t map; - vm_offset_t user_stack; - int customstack; - struct proc *p; +static kern_return_t +create_unix_stack(vm_map_t map, user_addr_t user_stack, int customstack, + struct proc *p) { - vm_size_t size; - vm_offset_t addr; + mach_vm_size_t size; + mach_vm_offset_t addr; p->user_stack = user_stack; if (!customstack) { - size = round_page_64(unix_stack_size(p)); - addr = trunc_page_32(user_stack - size); - return (vm_allocate(map,&addr, size, FALSE)); + size = mach_vm_round_page(unix_stack_size(p)); + addr = mach_vm_trunc_page(user_stack - size); + return (mach_vm_allocate(map, &addr, size, + VM_MAKE_TAG(VM_MEMORY_STACK) | + VM_FLAGS_FIXED)); } else return(KERN_SUCCESS); } #include -char init_program_name[128] = "/sbin/mach_init\0"; +static char init_program_name[128] = "/sbin/launchd"; +static const char * other_init = "/sbin/mach_init"; char init_args[128] = ""; @@ -850,15 +1824,12 @@ int init_attempts = 0; void -load_init_program(p) - struct proc *p; +load_init_program(struct proc *p) { vm_offset_t init_addr; - int *old_ap; char *argv[3]; - int error; - register_t retval[2]; - struct uthread * ut; + int error; + register_t retval[2]; error = 0; @@ -874,7 +1845,6 @@ load_init_program(p) if (error && ((boothowto & RB_INITNAME) == 0) && (init_attempts == 1)) { - static char other_init[] = "/etc/mach_init"; printf("Load of %s, errno %d, trying %s\n", init_program_name, error, other_init); error = 0; @@ -898,11 +1868,12 @@ load_init_program(p) init_addr = VM_MIN_ADDRESS; (void) vm_allocate(current_map(), &init_addr, - PAGE_SIZE, TRUE); + PAGE_SIZE, VM_FLAGS_ANYWHERE); if (init_addr == 0) init_addr++; + (void) copyout((caddr_t) init_program_name, - (caddr_t) (init_addr), + CAST_USER_ADDR_T(init_addr), (unsigned) sizeof(init_program_name)+1); argv[0] = (char *) init_addr; @@ -916,7 +1887,7 @@ load_init_program(p) */ (void) copyout((caddr_t) init_args, - (caddr_t) (init_addr), + CAST_USER_ADDR_T(init_addr), (unsigned) sizeof(init_args)); argv[1] = (char *) init_addr; @@ -934,16 +1905,16 @@ load_init_program(p) */ (void) copyout((caddr_t) argv, - (caddr_t) (init_addr), + CAST_USER_ADDR_T(init_addr), (unsigned) sizeof(argv)); /* * Set up argument block for fake call to execve. 
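	 *
	 * By this point the page copied out above holds, roughly (offsets are
	 * rounded up in code elided from these hunks, so treat this as a
	 * sketch only):
	 *
	 *	[ init_program_name string ][ init_args string ][ argv[0..2] ]
	 *	  ^ fname and argv[0]         ^ argv[1]            ^ argp
	 *
	 * with argv[2] left NULL, so the fake execve sees a conventional
	 * argument vector and no environment (envp = 0).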
*/ - init_exec_args.fname = argv[0]; - init_exec_args.argp = (char **) init_addr; - init_exec_args.envp = 0; + init_exec_args.fname = CAST_USER_ADDR_T(argv[0]); + init_exec_args.argp = CAST_USER_ADDR_T((char **)init_addr); + init_exec_args.envp = CAST_USER_ADDR_T(0); /* So that mach_init task * is set with uid,gid 0 token @@ -962,7 +1933,7 @@ load_return_to_errno(load_return_t lrtn) { switch (lrtn) { case LOAD_SUCCESS: - return 0; + return 0; case LOAD_BADARCH: return EBADARCH; case LOAD_BADMACHO: @@ -970,41 +1941,20 @@ load_return_to_errno(load_return_t lrtn) case LOAD_SHLIB: return ESHLIBVERS; case LOAD_NOSPACE: + case LOAD_RESOURCE: return ENOMEM; case LOAD_PROTECT: return EACCES; - case LOAD_RESOURCE: + case LOAD_ENOENT: + return ENOENT; + case LOAD_IOERROR: + return EIO; case LOAD_FAILURE: default: return EBADEXEC; } } -/* - * exec_check_access() - */ -int -check_exec_access(p, vp, vap) - struct proc *p; - struct vnode *vp; - struct vattr *vap; -{ - int flag; - int error; - - if (error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) - return (error); - flag = p->p_flag; - if (flag & P_TRACED) { - if (error = VOP_ACCESS(vp, VREAD, p->p_ucred, p)) - return (error); - } - if (vp->v_type != VREG || - (vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) - return (EACCES); - return (0); -} - #include #include #include @@ -1014,9 +1964,12 @@ check_exec_access(p, vp, vap) extern semaphore_t execve_semaphore; +/* + * The block of memory used by the execve arguments. At the same time, + * we allocate a page so that we can read in the first page of the image. + */ static int -execargs_alloc(addrp) - vm_offset_t *addrp; +execargs_alloc(struct image_params *imgp) { kern_return_t kret; @@ -1033,20 +1986,22 @@ execargs_alloc(addrp) return (EINTR); } - kret = kmem_alloc_pageable(bsd_pageable_map, addrp, NCARGS); - if (kret != KERN_SUCCESS) + kret = kmem_alloc_pageable(bsd_pageable_map, (vm_offset_t *)&imgp->ip_strings, NCARGS + PAGE_SIZE); + imgp->ip_vdata = imgp->ip_strings + NCARGS; + if (kret != KERN_SUCCESS) { + semaphore_signal(execve_semaphore); return (ENOMEM); - + } return (0); } static int -execargs_free(addr) - vm_offset_t addr; +execargs_free(struct image_params *imgp) { kern_return_t kret; - kmem_free(bsd_pageable_map, addr, NCARGS); + kmem_free(bsd_pageable_map, (vm_offset_t)imgp->ip_strings, NCARGS + PAGE_SIZE); + imgp->ip_strings = NULL; kret = semaphore_signal(execve_semaphore); switch (kret) { @@ -1062,4 +2017,3 @@ execargs_free(addr) return (EINVAL); } } -
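
A caller-side sketch of the argument-buffer throttle above (illustrative
only: exec_with_args_buffer() is a hypothetical helper, not a function in
this diff, and the real execve path interleaves much more work between the
two calls):

	static int
	exec_with_args_buffer(struct image_params *imgp)
	{
		int error;

		/*
		 * Waits on execve_semaphore while too many execs are in
		 * flight, then returns NCARGS bytes of pageable string
		 * space in imgp->ip_strings plus one extra page for the
		 * image header in imgp->ip_vdata.
		 */
		if ((error = execargs_alloc(imgp)) != 0)
			return (error);		/* EINTR or ENOMEM */

		/* ... copy in strings and read the image header here ... */

		/*
		 * Releases the buffer and signals execve_semaphore so the
		 * next waiter can proceed; every successful execargs_alloc()
		 * must be paired with exactly one execargs_free().
		 */
		return (execargs_free(imgp));
	}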