+ /*
+  * NOTE(review): this hunk is the interior of a single function; its
+  * signature and closing brace are outside the visible diff.  From the
+  * imgp/load_machfile/exec_* usage it appears to be the Mach-O image
+  * activator (exec_mach_imgact) -- confirm against the full file.
+  * Code below is byte-identical to the original; only review comments
+  * were added.
+  */
+ struct mach_header *mach_header = (struct mach_header *)imgp->ip_vdata;
+ kauth_cred_t cred = vfs_context_ucred(imgp->ip_vfs_context);
+ struct proc *p = vfs_context_proc(imgp->ip_vfs_context);
+ int error = 0;
+ int vfexec = 0;
+ task_t task;
+ task_t new_task;
+ thread_t thread;
+ struct uthread *uthread;
+ vm_map_t old_map = VM_MAP_NULL;
+ vm_map_t map;
+ boolean_t clean_regions = FALSE;
+ load_return_t lret;
+ load_result_t load_result;
+ shared_region_mapping_t shared_region, initial_region;
+#ifdef IMGPF_POWERPC
+ int powerpcParent, powerpcImage;
+#endif /* IMGPF_POWERPC */
+
+ /*
+  * make sure it's a Mach-O 1.0 or Mach-O 2.0 binary; the difference
+  * is a reserved field on the end, so for the most part, we can
+  * treat them as if they were identical.
+  */
+ if ((mach_header->magic != MH_MAGIC) &&
+     (mach_header->magic != MH_MAGIC_64)) {
+ error = -1;
+ goto bad;
+ }
+
+ task = current_task();
+ thread = current_thread();
+ uthread = get_bsdthread_info(thread);
+
+ /* UT_VFORK set means we came here via execve() after a vfork() */
+ if (uthread->uu_flag & UT_VFORK)
+ vfexec = 1; /* Mark in exec */
+
+ if ((mach_header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64)
+ imgp->ip_flags |= IMGPF_IS_64BIT;
+
+ if (!grade_binary(mach_header->cputype, mach_header->cpusubtype)) {
+ error = EBADARCH;
+ goto bad;
+ }
+
+ /*
+  * Copy in arguments/environment from the old process, if the
+  * vector is non-NULL (i.e. exec is not being called from
+  * load_init_program(), as a special case, at system startup).
+  */
+ if (imgp->ip_user_argv != 0LL) {
+ error = exec_extract_strings(imgp);
+ if (error)
+ goto bad;
+ }
+
+ /*
+  * Hack for binary compatibility; put three NULs on the end of the
+  * string area, and round it up to the next word boundary. This
+  * ensures padding with NULs to the boundary.
+  */
+ imgp->ip_strendp[0] = 0;
+ imgp->ip_strendp[1] = 0;
+ imgp->ip_strendp[2] = 0;
+ imgp->ip_strendp += (((imgp->ip_strendp - imgp->ip_strings) + NBPW-1) & ~(NBPW-1));
+
+#ifdef IMGPF_POWERPC
+ /*
+  * XXX
+  *
+  * Should be factored out; this is here because we might be getting
+  * invoked this way as the result of a shell script, and the check
+  * in exec_check_permissions() is not interior to the jump back up
+  * to the "encapsulated_binary:" label in execve().
+  */
+ if (imgp->ip_vattr->va_fsid == exec_archhandler_ppc.fsid &&
+ imgp->ip_vattr->va_fileid == (uint64_t)((u_long)exec_archhandler_ppc.fileid)) {
+ imgp->ip_flags |= IMGPF_POWERPC;
+ }
+#endif /* IMGPF_POWERPC */
+
+ /* vfork case: build a brand-new task/thread for the child image */
+ if (vfexec) {
+ kern_return_t result;
+
+ result = task_create_internal(task, FALSE, (imgp->ip_flags & IMGPF_IS_64BIT), &new_task);
+ if (result != KERN_SUCCESS)
+ printf("execve: task_create failed. Code: 0x%x\n", result);
+ /*
+  * NOTE(review): a task_create_internal() failure is only logged;
+  * new_task is then used unconditionally below.  Verify that
+  * KERN_SUCCESS is guaranteed here, or add real error handling.
+  */
+ p->task = new_task;
+ set_bsdtask_info(new_task, p);
+ if (p->p_nice != 0)
+ resetpriority(p);
+ map = get_task_map(new_task);
+
+ if (imgp->ip_flags & IMGPF_IS_64BIT)
+ vm_map_set_64bit(map);
+ else
+ vm_map_set_32bit(map);
+
+ result = thread_create(new_task, &imgp->ip_vfork_thread);
+ if (result != KERN_SUCCESS)
+ printf("execve: thread_create failed. Code: 0x%x\n", result);
+ /*
+  * NOTE(review): same pattern -- thread_create() failure is only
+  * logged and imgp->ip_vfork_thread is then dereferenced via
+  * get_bsdthread_info().  Confirm failure is impossible here.
+  */
+ /* reset local idea of task, thread, uthread */
+ task = new_task;
+ thread = imgp->ip_vfork_thread;
+ uthread = get_bsdthread_info(thread);
+ } else {
+ map = VM_MAP_NULL;
+ }
+
+ /*
+  * We set these flags here; this is OK, since if we fail after
+  * this point, we have already destroyed the parent process anyway.
+  */
+ if (imgp->ip_flags & IMGPF_IS_64BIT) {
+ task_set_64bit(task, TRUE);
+ p->p_flag |= P_LP64;
+ } else {
+ task_set_64bit(task, FALSE);
+ p->p_flag &= ~P_LP64;
+ }
+
+ /*
+  * Load the Mach-O file.
+  */
+/* LP64 - remove following "if" statement after osfmk/vm/task_working_set.c */
+if((imgp->ip_flags & IMGPF_IS_64BIT) == 0)
+ if(imgp->ip_tws_cache_name) {
+ tws_handle_startup_file(task, kauth_cred_getuid(cred),
+ imgp->ip_tws_cache_name, imgp->ip_vp, &clean_regions);
+ }
+
+ vm_get_shared_region(task, &initial_region);
+
+#ifdef IMGPF_POWERPC
+ /*
+  * If we are transitioning to/from powerpc, then we need to do extra
+  * work here.
+  */
+ powerpcParent = (p->p_flag & P_TRANSLATED) ? 1 : 0;
+ powerpcImage = (imgp->ip_flags & IMGPF_POWERPC) ? 1 : 0;
+
+ if (powerpcParent ^ powerpcImage) {
+ cpu_type_t cpu = (powerpcImage ? CPU_TYPE_POWERPC : cpu_type());
+ struct vnode *rootDir = p->p_fd->fd_rdir;
+
+ shared_region = lookup_default_shared_region((int)rootDir, cpu);
+ if (shared_region == NULL) {
+ shared_region_mapping_t old_region;
+ shared_region_mapping_t new_region;
+ vm_get_shared_region(current_task(), &old_region);
+ /* grrrr... this sets current_task(), not task
+  * -- they're different (usually)
+  */
+ shared_file_boot_time_init((int)rootDir,cpu);
+ if ( current_task() != task ) {
+ vm_get_shared_region(current_task(),&new_region);
+ vm_set_shared_region(task,new_region);
+ vm_set_shared_region(current_task(),old_region);
+ }
+ } else {
+ vm_set_shared_region(task, shared_region);
+ }
+ shared_region_mapping_dealloc(initial_region);
+ } else
+#endif /* IMGPF_POWERPC */
+
+ {
+ struct shared_region_task_mappings map_info;
+ shared_region_mapping_t next;
+
+ shared_region_mapping_info(initial_region,
+ &map_info.text_region,
+ &map_info.text_size,
+ &map_info.data_region,
+ &map_info.data_size,
+ &map_info.region_mappings,
+ &map_info.client_base,
+ &map_info.alternate_base,
+ &map_info.alternate_next,
+ &map_info.fs_base,
+ &map_info.system,
+ &map_info.flags,
+ &next);
+ if (map_info.flags & SHARED_REGION_STANDALONE) {
+ /*
+  * We were using a private shared region.
+  * Try and get back to a system-wide shared region
+  * with matching "fs_base" (for chroot) and "system"
+  * (for CPU type).
+  */
+ shared_region = lookup_default_shared_region(
+ map_info.fs_base,
+ map_info.system);
+ if (shared_region == NULL) {
+ /*
+  * No system-wide default regions, stick to
+  * our private region...
+  */
+ } else {
+ SHARED_REGION_TRACE(
+ SHARED_REGION_TRACE_INFO,
+ ("shared_region: %p [%d(%s)] "
+ "exec(\"%s\"): "
+ "moving from private %p[%x,%x,%x] "
+ "to default %p\n",
+ current_thread(),
+ p->p_pid, p->p_comm,
+ (imgp->ip_p_comm[0] ?
+ imgp->ip_p_comm :
+ imgp->ip_ndp->ni_cnd.cn_nameptr),
+ initial_region,
+ map_info.fs_base,
+ map_info.system,
+ map_info.flags,
+ shared_region));
+ vm_set_shared_region(task, shared_region);
+ shared_region_mapping_dealloc(initial_region);
+ }
+ }
+ }
+
+ /*
+  * NOTE: An error after this point indicates we have potentially
+  * destroyed or overwritten some process state while attempting an
+  * execve() following a vfork(), which is an unrecoverable condition.
+  */
+
+ /*
+  * We reset the task to 64-bit (or not) here. It may have picked up
+  * a new map, and we need that to reflect its true 64-bit nature.
+  */
+
+ task_set_64bit(task,
+ ((imgp->ip_flags & IMGPF_IS_64BIT) == IMGPF_IS_64BIT));
+
+ /*
+  * Actually load the image file we previously decided to load.
+  */
+ lret = load_machfile(imgp, mach_header, thread, map, clean_regions, &load_result);
+
+ if (lret != LOAD_SUCCESS) {
+ error = load_return_to_errno(lret);
+ goto badtoolate;
+ }
+
+ /* load_machfile() maps the vnode */
+ (void)ubc_map(imgp->ip_vp, PROT_EXEC);
+
+ /*
+  * deal with set[ug]id.
+  */
+ error = exec_handle_sugid(imgp);
+
+ /*
+  * NOTE(review): the error from exec_handle_sugid() is examined only
+  * after the KNOTE and the traced-process SIGTRAP below -- confirm
+  * this ordering (notify/signal even on sugid failure) is intended.
+  */
+ KNOTE(&p->p_klist, NOTE_EXEC);
+
+ if (!vfexec && (p->p_flag & P_TRACED))
+ psignal(p, SIGTRAP);
+
+ if (error) {
+ goto badtoolate;
+ }
+ vnode_put(imgp->ip_vp);
+ imgp->ip_vp = NULL;
+
+ if (load_result.unixproc &&
+ create_unix_stack(get_task_map(task),
+ load_result.user_stack, load_result.customstack, p)) {
+ error = load_return_to_errno(LOAD_NOSPACE);
+ goto badtoolate;
+ }
+
+ /* vfork: copyouts below must target the child's map, not ours */
+ if (vfexec) {
+ old_map = vm_map_switch(get_task_map(task));
+ }
+
+ if (load_result.unixproc) {
+ user_addr_t ap;
+
+ /*
+  * Copy the strings area out into the new process address
+  * space.
+  */
+ ap = p->user_stack;
+ error = exec_copyout_strings(imgp, &ap);
+ if (error) {
+ if (vfexec)
+ vm_map_switch(old_map);
+ goto badtoolate;
+ }
+ /* Set the stack */
+ thread_setuserstack(thread, ap);
+ }
+
+ if (load_result.dynlinker) {
+ uint64_t ap;
+
+ /* Adjust the stack */
+ if (imgp->ip_flags & IMGPF_IS_64BIT) {
+ ap = thread_adjuserstack(thread, -8);
+ error = copyoutptr(load_result.mach_header, ap, 8);
+ } else {
+ ap = thread_adjuserstack(thread, -4);
+ error = suword(ap, load_result.mach_header);
+ }
+ if (error) {
+ if (vfexec)
+ vm_map_switch(old_map);
+ goto badtoolate;
+ }
+ }
+
+ if (vfexec) {
+ vm_map_switch(old_map);
+ }
+ /* Set the entry point */
+ thread_setentrypoint(thread, load_result.entry_point);
+
+ /* Stop profiling */
+ stopprofclock(p);
+
+ /*
+  * Reset signal state.
+  */
+ execsigs(p, thread);
+
+ /*
+  * Close file descriptors
+  * which specify close-on-exec.
+  */
+ fdexec(p);
+
+ /*
+  * need to cancel async IO requests that can be cancelled and wait for those
+  * already active. MAY BLOCK!
+  */
+ _aio_exec( p );
+
+ /* FIXME: Till vmspace inherit is fixed: */
+ if (!vfexec && p->vm_shm)
+ shmexec(p);
+ /* Clean up the semaphores */
+ semexit(p);
+
+ /*
+  * Remember file name for accounting.
+  */
+ p->p_acflag &= ~AFORK;
+ /* If the translated name isn't NULL, then we want to use
+  * that translated name as the name we show as the "real" name.
+  * Otherwise, use the name passed into exec.
+  */
+ if (0 != imgp->ip_p_comm[0]) {
+ bcopy((caddr_t)imgp->ip_p_comm, (caddr_t)p->p_comm,
+ sizeof(p->p_comm));
+ } else {
+ if (imgp->ip_ndp->ni_cnd.cn_namelen > MAXCOMLEN)
+ imgp->ip_ndp->ni_cnd.cn_namelen = MAXCOMLEN;
+ bcopy((caddr_t)imgp->ip_ndp->ni_cnd.cn_nameptr, (caddr_t)p->p_comm,
+ (unsigned)imgp->ip_ndp->ni_cnd.cn_namelen);
+ p->p_comm[imgp->ip_ndp->ni_cnd.cn_namelen] = '\0';
+ }
+
+ if (kdebug_enable) {
+ long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;
+
+ /*
+  * Collect the pathname for tracing
+  */
+ kdbg_trace_string(p, &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);
+
+ if (vfexec)
+ {
+ KERNEL_DEBUG_CONSTANT1((TRACEDBG_CODE(DBG_TRACE_DATA, 2)) | DBG_FUNC_NONE,
+ p->p_pid ,0,0,0, (unsigned int)thread);
+ KERNEL_DEBUG_CONSTANT1((TRACEDBG_CODE(DBG_TRACE_STRING, 2)) | DBG_FUNC_NONE,
+ dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, (unsigned int)thread);
+ }
+ else
+ {
+ KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_DATA, 2)) | DBG_FUNC_NONE,
+ p->p_pid ,0,0,0,0);
+ KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_STRING, 2)) | DBG_FUNC_NONE,
+ dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
+ }
+ }
+
+#ifdef IMGPF_POWERPC
+ /*
+  * Mark the process as powerpc or not. If powerpc, set the affinity
+  * flag, which will be used for grading binaries in future exec's
+  * from the process.
+  */
+ if (((imgp->ip_flags & IMGPF_POWERPC) != 0))
+ p->p_flag |= P_TRANSLATED;
+ else
+#endif /* IMGPF_POWERPC */
+ p->p_flag &= ~P_TRANSLATED;
+ p->p_flag &= ~P_AFFINITY;
+
+ /*
+  * mark as execed, wakeup the process that vforked (if any) and tell
+  * it that it now has it's own resources back
+  */
+ p->p_flag |= P_EXEC;
+ if (p->p_pptr && (p->p_flag & P_PPWAIT)) {
+ p->p_flag &= ~P_PPWAIT;
+ wakeup((caddr_t)p->p_pptr);
+ }
+
+ if (vfexec && (p->p_flag & P_TRACED)) {
+ psignal_vfork(p, new_task, thread, SIGTRAP);
+ }
+
+ /*
+  * NOTE(review): control falls through into badtoolate on success as
+  * well as via the error gotos.  For the vfork path the task/thread
+  * references created above are dropped and any error is cleared
+  * (an exec failure after vfork is unrecoverable for the parent).
+  */
+badtoolate:
+ if (vfexec) {
+ task_deallocate(new_task);
+ thread_deallocate(thread);
+ if (error)
+ error = 0;
+ }
+
+bad:
+ return(error);