+#ifdef IMGPF_POWERPC
+/*
+ * exec_powerpc32_imgact
+ *
+ * Image activator for byte-swapped (PowerPC) Mach-O images.  Rather than
+ * loading such an image directly, we redirect the exec to the configured
+ * PowerPC binary translation handler, treating the handler as if it were
+ * an interpreter for the image.  This can happen either for a direct
+ * exec of a PowerPC image, or indirectly for a PowerPC interpreter named
+ * by an interpreter script.
+ *
+ * Parameters:	struct image_params *	image parameter block
+ *
+ * Returns:	-1		not a PowerPC image (keep looking)
+ *		-3		Success: exec_archhandler_ppc: relookup
+ *		>0		Failure: exec_archhandler_ppc: error number
+ *
+ * Note:	A direct invocation of the exec_archhandler_ppc itself is
+ *		not handled here, since the handler is not a PowerPC
+ *		binary; that case is recognized by the native binary
+ *		activators, and is managed in exec_check_permissions().
+ *
+ * Note:	Only 32 bit PowerPC images are handled; if 64 bit PowerPC
+ *		support is ever desired, it should be added as a separate
+ *		image activator, in keeping with this design.
+ */
+static int
+exec_powerpc32_imgact(struct image_params *imgp)
+{
+	struct mach_header *mhp = (struct mach_header *)imgp->ip_vdata;
+	int err;
+	size_t copied = 0;
+
+	/* Only byte-swapped Mach-O headers are claimed by this activator */
+	if (mhp->magic != MH_CIGAM)
+		return (-1);
+
+	/* Without a configured handler binary, the image cannot be run */
+	if (exec_archhandler_ppc.path[0] == 0)
+		return (EBADARCH);
+
+	/*
+	 * Flag the image as PowerPC now, rather than waiting for the
+	 * exec_check_permissions() call to do it, so that the relookup
+	 * in execve() does not follow symbolic links, as a side effect.
+	 */
+	imgp->ip_flags |= IMGPF_POWERPC;
+
+	/* Impute the handler as this image's interpreter */
+	err = copystr(exec_archhandler_ppc.path, imgp->ip_interp_name,
+			IMG_SHSIZE, &copied);
+	if (err)
+		return (err);
+
+	/*
+	 * Provide a replacement string for p->p_comm; this has to go
+	 * through an alternate buffer rather than being stored directly,
+	 * since a failed exec returns to the parent, and writing into
+	 * p->p_comm here would then have clobbered the parent's name.
+	 */
+	strncpy(imgp->ip_p_comm, imgp->ip_ndp->ni_cnd.cn_nameptr, MAXCOMLEN);
+	imgp->ip_p_comm[MAXCOMLEN] = '\0';
+
+	return (-3);
+}
+#endif /* IMGPF_POWERPC */
+
+
+/*
+ * exec_shell_imgact
+ *
+ * Image activator for interpreter scripts.  If the image begins with the
+ * characters "#!", then it is an interpreter script.  Verify that we are
+ * not already executing in PowerPC mode, and that the length of the script
+ * line indicating the interpreter is not in excess of the maximum allowed
+ * size.  If this is the case, then break out the arguments, if any, which
+ * are separated by white space, and copy them into the argument save area
+ * as if they were provided on the command line before all other arguments.
+ * The line ends when we encounter a comment character ('#') or newline.
+ *
+ * Parameters:	struct image_params *	image parameter block
+ *
+ * Returns:	-1		not an interpreter (keep looking)
+ *		-3		Success: interpreter: relookup
+ *		>0		Failure: interpreter: error number
+ *
+ * A return value other than -1 indicates subsequent image activators should
+ * not be given the opportunity to attempt to activate the image.
+ */
+static int
+exec_shell_imgact(struct image_params *imgp)
+{
+	char *vdata = imgp->ip_vdata;
+	char *ihp;
+	char *line_endp;
+	char *interp;
+
+	/*
+	 * Make sure it's a shell script.  If we've already redirected
+	 * from an interpreted file once, don't do it again.
+	 *
+	 * Note: We disallow PowerPC, since the expectation is that we
+	 * may run a PowerPC interpreter, but not an interpret a PowerPC
+	 * image.  This is consistent with historical behaviour.
+	 */
+	if (vdata[0] != '#' ||
+	    vdata[1] != '!' ||
+	    (imgp->ip_flags & IMGPF_INTERPRET) != 0) {
+		return (-1);
+	}
+
+#ifdef IMGPF_POWERPC
+	if ((imgp->ip_flags & IMGPF_POWERPC) != 0)
+		return (EBADARCH);
+#endif /* IMGPF_POWERPC */
+
+	imgp->ip_flags |= IMGPF_INTERPRET;
+
+	/* Check to see if SUGID scripts are permitted.  If they aren't then
+	 * clear the SUGID bits.
+	 * imgp->ip_vattr is known to be valid.
+	 */
+	if (sugid_scripts == 0) {
+		imgp->ip_origvattr->va_mode &= ~(VSUID | VSGID);
+	}
+
+	/*
+	 * Find the nominal end of the interpreter line; test the buffer
+	 * bound *before* dereferencing, so that a line which fills the
+	 * entire buffer without a '\n' or '#' is rejected without first
+	 * reading the byte at &vdata[IMG_SHSIZE], one past the end (the
+	 * previous version checked the bound after the dereference).
+	 */
+	for (ihp = &vdata[2]; ; ihp++) {
+		if (ihp >= &vdata[IMG_SHSIZE])
+			return (ENOEXEC);
+		if (*ihp == '\n' || *ihp == '#')
+			break;
+	}
+
+	line_endp = ihp;
+	ihp = &vdata[2];
+	/* Skip over leading spaces - until the interpreter name */
+	while ( ihp < line_endp && ((*ihp == ' ') || (*ihp == '\t')))
+		ihp++;
+
+	/*
+	 * Find the last non-whitespace character before the end of line or
+	 * the beginning of a comment; this is our new end of line.
+	 *
+	 * NOTE(review): as written this loop is a no-op, since on entry
+	 * *line_endp is always '\n' or '#', never a space or tab; trailing
+	 * whitespace is instead absorbed harmlessly by the skip/break logic
+	 * in the argument loop below.  Left unchanged to preserve behavior.
+	 */
+	for (;line_endp > ihp && ((*line_endp == ' ') || (*line_endp == '\t')); line_endp--)
+		continue;
+
+	/* Empty? */
+	if (line_endp == ihp)
+		return (ENOEXEC);
+
+	/* copy the interpreter name */
+	interp = imgp->ip_interp_name;
+	while ((ihp < line_endp) && (*ihp != ' ') && (*ihp != '\t'))
+		*interp++ = *ihp++;
+	*interp = '\0';
+
+	exec_save_path(imgp, CAST_USER_ADDR_T(imgp->ip_interp_name),
+							UIO_SYSSPACE32);
+
+	ihp = &vdata[2];
+	while (ihp < line_endp) {
+		/* Skip leading whitespace before each argument */
+		while ((*ihp == ' ') || (*ihp == '\t'))
+			ihp++;
+
+		if (ihp >= line_endp)
+			break;
+
+		/*
+		 * We have an argument; copy it.
+		 *
+		 * NOTE(review): ip_strspace is decremented but never tested
+		 * here; this relies on the interpreter line (< IMG_SHSIZE)
+		 * fitting in the string save area — verify against the
+		 * sizing done by the caller.
+		 */
+		while ((ihp < line_endp) && (*ihp != ' ') && (*ihp != '\t')) {
+			*imgp->ip_strendp++ = *ihp++;
+			imgp->ip_strspace--;
+		}
+		*imgp->ip_strendp++ = 0;
+		imgp->ip_strspace--;
+		imgp->ip_argc++;
+	}
+
+	return (-3);
+}
+
+
+
+/*
+ * exec_fat_imgact
+ *
+ * Image activator for fat 1.0 binaries.  If the binary is fat, then we
+ * need to select an image from it internally, and make that the image
+ * we are going to attempt to execute.  At present, this consists of
+ * reloading the first page for the image with a first page from the
+ * offset location indicated by the fat header.
+ *
+ * Parameters:	struct image_params *	image parameter block
+ *
+ * Returns:	-1		not a fat binary (keep looking)
+ *		-2		Success: encapsulated binary: reread
+ *		>0		Failure: error number
+ *
+ * Important: This image activator is byte order neutral.
+ *
+ * Note:	If we find an encapsulated binary, we make no assertions
+ *		about its validity; instead, we leave that up to a rescan
+ *		for an activator to claim it, and, if it is claimed by one,
+ *		that activator is responsible for determining validity.
+ */
+static int
+exec_fat_imgact(struct image_params *imgp)
+{
+	struct proc *p = vfs_context_proc(imgp->ip_vfs_context);
+	/*
+	 * Take the credential from the execution context rather than
+	 * reaching into p->p_ucred directly, for consistency with
+	 * exec_mach_imgact().
+	 */
+	kauth_cred_t cred = vfs_context_ucred(imgp->ip_vfs_context);
+	struct fat_header *fat_header = (struct fat_header *)imgp->ip_vdata;
+	struct fat_arch fat_arch;
+	int resid, error;
+	load_return_t lret;
+
+	/* Make sure it's a fat binary; either byte order is acceptable */
+	if ((fat_header->magic != FAT_MAGIC) &&
+	    (fat_header->magic != FAT_CIGAM)) {
+		error = -1;
+		goto bad;
+	}
+
+	/* Look up our preferred architecture in the fat file. */
+	lret = fatfile_getarch_affinity(imgp->ip_vp,
+					(vm_offset_t)fat_header,
+					&fat_arch,
+					(p->p_flag & P_AFFINITY));
+	if (lret != LOAD_SUCCESS) {
+		error = load_return_to_errno(lret);
+		goto bad;
+	}
+
+	/* Read the Mach-O header out of the selected architecture slice */
+	error = vn_rdwr(UIO_READ, imgp->ip_vp, imgp->ip_vdata,
+			PAGE_SIZE, fat_arch.offset,
+			UIO_SYSSPACE32, (IO_UNIT|IO_NODELOCKED),
+			cred, &resid, p);
+	if (error) {
+		goto bad;
+	}
+
+	/* Did we read a complete header? */
+	if (resid) {
+		error = EBADEXEC;
+		goto bad;
+	}
+
+	/* Success.  Indicate we have identified an encapsulated binary */
+	error = -2;
+	imgp->ip_arch_offset = (user_size_t)fat_arch.offset;
+	imgp->ip_arch_size = (user_size_t)fat_arch.size;
+
+bad:
+	return (error);
+}
+
+/*
+ * exec_mach_imgact
+ *
+ * Image activator for mach-o 1.0 binaries.
+ *
+ * Important: This image activator is NOT byte order neutral.
+ *
+ * Parameters:	struct image_params *	image parameter block
+ *
+ * Returns:	-1		not a native Mach-O binary (keep looking)
+ *		>0		Failure: error number
+ *		0		Success: image loaded, process state replaced
+ *
+ * Note:	Once the vfork()/64-bit commit point is passed (see the
+ *		NOTE in the body), errors can no longer be safely reported
+ *		to the caller; on the vfexec path "badtoolate" deliberately
+ *		suppresses the error for this reason.
+ */
+static int
+exec_mach_imgact(struct image_params *imgp)
+{
+	struct mach_header *mach_header = (struct mach_header *)imgp->ip_vdata;
+	kauth_cred_t cred = vfs_context_ucred(imgp->ip_vfs_context);
+	struct proc *p = vfs_context_proc(imgp->ip_vfs_context);
+	int error = 0;
+	int vfexec = 0;
+	task_t task;
+	task_t new_task;
+	thread_t thread;
+	struct uthread *uthread;
+	vm_map_t old_map = VM_MAP_NULL;
+	vm_map_t map;
+	boolean_t clean_regions = FALSE;
+	load_return_t lret;
+	load_result_t load_result;
+	shared_region_mapping_t shared_region, initial_region;
+#ifdef IMGPF_POWERPC
+	int powerpcParent, powerpcImage;
+#endif /* IMGPF_POWERPC */
+
+	/*
+	 * make sure it's a Mach-O 1.0 or Mach-O 2.0 binary; the difference
+	 * is a reserved field on the end, so for the most part, we can
+	 * treat them as if they were identical.
+	 */
+	if ((mach_header->magic != MH_MAGIC) &&
+	    (mach_header->magic != MH_MAGIC_64)) {
+		error = -1;
+		goto bad;
+	}
+
+	task = current_task();
+	thread = current_thread();
+	uthread = get_bsdthread_info(thread);
+
+	/* An exec from a vfork()ed child is still running on the parent's
+	 * task; note it so a new task/thread are created below. */
+	if (uthread->uu_flag & UT_VFORK)
+		vfexec = 1; /* Mark in exec */
+
+	if ((mach_header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64)
+		imgp->ip_flags |= IMGPF_IS_64BIT;
+
+	/* Reject binary types this system cannot (or should not) run */
+	if (!grade_binary(mach_header->cputype, mach_header->cpusubtype)) {
+		error = EBADARCH;
+		goto bad;
+	}
+
+	/*
+	 * Copy in arguments/environment from the old process, if the
+	 * vector is non-NULL (i.e. exec is not being called from
+	 * load_init_program(), as a special case, at system startup).
+	 */
+	if (imgp->ip_user_argv != 0LL) {
+		error = exec_extract_strings(imgp);
+		if (error)
+			goto bad;
+	}
+
+	/*
+	 * Hack for binary compatability; put three NULs on the end of the
+	 * string area, and round it up to the next word boundary.  This
+	 * ensures padding with NULs to the boundary.
+	 */
+	imgp->ip_strendp[0] = 0;
+	imgp->ip_strendp[1] = 0;
+	imgp->ip_strendp[2] = 0;
+	imgp->ip_strendp += (((imgp->ip_strendp - imgp->ip_strings) + NBPW-1) & ~(NBPW-1));
+
+#ifdef IMGPF_POWERPC
+	/*
+	 * XXX
+	 *
+	 * Should be factored out; this is here because we might be getting
+	 * invoked this way as the result of a shell script, and the check
+	 * in exec_check_permissions() is not interior to the jump back up
+	 * to the "encapsulated_binary:" label in execve().
+	 */
+	if (imgp->ip_vattr->va_fsid == exec_archhandler_ppc.fsid &&
+		imgp->ip_vattr->va_fileid == (uint64_t)((u_long)exec_archhandler_ppc.fileid)) {
+		imgp->ip_flags |= IMGPF_POWERPC;
+	}
+#endif /* IMGPF_POWERPC */
+
+	if (vfexec) {
+		kern_return_t result;
+
+		/*
+		 * Build a fresh task and thread for the new image, since
+		 * the vfork()ed child currently shares the parent's.
+		 *
+		 * NOTE(review): task_create_internal()/thread_create()
+		 * failures are only logged here and execution continues
+		 * with the returned objects — verify this is intentional.
+		 */
+		result = task_create_internal(task, FALSE, (imgp->ip_flags & IMGPF_IS_64BIT), &new_task);
+		if (result != KERN_SUCCESS)
+			printf("execve: task_create failed. Code: 0x%x\n", result);
+		p->task = new_task;
+		set_bsdtask_info(new_task, p);
+		if (p->p_nice != 0)
+			resetpriority(p);
+		map = get_task_map(new_task);
+
+		if (imgp->ip_flags & IMGPF_IS_64BIT)
+			vm_map_set_64bit(map);
+		else
+			vm_map_set_32bit(map);
+
+		result = thread_create(new_task, &imgp->ip_vfork_thread);
+		if (result != KERN_SUCCESS)
+			printf("execve: thread_create failed. Code: 0x%x\n", result);
+		/* reset local idea of task, thread, uthread */
+		task = new_task;
+		thread = imgp->ip_vfork_thread;
+		uthread = get_bsdthread_info(thread);
+	} else {
+		map = VM_MAP_NULL;
+	}
+
+	/*
+	 * We set these flags here; this is OK, since if we fail after
+	 * this point, we have already destroyed the parent process anyway.
+	 */
+	if (imgp->ip_flags & IMGPF_IS_64BIT) {
+		task_set_64bit(task, TRUE);
+		p->p_flag |= P_LP64;
+	} else {
+		task_set_64bit(task, FALSE);
+		p->p_flag &= ~P_LP64;
+	}
+
+	/*
+	 * Load the Mach-O file.
+	 */
+/* LP64 - remove following "if" statement after osfmk/vm/task_working_set.c */
+if((imgp->ip_flags & IMGPF_IS_64BIT) == 0)
+	if(imgp->ip_tws_cache_name) {
+		tws_handle_startup_file(task, kauth_cred_getuid(cred),
+			imgp->ip_tws_cache_name, imgp->ip_vp, &clean_regions);
+	}
+
+	vm_get_shared_region(task, &initial_region);
+
+#ifdef IMGPF_POWERPC
+	/*
+	 * If we are transitioning to/from powerpc, then we need to do extra
+	 * work here.
+	 */
+	powerpcParent = (p->p_flag & P_TRANSLATED) ? 1 : 0;
+	powerpcImage = (imgp->ip_flags & IMGPF_POWERPC) ? 1 : 0;
+
+	if (powerpcParent ^ powerpcImage) {
+		cpu_type_t cpu = (powerpcImage ? CPU_TYPE_POWERPC : cpu_type());
+		struct vnode *rootDir = p->p_fd->fd_rdir;
+
+		shared_region = lookup_default_shared_region((int)rootDir, cpu);
+		if (shared_region == NULL) {
+			shared_region_mapping_t old_region;
+			shared_region_mapping_t new_region;
+			vm_get_shared_region(current_task(), &old_region);
+			/* grrrr... this sets current_task(), not task
+			 * -- they're different (usually)
+			 */
+			shared_file_boot_time_init((int)rootDir,cpu);
+			if ( current_task() != task ) {
+				vm_get_shared_region(current_task(),&new_region);
+				vm_set_shared_region(task,new_region);
+				vm_set_shared_region(current_task(),old_region);
+			}
+		} else {
+			vm_set_shared_region(task, shared_region);
+		}
+		shared_region_mapping_dealloc(initial_region);
+	} else
+#endif /* IMGPF_POWERPC */
+
+	{
+		struct shared_region_task_mappings map_info;
+		shared_region_mapping_t next;
+
+		shared_region_mapping_info(initial_region,
+					   &map_info.text_region,
+					   &map_info.text_size,
+					   &map_info.data_region,
+					   &map_info.data_size,
+					   &map_info.region_mappings,
+					   &map_info.client_base,
+					   &map_info.alternate_base,
+					   &map_info.alternate_next,
+					   &map_info.fs_base,
+					   &map_info.system,
+					   &map_info.flags,
+					   &next);
+		if (map_info.flags & SHARED_REGION_STANDALONE) {
+			/*
+			 * We were using a private shared region.
+			 * Try and get back to a system-wide shared region
+			 * with matching "fs_base" (for chroot) and "system"
+			 * (for CPU type).
+			 */
+			shared_region = lookup_default_shared_region(
+				map_info.fs_base,
+				map_info.system);
+			if (shared_region == NULL) {
+				/*
+				 * No system-wide default regions, stick to
+				 * our private region...
+				 */
+			} else {
+				SHARED_REGION_TRACE(
+					SHARED_REGION_TRACE_INFO,
+					("shared_region: %p [%d(%s)] "
+					 "exec(\"%s\"): "
+					 "moving from private %p[%x,%x,%x] "
+					 "to default %p\n",
+					 current_thread(),
+					 p->p_pid, p->p_comm,
+					 (imgp->ip_p_comm[0] ?
+					  imgp->ip_p_comm :
+					  imgp->ip_ndp->ni_cnd.cn_nameptr),
+					 initial_region,
+					 map_info.fs_base,
+					 map_info.system,
+					 map_info.flags,
+					 shared_region));
+				vm_set_shared_region(task, shared_region);
+				shared_region_mapping_dealloc(initial_region);
+			}
+		}
+	}
+
+	/*
+	 * NOTE: An error after this point indicates we have potentially
+	 * destroyed or overwrote some process state while attempting an
+	 * execve() following a vfork(), which is an unrecoverable condition.
+	 */
+
+	/*
+	 * We reset the task to 64-bit (or not) here.  It may have picked up
+	 * a new map, and we need that to reflect its true 64-bit nature.
+	 */
+
+	task_set_64bit(task,
+		       ((imgp->ip_flags & IMGPF_IS_64BIT) == IMGPF_IS_64BIT));
+
+	/*
+	 * Actually load the image file we previously decided to load.
+	 */
+	lret = load_machfile(imgp, mach_header, thread, map, clean_regions, &load_result);
+
+	if (lret != LOAD_SUCCESS) {
+		error = load_return_to_errno(lret);
+		goto badtoolate;
+	}
+
+	/* load_machfile() maps the vnode */
+	(void)ubc_map(imgp->ip_vp, PROT_EXEC);
+
+	/*
+	 * deal with set[ug]id.
+	 */
+	error = exec_handle_sugid(imgp);
+
+	KNOTE(&p->p_klist, NOTE_EXEC);
+
+	if (!vfexec && (p->p_flag & P_TRACED))
+		psignal(p, SIGTRAP);
+
+	if (error) {
+		goto badtoolate;
+	}
+	/* The image is loaded; the first-page vnode reference can go */
+	vnode_put(imgp->ip_vp);
+	imgp->ip_vp = NULL;
+
+	if (load_result.unixproc &&
+	    create_unix_stack(get_task_map(task),
+			      load_result.user_stack, load_result.customstack, p)) {
+		error = load_return_to_errno(LOAD_NOSPACE);
+		goto badtoolate;
+	}
+
+	if (vfexec) {
+		/* Copyouts below must target the new task's address map */
+		old_map = vm_map_switch(get_task_map(task));
+	}
+
+	if (load_result.unixproc) {
+		user_addr_t ap;
+
+		/*
+		 * Copy the strings area out into the new process address
+		 * space.
+		 */
+		ap = p->user_stack;
+		error = exec_copyout_strings(imgp, &ap);
+		if (error) {
+			if (vfexec)
+				vm_map_switch(old_map);
+			goto badtoolate;
+		}
+		/* Set the stack */
+		thread_setuserstack(thread, ap);
+	}
+
+	if (load_result.dynlinker) {
+		uint64_t ap;
+
+		/* Adjust the stack and push the mach_header address on it */
+		if (imgp->ip_flags & IMGPF_IS_64BIT) {
+			ap = thread_adjuserstack(thread, -8);
+			error = copyoutptr(load_result.mach_header, ap, 8);
+		} else {
+			ap = thread_adjuserstack(thread, -4);
+			error = suword(ap, load_result.mach_header);
+		}
+		if (error) {
+			if (vfexec)
+				vm_map_switch(old_map);
+			goto badtoolate;
+		}
+	}
+
+	if (vfexec) {
+		vm_map_switch(old_map);
+	}
+	/* Set the entry point */
+	thread_setentrypoint(thread, load_result.entry_point);
+
+	/* Stop profiling */
+	stopprofclock(p);
+
+	/*
+	 * Reset signal state.
+	 */
+	execsigs(p, thread);
+
+	/*
+	 * Close file descriptors
+	 * which specify close-on-exec.
+	 */
+	fdexec(p);
+
+	/*
+	 * need to cancel async IO requests that can be cancelled and wait for those
+	 * already active.  MAY BLOCK!
+	 */
+	_aio_exec( p );
+
+	/* FIXME: Till vmspace inherit is fixed: */
+	if (!vfexec && p->vm_shm)
+		shmexec(p);
+	/* Clean up the semaphores */
+	semexit(p);
+
+	/*
+	 * Remember file name for accounting.
+	 */
+	p->p_acflag &= ~AFORK;
+	/* If the translated name isn't NULL, then we want to use
+	 * that translated name as the name we show as the "real" name.
+	 * Otherwise, use the name passed into exec.
+	 */
+	if (0 != imgp->ip_p_comm[0]) {
+		bcopy((caddr_t)imgp->ip_p_comm, (caddr_t)p->p_comm,
+			sizeof(p->p_comm));
+	} else {
+		if (imgp->ip_ndp->ni_cnd.cn_namelen > MAXCOMLEN)
+			imgp->ip_ndp->ni_cnd.cn_namelen = MAXCOMLEN;
+		bcopy((caddr_t)imgp->ip_ndp->ni_cnd.cn_nameptr, (caddr_t)p->p_comm,
+			(unsigned)imgp->ip_ndp->ni_cnd.cn_namelen);
+		p->p_comm[imgp->ip_ndp->ni_cnd.cn_namelen] = '\0';
+	}
+
+	if (kdebug_enable) {
+		long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;
+
+		/*
+		 * Collect the pathname for tracing
+		 */
+		kdbg_trace_string(p, &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);
+
+		if (vfexec)
+		{
+			KERNEL_DEBUG_CONSTANT1((TRACEDBG_CODE(DBG_TRACE_DATA, 2)) | DBG_FUNC_NONE,
+					p->p_pid ,0,0,0, (unsigned int)thread);
+			KERNEL_DEBUG_CONSTANT1((TRACEDBG_CODE(DBG_TRACE_STRING, 2)) | DBG_FUNC_NONE,
+					dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, (unsigned int)thread);
+		}
+		else
+		{
+			KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_DATA, 2)) | DBG_FUNC_NONE,
+					p->p_pid ,0,0,0,0);
+			KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_STRING, 2)) | DBG_FUNC_NONE,
+					dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
+		}
+	}
+
+#ifdef IMGPF_POWERPC
+	/*
+	 * Mark the process as powerpc or not.  If powerpc, set the affinity
+	 * flag, which will be used for grading binaries in future exec's
+	 * from the process.
+	 */
+	if (((imgp->ip_flags & IMGPF_POWERPC) != 0))
+		p->p_flag |= P_TRANSLATED;
+	else
+#endif /* IMGPF_POWERPC */
+	p->p_flag &= ~P_TRANSLATED;
+	p->p_flag &= ~P_AFFINITY;
+
+	/*
+	 * mark as execed, wakeup the process that vforked (if any) and tell
+	 * it that it now has it's own resources back
+	 */
+	p->p_flag |= P_EXEC;
+	if (p->p_pptr && (p->p_flag & P_PPWAIT)) {
+		p->p_flag &= ~P_PPWAIT;
+		wakeup((caddr_t)p->p_pptr);
+	}
+
+	if (vfexec && (p->p_flag & P_TRACED)) {
+		psignal_vfork(p, new_task, thread, SIGTRAP);
+	}
+
+	/*
+	 * The success path falls through to this label as well; on the
+	 * vfexec path, drop the references on the new task and thread
+	 * taken above, and suppress any error (the parent's state has
+	 * already been consumed, so it cannot be returned to).
+	 */
+badtoolate:
+	if (vfexec) {
+		task_deallocate(new_task);
+		thread_deallocate(thread);
+		if (error)
+			error = 0;
+	}
+
+bad:
+	return(error);
+}
+
+
+
+
+/*
+ * Our image activator table; this is the table of the image types we are
+ * capable of loading.  We list them in order of preference to ensure the
+ * fastest image load speed.
+ *
+ * The activators are tried in table order until one returns something
+ * other than -1 ("not mine, keep looking"); the NULL entry terminates
+ * the scan.
+ *
+ * XXX hardcoded, for now; should use linker sets
+ */
+struct execsw {
+	int (*ex_imgact)(struct image_params *);	/* activator; claims/loads the image */
+	const char *ex_name;				/* human-readable image type name */
+} execsw[] = {
+	{ exec_mach_imgact, "Mach-o Binary" },
+	{ exec_fat_imgact, "Fat Binary" },
+#ifdef IMGPF_POWERPC
+	{ exec_powerpc32_imgact, "PowerPC binary" },
+#endif /* IMGPF_POWERPC */
+	{ exec_shell_imgact, "Interpreter Script" },
+	{ NULL, NULL}
+};
+
+
+/*
+ * TODO: Dynamic linker header address on stack is copied via suword()
+ */