/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
+#include <vm/vm_shared_memory_server.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <mach-o/fat.h>
#include <mach-o/loader.h>
#include <machine/vmparam.h>
+#if KTRACE
+#include <sys/ktrace.h>
+#endif
+
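+/* Non-zero enables the working-set ("app profile") pre-heat lookup at exec time */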
+int app_profile = 0;
extern vm_map_t bsd_pageable_map;
static int load_return_to_errno(load_return_t lrtn);
int execve(struct proc *p, struct execve_args *uap, register_t *retval);
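+/* Throttled allocation and release of the NCARGS exec argument buffer; defined below */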
+static int execargs_alloc(vm_offset_t *addrp);
+static int execargs_free(vm_offset_t addr);
int
execv(p, args, retval)
load_return_t lret;
load_result_t load_result;
struct uthread *uthread;
+ vm_map_t old_map;
+ vm_map_t map;
int i;
+ boolean_t new_shared_regions = FALSE;
union {
/* #! and name of interpreter */
char ex_shell[SHSIZE];
int savedpathlen = 0;
vm_offset_t *execargsp;
char *cpnospace;
- task_t tsk;
+ task_t task;
+ task_t new_task;
+ thread_act_t thr_act;
int numthreads;
+ int vfexec = 0;
+ unsigned long arch_offset = 0;
+ unsigned long arch_size = 0;
+ char *ws_cache_name = NULL; /* used for pre-heat */
- tsk = current_task();
-
+ task = current_task();
+ thr_act = current_act();
+ uthread = get_bsdthread_info(thr_act);
- if(tsk != kernel_task) {
- numthreads = get_task_numacts(tsk);
- if (numthreads <= 0 )
- return(EINVAL);
- if (numthreads > 1) {
- return(EOPNOTSUPP);
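+ /* a vfork child still shares the parent's task here, so skip the single-thread check and build its own task below */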
+ if (uthread->uu_flag & P_VFORK) {
+ vfexec = 1; /* note that this exec is on behalf of a vfork()ed child */
+ } else {
+ if (task != kernel_task) {
+ numthreads = get_task_numacts(task);
+ if (numthreads <= 0 )
+ return(EINVAL);
+ if (numthreads > 1) {
+ return(EOPNOTSUPP);
+ }
}
}
- ret = kmem_alloc_pageable(bsd_pageable_map, &execargs, NCARGS);
- if (ret != KERN_SUCCESS)
- return(ENOMEM);
-
- uthread = get_bsdthread_info(current_act());
+ error = execargs_alloc(&execargs);
+ if (error)
+ return(error);
savedpath = execargs;
* We have to do this before namei() because in case of
* symbolic links, namei() would overwrite the original "path".
* In case the last symbolic link resolved was a relative pathname
- * we would loose the original "path", which could be an
+ * we would lose the original "path", which could be an
* absolute pathname. This might be unacceptable for dyld.
*/
/* XXX We could optimize to avoid copyinstr in the namei() */
* copyinstr will put in savedpathlen the count of
* characters (including NULL) in the path.
*/
+
+ if(app_profile != 0) {
+
+ /* grab the name of the file out of its path; */
+ /* we will need this for lookup within the name file */
+ ws_cache_name = savedpath + savedpathlen;
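+ /* walk back to just past the last '/', or to the start of the path if there is none */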
+ while (ws_cache_name[0] != '/') {
+ if(ws_cache_name == savedpath) {
+ ws_cache_name--;
+ break;
+ }
+ ws_cache_name--;
+ }
+ ws_cache_name++;
+ }
/* Save the name aside for future use */
execargsp = (vm_offset_t *)((char *)(execargs) + savedpathlen);
goto bad;
}
- /*
- * Load the Mach-O file.
- */
- VOP_UNLOCK(vp, 0, p);
- lret = load_machfile(vp, mach_header, fat_arch.offset,
- fat_arch.size, &load_result);
+ arch_offset = fat_arch.offset;
+ arch_size = fat_arch.size;
} else {
/*
* Load the Mach-O file.
*/
- VOP_UNLOCK(vp, 0, p);
- lret = load_machfile(vp, mach_header, 0,
- (u_long)vattr.va_size, &load_result);
+ arch_offset = 0;
+ arch_size = (u_long)vattr.va_size;
}
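+ /* a vfork child is given its own task and thread here; a normal exec reuses the current task and map */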
+ if (vfexec) {
+ kern_return_t result;
+
+ result = task_create_local(task, FALSE, FALSE, &new_task);
+ if (result != KERN_SUCCESS)
+ printf("execve: task_create failed. Code: 0x%x\n", result);
+ p->task = new_task;
+ set_bsdtask_info(new_task, p);
+ if (p->p_nice != 0)
+ resetpriority(p);
+ task = new_task;
+ map = get_task_map(new_task);
+ result = thread_create(new_task, &thr_act);
+ if (result != KERN_SUCCESS)
+ printf("execve: thread_create failed. Code: 0x%x\n", result);
+ uthread = get_bsdthread_info(thr_act);
+ } else {
+ map = VM_MAP_NULL;
+
+ }
+
+ /*
+ * Load the Mach-O file.
+ */
+ VOP_UNLOCK(vp, 0, p);
+ if(ws_cache_name) {
+ tws_handle_startup_file(task, cred->cr_uid,
+ ws_cache_name, vp, &new_shared_regions);
+ }
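+ /* the startup-file pass may ask for fresh shared regions; if so, install a new system shared region for this task */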
+ if (new_shared_regions) {
+ shared_region_mapping_t new_shared_region;
+ shared_region_mapping_t old_shared_region;
+
+ if (shared_file_create_system_region(&new_shared_region))
+ panic("couldn't create system_shared_region\n");
+
+ vm_get_shared_region(task, &old_shared_region);
+ vm_set_shared_region(task, new_shared_region);
+
+ shared_region_mapping_dealloc(old_shared_region);
+ }
+
+ lret = load_machfile(vp, mach_header, arch_offset,
+ arch_size, &load_result, thr_act, map);
+
if (lret != LOAD_SUCCESS) {
error = load_return_to_errno(lret);
- goto bad;
+ goto badtoolate;
}
/* load_machfile() maps the vnode */
* root set it.
*/
if (p->p_tracep && !(p->p_traceflag & KTRFAC_ROOT)) {
- vrele(p->p_tracep);
+ struct vnode *tvp = p->p_tracep;
p->p_tracep = NULL;
p->p_traceflag = 0;
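+ /* release the trace vnode only after the trace state has been cleared */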
+ vrele(tvp);
}
#endif
if (origvattr.va_mode & VSUID)
p->p_cred->p_svuid = p->p_ucred->cr_uid;
p->p_cred->p_svgid = p->p_ucred->cr_gid;
- if (p->p_flag & P_TRACED) {
+ if (!vfexec && (p->p_flag & P_TRACED))
psignal(p, SIGTRAP);
-#ifdef BSD_USE_APC
- thread_apc_set(current_act(), bsd_ast);
-#else
- ast_on(AST_BSD);
-#endif
- }
if (error) {
- goto bad;
+ goto badtoolate;
}
VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY, p);
vput(vp);
vp = NULL;
if (load_result.unixproc &&
- create_unix_stack(current_map(),
- load_result.user_stack, p)) {
+ create_unix_stack(get_task_map(task),
+ load_result.user_stack, load_result.customstack, p)) {
error = load_return_to_errno(LOAD_NOSPACE);
- goto bad;
+ goto badtoolate;
+ }
+
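+ /* point the BSD register pointer at the new thread's saved user register state */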
+ if (vfexec) {
+ uthread->uu_ar0 = (void *)get_user_regs(thr_act);
}
/*
* Copy back arglist if necessary.
*/
+
ucp = p->user_stack;
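+ /* the suword/copyout calls below must target the child's new address space */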
+ if (vfexec) {
+ old_map = vm_map_switch(get_task_map(task));
+ }
if (load_result.unixproc) {
int pathptr;
* the "path" at the begining of the execargs buffer.
* copy it just before the string area.
*/
- savedpathlen = (savedpathlen + NBPW-1) & ~(NBPW-1);
+ savedpathlen = (savedpathlen + NBPW-1) & ~(NBPW-1);
len = 0;
pathptr = ucp - savedpathlen;
error = copyoutstr(savedpath, (caddr_t)pathptr,
(unsigned)savedpathlen, &len);
- if (error)
- goto bad;
+ if (error) {
+ if (vfexec)
+ vm_map_switch(old_map);
+ goto badtoolate;
+ }
/* Save a NULL pointer below it */
(void) suword((caddr_t)(pathptr - NBPW), 0);
* and NBPW for the NULL after pointer to path.
*/
ap = ucp - na*NBPW - 3*NBPW - savedpathlen - 2*NBPW;
+#if defined(ppc)
+ thread_setuserstack(thr_act, ap); /* Set the stack */
+#else
uthread->uu_ar0[SP] = ap;
+#endif
(void) suword((caddr_t)ap, na-ne); /* argc */
nc = 0;
cc = 0;
}
if (load_result.dynlinker) {
+#if defined(ppc)
+ ap = thread_adjuserstack(thr_act, -4); /* Adjust the stack */
+#else
ap = uthread->uu_ar0[SP] -= 4;
+#endif
(void) suword((caddr_t)ap, load_result.mach_header);
}
-#if defined(i386) || defined(ppc)
+ if (vfexec) {
+ vm_map_switch(old_map);
+ }
+#if defined(ppc)
+ thread_setentrypoint(thr_act, load_result.entry_point); /* Set the entry point */
+#elif defined(i386)
uthread->uu_ar0[PC] = load_result.entry_point;
#else
#error architecture not implemented!
/*
* Reset signal state.
*/
- execsigs(p);
+ execsigs(p, thr_act);
/*
* Close file descriptors
*/
fdexec(p);
/* FIXME: Till vmspace inherit is fixed: */
- if (p->vm_shm)
+ if (!vfexec && p->vm_shm)
shmexit(p);
+ /* Clean up the semaphores */
+ semexit(p);
/*
* Remember file name for accounting.
/* Collect the pathname for tracing */
kdbg_trace_string(p, &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);
- KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_STRING, 2)) | DBG_FUNC_NONE,
- dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
+
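+ /* a vfork exec tags the trace record with the child's shuttle thread */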
+ if (vfexec)
+ KERNEL_DEBUG_CONSTANT1((TRACEDBG_CODE(DBG_TRACE_STRING, 2)) | DBG_FUNC_NONE,
+ dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, getshuttle_thread(thr_act));
+ else
+ KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_STRING, 2)) | DBG_FUNC_NONE,
+ dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
}
/*
wakeup((caddr_t)p->p_pptr);
}
+ if (vfexec && (p->p_flag & P_TRACED)) {
+ psignal_vfork(p, new_task, thr_act, SIGTRAP);
+ }
+
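+/*
+ * Both the success path and late failures arrive here; for a vfork exec,
+ * drop the creation references and clear any error so the child thread is
+ * still resumed at the bottom of the routine.
+ */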
+badtoolate:
+ if (vfexec) {
+ task_deallocate(new_task);
+ act_deallocate(thr_act);
+ if (error)
+ error = 0;
+ }
bad:
FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI);
if (vp)
vput(vp);
bad1:
-#if FIXME /* [ */
if (execargs)
- kmem_free_wakeup(bsd_pageable_map, execargs, NCARGS);
-#else /* FIXME ][ */
- if (execargs)
- kmem_free(bsd_pageable_map, execargs, NCARGS);
-#endif /* FIXME ] */
+ execargs_free(execargs);
+ if (!error && vfexec) {
+ vfork_return(current_act(), p->p_pptr, p, retval);
+ (void) thread_resume(thr_act);
+ return(0);
+ }
return(error);
}
#define unix_stack_size(p) (p->p_rlimit[RLIMIT_STACK].rlim_cur)
kern_return_t
-create_unix_stack(map, user_stack, p)
+create_unix_stack(map, user_stack, customstack, p)
vm_map_t map;
vm_offset_t user_stack;
+ int customstack;
struct proc *p;
{
vm_size_t size;
vm_offset_t addr;
p->user_stack = user_stack;
- size = round_page(unix_stack_size(p));
-#if STACK_GROWTH_UP
- /* stack always points to first address for stacks */
- addr = user_stack;
-#else STACK_GROWTH_UP
- addr = trunc_page(user_stack - size);
-#endif /* STACK_GROWTH_UP */
- return (vm_allocate(map,&addr, size, FALSE));
+ if (!customstack) {
+ size = round_page(unix_stack_size(p));
+ addr = trunc_page(user_stack - size);
+ return (vm_allocate(map,&addr, size, FALSE));
+ } else
+ return(KERN_SUCCESS);
}
#include <sys/reboot.h>
register_t retval[2];
struct uthread * ut;
- unix_master();
-
error = 0;
/* init_args are copied in string form directly from bootstrap */
error = execve(p,&init_exec_args,retval);
} while (error);
-
- unix_release();
}
/*
return (0);
}
+#include <mach/mach_types.h>
+#include <mach/vm_prot.h>
+#include <mach/semaphore.h>
+#include <mach/sync_policy.h>
+#include <kern/clock.h>
+#include <mach/kern_return.h>
+
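+/* execve_semaphore bounds the number of NCARGS pageable argument buffers in flight */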
+extern semaphore_t execve_semaphore;
+
+static int
+execargs_alloc(addrp)
+ vm_offset_t *addrp;
+{
+ kern_return_t kret;
+
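+ /* block until an argument-buffer slot is available */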
+ kret = semaphore_wait(execve_semaphore);
+ if (kret != KERN_SUCCESS)
+ switch (kret) {
+ default:
+ return (EINVAL);
+ case KERN_INVALID_ADDRESS:
+ case KERN_PROTECTION_FAILURE:
+ return (EACCES);
+ case KERN_ABORTED:
+ case KERN_OPERATION_TIMED_OUT:
+ return (EINTR);
+ }
+
+ kret = kmem_alloc_pageable(bsd_pageable_map, addrp, NCARGS);
+ if (kret != KERN_SUCCESS)
+ return (ENOMEM);
+
+ return (0);
+}
+
+static int
+execargs_free(addr)
+ vm_offset_t addr;
+{
+ kern_return_t kret;
+
+ kmem_free(bsd_pageable_map, addr, NCARGS);
+
+ kret = semaphore_signal(execve_semaphore);
+ switch (kret) {
+ case KERN_INVALID_ADDRESS:
+ case KERN_PROTECTION_FAILURE:
+ return (EINVAL);
+ case KERN_ABORTED:
+ case KERN_OPERATION_TIMED_OUT:
+ return (EINTR);
+ case KERN_SUCCESS:
+ return(0);
+ default:
+ return (EINVAL);
+ }
+}
+