#include <kern/thread_call.h>
#include <kern/zalloc.h>
-#include <machine/spl.h>
+#include <os/log.h>
#if CONFIG_MACF
-#include <security/mac.h>
+#include <security/mac_framework.h>
#include <security/mac_mach_internal.h>
#endif
extern void act_thread_catt(void *ctx);
void thread_set_child(thread_t child, int pid);
void *act_thread_csave(void);
+extern boolean_t task_is_exec_copy(task_t);
thread_t cloneproc(task_t, coalition_t *, proc_t, int, int);
proc_t forkproc(proc_t);
void forkproc_free(proc_t);
-thread_t fork_create_child(task_t parent_task, coalition_t *parent_coalitions, proc_t child, int inherit_memory, int is64bit);
+thread_t fork_create_child(task_t parent_task, coalition_t *parent_coalitions, proc_t child, int inherit_memory, int is64bit, int in_exec);
void proc_vfork_begin(proc_t parent_proc);
void proc_vfork_end(proc_t parent_proc);
* Parameters: parent_proc parent process of the process being
* child_threadp pointer to location to receive the
* Mach thread_t of the child process
- * breated
+ * created
* kind kind of creation being requested
* coalitions if spawn, the set of coalitions the
* child process should join, or NULL to
uid = kauth_getruid();
proc_list_lock();
if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
+#if (DEVELOPMENT || DEBUG) && CONFIG_EMBEDDED
+ /*
+ * On the development kernel, panic so that the fact that we hit
+ * the process limit is obvious, as this may very well wedge the
+ * system.
+ */
+ panic("The process table is full; parent pid=%d", parent_proc->p_pid);
+#endif
proc_list_unlock();
tablefull("proc");
return (EAGAIN);
count = chgproccnt(uid, 1);
if (uid != 0 &&
(rlim_t)count > parent_proc->p_rlimit[RLIMIT_NPROC].rlim_cur) {
+#if (DEVELOPMENT || DEBUG) && CONFIG_EMBEDDED
+ /*
+ * On the development kernel, panic so that the fact that we hit
+ * the per user process limit is obvious. This may be less dire
+ * than hitting the global process limit, but we cannot rely on
+ * that.
+ */
+ panic("The per-user process limit has been hit; parent pid=%d, uid=%d", parent_proc->p_pid, uid);
+#endif
err = EAGAIN;
goto bad;
}
* is64bit TRUE, if the child being created will
* be associated with a 64 bit process
* rather than a 32 bit process
+ * in_exec TRUE, if called from execve or posix_spawn with POSIX_SPAWN_SETEXEC
+ * FALSE, if called from fork or vfexec
*
* Note: This code is called in the fork() case, from the execve() call
* graph, if implementing an execve() following a vfork(), from
* in this case, 'inherit_memory' MUST be FALSE.
*/
thread_t
-fork_create_child(task_t parent_task, coalition_t *parent_coalitions, proc_t child_proc, int inherit_memory, int is64bit)
+fork_create_child(task_t parent_task, coalition_t *parent_coalitions, proc_t child_proc, int inherit_memory, int is64bit, int in_exec)
{
thread_t child_thread = NULL;
task_t child_task;
parent_coalitions,
inherit_memory,
is64bit,
- TF_NONE,
+ TF_LRETURNWAIT | TF_LRETURNWAITER, /* All created threads will wait in task_wait_to_return */
+ in_exec ? TPF_EXEC_COPY : TPF_NONE, /* Mark the task as an exec copy if called from execve */
&child_task);
if (result != KERN_SUCCESS) {
printf("%s: task_create_internal failed. Code: %d\n",
goto bad;
}
- /* Set the child process task to the new task */
- child_proc->task = child_task;
+ if (!in_exec) {
+ /*
+ * Set the child process task to the new task if not in exec;
+ * for the exec case, the task is set in proc_exec_switch_task
+ * after image activation.
+ */
+ child_proc->task = child_task;
+ }
/* Set child task process to child proc */
set_bsdtask_info(child_task, child_proc);
if (timerisset(&child_proc->p_rlim_cpu))
task_vtimer_set(child_task, TASK_VTIMER_RLIM);
- /* Set/clear 64 bit vm_map flag */
- if (is64bit)
- vm_map_set_64bit(get_task_map(child_task));
- else
- vm_map_set_32bit(get_task_map(child_task));
-
/*
* Set child process BSD visible scheduler priority if nice value
* inherited from parent
if (child_proc->p_nice != 0)
resetpriority(child_proc);
- /* Create a new thread for the child process */
- result = thread_create_with_continuation(child_task, &child_thread, (thread_continue_t)proc_wait_to_return);
+ /*
+ * Create a new thread for the child process.
+ * The new thread waits on the event that 'task_clear_return_wait' signals.
+ */
+ result = thread_create_waiting(child_task,
+ (thread_continue_t)task_wait_to_return,
+ task_get_return_wait_event(child_task),
+ &child_thread);
+
if (result != KERN_SUCCESS) {
printf("%s: thread_create failed. Code: %d\n",
__func__, result);
#endif
/* "Return" to the child */
- proc_clear_return_wait(child_proc, child_thread);
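+ /* Wake the child's initial thread, which was created waiting on the task's return-wait event */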
+ task_clear_return_wait(get_threadtask(child_thread));
/* drop the extra references we got during the creation */
if ((child_task = (task_t)get_threadtask(child_thread)) != NULL) {
goto bad;
}
- child_thread = fork_create_child(parent_task, parent_coalitions, child_proc, inherit_memory, parent_proc->p_flag & P_LP64);
+ child_thread = fork_create_child(parent_task, parent_coalitions, child_proc, inherit_memory, parent_proc->p_flag & P_LP64, FALSE);
if (child_thread == NULL) {
/*
LIST_INSERT_HEAD(PIDHASH(child_proc->p_pid), child_proc, p_hash);
proc_list_unlock();
+ if (child_proc->p_uniqueid == startup_serial_num_procs) {
+ /*
+ * Turn off startup serial logging now that we have reached
+ * the defined number of startup processes.
+ */
+ startup_serial_logging_active = false;
+ }
/*
* We've identified the PID we are going to use; initialize the new
* Increase reference counts on shared objects.
* The p_stats and p_sigacts substructs are set in vm_fork.
*/
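+ /* Note: embedded configurations do not inherit P_DELAYIDLESLEEP from the parent */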
+#if !CONFIG_EMBEDDED
child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_DISABLE_ASLR | P_DELAYIDLESLEEP | P_SUGID));
+#else /* !CONFIG_EMBEDDED */
+ child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_DISABLE_ASLR | P_SUGID));
+#endif /* !CONFIG_EMBEDDED */
if (parent_proc->p_flag & P_PROFIL)
startprofclock(child_proc);
*/
proc_signalstart(child_proc, 0);
proc_transstart(child_proc, 0, 0);
- proc_set_return_wait(child_proc);
child_proc->p_pcaction = 0;
if ((parent_proc->p_lflag & P_LREGISTER) != 0) {
child_proc->p_lflag |= P_LREGISTER;
}
- child_proc->p_wqkqueue = NULL;
child_proc->p_dispatchqueue_offset = parent_proc->p_dispatchqueue_offset;
child_proc->p_dispatchqueue_serialno_offset = parent_proc->p_dispatchqueue_serialno_offset;
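+ /* Also inherit the pthread TSD, mach_thread_self, and return-to-kernel offsets from the parent */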
+ child_proc->p_return_to_kernel_offset = parent_proc->p_return_to_kernel_offset;
+ child_proc->p_mach_thread_self_offset = parent_proc->p_mach_thread_self_offset;
+ child_proc->p_pth_tsd_offset = parent_proc->p_pth_tsd_offset;
#if PSYNCH
pth_proc_hashinit(child_proc);
#endif /* PSYNCH */
void
proc_lock(proc_t p)
{
- lck_mtx_assert(proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED);
lck_mtx_lock(&p->p_mlock);
}
uth->uu_sigmask = uth_parent->uu_sigmask;
}
uth->uu_context.vc_thread = thread;
- TAILQ_INSERT_TAIL(&p->p_uthlist, uth, uu_list);
+ /*
+ * Do not add the uthread to the proc's uthlist for an exec copy
+ * task, since it does not hold a ref on the proc.
+ */
+ if (!task_is_exec_copy(task)) {
+ TAILQ_INSERT_TAIL(&p->p_uthlist, uth, uu_list);
+ }
proc_unlock(p);
#if CONFIG_DTRACE
- if (p->p_dtrace_ptss_pages != NULL) {
+ if (p->p_dtrace_ptss_pages != NULL && !task_is_exec_copy(task)) {
uth->t_dtrace_scratch = dtrace_ptss_claim_entry(p);
}
#endif
assert(uth->uu_ar == NULL);
if (uth->uu_kqueue_bound) {
- kevent_qos_internal_unbind(p,
- uth->uu_kqueue_bound,
+ kevent_qos_internal_unbind(p,
+ 0, /* didn't save qos_class */
uth->uu_thread,
uth->uu_kqueue_flags);
- uth->uu_kqueue_flags = 0;
- uth->uu_kqueue_bound = 0;
+ assert(uth->uu_kqueue_override_is_sync == 0);
}
sel = &uth->uu_select;
/*
* Remove the thread from the process list and
* transfer [appropriate] pending signals to the process.
+ * Do not remove the uthread from the proc's uthlist for an exec
+ * copy task, since it does not hold a ref on the proc and was
+ * never added to the list.
*/
- if (get_bsdtask_info(task) == p) {
+ if (get_bsdtask_info(task) == p && !task_is_exec_copy(task)) {
proc_lock(p);
+
TAILQ_REMOVE(&p->p_uthlist, uth, uu_list);
p->p_siglist |= (uth->uu_siglist & execmask & (~p->p_sigignore | sigcantmask));
proc_unlock(p);
#if CONFIG_DTRACE
struct dtrace_ptss_page_entry *tmpptr = uth->t_dtrace_scratch;
uth->t_dtrace_scratch = NULL;
- if (tmpptr != NULL) {
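+ /* Exec copy tasks never claim a dtrace scratch entry, so there is nothing to release for them */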
+ if (tmpptr != NULL && !task_is_exec_copy(task)) {
dtrace_ptss_release_entry(p, tmpptr);
}
#endif