#include <kern/thread_call.h>
#include <kern/zalloc.h>
-#include <machine/spl.h>
+#include <os/log.h>
#if CONFIG_MACF
-#include <security/mac.h>
+#include <security/mac_framework.h>
#include <security/mac_mach_internal.h>
#endif
thread_t cloneproc(task_t, coalition_t *, proc_t, int, int);
proc_t forkproc(proc_t);
void forkproc_free(proc_t);
-thread_t fork_create_child(task_t parent_task, coalition_t *parent_coalitions, proc_t child, int inherit_memory, int is64bit, int in_exec);
+thread_t fork_create_child(task_t parent_task,
+ coalition_t *parent_coalitions,
+ proc_t child,
+ int inherit_memory,
+ int is_64bit_addr,
+ int is_64bit_data,
+ int in_exec);
void proc_vfork_begin(proc_t parent_proc);
void proc_vfork_end(proc_t parent_proc);
uid = kauth_getruid();
proc_list_lock();
if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
+#if (DEVELOPMENT || DEBUG) && CONFIG_EMBEDDED
+ /*
+ * On the development kernel, panic so that the fact that we hit
+ * the process limit is obvious, as this may very well wedge the
+ * system.
+ */
+ panic("The process table is full; parent pid=%d", parent_proc->p_pid);
+#endif
proc_list_unlock();
tablefull("proc");
return (EAGAIN);
count = chgproccnt(uid, 1);
if (uid != 0 &&
(rlim_t)count > parent_proc->p_rlimit[RLIMIT_NPROC].rlim_cur) {
+#if (DEVELOPMENT || DEBUG) && CONFIG_EMBEDDED
+ /*
+ * On the development kernel, panic so that the fact that we hit
+ * the per user process limit is obvious. This may be less dire
+ * than hitting the global process limit, but we cannot rely on
+ * that.
+ */
+ panic("The per-user process limit has been hit; parent pid=%d, uid=%d", parent_proc->p_pid, uid);
+#endif
err = EAGAIN;
goto bad;
}
*
* Parameters: parent_task parent task
* parent_coalitions parent's set of coalitions
- * child_proc child process
+ * child_proc child process
* inherit_memory TRUE, if the parents address space is
- * to be inherited by the child
- * is64bit TRUE, if the child being created will
- * be associated with a 64 bit process
- * rather than a 32 bit process
- * in_exec TRUE, if called from execve or posix spawn set exec
- * FALSE, if called from fork or vfexec
+ * to be inherited by the child
+ * is_64bit_addr TRUE, if the child being created will
+ * be associated with a 64 bit address space
+ * is_64bit_data TRUE if the child being created will use a
+ *				64-bit register state
+ * in_exec TRUE, if called from execve or posix spawn set exec
+ * FALSE, if called from fork or vfexec
*
* Note: This code is called in the fork() case, from the execve() call
* graph, if implementing an execve() following a vfork(), from
* in this case, 'inherit_memory' MUST be FALSE.
*/
thread_t
-fork_create_child(task_t parent_task, coalition_t *parent_coalitions, proc_t child_proc, int inherit_memory, int is64bit, int in_exec)
+fork_create_child(task_t parent_task,
+ coalition_t *parent_coalitions,
+ proc_t child_proc,
+ int inherit_memory,
+ int is_64bit_addr,
+ int is_64bit_data,
+ int in_exec)
{
thread_t child_thread = NULL;
task_t child_task;
result = task_create_internal(parent_task,
parent_coalitions,
inherit_memory,
- is64bit,
+ is_64bit_addr,
+ is_64bit_data,
TF_LRETURNWAIT | TF_LRETURNWAITER, /* All created threads will wait in task_wait_to_return */
in_exec ? TPF_EXEC_COPY : TPF_NONE, /* Mark the task exec copy if in execve */
&child_task);
goto bad;
}
- child_thread = fork_create_child(parent_task, parent_coalitions, child_proc, inherit_memory, parent_proc->p_flag & P_LP64, FALSE);
+ /*
+ * In the case where the parent_task is TASK_NULL (during the init path)
+ * we make the assumption that the register size will be the same as the
+ * address space size since there's no way to determine the possible
+ * register size until an image is exec'd.
+ *
+ * The only architecture that has different address space and register sizes
+ * (arm64_32) isn't being used within kernel-space, so the above assumption
+ * always holds true for the init path.
+ */
+ const int parent_64bit_addr = parent_proc->p_flag & P_LP64;
+ const int parent_64bit_data = (parent_task == TASK_NULL) ? parent_64bit_addr : task_get_64bit_data(parent_task);
+
+ child_thread = fork_create_child(parent_task,
+ parent_coalitions,
+ child_proc,
+ inherit_memory,
+ parent_64bit_addr,
+ parent_64bit_data,
+ FALSE);
if (child_thread == NULL) {
/*
}
child_task = get_threadtask(child_thread);
- if (parent_proc->p_flag & P_LP64) {
- task_set_64bit(child_task, TRUE);
+ if (parent_64bit_addr) {
OSBitOrAtomic(P_LP64, (UInt32 *)&child_proc->p_flag);
} else {
- task_set_64bit(child_task, FALSE);
OSBitAndAtomic(~((uint32_t)P_LP64), (UInt32 *)&child_proc->p_flag);
}
/* Free allocated memory */
FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS);
+ p->p_sigacts = NULL;
FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS);
+ p->p_stats = NULL;
+
proc_checkdeadrefs(p);
FREE_ZONE(p, sizeof *p, M_PROC);
}
if (child_proc->p_sigacts == NULL) {
printf("forkproc: M_SUBPROC zone exhausted (p_sigacts)\n");
FREE_ZONE(child_proc->p_stats, sizeof *child_proc->p_stats, M_PSTATS);
+ child_proc->p_stats = NULL;
FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
child_proc = NULL;
goto bad;
child_proc->p_rcall = thread_call_allocate((thread_call_func_t)realitexpire, child_proc);
if (child_proc->p_rcall == NULL) {
FREE_ZONE(child_proc->p_sigacts, sizeof *child_proc->p_sigacts, M_SIGACTS);
+ child_proc->p_sigacts = NULL;
FREE_ZONE(child_proc->p_stats, sizeof *child_proc->p_stats, M_PSTATS);
+ child_proc->p_stats = NULL;
FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
child_proc = NULL;
goto bad;
LIST_INSERT_HEAD(PIDHASH(child_proc->p_pid), child_proc, p_hash);
proc_list_unlock();
+ if (child_proc->p_uniqueid == startup_serial_num_procs) {
+ /*
+ * Turn off startup serial logging now that we have reached
+ * the defined number of startup processes.
+ */
+ startup_serial_logging_active = false;
+ }
/*
* We've identified the PID we are going to use; initialize the new
* Increase reference counts on shared objects.
* The p_stats and p_sigacts substructs are set in vm_fork.
*/
+#if !CONFIG_EMBEDDED
child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_DISABLE_ASLR | P_DELAYIDLESLEEP | P_SUGID));
+#else /* !CONFIG_EMBEDDED */
+ child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_DISABLE_ASLR | P_SUGID));
+#endif /* !CONFIG_EMBEDDED */
if (parent_proc->p_flag & P_PROFIL)
startprofclock(child_proc);
- child_proc->p_vfs_iopolicy = (parent_proc->p_vfs_iopolicy & (P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY));
+ child_proc->p_vfs_iopolicy = (parent_proc->p_vfs_iopolicy & (P_VFS_IOPOLICY_VALID_MASK));
/*
* Note that if the current thread has an assumed identity, this
if ((parent_proc->p_lflag & P_LREGISTER) != 0) {
child_proc->p_lflag |= P_LREGISTER;
}
- child_proc->p_wqkqueue = NULL;
child_proc->p_dispatchqueue_offset = parent_proc->p_dispatchqueue_offset;
child_proc->p_dispatchqueue_serialno_offset = parent_proc->p_dispatchqueue_serialno_offset;
+ child_proc->p_return_to_kernel_offset = parent_proc->p_return_to_kernel_offset;
+ child_proc->p_mach_thread_self_offset = parent_proc->p_mach_thread_self_offset;
+ child_proc->p_pth_tsd_offset = parent_proc->p_pth_tsd_offset;
#if PSYNCH
pth_proc_hashinit(child_proc);
#endif /* PSYNCH */
child_proc->p_memstat_memlimit_active = 0;
child_proc->p_memstat_memlimit_inactive = 0;
#if CONFIG_FREEZE
- child_proc->p_memstat_suspendedfootprint = 0;
+ child_proc->p_memstat_freeze_sharedanon_pages = 0;
#endif
child_proc->p_memstat_dirty = 0;
child_proc->p_memstat_idledeadline = 0;
void
proc_lock(proc_t p)
{
- lck_mtx_assert(proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED);
lck_mtx_lock(&p->p_mlock);
}
*/
assert(uth->uu_ar == NULL);
- if (uth->uu_kqueue_bound) {
- kevent_qos_internal_unbind(p,
- uth->uu_kqueue_bound,
- uth->uu_thread,
- uth->uu_kqueue_flags);
- uth->uu_kqueue_flags = 0;
- uth->uu_kqueue_bound = 0;
+ if (uth->uu_kqr_bound) {
+ kqueue_threadreq_unbind(p, uth->uu_kqr_bound);
}
sel = &uth->uu_select;