#include <kern/thread_call.h>
#include <kern/zalloc.h>
-#include <machine/spl.h>
+#include <os/log.h>
#if CONFIG_MACF
-#include <security/mac.h>
+#include <security/mac_framework.h>
#include <security/mac_mach_internal.h>
#endif
uid = kauth_getruid();
proc_list_lock();
if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
+#if (DEVELOPMENT || DEBUG) && CONFIG_EMBEDDED
+ /*
+ * On the development kernel, panic so that the fact that we hit
+ * the process limit is obvious, as this may very well wedge the
+ * system.
+ */
+ panic("The process table is full; parent pid=%d", parent_proc->p_pid);
+#endif
proc_list_unlock();
tablefull("proc");
return (EAGAIN);
count = chgproccnt(uid, 1);
if (uid != 0 &&
(rlim_t)count > parent_proc->p_rlimit[RLIMIT_NPROC].rlim_cur) {
+#if (DEVELOPMENT || DEBUG) && CONFIG_EMBEDDED
+ /*
+ * On the development kernel, panic so that the fact that we hit
+ * the per user process limit is obvious. This may be less dire
+ * than hitting the global process limit, but we cannot rely on
+ * that.
+ */
+ panic("The per-user process limit has been hit; parent pid=%d, uid=%d", parent_proc->p_pid, uid);
+#endif
err = EAGAIN;
goto bad;
}
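For orientation (not part of the diff itself): when either limit check above fires, the fork fails and user space sees EAGAIN. A minimal user-space sketch, assuming only the standard getrlimit(2)/fork(2)/errno interfaces, of how a caller would observe the RLIMIT_NPROC ceiling:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>

int
main(void)
{
	struct rlimit rl;

	/* Report the current per-user process limit (RLIMIT_NPROC). */
	if (getrlimit(RLIMIT_NPROC, &rl) == 0)
		printf("RLIMIT_NPROC: cur=%llu max=%llu\n",
		    (unsigned long long)rl.rlim_cur,
		    (unsigned long long)rl.rlim_max);

	pid_t pid = fork();
	if (pid == -1 && errno == EAGAIN) {
		/* The kernel path above reported EAGAIN: a process limit was hit. */
		fprintf(stderr, "fork: %s (process limit hit)\n", strerror(errno));
	} else if (pid == 0) {
		_exit(0);	/* child exits immediately */
	}
	return 0;
}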
LIST_INSERT_HEAD(PIDHASH(child_proc->p_pid), child_proc, p_hash);
proc_list_unlock();
+ if (child_proc->p_uniqueid == startup_serial_num_procs) {
+ /*
+ * Turn off startup serial logging now that we have reached
+ * the defined number of startup processes.
+ */
+ startup_serial_logging_active = false;
+ }
/*
 * We've identified the PID we are going to use; initialize the new
 * process structure.
 */
/*
 * Increase reference counts on shared objects.
 * The p_stats and p_sigacts substructs are set in vm_fork.
 */
+#if !CONFIG_EMBEDDED
child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_DISABLE_ASLR | P_DELAYIDLESLEEP | P_SUGID));
+#else /* !CONFIG_EMBEDDED */
+ child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_DISABLE_ASLR | P_SUGID));
+#endif /* !CONFIG_EMBEDDED */
if (parent_proc->p_flag & P_PROFIL)
startprofclock(child_proc);
if ((parent_proc->p_lflag & P_LREGISTER) != 0) {
child_proc->p_lflag |= P_LREGISTER;
}
- child_proc->p_wqkqueue = NULL;
child_proc->p_dispatchqueue_offset = parent_proc->p_dispatchqueue_offset;
child_proc->p_dispatchqueue_serialno_offset = parent_proc->p_dispatchqueue_serialno_offset;
+ child_proc->p_return_to_kernel_offset = parent_proc->p_return_to_kernel_offset;
+ child_proc->p_mach_thread_self_offset = parent_proc->p_mach_thread_self_offset;
+ child_proc->p_pth_tsd_offset = parent_proc->p_pth_tsd_offset;
#if PSYNCH
pth_proc_hashinit(child_proc);
#endif /* PSYNCH */
void
proc_lock(proc_t p)
{
- lck_mtx_assert(proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED);
lck_mtx_lock(&p->p_mlock);
}
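For symmetry (also not part of the diff): proc_lock() pairs with a proc_unlock() that releases the same per-process mutex. The body below is a sketch inferred from the p_mlock field used above, not taken from this hunk:

void
proc_unlock(proc_t p)
{
	/* Assumed counterpart to proc_lock(): drop the per-proc mutex. */
	lck_mtx_unlock(&p->p_mlock);
}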
assert(uth->uu_ar == NULL);
if (uth->uu_kqueue_bound) {
- kevent_qos_internal_unbind(p,
- uth->uu_kqueue_bound,
+ kevent_qos_internal_unbind(p,
+ 0, /* didn't save qos_class */
uth->uu_thread,
uth->uu_kqueue_flags);
- uth->uu_kqueue_flags = 0;
- uth->uu_kqueue_bound = 0;
+ assert(uth->uu_kqueue_override_is_sync == 0);
}
sel = &uth->uu_select;