/*
- * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <os/log.h>
-#include <os/log.h>
-
#if CONFIG_MACF
#include <security/mac_framework.h>
#include <security/mac_mach_internal.h>
void proc_vfork_begin(proc_t parent_proc);
void proc_vfork_end(proc_t parent_proc);
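+
+/*
+ * Lock groups and zones are now registered statically at boot rather than
+ * on first use: LCK_GRP_DECLARE and ZONE_DECLARE/ZONE_INIT emit startup
+ * registrations, replacing the lazy uthread_zone_init() path removed below.
+ * ZC_ZFREE_CLEARMEM asks the allocator to zero elements when they are freed
+ * back to the zone, and SECURITY_READ_ONLY_LATE places the proc_zone pointer
+ * in memory that becomes read-only after early-boot lockdown.
+ */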
+static LCK_GRP_DECLARE(rethrottle_lock_grp, "rethrottle");
+static ZONE_DECLARE(uthread_zone, "uthreads",
+ sizeof(struct uthread), ZC_ZFREE_CLEARMEM);
+
+SECURITY_READ_ONLY_LATE(zone_t) proc_zone;
+ZONE_INIT(&proc_zone, "proc", sizeof(struct proc), ZC_ZFREE_CLEARMEM,
+ ZONE_ID_PROC, NULL);
+
+ZONE_DECLARE(proc_stats_zone, "pstats",
+ sizeof(struct pstats), ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);
+
+ZONE_DECLARE(proc_sigacts_zone, "sigacts",
+ sizeof(struct sigacts), ZC_NOENCRYPT);
+
#define DOFORK 0x1 /* fork() system call */
#define DOVFORK 0x2 /* vfork() system call */
proc_t child_proc = NULL; /* set in switch, but compiler... */
thread_t child_thread = NULL;
uid_t uid;
- int count;
+ size_t count;
int err = 0;
int spawn = 0;
+ rlim_t rlimit_nproc_cur;
/*
* Although process entries are dynamically created, we still keep
uid = kauth_getruid();
proc_list_lock();
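+ /*
+ * The final process table slot is reserved for the superuser: non-root
+ * forks fail once nprocs reaches maxproc - 1, root only at maxproc.
+ */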
if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
-#if (DEVELOPMENT || DEBUG) && CONFIG_EMBEDDED
+#if (DEVELOPMENT || DEBUG) && !defined(XNU_TARGET_OS_OSX)
/*
* On the development kernel, panic so that the fact that we hit
* the process limit is obvious, as this may very well wedge the
* (locking protection is provided by list lock held in chgproccnt)
*/
count = chgproccnt(uid, 1);
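+ /*
+ * Snapshot RLIMIT_NPROC through the proc_limitgetcur() accessor rather
+ * than dereferencing p_rlimit directly, so the read cannot race a
+ * concurrent plimit swap (the TRUE argument appears to ask the accessor
+ * to take the proc lock around the read).
+ */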
+ rlimit_nproc_cur = proc_limitgetcur(parent_proc, RLIMIT_NPROC, TRUE);
if (uid != 0 &&
- (rlim_t)count > parent_proc->p_rlimit[RLIMIT_NPROC].rlim_cur) {
-#if (DEVELOPMENT || DEBUG) && CONFIG_EMBEDDED
+ (rlim_t)count > rlimit_nproc_cur) {
+#if (DEVELOPMENT || DEBUG) && !defined(XNU_TARGET_OS_OSX)
/*
* On the development kernel, panic so that the fact that we hit
* the per user process limit is obvious. This may be less dire
*/
spawn = 1;
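+ /*
+ * OS_FALLTHROUGH expands to the compiler fallthrough attribute, keeping
+ * this deliberate fall-through into PROC_CREATE_FORK warning-clean under
+ * -Wimplicit-fallthrough, which the old lint-style comment no longer is.
+ */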
- /* FALLSTHROUGH */
+ OS_FALLTHROUGH;
case PROC_CREATE_FORK:
/*
* need to free it. If it's a shared copy, we need to drop our
* reference on it.
*/
- proc_limitdrop(p, 0);
- p->p_limit = NULL;
+ proc_limitdrop(p);
#if SYSV_SHM
/* Need to drop references to the shared memory segment(s), if any */
lck_spin_destroy(&p->p_slock, proc_slock_grp);
/* Release the credential reference */
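+ /*
+ * kauth_cred_unref() clears the pointer it is handed, so the unref is
+ * routed through a local copy instead of &p->p_ucred, likely to keep all
+ * stores to the p_ucred field explicit at this site; the (now NULL) value
+ * is written back afterwards.
+ */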
- kauth_cred_unref(&p->p_ucred);
+ kauth_cred_t tmp_ucred = p->p_ucred;
+ kauth_cred_unref(&tmp_ucred);
+ p->p_ucred = tmp_ucred;
proc_list_lock();
/* Decrement the count of processes in the system */
thread_call_free(p->p_rcall);
/* Free allocated memory */
- FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS);
+ zfree(proc_sigacts_zone, p->p_sigacts);
p->p_sigacts = NULL;
- FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS);
+ zfree(proc_stats_zone, p->p_stats);
p->p_stats = NULL;
+ FREE(p->p_subsystem_root_path, M_SBUF);
+ p->p_subsystem_root_path = NULL;
proc_checkdeadrefs(p);
- FREE_ZONE(p, sizeof *p, M_PROC);
+ zfree(proc_zone, p);
}
int error = 0;
struct session *sessp;
uthread_t parent_uthread = (uthread_t)get_bsdthread_info(current_thread());
+ rlim_t rlimit_cpu_cur;
- MALLOC_ZONE(child_proc, proc_t, sizeof *child_proc, M_PROC, M_WAITOK);
- if (child_proc == NULL) {
- printf("forkproc: M_PROC zone exhausted\n");
- goto bad;
- }
- /* zero it out as we need to insert in hash */
- bzero(child_proc, sizeof *child_proc);
-
- MALLOC_ZONE(child_proc->p_stats, struct pstats *,
- sizeof *child_proc->p_stats, M_PSTATS, M_WAITOK);
- if (child_proc->p_stats == NULL) {
- printf("forkproc: M_SUBPROC zone exhausted (p_stats)\n");
- FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
- child_proc = NULL;
- goto bad;
- }
- MALLOC_ZONE(child_proc->p_sigacts, struct sigacts *,
- sizeof *child_proc->p_sigacts, M_SIGACTS, M_WAITOK);
- if (child_proc->p_sigacts == NULL) {
- printf("forkproc: M_SUBPROC zone exhausted (p_sigacts)\n");
- FREE_ZONE(child_proc->p_stats, sizeof *child_proc->p_stats, M_PSTATS);
- child_proc->p_stats = NULL;
- FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
- child_proc = NULL;
- goto bad;
- }
+ child_proc = zalloc_flags(proc_zone, Z_WAITOK | Z_ZERO);
+ child_proc->p_stats = zalloc_flags(proc_stats_zone, Z_WAITOK | Z_ZERO);
+ child_proc->p_sigacts = zalloc_flags(proc_sigacts_zone, Z_WAITOK);
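+ /*
+ * zalloc_flags() with Z_WAITOK sleeps until an element is available and
+ * does not return NULL, so the old zone-exhaustion checks and unwind code
+ * above are no longer needed.  Z_ZERO replaces the explicit bzero();
+ * p_sigacts omits it, presumably because it is fully copied from the
+ * parent (or cleared) later in this function.
+ */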
/* allocate a callout for use by interval timers */
child_proc->p_rcall = thread_call_allocate((thread_call_func_t)realitexpire, child_proc);
if (child_proc->p_rcall == NULL) {
- FREE_ZONE(child_proc->p_sigacts, sizeof *child_proc->p_sigacts, M_SIGACTS);
- child_proc->p_sigacts = NULL;
- FREE_ZONE(child_proc->p_stats, sizeof *child_proc->p_stats, M_PSTATS);
- child_proc->p_stats = NULL;
- FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
+ zfree(proc_sigacts_zone, child_proc->p_sigacts);
+ zfree(proc_stats_zone, child_proc->p_stats);
+ zfree(proc_zone, child_proc);
child_proc = NULL;
goto bad;
}
__nochk_bcopy(&parent_proc->p_startcopy, &child_proc->p_startcopy,
(unsigned) ((caddr_t)&child_proc->p_endcopy - (caddr_t)&child_proc->p_startcopy));
+#if defined(HAS_APPLE_PAC)
+ /*
+ * The p_textvp and p_pgrp pointers are address-diversified by PAC, so we must
+ * resign them here for the new proc
+ */
+ if (parent_proc->p_textvp) {
+ child_proc->p_textvp = parent_proc->p_textvp;
+ }
+
+ if (parent_proc->p_pgrp) {
+ child_proc->p_pgrp = parent_proc->p_pgrp;
+ }
+#endif /* defined(HAS_APPLE_PAC) */
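+ /*
+ * The wholesale p_startcopy bcopy above copies raw bits and cannot fix up
+ * PAC signatures, so these plain per-field assignments do the re-signing:
+ * assuming the usual __ptrauth field qualifiers, the compiler
+ * authenticates the value on load and re-signs it on store with the
+ * child's address diversity.
+ */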
+
+ child_proc->p_sessionid = parent_proc->p_sessionid;
+
/*
* Some flags are inherited from the parent.
* Duplicate sub-structures as needed.
* Increase reference counts on shared objects.
* The p_stats and p_sigacts substructs are set in vm_fork.
*/
-#if !CONFIG_EMBEDDED
- child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_DISABLE_ASLR | P_DELAYIDLESLEEP | P_SUGID));
-#else /* !CONFIG_EMBEDDED */
- child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_DISABLE_ASLR | P_SUGID));
-#endif /* !CONFIG_EMBEDDED */
+#if CONFIG_DELAY_IDLE_SLEEP
+ child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_TRANSLATED | P_DISABLE_ASLR | P_DELAYIDLESLEEP | P_SUGID | P_AFFINITY));
+#else /* CONFIG_DELAY_IDLE_SLEEP */
+ child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_TRANSLATED | P_DISABLE_ASLR | P_SUGID));
+#endif /* CONFIG_DELAY_IDLE_SLEEP */
child_proc->p_vfs_iopolicy = (parent_proc->p_vfs_iopolicy & (P_VFS_IOPOLICY_VALID_MASK));
*
* XXX may fail to copy descriptors to child
*/
+ lck_rw_init(&child_proc->p_dirs_lock, proc_dirslock_grp, proc_lck_attr);
child_proc->p_fd = fdcopy(parent_proc, parent_uthread->uu_cdir);
#if SYSV_SHM
(void)shmfork(parent_proc, child_proc);
}
#endif
+
/*
- * inherit the limit structure to child
+ * Child inherits the parent's plimit
*/
proc_limitfork(parent_proc, child_proc);
- if (child_proc->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
- uint64_t rlim_cur = child_proc->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur;
- child_proc->p_rlim_cpu.tv_sec = (rlim_cur > __INT_MAX__) ? __INT_MAX__ : rlim_cur;
+ rlimit_cpu_cur = proc_limitgetcur(child_proc, RLIMIT_CPU, TRUE);
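+ /*
+ * Clamp the inherited CPU limit to __INT_MAX__ so a large but finite
+ * rlim_t value cannot overflow tv_sec arithmetic further downstream.
+ */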
+ if (rlimit_cpu_cur != RLIM_INFINITY) {
+ child_proc->p_rlim_cpu.tv_sec = (rlimit_cpu_cur > __INT_MAX__) ? __INT_MAX__ : rlimit_cpu_cur;
}
/* Initialize new process stats, including start time */
/* <rdar://6640543> non-zeroed portion contains garbage AFAICT */
- bzero(child_proc->p_stats, sizeof(*child_proc->p_stats));
microtime_with_abstime(&child_proc->p_start, &child_proc->p_stats->ps_start);
if (parent_proc->p_sigacts != NULL) {
child_proc->p_memstat_idledeadline = 0;
#endif /* CONFIG_MEMORYSTATUS */
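+ /*
+ * parent_length includes the terminating NUL, so the child gets its own
+ * heap copy of the subsystem root path and can release it independently
+ * via the matching FREE() in the teardown path above.
+ */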
+ if (parent_proc->p_subsystem_root_path) {
+ size_t parent_length = strlen(parent_proc->p_subsystem_root_path) + 1;
+ MALLOC(child_proc->p_subsystem_root_path, char *, parent_length, M_SBUF, M_WAITOK | M_ZERO);
+ memcpy(child_proc->p_subsystem_root_path, parent_proc->p_subsystem_root_path, parent_length);
+ }
+
bad:
return child_proc;
}
lck_mtx_unlock(&p->p_ucred_mlock);
}
-#include <kern/zalloc.h>
-
-struct zone *uthread_zone = NULL;
-
-static lck_grp_t *rethrottle_lock_grp;
-static lck_attr_t *rethrottle_lock_attr;
-static lck_grp_attr_t *rethrottle_lock_grp_attr;
-
-static void
-uthread_zone_init(void)
-{
- assert(uthread_zone == NULL);
-
- rethrottle_lock_grp_attr = lck_grp_attr_alloc_init();
- rethrottle_lock_grp = lck_grp_alloc_init("rethrottle", rethrottle_lock_grp_attr);
- rethrottle_lock_attr = lck_attr_alloc_init();
-
- uthread_zone = zinit(sizeof(struct uthread),
- thread_max * sizeof(struct uthread),
- THREAD_CHUNK * sizeof(struct uthread),
- "uthreads");
-}
-
void *
uthread_alloc(task_t task, thread_t thread, int noinherit)
{
uthread_t uth_parent;
void *ut;
- if (uthread_zone == NULL) {
- uthread_zone_init();
- }
-
- ut = (void *)zalloc(uthread_zone);
- bzero(ut, sizeof(struct uthread));
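+ /*
+ * uthread_zone is now declared statically at the top of the file, so the
+ * lazy uthread_zone_init() bootstrap is gone and Z_ZERO makes the explicit
+ * bzero() unnecessary.
+ */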
+ ut = zalloc_flags(uthread_zone, Z_WAITOK | Z_ZERO);
p = (proc_t) get_bsdtask_info(task);
uth = (uthread_t)ut;
uth->uu_thread = thread;
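+ /*
+ * LCK_GRP_DECLARE provides a statically initialized lock group and
+ * LCK_ATTR_NULL selects the default lock attributes, replacing the
+ * runtime-allocated group and attr objects uthread_zone_init() created.
+ */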
- lck_spin_init(&uth->uu_rethrottle_lock, rethrottle_lock_grp,
- rethrottle_lock_attr);
+ lck_spin_init(&uth->uu_rethrottle_lock, &rethrottle_lock_grp,
+ LCK_ATTR_NULL);
/*
* Thread inherits credential from the creating thread, if both
uth->t_tombstone = NULL;
}
- lck_spin_destroy(&uth->uu_rethrottle_lock, rethrottle_lock_grp);
+ lck_spin_destroy(&uth->uu_rethrottle_lock, &rethrottle_lock_grp);
uthread_cleanup_name(uthread);
/* and free the uthread itself */