X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/a1c7dba18ef36983396c282fe85292db066e39db..5ba3f43ea354af8ad55bea84372a2bc834d8757c:/osfmk/kern/thread.c diff --git a/osfmk/kern/thread.c b/osfmk/kern/thread.c index b9a7ae0eb..0057c988f 100644 --- a/osfmk/kern/thread.c +++ b/osfmk/kern/thread.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2014 Apple Inc. All rights reserved. + * Copyright (c) 2000-2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -112,15 +112,26 @@ #include #include #include +#include +#include #include #include #include #include +#include #include +#include + +#include #if KPC #include #endif +#if MONOTONIC +#include +#include +#endif /* MONOTONIC */ + #include #include #include @@ -129,8 +140,11 @@ #include #include - +#include #include +#include + +#include /* * Exported interfaces @@ -140,6 +154,7 @@ #include #include #include +#include static struct zone *thread_zone; static lck_grp_attr_t thread_lck_grp_attr; @@ -154,6 +169,18 @@ static queue_head_t thread_stack_queue; decl_simple_lock_data(static,thread_terminate_lock) static queue_head_t thread_terminate_queue; +static queue_head_t crashed_threads_queue; + +decl_simple_lock_data(static,thread_exception_lock) +static queue_head_t thread_exception_queue; + +struct thread_exception_elt { + queue_chain_t elt; + exception_type_t exception_type; + task_t exception_task; + thread_t exception_thread; +}; + static struct thread thread_template, init_thread; static void sched_call_null( @@ -162,7 +189,9 @@ static void sched_call_null( #ifdef MACH_BSD extern void proc_exit(void *); +extern mach_exception_data_type_t proc_encode_exit_exception_code(void *); extern uint64_t get_dispatchqueue_offset_from_proc(void *); +extern uint64_t get_return_to_kernel_offset_from_proc(void *p); extern int proc_selfpid(void); extern char * proc_name_address(void *p); #endif /* MACH_BSD */ @@ -177,8 +206,7 @@ static uint64_t thread_unique_id = 100; struct _thread_ledger_indices thread_ledgers = { -1 }; static ledger_template_t thread_ledger_template = NULL; -void init_thread_ledgers(void); -int task_disable_cpumon(task_t task); +static void init_thread_ledgers(void); #if CONFIG_JETSAM void jetsam_on_ledger_cpulimit_exceeded(void); @@ -193,7 +221,7 @@ void jetsam_on_ledger_cpulimit_exceeded(void); #define CPUMON_USTACKSHOTS_TRIGGER_DEFAULT_PCT 70 int cpumon_ustackshots_trigger_pct; /* Percentage. Level at which we start gathering telemetry. */ -void __attribute__((noinline)) THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU__SENDING_EXC_RESOURCE(void); +void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void); /* * The smallest interval over which we support limiting CPU consumption is 1ms @@ -207,6 +235,10 @@ thread_bootstrap(void) * Fill in a template thread for fast initialization. 
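 *
 *	A rough sketch of the template idiom used here, with placeholder names
 *	(widget, widget_zone and W_IDLE are illustrative, not xnu symbols): the
 *	defaults are assigned once into a static template, and each new instance
 *	is then initialized with a single struct copy instead of field by field:
 *
 *	    static struct widget widget_template;   // filled in once at boot
 *
 *	    void widget_bootstrap(void) {
 *	        widget_template.ref_count = 2;
 *	        widget_template.state     = W_IDLE;
 *	    }
 *
 *	    struct widget *widget_create(void) {
 *	        struct widget *w = zalloc(widget_zone);
 *	        *w = widget_template;                // one copy, many defaults
 *	        return w;
 *	    }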
*/ +#if MACH_ASSERT + thread_template.thread_magic = THREAD_MAGIC; +#endif /* MACH_ASSERT */ + thread_template.runq = PROCESSOR_NULL; thread_template.ref_count = 2; @@ -214,7 +246,7 @@ thread_bootstrap(void) thread_template.reason = AST_NONE; thread_template.at_safe_point = FALSE; thread_template.wait_event = NO_EVENT64; - thread_template.wait_queue = WAIT_QUEUE_NULL; + thread_template.waitq = NULL; thread_template.wait_result = THREAD_WAITING; thread_template.options = THREAD_ABORTSAFE; thread_template.state = TH_WAIT | TH_UNINT; @@ -227,6 +259,7 @@ thread_bootstrap(void) thread_template.sched_flags = 0; thread_template.saved_mode = TH_MODE_NONE; thread_template.safe_release = 0; + thread_template.th_sched_bucket = TH_BUCKET_RUN; thread_template.sfi_class = SFI_CLASS_UNSPECIFIED; thread_template.sfi_wait_class = SFI_CLASS_UNSPECIFIED; @@ -236,25 +269,24 @@ thread_bootstrap(void) thread_template.static_param = 0; thread_template.policy_reset = 0; - thread_template.priority = 0; + thread_template.base_pri = BASEPRI_DEFAULT; thread_template.sched_pri = 0; thread_template.max_priority = 0; thread_template.task_priority = 0; thread_template.promotions = 0; thread_template.pending_promoter_index = 0; - thread_template.pending_promoter[0] = + thread_template.pending_promoter[0] = NULL; thread_template.pending_promoter[1] = NULL; thread_template.rwlock_count = 0; -#if MACH_ASSERT - thread_template.SHARE_COUNT = 0; - thread_template.BG_COUNT = 0; -#endif /* MACH_ASSERT */ thread_template.realtime.deadline = UINT64_MAX; thread_template.quantum_remaining = 0; thread_template.last_run_time = 0; + thread_template.last_made_runnable_time = THREAD_NOT_RUNNABLE; + thread_template.last_basepri_change_time = THREAD_NOT_RUNNABLE; + thread_template.same_pri_latency = 0; thread_template.computation_metered = 0; thread_template.computation_epoch = 0; @@ -267,6 +299,11 @@ thread_bootstrap(void) #endif thread_template.c_switch = thread_template.p_switch = thread_template.ps_switch = 0; +#if MONOTONIC + memset(&thread_template.t_monotonic, 0, + sizeof(thread_template.t_monotonic)); +#endif /* MONOTONIC */ + thread_template.bound_processor = PROCESSOR_NULL; thread_template.last_processor = PROCESSOR_NULL; @@ -274,22 +311,23 @@ thread_bootstrap(void) timer_init(&thread_template.user_timer); timer_init(&thread_template.system_timer); + timer_init(&thread_template.ptime); thread_template.user_timer_save = 0; thread_template.system_timer_save = 0; thread_template.vtimer_user_save = 0; thread_template.vtimer_prof_save = 0; thread_template.vtimer_rlim_save = 0; + thread_template.vtimer_qos_save = 0; +#if CONFIG_SCHED_SFI thread_template.wait_sfi_begin_time = 0; +#endif thread_template.wait_timer_is_set = FALSE; thread_template.wait_timer_active = 0; thread_template.depress_timer_active = 0; - thread_template.special_handler.handler = special_handler; - thread_template.special_handler.next = NULL; - thread_template.recover = (vm_offset_t)NULL; thread_template.map = VM_MAP_NULL; @@ -300,6 +338,13 @@ thread_bootstrap(void) thread_template.t_dtrace_tracing = 0; #endif /* CONFIG_DTRACE */ +#if KPERF + thread_template.kperf_flags = 0; + thread_template.kperf_pet_gen = 0; + thread_template.kperf_c_switch = 0; + thread_template.kperf_pet_cnt = 0; +#endif + #if KPC thread_template.kpc_buf = NULL; #endif @@ -308,7 +353,11 @@ thread_bootstrap(void) thread_template.hv_thread_target = NULL; #endif /* HYPERVISOR */ - thread_template.t_chud = 0; +#if (DEVELOPMENT || DEBUG) + thread_template.t_page_creation_throttled_hard = 0; + 
thread_template.t_page_creation_throttled_soft = 0; +#endif /* DEVELOPMENT || DEBUG */ + thread_template.t_page_creation_throttled = 0; thread_template.t_page_creation_count = 0; thread_template.t_page_creation_time = 0; @@ -319,19 +368,20 @@ thread_bootstrap(void) thread_template.t_ledger = LEDGER_NULL; thread_template.t_threadledger = LEDGER_NULL; -#ifdef CONFIG_BANK thread_template.t_bankledger = LEDGER_NULL; thread_template.t_deduct_bank_ledger_time = 0; -#endif - thread_template.requested_policy = default_task_requested_policy; - thread_template.effective_policy = default_task_effective_policy; - thread_template.pended_policy = default_task_pended_policy; + thread_template.requested_policy = (struct thread_requested_policy) {}; + thread_template.effective_policy = (struct thread_effective_policy) {}; bzero(&thread_template.overrides, sizeof(thread_template.overrides)); + thread_template.sync_ipc_overrides = 0; thread_template.iotier_override = THROTTLE_LEVEL_NONE; thread_template.thread_io_stats = NULL; +#if CONFIG_EMBEDDED + thread_template.taskwatch = NULL; +#endif /* CONFIG_EMBEDDED */ thread_template.thread_callout_interrupt_wakeups = thread_template.thread_callout_platform_idle_wakeups = 0; thread_template.thread_timer_wakeups_bin_1 = thread_template.thread_timer_wakeups_bin_2 = 0; @@ -342,6 +392,8 @@ thread_bootstrap(void) thread_template.ith_voucher_name = MACH_PORT_NULL; thread_template.ith_voucher = IPC_VOUCHER_NULL; + thread_template.th_work_interval = NULL; + init_thread = thread_template; machine_set_current_thread(&init_thread); } @@ -370,9 +422,11 @@ thread_init(void) lck_grp_attr_setdefault(&thread_lck_grp_attr); lck_grp_init(&thread_lck_grp, "thread", &thread_lck_grp_attr); lck_attr_setdefault(&thread_lck_attr); - + stack_init(); + thread_policy_init(); + /* * Initialize any machine-dependent * per-thread structures necessary. @@ -389,6 +443,31 @@ thread_init(void) init_thread_ledgers(); } +boolean_t +thread_is_active(thread_t thread) +{ + return (thread->active); +} + +void +thread_corpse_continue(void) +{ + thread_t thread = current_thread(); + + thread_terminate_internal(thread); + + /* + * Handle the thread termination directly + * here instead of returning to userspace. + */ + assert(thread->active == FALSE); + thread_ast_clear(thread, AST_APC); + thread_apc_ast(thread); + + panic("thread_corpse_continue"); + /*NOTREACHED*/ +} + static void thread_terminate_continue(void) { @@ -403,7 +482,6 @@ void thread_terminate_self(void) { thread_t thread = current_thread(); - task_t task; spl_t s; int threadcnt; @@ -415,14 +493,12 @@ thread_terminate_self(void) thread_mtx_lock(thread); ipc_thread_disable(thread); - + thread_mtx_unlock(thread); s = splsched(); thread_lock(thread); - assert_thread_sched_count(thread); - /* * Cancel priority depression, wait for concurrent expirations * on other processors. 
@@ -431,7 +507,7 @@ thread_terminate_self(void) thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK; /* If our priority was low because of a depressed yield, restore it in case we block below */ - set_sched_pri(thread, thread->priority); + thread_recompute_sched_pri(thread, FALSE); if (timer_call_cancel(&thread->depress_timer)) thread->depress_timer_active--; @@ -452,6 +528,11 @@ thread_terminate_self(void) thread_unlock(thread); splx(s); +#if CONFIG_EMBEDDED + thead_remove_taskwatch(thread); +#endif /* CONFIG_EMBEDDED */ + + work_interval_thread_terminate(thread); thread_mtx_lock(thread); @@ -459,16 +540,67 @@ thread_terminate_self(void) thread_mtx_unlock(thread); + bank_swap_thread_bank_ledger(thread, NULL); + task = thread->task; uthread_cleanup(task, thread->uthread, task->bsd_info); + + if (task->bsd_info && !task_is_exec_copy(task)) { + /* trace out pid before we sign off */ + long dbg_arg1 = 0; + long dbg_arg2 = 0; + + kdbg_trace_data(thread->task->bsd_info, &dbg_arg1, &dbg_arg2); + + KERNEL_DEBUG_CONSTANT(TRACE_DATA_THREAD_TERMINATE_PID | DBG_FUNC_NONE, + dbg_arg1, 0, 0, 0, 0); + } + + /* + * After this subtraction, this thread should never access + * task->bsd_info unless it got 0 back from the hw_atomic_sub. It + * could be racing with other threads to be the last thread in the + * process, and the last thread in the process will tear down the proc + * structure and zero-out task->bsd_info. + */ threadcnt = hw_atomic_sub(&task->active_thread_count, 1); /* * If we are the last thread to terminate and the task is * associated with a BSD process, perform BSD process exit. */ - if (threadcnt == 0 && task->bsd_info != NULL) + if (threadcnt == 0 && task->bsd_info != NULL && !task_is_exec_copy(task)) { + mach_exception_data_type_t subcode = 0; + { + /* since we're the last thread in this process, trace out the command name too */ + long dbg_arg1 = 0, dbg_arg2 = 0, dbg_arg3 = 0, dbg_arg4 = 0; + + kdbg_trace_string(thread->task->bsd_info, &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4); + + KERNEL_DEBUG_CONSTANT(TRACE_STRING_PROC_EXIT | DBG_FUNC_NONE, + dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0); + } + + /* Get the exit reason before proc_exit */ + subcode = proc_encode_exit_exception_code(task->bsd_info); proc_exit(task->bsd_info); + /* + * if there is crash info in task + * then do the deliver action since this is + * last thread for this task. + */ + if (task->corpse_info) { + task_deliver_crash_notification(task, current_thread(), EXC_RESOURCE, subcode); + } + } + + if (threadcnt == 0) { + task_lock(task); + if (task_is_a_corpse_fork(task)) { + thread_wakeup((event_t)&task->active_thread_count); + } + task_unlock(task); + } uthread_cred_free(thread->uthread); @@ -509,7 +641,9 @@ thread_terminate_self(void) */ thread->state |= TH_TERMINATE; thread_mark_wait_locked(thread, THREAD_UNINT); + assert((thread->sched_flags & TH_SFLAG_PROMOTED) == 0); assert(thread->promotions == 0); + assert(!(thread->sched_flags & TH_SFLAG_WAITQ_PROMOTED)); assert(thread->rwlock_count == 0); thread_unlock(thread); /* splsched */ @@ -518,21 +652,47 @@ thread_terminate_self(void) /*NOTREACHED*/ } +/* Drop a thread refcount that definitely isn't the last one. 
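 * A minimal sketch of the reference-drop idiom used by these routines
 * (obj and obj_free are placeholders; the real code also checks thread_magic
 * and the TH_TERMINATE2 state): the drop itself uses release ordering, and
 * only the caller that observes the count reaching zero re-reads it with
 * acquire ordering before tearing the object down, so writes made by the
 * other droppers are visible to the destructor:
 *
 *     uint32_t old = atomic_fetch_sub_explicit(&obj->ref_count, 1,
 *                                              memory_order_release);
 *     if (old == 1) {                          // we dropped the last reference
 *         (void)atomic_load_explicit(&obj->ref_count, memory_order_acquire);
 *         obj_free(obj);
 *     }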
*/ +void +thread_deallocate_safe(thread_t thread) +{ + assert_thread_magic(thread); + + uint32_t old_refcount = atomic_fetch_sub_explicit(&thread->ref_count, 1, memory_order_release); + + if (__improbable(old_refcount <= 1)) + panic("bad thread refcount: %d", old_refcount); +} + void thread_deallocate( thread_t thread) { task_t task; + __assert_only uint32_t th_ref_count; if (thread == THREAD_NULL) return; - if (thread_deallocate_internal(thread) > 0) - return; + assert_thread_magic(thread); + + if (__probable(atomic_fetch_sub_explicit(&thread->ref_count, 1, + memory_order_release) - 1 > 0)) { + return; + } - if(!(thread->state & TH_TERMINATE2)) + th_ref_count = atomic_load_explicit(&thread->ref_count, memory_order_acquire); + assert(th_ref_count == 0); + + assert(thread_owned_workloops_count(thread) == 0); + + if (!(thread->state & TH_TERMINATE2)) panic("thread_deallocate: thread not properly terminated\n"); + assert(thread->runq == PROCESSOR_NULL); + + assert(thread->user_promotions == 0); + #if KPC kpc_thread_destroy(thread); #endif @@ -571,9 +731,149 @@ thread_deallocate( task_deallocate(task); +#if MACH_ASSERT + assert_thread_magic(thread); + thread->thread_magic = 0; +#endif /* MACH_ASSERT */ + zfree(thread_zone, thread); } +void +thread_starts_owning_workloop(thread_t thread) +{ + atomic_fetch_add_explicit(&thread->kqwl_owning_count, 1, + memory_order_relaxed); +} + +void +thread_ends_owning_workloop(thread_t thread) +{ + __assert_only uint32_t count; + count = atomic_fetch_sub_explicit(&thread->kqwl_owning_count, 1, + memory_order_relaxed); + assert(count > 0); +} + +uint32_t +thread_owned_workloops_count(thread_t thread) +{ + return atomic_load_explicit(&thread->kqwl_owning_count, + memory_order_relaxed); +} + +/* + * thread_inspect_deallocate: + * + * Drop a thread inspection reference. + */ +void +thread_inspect_deallocate( + thread_inspect_t thread_inspect) +{ + return(thread_deallocate((thread_t)thread_inspect)); +} + +/* + * thread_exception_daemon: + * + * Deliver EXC_{RESOURCE,GUARD} exception + */ +static void +thread_exception_daemon(void) +{ + struct thread_exception_elt *elt; + task_t task; + thread_t thread; + exception_type_t etype; + + simple_lock(&thread_exception_lock); + while ((elt = (struct thread_exception_elt *)dequeue_head(&thread_exception_queue)) != NULL) { + simple_unlock(&thread_exception_lock); + + etype = elt->exception_type; + task = elt->exception_task; + thread = elt->exception_thread; + assert_thread_magic(thread); + + kfree(elt, sizeof (*elt)); + + /* wait for all the threads in the task to terminate */ + task_lock(task); + task_wait_till_threads_terminate_locked(task); + task_unlock(task); + + /* Consumes the task ref returned by task_generate_corpse_internal */ + task_deallocate(task); + /* Consumes the thread ref returned by task_generate_corpse_internal */ + thread_deallocate(thread); + + /* Deliver the notification, also clears the corpse. */ + task_deliver_crash_notification(task, thread, etype, 0); + + simple_lock(&thread_exception_lock); + } + + assert_wait((event_t)&thread_exception_queue, THREAD_UNINT); + simple_unlock(&thread_exception_lock); + + thread_block((thread_continue_t)thread_exception_daemon); +} + +/* + * thread_exception_enqueue: + * + * Enqueue a corpse port to be delivered an EXC_{RESOURCE,GUARD}. 
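 * This pairs with thread_exception_daemon above.  A rough sketch of the
 * handoff (elt_t, elt_handle and daemon_continue are placeholders; the lock,
 * queue and wait primitives are the ones used in this file):
 *
 *     // producer
 *     simple_lock(&q_lock);
 *     enqueue_tail(&q, (queue_entry_t)elt);
 *     simple_unlock(&q_lock);
 *     thread_wakeup((event_t)&q);
 *
 *     // consumer (daemon): drains the queue, then parks until woken
 *     simple_lock(&q_lock);
 *     while ((elt = (elt_t *)dequeue_head(&q)) != NULL) {
 *         simple_unlock(&q_lock);
 *         elt_handle(elt);                     // work with the lock dropped
 *         simple_lock(&q_lock);
 *     }
 *     assert_wait((event_t)&q, THREAD_UNINT);
 *     simple_unlock(&q_lock);
 *     thread_block((thread_continue_t)daemon_continue);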
+ */ +void +thread_exception_enqueue( + task_t task, + thread_t thread, + exception_type_t etype) +{ + assert(EXC_RESOURCE == etype || EXC_GUARD == etype); + struct thread_exception_elt *elt = kalloc(sizeof (*elt)); + elt->exception_type = etype; + elt->exception_task = task; + elt->exception_thread = thread; + + simple_lock(&thread_exception_lock); + enqueue_tail(&thread_exception_queue, (queue_entry_t)elt); + simple_unlock(&thread_exception_lock); + + thread_wakeup((event_t)&thread_exception_queue); +} + +/* + * thread_copy_resource_info + * + * Copy the resource info counters from source + * thread to destination thread. + */ +void +thread_copy_resource_info( + thread_t dst_thread, + thread_t src_thread) +{ + dst_thread->thread_tag = src_thread->thread_tag; + dst_thread->c_switch = src_thread->c_switch; + dst_thread->p_switch = src_thread->p_switch; + dst_thread->ps_switch = src_thread->ps_switch; + dst_thread->precise_user_kernel_time = src_thread->precise_user_kernel_time; + dst_thread->user_timer = src_thread->user_timer; + dst_thread->user_timer_save = src_thread->user_timer_save; + dst_thread->system_timer = src_thread->system_timer; + dst_thread->system_timer_save = src_thread->system_timer_save; + dst_thread->vtimer_user_save = src_thread->vtimer_user_save; + dst_thread->vtimer_prof_save = src_thread->vtimer_prof_save; + dst_thread->vtimer_rlim_save = src_thread->vtimer_rlim_save; + dst_thread->vtimer_qos_save = src_thread->vtimer_qos_save; + dst_thread->syscalls_unix = src_thread->syscalls_unix; + dst_thread->syscalls_mach = src_thread->syscalls_mach; + ledger_rollup(dst_thread->t_threadledger, src_thread->t_threadledger); + *dst_thread->thread_io_stats = *src_thread->thread_io_stats; +} + /* * thread_terminate_daemon: * @@ -591,17 +891,27 @@ thread_terminate_daemon(void) (void)splsched(); simple_lock(&thread_terminate_lock); - while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) { + while ((thread = qe_dequeue_head(&thread_terminate_queue, struct thread, runq_links)) != THREAD_NULL) { + assert_thread_magic(thread); + + /* + * if marked for crash reporting, skip reaping. 
+ * The corpse delivery thread will clear bit and enqueue + * for reaping when done + */ + if (thread->inspection){ + enqueue_tail(&crashed_threads_queue, &thread->runq_links); + continue; + } + simple_unlock(&thread_terminate_lock); (void)spllo(); - assert(thread->SHARE_COUNT == 0); - assert(thread->BG_COUNT == 0); - task = thread->task; task_lock(task); task->total_user_time += timer_grab(&thread->user_timer); + task->total_ptime += timer_grab(&thread->ptime); if (thread->precise_user_kernel_time) { task->total_system_time += timer_grab(&thread->system_timer); } else { @@ -618,8 +928,14 @@ thread_terminate_daemon(void) task->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1; task->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2; task->task_gpu_ns += ml_gpu_stat(thread); - - thread_update_qos_cpu_time(thread, FALSE); + task->task_energy += ml_energy_stat(thread); + +#if MONOTONIC + mt_terminate_update(task, thread); +#endif /* MONOTONIC */ + + thread_update_qos_cpu_time(thread); + queue_remove(&task->threads, thread, thread_t, task_threads); task->thread_count--; @@ -663,15 +979,48 @@ void thread_terminate_enqueue( thread_t thread) { - KERNEL_DEBUG_CONSTANT(TRACEDBG_CODE(DBG_TRACE_DATA, TRACE_DATA_THREAD_TERMINATE) | DBG_FUNC_NONE, thread->thread_id, 0, 0, 0, 0); + KERNEL_DEBUG_CONSTANT(TRACE_DATA_THREAD_TERMINATE | DBG_FUNC_NONE, thread->thread_id, 0, 0, 0, 0); simple_lock(&thread_terminate_lock); - enqueue_tail(&thread_terminate_queue, (queue_entry_t)thread); + enqueue_tail(&thread_terminate_queue, &thread->runq_links); simple_unlock(&thread_terminate_lock); thread_wakeup((event_t)&thread_terminate_queue); } +/* + * thread_terminate_crashed_threads: + * walk the list of crashed threads and put back set of threads + * who are no longer being inspected. 
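 * Expected call pattern (a sketch of how the corpse-delivery path hands
 * threads back, per the "corpse delivery thread will clear bit" comment in
 * thread_terminate_daemon above):
 *
 *     thread->inspection = FALSE;          // inspection finished
 *     thread_terminate_crashed_threads();  // requeue onto the terminate
 *                                          // queue and wake the reaper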
+ */ +void +thread_terminate_crashed_threads() +{ + thread_t th_remove; + boolean_t should_wake_terminate_queue = FALSE; + + simple_lock(&thread_terminate_lock); + /* + * loop through the crashed threads queue + * to put any threads that are not being inspected anymore + */ + + qe_foreach_element_safe(th_remove, &crashed_threads_queue, runq_links) { + /* make sure current_thread is never in crashed queue */ + assert(th_remove != current_thread()); + + if (th_remove->inspection == FALSE) { + re_queue_tail(&thread_terminate_queue, &th_remove->runq_links); + should_wake_terminate_queue = TRUE; + } + } + + simple_unlock(&thread_terminate_lock); + if (should_wake_terminate_queue == TRUE) { + thread_wakeup((event_t)&thread_terminate_queue); + } +} + /* * thread_stack_daemon: * @@ -687,12 +1036,16 @@ thread_stack_daemon(void) s = splsched(); simple_lock(&thread_stack_lock); - while ((thread = (thread_t)dequeue_head(&thread_stack_queue)) != THREAD_NULL) { + while ((thread = qe_dequeue_head(&thread_stack_queue, struct thread, runq_links)) != THREAD_NULL) { + assert_thread_magic(thread); + simple_unlock(&thread_stack_lock); splx(s); /* allocate stack with interrupts enabled so that we can call into VM */ stack_alloc(thread); + + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_WAIT) | DBG_FUNC_END, thread_tid(thread), 0, 0, 0, 0); s = splsched(); thread_lock(thread); @@ -721,8 +1074,11 @@ void thread_stack_enqueue( thread_t thread) { + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_WAIT) | DBG_FUNC_START, thread_tid(thread), 0, 0, 0, 0); + assert_thread_magic(thread); + simple_lock(&thread_stack_lock); - enqueue_tail(&thread_stack_queue, (queue_entry_t)thread); + enqueue_tail(&thread_stack_queue, &thread->runq_links); simple_unlock(&thread_stack_lock); thread_wakeup((event_t)&thread_stack_queue); @@ -736,6 +1092,7 @@ thread_daemon_init(void) simple_lock_init(&thread_terminate_lock, 0); queue_init(&thread_terminate_queue); + queue_init(&crashed_threads_queue); result = kernel_thread_start_priority((thread_continue_t)thread_terminate_daemon, NULL, MINPRI_KERNEL, &thread); if (result != KERN_SUCCESS) @@ -746,13 +1103,26 @@ thread_daemon_init(void) simple_lock_init(&thread_stack_lock, 0); queue_init(&thread_stack_queue); - result = kernel_thread_start_priority((thread_continue_t)thread_stack_daemon, NULL, BASEPRI_PREEMPT, &thread); + result = kernel_thread_start_priority((thread_continue_t)thread_stack_daemon, NULL, BASEPRI_PREEMPT_HIGH, &thread); if (result != KERN_SUCCESS) panic("thread_daemon_init: thread_stack_daemon"); thread_deallocate(thread); + + simple_lock_init(&thread_exception_lock, 0); + queue_init(&thread_exception_queue); + + result = kernel_thread_start_priority((thread_continue_t)thread_exception_daemon, NULL, MINPRI_KERNEL, &thread); + if (result != KERN_SUCCESS) + panic("thread_daemon_init: thread_exception_daemon"); + + thread_deallocate(thread); } +#define TH_OPTION_NONE 0x00 +#define TH_OPTION_NOCRED 0x01 +#define TH_OPTION_NOSUSP 0x02 + /* * Create a new thread. * Doesn't start the thread running. 
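 * The TH_OPTION_* bits defined above are or'ed together by callers; for
 * example, the workqueue path later in this file creates its parked threads
 * with
 *
 *     thread_create_waiting_internal(task, continuation, event,
 *                                    TH_OPTION_NOCRED | TH_OPTION_NOSUSP,
 *                                    new_thread);
 *
 * i.e. skip credential setup and refuse creation while the task is suspended.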
@@ -765,9 +1135,6 @@ thread_create_internal( integer_t priority, thread_continue_t continuation, int options, -#define TH_OPTION_NONE 0x00 -#define TH_OPTION_NOCRED 0x01 -#define TH_OPTION_NOSUSP 0x02 thread_t *out_thread) { thread_t new_thread; @@ -789,6 +1156,10 @@ thread_create_internal( #ifdef MACH_BSD new_thread->uthread = uthread_alloc(parent_task, new_thread, (options & TH_OPTION_NOCRED) != 0); if (new_thread->uthread == NULL) { +#if MACH_ASSERT + new_thread->thread_magic = 0; +#endif /* MACH_ASSERT */ + zfree(thread_zone, new_thread); return (KERN_RESOURCE_SHORTAGE); } @@ -805,6 +1176,10 @@ thread_create_internal( uthread_zone_free(ut); #endif /* MACH_BSD */ +#if MACH_ASSERT + new_thread->thread_magic = 0; +#endif /* MACH_ASSERT */ + zfree(thread_zone, new_thread); return (KERN_FAILURE); } @@ -824,6 +1199,11 @@ thread_create_internal( new_thread->thread_io_stats = (io_stat_info_t)kalloc(sizeof(struct io_stat_info)); assert(new_thread->thread_io_stats != NULL); bzero(new_thread->thread_io_stats, sizeof(struct io_stat_info)); + new_thread->sync_ipc_overrides = 0; + +#if KASAN + kasan_init_thread(&new_thread->kasan_data); +#endif #if CONFIG_IOSCHED /* Clear out the I/O Scheduling info for AppleFSCompression */ @@ -833,11 +1213,13 @@ thread_create_internal( lck_mtx_lock(&tasks_threads_lock); task_lock(parent_task); - if ( !parent_task->active || parent_task->halting || - ((options & TH_OPTION_NOSUSP) != 0 && - parent_task->suspend_count > 0) || - (parent_task->thread_count >= task_threadmax && - parent_task != kernel_task) ) { + /* + * Fail thread creation if parent task is being torn down or has too many threads + * If the caller asked for TH_OPTION_NOSUSP, also fail if the parent task is suspended + */ + if (parent_task->active == 0 || parent_task->halting || + (parent_task->suspend_count > 0 && (options & TH_OPTION_NOSUSP) != 0) || + (parent_task->thread_count >= task_threadmax && parent_task != kernel_task)) { task_unlock(parent_task); lck_mtx_unlock(&tasks_threads_lock); @@ -881,11 +1263,9 @@ thread_create_internal( ledger_entry_setactive(new_thread->t_threadledger, thread_ledgers.cpu_time); } - new_thread->cpu_time_last_qos = 0; -#ifdef CONFIG_BANK new_thread->t_bankledger = LEDGER_NULL; new_thread->t_deduct_bank_ledger_time = 0; -#endif + new_thread->t_deduct_bank_ledger_energy = 0; new_thread->t_ledger = new_thread->task->ledger; if (new_thread->t_ledger) @@ -902,43 +1282,38 @@ thread_create_internal( timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread); timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread); -#if CONFIG_COUNTERS - /* - * If parent task has any reservations, they need to be propagated to this - * thread. - */ - new_thread->t_chud = (TASK_PMC_FLAG == (parent_task->t_chud & TASK_PMC_FLAG)) ? - THREAD_PMC_FLAG : 0U; -#endif #if KPC kpc_thread_create(new_thread); #endif - - /* Only need to update policies pushed from task to thread */ - new_thread->requested_policy.bg_iotier = parent_task->effective_policy.bg_iotier; - new_thread->requested_policy.terminated = parent_task->effective_policy.terminated; /* Set the thread's scheduling parameters */ new_thread->sched_mode = SCHED(initial_thread_sched_mode)(parent_task); - new_thread->sched_flags = 0; new_thread->max_priority = parent_task->max_priority; new_thread->task_priority = parent_task->priority; - new_thread->priority = (priority < 0)? 
parent_task->priority: priority; - if (new_thread->priority > new_thread->max_priority) - new_thread->priority = new_thread->max_priority; - new_thread->importance = new_thread->priority - new_thread->task_priority; - new_thread->saved_importance = new_thread->importance; + + int new_priority = (priority < 0) ? parent_task->priority: priority; + new_priority = (priority < 0)? parent_task->priority: priority; + if (new_priority > new_thread->max_priority) + new_priority = new_thread->max_priority; +#if CONFIG_EMBEDDED + if (new_priority < MAXPRI_THROTTLE) { + new_priority = MAXPRI_THROTTLE; + } +#endif /* CONFIG_EMBEDDED */ + + new_thread->importance = new_priority - new_thread->task_priority; + + sched_set_thread_base_priority(new_thread, new_priority); #if defined(CONFIG_SCHED_TIMESHARE_CORE) new_thread->sched_stamp = sched_tick; - new_thread->pri_shift = sched_pri_shift; + new_thread->pri_shift = sched_pri_shifts[new_thread->th_sched_bucket]; #endif /* defined(CONFIG_SCHED_TIMESHARE_CORE) */ - if (parent_task->max_priority <= MAXPRI_THROTTLE) { - sched_set_thread_throttled(new_thread, TRUE); - } - - SCHED(compute_priority)(new_thread, FALSE); +#if CONFIG_EMBEDDED + if (parent_task->max_priority <= MAXPRI_THROTTLE) + sched_thread_mode_demote(new_thread, TH_SFLAG_THROTTLED); +#endif /* CONFIG_EMBEDDED */ thread_policy_create(new_thread); @@ -952,27 +1327,50 @@ thread_create_internal( /* Protected by the tasks_threads_lock */ new_thread->thread_id = ++thread_unique_id; + queue_enter(&threads, new_thread, thread_t, threads); threads_count++; new_thread->active = TRUE; - + if (task_is_a_corpse_fork(parent_task)) { + /* Set the inspection bit if the task is a corpse fork */ + new_thread->inspection = TRUE; + } else { + new_thread->inspection = FALSE; + } + new_thread->corpse_dup = FALSE; *out_thread = new_thread; { long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4; - kdbg_trace_data(parent_task->bsd_info, &dbg_arg2); + kdbg_trace_data(parent_task->bsd_info, &dbg_arg2, &dbg_arg4); + + /* + * Starting with 26604425, exec'ing creates a new task/thread. + * + * NEWTHREAD in the current process has two possible meanings: + * + * 1) Create a new thread for this process. + * 2) Create a new thread for the future process this will become in an exec. + * + * To disambiguate these, arg3 will be set to TRUE for case #2. + * + * The value we need to find (TPF_EXEC_COPY) is stable in the case of a + * task exec'ing. The read of t_procflags does not take the proc_lock. + */ + dbg_arg3 = (task_is_exec_copy(parent_task)) ? 
TRUE : 0; + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE, - (vm_address_t)(uintptr_t)thread_tid(new_thread), dbg_arg2, 0, 0, 0); + TRACE_DATA_NEWTHREAD | DBG_FUNC_NONE, + (vm_address_t)(uintptr_t)thread_tid(new_thread), dbg_arg2, dbg_arg3, dbg_arg4, 0); kdbg_trace_string(parent_task->bsd_info, &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE, + TRACE_STRING_NEWTHREAD | DBG_FUNC_NONE, dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0); } @@ -985,7 +1383,8 @@ static kern_return_t thread_create_internal2( task_t task, thread_t *new_thread, - boolean_t from_user) + boolean_t from_user, + thread_continue_t continuation) { kern_return_t result; thread_t thread; @@ -993,7 +1392,7 @@ thread_create_internal2( if (task == TASK_NULL || task == kernel_task) return (KERN_INVALID_ARGUMENT); - result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, TH_OPTION_NONE, &thread); + result = thread_create_internal(task, -1, continuation, TH_OPTION_NONE, &thread); if (result != KERN_SUCCESS) return (result); @@ -1024,7 +1423,7 @@ thread_create( task_t task, thread_t *new_thread) { - return thread_create_internal2(task, new_thread, FALSE); + return thread_create_internal2(task, new_thread, FALSE, (thread_continue_t)thread_bootstrap_return); } kern_return_t @@ -1032,19 +1431,78 @@ thread_create_from_user( task_t task, thread_t *new_thread) { - return thread_create_internal2(task, new_thread, TRUE); + return thread_create_internal2(task, new_thread, TRUE, (thread_continue_t)thread_bootstrap_return); } +kern_return_t +thread_create_with_continuation( + task_t task, + thread_t *new_thread, + thread_continue_t continuation) +{ + return thread_create_internal2(task, new_thread, FALSE, continuation); +} + +/* + * Create a thread that is already started, but is waiting on an event + */ +static kern_return_t +thread_create_waiting_internal( + task_t task, + thread_continue_t continuation, + event_t event, + int options, + thread_t *new_thread) +{ + kern_return_t result; + thread_t thread; + + if (task == TASK_NULL || task == kernel_task) + return (KERN_INVALID_ARGUMENT); + + result = thread_create_internal(task, -1, continuation, options, &thread); + if (result != KERN_SUCCESS) + return (result); + + /* note no user_stop_count or thread_hold here */ + + if (task->suspend_count > 0) + thread_hold(thread); + + thread_mtx_lock(thread); + thread_start_in_assert_wait(thread, event, THREAD_INTERRUPTIBLE); + thread_mtx_unlock(thread); + + task_unlock(task); + lck_mtx_unlock(&tasks_threads_lock); + + *new_thread = thread; + + return (KERN_SUCCESS); +} + +kern_return_t +thread_create_waiting( + task_t task, + thread_continue_t continuation, + event_t event, + thread_t *new_thread) +{ + return thread_create_waiting_internal(task, continuation, event, + TH_OPTION_NONE, new_thread); +} + + static kern_return_t thread_create_running_internal2( - register task_t task, + task_t task, int flavor, thread_state_t new_state, mach_msg_type_number_t new_state_count, thread_t *new_thread, boolean_t from_user) { - register kern_return_t result; + kern_return_t result; thread_t thread; if (task == TASK_NULL || task == kernel_task) @@ -1054,8 +1512,10 @@ thread_create_running_internal2( if (result != KERN_SUCCESS) return (result); - result = machine_thread_set_state( - thread, flavor, new_state, new_state_count); + if (task->suspend_count > 0) + thread_hold(thread); + + result = 
machine_thread_set_state(thread, flavor, new_state, new_state_count); if (result != KERN_SUCCESS) { task_unlock(task); lck_mtx_unlock(&tasks_threads_lock); @@ -1066,7 +1526,7 @@ thread_create_running_internal2( } thread_mtx_lock(thread); - thread_start_internal(thread); + thread_start(thread); thread_mtx_unlock(thread); if (from_user) @@ -1083,7 +1543,7 @@ thread_create_running_internal2( /* Prototype, see justification above */ kern_return_t thread_create_running( - register task_t task, + task_t task, int flavor, thread_state_t new_state, mach_msg_type_number_t new_state_count, @@ -1091,7 +1551,7 @@ thread_create_running( kern_return_t thread_create_running( - register task_t task, + task_t task, int flavor, thread_state_t new_state, mach_msg_type_number_t new_state_count, @@ -1104,7 +1564,7 @@ thread_create_running( kern_return_t thread_create_running_from_user( - register task_t task, + task_t task, int flavor, thread_state_t new_state, mach_msg_type_number_t new_state_count, @@ -1144,6 +1604,19 @@ thread_create_workq( return (KERN_SUCCESS); } +kern_return_t +thread_create_workq_waiting( + task_t task, + thread_continue_t continuation, + event_t event, + thread_t *new_thread) +{ + + return thread_create_waiting_internal(task, continuation, event, + TH_OPTION_NOCRED | TH_OPTION_NOSUSP, + new_thread); +} + /* * kernel_thread_create: * @@ -1161,7 +1634,7 @@ kernel_thread_create( thread_t thread; task_t task = kernel_task; - result = thread_create_internal(task, priority, continuation, TH_OPTION_NONE, &thread); + result = thread_create_internal(task, priority, continuation, TH_OPTION_NOCRED | TH_OPTION_NONE, &thread); if (result != KERN_SUCCESS) return (result); @@ -1170,6 +1643,9 @@ kernel_thread_create( stack_alloc(thread); assert(thread->kernel_stack != 0); +#if CONFIG_EMBEDDED + if (priority > BASEPRI_KERNEL) +#endif thread->reserved_stack = thread->kernel_stack; thread->parameter = parameter; @@ -1198,7 +1674,7 @@ kernel_thread_start_priority( *new_thread = thread; thread_mtx_lock(thread); - thread_start_internal(thread); + thread_start(thread); thread_mtx_unlock(thread); return (result); @@ -1213,124 +1689,129 @@ kernel_thread_start( return kernel_thread_start_priority(continuation, parameter, -1, new_thread); } - -kern_return_t -thread_info_internal( - register thread_t thread, - thread_flavor_t flavor, - thread_info_t thread_info_out, /* ptr to OUT array */ - mach_msg_type_number_t *thread_info_count) /*IN/OUT*/ +/* Separated into helper function so it can be used by THREAD_BASIC_INFO and THREAD_EXTENDED_INFO */ +/* it is assumed that the thread is locked by the caller */ +static void +retrieve_thread_basic_info(thread_t thread, thread_basic_info_t basic_info) { - int state, flags; - spl_t s; + int state, flags; - if (thread == THREAD_NULL) - return (KERN_INVALID_ARGUMENT); + /* fill in info */ - if (flavor == THREAD_BASIC_INFO) { - register thread_basic_info_t basic_info; + thread_read_times(thread, &basic_info->user_time, + &basic_info->system_time); - if (*thread_info_count < THREAD_BASIC_INFO_COUNT) - return (KERN_INVALID_ARGUMENT); + /* + * Update lazy-evaluated scheduler info because someone wants it. + */ + if (SCHED(can_update_priority)(thread)) + SCHED(update_priority)(thread); - basic_info = (thread_basic_info_t) thread_info_out; + basic_info->sleep_time = 0; - s = splsched(); - thread_lock(thread); + /* + * To calculate cpu_usage, first correct for timer rate, + * then for 5/8 ageing. The correction factor [3/5] is + * (1/(5/8) - 1). 
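 * Worked example of the computation below: a thread whose aged cpu_usage
 * equals half of sched_tick_interval first scales to TH_USAGE_SCALE / 2,
 * and the 3/5 correction then yields (3/10) * TH_USAGE_SCALE, i.e. a
 * reported usage of roughly 30%; anything above TH_USAGE_SCALE is clamped
 * to 100%.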
+ */ + basic_info->cpu_usage = 0; +#if defined(CONFIG_SCHED_TIMESHARE_CORE) + if (sched_tick_interval) { + basic_info->cpu_usage = (integer_t)(((uint64_t)thread->cpu_usage + * TH_USAGE_SCALE) / sched_tick_interval); + basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5; + } +#endif - /* fill in info */ + if (basic_info->cpu_usage > TH_USAGE_SCALE) + basic_info->cpu_usage = TH_USAGE_SCALE; - thread_read_times(thread, &basic_info->user_time, - &basic_info->system_time); + basic_info->policy = ((thread->sched_mode == TH_MODE_TIMESHARE)? + POLICY_TIMESHARE: POLICY_RR); - /* - * Update lazy-evaluated scheduler info because someone wants it. - */ - if (SCHED(can_update_priority)(thread)) - SCHED(update_priority)(thread); + flags = 0; + if (thread->options & TH_OPT_IDLE_THREAD) + flags |= TH_FLAGS_IDLE; - basic_info->sleep_time = 0; + if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) { + flags |= TH_FLAGS_GLOBAL_FORCED_IDLE; + } - /* - * To calculate cpu_usage, first correct for timer rate, - * then for 5/8 ageing. The correction factor [3/5] is - * (1/(5/8) - 1). - */ - basic_info->cpu_usage = 0; -#if defined(CONFIG_SCHED_TIMESHARE_CORE) - if (sched_tick_interval) { - basic_info->cpu_usage = (integer_t)(((uint64_t)thread->cpu_usage - * TH_USAGE_SCALE) / sched_tick_interval); - basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5; - } -#endif - - if (basic_info->cpu_usage > TH_USAGE_SCALE) - basic_info->cpu_usage = TH_USAGE_SCALE; + if (!thread->kernel_stack) + flags |= TH_FLAGS_SWAPPED; + + state = 0; + if (thread->state & TH_TERMINATE) + state = TH_STATE_HALTED; + else + if (thread->state & TH_RUN) + state = TH_STATE_RUNNING; + else + if (thread->state & TH_UNINT) + state = TH_STATE_UNINTERRUPTIBLE; + else + if (thread->state & TH_SUSP) + state = TH_STATE_STOPPED; + else + if (thread->state & TH_WAIT) + state = TH_STATE_WAITING; - basic_info->policy = ((thread->sched_mode == TH_MODE_TIMESHARE)? 
- POLICY_TIMESHARE: POLICY_RR); + basic_info->run_state = state; + basic_info->flags = flags; - flags = 0; - if (thread->options & TH_OPT_IDLE_THREAD) - flags |= TH_FLAGS_IDLE; + basic_info->suspend_count = thread->user_stop_count; - if (!thread->kernel_stack) - flags |= TH_FLAGS_SWAPPED; + return; +} - state = 0; - if (thread->state & TH_TERMINATE) - state = TH_STATE_HALTED; - else - if (thread->state & TH_RUN) - state = TH_STATE_RUNNING; - else - if (thread->state & TH_UNINT) - state = TH_STATE_UNINTERRUPTIBLE; - else - if (thread->state & TH_SUSP) - state = TH_STATE_STOPPED; - else - if (thread->state & TH_WAIT) - state = TH_STATE_WAITING; +kern_return_t +thread_info_internal( + thread_t thread, + thread_flavor_t flavor, + thread_info_t thread_info_out, /* ptr to OUT array */ + mach_msg_type_number_t *thread_info_count) /*IN/OUT*/ +{ + spl_t s; - basic_info->run_state = state; - basic_info->flags = flags; + if (thread == THREAD_NULL) + return (KERN_INVALID_ARGUMENT); - basic_info->suspend_count = thread->user_stop_count; + if (flavor == THREAD_BASIC_INFO) { - thread_unlock(thread); - splx(s); + if (*thread_info_count < THREAD_BASIC_INFO_COUNT) + return (KERN_INVALID_ARGUMENT); - *thread_info_count = THREAD_BASIC_INFO_COUNT; + s = splsched(); + thread_lock(thread); - return (KERN_SUCCESS); + retrieve_thread_basic_info(thread, (thread_basic_info_t) thread_info_out); + + thread_unlock(thread); + splx(s); + + *thread_info_count = THREAD_BASIC_INFO_COUNT; + + return (KERN_SUCCESS); } else if (flavor == THREAD_IDENTIFIER_INFO) { - register thread_identifier_info_t identifier_info; + thread_identifier_info_t identifier_info; - if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT) + if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT) return (KERN_INVALID_ARGUMENT); - identifier_info = (thread_identifier_info_t) thread_info_out; + identifier_info = (thread_identifier_info_t) thread_info_out; - s = splsched(); - thread_lock(thread); + s = splsched(); + thread_lock(thread); - identifier_info->thread_id = thread->thread_id; - identifier_info->thread_handle = thread->machine.cthread_self; - if(thread->task->bsd_info) { - identifier_info->dispatch_qaddr = identifier_info->thread_handle + get_dispatchqueue_offset_from_proc(thread->task->bsd_info); - } else { - thread_unlock(thread); - splx(s); - return KERN_INVALID_ARGUMENT; - } + identifier_info->thread_id = thread->thread_id; + identifier_info->thread_handle = thread->machine.cthread_self; + identifier_info->dispatch_qaddr = thread_dispatchqaddr(thread); - thread_unlock(thread); - splx(s); - return KERN_SUCCESS; + thread_unlock(thread); + splx(s); + return KERN_SUCCESS; } else if (flavor == THREAD_SCHED_TIMESHARE_INFO) { @@ -1341,23 +1822,22 @@ thread_info_internal( ts_info = (policy_timeshare_info_t)thread_info_out; - s = splsched(); + s = splsched(); thread_lock(thread); - if (thread->sched_mode != TH_MODE_TIMESHARE) { - thread_unlock(thread); + if (thread->sched_mode != TH_MODE_TIMESHARE) { + thread_unlock(thread); splx(s); - return (KERN_INVALID_POLICY); - } + } ts_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0; if (ts_info->depressed) { ts_info->base_priority = DEPRESSPRI; - ts_info->depress_priority = thread->priority; + ts_info->depress_priority = thread->base_pri; } else { - ts_info->base_priority = thread->priority; + ts_info->base_priority = thread->base_pri; ts_info->depress_priority = -1; } @@ -1365,11 +1845,11 @@ thread_info_internal( ts_info->max_priority = thread->max_priority; thread_unlock(thread); - 
splx(s); + splx(s); *thread_info_count = POLICY_TIMESHARE_INFO_COUNT; - return (KERN_SUCCESS); + return (KERN_SUCCESS); } else if (flavor == THREAD_SCHED_FIFO_INFO) { @@ -1383,17 +1863,17 @@ thread_info_internal( policy_rr_info_t rr_info; uint32_t quantum_time; uint64_t quantum_ns; - + if (*thread_info_count < POLICY_RR_INFO_COUNT) return (KERN_INVALID_ARGUMENT); rr_info = (policy_rr_info_t) thread_info_out; - s = splsched(); + s = splsched(); thread_lock(thread); - if (thread->sched_mode == TH_MODE_TIMESHARE) { - thread_unlock(thread); + if (thread->sched_mode == TH_MODE_TIMESHARE) { + thread_unlock(thread); splx(s); return (KERN_INVALID_POLICY); @@ -1402,25 +1882,80 @@ thread_info_internal( rr_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0; if (rr_info->depressed) { rr_info->base_priority = DEPRESSPRI; - rr_info->depress_priority = thread->priority; + rr_info->depress_priority = thread->base_pri; } else { - rr_info->base_priority = thread->priority; + rr_info->base_priority = thread->base_pri; rr_info->depress_priority = -1; } quantum_time = SCHED(initial_quantum_size)(THREAD_NULL); absolutetime_to_nanoseconds(quantum_time, &quantum_ns); - + rr_info->max_priority = thread->max_priority; - rr_info->quantum = (uint32_t)(quantum_ns / 1000 / 1000); + rr_info->quantum = (uint32_t)(quantum_ns / 1000 / 1000); thread_unlock(thread); - splx(s); + splx(s); *thread_info_count = POLICY_RR_INFO_COUNT; - return (KERN_SUCCESS); + return (KERN_SUCCESS); + } + else + if (flavor == THREAD_EXTENDED_INFO) { + thread_basic_info_data_t basic_info; + thread_extended_info_t extended_info = (thread_extended_info_t) thread_info_out; + + if (*thread_info_count < THREAD_EXTENDED_INFO_COUNT) { + return (KERN_INVALID_ARGUMENT); + } + + s = splsched(); + thread_lock(thread); + + /* NOTE: This mimics fill_taskthreadinfo(), which is the function used by proc_pidinfo() for + * the PROC_PIDTHREADINFO flavor (which can't be used on corpses) + */ + retrieve_thread_basic_info(thread, &basic_info); + extended_info->pth_user_time = ((basic_info.user_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.user_time.microseconds * (integer_t)NSEC_PER_USEC)); + extended_info->pth_system_time = ((basic_info.system_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.system_time.microseconds * (integer_t)NSEC_PER_USEC)); + + extended_info->pth_cpu_usage = basic_info.cpu_usage; + extended_info->pth_policy = basic_info.policy; + extended_info->pth_run_state = basic_info.run_state; + extended_info->pth_flags = basic_info.flags; + extended_info->pth_sleep_time = basic_info.sleep_time; + extended_info->pth_curpri = thread->sched_pri; + extended_info->pth_priority = thread->base_pri; + extended_info->pth_maxpriority = thread->max_priority; + + bsd_getthreadname(thread->uthread,extended_info->pth_name); + + thread_unlock(thread); + splx(s); + + *thread_info_count = THREAD_EXTENDED_INFO_COUNT; + + return (KERN_SUCCESS); + } + else + if (flavor == THREAD_DEBUG_INFO_INTERNAL) { +#if DEVELOPMENT || DEBUG + thread_debug_info_internal_t dbg_info; + if (*thread_info_count < THREAD_DEBUG_INFO_INTERNAL_COUNT) + return (KERN_NOT_SUPPORTED); + + if (thread_info_out == NULL) + return (KERN_INVALID_ARGUMENT); + + dbg_info = (thread_debug_info_internal_t) thread_info_out; + dbg_info->page_creation_count = thread->t_page_creation_count; + + *thread_info_count = THREAD_DEBUG_INFO_INTERNAL_COUNT; + return (KERN_SUCCESS); +#endif /* DEVELOPMENT || DEBUG */ + return (KERN_NOT_SUPPORTED); } return (KERN_INVALID_ARGUMENT); @@ -1568,6 
+2103,12 @@ thread_wire( } +boolean_t +is_vm_privileged(void) +{ + return current_thread()->options & TH_OPT_VMPRIV ? TRUE : FALSE; +} + boolean_t set_vm_privilege(boolean_t privileged) { @@ -1586,14 +2127,33 @@ set_vm_privilege(boolean_t privileged) return (was_vmpriv); } +void +set_thread_rwlock_boost(void) +{ + current_thread()->rwlock_count++; +} + +void +clear_thread_rwlock_boost(void) +{ + thread_t thread = current_thread(); + + if ((thread->rwlock_count-- == 1) && (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) { + + lck_rw_clear_promotion(thread); + } +} + /* * XXX assuming current thread only, for now... */ void -thread_guard_violation(thread_t thread, unsigned type) +thread_guard_violation(thread_t thread, + mach_exception_data_type_t code, mach_exception_data_type_t subcode) { assert(thread == current_thread()); + assert(thread->task != kernel_task); spl_t s = splsched(); /* @@ -1601,9 +2161,11 @@ thread_guard_violation(thread_t thread, unsigned type) * to store all info required to handle the AST when * returning to userspace */ - thread->guard_exc_info.type = type; + assert(EXC_GUARD_DECODE_GUARD_TYPE(code)); + thread->guard_exc_info.code = code; + thread->guard_exc_info.subcode = subcode; thread_ast_set(thread, AST_GUARD); - ast_propagate(thread->ast); + ast_propagate(thread); splx(s); } @@ -1618,12 +2180,27 @@ thread_guard_violation(thread_t thread, unsigned type) * info and raises the exception. */ void -guard_ast(thread_t thread) +guard_ast(thread_t t) { - if (thread->guard_exc_info.type == GUARD_TYPE_MACH_PORT) - mach_port_guard_ast(thread); - else - fd_guard_ast(thread); + const mach_exception_data_type_t + code = t->guard_exc_info.code, + subcode = t->guard_exc_info.subcode; + + switch (EXC_GUARD_DECODE_GUARD_TYPE(code)) { + case GUARD_TYPE_MACH_PORT: + mach_port_guard_ast(t, code, subcode); + break; + case GUARD_TYPE_FD: + fd_guard_ast(t, code, subcode); + break; +#if CONFIG_VNGUARD + case GUARD_TYPE_VN: + vn_guard_ast(t, code, subcode); + break; +#endif + default: + panic("guard_exc_info %llx %llx", code, subcode); + } } static void @@ -1652,58 +2229,62 @@ thread_cputime_callback(int warning, __unused const void *arg0, __unused const v #endif if (warning == 0) { - THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU__SENDING_EXC_RESOURCE(); + SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(); } } void __attribute__((noinline)) -THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU__SENDING_EXC_RESOURCE(void) +SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void) { int pid = 0; task_t task = current_task(); thread_t thread = current_thread(); uint64_t tid = thread->thread_id; - char *procname = (char *) "unknown"; + const char *procname = "unknown"; time_value_t thread_total_time = {0, 0}; time_value_t thread_system_time; time_value_t thread_user_time; int action; uint8_t percentage; - uint32_t limit_percent; - uint32_t usage_percent; + uint32_t usage_percent = 0; uint32_t interval_sec; uint64_t interval_ns; uint64_t balance_ns; boolean_t fatal = FALSE; + boolean_t send_exc_resource = TRUE; /* in addition to RESOURCE_NOTIFY */ + kern_return_t kr; +#ifdef EXC_RESOURCE_MONITORS mach_exception_data_type_t code[EXCEPTION_CODE_MAX]; +#endif /* EXC_RESOURCE_MONITORS */ struct ledger_entry_info lei; assert(thread->t_threadledger != LEDGER_NULL); /* - * Now that a thread has tripped the monitor, disable it for the entire task. + * Extract the fatal bit and suspend the monitor (which clears the bit). 
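 * In outline, the code below: suspends the per-task CPU monitor (only the
 * first thread to get here succeeds, later racers bail out when the suspend
 * call fails), reads the thread's times and its ledger entry, logs a
 * diagnostic, sends the RESOURCE_NOTIFY violation, optionally raises
 * EXC_RESOURCE unless suppressed by a boot-arg or by active audio playback,
 * and finally arranges for the process to be killed when the limit was
 * configured as fatal.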
*/ task_lock(task); - if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) == 0) { - /* - * The CPU usage monitor has been disabled on our task, so some other - * thread must have gotten here first. We only send one exception per - * task lifetime, so there's nothing left for us to do here. - */ - task_unlock(task); - return; - } if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_CPUMON) { fatal = TRUE; + send_exc_resource = TRUE; } - task_disable_cpumon(task); + /* Only one thread can be here at a time. Whichever makes it through + first will successfully suspend the monitor and proceed to send the + notification. Other threads will get an error trying to suspend the + monitor and give up on sending the notification. In the first release, + the monitor won't be resumed for a number of seconds, but we may + eventually need to handle low-latency resume. + */ + kr = task_suspend_cpumon(task); task_unlock(task); + if (kr == KERN_INVALID_ARGUMENT) return; #ifdef MACH_BSD pid = proc_selfpid(); - if (task->bsd_info != NULL) + if (task->bsd_info != NULL) { procname = proc_name_address(task->bsd_info); + } #endif thread_get_cpulimit(&action, &percentage, &interval_ns); @@ -1713,58 +2294,80 @@ THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU__SENDING_EXC_RESOURCE(void) thread_read_times(thread, &thread_user_time, &thread_system_time); time_value_add(&thread_total_time, &thread_user_time); time_value_add(&thread_total_time, &thread_system_time); - ledger_get_entry_info(thread->t_threadledger, thread_ledgers.cpu_time, &lei); + /* credit/debit/balance/limit are in absolute time units; + the refill info is in nanoseconds. */ absolutetime_to_nanoseconds(lei.lei_balance, &balance_ns); - usage_percent = (uint32_t) ((balance_ns * 100ULL) / lei.lei_last_refill); - - /* Show refill period in the same units as balance, limit, etc */ - nanoseconds_to_absolutetime(lei.lei_refill_period, &lei.lei_refill_period); + if (lei.lei_last_refill > 0) { + usage_percent = (uint32_t)((balance_ns*100ULL) / lei.lei_last_refill); + } - limit_percent = (uint32_t) ((lei.lei_limit * 100ULL) / lei.lei_refill_period); + /* TODO: show task total runtime (via TASK_ABSOLUTETIME_INFO)? */ + printf("process %s[%d] thread %llu caught burning CPU! " + "It used more than %d%% CPU over %u seconds " + "(actual recent usage: %d%% over ~%llu seconds). " + "Thread lifetime cpu usage %d.%06ds, (%d.%06d user, %d.%06d sys) " + "ledger balance: %lld mabs credit: %lld mabs debit: %lld mabs " + "limit: %llu mabs period: %llu ns last refill: %llu ns%s.\n", + procname, pid, tid, + percentage, interval_sec, + usage_percent, + (lei.lei_last_refill + NSEC_PER_SEC/2) / NSEC_PER_SEC, + thread_total_time.seconds, thread_total_time.microseconds, + thread_user_time.seconds, thread_user_time.microseconds, + thread_system_time.seconds,thread_system_time.microseconds, + lei.lei_balance, lei.lei_credit, lei.lei_debit, + lei.lei_limit, lei.lei_refill_period, lei.lei_last_refill, + (fatal ? " [fatal violation]" : "")); - /* TODO: show task total runtime as well? see TASK_ABSOLUTETIME_INFO */ + /* + For now, send RESOURCE_NOTIFY in parallel with EXC_RESOURCE. Once + we have logging parity, we will stop sending EXC_RESOURCE (24508922). 
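 * Note on units: the ledger's credit/debit/balance/limit are in mach
 * absolute time ("mabs" in the message above) and are converted with
 * absolutetime_to_nanoseconds() before being reported, since RESOURCE_NOTIFY
 * takes nanoseconds.  As a worked example of the percentage above, a thread
 * that burned 1.5 s of CPU over a ~3 s refill window reports
 * usage_percent = (1.5e9 * 100) / 3e9 = 50.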
+ */ - if (disable_exc_resource) { - printf("process %s[%d] thread %llu caught burning CPU!; EXC_RESOURCE " - "supressed by a boot-arg\n", procname, pid, tid); - return; + /* RESOURCE_NOTIFY MIG specifies nanoseconds of CPU time */ + lei.lei_balance = balance_ns; + absolutetime_to_nanoseconds(lei.lei_limit, &lei.lei_limit); + trace_resource_violation(RMON_CPUUSAGE_VIOLATED, &lei); + kr = send_resource_violation(send_cpu_usage_violation, task, &lei, + fatal ? kRNFatalLimitFlag : 0); + if (kr) { + printf("send_resource_violation(CPU usage, ...): error %#x\n", kr); } - if (audio_active) { - printf("process %s[%d] thread %llu caught burning CPU!; EXC_RESOURCE " - "supressed due to audio playback\n", procname, pid, tid); - return; +#ifdef EXC_RESOURCE_MONITORS + if (send_exc_resource) { + if (disable_exc_resource) { + printf("process %s[%d] thread %llu caught burning CPU! " + "EXC_RESOURCE%s supressed by a boot-arg\n", + procname, pid, tid, fatal ? " (and termination)" : ""); + return; + } + + if (audio_active) { + printf("process %s[%d] thread %llu caught burning CPU! " + "EXC_RESOURCE & termination supressed due to audio playback\n", + procname, pid, tid); + return; + } } - printf("process %s[%d] thread %llu caught burning CPU! " - "It used more than %d%% CPU (Actual recent usage: %d%%) over %d seconds. " - "thread lifetime cpu usage %d.%06d seconds, (%d.%06d user, %d.%06d system) " - "ledger info: balance: %lld credit: %lld debit: %lld limit: %llu (%d%%) " - "period: %llu time since last refill (ns): %llu %s\n", - procname, pid, tid, - percentage, usage_percent, interval_sec, - thread_total_time.seconds, thread_total_time.microseconds, - thread_user_time.seconds, thread_user_time.microseconds, - thread_system_time.seconds, thread_system_time.microseconds, - lei.lei_balance, - lei.lei_credit, lei.lei_debit, - lei.lei_limit, limit_percent, - lei.lei_refill_period, lei.lei_last_refill, - (fatal ? 
"[fatal violation]" : "")); - - - code[0] = code[1] = 0; - EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_CPU); - if (fatal) { - EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_CPU_MONITOR_FATAL); - }else { - EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_CPU_MONITOR); + + + if (send_exc_resource) { + code[0] = code[1] = 0; + EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_CPU); + if (fatal) { + EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_CPU_MONITOR_FATAL); + }else { + EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_CPU_MONITOR); + } + EXC_RESOURCE_CPUMONITOR_ENCODE_INTERVAL(code[0], interval_sec); + EXC_RESOURCE_CPUMONITOR_ENCODE_PERCENTAGE(code[0], percentage); + EXC_RESOURCE_CPUMONITOR_ENCODE_PERCENTAGE(code[1], usage_percent); + exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX); } - EXC_RESOURCE_CPUMONITOR_ENCODE_INTERVAL(code[0], interval_sec); - EXC_RESOURCE_CPUMONITOR_ENCODE_PERCENTAGE(code[0], limit_percent); - EXC_RESOURCE_CPUMONITOR_ENCODE_PERCENTAGE(code[1], usage_percent); - exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX); +#endif /* EXC_RESOURCE_MONITORS */ if (fatal) { #if CONFIG_JETSAM @@ -1775,18 +2378,6 @@ THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU__SENDING_EXC_RESOURCE(void) } } -#define UPDATE_IO_STATS(info, size) \ -{ \ - info.count++; \ - info.size += size; \ -} - -#define UPDATE_IO_STATS_ATOMIC(info, size) \ -{ \ - OSIncrementAtomic64((SInt64 *)&(info.count)); \ - OSAddAtomic64(size, (SInt64 *)&(info.size)); \ -} - void thread_update_io_stats(thread_t thread, int size, int io_flags) { int io_tier; @@ -1819,9 +2410,13 @@ void thread_update_io_stats(thread_t thread, int size, int io_flags) UPDATE_IO_STATS(thread->thread_io_stats->total_io, size); UPDATE_IO_STATS_ATOMIC(thread->task->task_io_stats->total_io, size); + if (!(io_flags & DKIO_READ)) { + DTRACE_IO3(physical_writes, struct task *, thread->task, uint32_t, size, int, io_flags); + ledger_credit(thread->task->ledger, task_ledgers.physical_writes, size); + } } -void +static void init_thread_ledgers(void) { ledger_template_t t; int idx; @@ -1840,7 +2435,8 @@ init_thread_ledgers(void) { } thread_ledgers.cpu_time = idx; - + + ledger_template_complete(t); thread_ledger_template = t; } @@ -2007,6 +2603,39 @@ thread_sched_call( thread->sched_call = (call != NULL)? 
call: sched_call_null; } +sched_call_t +thread_disable_sched_call( + thread_t thread, + sched_call_t call) +{ + if (call) { + spl_t s = splsched(); + thread_lock(thread); + if (thread->sched_call == call) { + thread->sched_call = sched_call_null; + } else { + call = NULL; + } + thread_unlock(thread); + splx(s); + } + return call; +} + +void +thread_reenable_sched_call( + thread_t thread, + sched_call_t call) +{ + if (call) { + spl_t s = splsched(); + thread_lock(thread); + thread_sched_call(thread, call); + thread_unlock(thread); + splx(s); + } +} + void thread_static_param( thread_t thread, @@ -2035,17 +2664,55 @@ uint64_t thread_dispatchqaddr( thread_t thread) { - uint64_t dispatchqueue_addr = 0; - uint64_t thread_handle = 0; + uint64_t dispatchqueue_addr; + uint64_t thread_handle; - if (thread != THREAD_NULL) { - thread_handle = thread->machine.cthread_self; + if (thread == THREAD_NULL) + return 0; + + thread_handle = thread->machine.cthread_self; + if (thread_handle == 0) + return 0; + + if (thread->inspection == TRUE) + dispatchqueue_addr = thread_handle + get_task_dispatchqueue_offset(thread->task); + else if (thread->task->bsd_info) + dispatchqueue_addr = thread_handle + get_dispatchqueue_offset_from_proc(thread->task->bsd_info); + else + dispatchqueue_addr = 0; - if (thread->task->bsd_info) - dispatchqueue_addr = thread_handle + get_dispatchqueue_offset_from_proc(thread->task->bsd_info); + return dispatchqueue_addr; +} + +uint64_t +thread_rettokern_addr( + thread_t thread) +{ + uint64_t rettokern_addr; + uint64_t rettokern_offset; + uint64_t thread_handle; + + if (thread == THREAD_NULL) + return 0; + + thread_handle = thread->machine.cthread_self; + if (thread_handle == 0) + return 0; + + if (thread->task->bsd_info) { + rettokern_offset = get_return_to_kernel_offset_from_proc(thread->task->bsd_info); + + /* Return 0 if return to kernel offset is not initialized. 
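 * Like thread_dispatchqaddr() above, the user address is computed as the
 * thread's cthread_self (the userspace TSD base recorded in the machine
 * state) plus a per-process offset published by the BSD layer; a zero
 * thread handle or a zero offset means the value is not set up yet and 0
 * is returned.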
*/ + if (rettokern_offset == 0) { + rettokern_addr = 0; + } else { + rettokern_addr = thread_handle + rettokern_offset; + } + } else { + rettokern_addr = 0; } - return (dispatchqueue_addr); + return rettokern_addr; } /* @@ -2088,9 +2755,8 @@ thread_set_voucher_name(mach_port_name_t voucher_name) thread_t thread = current_thread(); ipc_voucher_t new_voucher = IPC_VOUCHER_NULL; ipc_voucher_t voucher; -#ifdef CONFIG_BANK ledger_t bankledger = NULL; -#endif + thread_group_t banktg = NULL; if (MACH_PORT_DEAD == voucher_name) return KERN_INVALID_RIGHT; @@ -2103,19 +2769,16 @@ thread_set_voucher_name(mach_port_name_t voucher_name) if (IPC_VOUCHER_NULL == new_voucher) return KERN_INVALID_ARGUMENT; } -#ifdef CONFIG_BANK - bankledger = bank_get_voucher_ledger(new_voucher); -#endif + bank_get_bank_ledger_and_thread_group(new_voucher, &bankledger, &banktg); thread_mtx_lock(thread); voucher = thread->ith_voucher; thread->ith_voucher_name = voucher_name; thread->ith_voucher = new_voucher; -#ifdef CONFIG_BANK - bank_swap_thread_bank_ledger(thread, bankledger); -#endif thread_mtx_unlock(thread); + bank_swap_thread_bank_ledger(thread, bankledger); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_IPC,MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), @@ -2217,30 +2880,26 @@ thread_set_mach_voucher( ipc_voucher_t voucher) { ipc_voucher_t old_voucher; -#ifdef CONFIG_BANK ledger_t bankledger = NULL; -#endif + thread_group_t banktg = NULL; if (THREAD_NULL == thread) return KERN_INVALID_ARGUMENT; - if (thread != current_thread() || thread->started) + if (thread != current_thread() && thread->started) return KERN_INVALID_ARGUMENT; - ipc_voucher_reference(voucher); -#ifdef CONFIG_BANK - bankledger = bank_get_voucher_ledger(voucher); -#endif + bank_get_bank_ledger_and_thread_group(voucher, &bankledger, &banktg); + thread_mtx_lock(thread); old_voucher = thread->ith_voucher; thread->ith_voucher = voucher; thread->ith_voucher_name = MACH_PORT_NULL; -#ifdef CONFIG_BANK - bank_swap_thread_bank_ledger(thread, bankledger); -#endif thread_mtx_unlock(thread); + bank_swap_thread_bank_ledger(thread, bankledger); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_IPC,MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), @@ -2271,19 +2930,16 @@ thread_swap_mach_voucher( { mach_port_name_t old_voucher_name; ipc_voucher_t old_voucher; -#ifdef CONFIG_BANK ledger_t bankledger = NULL; -#endif + thread_group_t banktg = NULL; if (THREAD_NULL == thread) return KERN_INVALID_TASK; - if (thread != current_thread() || thread->started) + if (thread != current_thread() && thread->started) return KERN_INVALID_ARGUMENT; -#ifdef CONFIG_BANK - bankledger = bank_get_voucher_ledger(new_voucher); -#endif + bank_get_bank_ledger_and_thread_group(new_voucher, &bankledger, &banktg); thread_mtx_lock(thread); @@ -2313,10 +2969,8 @@ thread_swap_mach_voucher( ipc_voucher_reference(new_voucher); thread->ith_voucher = new_voucher; thread->ith_voucher_name = MACH_PORT_NULL; -#ifdef CONFIG_BANK - bank_swap_thread_bank_ledger(thread, bankledger); -#endif thread_mtx_unlock(thread); + bank_swap_thread_bank_ledger(thread, bankledger); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_IPC,MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE, @@ -2361,6 +3015,54 @@ thread_get_current_voucher_origin_pid( return kr; } + +boolean_t +thread_has_thread_name(thread_t th) +{ + if ((th) && (th->uthread)) { + return bsd_hasthreadname(th->uthread); + } + + /* + * This is an odd case; clients may set the 
thread name based on the lack of + * a name, but in this context there is no uthread to attach the name to. + */ + return FALSE; +} + +void +thread_set_thread_name(thread_t th, const char* name) +{ + if ((th) && (th->uthread) && name) { + bsd_setthreadname(th->uthread, name); + } +} + +/* + * thread_enable_send_importance - set/clear the SEND_IMPORTANCE thread option bit. + */ +void thread_enable_send_importance(thread_t thread, boolean_t enable) +{ + if (enable == TRUE) + thread->options |= TH_OPT_SEND_IMPORTANCE; + else + thread->options &= ~TH_OPT_SEND_IMPORTANCE; +} + +/* + * thread_set_allocation_name - . + */ + +kern_allocation_name_t thread_set_allocation_name(kern_allocation_name_t new_name) +{ + kern_allocation_name_t ret; + thread_kernel_state_t kstate = thread_get_kernel_state(current_thread()); + ret = kstate->allocation_name; + // fifo + if (!new_name || !kstate->allocation_name) kstate->allocation_name = new_name; + return ret; +} + #if CONFIG_DTRACE uint32_t dtrace_get_thread_predcache(thread_t thread) { @@ -2378,6 +3080,15 @@ int64_t dtrace_get_thread_vtime(thread_t thread) return 0; } +int dtrace_get_thread_last_cpu_id(thread_t thread) +{ + if ((thread != THREAD_NULL) && (thread->last_processor != PROCESSOR_NULL)) { + return thread->last_processor->cpu_id; + } else { + return -1; + } +} + int64_t dtrace_get_thread_tracing(thread_t thread) { if (thread != THREAD_NULL) @@ -2402,6 +3113,14 @@ vm_offset_t dtrace_get_kernel_stack(thread_t thread) return 0; } +#if KASAN +struct kasan_thread_data * +kasan_get_thread_data(thread_t thread) +{ + return &thread->kasan_data; +} +#endif + int64_t dtrace_calc_thread_recent_vtime(thread_t thread) { if (thread != THREAD_NULL) { @@ -2465,6 +3184,8 @@ void dtrace_thread_bootstrap(void) if (thread->t_dtrace_flags & TH_DTRACE_EXECSUCCESS) { thread->t_dtrace_flags &= ~TH_DTRACE_EXECSUCCESS; DTRACE_PROC(exec__success); + KDBG(BSDDBG_CODE(DBG_BSD_PROC,BSD_PROC_EXEC), + task_pid(task)); } DTRACE_PROC(start); }
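A plausible save/restore use of thread_set_allocation_name() as defined above
(my_site_name is a placeholder; the "fifo" behaviour means an already
installed name is kept and handed back rather than overwritten):

    kern_allocation_name_t prior = thread_set_allocation_name(my_site_name);
    /* ... allocations on this thread are attributed to the installed name ... */
    thread_set_allocation_name(prior);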