X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/fe8ab488e9161c46dd9885d58fc52996dc0249ff..c7d2c2c6ee645e10cbccdd01c6191873ec77239d:/osfmk/kern/thread.c

diff --git a/osfmk/kern/thread.c b/osfmk/kern/thread.c
index f8556bbab..535d1e319 100644
--- a/osfmk/kern/thread.c
+++ b/osfmk/kern/thread.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -117,6 +117,7 @@
 #include
 #include
 #include
+#include
 #if KPC
 #include
 #endif
@@ -129,7 +130,7 @@
 #include
 #include
-
+#include
 #include
 
 /*
@@ -146,12 +147,16 @@
 static lck_grp_attr_t	thread_lck_grp_attr;
 lck_attr_t		thread_lck_attr;
 lck_grp_t		thread_lck_grp;
 
+struct zone		*thread_qos_override_zone;
+
 decl_simple_lock_data(static,thread_stack_lock)
 static queue_head_t	thread_stack_queue;
 
 decl_simple_lock_data(static,thread_terminate_lock)
 static queue_head_t	thread_terminate_queue;
 
+static queue_head_t	crashed_threads_queue;
+
 static struct thread	thread_template, init_thread;
 
 static void		sched_call_null(
@@ -212,7 +217,7 @@ thread_bootstrap(void)
 	thread_template.reason = AST_NONE;
 	thread_template.at_safe_point = FALSE;
 	thread_template.wait_event = NO_EVENT64;
-	thread_template.wait_queue = WAIT_QUEUE_NULL;
+	thread_template.waitq = NULL;
 	thread_template.wait_result = THREAD_WAITING;
 	thread_template.options = THREAD_ABORTSAFE;
 	thread_template.state = TH_WAIT | TH_UNINT;
@@ -234,13 +239,13 @@ thread_bootstrap(void)
 	thread_template.static_param = 0;
 	thread_template.policy_reset = 0;
 
-	thread_template.priority = 0;
+	thread_template.base_pri = 0;
 	thread_template.sched_pri = 0;
 	thread_template.max_priority = 0;
 	thread_template.task_priority = 0;
 	thread_template.promotions = 0;
 	thread_template.pending_promoter_index = 0;
-	thread_template.pending_promoter[0] =
+	thread_template.pending_promoter[0] = NULL;
 	thread_template.pending_promoter[1] = NULL;
 	thread_template.rwlock_count = 0;
@@ -253,6 +258,7 @@ thread_bootstrap(void)
 
 	thread_template.quantum_remaining = 0;
 	thread_template.last_run_time = 0;
+	thread_template.last_made_runnable_time = 0;
 
 	thread_template.computation_metered = 0;
 	thread_template.computation_epoch = 0;
@@ -278,16 +284,15 @@ thread_bootstrap(void)
 	thread_template.vtimer_prof_save = 0;
 	thread_template.vtimer_rlim_save = 0;
 
+#if CONFIG_SCHED_SFI
 	thread_template.wait_sfi_begin_time = 0;
+#endif
 
 	thread_template.wait_timer_is_set = FALSE;
 	thread_template.wait_timer_active = 0;
 
 	thread_template.depress_timer_active = 0;
 
-	thread_template.special_handler.handler = special_handler;
-	thread_template.special_handler.next = NULL;
-
 	thread_template.recover = (vm_offset_t)NULL;
 
 	thread_template.map = VM_MAP_NULL;
@@ -307,6 +312,12 @@ thread_bootstrap(void)
 #endif /* HYPERVISOR */
 
 	thread_template.t_chud = 0;
+
+#if (DEVELOPMENT || DEBUG)
+	thread_template.t_page_creation_throttled_hard = 0;
+	thread_template.t_page_creation_throttled_soft = 0;
+#endif /* DEVELOPMENT || DEBUG */
+	thread_template.t_page_creation_throttled = 0;
 	thread_template.t_page_creation_count = 0;
 	thread_template.t_page_creation_time = 0;
@@ -326,7 +337,7 @@ thread_bootstrap(void)
 	thread_template.effective_policy = default_task_effective_policy;
 	thread_template.pended_policy = default_task_pended_policy;
 
-	thread_template.usynch_override_contended_resource_count = 0;
+	bzero(&thread_template.overrides, sizeof(thread_template.overrides));
 
 	thread_template.iotier_override = THROTTLE_LEVEL_NONE;
 	thread_template.thread_io_stats = NULL;
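/*
 * Editor's sketch (userland analogue, hypothetical names): the
 * thread_template/init_thread idiom touched above fills one static
 * template object at bootstrap, then each new instance is initialized
 * by plain structure assignment from it, so every field gets a
 * deterministic starting value in exactly one place.
 */
#include <stdlib.h>

struct widget {
	int	state;
	int	priority;
	void	*owner;
};

static struct widget widget_template;		/* filled once at bootstrap */

static void
widget_bootstrap(void)
{
	widget_template.state = 0;
	widget_template.priority = 0;
	widget_template.owner = NULL;
}

static struct widget *
widget_create(void)
{
	struct widget *w = malloc(sizeof(*w));

	if (w != NULL)
		*w = widget_template;	/* structure copy, as thread_create_internal() does from thread_template */
	return w;
}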
@@ -340,6 +351,8 @@ thread_bootstrap(void)
 	thread_template.ith_voucher_name = MACH_PORT_NULL;
 	thread_template.ith_voucher = IPC_VOUCHER_NULL;
 
+	thread_template.work_interval_id = 0;
+
 	init_thread = thread_template;
 	machine_set_current_thread(&init_thread);
 }
@@ -355,6 +368,16 @@ thread_init(void)
 			THREAD_CHUNK * sizeof(struct thread),
 			"threads");
 
+	thread_qos_override_zone = zinit(
+		sizeof(struct thread_qos_override),
+		4 * thread_max * sizeof(struct thread_qos_override),
+		PAGE_SIZE,
+		"thread qos override");
+	zone_change(thread_qos_override_zone, Z_EXPAND, TRUE);
+	zone_change(thread_qos_override_zone, Z_COLLECT, TRUE);
+	zone_change(thread_qos_override_zone, Z_CALLERACCT, FALSE);
+	zone_change(thread_qos_override_zone, Z_NOENCRYPT, TRUE);
+
 	lck_grp_attr_setdefault(&thread_lck_grp_attr);
 	lck_grp_init(&thread_lck_grp, "thread", &thread_lck_grp_attr);
 	lck_attr_setdefault(&thread_lck_attr);
@@ -391,7 +414,6 @@ void
 thread_terminate_self(void)
 {
 	thread_t		thread = current_thread();
-	task_t			task;
 	spl_t			s;
 	int threadcnt;
@@ -419,7 +441,7 @@ thread_terminate_self(void)
 
 		thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK;
 
 		/* If our priority was low because of a depressed yield, restore it in case we block below */
-		set_sched_pri(thread, thread->priority);
+		thread_recompute_sched_pri(thread, FALSE);
 
 		if (timer_call_cancel(&thread->depress_timer))
 			thread->depress_timer_active--;
@@ -448,16 +470,24 @@ thread_terminate_self(void)
 	thread_mtx_unlock(thread);
 
 	task = thread->task;
-	uthread_cleanup(task, thread->uthread, task->bsd_info);
+	uthread_cleanup(task, thread->uthread, task->bsd_info, thread->inspection == 1 ? TRUE : FALSE);
 	threadcnt = hw_atomic_sub(&task->active_thread_count, 1);
 
 	/*
 	 * If we are the last thread to terminate and the task is
 	 * associated with a BSD process, perform BSD process exit.
 	 */
-	if (threadcnt == 0 && task->bsd_info != NULL)
+	if (threadcnt == 0 && task->bsd_info != NULL) {
 		proc_exit(task->bsd_info);
-
+		/*
+		 * If there is crash info in the task, deliver the crash
+		 * notification now, since this is the task's last thread.
+		 */
+		if (task->corpse_info) {
+			task_deliver_crash_notification(task);
+		}
+	}
 	uthread_cred_free(thread->uthread);
 
 	s = splsched();
@@ -497,7 +527,9 @@ thread_terminate_self(void)
 	 */
 	thread->state |= TH_TERMINATE;
 	thread_mark_wait_locked(thread, THREAD_UNINT);
+	assert((thread->sched_flags & TH_SFLAG_PROMOTED) == 0);
 	assert(thread->promotions == 0);
+	assert(!(thread->sched_flags & TH_SFLAG_WAITQ_PROMOTED));
 	assert(thread->rwlock_count == 0);
 	thread_unlock(thread);
 	/* splsched */
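/*
 * Editor's sketch (C11 userland analogue, hypothetical names): the
 * "last thread runs proc_exit()" logic above relies on the atomic
 * decrement returning the new count, so exactly one terminating
 * thread observes zero and performs the task-level teardown.
 */
#include <stdatomic.h>
#include <stddef.h>

struct task_like {
	atomic_uint	active_thread_count;
	void		*corpse_info;	/* non-NULL if crash info is pending */
};

static void
last_thread_teardown(struct task_like *task)
{
	/* atomic_fetch_sub returns the old value; subtracting 1 mirrors hw_atomic_sub's new count */
	unsigned int remaining = atomic_fetch_sub(&task->active_thread_count, 1) - 1;

	if (remaining == 0) {
		/* exactly one thread reaches here: proc_exit() analogue ... */
		if (task->corpse_info != NULL) {
			/* ... then deliver the crash notification */
		}
	}
}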
@@ -506,6 +538,14 @@ thread_terminate_self(void)
 	/*NOTREACHED*/
 }
 
+/* Drop a thread refcount that definitely isn't the last one. */
+void
+thread_deallocate_safe(thread_t thread)
+{
+	if (__improbable(hw_atomic_sub(&(thread)->ref_count, 1) == 0))
+		panic("bad thread refcount!");
+}
+
 void
 thread_deallocate(
 	thread_t			thread)
@@ -515,18 +555,22 @@ thread_deallocate(
 	if (thread == THREAD_NULL)
 		return;
 
-	if (thread_deallocate_internal(thread) > 0)
+	if (__probable(hw_atomic_sub(&(thread)->ref_count, 1) > 0))
 		return;
 
 	if(!(thread->state & TH_TERMINATE2))
 		panic("thread_deallocate: thread not properly terminated\n");
 
+	assert(thread->runq == PROCESSOR_NULL);
+
 #if KPC
 	kpc_thread_destroy(thread);
 #endif
 
 	ipc_thread_terminate(thread);
 
+	proc_thread_qos_deallocate(thread);
+
 	task = thread->task;
 
 #ifdef MACH_BSD
@@ -578,11 +622,22 @@ thread_terminate_daemon(void)
 
 	simple_lock(&thread_terminate_lock);
 	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
+
+		/*
+		 * If marked for crash reporting, skip reaping.
+		 * The corpse delivery thread will clear the bit and
+		 * re-enqueue the thread for reaping when done.
+		 */
+		if (thread->inspection){
+			enqueue_tail(&crashed_threads_queue, (queue_entry_t)thread);
+			continue;
+		}
+
 		simple_unlock(&thread_terminate_lock);
 		(void)spllo();
 
 		assert(thread->SHARE_COUNT == 0);
-		assert(thread->BG_COUNT == 0); 
+		assert(thread->BG_COUNT == 0);
 
 		task = thread->task;
@@ -649,7 +704,7 @@ void
 thread_terminate_enqueue(
 	thread_t		thread)
 {
-	KERNEL_DEBUG_CONSTANT(TRACEDBG_CODE(DBG_TRACE_DATA, TRACE_DATA_THREAD_TERMINATE) | DBG_FUNC_NONE, thread->thread_id, 0, 0, 0, 0);
+	KERNEL_DEBUG_CONSTANT(TRACE_DATA_THREAD_TERMINATE | DBG_FUNC_NONE, thread->thread_id, 0, 0, 0, 0);
 
 	simple_lock(&thread_terminate_lock);
 	enqueue_tail(&thread_terminate_queue, (queue_entry_t)thread);
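/*
 * Editor's sketch (C11 userland analogue, hypothetical names): the two
 * release paths above split on whether the dropped reference can be
 * the last one. thread_deallocate_safe() asserts it never is (the
 * kernel panics there); thread_deallocate() runs the full teardown
 * when the count actually reaches zero.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_uint	ref_count;
};

static void
obj_destroy(struct obj *o)
{
	free(o);			/* slow path: final teardown */
}

static void
obj_release_safe(struct obj *o)
{
	/* caller guarantees another reference outlives this one */
	unsigned int newval = atomic_fetch_sub(&o->ref_count, 1) - 1;
	assert(newval != 0);		/* the kernel version panics instead */
}

static void
obj_release(struct obj *o)
{
	if (atomic_fetch_sub(&o->ref_count, 1) - 1 > 0)
		return;			/* common case: not the last reference */
	obj_destroy(o);			/* last reference: run full teardown */
}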
@@ -658,6 +713,42 @@ thread_terminate_enqueue(
 	thread_wakeup((event_t)&thread_terminate_queue);
 }
 
+/*
+ * thread_terminate_crashed_threads:
+ * walk the list of crashed threads and put back those that are
+ * no longer being inspected.
+ */
+void
+thread_terminate_crashed_threads()
+{
+	thread_t th_iter, th_remove;
+	boolean_t should_wake_terminate_queue = FALSE;
+
+	simple_lock(&thread_terminate_lock);
+	/*
+	 * Loop through the crashed threads queue and re-queue any
+	 * threads that are no longer being inspected.
+	 */
+	th_iter = (thread_t)queue_first(&crashed_threads_queue);
+	while (!queue_end(&crashed_threads_queue, (queue_entry_t)th_iter)) {
+		th_remove = th_iter;
+		th_iter = (thread_t)queue_next(&th_iter->links);
+
+		/* make sure current_thread is never in crashed queue */
+		assert(th_remove != current_thread());
+		if (th_remove->inspection != TRUE){
+			remque((queue_entry_t)th_remove);
+			enqueue_tail(&thread_terminate_queue, (queue_entry_t)th_remove);
+			should_wake_terminate_queue = TRUE;
+		}
+	}
+
+	simple_unlock(&thread_terminate_lock);
+	if (should_wake_terminate_queue == TRUE) {
+		thread_wakeup((event_t)&thread_terminate_queue);
+	}
+}
+
 /*
  * thread_stack_daemon:
  *
@@ -679,6 +770,8 @@ thread_stack_daemon(void)
 
 		/* allocate stack with interrupts enabled so that we can call into VM */
 		stack_alloc(thread);
+
+		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_WAIT) | DBG_FUNC_END, thread_tid(thread), 0, 0, 0, 0);
 
 		s = splsched();
 		thread_lock(thread);
@@ -707,6 +800,8 @@ void
 thread_stack_enqueue(
 	thread_t		thread)
 {
+	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_WAIT) | DBG_FUNC_START, thread_tid(thread), 0, 0, 0, 0);
+
 	simple_lock(&thread_stack_lock);
 	enqueue_tail(&thread_stack_queue, (queue_entry_t)thread);
 	simple_unlock(&thread_stack_lock);
@@ -722,6 +817,7 @@ thread_daemon_init(void)
 
 	simple_lock_init(&thread_terminate_lock, 0);
 	queue_init(&thread_terminate_queue);
+	queue_init(&crashed_threads_queue);
 
 	result = kernel_thread_start_priority((thread_continue_t)thread_terminate_daemon, NULL, MINPRI_KERNEL, &thread);
 	if (result != KERN_SUCCESS)
@@ -739,6 +835,9 @@ thread_daemon_init(void)
 	thread_deallocate(thread);
 }
 
+#define TH_OPTION_NONE		0x00
+#define TH_OPTION_NOCRED	0x01
+#define TH_OPTION_NOSUSP	0x02
 /*
  * Create a new thread.
  * Doesn't start the thread running.
@@ -751,9 +850,6 @@ thread_create_internal(
 	integer_t				priority,
 	thread_continue_t		continuation,
 	int						options,
-#define TH_OPTION_NONE		0x00
-#define TH_OPTION_NOCRED	0x01
-#define TH_OPTION_NOSUSP	0x02
 	thread_t				*out_thread)
 {
 	thread_t				new_thread;
@@ -786,7 +882,7 @@ thread_create_internal(
 
 		new_thread->uthread = NULL;
 		/* cred free may not be necessary */
-		uthread_cleanup(parent_task, ut, parent_task->bsd_info);
+		uthread_cleanup(parent_task, ut, parent_task->bsd_info, FALSE);
 		uthread_cred_free(ut);
 		uthread_zone_free(ut);
 #endif  /* MACH_BSD */
@@ -832,7 +928,7 @@ thread_create_internal(
 		void *ut = new_thread->uthread;
 
 		new_thread->uthread = NULL;
-		uthread_cleanup(parent_task, ut, parent_task->bsd_info);
+		uthread_cleanup(parent_task, ut, parent_task->bsd_info, FALSE);
 		/* cred free may not be necessary */
 		uthread_cred_free(ut);
 		uthread_zone_free(ut);
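/*
 * Editor's sketch (userland analogue, hypothetical names): like
 * thread_terminate_crashed_threads() above, capture the next element
 * BEFORE unlinking the current one, or the iteration would walk the
 * links of an already-removed entry.
 */
#include <stdbool.h>

struct node {
	struct node	*next;
	struct node	*prev;
	bool		inspection;
};

/* circular doubly linked list with a dummy head, like queue_head_t */
static void
requeue_uninspected(struct node *head, struct node *dest)
{
	struct node *iter = head->next;

	while (iter != head) {
		struct node *cur = iter;
		iter = iter->next;		/* advance first */

		if (!cur->inspection) {
			/* unlink cur (remque analogue) */
			cur->prev->next = cur->next;
			cur->next->prev = cur->prev;
			/* append cur to dest (enqueue_tail analogue) */
			cur->prev = dest->prev;
			cur->next = dest;
			dest->prev->next = cur;
			dest->prev = cur;
		}
	}
}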
@@ -888,14 +984,6 @@ thread_create_internal(
 	timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
 	timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);
 
-#if CONFIG_COUNTERS
-	/*
-	 * If parent task has any reservations, they need to be propagated to this
-	 * thread.
-	 */
-	new_thread->t_chud = (TASK_PMC_FLAG == (parent_task->t_chud & TASK_PMC_FLAG)) ?
-		THREAD_PMC_FLAG : 0U;
-#endif
 #if KPC
 	kpc_thread_create(new_thread);
 #endif
@@ -905,26 +993,29 @@ thread_create_internal(
 	new_thread->requested_policy.terminated = parent_task->effective_policy.terminated;
 
 	/* Set the thread's scheduling parameters */
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
+	new_thread->sched_stamp = sched_tick;
+	new_thread->pri_shift = sched_pri_shift;
+#endif /* defined(CONFIG_SCHED_TIMESHARE_CORE) */
+
 	new_thread->sched_mode = SCHED(initial_thread_sched_mode)(parent_task);
 	new_thread->sched_flags = 0;
 	new_thread->max_priority = parent_task->max_priority;
 	new_thread->task_priority = parent_task->priority;
-	new_thread->priority = (priority < 0)? parent_task->priority: priority;
-	if (new_thread->priority > new_thread->max_priority)
-		new_thread->priority = new_thread->max_priority;
-	new_thread->importance = new_thread->priority - new_thread->task_priority;
-	new_thread->saved_importance = new_thread->importance;
-#if defined(CONFIG_SCHED_TIMESHARE_CORE)
-	new_thread->sched_stamp = sched_tick;
-	new_thread->pri_shift = sched_pri_shift;
-#endif /* defined(CONFIG_SCHED_TIMESHARE_CORE) */
+
+	int new_priority = (priority < 0) ? parent_task->priority: priority;
+	new_priority = (priority < 0)? parent_task->priority: priority;
+	if (new_priority > new_thread->max_priority)
+		new_priority = new_thread->max_priority;
+
+	new_thread->importance = new_priority - new_thread->task_priority;
+	new_thread->saved_importance = new_thread->importance;
 
 	if (parent_task->max_priority <= MAXPRI_THROTTLE) {
 		sched_set_thread_throttled(new_thread, TRUE);
 	}
 
-	SCHED(compute_priority)(new_thread, FALSE);
+	sched_set_thread_base_priority(new_thread, new_priority);
 
 	thread_policy_create(new_thread);
@@ -942,7 +1033,7 @@ thread_create_internal(
 	threads_count++;
 
 	new_thread->active = TRUE;
-
+	new_thread->inspection = FALSE;
 	*out_thread = new_thread;
 
 	{
@@ -971,7 +1062,8 @@ static kern_return_t
 thread_create_internal2(
 	task_t				task,
 	thread_t			*new_thread,
-	boolean_t			from_user)
+	boolean_t			from_user,
+	thread_continue_t		continuation)
 {
 	kern_return_t		result;
 	thread_t		thread;
@@ -979,7 +1071,7 @@ thread_create_internal2(
 	if (task == TASK_NULL || task == kernel_task)
 		return (KERN_INVALID_ARGUMENT);
 
-	result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, TH_OPTION_NONE, &thread);
+	result = thread_create_internal(task, -1, continuation, TH_OPTION_NONE, &thread);
 	if (result != KERN_SUCCESS)
 		return (result);
@@ -1010,7 +1102,7 @@ thread_create(
 	task_t				task,
 	thread_t			*new_thread)
 {
-	return thread_create_internal2(task, new_thread, FALSE);
+	return thread_create_internal2(task, new_thread, FALSE, (thread_continue_t)thread_bootstrap_return);
 }
 
 kern_return_t
@@ -1018,7 +1110,16 @@ thread_create_from_user(
 	task_t				task,
 	thread_t			*new_thread)
 {
-	return thread_create_internal2(task, new_thread, TRUE);
+	return thread_create_internal2(task, new_thread, TRUE, (thread_continue_t)thread_bootstrap_return);
+}
+
+kern_return_t
+thread_create_with_continuation(
+	task_t				task,
+	thread_t			*new_thread,
+	thread_continue_t		continuation)
+{
+	return thread_create_internal2(task, new_thread, FALSE, continuation);
 }
 
 static kern_return_t
@@ -1040,8 +1141,7 @@ thread_create_running_internal2(
 	if (result != KERN_SUCCESS)
 		return (result);
 
-	result = machine_thread_set_state(
-						thread, flavor, new_state, new_state_count);
+	result = machine_thread_set_state(thread, flavor, new_state, new_state_count);
 	if (result != KERN_SUCCESS) {
 		task_unlock(task);
 		lck_mtx_unlock(&tasks_threads_lock);
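/*
 * Editor's sketch (hypothetical caller and continuation): the refactor
 * above threads a continuation parameter through
 * thread_create_internal2(), so thread_create() and
 * thread_create_from_user() keep the historical
 * thread_bootstrap_return entry point while the new
 * thread_create_with_continuation() lets an in-kernel caller supply
 * its own. A plausible in-kernel use, under those assumptions:
 */
static void
my_thread_entry(void *parameter, wait_result_t wresult)
{
	/* runs as the new thread's first continuation */
}

static kern_return_t
spawn_helper(task_t task)
{
	thread_t	thread;
	kern_return_t	kr;

	kr = thread_create_with_continuation(task, &thread,
	    (thread_continue_t)my_thread_entry);
	if (kr != KERN_SUCCESS)
		return kr;

	/* like thread_create(), the new thread starts suspended */
	thread_resume(thread);
	thread_deallocate(thread);	/* drop the creation reference */
	return KERN_SUCCESS;
}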
@@ -1199,6 +1299,80 @@ kernel_thread_start(
 	return kernel_thread_start_priority(continuation, parameter, -1, new_thread);
 }
 
+/* Separated into helper function so it can be used by THREAD_BASIC_INFO and THREAD_EXTENDED_INFO */
+/* it is assumed that the thread is locked by the caller */
+static void
+retrieve_thread_basic_info(thread_t thread, thread_basic_info_t basic_info)
+{
+	int	state, flags;
+
+	/* fill in info */
+
+	thread_read_times(thread, &basic_info->user_time,
+								&basic_info->system_time);
+
+	/*
+	 * Update lazy-evaluated scheduler info because someone wants it.
+	 */
+	if (SCHED(can_update_priority)(thread))
+		SCHED(update_priority)(thread);
+
+	basic_info->sleep_time = 0;
+
+	/*
+	 * To calculate cpu_usage, first correct for timer rate,
+	 * then for 5/8 ageing.  The correction factor [3/5] is
+	 * (1/(5/8) - 1).
+	 */
+	basic_info->cpu_usage = 0;
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
+	if (sched_tick_interval) {
+		basic_info->cpu_usage = (integer_t)(((uint64_t)thread->cpu_usage
+										* TH_USAGE_SCALE) / sched_tick_interval);
+		basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
+	}
+#endif
+
+	if (basic_info->cpu_usage > TH_USAGE_SCALE)
+		basic_info->cpu_usage = TH_USAGE_SCALE;
+
+	basic_info->policy = ((thread->sched_mode == TH_MODE_TIMESHARE)?
+											POLICY_TIMESHARE: POLICY_RR);
+
+	flags = 0;
+	if (thread->options & TH_OPT_IDLE_THREAD)
+		flags |= TH_FLAGS_IDLE;
+
+	if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) {
+		flags |= TH_FLAGS_GLOBAL_FORCED_IDLE;
+	}
+
+	if (!thread->kernel_stack)
+		flags |= TH_FLAGS_SWAPPED;
+
+	state = 0;
+	if (thread->state & TH_TERMINATE)
+		state = TH_STATE_HALTED;
+	else
+	if (thread->state & TH_RUN)
+		state = TH_STATE_RUNNING;
+	else
+	if (thread->state & TH_UNINT)
+		state = TH_STATE_UNINTERRUPTIBLE;
+	else
+	if (thread->state & TH_SUSP)
+		state = TH_STATE_STOPPED;
+	else
+	if (thread->state & TH_WAIT)
+		state = TH_STATE_WAITING;
+
+	basic_info->run_state = state;
+	basic_info->flags = flags;
+
+	basic_info->suspend_count = thread->user_stop_count;
+
+	return;
+}
 
 kern_return_t
 thread_info_internal(
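/*
 * Editor's note, working the arithmetic in the ageing comment above:
 * the stated correction factor (1/(5/8) - 1) = 8/5 - 1 = 3/5, which is
 * exactly what the (cpu_usage * 3) / 5 line applies after the
 * timer-rate scaling. A hypothetical helper showing the same scaling:
 */
static unsigned int
age_corrected_usage(unsigned int scaled_usage)
{
	/* correction factor: 1/(5/8) - 1 = 8/5 - 1 = 3/5 */
	return (scaled_usage * 3) / 5;		/* e.g. 1000 -> 600 */
}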
@@ -1207,116 +1381,47 @@ thread_info_internal(
 	thread_info_t			thread_info_out,	/* ptr to OUT array */
 	mach_msg_type_number_t	*thread_info_count)	/*IN/OUT*/
 {
-	int						state, flags;
-	spl_t					s;
+	spl_t	s;
 
 	if (thread == THREAD_NULL)
 		return (KERN_INVALID_ARGUMENT);
 
 	if (flavor == THREAD_BASIC_INFO) {
-		register thread_basic_info_t	basic_info;
 
-		if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
+		if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
 			return (KERN_INVALID_ARGUMENT);
 
-		basic_info = (thread_basic_info_t) thread_info_out;
-
-		s = splsched();
-		thread_lock(thread);
-
-		/* fill in info */
+		s = splsched();
+		thread_lock(thread);
 
-		thread_read_times(thread, &basic_info->user_time,
-											&basic_info->system_time);
+		retrieve_thread_basic_info(thread, (thread_basic_info_t) thread_info_out);
 
-		/*
-		 * Update lazy-evaluated scheduler info because someone wants it.
-		 */
-		if (SCHED(can_update_priority)(thread))
-			SCHED(update_priority)(thread);
+		thread_unlock(thread);
+		splx(s);
 
-		basic_info->sleep_time = 0;
+		*thread_info_count = THREAD_BASIC_INFO_COUNT;
 
-		/*
-		 * To calculate cpu_usage, first correct for timer rate,
-		 * then for 5/8 ageing.  The correction factor [3/5] is
-		 * (1/(5/8) - 1).
-		 */
-		basic_info->cpu_usage = 0;
-#if defined(CONFIG_SCHED_TIMESHARE_CORE)
-		if (sched_tick_interval) {
-			basic_info->cpu_usage = (integer_t)(((uint64_t)thread->cpu_usage
-											* TH_USAGE_SCALE) / sched_tick_interval);
-			basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
-		}
-#endif
-
-		if (basic_info->cpu_usage > TH_USAGE_SCALE)
-			basic_info->cpu_usage = TH_USAGE_SCALE;
-
-		basic_info->policy = ((thread->sched_mode == TH_MODE_TIMESHARE)?
-												POLICY_TIMESHARE: POLICY_RR);
-
-		flags = 0;
-		if (thread->options & TH_OPT_IDLE_THREAD)
-			flags |= TH_FLAGS_IDLE;
-
-		if (!thread->kernel_stack)
-			flags |= TH_FLAGS_SWAPPED;
-
-		state = 0;
-		if (thread->state & TH_TERMINATE)
-			state = TH_STATE_HALTED;
-		else
-		if (thread->state & TH_RUN)
-			state = TH_STATE_RUNNING;
-		else
-		if (thread->state & TH_UNINT)
-			state = TH_STATE_UNINTERRUPTIBLE;
-		else
-		if (thread->state & TH_SUSP)
-			state = TH_STATE_STOPPED;
-		else
-		if (thread->state & TH_WAIT)
-			state = TH_STATE_WAITING;
-
-		basic_info->run_state = state;
-		basic_info->flags = flags;
-
-		basic_info->suspend_count = thread->user_stop_count;
-
-		thread_unlock(thread);
-		splx(s);
-
-		*thread_info_count = THREAD_BASIC_INFO_COUNT;
-
-		return (KERN_SUCCESS);
+		return (KERN_SUCCESS);
 	}
 	else
 	if (flavor == THREAD_IDENTIFIER_INFO) {
-		register thread_identifier_info_t	identifier_info;
+		register thread_identifier_info_t	identifier_info;
 
-		if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT)
+		if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT)
 			return (KERN_INVALID_ARGUMENT);
 
-		identifier_info = (thread_identifier_info_t) thread_info_out;
+		identifier_info = (thread_identifier_info_t) thread_info_out;
 
-		s = splsched();
-		thread_lock(thread);
+		s = splsched();
+		thread_lock(thread);
 
-		identifier_info->thread_id = thread->thread_id;
-		identifier_info->thread_handle = thread->machine.cthread_self;
-		if(thread->task->bsd_info) {
-			identifier_info->dispatch_qaddr = identifier_info->thread_handle + get_dispatchqueue_offset_from_proc(thread->task->bsd_info);
-		} else {
-			thread_unlock(thread);
-			splx(s);
-			return KERN_INVALID_ARGUMENT;
-		}
+		identifier_info->thread_id = thread->thread_id;
+		identifier_info->thread_handle = thread->machine.cthread_self;
+		identifier_info->dispatch_qaddr = thread_dispatchqaddr(thread);
 
-		thread_unlock(thread);
-		splx(s);
-		return KERN_SUCCESS;
+		thread_unlock(thread);
+		splx(s);
+		return KERN_SUCCESS;
 	}
 	else
 	if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
@@ -1327,23 +1432,22 @@ thread_info_internal(
 
 		ts_info = (policy_timeshare_info_t)thread_info_out;
 
-	    s = splsched();
+		s = splsched();
 		thread_lock(thread);
 
-	    if (thread->sched_mode != TH_MODE_TIMESHARE) {
-	        thread_unlock(thread);
+		if (thread->sched_mode != TH_MODE_TIMESHARE) {
+			thread_unlock(thread);
 			splx(s);
-
-			return (KERN_INVALID_POLICY);
-	    }
+			return (KERN_INVALID_POLICY);
+		}
 
 		ts_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
 		if (ts_info->depressed) {
 			ts_info->base_priority = DEPRESSPRI;
-			ts_info->depress_priority = thread->priority;
+			ts_info->depress_priority = thread->base_pri;
 		}
 		else {
-			ts_info->base_priority = thread->priority;
+			ts_info->base_priority = thread->base_pri;
 			ts_info->depress_priority = -1;
 		}
@@ -1351,11 +1455,11 @@ thread_info_internal(
 		ts_info->max_priority = thread->max_priority;
 
 		thread_unlock(thread);
-	    splx(s);
+		splx(s);
 
 		*thread_info_count = POLICY_TIMESHARE_INFO_COUNT;
 
-		return (KERN_SUCCESS);
+		return (KERN_SUCCESS);
 	}
 	else
 	if (flavor == THREAD_SCHED_FIFO_INFO) {
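/*
 * Editor's sketch (user-space caller, not part of the diff): the
 * flavors handled above arrive through the Mach thread_info()
 * routine. A minimal THREAD_BASIC_INFO query for the calling thread:
 */
#include <mach/mach.h>
#include <stdio.h>

static void
print_my_thread_times(void)
{
	thread_basic_info_data_t	info;
	mach_msg_type_number_t		count = THREAD_BASIC_INFO_COUNT;
	thread_act_t			self = mach_thread_self();

	if (thread_info(self, THREAD_BASIC_INFO,
	    (thread_info_t)&info, &count) == KERN_SUCCESS) {
		printf("user %d.%06ds system %d.%06ds\n",
		    info.user_time.seconds, info.user_time.microseconds,
		    info.system_time.seconds, info.system_time.microseconds);
	}
	mach_port_deallocate(mach_task_self(), self);	/* release the port from mach_thread_self() */
}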
@@ -1369,17 +1473,17 @@ thread_info_internal(
 		policy_rr_info_t			rr_info;
 		uint32_t quantum_time;
 		uint64_t	quantum_ns;
-		
+
 		if (*thread_info_count < POLICY_RR_INFO_COUNT)
 			return (KERN_INVALID_ARGUMENT);
 
 		rr_info = (policy_rr_info_t) thread_info_out;
 
-	    s = splsched();
+		s = splsched();
 		thread_lock(thread);
 
-	    if (thread->sched_mode == TH_MODE_TIMESHARE) {
-	        thread_unlock(thread);
+		if (thread->sched_mode == TH_MODE_TIMESHARE) {
+			thread_unlock(thread);
 			splx(s);
 
 			return (KERN_INVALID_POLICY);
@@ -1388,25 +1492,80 @@ thread_info_internal(
 		rr_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
 		if (rr_info->depressed) {
 			rr_info->base_priority = DEPRESSPRI;
-			rr_info->depress_priority = thread->priority;
+			rr_info->depress_priority = thread->base_pri;
 		}
 		else {
-			rr_info->base_priority = thread->priority;
+			rr_info->base_priority = thread->base_pri;
 			rr_info->depress_priority = -1;
 		}
 
 		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
 		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
-		
+
 		rr_info->max_priority = thread->max_priority;
-	    rr_info->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
+		rr_info->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
 
 		thread_unlock(thread);
-	    splx(s);
+		splx(s);
 
 		*thread_info_count = POLICY_RR_INFO_COUNT;
 
-		return (KERN_SUCCESS);
+		return (KERN_SUCCESS);
+	}
+	else
+	if (flavor == THREAD_EXTENDED_INFO) {
+		thread_basic_info_data_t	basic_info;
+		thread_extended_info_t		extended_info = (thread_extended_info_t) thread_info_out;
+
+		if (*thread_info_count < THREAD_EXTENDED_INFO_COUNT) {
+			return (KERN_INVALID_ARGUMENT);
+		}
+
+		s = splsched();
+		thread_lock(thread);
+
+		/* NOTE: This mimics fill_taskthreadinfo(), which is the function used by proc_pidinfo() for
+		 * the PROC_PIDTHREADINFO flavor (which can't be used on corpses)
+		 */
+		retrieve_thread_basic_info(thread, &basic_info);
+		extended_info->pth_user_time = ((basic_info.user_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.user_time.microseconds * (integer_t)NSEC_PER_USEC));
+		extended_info->pth_system_time = ((basic_info.system_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.system_time.microseconds * (integer_t)NSEC_PER_USEC));
+
+		extended_info->pth_cpu_usage = basic_info.cpu_usage;
+		extended_info->pth_policy = basic_info.policy;
+		extended_info->pth_run_state = basic_info.run_state;
+		extended_info->pth_flags = basic_info.flags;
+		extended_info->pth_sleep_time = basic_info.sleep_time;
+		extended_info->pth_curpri = thread->sched_pri;
+		extended_info->pth_priority = thread->base_pri;
+		extended_info->pth_maxpriority = thread->max_priority;
+
+		bsd_getthreadname(thread->uthread,extended_info->pth_name);
+
+		thread_unlock(thread);
+		splx(s);
+
+		*thread_info_count = THREAD_EXTENDED_INFO_COUNT;
+
+		return (KERN_SUCCESS);
+	}
+	else
+	if (flavor == THREAD_DEBUG_INFO_INTERNAL) {
+#if DEVELOPMENT || DEBUG
+		thread_debug_info_internal_t dbg_info;
+		if (*thread_info_count < THREAD_DEBUG_INFO_INTERNAL_COUNT)
+			return (KERN_NOT_SUPPORTED);
+
+		if (thread_info_out == NULL)
+			return (KERN_INVALID_ARGUMENT);
+
+		dbg_info = (thread_debug_info_internal_t) thread_info_out;
+		dbg_info->page_creation_count = thread->t_page_creation_count;
+
+		*thread_info_count = THREAD_DEBUG_INFO_INTERNAL_COUNT;
+		return (KERN_SUCCESS);
+#endif /* DEVELOPMENT || DEBUG */
+		return (KERN_NOT_SUPPORTED);
 	}
 
 	return (KERN_INVALID_ARGUMENT);
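/*
 * Editor's sketch: the THREAD_EXTENDED_INFO hunk above folds a
 * time_value_t (seconds + microseconds) into nanoseconds using
 * integer_t arithmetic; the same conversion in explicit 64-bit form
 * (NSEC_PER_SEC = 1e9, NSEC_PER_USEC = 1e3), where the 64-bit
 * intermediate avoids overflow once seconds exceeds about 2:
 */
#include <stdint.h>

static uint64_t
time_value_to_ns(int32_t seconds, int32_t microseconds)
{
	return ((uint64_t)seconds * 1000000000ULL) +
	       ((uint64_t)microseconds * 1000ULL);
}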
@@ -1649,7 +1808,7 @@ THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU__SENDING_EXC_RESOURCE(void)
 	task_t						task		= current_task();
 	thread_t					thread		= current_thread();
 	uint64_t					tid			= thread->thread_id;
-	char						*procname	= (char *) "unknown";
+	const char					*procname	= "unknown";
 	time_value_t				thread_total_time	= {0, 0};
 	time_value_t				thread_system_time;
 	time_value_t				thread_user_time;
@@ -1761,18 +1920,6 @@ THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU__SENDING_EXC_RESOURCE(void)
 	}
 }
 
-#define UPDATE_IO_STATS(info, size)				\
-{								\
-	info.count++;						\
-	info.size += size;					\
-}
-
-#define UPDATE_IO_STATS_ATOMIC(info, size)			\
-{								\
-	OSIncrementAtomic64((SInt64 *)&(info.count));		\
-	OSAddAtomic64(size, (SInt64 *)&(info.size));		\
-}
-
 void thread_update_io_stats(thread_t thread, int size, int io_flags)
 {
 	int io_tier;
@@ -2026,8 +2173,10 @@ thread_dispatchqaddr(
 
 	if (thread != THREAD_NULL) {
 		thread_handle = thread->machine.cthread_self;
-
-		if (thread->task->bsd_info)
+
+		if (thread->inspection == TRUE)
+			dispatchqueue_addr = thread_handle + get_task_dispatchqueue_offset(thread->task);
+		else if (thread->task->bsd_info)
 			dispatchqueue_addr = thread_handle + get_dispatchqueue_offset_from_proc(thread->task->bsd_info);
 	}
@@ -2364,6 +2513,15 @@ int64_t dtrace_get_thread_vtime(thread_t thread)
 	return 0;
 }
 
+int dtrace_get_thread_last_cpu_id(thread_t thread)
+{
+	if ((thread != THREAD_NULL) && (thread->last_processor != PROCESSOR_NULL)) {
+		return thread->last_processor->cpu_id;
+	} else {
+		return -1;
+	}
+}
+
 int64_t dtrace_get_thread_tracing(thread_t thread)
 {
 	if (thread != THREAD_NULL)