/*
- * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <kern/assert.h>
#include <kern/exc_resource.h>
#include <kern/telemetry.h>
+#include <corpses/task_corpse.h>
#if KPC
#include <kern/kpc.h>
#endif
#include <vm/vm_pageout.h>
#include <sys/kdebug.h>
-
+#include <sys/bsdtask_info.h>
#include <mach/sdt.h>
/*
decl_simple_lock_data(static,thread_terminate_lock)
static queue_head_t thread_terminate_queue;
+static queue_head_t crashed_threads_queue;
+
static struct thread thread_template, init_thread;
static void sched_call_null(
thread_template.reason = AST_NONE;
thread_template.at_safe_point = FALSE;
thread_template.wait_event = NO_EVENT64;
- thread_template.wait_queue = WAIT_QUEUE_NULL;
+ thread_template.waitq = NULL;
thread_template.wait_result = THREAD_WAITING;
thread_template.options = THREAD_ABORTSAFE;
thread_template.state = TH_WAIT | TH_UNINT;
thread_template.static_param = 0;
thread_template.policy_reset = 0;
- thread_template.priority = 0;
+ thread_template.base_pri = 0;
thread_template.sched_pri = 0;
thread_template.max_priority = 0;
thread_template.task_priority = 0;
thread_template.promotions = 0;
thread_template.pending_promoter_index = 0;
- thread_template.pending_promoter[0] =
+ thread_template.pending_promoter[0] = NULL;
thread_template.pending_promoter[1] = NULL;
thread_template.rwlock_count = 0;
thread_template.quantum_remaining = 0;
thread_template.last_run_time = 0;
+ thread_template.last_made_runnable_time = 0;
thread_template.computation_metered = 0;
thread_template.computation_epoch = 0;
thread_template.vtimer_prof_save = 0;
thread_template.vtimer_rlim_save = 0;
+#if CONFIG_SCHED_SFI
thread_template.wait_sfi_begin_time = 0;
+#endif
thread_template.wait_timer_is_set = FALSE;
thread_template.wait_timer_active = 0;
thread_template.depress_timer_active = 0;
- thread_template.special_handler.handler = special_handler;
- thread_template.special_handler.next = NULL;
-
thread_template.recover = (vm_offset_t)NULL;
thread_template.map = VM_MAP_NULL;
thread_template.ith_voucher_name = MACH_PORT_NULL;
thread_template.ith_voucher = IPC_VOUCHER_NULL;
+ thread_template.work_interval_id = 0;
+
init_thread = thread_template;
machine_set_current_thread(&init_thread);
}
thread_terminate_self(void)
{
thread_t thread = current_thread();
-
task_t task;
spl_t s;
int threadcnt;
thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK;
/* If our priority was low because of a depressed yield, restore it in case we block below */
- set_sched_pri(thread, thread->priority);
+ thread_recompute_sched_pri(thread, FALSE);
if (timer_call_cancel(&thread->depress_timer))
thread->depress_timer_active--;
thread_mtx_unlock(thread);
task = thread->task;
- uthread_cleanup(task, thread->uthread, task->bsd_info);
+ uthread_cleanup(task, thread->uthread, task->bsd_info, thread->inspection == 1 ? TRUE : FALSE);
threadcnt = hw_atomic_sub(&task->active_thread_count, 1);
/*
* If we are the last thread to terminate and the task is
* associated with a BSD process, perform BSD process exit.
*/
- if (threadcnt == 0 && task->bsd_info != NULL)
+ if (threadcnt == 0 && task->bsd_info != NULL) {
proc_exit(task->bsd_info);
-
+ /*
+ * If there is crash info in the task, deliver the
+ * crash notification now, since this is the
+ * last thread for this task.
+ */
+ if (task->corpse_info) {
+ task_deliver_crash_notification(task);
+ }
+ }
uthread_cred_free(thread->uthread);
s = splsched();
*/
thread->state |= TH_TERMINATE;
thread_mark_wait_locked(thread, THREAD_UNINT);
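+ /* A terminating thread should hold no outstanding priority promotions */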
+ assert((thread->sched_flags & TH_SFLAG_PROMOTED) == 0);
assert(thread->promotions == 0);
+ assert(!(thread->sched_flags & TH_SFLAG_WAITQ_PROMOTED));
assert(thread->rwlock_count == 0);
thread_unlock(thread);
/* splsched */
/*NOTREACHED*/
}
+/* Drop a thread refcount that definitely isn't the last one. */
+void
+thread_deallocate_safe(thread_t thread)
+{
+ if (__improbable(hw_atomic_sub(&(thread)->ref_count, 1) == 0))
+ panic("bad thread refcount!");
+}
+
void
thread_deallocate(
thread_t thread)
if (thread == THREAD_NULL)
return;
- if (thread_deallocate_internal(thread) > 0)
+ if (__probable(hw_atomic_sub(&(thread)->ref_count, 1) > 0))
return;
if(!(thread->state & TH_TERMINATE2))
panic("thread_deallocate: thread not properly terminated\n");
+ assert(thread->runq == PROCESSOR_NULL);
+
#if KPC
kpc_thread_destroy(thread);
#endif
simple_lock(&thread_terminate_lock);
while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
+
+ /*
+ * If marked for crash reporting, skip reaping.
+ * The corpse delivery thread will clear the bit and enqueue
+ * the thread for reaping when done.
+ */
+ if (thread->inspection){
+ enqueue_tail(&crashed_threads_queue, (queue_entry_t)thread);
+ continue;
+ }
+
simple_unlock(&thread_terminate_lock);
(void)spllo();
assert(thread->SHARE_COUNT == 0);
- assert(thread->BG_COUNT == 0);
+ assert(thread->BG_COUNT == 0);
task = thread->task;
thread_wakeup((event_t)&thread_terminate_queue);
}
+/*
+ * thread_terminate_crashed_threads:
+ * walk the list of crashed threads and put back the threads
+ * that are no longer being inspected.
+ */
+void
+thread_terminate_crashed_threads()
+{
+ thread_t th_iter, th_remove;
+ boolean_t should_wake_terminate_queue = FALSE;
+
+ simple_lock(&thread_terminate_lock);
+ /*
+ * loop through the crashed threads queue
+ * and put back any threads that are no longer being inspected
+ */
+ th_iter = (thread_t)queue_first(&crashed_threads_queue);
+ while (!queue_end(&crashed_threads_queue, (queue_entry_t)th_iter)) {
+ th_remove = th_iter;
+ th_iter = (thread_t)queue_next(&th_iter->links);
+
+ /* make sure current_thread is never on the crashed threads queue */
+ assert(th_remove != current_thread());
+ if (th_remove->inspection != TRUE){
+ remque((queue_entry_t)th_remove);
+ enqueue_tail(&thread_terminate_queue, (queue_entry_t)th_remove);
+ should_wake_terminate_queue = TRUE;
+ }
+ }
+
+ simple_unlock(&thread_terminate_lock);
+ if (should_wake_terminate_queue == TRUE) {
+ thread_wakeup((event_t)&thread_terminate_queue);
+ }
+}
+
/*
* thread_stack_daemon:
*
/* allocate stack with interrupts enabled so that we can call into VM */
stack_alloc(thread);
+
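+ /* The stack is now in place; mark the end of this thread's stack wait */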
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_WAIT) | DBG_FUNC_END, thread_tid(thread), 0, 0, 0, 0);
s = splsched();
thread_lock(thread);
thread_stack_enqueue(
thread_t thread)
{
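+ /* Trace the start of the wait for a kernel stack; thread_stack_daemon emits the matching END */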
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_WAIT) | DBG_FUNC_START, thread_tid(thread), 0, 0, 0, 0);
+
simple_lock(&thread_stack_lock);
enqueue_tail(&thread_stack_queue, (queue_entry_t)thread);
simple_unlock(&thread_stack_lock);
simple_lock_init(&thread_terminate_lock, 0);
queue_init(&thread_terminate_queue);
+ queue_init(&crashed_threads_queue);
result = kernel_thread_start_priority((thread_continue_t)thread_terminate_daemon, NULL, MINPRI_KERNEL, &thread);
if (result != KERN_SUCCESS)
thread_deallocate(thread);
}
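+/* Internal options for thread_create_internal() */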
+#define TH_OPTION_NONE 0x00
+#define TH_OPTION_NOCRED 0x01
+#define TH_OPTION_NOSUSP 0x02
/*
* Create a new thread.
* Doesn't start the thread running.
integer_t priority,
thread_continue_t continuation,
int options,
-#define TH_OPTION_NONE 0x00
-#define TH_OPTION_NOCRED 0x01
-#define TH_OPTION_NOSUSP 0x02
thread_t *out_thread)
{
thread_t new_thread;
new_thread->uthread = NULL;
/* cred free may not be necessary */
- uthread_cleanup(parent_task, ut, parent_task->bsd_info);
+ uthread_cleanup(parent_task, ut, parent_task->bsd_info, FALSE);
uthread_cred_free(ut);
uthread_zone_free(ut);
#endif /* MACH_BSD */
void *ut = new_thread->uthread;
new_thread->uthread = NULL;
- uthread_cleanup(parent_task, ut, parent_task->bsd_info);
+ uthread_cleanup(parent_task, ut, parent_task->bsd_info, FALSE);
/* cred free may not be necessary */
uthread_cred_free(ut);
uthread_zone_free(ut);
timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);
-#if CONFIG_COUNTERS
- /*
- * If parent task has any reservations, they need to be propagated to this
- * thread.
- */
- new_thread->t_chud = (TASK_PMC_FLAG == (parent_task->t_chud & TASK_PMC_FLAG)) ?
- THREAD_PMC_FLAG : 0U;
-#endif
#if KPC
kpc_thread_create(new_thread);
#endif
new_thread->requested_policy.terminated = parent_task->effective_policy.terminated;
/* Set the thread's scheduling parameters */
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
+ new_thread->sched_stamp = sched_tick;
+ new_thread->pri_shift = sched_pri_shift;
+#endif /* defined(CONFIG_SCHED_TIMESHARE_CORE) */
+
new_thread->sched_mode = SCHED(initial_thread_sched_mode)(parent_task);
new_thread->sched_flags = 0;
new_thread->max_priority = parent_task->max_priority;
new_thread->task_priority = parent_task->priority;
- new_thread->priority = (priority < 0)? parent_task->priority: priority;
- if (new_thread->priority > new_thread->max_priority)
- new_thread->priority = new_thread->max_priority;
- new_thread->importance = new_thread->priority - new_thread->task_priority;
- new_thread->saved_importance = new_thread->importance;
-#if defined(CONFIG_SCHED_TIMESHARE_CORE)
- new_thread->sched_stamp = sched_tick;
- new_thread->pri_shift = sched_pri_shift;
-#endif /* defined(CONFIG_SCHED_TIMESHARE_CORE) */
+ int new_priority = (priority < 0) ? parent_task->priority : priority;
+ if (new_priority > new_thread->max_priority)
+ new_priority = new_thread->max_priority;
+
+ new_thread->importance = new_priority - new_thread->task_priority;
+ new_thread->saved_importance = new_thread->importance;
if (parent_task->max_priority <= MAXPRI_THROTTLE) {
sched_set_thread_throttled(new_thread, TRUE);
}
- SCHED(compute_priority)(new_thread, FALSE);
+ sched_set_thread_base_priority(new_thread, new_priority);
thread_policy_create(new_thread);
threads_count++;
new_thread->active = TRUE;
-
+ new_thread->inspection = FALSE;
*out_thread = new_thread;
{
thread_create_internal2(
task_t task,
thread_t *new_thread,
- boolean_t from_user)
+ boolean_t from_user,
+ thread_continue_t continuation)
{
kern_return_t result;
thread_t thread;
if (task == TASK_NULL || task == kernel_task)
return (KERN_INVALID_ARGUMENT);
- result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, TH_OPTION_NONE, &thread);
+ result = thread_create_internal(task, -1, continuation, TH_OPTION_NONE, &thread);
if (result != KERN_SUCCESS)
return (result);
task_t task,
thread_t *new_thread)
{
- return thread_create_internal2(task, new_thread, FALSE);
+ return thread_create_internal2(task, new_thread, FALSE, (thread_continue_t)thread_bootstrap_return);
}
kern_return_t
task_t task,
thread_t *new_thread)
{
- return thread_create_internal2(task, new_thread, TRUE);
+ return thread_create_internal2(task, new_thread, TRUE, (thread_continue_t)thread_bootstrap_return);
+}
+
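+/*
+ * Like thread_create(), but the new thread begins execution at the
+ * caller-supplied continuation instead of thread_bootstrap_return().
+ */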
+kern_return_t
+thread_create_with_continuation(
+ task_t task,
+ thread_t *new_thread,
+ thread_continue_t continuation)
+{
+ return thread_create_internal2(task, new_thread, FALSE, continuation);
}
static kern_return_t
if (result != KERN_SUCCESS)
return (result);
- result = machine_thread_set_state(
- thread, flavor, new_state, new_state_count);
+ result = machine_thread_set_state(thread, flavor, new_state, new_state_count);
if (result != KERN_SUCCESS) {
task_unlock(task);
lck_mtx_unlock(&tasks_threads_lock);
return kernel_thread_start_priority(continuation, parameter, -1, new_thread);
}
+/* Separated into a helper function so it can be used by both THREAD_BASIC_INFO and THREAD_EXTENDED_INFO. */
+/* The caller is assumed to hold the thread locked. */
+static void
+retrieve_thread_basic_info(thread_t thread, thread_basic_info_t basic_info)
+{
+ int state, flags;
+
+ /* fill in info */
+
+ thread_read_times(thread, &basic_info->user_time,
+ &basic_info->system_time);
+
+ /*
+ * Update lazy-evaluated scheduler info because someone wants it.
+ */
+ if (SCHED(can_update_priority)(thread))
+ SCHED(update_priority)(thread);
+
+ basic_info->sleep_time = 0;
+
+ /*
+ * To calculate cpu_usage, first correct for timer rate,
+ * then for 5/8 ageing. The correction factor [3/5] is
+ * (1/(5/8) - 1).
+ */
+ basic_info->cpu_usage = 0;
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
+ if (sched_tick_interval) {
+ basic_info->cpu_usage = (integer_t)(((uint64_t)thread->cpu_usage
+ * TH_USAGE_SCALE) / sched_tick_interval);
+ basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
+ }
+#endif
+
+ if (basic_info->cpu_usage > TH_USAGE_SCALE)
+ basic_info->cpu_usage = TH_USAGE_SCALE;
+
+ basic_info->policy = ((thread->sched_mode == TH_MODE_TIMESHARE)?
+ POLICY_TIMESHARE: POLICY_RR);
+
+ flags = 0;
+ if (thread->options & TH_OPT_IDLE_THREAD)
+ flags |= TH_FLAGS_IDLE;
+
+ if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) {
+ flags |= TH_FLAGS_GLOBAL_FORCED_IDLE;
+ }
+
+ if (!thread->kernel_stack)
+ flags |= TH_FLAGS_SWAPPED;
+
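+ /* Map the internal thread state bits onto the exported TH_STATE_* values */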
+ state = 0;
+ if (thread->state & TH_TERMINATE)
+ state = TH_STATE_HALTED;
+ else
+ if (thread->state & TH_RUN)
+ state = TH_STATE_RUNNING;
+ else
+ if (thread->state & TH_UNINT)
+ state = TH_STATE_UNINTERRUPTIBLE;
+ else
+ if (thread->state & TH_SUSP)
+ state = TH_STATE_STOPPED;
+ else
+ if (thread->state & TH_WAIT)
+ state = TH_STATE_WAITING;
+
+ basic_info->run_state = state;
+ basic_info->flags = flags;
+
+ basic_info->suspend_count = thread->user_stop_count;
+
+ return;
+}
kern_return_t
thread_info_internal(
thread_info_t thread_info_out, /* ptr to OUT array */
mach_msg_type_number_t *thread_info_count) /*IN/OUT*/
{
- int state, flags;
- spl_t s;
+ spl_t s;
if (thread == THREAD_NULL)
return (KERN_INVALID_ARGUMENT);
if (flavor == THREAD_BASIC_INFO) {
- register thread_basic_info_t basic_info;
- if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
+ if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
return (KERN_INVALID_ARGUMENT);
- basic_info = (thread_basic_info_t) thread_info_out;
-
- s = splsched();
- thread_lock(thread);
-
- /* fill in info */
+ s = splsched();
+ thread_lock(thread);
- thread_read_times(thread, &basic_info->user_time,
- &basic_info->system_time);
+ retrieve_thread_basic_info(thread, (thread_basic_info_t) thread_info_out);
- /*
- * Update lazy-evaluated scheduler info because someone wants it.
- */
- if (SCHED(can_update_priority)(thread))
- SCHED(update_priority)(thread);
+ thread_unlock(thread);
+ splx(s);
- basic_info->sleep_time = 0;
+ *thread_info_count = THREAD_BASIC_INFO_COUNT;
- /*
- * To calculate cpu_usage, first correct for timer rate,
- * then for 5/8 ageing. The correction factor [3/5] is
- * (1/(5/8) - 1).
- */
- basic_info->cpu_usage = 0;
-#if defined(CONFIG_SCHED_TIMESHARE_CORE)
- if (sched_tick_interval) {
- basic_info->cpu_usage = (integer_t)(((uint64_t)thread->cpu_usage
- * TH_USAGE_SCALE) / sched_tick_interval);
- basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
- }
-#endif
-
- if (basic_info->cpu_usage > TH_USAGE_SCALE)
- basic_info->cpu_usage = TH_USAGE_SCALE;
-
- basic_info->policy = ((thread->sched_mode == TH_MODE_TIMESHARE)?
- POLICY_TIMESHARE: POLICY_RR);
-
- flags = 0;
- if (thread->options & TH_OPT_IDLE_THREAD)
- flags |= TH_FLAGS_IDLE;
-
- if (!thread->kernel_stack)
- flags |= TH_FLAGS_SWAPPED;
-
- state = 0;
- if (thread->state & TH_TERMINATE)
- state = TH_STATE_HALTED;
- else
- if (thread->state & TH_RUN)
- state = TH_STATE_RUNNING;
- else
- if (thread->state & TH_UNINT)
- state = TH_STATE_UNINTERRUPTIBLE;
- else
- if (thread->state & TH_SUSP)
- state = TH_STATE_STOPPED;
- else
- if (thread->state & TH_WAIT)
- state = TH_STATE_WAITING;
-
- basic_info->run_state = state;
- basic_info->flags = flags;
-
- basic_info->suspend_count = thread->user_stop_count;
-
- thread_unlock(thread);
- splx(s);
-
- *thread_info_count = THREAD_BASIC_INFO_COUNT;
-
- return (KERN_SUCCESS);
+ return (KERN_SUCCESS);
}
else
if (flavor == THREAD_IDENTIFIER_INFO) {
- register thread_identifier_info_t identifier_info;
+ register thread_identifier_info_t identifier_info;
- if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT)
+ if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT)
return (KERN_INVALID_ARGUMENT);
- identifier_info = (thread_identifier_info_t) thread_info_out;
+ identifier_info = (thread_identifier_info_t) thread_info_out;
- s = splsched();
- thread_lock(thread);
+ s = splsched();
+ thread_lock(thread);
- identifier_info->thread_id = thread->thread_id;
- identifier_info->thread_handle = thread->machine.cthread_self;
- if(thread->task->bsd_info) {
- identifier_info->dispatch_qaddr = identifier_info->thread_handle + get_dispatchqueue_offset_from_proc(thread->task->bsd_info);
- } else {
- thread_unlock(thread);
- splx(s);
- return KERN_INVALID_ARGUMENT;
- }
+ identifier_info->thread_id = thread->thread_id;
+ identifier_info->thread_handle = thread->machine.cthread_self;
+ identifier_info->dispatch_qaddr = thread_dispatchqaddr(thread);
- thread_unlock(thread);
- splx(s);
- return KERN_SUCCESS;
+ thread_unlock(thread);
+ splx(s);
+ return KERN_SUCCESS;
}
else
if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
ts_info = (policy_timeshare_info_t)thread_info_out;
- s = splsched();
+ s = splsched();
thread_lock(thread);
- if (thread->sched_mode != TH_MODE_TIMESHARE) {
- thread_unlock(thread);
+ if (thread->sched_mode != TH_MODE_TIMESHARE) {
+ thread_unlock(thread);
splx(s);
-
return (KERN_INVALID_POLICY);
- }
+ }
ts_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
if (ts_info->depressed) {
ts_info->base_priority = DEPRESSPRI;
- ts_info->depress_priority = thread->priority;
+ ts_info->depress_priority = thread->base_pri;
}
else {
- ts_info->base_priority = thread->priority;
+ ts_info->base_priority = thread->base_pri;
ts_info->depress_priority = -1;
}
ts_info->max_priority = thread->max_priority;
thread_unlock(thread);
- splx(s);
+ splx(s);
*thread_info_count = POLICY_TIMESHARE_INFO_COUNT;
- return (KERN_SUCCESS);
+ return (KERN_SUCCESS);
}
else
if (flavor == THREAD_SCHED_FIFO_INFO) {
policy_rr_info_t rr_info;
uint32_t quantum_time;
uint64_t quantum_ns;
-
+
if (*thread_info_count < POLICY_RR_INFO_COUNT)
return (KERN_INVALID_ARGUMENT);
rr_info = (policy_rr_info_t) thread_info_out;
- s = splsched();
+ s = splsched();
thread_lock(thread);
- if (thread->sched_mode == TH_MODE_TIMESHARE) {
- thread_unlock(thread);
+ if (thread->sched_mode == TH_MODE_TIMESHARE) {
+ thread_unlock(thread);
splx(s);
return (KERN_INVALID_POLICY);
rr_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
if (rr_info->depressed) {
rr_info->base_priority = DEPRESSPRI;
- rr_info->depress_priority = thread->priority;
+ rr_info->depress_priority = thread->base_pri;
}
else {
- rr_info->base_priority = thread->priority;
+ rr_info->base_priority = thread->base_pri;
rr_info->depress_priority = -1;
}
quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
-
+
rr_info->max_priority = thread->max_priority;
- rr_info->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
+ rr_info->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
thread_unlock(thread);
- splx(s);
+ splx(s);
*thread_info_count = POLICY_RR_INFO_COUNT;
- return (KERN_SUCCESS);
+ return (KERN_SUCCESS);
+ }
+ else
+ if (flavor == THREAD_EXTENDED_INFO) {
+ thread_basic_info_data_t basic_info;
+ thread_extended_info_t extended_info = (thread_extended_info_t) thread_info_out;
+
+ if (*thread_info_count < THREAD_EXTENDED_INFO_COUNT) {
+ return (KERN_INVALID_ARGUMENT);
+ }
+
+ s = splsched();
+ thread_lock(thread);
+
+ /* NOTE: This mimics fill_taskthreadinfo(), which is the function used by proc_pidinfo() for
+ * the PROC_PIDTHREADINFO flavor (which can't be used on corpses)
+ */
+ retrieve_thread_basic_info(thread, &basic_info);
+ extended_info->pth_user_time = ((basic_info.user_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.user_time.microseconds * (integer_t)NSEC_PER_USEC));
+ extended_info->pth_system_time = ((basic_info.system_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.system_time.microseconds * (integer_t)NSEC_PER_USEC));
+
+ extended_info->pth_cpu_usage = basic_info.cpu_usage;
+ extended_info->pth_policy = basic_info.policy;
+ extended_info->pth_run_state = basic_info.run_state;
+ extended_info->pth_flags = basic_info.flags;
+ extended_info->pth_sleep_time = basic_info.sleep_time;
+ extended_info->pth_curpri = thread->sched_pri;
+ extended_info->pth_priority = thread->base_pri;
+ extended_info->pth_maxpriority = thread->max_priority;
+
+ bsd_getthreadname(thread->uthread,extended_info->pth_name);
+
+ thread_unlock(thread);
+ splx(s);
+
+ *thread_info_count = THREAD_EXTENDED_INFO_COUNT;
+
+ return (KERN_SUCCESS);
+ }
+ else
+ if (flavor == THREAD_DEBUG_INFO_INTERNAL) {
+#if DEVELOPMENT || DEBUG
+ thread_debug_info_internal_t dbg_info;
+ if (*thread_info_count < THREAD_DEBUG_INFO_INTERNAL_COUNT)
+ return (KERN_NOT_SUPPORTED);
+
+ if (thread_info_out == NULL)
+ return (KERN_INVALID_ARGUMENT);
+
+ dbg_info = (thread_debug_info_internal_t) thread_info_out;
+ dbg_info->page_creation_count = thread->t_page_creation_count;
+
+ *thread_info_count = THREAD_DEBUG_INFO_INTERNAL_COUNT;
+ return (KERN_SUCCESS);
+#endif /* DEVELOPMENT || DEBUG */
+ return (KERN_NOT_SUPPORTED);
}
return (KERN_INVALID_ARGUMENT);
task_t task = current_task();
thread_t thread = current_thread();
uint64_t tid = thread->thread_id;
- char *procname = (char *) "unknown";
+ const char *procname = "unknown";
time_value_t thread_total_time = {0, 0};
time_value_t thread_system_time;
time_value_t thread_user_time;
if (thread != THREAD_NULL) {
thread_handle = thread->machine.cthread_self;
-
- if (thread->task->bsd_info)
+
+ if (thread->inspection == TRUE)
+ dispatchqueue_addr = thread_handle + get_task_dispatchqueue_offset(thread->task);
+ else if (thread->task->bsd_info)
dispatchqueue_addr = thread_handle + get_dispatchqueue_offset_from_proc(thread->task->bsd_info);
}
return 0;
}
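+/* Return the cpu id of the processor this thread last ran on, or -1 if none is recorded */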
+int dtrace_get_thread_last_cpu_id(thread_t thread)
+{
+ if ((thread != THREAD_NULL) && (thread->last_processor != PROCESSOR_NULL)) {
+ return thread->last_processor->cpu_id;
+ } else {
+ return -1;
+ }
+}
+
int64_t dtrace_get_thread_tracing(thread_t thread)
{
if (thread != THREAD_NULL)