/*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. The rights granted to you under the
- * License may not be used to create, or enable the creation or
- * redistribution of, unlawful or unlicensed copies of an Apple operating
- * system, or to circumvent, violate, or enable the circumvention or
- * violation of, any terms of an Apple operating system software license
- * agreement.
- *
- * Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
* limitations under the License.
- *
- * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_FREE_COPYRIGHT@
*
*/
-#include <mach_host.h>
-#include <mach_prof.h>
-
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/policy.h>
#include <kern/thread.h>
#include <kern/host.h>
#include <kern/zalloc.h>
-#include <kern/profile.h>
#include <kern/assert.h>
#include <ipc/ipc_kmsg.h>
#include <sys/kdebug.h>
+#include <mach/sdt.h>
+
/*
* Exported interfaces
*/
#include <mach/host_priv_server.h>
static struct zone *thread_zone;
+static lck_grp_attr_t thread_lck_grp_attr;
+lck_attr_t thread_lck_attr;
+lck_grp_t thread_lck_grp;
decl_simple_lock_data(static,thread_stack_lock)
static queue_head_t thread_stack_queue;
static struct thread thread_template, init_thread;
+static void sched_call_null(
+ int type,
+ thread_t thread);
+
#ifdef MACH_BSD
extern void proc_exit(void *);
+extern uint64_t get_dispatchqueue_offset_from_proc(void *);
#endif /* MACH_BSD */
+extern int debug_task;
+int thread_max = CONFIG_THREAD_MAX; /* Max number of threads */
+int task_threadmax = CONFIG_THREAD_MAX;
+
+static uint64_t thread_unique_id = 0;
+
void
thread_bootstrap(void)
{
	/*
	 * Fill in a template thread for fast initialization.
	 */
- thread_template.runq = RUN_QUEUE_NULL;
+ thread_template.runq = PROCESSOR_NULL;
thread_template.ref_count = 2;
thread_template.sched_usage = 0;
thread_template.pri_shift = INT8_MAX;
thread_template.cpu_usage = thread_template.cpu_delta = 0;
+ thread_template.c_switch = thread_template.p_switch = thread_template.ps_switch = 0;
thread_template.bound_processor = PROCESSOR_NULL;
thread_template.last_processor = PROCESSOR_NULL;
- thread_template.last_switch = 0;
+
+ thread_template.sched_call = sched_call_null;
timer_init(&thread_template.user_timer);
timer_init(&thread_template.system_timer);
thread_template.user_timer_save = 0;
thread_template.system_timer_save = 0;
+ thread_template.vtimer_user_save = 0;
+ thread_template.vtimer_prof_save = 0;
+ thread_template.vtimer_rlim_save = 0;
thread_template.wait_timer_is_set = FALSE;
thread_template.wait_timer_active = 0;
thread_template.depress_timer_active = 0;
- thread_template.processor_set = PROCESSOR_SET_NULL;
-
thread_template.special_handler.handler = special_handler;
- thread_template.special_handler.next = 0;
+ thread_template.special_handler.next = NULL;
-#if MACH_HOST
- thread_template.may_assign = TRUE;
- thread_template.assign_active = FALSE;
-#endif /* MACH_HOST */
thread_template.funnel_lock = THR_FUNNEL_NULL;
thread_template.funnel_state = 0;
thread_template.recover = (vm_offset_t)NULL;
+
+ thread_template.map = VM_MAP_NULL;
+
+#if CONFIG_DTRACE
+ thread_template.t_dtrace_predcache = 0;
+ thread_template.t_dtrace_vtime = 0;
+ thread_template.t_dtrace_tracing = 0;
+#endif /* CONFIG_DTRACE */
+
+ thread_template.t_chud = 0;
+ thread_template.t_page_creation_count = 0;
+ thread_template.t_page_creation_time = 0;
+ thread_template.affinity_set = NULL;
+
init_thread = thread_template;
machine_set_current_thread(&init_thread);
}
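/*
 * Illustrative note (annotation, not original source): the template
 * pattern above lets thread_create_internal() initialize every field of
 * a new thread with a single struct copy instead of dozens of stores:
 *
 *	new_thread = (thread_t)zalloc(thread_zone);
 *	*new_thread = thread_template;	// stamp all defaults at once
 *
 * A field's default value then only ever needs updating here.
 */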
void
thread_init(void)
{
thread_zone = zinit(
sizeof(struct thread),
- THREAD_MAX * sizeof(struct thread),
+ thread_max * sizeof(struct thread),
THREAD_CHUNK * sizeof(struct thread),
"threads");
-
+ zone_change(thread_zone, Z_NOENCRYPT, TRUE);
+
+ lck_grp_attr_setdefault(&thread_lck_grp_attr);
+ lck_grp_init(&thread_lck_grp, "thread", &thread_lck_grp_attr);
+ lck_attr_setdefault(&thread_lck_attr);
+
stack_init();
/*
 *	thread_terminate_self:
 */
void
thread_terminate_self(void)
{
thread_t thread = current_thread();
task_t task;
spl_t s;
+ int threadcnt;
+
+ DTRACE_PROC(lwp__exit);
+
+ thread_mtx_lock(thread);
+
+ ulock_release_all(thread);
+
+ ipc_thread_disable(thread);
+
+ thread_mtx_unlock(thread);
s = splsched();
thread_lock(thread);
/*
- * Cancel priority depression, reset scheduling parameters,
- * and wait for concurrent expirations on other processors.
+ * Cancel priority depression, wait for concurrent expirations
+ * on other processors.
*/
if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
thread->depress_timer_active--;
}
- thread_policy_reset(thread);
-
while (thread->depress_timer_active > 0) {
thread_unlock(thread);
splx(s);
thread_lock(thread);
}
+ thread_sched_call(thread, NULL);
+
thread_unlock(thread);
splx(s);
- thread_mtx_lock(thread);
-
- ulock_release_all(thread);
+ thread_policy_reset(thread);
- ipc_thread_disable(thread);
-
- thread_mtx_unlock(thread);
+ task = thread->task;
+ uthread_cleanup(task, thread->uthread, task->bsd_info);
+ threadcnt = hw_atomic_sub(&task->active_thread_count, 1);
/*
* If we are the last thread to terminate and the task is
* associated with a BSD process, perform BSD process exit.
*/
- task = thread->task;
- if ( hw_atomic_sub(&task->active_thread_count, 1) == 0 &&
- task->bsd_info != NULL )
+ if (threadcnt == 0 && task->bsd_info != NULL)
proc_exit(task->bsd_info);
+ uthread_cred_free(thread->uthread);
+
s = splsched();
thread_lock(thread);
void
thread_deallocate(
thread_t thread)
{
- processor_set_t pset;
task_t task;
if (thread == THREAD_NULL)
void *ut = thread->uthread;
thread->uthread = NULL;
- uthread_free(task, ut, task->bsd_info);
+ uthread_zone_free(ut);
}
#endif /* MACH_BSD */
task_deallocate(task);
- pset = thread->processor_set;
- pset_deallocate(pset);
-
if (thread->kernel_stack != 0)
stack_free(thread);
+ lck_mtx_destroy(&thread->mutex, &thread_lck_grp);
machine_thread_destroy(thread);
zfree(thread_zone, thread);
}

/*
 *	thread_terminate_daemon:
 *
 *	Perform final clean up for terminating threads.
 */
static void
thread_terminate_daemon(void)
{
thread_t thread;
task_t task;
- processor_set_t pset;
(void)splsched();
simple_lock(&thread_terminate_lock);
task->total_user_time += timer_grab(&thread->user_timer);
task->total_system_time += timer_grab(&thread->system_timer);
+ task->c_switch += thread->c_switch;
+ task->p_switch += thread->p_switch;
+ task->ps_switch += thread->ps_switch;
+
queue_remove(&task->threads, thread, thread_t, task_threads);
task->thread_count--;
- task_unlock(task);
- pset = thread->processor_set;
+	/*
+	 * If the task is being halted and there is only one thread
+	 * left in the task after this one, wake up that thread.
+	 */
+ if (task->thread_count == 1 && task->halting)
+ thread_wakeup((event_t)&task->halting);
+
+ task_unlock(task);
- pset_lock(pset);
- pset_remove_thread(pset, thread);
- pset_unlock(pset);
+ lck_mtx_lock(&tasks_threads_lock);
+ queue_remove(&threads, thread, thread_t, threads);
+ threads_count--;
+ lck_mtx_unlock(&tasks_threads_lock);
thread_deallocate(thread);
static void
thread_stack_daemon(void)
{
thread_t thread;
- (void)splsched();
simple_lock(&thread_stack_lock);
while ((thread = (thread_t)dequeue_head(&thread_stack_queue)) != THREAD_NULL) {
simple_unlock(&thread_stack_lock);
- /* splsched */
stack_alloc(thread);
-
+
+ (void)splsched();
thread_lock(thread);
thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
thread_unlock(thread);
(void)spllo();
- (void)splsched();
simple_lock(&thread_stack_lock);
}
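/*
 * Annotation: the reordering in this hunk is the point of the change --
 * stack_alloc() can block, so the diff now performs it with interrupts
 * enabled and only raises to splsched() afterwards, dropping back to
 * spllo() before reacquiring the simple lock for the next iteration.
 */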
assert_wait((event_t)&thread_stack_queue, THREAD_UNINT);
simple_unlock(&thread_stack_lock);
- /* splsched */
thread_block((thread_continue_t)thread_stack_daemon);
/*NOTREACHED*/
}

static kern_return_t
thread_create_internal(
	task_t			parent_task,
integer_t priority,
thread_continue_t continuation,
+ int options,
+#define TH_OPTION_NONE 0x00
+#define TH_OPTION_NOCRED 0x01
+#define TH_OPTION_NOSUSP 0x02
thread_t *out_thread)
{
thread_t new_thread;
- processor_set_t pset;
static thread_t first_thread;
/*
* Allocate a thread and initialize static fields
*/
- if (first_thread == NULL)
+ if (first_thread == THREAD_NULL)
new_thread = first_thread = current_thread();
else
new_thread = (thread_t)zalloc(thread_zone);
- if (new_thread == NULL)
+ if (new_thread == THREAD_NULL)
return (KERN_RESOURCE_SHORTAGE);
if (new_thread != first_thread)
*new_thread = thread_template;
#ifdef MACH_BSD
- {
- new_thread->uthread = uthread_alloc(parent_task, new_thread);
- if (new_thread->uthread == NULL) {
- zfree(thread_zone, new_thread);
- return (KERN_RESOURCE_SHORTAGE);
- }
+ new_thread->uthread = uthread_alloc(parent_task, new_thread, (options & TH_OPTION_NOCRED) != 0);
+ if (new_thread->uthread == NULL) {
+ zfree(thread_zone, new_thread);
+ return (KERN_RESOURCE_SHORTAGE);
}
#endif /* MACH_BSD */
if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
- {
- void *ut = new_thread->uthread;
+ void *ut = new_thread->uthread;
- new_thread->uthread = NULL;
- uthread_free(parent_task, ut, parent_task->bsd_info);
- }
+ new_thread->uthread = NULL;
+ /* cred free may not be necessary */
+ uthread_cleanup(parent_task, ut, parent_task->bsd_info);
+ uthread_cred_free(ut);
+ uthread_zone_free(ut);
#endif /* MACH_BSD */
+
zfree(thread_zone, new_thread);
return (KERN_FAILURE);
}
thread_lock_init(new_thread);
wake_lock_init(new_thread);
- mutex_init(&new_thread->mutex, 0);
+ lck_mtx_init(&new_thread->mutex, &thread_lck_grp, &thread_lck_attr);
ipc_thread_init(new_thread);
queue_init(&new_thread->held_ulocks);
- thread_prof_init(new_thread, parent_task);
new_thread->continuation = continuation;
- pset = parent_task->processor_set;
- assert(pset == &default_pset);
- pset_lock(pset);
-
+ lck_mtx_lock(&tasks_threads_lock);
task_lock(parent_task);
- assert(parent_task->processor_set == pset);
- if ( !parent_task->active ||
- (parent_task->thread_count >= THREAD_MAX &&
- parent_task != kernel_task)) {
+ if ( !parent_task->active || parent_task->halting ||
+ ((options & TH_OPTION_NOSUSP) != 0 &&
+ parent_task->suspend_count > 0) ||
+ (parent_task->thread_count >= task_threadmax &&
+ parent_task != kernel_task) ) {
task_unlock(parent_task);
- pset_unlock(pset);
+ lck_mtx_unlock(&tasks_threads_lock);
#ifdef MACH_BSD
{
void *ut = new_thread->uthread;
new_thread->uthread = NULL;
- uthread_free(parent_task, ut, parent_task->bsd_info);
+ uthread_cleanup(parent_task, ut, parent_task->bsd_info);
+ /* cred free may not be necessary */
+ uthread_cred_free(ut);
+ uthread_zone_free(ut);
}
#endif /* MACH_BSD */
ipc_thread_disable(new_thread);
ipc_thread_terminate(new_thread);
+ lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
machine_thread_destroy(new_thread);
zfree(thread_zone, new_thread);
return (KERN_FAILURE);
}
+ /* New threads inherit any default state on the task */
+ machine_thread_inherit_taskwide(new_thread, parent_task);
+
task_reference_internal(parent_task);
	/* Cache the task's map */
	new_thread->map = parent_task->map;

	/* So terminating threads don't need to take the task lock to decrement */
hw_atomic_add(&parent_task->active_thread_count, 1);
- /* Associate the thread with the processor set */
- pset_add_thread(pset, new_thread);
+ /* Protected by the tasks_threads_lock */
+ new_thread->thread_id = ++thread_unique_id;
+
+ queue_enter(&threads, new_thread, thread_t, threads);
+ threads_count++;
timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);
+#if CONFIG_COUNTERS
+ /*
+ * If parent task has any reservations, they need to be propagated to this
+ * thread.
+ */
+ new_thread->t_chud = (TASK_PMC_FLAG == (parent_task->t_chud & TASK_PMC_FLAG)) ?
+ THREAD_PMC_FLAG : 0U;
+#endif
+
/* Set the thread's scheduling parameters */
if (parent_task != kernel_task)
new_thread->sched_mode |= TH_MODE_TIMESHARE;
new_thread->importance =
new_thread->priority - new_thread->task_priority;
new_thread->sched_stamp = sched_tick;
- new_thread->pri_shift = new_thread->processor_set->pri_shift;
+ new_thread->pri_shift = sched_pri_shift;
compute_priority(new_thread, FALSE);
new_thread->active = TRUE;
KERNEL_DEBUG_CONSTANT(
TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
- (vm_address_t)new_thread, dbg_arg2, 0, 0, 0);
+ (vm_address_t)(uintptr_t)thread_tid(new_thread), dbg_arg2, 0, 0, 0);
kdbg_trace_string(parent_task->bsd_info,
&dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);
	KERNEL_DEBUG_CONSTANT(
		TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
		dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
}
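/*
 * Annotation: the trace point above now logs the stable 64-bit thread ID
 * rather than the thread's kernel address, giving trace consumers a
 * value that is meaningful from user space without leaking a kernel
 * pointer.
 */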
+ DTRACE_PROC1(lwp__create, thread_t, *out_thread);
+
return (KERN_SUCCESS);
}
kern_return_t
thread_create(
	task_t		task,
	thread_t	*new_thread)
{
	kern_return_t	result;
	thread_t	thread;

	if (task == TASK_NULL || task == kernel_task)
return (KERN_INVALID_ARGUMENT);
- result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, &thread);
+ result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, TH_OPTION_NONE, &thread);
if (result != KERN_SUCCESS)
return (result);
if (task->suspend_count > 0)
thread_hold(thread);
- pset_unlock(task->processor_set);
task_unlock(task);
+ lck_mtx_unlock(&tasks_threads_lock);
*new_thread = thread;
	return (KERN_SUCCESS);
}

kern_return_t
thread_create_running(
	register task_t			task,
	int				flavor,
	thread_state_t			new_state,
	mach_msg_type_number_t		new_state_count,
	thread_t			*new_thread)
{
	register kern_return_t	result;
	thread_t		thread;

	if (task == TASK_NULL || task == kernel_task)
return (KERN_INVALID_ARGUMENT);
- result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, &thread);
+ result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, TH_OPTION_NONE, &thread);
if (result != KERN_SUCCESS)
return (result);
result = machine_thread_set_state(
thread, flavor, new_state, new_state_count);
if (result != KERN_SUCCESS) {
- pset_unlock(task->processor_set);
task_unlock(task);
+ lck_mtx_unlock(&tasks_threads_lock);
thread_terminate(thread);
thread_deallocate(thread);
}
thread_mtx_lock(thread);
- clear_wait(thread, THREAD_AWAKENED);
- thread->started = TRUE;
+ thread_start_internal(thread);
thread_mtx_unlock(thread);
- pset_unlock(task->processor_set);
+
task_unlock(task);
+ lck_mtx_unlock(&tasks_threads_lock);
*new_thread = thread;
return (result);
}
+kern_return_t
+thread_create_workq(
+ task_t task,
+ thread_continue_t thread_return,
+ thread_t *new_thread)
+{
+ kern_return_t result;
+ thread_t thread;
+
+ if (task == TASK_NULL || task == kernel_task)
+ return (KERN_INVALID_ARGUMENT);
+
+ result = thread_create_internal(task, -1, thread_return, TH_OPTION_NOCRED | TH_OPTION_NOSUSP, &thread);
+ if (result != KERN_SUCCESS)
+ return (result);
+
+ thread->user_stop_count = 1;
+ thread_hold(thread);
+ if (task->suspend_count > 0)
+ thread_hold(thread);
+
+ task_unlock(task);
+ lck_mtx_unlock(&tasks_threads_lock);
+
+ *new_thread = thread;
+
+ return (KERN_SUCCESS);
+}
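/*
 * Design note (annotation): a workqueue thread is created pre-suspended;
 * user_stop_count is preset to 1 and a hold is applied, plus a second
 * hold if the task itself is suspended, so the thread cannot run until
 * the workqueue code explicitly resumes it. TH_OPTION_NOCRED skips the
 * per-thread credential setup in uthread_alloc(), and TH_OPTION_NOSUSP
 * makes creation fail outright in a suspended task.
 */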
+
/*
* kernel_thread_create:
 *	Create a thread in the kernel task
 *	to execute in kernel context.
 */
kern_return_t
kernel_thread_create(
	thread_continue_t	continuation,
	void			*parameter,
	integer_t		priority,
	thread_t		*new_thread)
{
	kern_return_t	result;
thread_t thread;
task_t task = kernel_task;
- result = thread_create_internal(task, priority, continuation, &thread);
+ result = thread_create_internal(task, priority, continuation, TH_OPTION_NONE, &thread);
if (result != KERN_SUCCESS)
return (result);
- pset_unlock(task->processor_set);
task_unlock(task);
+ lck_mtx_unlock(&tasks_threads_lock);
stack_alloc(thread);
assert(thread->kernel_stack != 0);
+#if CONFIG_EMBEDDED
+ if (priority > BASEPRI_KERNEL)
+#endif
thread->reserved_stack = thread->kernel_stack;
thread->parameter = parameter;
+	if (debug_task & 1)
+		kprintf("kernel_thread_create: thread = %p continuation = %p\n", thread, continuation);
*new_thread = thread;
return (result);
}

kern_return_t
kernel_thread_start_priority(
	thread_continue_t	continuation,
	void			*parameter,
	integer_t		priority,
	thread_t		*new_thread)
{
	kern_return_t	result;
	thread_t	thread;

	result = kernel_thread_create(continuation, parameter, priority, &thread);
	if (result != KERN_SUCCESS)
return (result);
+ *new_thread = thread;
+
thread_mtx_lock(thread);
- clear_wait(thread, THREAD_AWAKENED);
- thread->started = TRUE;
+ thread_start_internal(thread);
thread_mtx_unlock(thread);
- *new_thread = thread;
-
return (result);
}
kern_return_t
kernel_thread_start(
	thread_continue_t	continuation,
	void			*parameter,
	thread_t		*new_thread)
{
	return kernel_thread_start_priority(continuation, parameter, -1, new_thread);
}
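/*
 * Typical caller pattern (illustrative; my_worker/my_context are
 * hypothetical names, not from this file):
 *
 *	thread_t thread;
 *
 *	if (kernel_thread_start(my_worker, my_context, &thread) == KERN_SUCCESS)
 *		thread_deallocate(thread);	// drop the creation reference
 *
 * The started thread holds its own reference and runs until its
 * continuation terminates it.
 */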
+#ifndef __LP64__
+
thread_t
kernel_thread(
task_t task,
return (thread);
}
+#endif /* __LP64__ */
+
kern_return_t
thread_info_internal(
register thread_t thread,
	/*
	 * To calculate cpu_usage, first correct for timer rate,
	 * then for 5/8 ageing. The correction factor [3/5] is
	 * (1/(5/8) - 1).
	 */
- basic_info->cpu_usage = ((uint64_t)thread->cpu_usage
- * TH_USAGE_SCALE) / sched_tick_interval;
+ basic_info->cpu_usage = (integer_t)(((uint64_t)thread->cpu_usage
+ * TH_USAGE_SCALE) / sched_tick_interval);
basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
if (basic_info->cpu_usage > TH_USAGE_SCALE)
POLICY_TIMESHARE: POLICY_RR);
flags = 0;
- if (thread->state & TH_IDLE)
+ if (thread->bound_processor != PROCESSOR_NULL && thread->bound_processor->idle_thread == thread)
flags |= TH_FLAGS_IDLE;
if (!thread->kernel_stack)
return (KERN_SUCCESS);
}
else
+ if (flavor == THREAD_IDENTIFIER_INFO) {
+ register thread_identifier_info_t identifier_info;
+
+ if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT)
+ return (KERN_INVALID_ARGUMENT);
+
+ identifier_info = (thread_identifier_info_t) thread_info_out;
+
+ s = splsched();
+ thread_lock(thread);
+
+ identifier_info->thread_id = thread->thread_id;
+#if defined(__ppc__) || defined(__arm__)
+ identifier_info->thread_handle = thread->machine.cthread_self;
+#else
+ identifier_info->thread_handle = thread->machine.pcb->cthread_self;
+#endif
+	if (thread->task->bsd_info) {
+ identifier_info->dispatch_qaddr = identifier_info->thread_handle + get_dispatchqueue_offset_from_proc(thread->task->bsd_info);
+ } else {
+ thread_unlock(thread);
+ splx(s);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ thread_unlock(thread);
+ splx(s);
+ return KERN_SUCCESS;
+ }
+ else
if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
policy_timeshare_info_t ts_info;
void
thread_read_times(
	thread_t	thread,
	time_value_t	*user_time,
time_value_t *system_time)
{
- absolutetime_to_microtime(
- timer_grab(&thread->user_timer),
- &user_time->seconds, &user_time->microseconds);
+ clock_sec_t secs;
+ clock_usec_t usecs;
- absolutetime_to_microtime(
- timer_grab(&thread->system_timer),
- &system_time->seconds, &system_time->microseconds);
+ absolutetime_to_microtime(timer_grab(&thread->user_timer), &secs, &usecs);
+ user_time->seconds = (typeof(user_time->seconds))secs;
+ user_time->microseconds = usecs;
+
+ absolutetime_to_microtime(timer_grab(&thread->system_timer), &secs, &usecs);
+ system_time->seconds = (typeof(system_time->seconds))secs;
+ system_time->microseconds = usecs;
}
kern_return_t
thread_assign_default(
thread_t thread)
{
- return (thread_assign(thread, &default_pset));
+ return (thread_assign(thread, &pset0));
}
/*
 *	thread_get_assignment
 *
 *	Return current assignment for this thread.
 */
kern_return_t
thread_get_assignment(
	thread_t	thread,
	processor_set_t	*pset)
{
if (thread == NULL)
return (KERN_INVALID_ARGUMENT);
- *pset = thread->processor_set;
- pset_reference(*pset);
+ *pset = &pset0;
+
return (KERN_SUCCESS);
}
void
funnel_unlock(
	funnel_t	*fnl)
{
lck_mtx_unlock(fnl->fnl_mutex);
+ fnl->fnl_mtxholder = NULL;
fnl->fnl_mtxrelease = current_thread();
}
if (funneled == TRUE) {
if (cur_thread->funnel_lock)
- panic("Funnel lock called when holding one %x", cur_thread->funnel_lock);
+ panic("Funnel lock called when holding one %p", cur_thread->funnel_lock);
KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
fnl, 1, 0, 0, 0);
funnel_lock(fnl);
return(funnel_state_prev);
}
+static void
+sched_call_null(
+__unused int type,
+__unused thread_t thread)
+{
+ return;
+}
+
+void
+thread_sched_call(
+ thread_t thread,
+ sched_call_t call)
+{
+ thread->sched_call = (call != NULL)? call: sched_call_null;
+}
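/*
 * Annotation: installing sched_call_null rather than leaving the field
 * NULL is a null-object pattern -- the scheduler may invoke
 * thread->sched_call unconditionally, with no NULL test on every
 * callout.
 */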
+
+void
+thread_static_param(
+ thread_t thread,
+ boolean_t state)
+{
+ thread_mtx_lock(thread);
+ thread->static_param = state;
+ thread_mtx_unlock(thread);
+}
+
+uint64_t
+thread_tid(
+ thread_t thread)
+{
+ return (thread != THREAD_NULL? thread->thread_id: 0);
+}
+
+uint64_t
+thread_dispatchqaddr(
+ thread_t thread)
+{
+ uint64_t dispatchqueue_addr = 0;
+ uint64_t thread_handle = 0;
+
+ if (thread != THREAD_NULL) {
+#if defined(__ppc__) || defined(__arm__)
+ thread_handle = thread->machine.cthread_self;
+#else
+ thread_handle = thread->machine.pcb->cthread_self;
+#endif
+
+ if (thread->task->bsd_info)
+ dispatchqueue_addr = thread_handle + get_dispatchqueue_offset_from_proc(thread->task->bsd_info);
+ }
+
+ return (dispatchqueue_addr);
+}
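/*
 * Annotation: thread_handle is the thread's user-visible TSD base
 * (cthread_self), and the BSD proc supplies the per-process offset of
 * the dispatch queue slot within that area, so the sum is the user
 * address where libdispatch keeps the thread's current queue pointer.
 */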
/*
 * Export routines to other components for things that are done as macros
 * within the osfmk component.
 */

#undef thread_should_halt
boolean_t
thread_should_halt(
	thread_t	th)
{
return (thread_should_halt_fast(th));
}
+
+#if CONFIG_DTRACE
+uint32_t dtrace_get_thread_predcache(thread_t thread)
+{
+ if (thread != THREAD_NULL)
+ return thread->t_dtrace_predcache;
+ else
+ return 0;
+}
+
+int64_t dtrace_get_thread_vtime(thread_t thread)
+{
+ if (thread != THREAD_NULL)
+ return thread->t_dtrace_vtime;
+ else
+ return 0;
+}
+
+int64_t dtrace_get_thread_tracing(thread_t thread)
+{
+ if (thread != THREAD_NULL)
+ return thread->t_dtrace_tracing;
+ else
+ return 0;
+}
+
+boolean_t dtrace_get_thread_reentering(thread_t thread)
+{
+ if (thread != THREAD_NULL)
+ return (thread->options & TH_OPT_DTRACE) ? TRUE : FALSE;
+ else
+ return 0;
+}
+
+vm_offset_t dtrace_get_kernel_stack(thread_t thread)
+{
+ if (thread != THREAD_NULL)
+ return thread->kernel_stack;
+ else
+ return 0;
+}
+
+int64_t dtrace_calc_thread_recent_vtime(thread_t thread)
+{
+#if STAT_TIME
+ if (thread != THREAD_NULL) {
+ return timer_grab(&(thread->system_timer)) + timer_grab(&(thread->user_timer));
+ } else
+ return 0;
+#else
+ if (thread != THREAD_NULL) {
+ processor_t processor = current_processor();
+ uint64_t abstime = mach_absolute_time();
+ timer_t timer;
+
+ timer = PROCESSOR_DATA(processor, thread_timer);
+
+ return timer_grab(&(thread->system_timer)) + timer_grab(&(thread->user_timer)) +
+ (abstime - timer->tstamp); /* XXX need interrupts off to prevent missed time? */
+ } else
+ return 0;
+#endif
+}
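/*
 * Annotation: under precise timing (!STAT_TIME) the running timer has an
 * in-progress slice that has not yet been billed to the thread; adding
 * (abstime - timer->tstamp) folds it in, subject to the race the XXX
 * comment above notes when interrupts are left enabled.
 */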
+
+void dtrace_set_thread_predcache(thread_t thread, uint32_t predcache)
+{
+ if (thread != THREAD_NULL)
+ thread->t_dtrace_predcache = predcache;
+}
+
+void dtrace_set_thread_vtime(thread_t thread, int64_t vtime)
+{
+ if (thread != THREAD_NULL)
+ thread->t_dtrace_vtime = vtime;
+}
+
+void dtrace_set_thread_tracing(thread_t thread, int64_t accum)
+{
+ if (thread != THREAD_NULL)
+ thread->t_dtrace_tracing = accum;
+}
+
+void dtrace_set_thread_reentering(thread_t thread, boolean_t vbool)
+{
+ if (thread != THREAD_NULL) {
+ if (vbool)
+ thread->options |= TH_OPT_DTRACE;
+ else
+ thread->options &= (~TH_OPT_DTRACE);
+ }
+}
+
+vm_offset_t dtrace_set_thread_recover(thread_t thread, vm_offset_t recover)
+{
+ vm_offset_t prev = 0;
+
+ if (thread != THREAD_NULL) {
+ prev = thread->recover;
+ thread->recover = recover;
+ }
+ return prev;
+}
+
+void dtrace_thread_bootstrap(void)
+{
+ task_t task = current_task();
+	if (task->thread_count == 1) {
+		DTRACE_PROC(start);
+	}
+	DTRACE_PROC(lwp__start);
+}
+#endif /* CONFIG_DTRACE */