/*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
 * @OSF_FREE_COPYRIGHT@
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/alert.h>
-#include <mach_prof.h>
#include <mach/rpc.h>
#include <mach/thread_act_server.h>
#include <kern/ast.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
+#include <kern/extmod_statistics.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
-#include <kern/profile.h>
#include <kern/machine.h>
#include <kern/spl.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/processor.h>
#include <kern/timer.h>
-#include <mach_prof.h>
+#include <kern/affinity.h>
+
#include <mach/rpc.h>
+#include <security/mac_mach_internal.h>
+
void act_abort(thread_t);
-void act_set_apc(thread_t);
void install_special_handler_locked(thread_t);
void special_handler_continue(void);
+/*
+ * Internal routine to mark a thread as started.
+ * Always called with the thread mutex locked.
+ *
+ * Note: function intentionally declared with the noinline attribute to
+ * prevent multiple declaration of probe symbols in this file; we would
+ * prefer "#pragma noinline", but gcc does not support it.
+ * PR-6385749 -- the lwp-start probe should fire from within the context
+ * of the newly created thread. Commented out for now, in case we
+ * turn it into a dead code probe.
+ */
+void
+thread_start_internal(
+ thread_t thread)
+{
+ clear_wait(thread, THREAD_AWAKENED);
+ thread->started = TRUE;
+ // DTRACE_PROC1(lwp__start, thread_t, thread);
+}
+
/*
* Internal routine to terminate a thread.
* Sometimes called with task already locked.
if (thread->started)
clear_wait(thread, THREAD_INTERRUPTED);
else {
- clear_wait(thread, THREAD_AWAKENED);
- thread->started = TRUE;
+ thread_start_internal(thread);
}
}
else
result = KERN_TERMINATED;
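+	/* Detach the terminating thread from any affinity set it joined */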
+ if (thread->affinity_set != NULL)
+ thread_affinity_terminate(thread);
+
thread_mtx_unlock(thread);
if (thread != current_thread() && result == KERN_SUCCESS)
- thread_wait(thread);
+ thread_wait(thread, FALSE);
return (result);
}
if (thread->started)
thread_wakeup_one(&thread->suspend_count);
else {
- clear_wait(thread, THREAD_AWAKENED);
- thread->started = TRUE;
+ thread_start_internal(thread);
}
}
}
thread_mtx_unlock(thread);
if (thread != self && result == KERN_SUCCESS)
- thread_wait(thread);
+ thread_wait(thread, FALSE);
return (result);
}
if (thread->started)
thread_wakeup_one(&thread->suspend_count);
else {
- clear_wait(thread, THREAD_AWAKENED);
- thread->started = TRUE;
+ thread_start_internal(thread);
}
}
}
thread_lock(thread);
- if (!(thread->state & TH_ABORT)) {
- thread->state |= TH_ABORT;
+ if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
+ thread->sched_flags |= TH_SFLAG_ABORT;
install_special_handler_locked(thread);
}
else
- thread->state &= ~TH_ABORT_SAFELY;
+ thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
thread_unlock(thread);
splx(s);
thread_lock(thread);
if (!thread->at_safe_point ||
- clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
- if (!(thread->state & TH_ABORT)) {
- thread->state |= (TH_ABORT|TH_ABORT_SAFELY);
+ clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
+ if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
+ thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
install_special_handler_locked(thread);
}
}
kern_return_t
thread_info(
- thread_t thread,
+ thread_t thread,
thread_flavor_t flavor,
thread_info_t thread_info_out,
mach_msg_type_number_t *thread_info_count)
thread_mtx_lock(thread);
- if (thread->active)
+ if (thread->active || thread->inspection)
result = thread_info_internal(
thread, flavor, thread_info_out, thread_info_count);
else
thread_mtx_unlock(thread);
- if (thread_stop(thread)) {
+ if (thread_stop(thread, FALSE)) {
thread_mtx_lock(thread);
result = machine_thread_get_state(
thread, flavor, state, state_count);
result = machine_thread_get_state(
thread, flavor, state, state_count);
}
+ else if (thread->inspection)
+ {
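+		/* Threads marked for inspection are no longer active, but their saved state can still be read */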
+ result = machine_thread_get_state(
+ thread, flavor, state, state_count);
+ }
else
result = KERN_TERMINATED;
* Change thread's machine-dependent state. Called with nothing
* locked. Returns same way.
*/
-kern_return_t
-thread_set_state(
+static kern_return_t
+thread_set_state_internal(
register thread_t thread,
int flavor,
thread_state_t state,
- mach_msg_type_number_t state_count)
+ mach_msg_type_number_t state_count,
+ boolean_t from_user)
{
kern_return_t result = KERN_SUCCESS;
thread_mtx_unlock(thread);
- if (thread_stop(thread)) {
+ if (thread_stop(thread, TRUE)) {
thread_mtx_lock(thread);
result = machine_thread_set_state(
thread, flavor, state, state_count);
else
result = KERN_TERMINATED;
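+	/* When the request came from user space, record it in the external-modification statistics */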
+ if ((result == KERN_SUCCESS) && from_user)
+ extmod_statistics_incr_thread_set_state(thread);
+
thread_mtx_unlock(thread);
return (result);
}
+
+/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
+kern_return_t
+thread_set_state(
+ register thread_t thread,
+ int flavor,
+ thread_state_t state,
+ mach_msg_type_number_t state_count);
+
+kern_return_t
+thread_set_state(
+ register thread_t thread,
+ int flavor,
+ thread_state_t state,
+ mach_msg_type_number_t state_count)
+{
+ return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
+}
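+
+/* Variant reached from user space (see the note above); enables external-modification accounting */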
+kern_return_t
+thread_set_state_from_user(
+ register thread_t thread,
+ int flavor,
+ thread_state_t state,
+ mach_msg_type_number_t state_count)
+{
+ return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
+}
/*
* Kernel-internal "thread" interfaces used outside this file:
thread_mtx_unlock(thread);
- if (thread_stop(thread)) {
+ if (thread_stop(thread, TRUE)) {
thread_mtx_lock(thread);
result = machine_thread_state_initialize( thread );
thread_unstop(thread);
thread_release(thread);
}
else
- result = machine_thread_state_initialize( thread );
+ result = machine_thread_state_initialize( thread );
}
else
result = KERN_TERMINATED;
thread_mtx_unlock(target);
- if (thread_stop(target)) {
+ if (thread_stop(target, TRUE)) {
thread_mtx_lock(target);
result = machine_thread_dup(self, target);
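+			/* The duplicated thread inherits the source thread's affinity set, if any */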
+ if (self->affinity_set != AFFINITY_SET_NULL)
+ thread_affinity_dup(self, target);
thread_unstop(target);
}
else {
return (thread_get_state(thread, flavor, tstate, count));
}
+/*
+ * Change thread's machine-dependent userspace TSD base.
+ * Called with nothing locked. Returns same way.
+ */
+kern_return_t
+thread_set_tsd_base(
+ thread_t thread,
+ mach_vm_offset_t tsd_base)
+{
+ kern_return_t result = KERN_SUCCESS;
+
+ if (thread == THREAD_NULL)
+ return (KERN_INVALID_ARGUMENT);
+
+ thread_mtx_lock(thread);
+
+ if (thread->active) {
+ if (thread != current_thread()) {
+ thread_hold(thread);
+
+ thread_mtx_unlock(thread);
+
+ if (thread_stop(thread, TRUE)) {
+ thread_mtx_lock(thread);
+ result = machine_thread_set_tsd_base(thread, tsd_base);
+ thread_unstop(thread);
+ }
+ else {
+ thread_mtx_lock(thread);
+ result = KERN_ABORTED;
+ }
+
+ thread_release(thread);
+ }
+ else
+ result = machine_thread_set_tsd_base(thread, tsd_base);
+ }
+ else
+ result = KERN_TERMINATED;
+
+ thread_mtx_unlock(thread);
+
+ return (result);
+}
+
/*
* install_special_handler:
*
install_special_handler_locked(
thread_t thread)
{
- ReturnHandler **rh;
-
- /* The work handler must always be the last ReturnHandler on the list,
- because it can do tricky things like detach the thr_act. */
- for (rh = &thread->handlers; *rh; rh = &(*rh)->next)
- continue;
-
- if (rh != &thread->special_handler.next)
- *rh = &thread->special_handler;
-
+
/*
* Temporarily undepress, so target has
* a chance to do locking required to
* block itself in special_handler().
*/
- if (thread->sched_mode & TH_MODE_ISDEPRESSED)
- compute_priority(thread, TRUE);
+ if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)
+ thread_recompute_sched_pri(thread, TRUE);
thread_ast_set(thread, AST_APC);
/*
* Activation control support routines internal to this file:
+ *
*/
-void
-act_execute_returnhandlers(void)
-{
- thread_t thread = current_thread();
-
- thread_ast_clear(thread, AST_APC);
- spllo();
-
- for (;;) {
- ReturnHandler *rh;
-
- thread_mtx_lock(thread);
-
- (void)splsched();
- thread_lock(thread);
-
- rh = thread->handlers;
- if (rh != NULL) {
- thread->handlers = rh->next;
-
- thread_unlock(thread);
- spllo();
-
- thread_mtx_unlock(thread);
-
- /* Execute it */
- (*rh->handler)(rh, thread);
- }
- else
- break;
- }
-
- thread_unlock(thread);
- spllo();
-
- thread_mtx_unlock(thread);
-}
-
/*
* special_handler_continue
*
spl_t s = splsched();
thread_lock(thread);
- if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
+ if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
processor_t myprocessor = thread->last_processor;
thread->sched_pri = DEPRESSPRI;
myprocessor->current_pri = thread->sched_pri;
- thread->sched_mode &= ~TH_MODE_PREEMPT;
}
thread_unlock(thread);
splx(s);
*/
void
special_handler(
- __unused ReturnHandler *rh,
thread_t thread)
{
spl_t s;
s = splsched();
thread_lock(thread);
- thread->state &= ~(TH_ABORT|TH_ABORT_SAFELY); /* clear any aborts */
+ thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
thread_unlock(thread);
splx(s);
*/
if (thread->active) {
if (thread->suspend_count > 0) {
- if (thread->handlers == NULL) {
- assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
- thread_mtx_unlock(thread);
- thread_block((thread_continue_t)special_handler_continue);
- /*NOTREACHED*/
- }
-
+ assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
thread_mtx_unlock(thread);
-
- special_handler_continue();
+ thread_block((thread_continue_t)special_handler_continue);
/*NOTREACHED*/
}
}
thread_mtx_unlock(thread);
}
+/* Prototype, see justification above */
+kern_return_t
+act_set_state(
+ thread_t thread,
+ int flavor,
+ thread_state_t state,
+ mach_msg_type_number_t count);
+
kern_return_t
act_set_state(
thread_t thread,
}
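+
+/* User-space variant of act_set_state; defers to thread_set_state_from_user */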
+kern_return_t
+act_set_state_from_user(
+ thread_t thread,
+ int flavor,
+ thread_state_t state,
+ mach_msg_type_number_t count)
+{
+ if (thread == current_thread())
+ return (KERN_INVALID_ARGUMENT);
+
+ return (thread_set_state_from_user(thread, flavor, state, count));
+
+}
+
kern_return_t
act_get_state(
thread_t thread,
return (thread_get_state(thread, flavor, state, count));
}
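+/*
+ * act_set_ast:
+ *
+ * Common helper to post an AST to a thread.  If the thread is currently
+ * running on another processor, poke that processor so it notices the
+ * pending AST.
+ */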
-void
-act_set_astbsd(
- thread_t thread)
+static void
+act_set_ast(
+ thread_t thread,
+ ast_t ast)
{
- spl_t s = splsched();
-
+ spl_t s = splsched();
+
if (thread == current_thread()) {
- thread_ast_set(thread, AST_BSD);
+ thread_ast_set(thread, ast);
ast_propagate(thread->ast);
- }
- else {
- processor_t processor;
+ } else {
+ processor_t processor;
thread_lock(thread);
- thread_ast_set(thread, AST_BSD);
+ thread_ast_set(thread, ast);
processor = thread->last_processor;
- if ( processor != PROCESSOR_NULL &&
- processor->state == PROCESSOR_RUNNING &&
- processor->active_thread == thread )
+ if ( processor != PROCESSOR_NULL &&
+ processor->state == PROCESSOR_RUNNING &&
+ processor->active_thread == thread )
cause_ast_check(processor);
thread_unlock(thread);
}
-
+
splx(s);
}
void
-act_set_apc(
+act_set_astbsd(
thread_t thread)
{
- spl_t s = splsched();
-
- if (thread == current_thread()) {
- thread_ast_set(thread, AST_APC);
- ast_propagate(thread->ast);
- }
- else {
- processor_t processor;
+ act_set_ast( thread, AST_BSD );
+}
- thread_lock(thread);
- thread_ast_set(thread, AST_APC);
- processor = thread->last_processor;
- if ( processor != PROCESSOR_NULL &&
- processor->state == PROCESSOR_RUNNING &&
- processor->active_thread == thread )
- cause_ast_check(processor);
- thread_unlock(thread);
- }
-
- splx(s);
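+
+/* Post the kperf sampling AST to a thread */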
+void
+act_set_kperf(
+ thread_t thread)
+{
+ /* safety check */
+ if (thread != current_thread())
+		if (!ml_get_interrupts_enabled())
+ panic("unsafe act_set_kperf operation");
+
+ act_set_ast( thread, AST_KPERF );
}
+
+#if CONFIG_MACF
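+/* Post the MAC framework AST (AST_MACF) */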
+void
+act_set_astmacf(
+ thread_t thread)
+{
+ act_set_ast( thread, AST_MACF);
+}
+#endif
+
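+/* Post the resource-ledger AST so the thread handles ledger actions at its next AST point */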
+void
+set_astledger(thread_t thread)
+{
+ act_set_ast(thread, AST_LEDGER);
+}
+
+