#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/cpu_data.h>
-#include <kern/counters.h>
#include <kern/extmod_statistics.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
{
thread_t thread = current_thread();
- thread_terminate_internal(thread);
+ thread_terminate_internal(thread, TH_TERMINATE_OPTION_NONE);
/*
* Handle the thread termination directly
thread->thread_magic = 0;
#endif /* MACH_ASSERT */
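+ /*
+ * The thread was parked on the terminated_threads queue when it was
+ * pulled off the global threads list (see the queue_enter() in the
+ * termination path); remove it under tasks_threads_lock before the
+ * structure is freed.
+ */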
+ lck_mtx_lock(&tasks_threads_lock);
+ assert(terminated_threads_count > 0);
+ queue_remove(&terminated_threads, thread, thread_t, threads);
+ terminated_threads_count--;
+ lck_mtx_unlock(&tasks_threads_lock);
+
zfree(thread_zone, thread);
}
lck_mtx_lock(&tasks_threads_lock);
queue_remove(&threads, thread, thread_t, threads);
threads_count--;
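+ /*
+ * Park the dying thread on the terminated_threads queue until its last
+ * reference is dropped; the final free path removes it again just before
+ * the structure is returned to thread_zone.
+ */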
+ queue_enter(&terminated_threads, thread, thread_t, threads);
+ terminated_threads_count++;
lck_mtx_unlock(&tasks_threads_lock);
thread_deallocate(thread);
}
}
-#define TH_OPTION_NONE 0x00
-#define TH_OPTION_NOCRED 0x01
-#define TH_OPTION_NOSUSP 0x02
-#define TH_OPTION_WORKQ 0x04
+__options_decl(thread_create_internal_options_t, uint32_t, {
+ TH_OPTION_NONE = 0x00,
+ TH_OPTION_NOCRED = 0x01,
+ TH_OPTION_NOSUSP = 0x02,
+ TH_OPTION_WORKQ = 0x04,
+ TH_OPTION_IMMOVABLE = 0x08,
+ TH_OPTION_PINNED = 0x10,
+});
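+
+/*
+ * Usage sketch (illustrative): callers OR these flags together and pass them
+ * to thread_create_internal(); e.g. thread_create_pinned() below asks for
+ * both a pinned and an immovable control port:
+ *
+ *	thread_create_internal_options_t opts = TH_OPTION_PINNED | TH_OPTION_IMMOVABLE;
+ *	kern_return_t kr = thread_create_internal(task, -1,
+ *	    (thread_continue_t)thread_bootstrap_return, NULL, opts, &thread);
+ */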
/*
* Create a new thread.
thread_create_internal(
task_t parent_task,
integer_t priority,
- thread_continue_t continuation,
+ thread_continue_t continuation,
void *parameter,
- int options,
+ thread_create_internal_options_t options,
thread_t *out_thread)
{
thread_t new_thread;
- static thread_t first_thread;
+ static thread_t first_thread;
+ ipc_thread_init_options_t init_options = IPC_THREAD_INIT_NONE;
/*
* Allocate a thread and initialize static fields
init_thread_from_template(new_thread);
}
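+ /*
+ * Translate the creation options into the IPC init options consumed by
+ * ipc_thread_init() below, so the thread's control port can be set up
+ * pinned and/or immovable from the start.
+ */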
+ if (options & TH_OPTION_PINNED) {
+ init_options |= IPC_THREAD_INIT_PINNED;
+ }
+
+ if (options & TH_OPTION_IMMOVABLE) {
+ init_options |= IPC_THREAD_INIT_IMMOVABLE;
+ }
+
os_ref_init_count(&new_thread->ref_count, &thread_refgrp, 2);
#if DEBUG || DEVELOPMENT
queue_init(&new_thread->t_temp_alloc_list);
lck_mtx_init(&new_thread->mutex, &thread_lck_grp, LCK_ATTR_NULL);
- ipc_thread_init(new_thread);
+ ipc_thread_init(new_thread, init_options);
new_thread->continuation = continuation;
new_thread->parameter = parameter;
}
static kern_return_t
-thread_create_internal2(
- task_t task,
- thread_t *new_thread,
- boolean_t from_user,
- thread_continue_t continuation)
+thread_create_with_options_internal(
+ task_t task,
+ thread_t *new_thread,
+ boolean_t from_user,
+ thread_create_internal_options_t options,
+ thread_continue_t continuation)
{
kern_return_t result;
- thread_t thread;
+ thread_t thread;
if (task == TASK_NULL || task == kernel_task) {
return KERN_INVALID_ARGUMENT;
}
#endif
- result = thread_create_internal(task, -1, continuation, NULL, TH_OPTION_NONE, &thread);
+ result = thread_create_internal(task, -1, continuation, NULL, options, &thread);
if (result != KERN_SUCCESS) {
return result;
}
task_t task,
thread_t *new_thread)
{
- return thread_create_internal2(task, new_thread, FALSE, (thread_continue_t)thread_bootstrap_return);
+ return thread_create_with_options_internal(task, new_thread, FALSE, TH_OPTION_NONE,
+ (thread_continue_t)thread_bootstrap_return);
+}
+
+/*
+ * Create a thread that has its itk_self pinned
+ * Deprecated, should be cleaned up once rdar://70892168 lands
+ */
+kern_return_t
+thread_create_pinned(
+ task_t task,
+ thread_t *new_thread)
+{
+ return thread_create_with_options_internal(task, new_thread, FALSE,
+ TH_OPTION_PINNED | TH_OPTION_IMMOVABLE, (thread_continue_t)thread_bootstrap_return);
+}
+
+kern_return_t
+thread_create_immovable(
+ task_t task,
+ thread_t *new_thread)
+{
+ return thread_create_with_options_internal(task, new_thread, FALSE,
+ TH_OPTION_IMMOVABLE, (thread_continue_t)thread_bootstrap_return);
}
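
/*
 * Usage sketch (illustrative): a kernel caller that wants the new thread's
 * control port to be immovable from creation might do:
 *
 *	thread_t thread;
 *	kern_return_t kr = thread_create_immovable(task, &thread);
 *	if (kr != KERN_SUCCESS) {
 *		return kr;
 *	}
 *
 * The returned thread carries a reference, to be managed the same way as a
 * thread_create() result.
 */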
kern_return_t
task_t task,
thread_t *new_thread)
{
- return thread_create_internal2(task, new_thread, TRUE, (thread_continue_t)thread_bootstrap_return);
+ return thread_create_with_options_internal(task, new_thread, TRUE, TH_OPTION_NONE,
+ (thread_continue_t)thread_bootstrap_return);
}
kern_return_t
thread_t *new_thread,
thread_continue_t continuation)
{
- return thread_create_internal2(task, new_thread, FALSE, continuation);
+ return thread_create_with_options_internal(task, new_thread, FALSE, TH_OPTION_NONE, continuation);
}
/*
kern_return_t
thread_create_waiting(
- task_t task,
- thread_continue_t continuation,
- event_t event,
- thread_t *new_thread)
+ task_t task,
+ thread_continue_t continuation,
+ event_t event,
+ th_create_waiting_options_t options,
+ thread_t *new_thread)
{
+ thread_create_internal_options_t ci_options = TH_OPTION_NONE;
+
+ assert((options & ~TH_CREATE_WAITING_OPTION_MASK) == 0);
+ if (options & TH_CREATE_WAITING_OPTION_PINNED) {
+ ci_options |= TH_OPTION_PINNED;
+ }
+ if (options & TH_CREATE_WAITING_OPTION_IMMOVABLE) {
+ ci_options |= TH_OPTION_IMMOVABLE;
+ }
+
return thread_create_waiting_internal(task, continuation, event,
- kThreadWaitNone, TH_OPTION_NONE, new_thread);
+ kThreadWaitNone, ci_options, new_thread);
}
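
/*
 * Usage sketch (illustrative): a caller that wants the suspended thread's
 * control port pinned and immovable from birth passes the corresponding
 * waiting options, which the code above translates into TH_OPTION_* flags:
 *
 *	kr = thread_create_waiting(task, continuation, event,
 *	    TH_CREATE_WAITING_OPTION_PINNED | TH_CREATE_WAITING_OPTION_IMMOVABLE,
 *	    &thread);
 */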
thread_continue_t continuation,
thread_t *new_thread)
{
- int options = TH_OPTION_NOCRED | TH_OPTION_NOSUSP | TH_OPTION_WORKQ;
+ /*
+ * Create the thread, but don't pin its control port just yet, in case someone
+ * calls task_threads() and deallocates the pinned port before the kernel
+ * copyout happens, which would result in a pinned-port guard exception.
+ * Instead, pin it and make it immovable atomically at copyout during
+ * workq_setup_and_run().
+ */
+ int options = TH_OPTION_NOCRED | TH_OPTION_NOSUSP | TH_OPTION_WORKQ | TH_OPTION_IMMOVABLE;
return thread_create_waiting_internal(task, continuation, NULL,
kThreadWaitParkedWorkQueue, options, new_thread);
}
return KERN_INVALID_ARGUMENT;
}
- assert(host_priv == &realhost);
-
if (prev_state) {
*prev_state = (thread->options & TH_OPT_VMPRIV) != 0;
}
ip_unlock(port);
return;
}
- thread = (thread_t)port->ip_kobject;
+ thread = (thread_t)ipc_kobject_get(port);
kotype = ip_kotype(port);
if (thread != THREAD_NULL) {
assert((IKOT_THREAD_READ == kotype) || (IKOT_THREAD_INSPECT == kotype));
return;
}
+ if (kotype == IKOT_THREAD_READ) {
+ flavor = THREAD_FLAVOR_READ;
+ } else {
+ flavor = THREAD_FLAVOR_INSPECT;
+ }
+
thread_mtx_lock(thread);
ip_lock(port);
- require_ip_active(port);
/*
+ * If the port is no longer active, then ipc_thread_terminate() ran
+ * and destroyed the kobject already. Just deallocate the thread
+ * ref we took and go away.
+ *
+ * It is also possible that several no-senders notifications are in
+ * flight; only one of them will NULL out the port entry, and that is
+ * the one that gets to deallocate the port.
+ *
* Check for a stale no-senders notification. A call to any function
* that vends out send rights to this port could resurrect it between
* this notification being generated and actually being handled here.
*/
- if (port->ip_srights > 0) {
+ if (!ip_active(port) ||
+ thread->ith_thread_ports[flavor] != port ||
+ port->ip_srights > 0) {
ip_unlock(port);
thread_mtx_unlock(thread);
thread_deallocate(thread);
return;
}
- if (kotype == IKOT_THREAD_READ) {
- flavor = THREAD_FLAVOR_READ;
- } else {
- flavor = THREAD_FLAVOR_INSPECT;
- }
- assert(thread->ith_self[flavor] == port);
- thread->ith_self[flavor] = IP_NULL;
- port->ip_kobject = IKOT_NONE;
+
+ assert(thread->ith_thread_ports[flavor] == port);
+ thread->ith_thread_ports[flavor] = IP_NULL;
+ ipc_kobject_set_atomically(port, IKO_NULL, IKOT_NONE);
ip_unlock(port);
thread_mtx_unlock(thread);
thread_deallocate(thread);