-/* apply internal backgrounding for workqueue threads */
-int
-proc_apply_workq_bgthreadpolicy(thread_t thread)
-{
- if (thread == THREAD_NULL)
- return ESRCH;
-
- proc_set_task_policy(thread->task, thread, TASK_POLICY_ATTRIBUTE,
- TASK_POLICY_WORKQ_BG, TASK_POLICY_ENABLE);
-
- return(0);
-}
-
-/*
- * Remove internal backgrounding for workqueue threads.
- * Note that this does NOT go and find sockets that were created while the
- * thread was backgrounded and un-background them.
- */
-int
-proc_restore_workq_bgthreadpolicy(thread_t thread)
-{
- if (thread == THREAD_NULL)
- return ESRCH;
-
- proc_set_task_policy(thread->task, thread, TASK_POLICY_ATTRIBUTE,
- TASK_POLICY_WORKQ_BG, TASK_POLICY_DISABLE);
-
- return(0);
-}
-
-/* here for temporary compatibility */
-int
-proc_setthread_saved_importance(__unused thread_t thread, __unused int importance)
-{
- return(0);
-}
-
-/*
- * Set an override on the thread that is consulted with a
- * higher priority than the task/thread policy. This should
- * only be set for temporary grants until the thread
- * returns to the userspace boundary.
- *
- * We use atomic operations to swap in the override, with
- * the assumption that the thread itself can
- * read the override and clear it on return to userspace.
- *
- * No locking is performed, since it is acceptable to see
- * a stale override for one loop through throttle_lowpri_io().
- * However, a thread reference must be held on the thread.
- */
-
-void set_thread_iotier_override(thread_t thread, int policy)
-{
- int current_override;
-
- /* Let most aggressive I/O policy win until user boundary */
- do {
- current_override = thread->iotier_override;
-
- if (current_override != THROTTLE_LEVEL_NONE)
- policy = MIN(current_override, policy);
-
- if (current_override == policy) {
- /* no effective change */
- return;
- }
- } while (!OSCompareAndSwap(current_override, policy, &thread->iotier_override));
-
- /*
- * Since the thread may be currently throttled,
- * re-evaluate tiers and potentially break out
- * of an msleep
- */
- rethrottle_thread(thread->uthread);
-}
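-
-/*
- * Illustrative sketch only (not part of this file): the consumer side that the
- * comment above assumes. On its way back to userspace, the thread reads its own
- * iotier_override with a plain word-sized load and resets it; a concurrent setter
- * then simply fails its compare-and-swap and retries against the cleared value.
- * The function name below is hypothetical.
- */
-static int
-example_consume_iotier_override(thread_t thread)
-{
- int override = thread->iotier_override;
-
- if (override != THROTTLE_LEVEL_NONE) {
- /* Clear the temporary grant before crossing the userspace boundary */
- thread->iotier_override = THROTTLE_LEVEL_NONE;
- }
-
- return override;
-}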
-
-/*
- * Userspace synchronization routines (like pthread mutexes, pthread reader-writer locks,
- * semaphores, dispatch_sync) may result in priority inversions, where a higher-priority
- * thread (in terms of scheduler priority, I/O tier, or QoS tier) waits on a resource owned
- * by a lower-priority thread. In these cases, we attempt to propagate the priority to the
- * owning thread, as long as the subsystem informs us of the relationships between the
- * threads. The userspace synchronization subsystem should itself maintain the
- * owner->resource and resource->waiters information.
- */
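-
-/*
- * In rough terms, the expected flow (an illustration, not a contract defined in this
- * file): a waiter identifies which thread owns the contended resource, calls
- * proc_thread_qos_add_override() on the owner so it runs at the waiter's QoS, and
- * calls proc_thread_qos_remove_override() (or proc_thread_qos_reset_override()) once
- * the owner has released the resource and the boost is no longer needed.
- */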
-
-/*
- * This helper canonicalizes the resource/resource_type given the current qos_override_mode
- * in effect. Note that wildcards (THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD) may need
- * to be handled specially in the future, but for now it's fine to slam
- * *resource to USER_ADDR_NULL even if it was previously a wildcard.
- */
-static void _canonicalize_resource_and_type(user_addr_t *resource, int *resource_type) {
- if (qos_override_mode == QOS_OVERRIDE_MODE_OVERHANG_PEAK || qos_override_mode == QOS_OVERRIDE_MODE_IGNORE_OVERRIDE) {
- /* Map all input resource/type to a single one */
- *resource = USER_ADDR_NULL;
- *resource_type = THREAD_QOS_OVERRIDE_TYPE_UNKNOWN;
- } else if (qos_override_mode == QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE) {
- /* no transform */
- } else if (qos_override_mode == QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_IGNORE_DISPATCH) {
- /* Map all dispatch overrides to a single one, to avoid memory overhead */
- if (*resource_type == THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE) {
- *resource = USER_ADDR_NULL;
- }
- } else if (qos_override_mode == QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_SINGLE_MUTEX_OVERRIDE) {
- /* Map all mutex overrides to a single one, to avoid memory overhead */
- if (*resource_type == THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX) {
- *resource = USER_ADDR_NULL;
- }
- }
-}
-
-/* This helper routine finds an existing override, if any. Locking should be done by the caller. */
-static struct thread_qos_override *_find_qos_override(thread_t thread, user_addr_t resource, int resource_type) {
- struct thread_qos_override *override;
-
- override = thread->overrides;
- while (override) {
- if (override->override_resource == resource &&
- override->override_resource_type == resource_type) {
- return override;
- }
-
- override = override->override_next;
- }
-
- return NULL;
-}
-
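-/*
- * This helper decrements (or, if reset is TRUE, zeroes) the contended-resource count of
- * the matching override(s), unlinks any override whose count reaches zero, and chains the
- * unlinked entries onto *free_override_list so the caller can zfree them after dropping
- * the lock. Locking should be done by the caller.
- */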
-static void _find_and_decrement_qos_override(thread_t thread, user_addr_t resource, int resource_type, boolean_t reset, struct thread_qos_override **free_override_list) {
- struct thread_qos_override *override, *override_prev;
-
- override_prev = NULL;
- override = thread->overrides;
- while (override) {
- struct thread_qos_override *override_next = override->override_next;
-
- if ((THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD == resource || override->override_resource == resource) &&
- override->override_resource_type == resource_type) {
- if (reset) {
- override->override_contended_resource_count = 0;
- } else {
- override->override_contended_resource_count--;
- }
-
- if (override->override_contended_resource_count == 0) {
- if (override_prev == NULL) {
- thread->overrides = override_next;
- } else {
- override_prev->override_next = override_next;
- }
-
- /* Add to out-param for later zfree */
- override->override_next = *free_override_list;
- *free_override_list = override;
- } else {
- override_prev = override;
- }
-
- if (THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD != resource) {
- return;
- }
- } else {
- override_prev = override;
- }
-
- override = override_next;
- }
-}
-
-/* This helper recalculates the current requested override using the policy selected at boot */
-static int _calculate_requested_qos_override(thread_t thread)
-{
- if (qos_override_mode == QOS_OVERRIDE_MODE_IGNORE_OVERRIDE) {
- return THREAD_QOS_UNSPECIFIED;
- }
-
- /* iterate over all overrides and calculate MAX */
- struct thread_qos_override *override;
- int qos_override = THREAD_QOS_UNSPECIFIED;
-
- override = thread->overrides;
- while (override) {
- if (qos_override_mode != QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_IGNORE_DISPATCH ||
- override->override_resource_type != THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE) {
- qos_override = MAX(qos_override, override->override_qos);
- }
-
- override = override->override_next;
- }
-
- return qos_override;
-}
-
-boolean_t proc_thread_qos_add_override(task_t task, thread_t thread, uint64_t tid, int override_qos, boolean_t first_override_for_resource, user_addr_t resource, int resource_type)
-{
- thread_t self = current_thread();
- struct task_pend_token pend_token = {};
-
- /* XXX move to thread mutex when thread policy does */
- task_lock(task);
-
- /*
- * If a thread is passed, use it directly: the caller must hold an explicit (or implicit)
- * reference to it. Otherwise, resolve the thread from the given tid.
- */
-
- if (thread != THREAD_NULL) {
- assert(task == thread->task);
- } else {
- if (tid == self->thread_id) {
- thread = self;
- } else {
- thread = task_findtid(task, tid);
-
- if (thread == THREAD_NULL) {
- KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_ADD_OVERRIDE)) | DBG_FUNC_NONE,
- tid, 0, 0xdead, 0, 0);
- task_unlock(task);
- return FALSE;
- }
- }
- }
-
- KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_ADD_OVERRIDE)) | DBG_FUNC_START,
- thread_tid(thread), override_qos, first_override_for_resource ? 1 : 0, 0, 0);
-
- DTRACE_BOOST5(qos_add_override_pre, uint64_t, tid, uint64_t, thread->requested_policy.thrp_qos,
- uint64_t, thread->effective_policy.thep_qos, int, override_qos, boolean_t, first_override_for_resource);
-
- struct task_requested_policy requested = thread->requested_policy;
- struct thread_qos_override *override;
- struct thread_qos_override *deferred_free_override = NULL;
- int new_qos_override, prev_qos_override;
- int new_effective_qos;
- boolean_t has_thread_reference = FALSE;
-
- _canonicalize_resource_and_type(&resource, &resource_type);
-
- if (first_override_for_resource) {
- override = _find_qos_override(thread, resource, resource_type);
- if (override) {
- override->override_contended_resource_count++;
- } else {
- struct thread_qos_override *override_new;
-
- /* We need to allocate a new object. Drop the task lock and recheck afterwards in case someone else added the override */
- thread_reference(thread);
- has_thread_reference = TRUE;
- task_unlock(task);
- override_new = zalloc(thread_qos_override_zone);
- task_lock(task);
-
- override = _find_qos_override(thread, resource, resource_type);
- if (override) {
- /* Someone else already allocated while the task lock was dropped */
- deferred_free_override = override_new;
- override->override_contended_resource_count++;
- } else {
- override = override_new;
- override->override_next = thread->overrides;
- override->override_contended_resource_count = 1 /* since first_override_for_resource was TRUE */;
- override->override_resource = resource;
- override->override_resource_type = resource_type;
- override->override_qos = THREAD_QOS_UNSPECIFIED;
- thread->overrides = override;
- }
- }
- } else {
- override = _find_qos_override(thread, resource, resource_type);
- }
-
- if (override) {
- if (override->override_qos == THREAD_QOS_UNSPECIFIED)
- override->override_qos = override_qos;
- else
- override->override_qos = MAX(override->override_qos, override_qos);
- }
-
- /* Determine how to combine the various overrides into a single current requested override */
- prev_qos_override = requested.thrp_qos_override;
- new_qos_override = _calculate_requested_qos_override(thread);
-
- if (new_qos_override != prev_qos_override) {
- requested.thrp_qos_override = new_qos_override;
-
- thread->requested_policy = requested;
-
- task_policy_update_locked(task, thread, &pend_token);
-
- if (!has_thread_reference) {
- thread_reference(thread);
- }
-
- task_unlock(task);
-
- task_policy_update_complete_unlocked(task, thread, &pend_token);
-
- new_effective_qos = thread->effective_policy.thep_qos;
-
- thread_deallocate(thread);
- } else {
- new_effective_qos = thread->effective_policy.thep_qos;
-
- task_unlock(task);
-
- if (has_thread_reference) {
- thread_deallocate(thread);
- }
- }
-
- if (deferred_free_override) {
- zfree(thread_qos_override_zone, deferred_free_override);
- }
-
- DTRACE_BOOST3(qos_add_override_post, int, prev_qos_override, int, new_qos_override,
- int, new_effective_qos);
-
- KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_ADD_OVERRIDE)) | DBG_FUNC_END,
- new_qos_override, resource, resource_type, 0, 0);
-
- return TRUE;
-}
-
-
-static boolean_t _proc_thread_qos_remove_override_internal(task_t task, thread_t thread, uint64_t tid, user_addr_t resource, int resource_type, boolean_t reset)
-{
- thread_t self = current_thread();
- struct task_pend_token pend_token = {};
-
- /* XXX move to thread mutex when thread policy does */
- task_lock(task);
-
- /*
- * If a thread is passed, use it directly: the caller must hold an explicit (or implicit)
- * reference to it. Otherwise, resolve the thread from the given tid.
- */
- if (thread != THREAD_NULL) {
- assert(task == thread->task);
- } else {
- if (tid == self->thread_id) {
- thread = self;
- } else {
- thread = task_findtid(task, tid);
-
- if (thread == THREAD_NULL) {
- KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_REMOVE_OVERRIDE)) | DBG_FUNC_NONE,
- tid, 0, 0xdead, 0, 0);
- task_unlock(task);
- return FALSE;
- }
- }
- }
-
- struct task_requested_policy requested = thread->requested_policy;
- struct thread_qos_override *deferred_free_override_list = NULL;
- int new_qos_override, prev_qos_override;
-
- _canonicalize_resource_and_type(&resource, &resource_type);
-
- _find_and_decrement_qos_override(thread, resource, resource_type, reset, &deferred_free_override_list);
-
- KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_REMOVE_OVERRIDE)) | DBG_FUNC_START,
- thread_tid(thread), resource, reset, 0, 0);
-
- /* Determine how to combine the various overrides into a single current requested override */
- prev_qos_override = requested.thrp_qos_override;
- new_qos_override = _calculate_requested_qos_override(thread);
-
- if (new_qos_override != prev_qos_override) {
- requested.thrp_qos_override = new_qos_override;
-
- thread->requested_policy = requested;
-
- task_policy_update_locked(task, thread, &pend_token);
-
- thread_reference(thread);
-
- task_unlock(task);
-
- task_policy_update_complete_unlocked(task, thread, &pend_token);
-
- thread_deallocate(thread);
- } else {
- task_unlock(task);
- }
-
- while (deferred_free_override_list) {
- struct thread_qos_override *override_next = deferred_free_override_list->override_next;
-
- zfree(thread_qos_override_zone, deferred_free_override_list);
- deferred_free_override_list = override_next;
- }
-
- KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_REMOVE_OVERRIDE)) | DBG_FUNC_END,
- 0, 0, 0, 0, 0);
-
- return TRUE;
-}
-
-boolean_t proc_thread_qos_remove_override(task_t task, thread_t thread, uint64_t tid, user_addr_t resource, int resource_type)
-{
- return _proc_thread_qos_remove_override_internal(task, thread, tid, resource, resource_type, FALSE);
-}
-
-boolean_t proc_thread_qos_reset_override(task_t task, thread_t thread, uint64_t tid, user_addr_t resource, int resource_type)
-{
- return _proc_thread_qos_remove_override_internal(task, thread, tid, resource, resource_type, TRUE);
-}
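-
-/*
- * Illustrative sketch only (an assumption, not code from this file): how a userspace
- * synchronization shim might pair the calls above. When the first waiter blocks on a
- * pthread mutex at user address mutex_addr owned by the thread with id owner_tid, it
- * requests an override at the waiter's QoS; when the owner unlocks, the matching
- * override is dropped. The wrapper names and parameters below are hypothetical.
- */
-static void
-example_boost_mutex_owner(task_t task, uint64_t owner_tid, int waiter_qos,
- user_addr_t mutex_addr, boolean_t first_waiter)
-{
- /* Boost the owner for this specific mutex; tracking state is allocated on first use */
- (void)proc_thread_qos_add_override(task, THREAD_NULL, owner_tid, waiter_qos,
- first_waiter, mutex_addr, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
-}
-
-static void
-example_unboost_mutex_owner(task_t task, uint64_t owner_tid, user_addr_t mutex_addr)
-{
- /* Drop one contended-resource reference; the override is freed when it reaches zero */
- (void)proc_thread_qos_remove_override(task, THREAD_NULL, owner_tid, mutex_addr,
- THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
-}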
-
-/* Deallocate a thread's QoS override state; called before thread termination */
-void proc_thread_qos_deallocate(thread_t thread)
-{
- task_t task = thread->task;
- struct thread_qos_override *override;
-
- /* XXX move to thread mutex when thread policy does */
- task_lock(task);
- override = thread->overrides;
- thread->overrides = NULL; /* task policy re-evaluation needed? */
- thread->requested_policy.thrp_qos_override = THREAD_QOS_UNSPECIFIED;
- task_unlock(task);
-
- while (override) {
- struct thread_qos_override *override_next = override->override_next;
-
- zfree(thread_qos_override_zone, override);
- override = override_next;
- }
-}
-