diff --git a/osfmk/kern/syscall_subr.c b/osfmk/kern/syscall_subr.c
index 6f614474c13615f4d3d37efb78973a48fe3bdbc3..1ebf39e4f9d4f405b1a9e9a0f41f3624b0595bb9 100644
--- a/osfmk/kern/syscall_subr.c
+++ b/osfmk/kern/syscall_subr.c
@@ -98,14 +98,7 @@ __unused     struct pfz_exit_args *args)
  *     swtch and swtch_pri both attempt to context switch (logic in
  *     thread_block no-ops the context switch if nothing would happen).
  *     A boolean is returned that indicates whether there is anything
- *     else runnable.
- *
- *     This boolean can be used by a thread waiting on a
- *     lock or condition:  If FALSE is returned, the thread is justified
- *     in becoming a resource hog by continuing to spin because there's
- *     nothing else useful that the processor could do.  If TRUE is
- *     returned, the thread should make one more check on the
- *     lock and then be a good citizen and really suspend.
+ *     else runnable.  That's no excuse to spin, though.
  */
 
 static void
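For context, the pattern the deleted paragraph blessed looks roughly like the sketch below, from a waiter's point of view. my_try_lock() and my_lock_wait() are hypothetical stand-ins for a user-level lock's fast path and blocking path; swtch_pri() is the real Mach trap:

    #include <mach/mach.h>
    extern boolean_t swtch_pri(int pri);    /* Mach trap */

    extern boolean_t my_try_lock(void *lock);   /* hypothetical */
    extern void      my_lock_wait(void *lock);  /* hypothetical */

    void
    lock_acquire_spinny(void *lock)
    {
            while (!my_try_lock(lock)) {
                    if (swtch_pri(0)) {
                            /* Something else is runnable: stop burning
                             * the processor and really block. */
                            my_lock_wait(lock);
                            return;
                    }
                    /* FALSE: nothing else runnable. The deleted text
                     * called spinning "justified" here; the new
                     * comment warns against leaning on that. */
            }
    }
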
@@ -255,7 +248,8 @@ kern_return_t
 thread_switch(
        struct thread_switch_args *args)
 {
-       register thread_t               thread, self = current_thread();
+       thread_t                        thread = THREAD_NULL;
+       thread_t                        self = current_thread();
        mach_port_name_t                thread_name = args->thread_name;
        int                                             option = args->option;
        mach_msg_timeout_t              option_time = args->option_time;
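The user-space view of this trap is declared in <mach/thread_switch.h>. A directed handoff to a known thread, with no depression and no wait, looks like the following, where target_name is a hypothetical port name for the thread we want to run next:

    #include <mach/mach.h>
    #include <mach/thread_switch.h>

    /* Ask the kernel to run `target_name` next. With
     * SWITCH_OPTION_NONE there is no depression and no wait, so if
     * the handoff cannot happen this degenerates into a plain yield. */
    kern_return_t kr = thread_switch(target_name, SWITCH_OPTION_NONE, 0);
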
@@ -303,11 +297,11 @@ thread_switch(
        /*
         * Translate the port name if supplied.
         */
-    if (thread_name != MACH_PORT_NULL) {
-               ipc_port_t                      port;
+       if (thread_name != MACH_PORT_NULL) {
+               ipc_port_t port;
 
                if (ipc_port_translate_send(self->task->itk_space,
-                                                                       thread_name, &port) == KERN_SUCCESS) {
+                                           thread_name, &port) == KERN_SUCCESS) {
                        ip_reference(port);
                        ip_unlock(port);
 
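The back-to-back ip_reference()/ip_unlock() above is the standard consumption pattern for ipc_port_translate_send(), which returns with the port locked but takes no reference on the caller's behalf. In general form:

    ipc_port_t port;

    if (ipc_port_translate_send(space, name, &port) == KERN_SUCCESS) {
            /* port comes back locked, with no reference held */
            ip_reference(port);     /* pin the port across the unlock */
            ip_unlock(port);
            /* ... safe to use port now, e.g. convert it to a thread ... */
            ip_release(port);       /* drop the reference taken above */
    }
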
@@ -315,16 +309,11 @@ thread_switch(
                        ip_release(port);
 
                        if (thread == self) {
-                               (void)thread_deallocate_internal(thread);
+                               thread_deallocate(thread);
                                thread = THREAD_NULL;
                        }
                }
-               else
-                       thread = THREAD_NULL;
        }
-       else
-               thread = THREAD_NULL;
-
 
        if (option == SWITCH_OPTION_OSLOCK_DEPRESS || option == SWITCH_OPTION_OSLOCK_WAIT) {
                if (thread != THREAD_NULL) {
@@ -336,16 +325,18 @@ thread_switch(
                                 * a thread in another task)
                                 */
 
-                               (void)thread_deallocate_internal(thread);
+                               thread_deallocate(thread);
                                thread = THREAD_NULL;
                        } else {
                                /*
                                 * Attempt to kick the lock owner up to our same IO throttling tier.
                                 * If the thread is currently blocked in throttle_lowpri_io(),
                                 * it will immediately break out.
+                                *
+                                * TODO: SFI break out?
                                 */
                                int new_policy = proc_get_effective_thread_policy(self, TASK_POLICY_IO);
-                               
+
                                set_thread_iotier_override(thread, new_policy);
                        }
                }
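The override amounts to one-shot IO-tier inheritance: the waiter donates its own effective IO policy to the lock owner, so a throttled owner cannot stall a more favored waiter behind slow IO. A worked illustration of the intent (tier numbers are illustrative; lower is more favored):

    /* Illustrative only: the waiter runs at IO tier 0, the owner is
     * throttled at tier 3. Donate the waiter's effective tier so the
     * owner's IO stops crawling. */
    int waiter_tier = proc_get_effective_thread_policy(self, TASK_POLICY_IO);
    set_thread_iotier_override(thread, waiter_tier);
    /* If the owner is parked in throttle_lowpri_io(), it breaks out
     * immediately and completes its IO at the donated tier. */
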
@@ -355,62 +346,43 @@ thread_switch(
         * Try to handoff if supplied.
         */
        if (thread != THREAD_NULL) {
-               processor_t             processor;
-               spl_t                   s;
+               spl_t s = splsched();
 
-               s = splsched();
-               thread_lock(thread);
+               /* This may return a different thread if the target is pushing on something */
+               thread_t pulled_thread = thread_run_queue_remove_for_handoff(thread);
 
                KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED_THREAD_SWITCH)|DBG_FUNC_NONE,
-                                                         thread_tid(thread), thread->state, 0, 0, 0);
-
-               /*
-                *      Check that the thread is not bound
-                *      to a different processor, and that realtime
-                *      is not involved.
-                *
-                *      Next, pull it off its run queue.  If it
-                *      doesn't come, it's not eligible.
-                */
-               processor = current_processor();
-               if (processor->current_pri < BASEPRI_RTQUEUES                   &&
-                       thread->sched_pri < BASEPRI_RTQUEUES                            &&
-                       (thread->bound_processor == PROCESSOR_NULL      ||
-                        thread->bound_processor == processor)                          &&
-                               thread_run_queue_remove(thread)                                                 ) {
-                       /*
-                        *      Hah, got it!!
-                        */
-                       thread_unlock(thread);
-
-                       (void)thread_deallocate_internal(thread);
+                                     thread_tid(thread), thread->state,
+                                     pulled_thread ? TRUE : FALSE, 0, 0);
+
+               if (pulled_thread != THREAD_NULL) {
+                       /* We can't be dropping the last ref here */
+                       thread_deallocate_safe(thread);
 
                        if (wait_option)
                                assert_wait_timeout((event_t)assert_wait_timeout, THREAD_ABORTSAFE,
-                                                                                                               option_time, scale_factor);
-                       else
-                       if (depress_option)
+                                                   option_time, scale_factor);
+                       else if (depress_option)
                                thread_depress_ms(option_time);
 
                        self->saved.swtch.option = option;
                        self->saved.swtch.reenable_workq_callback = reenable_workq_callback;
 
-                       thread_run(self, (thread_continue_t)thread_switch_continue, NULL, thread);
+                       thread_run(self, (thread_continue_t)thread_switch_continue, NULL, pulled_thread);
                        /* NOTREACHED */
+                       panic("returned from thread_run!");
                }
 
-               thread_unlock(thread);
                splx(s);
 
                thread_deallocate(thread);
        }
-               
+
        if (wait_option)
                assert_wait_timeout((event_t)assert_wait_timeout, THREAD_ABORTSAFE, option_time, scale_factor);
-       else
-       if (depress_option)
+       else if (depress_option)
                thread_depress_ms(option_time);
-         
+
        self->saved.swtch.option = option;
        self->saved.swtch.reenable_workq_callback = reenable_workq_callback;
 
@@ -443,6 +415,14 @@ thread_depress_abstime(
                processor_t             myprocessor = self->last_processor;
 
                self->sched_pri = DEPRESSPRI;
+
+               KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
+                                     (uintptr_t)thread_tid(self),
+                                     self->base_pri,
+                                     self->sched_pri,
+                                     0, /* eventually, 'reason' */
+                                     0);
+
                myprocessor->current_pri = self->sched_pri;
                self->sched_flags |= TH_SFLAG_DEPRESS;
 
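Depressed priority is reachable directly from user space as well; a thread that wants to back off for a bounded window without fully blocking can do, for example (sketch, using the public trap from <mach/thread_switch.h>):

    /* Get out of the way for up to 5 ms: sched_pri drops to DEPRESSPRI
     * now (firing the tracepoint above), and thread_depress_expire()
     * restores the normal priority when the timer fires. */
    thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, 5 /* ms */);
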
@@ -482,7 +462,7 @@ thread_depress_expire(
     thread_lock(thread);
        if (--thread->depress_timer_active == 0) {
                thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK;
-               SCHED(compute_priority)(thread, FALSE);
+               thread_recompute_sched_pri(thread, FALSE);
        }
     thread_unlock(thread);
     splx(s);
@@ -503,7 +483,7 @@ thread_depress_abort_internal(
        if (!(thread->sched_flags & TH_SFLAG_POLLDEPRESS)) {
                if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
                        thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK;
-                       SCHED(compute_priority)(thread, FALSE);
+                       thread_recompute_sched_pri(thread, FALSE);
                        result = KERN_SUCCESS;
                }
 
@@ -538,6 +518,14 @@ thread_poll_yield(
                        thread_lock(self);
                        if (!(self->sched_flags & TH_SFLAG_DEPRESSED_MASK)) {
                                self->sched_pri = DEPRESSPRI;
+
+                               KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
+                                                     (uintptr_t)thread_tid(self),
+                                                     self->base_pri,
+                                                     self->sched_pri,
+                                                     0, /* eventually, 'reason' */
+                                                     0);
+
                                myprocessor->current_pri = self->sched_pri;
                        }
                        self->computation_epoch = abstime;