git.saurik.com Git - apple/xnu.git/blobdiff - osfmk/kern/thread_policy.c
xnu-1504.15.3.tar.gz
[apple/xnu.git] / osfmk / kern / thread_policy.c
index 93a3287641d232eb4074544a5b2031c7d2d22e64..58028df2d289cdfefb835f9359617964c97cec87 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
 #include <kern/kern_types.h>
 #include <kern/processor.h>
 #include <kern/thread.h>
+#include <kern/affinity.h>
 
 static void
 thread_recompute_priority(
        thread_t                thread);
 
+
+
 kern_return_t
 thread_policy_set(
        thread_t                                thread,
@@ -44,19 +47,32 @@ thread_policy_set(
        thread_policy_t                 policy_info,
        mach_msg_type_number_t  count)
 {
-       kern_return_t                   result = KERN_SUCCESS;
-       spl_t                                   s;
 
        if (thread == THREAD_NULL)
                return (KERN_INVALID_ARGUMENT);
 
+       if (thread->static_param)
+               return (KERN_SUCCESS);
+
+       return (thread_policy_set_internal(thread, flavor, policy_info, count));
+}
+
+kern_return_t
+thread_policy_set_internal(
+       thread_t                                thread,
+       thread_policy_flavor_t  flavor,
+       thread_policy_t                 policy_info,
+       mach_msg_type_number_t  count)
+{
+       kern_return_t                   result = KERN_SUCCESS;
+       spl_t                                   s;
+
        thread_mtx_lock(thread);
        if (!thread->active) {
                thread_mtx_unlock(thread);
 
                return (KERN_TERMINATED);
        }
-
        switch (flavor) {
 
        case THREAD_EXTENDED_POLICY:
@@ -81,15 +97,15 @@ thread_policy_set(
                        if (timeshare && !oldmode) {
                                thread->sched_mode |= TH_MODE_TIMESHARE;
 
-                               if (thread->state & TH_RUN)
-                                       pset_share_incr(thread->processor_set);
+                               if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
+                                       sched_share_incr();
                        }
                        else
                        if (!timeshare && oldmode) {
                                thread->sched_mode &= ~TH_MODE_TIMESHARE;
 
-                               if (thread->state & TH_RUN)
-                                       pset_share_decr(thread->processor_set);
+                               if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
+                                       sched_share_decr();
                        }
 
                        thread_recompute_priority(thread);
@@ -138,8 +154,8 @@ thread_policy_set(
                        if (thread->sched_mode & TH_MODE_TIMESHARE) {
                                thread->sched_mode &= ~TH_MODE_TIMESHARE;
 
-                               if (thread->state & TH_RUN)
-                                       pset_share_decr(thread->processor_set);
+                               if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
+                                       sched_share_decr();
                        }
                        thread->sched_mode |= TH_MODE_REALTIME;
                        thread_recompute_priority(thread);
@@ -163,7 +179,6 @@ thread_policy_set(
                        result = KERN_INVALID_ARGUMENT;
                        break;
                }
-
                info = (thread_precedence_policy_t)policy_info;
 
                s = splsched();
@@ -179,13 +194,35 @@ thread_policy_set(
                break;
        }
 
+       case THREAD_AFFINITY_POLICY:
+       {
+               thread_affinity_policy_t        info;
+
+               if (!thread_affinity_is_supported()) {
+                       result = KERN_NOT_SUPPORTED;
+                       break;
+               }
+               if (count < THREAD_AFFINITY_POLICY_COUNT) {
+                       result = KERN_INVALID_ARGUMENT;
+                       break;
+               }
+
+               info = (thread_affinity_policy_t) policy_info;
+               /*
+                * Unlock the thread mutex here and
+                * return directly after calling thread_affinity_set().
+                * This is necessary for correct lock ordering because
+                * thread_affinity_set() takes the task lock.
+                */
+               thread_mtx_unlock(thread);
+               return thread_affinity_set(thread, info->affinity_tag);
+       }
        default:
                result = KERN_INVALID_ARGUMENT;
                break;
        }
 
        thread_mtx_unlock(thread);
-
        return (result);
 }
 
@@ -244,14 +281,19 @@ void
 thread_policy_reset(
        thread_t                thread)
 {
+       spl_t           s;
+
+       s = splsched();
+       thread_lock(thread);
+
        if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
                thread->sched_mode &= ~TH_MODE_REALTIME;
 
                if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
                        thread->sched_mode |= TH_MODE_TIMESHARE;
 
-                       if (thread->state & TH_RUN)
-                               pset_share_incr(thread->processor_set);
+                       if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
+                               sched_share_incr();
                }
        }
        else {
@@ -262,6 +304,9 @@ thread_policy_reset(
        thread->importance = 0;
 
        thread_recompute_priority(thread);
+
+       thread_unlock(thread);
+       splx(s);
 }
 
 kern_return_t
@@ -384,6 +429,29 @@ thread_policy_get(
                break;
        }
 
+       case THREAD_AFFINITY_POLICY:
+       {
+               thread_affinity_policy_t                info;
+
+               if (!thread_affinity_is_supported()) {
+                       result = KERN_NOT_SUPPORTED;
+                       break;
+               }
+               if (*count < THREAD_AFFINITY_POLICY_COUNT) {
+                       result = KERN_INVALID_ARGUMENT;
+                       break;
+               }
+
+               info = (thread_affinity_policy_t)policy_info;
+
+               if (!(*get_default))
+                       info->affinity_tag = thread_affinity_get(thread);
+               else
+                       info->affinity_tag = THREAD_AFFINITY_TAG_NULL;
+
+               break;
+       }
+
        default:
                result = KERN_INVALID_ARGUMENT;
                break;