/*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
+#include <kern/affinity.h>
static void
thread_recompute_priority(
thread_t thread);
+
+
kern_return_t
thread_policy_set(
	thread_t thread,
	thread_policy_flavor_t flavor,
thread_policy_t policy_info,
mach_msg_type_number_t count)
{
- kern_return_t result = KERN_SUCCESS;
- spl_t s;
if (thread == THREAD_NULL)
return (KERN_INVALID_ARGUMENT);
+ if (thread->static_param)
+ return (KERN_SUCCESS);
+
+ return (thread_policy_set_internal(thread, flavor, policy_info, count));
+}
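+/*
+ * Illustrative aside, not part of this diff: user space reaches this
+ * entry point via the thread_policy_set() MIG call. A minimal sketch,
+ * assuming the standard Mach headers, that opts the calling thread out
+ * of timesharing (the THREAD_EXTENDED_POLICY case below):
+ *
+ *	#include <mach/mach.h>
+ *	#include <mach/thread_policy.h>
+ *
+ *	thread_extended_policy_data_t ext = { .timeshare = FALSE };
+ *	kern_return_t kr = thread_policy_set(mach_thread_self(),
+ *	    THREAD_EXTENDED_POLICY, (thread_policy_t)&ext,
+ *	    THREAD_EXTENDED_POLICY_COUNT);
+ */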
+
+kern_return_t
+thread_policy_set_internal(
+ thread_t thread,
+ thread_policy_flavor_t flavor,
+ thread_policy_t policy_info,
+ mach_msg_type_number_t count)
+{
+ kern_return_t result = KERN_SUCCESS;
+ spl_t s;
+
thread_mtx_lock(thread);
if (!thread->active) {
thread_mtx_unlock(thread);
return (KERN_TERMINATED);
}
-
switch (flavor) {
case THREAD_EXTENDED_POLICY:
if (timeshare && !oldmode) {
thread->sched_mode |= TH_MODE_TIMESHARE;
- if (thread->state & TH_RUN)
- pset_share_incr(thread->processor_set);
+ if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
+ sched_share_incr();
}
else
if (!timeshare && oldmode) {
thread->sched_mode &= ~TH_MODE_TIMESHARE;
- if (thread->state & TH_RUN)
- pset_share_decr(thread->processor_set);
+ if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
+ sched_share_decr();
}
thread_recompute_priority(thread);
if (thread->sched_mode & TH_MODE_TIMESHARE) {
thread->sched_mode &= ~TH_MODE_TIMESHARE;
- if (thread->state & TH_RUN)
- pset_share_decr(thread->processor_set);
+ if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
+ sched_share_decr();
}
thread->sched_mode |= TH_MODE_REALTIME;
thread_recompute_priority(thread);
result = KERN_INVALID_ARGUMENT;
break;
}
-
info = (thread_precedence_policy_t)policy_info;
s = splsched();
break;
}
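/*
 * Illustrative aside, not part of this diff: the TH_MODE_REALTIME
 * switch above is where a THREAD_TIME_CONSTRAINT_POLICY request lands.
 * A sketch with hypothetical figures; callers convert nanoseconds to
 * absolute-time units via mach_timebase_info():
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_time.h>
 *	#include <mach/thread_policy.h>
 *
 *	mach_timebase_info_data_t tb;
 *	mach_timebase_info(&tb);
 *	uint64_t ms = (1000000ULL * tb.denom) / tb.numer; // 1 ms in abs units
 *
 *	thread_time_constraint_policy_data_t tc = {
 *	    .period      = (uint32_t)(10 * ms), // wake every 10 ms
 *	    .computation = (uint32_t)(3 * ms),  // ~3 ms of work per period
 *	    .constraint  = (uint32_t)(5 * ms),  // finish within 5 ms of start
 *	    .preemptible = TRUE,
 *	};
 *	thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
 *	    (thread_policy_t)&tc, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
 *
 * The precedence case just above takes a signed importance the same way:
 *
 *	thread_precedence_policy_data_t prec = { .importance = -5 };
 *	thread_policy_set(mach_thread_self(), THREAD_PRECEDENCE_POLICY,
 *	    (thread_policy_t)&prec, THREAD_PRECEDENCE_POLICY_COUNT);
 */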
+ case THREAD_AFFINITY_POLICY:
+ {
+ thread_affinity_policy_t info;
+
+ if (!thread_affinity_is_supported()) {
+ result = KERN_NOT_SUPPORTED;
+ break;
+ }
+ if (count < THREAD_AFFINITY_POLICY_COUNT) {
+ result = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ info = (thread_affinity_policy_t) policy_info;
+ /*
+ * Unlock the thread mutex here and
+ * return directly after calling thread_affinity_set().
+ * This is necessary for correct lock ordering because
+ * thread_affinity_set() takes the task lock.
+ */
+ thread_mtx_unlock(thread);
+ return thread_affinity_set(thread, info->affinity_tag);
+ }
default:
result = KERN_INVALID_ARGUMENT;
break;
}
thread_mtx_unlock(thread);
-
return (result);
}
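/*
 * Illustrative aside, not part of this diff: the new
 * THREAD_AFFINITY_POLICY case is the kernel side of the Thread
 * Affinity API. A sketch that tags a worker thread so threads sharing
 * the tag are scheduled to share an L2 cache where possible; `worker'
 * is a hypothetical pthread_t, and 1 is an arbitrary tag, not a CPU
 * number:
 *
 *	#include <pthread.h>
 *	#include <mach/mach.h>
 *	#include <mach/thread_policy.h>
 *
 *	thread_affinity_policy_data_t ap = { .affinity_tag = 1 };
 *	thread_policy_set(pthread_mach_thread_np(worker),
 *	    THREAD_AFFINITY_POLICY, (thread_policy_t)&ap,
 *	    THREAD_AFFINITY_POLICY_COUNT);
 */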
void
thread_policy_reset(
thread_t thread)
{
+ spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
+
if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
thread->sched_mode &= ~TH_MODE_REALTIME;
if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
thread->sched_mode |= TH_MODE_TIMESHARE;
- if (thread->state & TH_RUN)
- pset_share_incr(thread->processor_set);
+ if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
+ sched_share_incr();
}
}
	else {
		thread->sched_mode &= ~TH_MODE_FAILSAFE;
	}
	thread->importance = 0;
	thread_recompute_priority(thread);
+
+	thread_unlock(thread);
+	splx(s);
}
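/*
 * Illustrative aside, not part of this diff: the splsched()/
 * thread_lock() pairing added above is the usual discipline for
 * mutating scheduling state:
 *
 *	s = splsched();        // raise to scheduling interrupt level
 *	thread_lock(thread);   // take the thread's scheduling spinlock
 *	...mutate sched_mode / importance / priority...
 *	thread_unlock(thread);
 *	splx(s);               // restore the previous interrupt level
 */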
kern_return_t
thread_policy_get(
	thread_t thread,
	thread_policy_flavor_t flavor,
	thread_policy_t policy_info,
	mach_msg_type_number_t *count,
	boolean_t *get_default)
break;
}
+ case THREAD_AFFINITY_POLICY:
+ {
+ thread_affinity_policy_t info;
+
+ if (!thread_affinity_is_supported()) {
+ result = KERN_NOT_SUPPORTED;
+ break;
+ }
+ if (*count < THREAD_AFFINITY_POLICY_COUNT) {
+ result = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ info = (thread_affinity_policy_t)policy_info;
+
+ if (!(*get_default))
+ info->affinity_tag = thread_affinity_get(thread);
+ else
+ info->affinity_tag = THREAD_AFFINITY_TAG_NULL;
+
+ break;
+ }
+
default:
result = KERN_INVALID_ARGUMENT;
break;
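/*
 * Illustrative aside, not part of this diff: reading a tag back
 * through the getter case above. get_default must be FALSE to fetch
 * the thread's current tag rather than the default (null) one:
 *
 *	thread_affinity_policy_data_t ap;
 *	mach_msg_type_number_t cnt = THREAD_AFFINITY_POLICY_COUNT;
 *	boolean_t get_default = FALSE;
 *	kern_return_t kr = thread_policy_get(mach_thread_self(),
 *	    THREAD_AFFINITY_POLICY, (thread_policy_t)&ap,
 *	    &cnt, &get_default);
 */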