/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * The calls most likely to change are policy_thread_done and
 * policy_thread_begin; they're the policy calls related to
 * context switching.  I'm not satisfied with what I have now, and
 * these are the forms I'm trying next.
 *
 * I still have to merge the data type names from my different sandboxes,
 * and I don't really talk about locking, except for the run queue
 * locking.
 *
 * There is a big change for run queues: there is a single lock for an
 * entire run queue array structure (instead of a lock per queue header).
 * It's OK for a policy to reorganize a particular queue, BUT it has to
 * disable the queue header first (sched_queue_disable).  Since a queue
 * header isn't shared by multiple policies and the framework won't touch
 * the queue header if it's disabled, the policy can do anything it wants
 * without taking out a global lock.
 *
 * The only run queue primitives provided are the really fast ones:
 * insert at the head (sched_queue_preempt); insert at the tail and,
 * if the queue was empty, check for preemption
 * (sched_queue_add_preempt); just insert at the tail
 * (sched_queue_add_only); and remove (sched_queue_remove).  Everything
 * else needs to be done by first disabling the queue header (after
 * which you can do whatever you want to the queue), as sketched below.
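 *
 * A minimal sketch of that pattern, assuming a hypothetical policy
 * helper (the helper name and the reordering step are illustrative;
 * only sched_queue_disable/sched_queue_enable are framework calls):
 *
 *	static void
 *	my_policy_rotate_level(run_queue_t runq, int pri)
 *	{
 *		sched_priority_mask	mask;
 *
 *		... set up `mask' to cover only `pri' ...
 *		sched_queue_disable(runq, &mask);
 *		... reorder the queue at `pri'; the framework ignores a
 *		disabled header, so no global lock is needed ...
 *		sched_queue_enable(runq, &mask);
 *	}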
 *
 * BTW, the convention here is:
 *
 *	policy_xxx - calls from the framework into policies (via the
 *		pointers in the policy object)
 *
 *	sched_xxx - scheduling mechanisms provided by the framework
 *		which can be called by policies.
 *
 * Initializes an instance of a scheduling policy, assigning it the
 * corresponding policy_id and run queue headers:
 *
 *	policy_init(
 *		sched_policy_object	*policy,
 *		int			policy_id,
 *		int			minpri, maxpri);
 *
 * Enable/disable a scheduling policy on a processor [set]:
 *
 *	policy_enable_processor_set(
 *		sched_policy_object	*policy,
 *		processor_set_t		processor_set );
 *
 *	policy_disable_processor_set(
 *		sched_policy_object	*policy,
 *		processor_set_t		processor_set );
 *
 *	policy_enable_processor(
 *		sched_policy_object	*policy,
 *		processor_t		processor );
 *
 *	policy_disable_processor(
 *		sched_policy_object	*policy,
 *		processor_t		processor );
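 *
 * For example, a hypothetical bring-up sequence (the iteration is
 * pseudocode; only the policy_* calls are part of this interface)
 * might enable a policy on a processor set and then on each of its
 * processors:
 *
 *	policy_enable_processor_set(policy, pset);
 *	for each processor in pset:
 *		policy_enable_processor(policy, processor);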
 *
 * Notifies the policy that the thread has become runnable:
 *
 *	policy_thread_unblock(
 *		sched_policy_object	*policy,
 *		thread_t		*thread );
 *
 * Notifies the policy that the current thread is done, or that a new
 * thread has been selected to run:
 *
 *	policy_thread_done(
 *		sched_policy_object	*policy,
 *		thread_t		*old_thread );
 *
 *	policy_thread_begin(
 *		sched_policy_object	*policy,
 *		thread_t		*new_thread );
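 *
 * A sketch of the intended ordering during a context switch (inferred
 * from the descriptions above; the selection step is pseudocode):
 *
 *	policy_thread_done(old_policy, &old_thread);
 *	... the framework selects new_thread ...
 *	policy_thread_begin(new_policy, &new_thread);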
 *
 * Attach/detach a thread from the scheduling policy:
 *
 *	policy_thread_attach(
 *		sched_policy_object	*policy,
 *		thread_t		*thread );
 *
 *	policy_thread_detach(
 *		sched_policy_object	*policy,
 *		thread_t		*thread );
 *
 * Set the thread's processor [set]:
 *
 *	policy_thread_processor(
 *		sched_policy_object	*policy,
 *		thread_t		*thread,
 *		processor_t		processor );
 *
 *	policy_thread_processor_set(
 *		sched_policy_object	*policy,
 *		thread_t		*thread,
 *		processor_set_t		processor_set );
 *
 * Scheduling Framework Interfaces:
 *
 * [En/dis]able particular run queue headers on a processor [set].
 * Lock the run queues, update the mask, unlock the run queues.  If
 * enabling, check for preemption:
 *
 *	sched_queue_enable(
 *		run_queue_t		runq,
 *		sched_priority_mask	*mask );
 *
 *	sched_queue_disable(
 *		run_queue_t		runq,
 *		sched_priority_mask	*mask );
 *
 * Lock the run queues, insert the thread at the head, unlock the
 * run queues and preempt (if possible):
 *
 *	sched_queue_preempt(
 *		integer_t		priority,
 *		thread_t		thread,
 *		run_queue_t		run_queues );
 *
 * Lock the run queues, add the thread to the tail, unlock the run
 * queues and preempt if appropriate:
 *
 *	sched_queue_add_preempt(
 *		integer_t		priority,
 *		thread_t		thread,
 *		run_queue_t		run_queues );
 *
 * Lock the run queues, add the thread to the tail, unlock the queues,
 * but don't check for preemption:
 *
 *	sched_queue_add_only(
 *		integer_t		priority,
 *		thread_t		thread,
 *		run_queue_t		run_queues );
 *
 * Lock the run queues, remove the thread's entry, unlock the run queues:
 *
 *	sched_queue_remove(
 *		thread_t		thread );
 */
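/*
 * A minimal sketch of how a policy's unblock handler might use these
 * primitives (hypothetical policy code; the priority computation is
 * pseudocode):
 *
 *	my_policy_thread_unblock(sched_policy_object *policy, thread_t *thread)
 *	{
 *		... compute the thread's priority ...
 *		sched_queue_add_preempt(priority, *thread, run_queues);
 *	}
 *
 * sched_queue_add_preempt covers the common case; sched_queue_preempt
 * (insert at head) is for a thread that must run next, and
 * sched_queue_add_only skips the preemption check entirely.
 */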
#ifndef	_KERN_SF_H_
#define	_KERN_SF_H_

#include <kern/kern_types.h>
#include <kern/sched.h>
#include <mach/thread_switch.h>
#include <mach/mach_types.h>
/*
 * Type definitions and constants for MK Scheduling Framework
 */
typedef int	sf_return_t;

/* successful completion */
#define SF_SUCCESS			0

#define SF_KERN_RESOURCE_SHORTAGE	2
/* Scheduler Framework Object -- i.e., a scheduling policy */
typedef struct sf_policy	*sf_object_t;

/*
 * maximum number of scheduling policies that the Scheduling Framework
 * will host (picked arbitrarily)
 */
#define MAX_SCHED_POLS	10
/*
 * Scheduling Framework Interfaces
 */

/* Initialize Framework and selected policies */
void	sf_init(void);
/*
 * Scheduling Policy Interfaces
 */
/*
 * Operation list for scheduling policies.  (Modeled after the
 * device operations in `.../mach_kernel/device/conf.h'.)
 *
 * Key to some abbreviations:
 *	sp = scheduling policy
 *	sf = scheduling framework
 */
typedef struct sched_policy_ops {
	/* Allow the policy to update the meta-priority of a running thread */
	sf_return_t	(*sp_thread_update_mpri)(
				sf_object_t	policy,
				thread_t	thread);

	/* Notify the policy that a thread has become runnable */
	sf_return_t	(*sp_thread_unblock)(
				sf_object_t	policy,
				thread_t	thread);

	/* Notify the policy that the current thread is done */
	/*** ??? Should this call take a `reason' argument? ***/
	sf_return_t	(*sp_thread_done)(
				sf_object_t	policy,
				thread_t	old_thread);
	/* Notify the policy that a new thread has been selected to run */
	sf_return_t	(*sp_thread_begin)(
				sf_object_t	policy,
				thread_t	new_thread);

	/* Notify the policy that an old thread is ready to be requeued */
	sf_return_t	(*sp_thread_dispatch)(
				sf_object_t	policy,
				thread_t	old_thread);

	/* Attach/detach a thread from the scheduling policy */
	sf_return_t	(*sp_thread_attach)(
				sf_object_t	policy,
				thread_t	thread);

	sf_return_t	(*sp_thread_detach)(
				sf_object_t	policy,
				thread_t	thread);
	/* Set the thread's processor [set] */
	sf_return_t	(*sp_thread_processor)(
				sf_object_t	policy,
				thread_t	*thread,
				processor_t	processor);

	sf_return_t	(*sp_thread_processor_set)(
				sf_object_t	policy,
				thread_t	*thread,
				processor_set_t	processor_set);

	/* Set up scheduling state for a thread */
	sf_return_t	(*sp_thread_setup)(
				sf_object_t	policy,
				thread_t	thread);
	/***
	 *** ??? Hopefully, many of the following operations are only
	 *** temporary.  Consequently, they haven't been forced to take
	 *** the same form as the others just yet.  That should happen
	 *** for all of those that end up being permanent additions to
	 *** the list of standard operations.
	 ***/
	/* `swtch_pri()' routine -- attempt to give up processor */
	void		(*sp_swtch_pri)(
				sf_object_t	policy,
				int		pri);

	/* `thread_switch()' routine -- context switch w/ optional hint */
	kern_return_t	(*sp_thread_switch)(
				sf_object_t	policy,
				thread_act_t	hint_act,
				int		option,
				mach_msg_timeout_t option_time);

	/* `thread_depress_abort()' routine -- prematurely abort depression */
	kern_return_t	(*sp_thread_depress_abort)(
				sf_object_t	policy,
				thread_t	thread);

	/* `thread_depress_timeout()' routine -- timeout on depression */
	void		(*sp_thread_depress_timeout)(
				sf_object_t	policy,
				thread_t	thread);

	/* Check whether a thread is runnable under this policy */
	boolean_t	(*sp_thread_runnable)(
				sf_object_t	policy,
				thread_t	thread);
} sched_policy_ops_t;
/*
 * Scheduling policy object
 */
typedef struct sf_policy {
	int			policy_id;	/* policy number */
	sched_policy_ops_t	sp_ops;		/* operation vector */
} sched_policy_t;
#define SCHED_POLICY_NULL	((sched_policy_t *) 0)

#define policy_id_to_sched_policy(policy_id)	\
	(((policy_id) != POLICY_NULL) ?		\
		&sched_policy[(policy_id)] : SCHED_POLICY_NULL)
extern sched_policy_t	sched_policy[MAX_SCHED_POLS];
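/*
 * A sketch of how a policy might be wired up and dispatched to
 * (hypothetical names throughout: the `my_*' handlers and the thread's
 * `policy' field are illustrative assumptions, not part of this file):
 *
 *	static sched_policy_ops_t my_policy_ops = {
 *		.sp_thread_update_mpri	= my_thread_update_mpri,
 *		.sp_thread_unblock	= my_thread_unblock,
 *		.sp_thread_done		= my_thread_done,
 *		.sp_thread_begin	= my_thread_begin,
 *		...
 *	};
 *
 *	sched_policy_t	*pol = policy_id_to_sched_policy(thread->policy);
 *	if (pol != SCHED_POLICY_NULL)
 *		(void) pol->sp_ops.sp_thread_unblock(pol, thread);
 */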
#endif	/* _KERN_SF_H_ */