/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */

#ifndef _KERN_SF_H_
#define _KERN_SF_H_

/*
 * The calls most likely to change are policy_thread_done and
 * policy_thread_begin. They're the policy calls related to
 * context switching. I'm not satisfied with what I have now and
 * these are the forms I'm trying next.
 *
 * I still have to merge the data type names from my different sandboxes,
 * and I don't really talk about locking except for the run queue locking.
 *
 * There is a big change for run queues: there is a single lock for an
 * entire run queue array structure (instead of a lock per queue header).
 * It's OK for a policy to reorganize a particular queue, BUT it has to
 * disable the queue header first (sched_queue_disable). Since a queue
 * header isn't shared by multiple policies and the framework won't touch
 * the queue header while it's disabled, the policy can do anything it
 * wants without taking out a global lock.
 *
 * The only run queue primitives provided are the really fast ones:
 * insert at the head (sched_queue_preempt); insert at the tail and,
 * if the queue was empty, check for preemption (sched_queue_add_preempt);
 * just insert at the tail (sched_queue_add_only); and remove
 * (sched_queue_remove). Everything else needs to be done by first
 * disabling the queue header (and then you can do whatever you want
 * to the queue).
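 *
 * For example, a policy that wants to re-sort one of its queues (rather
 * than use the fast primitives above) might follow the sequence below.
 * This is only an illustrative sketch of the protocol just described,
 * not code taken from the framework:
 *
 *      sched_priority_mask     mask;
 *
 *      sched_queue_disable(runq, &mask);  / * framework stops touching it * /
 *      ... reorder the disabled queue privately ...
 *      sched_queue_enable(runq, &mask);   / * re-publish; checks preemption * /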
 *
 * BTW, the convention here is:
 *
 *      policy_xxx - calls from the framework into policies (via the
 *      pointers in the policy object)
 *
 *      sched_xxx - scheduling mechanisms provided by the framework
 *      which can be called by policies.
 *
 * ----------
 *
 * Initializes an instance of a scheduling policy, assigning it the
 * corresponding policy_id and run queue headers.
 *
 * policy_init(
 *      sched_policy_object     *policy,
 *      int                     policy_id,
 *      int                     minpri, maxpri);
 *
 * Enable/disable a scheduling policy on a processor [set]
 *
 * policy_enable_processor_set(
 *      sched_policy_object     *policy,                / * policy * /
 *      processor_set_t         processor_set);         / * processor set * /
 *
 * policy_disable_processor_set(
 *      sched_policy_object     *policy,
 *      processor_set_t         processor_set);
 *
 * policy_enable_processor(
 *      sched_policy_object     *policy,
 *      processor_t             processor);
 *
 * policy_disable_processor(
 *      sched_policy_object     *policy,
 *      processor_t             processor);
 *
 * Notifies the policy that the thread has become runnable
 *
 * policy_thread_unblock(
 *      sched_policy_object     *policy,
 *      thread_t                thread);
 *
 * Notifies the policy that the current thread is done or
 * a new thread has been selected to run
 *
 * policy_thread_done(
 *      sched_policy_object     *policy,
 *      thread_t                *old_thread );
 *
 * policy_thread_begin(
 *      sched_policy_object     *policy,
 *      thread_t                *new_thread );
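 *
 * A sketch of the intended ordering at a context switch, inferred from
 * the descriptions above (not a verbatim framework sequence): the
 * outgoing thread's policy is told it is done, the framework selects
 * the next thread to run, and that thread's policy is told it is
 * beginning:
 *
 *      policy_thread_done(old_policy, &old_thread);
 *      ... the framework picks new_thread from the enabled run queues ...
 *      policy_thread_begin(new_policy, &new_thread);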
 *
 * Attach/detach a thread from the scheduling policy
 *
 * policy_thread_attach(
 *      sched_policy_object     *policy,
 *      thread_t                *thread );
 *
 * policy_thread_detach(
 *      sched_policy_object     *policy,
 *      thread_t                *thread );
 *
 * Set the thread's processor [set]
 *
 * policy_thread_processor(
 *      sched_policy_object     *policy,
 *      thread_t                *thread,
 *      processor_t             processor );
 *
 * policy_thread_processor_set(
 *      sched_policy_object     *policy,
 *      thread_t                *thread,
 *      processor_set_t         processor_set );
 *
 * Scheduling Framework Interfaces
 *
 * [En/dis]able particular run queue headers on a processor [set]:
 * lock the run queues, update the mask, unlock the run queues. If
 * enabling, check preemption.
 *
 * sched_queue_enable(
 *      run_queue_t             runq,
 *      sched_priority_mask     *mask );
 *
 * sched_queue_disable(
 *      run_queue_t             runq,
 *      sched_priority_mask     *mask );
 *
 * Lock the run queues, insert the thread at the head, unlock the
 * run queues and preempt (if possible).
 *
 * sched_queue_preempt(
 *      integer_t               priority,
 *      thread_t                thread,
 *      run_queue_t             run_queues );
 *
 * Lock the run queues, add the thread to the tail, unlock the run queues
 * and preempt if appropriate.
 *
 * sched_queue_add_preempt(
 *      integer_t               priority,
 *      thread_t                thread,
 *      run_queue_t             run_queues );
 *
 * Lock the run queues, add the thread to the tail, unlock the queues
 * but don't check for preemption.
 *
 * sched_queue_add_only(
 *      integer_t               priority,
 *      thread_t                thread,
 *      run_queue_t             run_queues );
 *
 * Lock the run queues, remove the thread's entry, unlock the run queues.
 *
 * sched_queue_remove(
 *      thread_t                thread );
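 *
 * Putting the two halves together, a policy's "thread unblocked" handler
 * might simply hand the thread back to the framework's fast path. This
 * is only a sketch of the intended usage; `thread->sched_pri' and
 * `policy->runq' are invented field names used purely for illustration:
 *
 *      policy_thread_unblock(
 *              sched_policy_object     *policy,
 *              thread_t                thread)
 *      {
 *              sched_queue_add_preempt(thread->sched_pri, thread,
 *                                      policy->runq);
 *      }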
 */

#include <kern/kern_types.h>
#include <kern/sched.h>
#include <mach/thread_switch.h>
#include <mach/mach_types.h>

/*
 * Type definitions and constants for MK Scheduling Framework
 */
typedef int     sf_return_t;

/* successful completion */
#define SF_SUCCESS                      0

/* error codes */
#define SF_FAILURE                      1
#define SF_KERN_RESOURCE_SHORTAGE       2

/* Scheduler Framework Object -- i.e., a scheduling policy */
typedef struct sf_policy        *sf_object_t;

/*
 * maximum number of scheduling policies that the Scheduling Framework
 * will host (picked arbitrarily)
 */
#define MAX_SCHED_POLS          10

/**********
 *
 * Scheduling Framework Interfaces
 *
 **********/

/* Initialize Framework and selected policies */
void    sf_init(void);

/**********
 *
 * Scheduling Policy Interfaces
 *
 **********/

/*
 * Operation list for scheduling policies. (Modeled after the
 * device operations `.../mach_kernel/device/conf.h.')
 *
 * Key to some abbreviations:
 *      sp = scheduling policy
 *      sf = scheduling framework
 */
typedef struct sched_policy_ops {
        /* Allow the policy to update the meta-priority of a running thread */
        sf_return_t     (*sp_thread_update_mpri)(
                                sf_object_t             policy,
                                thread_t                thread);

        /* Notify the policy that a thread has become runnable */
        sf_return_t     (*sp_thread_unblock)(
                                sf_object_t             policy,
                                thread_t                thread);

        /* Notify the policy that the current thread is done */
        /*** ??? Should this call take a `reason' argument? ***/
        sf_return_t     (*sp_thread_done)(
                                sf_object_t             policy,
                                thread_t                old_thread);

        /* Notify the policy that a new thread has been selected to run */
        sf_return_t     (*sp_thread_begin)(
                                sf_object_t             policy,
                                thread_t                new_thread);

        /* Notify the policy that an old thread is ready to be requeued */
        sf_return_t     (*sp_thread_dispatch)(
                                sf_object_t             policy,
                                thread_t                old_thread);

        /* Attach/detach a thread from the scheduling policy */
        sf_return_t     (*sp_thread_attach)(
                                sf_object_t             policy,
                                thread_t                thread);

        sf_return_t     (*sp_thread_detach)(
                                sf_object_t             policy,
                                thread_t                thread);

        /* Set the thread's processor [set] */
        sf_return_t     (*sp_thread_processor)(
                                sf_object_t             policy,
                                thread_t                *thread,
                                processor_t             processor);

        sf_return_t     (*sp_thread_processor_set)(
                                sf_object_t             policy,
                                thread_t                thread,
                                processor_set_t         processor_set);

        sf_return_t     (*sp_thread_setup)(
                                sf_object_t             policy,
                                thread_t                thread);

        /***
         *** ??? Hopefully, many of the following operations are only
         *** temporary. Consequently, they haven't been forced to take
         *** the same form as the others just yet. That should happen
         *** for all of those that end up being permanent additions to the
         *** list of standard operations.
         ***/

        /* `swtch_pri()' routine -- attempt to give up processor */
        void            (*sp_swtch_pri)(
                                sf_object_t             policy,
                                int                     pri);

        /* `thread_switch()' routine -- context switch w/ optional hint */
        kern_return_t   (*sp_thread_switch)(
                                sf_object_t             policy,
                                thread_act_t            hint_act,
                                int                     option,
                                mach_msg_timeout_t      option_time);

        /* `thread_depress_abort()' routine -- prematurely abort depression */
        kern_return_t   (*sp_thread_depress_abort)(
                                sf_object_t             policy,
                                thread_t                thread);

        /* `thread_depress_timeout()' routine -- timeout on depression */
        void            (*sp_thread_depress_timeout)(
                                sf_object_t             policy,
                                thread_t                thread);

        boolean_t       (*sp_thread_runnable)(
                                sf_object_t             policy,
                                thread_t                thread);

} sp_ops_t;
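
/*
 * A policy implementation would typically provide one such operations
 * table, filled in with its own entry points. The sketch below is
 * hypothetical: a FIFO policy and functions such as `fifo_thread_unblock'
 * are invented for illustration and are not declared anywhere in this
 * framework.
 *
 *      sp_ops_t        fifo_ops;
 *
 *      fifo_ops.sp_thread_unblock = fifo_thread_unblock;
 *      fifo_ops.sp_thread_done    = fifo_thread_done;
 *      fifo_ops.sp_thread_begin   = fifo_thread_begin;
 */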

/**********
 *
 * Scheduling Policy
 *
 **********/

typedef struct sf_policy {
        int             policy_id;      /* policy number */
        sp_ops_t        sp_ops;
} sched_policy_t;

#define SCHED_POLICY_NULL       ((sched_policy_t *) 0)

#define policy_id_to_sched_policy(policy_id)            \
        (((policy_id) != POLICY_NULL)?                  \
                &sched_policy[(policy_id)] : SCHED_POLICY_NULL)

extern sched_policy_t   sched_policy[MAX_SCHED_POLS];
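
/*
 * Framework-side code would typically dispatch through a policy's
 * operations table like this. The sketch is illustrative only:
 * `policy_id' stands for whatever policy identifier the caller has on
 * hand (e.g. recorded with the thread), and error handling is elided.
 *
 *      sched_policy_t  *sp = policy_id_to_sched_policy(policy_id);
 *
 *      if (sp != SCHED_POLICY_NULL)
 *              (void) sp->sp_ops.sp_thread_unblock(sp, thread);
 */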

#endif  /* _KERN_SF_H_ */