/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#include <mach/boolean.h>
#include <mach/thread_switch.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/counters.h>
#include <kern/ipc_kobject.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/spl.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <mach/policy.h>

#include <kern/syscall_subr.h>
#include <mach/mach_host_server.h>
#include <mach/mach_syscalls.h>

/*
 * swtch and swtch_pri both attempt to context switch (logic in
 * thread_block no-ops the context switch if nothing would happen).
 * A boolean is returned that indicates whether there is anything
 * else runnable.
 *
 * This boolean can be used by a thread waiting on a
 * lock or condition:  If FALSE is returned, the thread is justified
 * in becoming a resource hog by continuing to spin because there's
 * nothing else useful that the processor could do.  If TRUE is
 * returned, the thread should make one more check on the
 * lock and then be a good citizen and really suspend.
 */

static void
swtch_continue(void)
{
    register processor_t    myprocessor;
    boolean_t               result;

    disable_preemption();
    myprocessor = current_processor();
    result = myprocessor->runq.count > 0 ||
                myprocessor->processor_set->runq.count > 0;
    enable_preemption();

    thread_syscall_return(result);
    /*NOTREACHED*/
}

boolean_t
swtch(
    __unused struct swtch_args *args)
{
    register processor_t    myprocessor;
    boolean_t               result;

    disable_preemption();
    myprocessor = current_processor();
    if (myprocessor->runq.count == 0 &&
            myprocessor->processor_set->runq.count == 0) {
        mp_enable_preemption();

        return (FALSE);
    }
    enable_preemption();

    counter(c_swtch_block++);

    thread_block_reason((thread_continue_t)swtch_continue, NULL, AST_YIELD);

    disable_preemption();
    myprocessor = current_processor();
    result = myprocessor->runq.count > 0 ||
                myprocessor->processor_set->runq.count > 0;
    enable_preemption();

    return (result);
}
138 | ||
91447636 | 139 | static void |
0b4e3aa0 A |
140 | swtch_pri_continue(void) |
141 | { | |
142 | register processor_t myprocessor; | |
143 | boolean_t result; | |
144 | ||
91447636 | 145 | thread_depress_abort_internal(current_thread()); |
0b4e3aa0 | 146 | |
91447636 | 147 | disable_preemption(); |
0b4e3aa0 A |
148 | myprocessor = current_processor(); |
149 | result = myprocessor->runq.count > 0 || | |
150 | myprocessor->processor_set->runq.count > 0; | |
151 | mp_enable_preemption(); | |
152 | ||
153 | thread_syscall_return(result); | |
154 | /*NOTREACHED*/ | |
155 | } | |
156 | ||
1c79356b A |
157 | boolean_t |
158 | swtch_pri( | |
91447636 | 159 | __unused struct swtch_pri_args *args) |
1c79356b | 160 | { |
1c79356b A |
161 | register processor_t myprocessor; |
162 | boolean_t result; | |
1c79356b | 163 | |
91447636 | 164 | disable_preemption(); |
1c79356b | 165 | myprocessor = current_processor(); |
0b4e3aa0 | 166 | if ( myprocessor->runq.count == 0 && |
1c79356b | 167 | myprocessor->processor_set->runq.count == 0 ) { |
0b4e3aa0 | 168 | mp_enable_preemption(); |
1c79356b A |
169 | |
170 | return (FALSE); | |
171 | } | |
91447636 | 172 | enable_preemption(); |
0b4e3aa0 A |
173 | |
174 | counter(c_swtch_pri_block++); | |
175 | ||
91447636 | 176 | thread_depress_abstime(std_quantum); |
1c79356b | 177 | |
91447636 | 178 | thread_block_reason((thread_continue_t)swtch_pri_continue, NULL, AST_YIELD); |
1c79356b | 179 | |
91447636 | 180 | thread_depress_abort_internal(current_thread()); |
1c79356b | 181 | |
91447636 | 182 | disable_preemption(); |
1c79356b | 183 | myprocessor = current_processor(); |
0b4e3aa0 A |
184 | result = myprocessor->runq.count > 0 || |
185 | myprocessor->processor_set->runq.count > 0; | |
91447636 | 186 | enable_preemption(); |
1c79356b A |
187 | |
188 | return (result); | |
189 | } | |
190 | ||
91447636 A |
191 | static void |
192 | thread_switch_continue(void) | |
193 | { | |
194 | register thread_t self = current_thread(); | |
195 | int option = self->saved.swtch.option; | |
196 | ||
197 | if (option == SWITCH_OPTION_DEPRESS) | |
198 | thread_depress_abort_internal(self); | |
199 | ||
200 | thread_syscall_return(KERN_SUCCESS); | |
201 | /*NOTREACHED*/ | |
202 | } | |
203 | ||
1c79356b A |
204 | /* |
205 | * thread_switch: | |
206 | * | |
207 | * Context switch. User may supply thread hint. | |
208 | */ | |
209 | kern_return_t | |
210 | thread_switch( | |
91447636 | 211 | struct thread_switch_args *args) |
1c79356b | 212 | { |
91447636 A |
213 | register thread_t thread, self = current_thread(); |
214 | mach_port_name_t thread_name = args->thread_name; | |
215 | int option = args->option; | |
216 | mach_msg_timeout_t option_time = args->option_time; | |
1c79356b A |
217 | |
218 | /* | |
219 | * Process option. | |
220 | */ | |
221 | switch (option) { | |
222 | ||
223 | case SWITCH_OPTION_NONE: | |
224 | case SWITCH_OPTION_DEPRESS: | |
225 | case SWITCH_OPTION_WAIT: | |
226 | break; | |
227 | ||
228 | default: | |
229 | return (KERN_INVALID_ARGUMENT); | |
230 | } | |
231 | ||
91447636 A |
232 | /* |
233 | * Translate the port name if supplied. | |
234 | */ | |
1c79356b A |
235 | if (thread_name != MACH_PORT_NULL) { |
236 | ipc_port_t port; | |
237 | ||
91447636 | 238 | if (ipc_port_translate_send(self->task->itk_space, |
1c79356b A |
239 | thread_name, &port) == KERN_SUCCESS) { |
240 | ip_reference(port); | |
241 | ip_unlock(port); | |
242 | ||
91447636 | 243 | thread = convert_port_to_thread(port); |
1c79356b | 244 | ipc_port_release(port); |
91447636 A |
245 | |
246 | if (thread == self) { | |
247 | thread_deallocate_internal(thread); | |
248 | thread = THREAD_NULL; | |
249 | } | |
1c79356b | 250 | } |
91447636 A |
251 | else |
252 | thread = THREAD_NULL; | |
1c79356b | 253 | } |
91447636 A |
254 | else |
255 | thread = THREAD_NULL; | |
256 | ||
257 | /* | |
258 | * Try to handoff if supplied. | |
259 | */ | |
260 | if (thread != THREAD_NULL) { | |
261 | processor_t processor; | |
262 | spl_t s; | |
263 | ||
264 | s = splsched(); | |
265 | thread_lock(thread); | |
266 | ||
267 | /* | |
268 | * Check if the thread is in the right pset, | |
269 | * is not bound to a different processor, | |
270 | * and that realtime is not involved. | |
271 | * | |
272 | * Next, pull it off its run queue. If it | |
273 | * doesn't come, it's not eligible. | |
274 | */ | |
275 | processor = current_processor(); | |
276 | if (processor->current_pri < BASEPRI_RTQUEUES && | |
277 | thread->sched_pri < BASEPRI_RTQUEUES && | |
278 | thread->processor_set == processor->processor_set && | |
279 | (thread->bound_processor == PROCESSOR_NULL || | |
280 | thread->bound_processor == processor) && | |
281 | run_queue_remove(thread) != RUN_QUEUE_NULL ) { | |
282 | /* | |
283 | * Hah, got it!! | |
284 | */ | |
285 | thread_unlock(thread); | |
286 | ||
287 | thread_deallocate_internal(thread); | |
288 | ||
289 | if (option == SWITCH_OPTION_WAIT) | |
290 | assert_wait_timeout((event_t)assert_wait_timeout, THREAD_ABORTSAFE, | |
291 | option_time, 1000*NSEC_PER_USEC); | |
292 | else | |
293 | if (option == SWITCH_OPTION_DEPRESS) | |
294 | thread_depress_ms(option_time); | |
295 | ||
296 | self->saved.swtch.option = option; | |
297 | ||
298 | thread_run(self, (thread_continue_t)thread_switch_continue, NULL, thread); | |
299 | /* NOTREACHED */ | |
300 | } | |
301 | ||
302 | thread_unlock(thread); | |
303 | splx(s); | |
304 | ||
305 | thread_deallocate(thread); | |
306 | } | |
307 | ||
308 | if (option == SWITCH_OPTION_WAIT) | |
309 | assert_wait_timeout((event_t)assert_wait_timeout, THREAD_ABORTSAFE, option_time, 1000*NSEC_PER_USEC); | |
310 | else | |
311 | if (option == SWITCH_OPTION_DEPRESS) | |
312 | thread_depress_ms(option_time); | |
313 | ||
314 | self->saved.swtch.option = option; | |
315 | ||
316 | thread_block_reason((thread_continue_t)thread_switch_continue, NULL, AST_YIELD); | |
317 | ||
318 | if (option == SWITCH_OPTION_DEPRESS) | |
319 | thread_depress_abort_internal(self); | |
320 | ||
321 | return (KERN_SUCCESS); | |
322 | } | |
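
/*
 * Illustrative user-space sketch (not part of the original file): a
 * directed handoff using the thread_switch() trap implemented above,
 * assuming the user-level declaration from <mach/mach_traps.h> and the
 * option constants from <mach/thread_switch.h>.  The port argument and
 * helper name are hypothetical.
 */
#if 0   /* example only: user-space code, never compiled into the kernel */
#include <mach/mach.h>
#include <mach/mach_traps.h>
#include <mach/thread_switch.h>

static void
example_handoff(mach_port_t worker)
{
    /*
     * If `worker' is eligible (same pset, not bound to another
     * processor, not realtime, and can be pulled off its run queue),
     * the kernel hands the processor to it directly; otherwise the
     * caller simply yields with its priority depressed for the
     * 2 ms option_time.
     */
    (void) thread_switch(worker, SWITCH_OPTION_DEPRESS, 2);
}
#endif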

/*
 * Depress thread's priority to lowest possible for the specified interval,
 * with a value of zero resulting in no timeout being scheduled.
 */
void
thread_depress_abstime(
    uint64_t            interval)
{
    register thread_t   self = current_thread();
    uint64_t            deadline;
    spl_t               s;

    s = splsched();
    thread_lock(self);
    if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
        processor_t     myprocessor = self->last_processor;

        self->sched_pri = DEPRESSPRI;
        myprocessor->current_pri = self->sched_pri;
        self->sched_mode &= ~TH_MODE_PREEMPT;
        self->sched_mode |= TH_MODE_DEPRESS;

        if (interval != 0) {
            clock_absolutetime_interval_to_deadline(interval, &deadline);
            if (!timer_call_enter(&self->depress_timer, deadline))
                self->depress_timer_active++;
        }
    }
    thread_unlock(self);
    splx(s);
}

void
thread_depress_ms(
    mach_msg_timeout_t  interval)
{
    uint64_t    abstime;

    clock_interval_to_absolutetime_interval(
                        interval, 1000*NSEC_PER_USEC, &abstime);
    thread_depress_abstime(abstime);
}

/*
 * Priority depression expiration.
 */
void
thread_depress_expire(
    void            *p0,
    __unused void   *p1)
{
    thread_t    thread = p0;
    spl_t       s;

    s = splsched();
    thread_lock(thread);
    if (--thread->depress_timer_active == 0) {
        thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
        compute_priority(thread, FALSE);
    }
    thread_unlock(thread);
    splx(s);
}

/*
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort_internal(
    thread_t        thread)
{
    kern_return_t   result = KERN_NOT_DEPRESSED;
    spl_t           s;

    s = splsched();
    thread_lock(thread);
    if (!(thread->sched_mode & TH_MODE_POLLDEPRESS)) {
        if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
            thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
            compute_priority(thread, FALSE);
            result = KERN_SUCCESS;
        }

        if (timer_call_cancel(&thread->depress_timer))
            thread->depress_timer_active--;
    }
    thread_unlock(thread);
    splx(s);

    return (result);
}

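/*
 * thread_poll_yield:
 *
 * Yield point for threads that are neither timesharing nor realtime
 * (i.e. fixed-priority threads that could otherwise poll indefinitely).
 * Once the thread's accumulated computation reaches max_poll_computation,
 * depress its priority, mark it TH_MODE_POLLDEPRESS, arm the depress
 * timer to restore the priority later, and post a preemption AST if one
 * is now warranted.
 */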
void
thread_poll_yield(
    thread_t        self)
{
    spl_t           s;

    assert(self == current_thread());

    s = splsched();
    if (!(self->sched_mode & (TH_MODE_REALTIME|TH_MODE_TIMESHARE))) {
        uint64_t    total_computation, abstime;

        abstime = mach_absolute_time();
        total_computation = abstime - self->computation_epoch;
        total_computation += self->computation_metered;
        if (total_computation >= max_poll_computation) {
            processor_t     myprocessor = current_processor();
            ast_t           preempt;

            thread_lock(self);
            if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
                self->sched_pri = DEPRESSPRI;
                myprocessor->current_pri = self->sched_pri;
                self->sched_mode &= ~TH_MODE_PREEMPT;
            }
            self->computation_epoch = abstime;
            self->computation_metered = 0;
            self->sched_mode |= TH_MODE_POLLDEPRESS;

            abstime += (total_computation >> sched_poll_yield_shift);
            if (!timer_call_enter(&self->depress_timer, abstime))
                self->depress_timer_active++;
            thread_unlock(self);

            if ((preempt = csw_check(self, myprocessor)) != AST_NONE)
                ast_on(preempt);
        }
    }
    splx(s);
}