/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach/boolean.h>
#include <mach/thread_switch.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/counters.h>
#include <kern/ipc_kobject.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/spl.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <mach/policy.h>

#include <kern/syscall_subr.h>
#include <mach/mach_host_server.h>
#include <mach/mach_syscalls.h>
/*
 *	swtch and swtch_pri both attempt to context switch (logic in
 *	thread_block no-ops the context switch if nothing would happen).
 *	A boolean is returned that indicates whether there is anything
 *	else runnable.
 *
 *	This boolean can be used by a thread waiting on a
 *	lock or condition:  If FALSE is returned, the thread is justified
 *	in becoming a resource hog by continuing to spin because there's
 *	nothing else useful that the processor could do.  If TRUE is
 *	returned, the thread should make one more check on the
 *	lock and then be a good citizen and really suspend.
 */
96 register processor_t myprocessor
;
100 myprocessor
= current_processor();
101 result
= myprocessor
->runq
.count
> 0 ||
102 myprocessor
->processor_set
->runq
.count
> 0;
105 thread_syscall_return(result
);
111 __unused
struct swtch_args
*args
)
113 register processor_t myprocessor
;
116 disable_preemption();
117 myprocessor
= current_processor();
118 if ( myprocessor
->runq
.count
== 0 &&
119 myprocessor
->processor_set
->runq
.count
== 0 ) {
120 mp_enable_preemption();
126 counter(c_swtch_block
++);
128 thread_block_reason((thread_continue_t
)swtch_continue
, NULL
, AST_YIELD
);
130 disable_preemption();
131 myprocessor
= current_processor();
132 result
= myprocessor
->runq
.count
> 0 ||
133 myprocessor
->processor_set
->runq
.count
> 0;
140 swtch_pri_continue(void)
142 register processor_t myprocessor
;
145 thread_depress_abort_internal(current_thread());
147 disable_preemption();
148 myprocessor
= current_processor();
149 result
= myprocessor
->runq
.count
> 0 ||
150 myprocessor
->processor_set
->runq
.count
> 0;
151 mp_enable_preemption();
153 thread_syscall_return(result
);
159 __unused
struct swtch_pri_args
*args
)
161 register processor_t myprocessor
;
164 disable_preemption();
165 myprocessor
= current_processor();
166 if ( myprocessor
->runq
.count
== 0 &&
167 myprocessor
->processor_set
->runq
.count
== 0 ) {
168 mp_enable_preemption();
174 counter(c_swtch_pri_block
++);
176 thread_depress_abstime(std_quantum
);
178 thread_block_reason((thread_continue_t
)swtch_pri_continue
, NULL
, AST_YIELD
);
180 thread_depress_abort_internal(current_thread());
182 disable_preemption();
183 myprocessor
= current_processor();
184 result
= myprocessor
->runq
.count
> 0 ||
185 myprocessor
->processor_set
->runq
.count
> 0;
192 thread_switch_continue(void)
194 register thread_t self
= current_thread();
195 int option
= self
->saved
.swtch
.option
;
197 if (option
== SWITCH_OPTION_DEPRESS
)
198 thread_depress_abort_internal(self
);
200 thread_syscall_return(KERN_SUCCESS
);
207 * Context switch. User may supply thread hint.
211 struct thread_switch_args
*args
)
213 register thread_t thread
, self
= current_thread();
214 mach_port_name_t thread_name
= args
->thread_name
;
215 int option
= args
->option
;
216 mach_msg_timeout_t option_time
= args
->option_time
;
223 case SWITCH_OPTION_NONE
:
224 case SWITCH_OPTION_DEPRESS
:
225 case SWITCH_OPTION_WAIT
:
229 return (KERN_INVALID_ARGUMENT
);
233 * Translate the port name if supplied.
235 if (thread_name
!= MACH_PORT_NULL
) {
238 if (ipc_port_translate_send(self
->task
->itk_space
,
239 thread_name
, &port
) == KERN_SUCCESS
) {
243 thread
= convert_port_to_thread(port
);
244 ipc_port_release(port
);
246 if (thread
== self
) {
247 thread_deallocate_internal(thread
);
248 thread
= THREAD_NULL
;
252 thread
= THREAD_NULL
;
255 thread
= THREAD_NULL
;
258 * Try to handoff if supplied.
260 if (thread
!= THREAD_NULL
) {
261 processor_t processor
;
268 * Check if the thread is in the right pset,
269 * is not bound to a different processor,
270 * and that realtime is not involved.
272 * Next, pull it off its run queue. If it
273 * doesn't come, it's not eligible.
275 processor
= current_processor();
276 if (processor
->current_pri
< BASEPRI_RTQUEUES
&&
277 thread
->sched_pri
< BASEPRI_RTQUEUES
&&
278 thread
->processor_set
== processor
->processor_set
&&
279 (thread
->bound_processor
== PROCESSOR_NULL
||
280 thread
->bound_processor
== processor
) &&
281 run_queue_remove(thread
) != RUN_QUEUE_NULL
) {
285 thread_unlock(thread
);
287 thread_deallocate_internal(thread
);
289 if (option
== SWITCH_OPTION_WAIT
)
290 assert_wait_timeout((event_t
)assert_wait_timeout
, THREAD_ABORTSAFE
,
291 option_time
, 1000*NSEC_PER_USEC
);
293 if (option
== SWITCH_OPTION_DEPRESS
)
294 thread_depress_ms(option_time
);
296 self
->saved
.swtch
.option
= option
;
298 thread_run(self
, (thread_continue_t
)thread_switch_continue
, NULL
, thread
);
302 thread_unlock(thread
);
305 thread_deallocate(thread
);
308 if (option
== SWITCH_OPTION_WAIT
)
309 assert_wait_timeout((event_t
)assert_wait_timeout
, THREAD_ABORTSAFE
, option_time
, 1000*NSEC_PER_USEC
);
311 if (option
== SWITCH_OPTION_DEPRESS
)
312 thread_depress_ms(option_time
);
314 self
->saved
.swtch
.option
= option
;
316 thread_block_reason((thread_continue_t
)thread_switch_continue
, NULL
, AST_YIELD
);
318 if (option
== SWITCH_OPTION_DEPRESS
)
319 thread_depress_abort_internal(self
);
321 return (KERN_SUCCESS
);
325 * Depress thread's priority to lowest possible for the specified interval,
326 * with a value of zero resulting in no timeout being scheduled.
329 thread_depress_abstime(
332 register thread_t self
= current_thread();
338 if (!(self
->sched_mode
& TH_MODE_ISDEPRESSED
)) {
339 processor_t myprocessor
= self
->last_processor
;
341 self
->sched_pri
= DEPRESSPRI
;
342 myprocessor
->current_pri
= self
->sched_pri
;
343 self
->sched_mode
&= ~TH_MODE_PREEMPT
;
344 self
->sched_mode
|= TH_MODE_DEPRESS
;
347 clock_absolutetime_interval_to_deadline(interval
, &deadline
);
348 if (!timer_call_enter(&self
->depress_timer
, deadline
))
349 self
->depress_timer_active
++;
358 mach_msg_timeout_t interval
)
362 clock_interval_to_absolutetime_interval(
363 interval
, 1000*NSEC_PER_USEC
, &abstime
);
364 thread_depress_abstime(abstime
);
368 * Priority depression expiration.
371 thread_depress_expire(
375 thread_t thread
= p0
;
380 if (--thread
->depress_timer_active
== 0) {
381 thread
->sched_mode
&= ~TH_MODE_ISDEPRESSED
;
382 compute_priority(thread
, FALSE
);
384 thread_unlock(thread
);
389 * Prematurely abort priority depression if there is one.
392 thread_depress_abort_internal(
395 kern_return_t result
= KERN_NOT_DEPRESSED
;
400 if (!(thread
->sched_mode
& TH_MODE_POLLDEPRESS
)) {
401 if (thread
->sched_mode
& TH_MODE_ISDEPRESSED
) {
402 thread
->sched_mode
&= ~TH_MODE_ISDEPRESSED
;
403 compute_priority(thread
, FALSE
);
404 result
= KERN_SUCCESS
;
407 if (timer_call_cancel(&thread
->depress_timer
))
408 thread
->depress_timer_active
--;
410 thread_unlock(thread
);
422 assert(self
== current_thread());
425 if (!(self
->sched_mode
& (TH_MODE_REALTIME
|TH_MODE_TIMESHARE
))) {
426 uint64_t total_computation
, abstime
;
428 abstime
= mach_absolute_time();
429 total_computation
= abstime
- self
->computation_epoch
;
430 total_computation
+= self
->computation_metered
;
431 if (total_computation
>= max_poll_computation
) {
432 processor_t myprocessor
= current_processor();
436 if (!(self
->sched_mode
& TH_MODE_ISDEPRESSED
)) {
437 self
->sched_pri
= DEPRESSPRI
;
438 myprocessor
->current_pri
= self
->sched_pri
;
439 self
->sched_mode
&= ~TH_MODE_PREEMPT
;
441 self
->computation_epoch
= abstime
;
442 self
->computation_metered
= 0;
443 self
->sched_mode
|= TH_MODE_POLLDEPRESS
;
445 abstime
+= (total_computation
>> sched_poll_yield_shift
);
446 if (!timer_call_enter(&self
->depress_timer
, abstime
))
447 self
->depress_timer_active
++;
450 if ((preempt
= csw_check(self
, myprocessor
)) != AST_NONE
)