2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 * Carnegie Mellon requests users of this software to return to
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
53 #include <mach/boolean.h>
54 #include <mach/thread_switch.h>
55 #include <ipc/ipc_port.h>
56 #include <ipc/ipc_space.h>
57 #include <kern/counters.h>
58 #include <kern/ipc_kobject.h>
59 #include <kern/processor.h>
60 #include <kern/sched.h>
61 #include <kern/sched_prim.h>
63 #include <kern/task.h>
64 #include <kern/thread.h>
65 #include <mach/policy.h>
67 #include <kern/syscall_subr.h>
68 #include <mach/mach_host_server.h>
69 #include <mach/mach_syscalls.h>
/*
 *	swtch and swtch_pri both attempt to context switch (logic in
 *	thread_block no-ops the context switch if nothing would happen).
 *	A boolean is returned that indicates whether there is anything
 *	else runnable.  That's no excuse to spin, though.
 *
 *	This boolean can be used by a thread waiting on a
 *	lock or condition:  if FALSE is returned, the thread is justified
 *	in becoming a resource hog by continuing to spin because there's
 *	nothing else useful that the processor could do.  If TRUE is
 *	returned, the thread should make one more check on the
 *	lock and then be a good citizen and really suspend.
 */
88 register processor_t myprocessor
;
92 myprocessor
= current_processor();
93 result
= myprocessor
->runq
.count
> 0 ||
94 myprocessor
->processor_set
->runq
.count
> 0;
97 thread_syscall_return(result
);
103 __unused
struct swtch_args
*args
)
105 register processor_t myprocessor
;
108 disable_preemption();
109 myprocessor
= current_processor();
110 if ( myprocessor
->runq
.count
== 0 &&
111 myprocessor
->processor_set
->runq
.count
== 0 ) {
112 mp_enable_preemption();
118 counter(c_swtch_block
++);
120 thread_block_reason((thread_continue_t
)swtch_continue
, NULL
, AST_YIELD
);
122 disable_preemption();
123 myprocessor
= current_processor();
124 result
= myprocessor
->runq
.count
> 0 ||
125 myprocessor
->processor_set
->runq
.count
> 0;
132 swtch_pri_continue(void)
134 register processor_t myprocessor
;
137 thread_depress_abort_internal(current_thread());
139 disable_preemption();
140 myprocessor
= current_processor();
141 result
= myprocessor
->runq
.count
> 0 ||
142 myprocessor
->processor_set
->runq
.count
> 0;
143 mp_enable_preemption();
145 thread_syscall_return(result
);
151 __unused
struct swtch_pri_args
*args
)
153 register processor_t myprocessor
;
156 disable_preemption();
157 myprocessor
= current_processor();
158 if ( myprocessor
->runq
.count
== 0 &&
159 myprocessor
->processor_set
->runq
.count
== 0 ) {
160 mp_enable_preemption();
166 counter(c_swtch_pri_block
++);
168 thread_depress_abstime(std_quantum
);
170 thread_block_reason((thread_continue_t
)swtch_pri_continue
, NULL
, AST_YIELD
);
172 thread_depress_abort_internal(current_thread());
174 disable_preemption();
175 myprocessor
= current_processor();
176 result
= myprocessor
->runq
.count
> 0 ||
177 myprocessor
->processor_set
->runq
.count
> 0;
184 thread_switch_continue(void)
186 register thread_t self
= current_thread();
187 int option
= self
->saved
.swtch
.option
;
189 if (option
== SWITCH_OPTION_DEPRESS
)
190 thread_depress_abort_internal(self
);
192 thread_syscall_return(KERN_SUCCESS
);
199 * Context switch. User may supply thread hint.
203 struct thread_switch_args
*args
)
205 register thread_t thread
, self
= current_thread();
206 mach_port_name_t thread_name
= args
->thread_name
;
207 int option
= args
->option
;
208 mach_msg_timeout_t option_time
= args
->option_time
;
215 case SWITCH_OPTION_NONE
:
216 case SWITCH_OPTION_DEPRESS
:
217 case SWITCH_OPTION_WAIT
:
221 return (KERN_INVALID_ARGUMENT
);
225 * Translate the port name if supplied.
227 if (thread_name
!= MACH_PORT_NULL
) {
230 if (ipc_port_translate_send(self
->task
->itk_space
,
231 thread_name
, &port
) == KERN_SUCCESS
) {
235 thread
= convert_port_to_thread(port
);
236 ipc_port_release(port
);
238 if (thread
== self
) {
239 thread_deallocate_internal(thread
);
240 thread
= THREAD_NULL
;
244 thread
= THREAD_NULL
;
247 thread
= THREAD_NULL
;
250 * Try to handoff if supplied.
252 if (thread
!= THREAD_NULL
) {
253 processor_t processor
;
260 * Check if the thread is in the right pset,
261 * is not bound to a different processor,
262 * and that realtime is not involved.
264 * Next, pull it off its run queue. If it
265 * doesn't come, it's not eligible.
267 processor
= current_processor();
268 if (processor
->current_pri
< BASEPRI_RTQUEUES
&&
269 thread
->sched_pri
< BASEPRI_RTQUEUES
&&
270 thread
->processor_set
== processor
->processor_set
&&
271 (thread
->bound_processor
== PROCESSOR_NULL
||
272 thread
->bound_processor
== processor
) &&
273 run_queue_remove(thread
) != RUN_QUEUE_NULL
) {
277 thread_unlock(thread
);
279 thread_deallocate_internal(thread
);
281 if (option
== SWITCH_OPTION_WAIT
)
282 assert_wait_timeout((event_t
)assert_wait_timeout
, THREAD_ABORTSAFE
,
283 option_time
, 1000*NSEC_PER_USEC
);
285 if (option
== SWITCH_OPTION_DEPRESS
)
286 thread_depress_ms(option_time
);
288 self
->saved
.swtch
.option
= option
;
290 thread_run(self
, (thread_continue_t
)thread_switch_continue
, NULL
, thread
);
294 thread_unlock(thread
);
297 thread_deallocate(thread
);
300 if (option
== SWITCH_OPTION_WAIT
)
301 assert_wait_timeout((event_t
)assert_wait_timeout
, THREAD_ABORTSAFE
, option_time
, 1000*NSEC_PER_USEC
);
303 if (option
== SWITCH_OPTION_DEPRESS
)
304 thread_depress_ms(option_time
);
306 self
->saved
.swtch
.option
= option
;
308 thread_block_reason((thread_continue_t
)thread_switch_continue
, NULL
, AST_YIELD
);
310 if (option
== SWITCH_OPTION_DEPRESS
)
311 thread_depress_abort_internal(self
);
313 return (KERN_SUCCESS
);
317 * Depress thread's priority to lowest possible for the specified interval,
318 * with a value of zero resulting in no timeout being scheduled.
321 thread_depress_abstime(
324 register thread_t self
= current_thread();
330 if (!(self
->sched_mode
& TH_MODE_ISDEPRESSED
)) {
331 processor_t myprocessor
= self
->last_processor
;
333 self
->sched_pri
= DEPRESSPRI
;
334 myprocessor
->current_pri
= self
->sched_pri
;
335 self
->sched_mode
&= ~TH_MODE_PREEMPT
;
336 self
->sched_mode
|= TH_MODE_DEPRESS
;
339 clock_absolutetime_interval_to_deadline(interval
, &deadline
);
340 if (!timer_call_enter(&self
->depress_timer
, deadline
))
341 self
->depress_timer_active
++;
350 mach_msg_timeout_t interval
)
354 clock_interval_to_absolutetime_interval(
355 interval
, 1000*NSEC_PER_USEC
, &abstime
);
356 thread_depress_abstime(abstime
);
360 * Priority depression expiration.
363 thread_depress_expire(
367 thread_t thread
= p0
;
372 if (--thread
->depress_timer_active
== 0) {
373 thread
->sched_mode
&= ~TH_MODE_ISDEPRESSED
;
374 compute_priority(thread
, FALSE
);
376 thread_unlock(thread
);
381 * Prematurely abort priority depression if there is one.
384 thread_depress_abort_internal(
387 kern_return_t result
= KERN_NOT_DEPRESSED
;
392 if (!(thread
->sched_mode
& TH_MODE_POLLDEPRESS
)) {
393 if (thread
->sched_mode
& TH_MODE_ISDEPRESSED
) {
394 thread
->sched_mode
&= ~TH_MODE_ISDEPRESSED
;
395 compute_priority(thread
, FALSE
);
396 result
= KERN_SUCCESS
;
399 if (timer_call_cancel(&thread
->depress_timer
))
400 thread
->depress_timer_active
--;
402 thread_unlock(thread
);
414 assert(self
== current_thread());
417 if (!(self
->sched_mode
& (TH_MODE_REALTIME
|TH_MODE_TIMESHARE
))) {
418 uint64_t total_computation
, abstime
;
420 abstime
= mach_absolute_time();
421 total_computation
= abstime
- self
->computation_epoch
;
422 total_computation
+= self
->computation_metered
;
423 if (total_computation
>= max_poll_computation
) {
424 processor_t myprocessor
= current_processor();
428 if (!(self
->sched_mode
& TH_MODE_ISDEPRESSED
)) {
429 self
->sched_pri
= DEPRESSPRI
;
430 myprocessor
->current_pri
= self
->sched_pri
;
431 self
->sched_mode
&= ~TH_MODE_PREEMPT
;
433 self
->computation_epoch
= abstime
;
434 self
->computation_metered
= 0;
435 self
->sched_mode
|= TH_MODE_POLLDEPRESS
;
437 abstime
+= (total_computation
>> sched_poll_yield_shift
);
438 if (!timer_call_enter(&self
->depress_timer
, abstime
))
439 self
->depress_timer_active
++;
442 if ((preempt
= csw_check(self
, myprocessor
)) != AST_NONE
)