/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*** ??? The following lines were picked up when code was incorporated
 *** into this file from `kern/syscall_subr.c.'  These should be moved
 *** with the code if it moves again.  Otherwise, they should be trimmed,
 *** based on the files included above.
 ***/
#include <mach/boolean.h>
#include <mach/thread_switch.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/ipc_kobject.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <mach/policy.h>
#include <kern/syscall_subr.h>
#include <mach/mach_host_server.h>
#include <mach/mach_syscalls.h>
/*** ??? End of lines picked up when code was incorporated
 *** into this file from `kern/syscall_subr.c.'
 ***/
#include <kern/mk_sp.h>
#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/assert.h>
#include <kern/thread.h>
#include <mach/mach_host_server.h>
/*** ??? The next two files supply the prototypes for `thread_set_policy()'
 *** and `thread_policy.'  These routines cannot stay here if they are
 *** exported Mach system calls.
 ***/
#include <mach/thread_act_server.h>
#include <mach/host_priv_server.h>
void
_mk_sp_thread_unblock(
	thread_t			thread)
{
	if (thread->state & TH_IDLE)
		return;

	if (thread->sched_mode & TH_MODE_REALTIME) {
		thread->realtime.deadline = mach_absolute_time();
		thread->realtime.deadline += thread->realtime.constraint;
	}

	thread->current_quantum = 0;
	thread->computation_metered = 0;
	thread->reason = AST_NONE;
}
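/*
 * Note (added commentary, not in the original source): a realtime
 * thread unblocking at absolute time T receives the fresh deadline
 * T + realtime.constraint, and its quantum and metered computation
 * are zeroed so CPU accounting restarts from the wakeup.
 */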
void
_mk_sp_thread_done(
	thread_t			old_thread,
	thread_t			new_thread,
	processor_t			processor)
{
	/*
	 * A running thread is being taken off a processor:
	 */
	processor->last_dispatch = mach_absolute_time();

	if (old_thread->state & TH_IDLE)
		return;

	/*
	 * Compute remainder of current quantum.
	 */
	if (	first_timeslice(processor)							&&
			processor->quantum_end > processor->last_dispatch		)
		old_thread->current_quantum =
			(processor->quantum_end - processor->last_dispatch);
	else
		old_thread->current_quantum = 0;

	if (old_thread->sched_mode & TH_MODE_REALTIME) {
		/*
		 * Cancel the deadline if the thread has
		 * consumed the entire quantum.
		 */
		if (old_thread->current_quantum == 0) {
			old_thread->realtime.deadline = UINT64_MAX;
			old_thread->reason |= AST_QUANTUM;
		}
	}
	else {
		/*
		 * For non-realtime threads treat a tiny
		 * remaining quantum as an expired quantum
		 * but include what's left next time.
		 */
		if (old_thread->current_quantum < min_std_quantum) {
			old_thread->reason |= AST_QUANTUM;
			old_thread->current_quantum += std_quantum;
		}
	}

	/*
	 * If we are doing a direct handoff then
	 * give the remainder of our quantum to
	 * the next thread.
	 */
	if ((old_thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) {
		new_thread->current_quantum = old_thread->current_quantum;
		old_thread->reason |= AST_QUANTUM;
		old_thread->current_quantum = 0;
	}

	old_thread->last_switch = processor->last_dispatch;

	old_thread->computation_metered +=
		(old_thread->last_switch - old_thread->computation_epoch);
}
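/*
 * Worked example (illustrative numbers, not from the original source):
 * if processor->quantum_end is 1000 abstime units and the thread is
 * switched out at last_dispatch == 400 during its first timeslice, it
 * keeps current_quantum = 600 for its next run.  A non-realtime
 * remainder below min_std_quantum is instead treated as an expired
 * quantum, with the residue folded into the next full quantum.
 */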
void
_mk_sp_thread_begin(
	thread_t			thread,
	processor_t			processor)
{
	/*
	 * The designated thread is beginning execution:
	 */
	if (thread->state & TH_IDLE) {
		timer_call_cancel(&processor->quantum_timer);
		processor->timeslice = 1;

		return;
	}

	if (thread->current_quantum == 0)
		thread_quantum_init(thread);

	processor->quantum_end =
			(processor->last_dispatch + thread->current_quantum);
	timer_call_enter1(&processor->quantum_timer,
							thread, processor->quantum_end);

	processor_timeslice_setup(processor, thread);

	thread->last_switch = processor->last_dispatch;

	thread->computation_epoch = thread->last_switch;
}
void
_mk_sp_thread_dispatch(
	thread_t			thread)
{
	if (thread->reason & AST_QUANTUM)
		thread_setrun(thread, SCHED_TAILQ);
	else
	if (thread->reason & AST_PREEMPT)
		thread_setrun(thread, SCHED_HEADQ);
	else
		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);

	thread->reason = AST_NONE;
}
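/*
 * Note (added summary, not in the original source): a thread whose
 * quantum expired is requeued at the tail of the run queue for its
 * priority, a preempted thread returns to the head so it resumes
 * first, and any other dispatch goes to the tail with a preemption
 * check (SCHED_PREEMPT) against the currently running thread.
 */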
/*
 *	thread_policy_common:
 *
 *	Set scheduling policy & priority for thread.
 */
static kern_return_t
thread_policy_common(
	thread_t			thread,
	integer_t			policy,
	integer_t			priority)
{
	spl_t				s;

	if (	thread == THREAD_NULL		||
			invalid_policy(policy)			)
		return(KERN_INVALID_ARGUMENT);

	s = splsched();
	thread_lock(thread);

	if (	!(thread->sched_mode & TH_MODE_REALTIME)	&&
			!(thread->safe_mode & TH_MODE_REALTIME)			) {
		if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
			integer_t	oldmode = (thread->sched_mode & TH_MODE_TIMESHARE);

			if (policy == POLICY_TIMESHARE && !oldmode) {
				thread->sched_mode |= TH_MODE_TIMESHARE;

				if (thread->state & TH_RUN)
					pset_share_incr(thread->processor_set);
			}
			else
			if (policy != POLICY_TIMESHARE && oldmode) {
				thread->sched_mode &= ~TH_MODE_TIMESHARE;

				if (thread->state & TH_RUN)
					pset_share_decr(thread->processor_set);
			}
		}
		else {
			if (policy == POLICY_TIMESHARE)
				thread->safe_mode |= TH_MODE_TIMESHARE;
			else
				thread->safe_mode &= ~TH_MODE_TIMESHARE;
		}

		if (priority >= thread->max_priority)
			priority = thread->max_priority - thread->task_priority;
		else
		if (priority >= MINPRI_KERNEL)
			priority -= MINPRI_KERNEL;
		else
		if (priority >= MINPRI_SYSTEM)
			priority -= MINPRI_SYSTEM;
		else
			priority -= BASEPRI_DEFAULT;

		priority += thread->task_priority;

		if (priority > thread->max_priority)
			priority = thread->max_priority;
		else
		if (priority < MINPRI)
			priority = MINPRI;

		thread->importance = priority - thread->task_priority;

		set_priority(thread, priority);
	}

	thread_unlock(thread);
	splx(s);

	return (KERN_SUCCESS);
}
/*
 *	thread_set_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given thread.  Policy can be any policy implemented by the
 *	processor set, whether enabled or not.
 */
kern_return_t
thread_set_policy(
	thread_act_t			thr_act,
	processor_set_t			pset,
	policy_t				policy,
	policy_base_t			base,
	mach_msg_type_number_t	base_count,
	policy_limit_t			limit,
	mach_msg_type_number_t	limit_count)
{
	thread_t				thread;
	int						max, bas;
	kern_return_t			result = KERN_SUCCESS;

	if (	thr_act == THR_ACT_NULL			||
			pset == PROCESSOR_SET_NULL		)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (thread == THREAD_NULL) {
		act_unlock_thread(thr_act);

		return(KERN_INVALID_ARGUMENT);
	}

	if (pset != thread->processor_set) {
		act_unlock_thread(thr_act);

		return(KERN_FAILURE);
	}

	switch (policy) {

	case POLICY_RR:
	{
		policy_rr_base_t		rr_base = (policy_rr_base_t) base;
		policy_rr_limit_t		rr_limit = (policy_rr_limit_t) limit;

		if (	base_count != POLICY_RR_BASE_COUNT		||
				limit_count != POLICY_RR_LIMIT_COUNT		) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		bas = rr_base->base_priority;
		max = rr_limit->max_priority;
		if (invalid_pri(bas) || invalid_pri(max)) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		break;
	}

	case POLICY_FIFO:
	{
		policy_fifo_base_t		fifo_base = (policy_fifo_base_t) base;
		policy_fifo_limit_t		fifo_limit = (policy_fifo_limit_t) limit;

		if (	base_count != POLICY_FIFO_BASE_COUNT	||
				limit_count != POLICY_FIFO_LIMIT_COUNT		) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		bas = fifo_base->base_priority;
		max = fifo_limit->max_priority;
		if (invalid_pri(bas) || invalid_pri(max)) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		break;
	}

	case POLICY_TIMESHARE:
	{
		policy_timeshare_base_t		ts_base = (policy_timeshare_base_t) base;
		policy_timeshare_limit_t	ts_limit =
						(policy_timeshare_limit_t) limit;

		if (	base_count != POLICY_TIMESHARE_BASE_COUNT	||
				limit_count != POLICY_TIMESHARE_LIMIT_COUNT		) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		bas = ts_base->base_priority;
		max = ts_limit->max_priority;
		if (invalid_pri(bas) || invalid_pri(max)) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		break;
	}

	default:
		result = KERN_INVALID_POLICY;
	}

	if (result != KERN_SUCCESS) {
		act_unlock_thread(thr_act);

		return(result);
	}

	result = thread_policy_common(thread, policy, bas);
	act_unlock_thread(thr_act);

	return(result);
}
/*
 *	thread_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given thread.  Policy must be a policy which is enabled for the
 *	processor set.  Change contained threads if requested.
 */
kern_return_t
thread_policy(
	thread_act_t			thr_act,
	policy_t				policy,
	policy_base_t			base,
	mach_msg_type_number_t	count,
	boolean_t				set_limit)
{
	thread_t				thread;
	processor_set_t			pset;
	kern_return_t			result = KERN_SUCCESS;
	policy_limit_t			limit;
	int						limcount;
	policy_rr_limit_data_t			rr_limit;
	policy_fifo_limit_data_t		fifo_limit;
	policy_timeshare_limit_data_t	ts_limit;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	pset = thread->processor_set;
	if (	thread == THREAD_NULL	||
			pset == PROCESSOR_SET_NULL		){
		act_unlock_thread(thr_act);

		return(KERN_INVALID_ARGUMENT);
	}

	if (	invalid_policy(policy)											||
			((POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO) & policy) == 0	) {
		act_unlock_thread(thr_act);

		return(KERN_INVALID_POLICY);
	}

	if (set_limit) {
		/*
		 *	Set scheduling limits to base priority.
		 */
		switch (policy) {

		case POLICY_RR:
		{
			policy_rr_base_t		rr_base;

			if (count != POLICY_RR_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_RR_LIMIT_COUNT;
			rr_base = (policy_rr_base_t) base;
			rr_limit.max_priority = rr_base->base_priority;
			limit = (policy_limit_t) &rr_limit;

			break;
		}

		case POLICY_FIFO:
		{
			policy_fifo_base_t		fifo_base;

			if (count != POLICY_FIFO_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_FIFO_LIMIT_COUNT;
			fifo_base = (policy_fifo_base_t) base;
			fifo_limit.max_priority = fifo_base->base_priority;
			limit = (policy_limit_t) &fifo_limit;

			break;
		}

		case POLICY_TIMESHARE:
		{
			policy_timeshare_base_t	ts_base;

			if (count != POLICY_TIMESHARE_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_TIMESHARE_LIMIT_COUNT;
			ts_base = (policy_timeshare_base_t) base;
			ts_limit.max_priority = ts_base->base_priority;
			limit = (policy_limit_t) &ts_limit;

			break;
		}

		default:
			result = KERN_INVALID_POLICY;
			break;
		}

	}
	else {
		/*
		 *	Use current scheduling limits.  Ensure that the
		 *	new base priority will not exceed current limits.
		 */
		switch (policy) {

		case POLICY_RR:
		{
			policy_rr_base_t		rr_base;

			if (count != POLICY_RR_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_RR_LIMIT_COUNT;
			rr_base = (policy_rr_base_t) base;
			if (rr_base->base_priority > thread->max_priority) {
				result = KERN_POLICY_LIMIT;
				break;
			}

			rr_limit.max_priority = thread->max_priority;
			limit = (policy_limit_t) &rr_limit;

			break;
		}

		case POLICY_FIFO:
		{
			policy_fifo_base_t		fifo_base;

			if (count != POLICY_FIFO_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_FIFO_LIMIT_COUNT;
			fifo_base = (policy_fifo_base_t) base;
			if (fifo_base->base_priority > thread->max_priority) {
				result = KERN_POLICY_LIMIT;
				break;
			}

			fifo_limit.max_priority = thread->max_priority;
			limit = (policy_limit_t) &fifo_limit;

			break;
		}

		case POLICY_TIMESHARE:
		{
			policy_timeshare_base_t	ts_base;

			if (count != POLICY_TIMESHARE_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_TIMESHARE_LIMIT_COUNT;
			ts_base = (policy_timeshare_base_t) base;
			if (ts_base->base_priority > thread->max_priority) {
				result = KERN_POLICY_LIMIT;
				break;
			}

			ts_limit.max_priority = thread->max_priority;
			limit = (policy_limit_t) &ts_limit;

			break;
		}

		default:
			result = KERN_INVALID_POLICY;
			break;
		}

	}

	act_unlock_thread(thr_act);

	if (result == KERN_SUCCESS)
		result = thread_set_policy(thr_act, pset,
						policy, base, count, limit, limcount);

	return(result);
}
/*
 *	Define shifts for simulating (5/8)**n
 */
shift_data_t	wait_shift[32] = {
	{1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
	{5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
	{11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
	{16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}};
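/*
 * Worked example (added commentary, not in the original source):
 * wait_shift[n] = {shift1, shift2} approximates multiplication by
 * (5/8)**n as (x >> shift1) + (x >> shift2), where a negative shift2
 * means the second term is subtracted:
 *
 *	n = 1: {1, 3}	x/2 + x/8 = 0.625x  = (5/8)**1
 *	n = 2: {1,-3}	x/2 - x/8 = 0.375x  ~ (5/8)**2 = 0.390625
 */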
/*
 *	do_priority_computation:
 *
 *	Calculate new priority for thread based on its base priority plus
 *	accumulated usage.  PRI_SHIFT and PRI_SHIFT_2 convert from
 *	usage to priorities.  SCHED_SHIFT converts for the scaling
 *	of the sched_usage field by SCHED_SCALE.  This scaling comes
 *	from the multiplication by sched_load (thread_timer_delta)
 *	in sched.h.  sched_load is calculated as a scaled overload
 *	factor in compute_mach_factor (mach_factor.c).
 */
#ifdef	PRI_SHIFT_2
#if	PRI_SHIFT_2 > 0
#define do_priority_computation(thread, pri)						\
	MACRO_BEGIN														\
	(pri) = (thread)->priority	/* start with base priority */		\
	    - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT))		\
	    - ((thread)->sched_usage >> (PRI_SHIFT_2 + SCHED_SHIFT));	\
	if ((pri) < MINPRI_STANDARD)									\
		(pri) = MINPRI_STANDARD;									\
	else															\
	if ((pri) > MAXPRI_STANDARD)									\
		(pri) = MAXPRI_STANDARD;									\
	MACRO_END
#else	/* PRI_SHIFT_2 */
#define do_priority_computation(thread, pri)						\
	MACRO_BEGIN														\
	(pri) = (thread)->priority	/* start with base priority */		\
	    - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT))		\
	    + ((thread)->sched_usage >> (SCHED_SHIFT - PRI_SHIFT_2));	\
	if ((pri) < MINPRI_STANDARD)									\
		(pri) = MINPRI_STANDARD;									\
	else															\
	if ((pri) > MAXPRI_STANDARD)									\
		(pri) = MAXPRI_STANDARD;									\
	MACRO_END
#endif	/* PRI_SHIFT_2 */
#else	/* defined(PRI_SHIFT_2) */
#define do_priority_computation(thread, pri)						\
	MACRO_BEGIN														\
	(pri) = (thread)->priority	/* start with base priority */		\
	    - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT));		\
	if ((pri) < MINPRI_STANDARD)									\
		(pri) = MINPRI_STANDARD;									\
	else															\
	if ((pri) > MAXPRI_STANDARD)									\
		(pri) = MAXPRI_STANDARD;									\
	MACRO_END
#endif	/* defined(PRI_SHIFT_2) */
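/*
 * Worked example (assumed shift values, not from the original source):
 * with PRI_SHIFT + SCHED_SHIFT == 18 and PRI_SHIFT_2 undefined, a
 * thread with base priority 31 and sched_usage == (1 << 20) gets
 *
 *	pri = 31 - ((1 << 20) >> 18) = 31 - 4 = 27
 *
 * before clamping to [MINPRI_STANDARD, MAXPRI_STANDARD].
 */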
void
set_priority(
	register thread_t	thread,
	register int		priority)
{
	thread->priority = priority;
	compute_priority(thread, FALSE);
}
/*
 *	compute_priority:
 *
 *	Reset the current scheduled priority of the
 *	thread according to its base priority if the
 *	thread has not been promoted or depressed.
 *
 *	If the thread is timesharing, adjust according
 *	to recent cpu usage.
 *
 *	The thread *must* be locked by the caller.
 */
void
compute_priority(
	register thread_t	thread,
	boolean_t			override_depress)
{
	register int		priority;

	if (	!(thread->sched_mode & TH_MODE_PROMOTED)		&&
			(!(thread->sched_mode & TH_MODE_ISDEPRESSED)	||
				 override_depress							)		) {
		if (thread->sched_mode & TH_MODE_TIMESHARE)
			do_priority_computation(thread, priority);
		else
			priority = thread->priority;

		set_sched_pri(thread, priority);
	}
}
/*
 *	compute_my_priority:
 *
 *	Version of compute priority for current thread.
 *	Caller must have thread locked and thread must
 *	be timesharing and not depressed.
 *
 *	Only used for priority updates.
 */
void
compute_my_priority(
	register thread_t	thread)
{
	register int		priority;

	do_priority_computation(thread, priority);
	assert(thread->runq == RUN_QUEUE_NULL);
	thread->sched_pri = priority;
}
/*
 *	update_priority
 *
 *	Cause the priority computation of a thread that has been
 *	sleeping or suspended to "catch up" with the system.  Thread
 *	*MUST* be locked by caller.  If thread is running, then this
 *	can only be called by the thread on itself.
 */
void
update_priority(
	register thread_t		thread)
{
	register unsigned int	ticks;
	register shift_t		shiftp;

	ticks = sched_tick - thread->sched_stamp;
	assert(ticks != 0);

	/*
	 *	If asleep for more than 30 seconds forget all
	 *	cpu_usage, else catch up on missed aging.
	 *	5/8 ** n is approximated by the two shifts
	 *	in the wait_shift array.
	 */
	thread->sched_stamp += ticks;
	thread_timer_delta(thread);
	if (ticks > 30) {
		thread->cpu_usage = 0;
		thread->sched_usage = 0;
	}
	else {
		thread->cpu_usage += thread->cpu_delta;
		thread->sched_usage += thread->sched_delta;

		shiftp = &wait_shift[ticks];
		if (shiftp->shift2 > 0) {
			thread->cpu_usage =
				(thread->cpu_usage >> shiftp->shift1) +
				(thread->cpu_usage >> shiftp->shift2);
			thread->sched_usage =
				(thread->sched_usage >> shiftp->shift1) +
				(thread->sched_usage >> shiftp->shift2);
		}
		else {
			thread->cpu_usage =
				(thread->cpu_usage >> shiftp->shift1) -
				(thread->cpu_usage >> -(shiftp->shift2));
			thread->sched_usage =
				(thread->sched_usage >> shiftp->shift1) -
				(thread->sched_usage >> -(shiftp->shift2));
		}
	}

	thread->cpu_delta = 0;
	thread->sched_delta = 0;

	/*
	 *	Check for fail-safe release.
	 */
	if (	(thread->sched_mode & TH_MODE_FAILSAFE)		&&
			thread->sched_stamp >= thread->safe_release		) {
		if (!(thread->safe_mode & TH_MODE_TIMESHARE)) {
			if (thread->safe_mode & TH_MODE_REALTIME) {
				thread->priority = BASEPRI_RTQUEUES;

				thread->sched_mode |= TH_MODE_REALTIME;
			}

			thread->sched_mode &= ~TH_MODE_TIMESHARE;

			if (thread->state & TH_RUN)
				pset_share_decr(thread->processor_set);

			if (!(thread->sched_mode & TH_MODE_ISDEPRESSED))
				set_sched_pri(thread, thread->priority);
		}

		thread->safe_mode = 0;
		thread->sched_mode &= ~TH_MODE_FAILSAFE;
	}

	/*
	 *	Recompute scheduled priority if appropriate.
	 */
	if (	(thread->sched_mode & TH_MODE_TIMESHARE)	&&
			!(thread->sched_mode & TH_MODE_PROMOTED)	&&
			!(thread->sched_mode & TH_MODE_ISDEPRESSED)		) {
		register int		new_pri;

		do_priority_computation(thread, new_pri);
		if (new_pri != thread->sched_pri) {
			run_queue_t		runq;

			runq = run_queue_remove(thread);
			thread->sched_pri = new_pri;
			if (runq != RUN_QUEUE_NULL)
				thread_setrun(thread, SCHED_TAILQ);
		}
	}
}
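/*
 * Example (added commentary, not in the original source): a thread
 * that slept for 2 sched_ticks has its cpu_usage and sched_usage
 * scaled by wait_shift[2] = {1,-3}, i.e. roughly (5/8)**2; after more
 * than 30 ticks the usage history is simply discarded.
 */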
/*
 *	thread_switch_continue:
 *
 *	Continuation routine for a thread switch.
 *
 *	Just need to arrange the return value gets sent out correctly and that
 *	we cancel the timer or the depression called for by the options to the
 *	thread_switch call.
 */
static void
_mk_sp_thread_switch_continue(void)
{
	register thread_t	self = current_thread();
	int					wait_result = self->wait_result;
	int					option = self->saved.swtch.option;

	if (option == SWITCH_OPTION_WAIT && wait_result != THREAD_TIMED_OUT)
		thread_cancel_timer();
	else
	if (option == SWITCH_OPTION_DEPRESS)
		_mk_sp_thread_depress_abort(self, FALSE);

	thread_syscall_return(KERN_SUCCESS);
	/*NOTREACHED*/
}
/*
 *	thread_switch:
 *
 *	Context switch.  User may supply thread hint.
 *
 *	Fixed priority threads that call this get what they asked for
 *	even if that violates priority order.
 */
kern_return_t
_mk_sp_thread_switch(
	thread_act_t			hint_act,
	int						option,
	mach_msg_timeout_t		option_time)
{
	register thread_t		self = current_thread();
	int						s;

	/*
	 *	Check and use thr_act hint if appropriate.  It is not
	 *	appropriate to give a hint that shares the current shuttle.
	 */
	if (hint_act != THR_ACT_NULL) {
		register thread_t		thread = act_lock_thread(hint_act);

		if (	thread != THREAD_NULL			&&
				thread != self					&&
				thread->top_act == hint_act			) {
			processor_t		processor;

			s = splsched();
			thread_lock(thread);

			/*
			 *	Check if the thread is in the right pset,
			 *	is not bound to a different processor,
			 *	and that realtime is not involved.
			 *
			 *	Next, pull it off its run queue.  If it
			 *	doesn't come, it's not eligible.
			 */
			processor = current_processor();
			if (processor->current_pri < BASEPRI_RTQUEUES			&&
				thread->sched_pri < BASEPRI_RTQUEUES				&&
				thread->processor_set == processor->processor_set	&&
				(thread->bound_processor == PROCESSOR_NULL	||
				 thread->bound_processor == processor)				&&
					run_queue_remove(thread) != RUN_QUEUE_NULL			) {
				/*
				 *	Hah, got it!!
				 */
				thread_unlock(thread);

				act_unlock_thread(hint_act);
				act_deallocate(hint_act);

				if (option == SWITCH_OPTION_WAIT)
					assert_wait_timeout(option_time, THREAD_ABORTSAFE);
				else
				if (option == SWITCH_OPTION_DEPRESS)
					_mk_sp_thread_depress_ms(option_time);

				self->saved.swtch.option = option;

				thread_run(self, _mk_sp_thread_switch_continue, thread);
				/* NOTREACHED */
			}

			thread_unlock(thread);
			splx(s);
		}

		act_unlock_thread(hint_act);
		act_deallocate(hint_act);
	}

	/*
	 *	No handoff hint supplied, or hint was wrong.  Call thread_block() in
	 *	hopes of running something else.  If nothing else is runnable,
	 *	thread_block will detect this.  WARNING: thread_switch with no
	 *	option will not do anything useful if the thread calling it is the
	 *	highest priority thread (can easily happen with a collection
	 *	of timesharing threads).
	 */
	if (option == SWITCH_OPTION_WAIT)
		assert_wait_timeout(option_time, THREAD_ABORTSAFE);
	else
	if (option == SWITCH_OPTION_DEPRESS)
		_mk_sp_thread_depress_ms(option_time);

	self->saved.swtch.option = option;

	thread_block_reason(_mk_sp_thread_switch_continue, AST_YIELD);

	if (option == SWITCH_OPTION_WAIT)
		thread_cancel_timer();
	else
	if (option == SWITCH_OPTION_DEPRESS)
		_mk_sp_thread_depress_abort(self, FALSE);

	return (KERN_SUCCESS);
}
/*
 *	Depress thread's priority to lowest possible for the specified interval,
 *	with a value of zero resulting in no timeout being scheduled.
 */
void
_mk_sp_thread_depress_abstime(
	uint64_t				interval)
{
	register thread_t		self = current_thread();
	uint64_t				deadline;
	spl_t					s;

	s = splsched();
	thread_lock(self);
	if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
		processor_t		myprocessor = self->last_processor;

		self->sched_pri = DEPRESSPRI;
		myprocessor->current_pri = self->sched_pri;
		self->sched_mode &= ~TH_MODE_PREEMPT;
		self->sched_mode |= TH_MODE_DEPRESS;

		if (interval != 0) {
			clock_absolutetime_interval_to_deadline(interval, &deadline);
			if (!timer_call_enter(&self->depress_timer, deadline))
				self->depress_timer_active++;
		}
	}
	thread_unlock(self);
	splx(s);
}
void
_mk_sp_thread_depress_ms(
	mach_msg_timeout_t		interval)
{
	uint64_t		abstime;

	clock_interval_to_absolutetime_interval(
							interval, 1000*NSEC_PER_USEC, &abstime);
	_mk_sp_thread_depress_abstime(abstime);
}
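/*
 * Example (added commentary, not in the original source): each unit
 * of the millisecond interval is scaled by 1000*NSEC_PER_USEC ==
 * 1,000,000 ns, so interval == 10 converts to 10 ms of absolute time
 * before being handed to _mk_sp_thread_depress_abstime().
 */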
/*
 *	Priority depression expiration.
 */
void
thread_depress_expire(
	timer_call_param_t		p0,
	timer_call_param_t		p1)
{
	thread_t		thread = p0;
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	if (--thread->depress_timer_active == 1) {
		thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
		compute_priority(thread, FALSE);
	}
	thread_unlock(thread);
	splx(s);
}
/*
 *	Prematurely abort priority depression if there is one.
 */
kern_return_t
_mk_sp_thread_depress_abort(
	register thread_t		thread,
	boolean_t				abortall)
{
	kern_return_t			result = KERN_NOT_DEPRESSED;
	spl_t					s;

	s = splsched();
	thread_lock(thread);
	if (abortall || !(thread->sched_mode & TH_MODE_POLLDEPRESS)) {
		if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
			thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
			compute_priority(thread, FALSE);
			result = KERN_SUCCESS;
		}

		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}
	thread_unlock(thread);
	splx(s);

	return (result);
}
void
_mk_sp_thread_perhaps_yield(
	thread_t			self)
{
	spl_t				s;

	assert(self == current_thread());

	s = splsched();
	if (!(self->sched_mode & (TH_MODE_REALTIME|TH_MODE_TIMESHARE))) {
		extern uint64_t		max_poll_computation;
		extern int			sched_poll_yield_shift;
		uint64_t			total_computation, abstime;

		abstime = mach_absolute_time();
		total_computation = abstime - self->computation_epoch;
		total_computation += self->computation_metered;
		if (total_computation >= max_poll_computation) {
			processor_t		myprocessor = current_processor();
			ast_t			preempt;

			thread_lock(self);
			if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
				self->sched_pri = DEPRESSPRI;
				myprocessor->current_pri = self->sched_pri;
				self->sched_mode &= ~TH_MODE_PREEMPT;
			}
			self->computation_epoch = abstime;
			self->computation_metered = 0;
			self->sched_mode |= TH_MODE_POLLDEPRESS;

			abstime += (total_computation >> sched_poll_yield_shift);
			if (!timer_call_enter(&self->depress_timer, abstime))
				self->depress_timer_active++;
			thread_unlock(self);

			if ((preempt = csw_check(self, myprocessor)) != AST_NONE)
				ast_on(preempt);
		}
	}
	splx(s);
}