/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *** ??? The following lines were picked up when code was incorporated
 *** into this file from `kern/syscall_subr.c.'  These should be moved
 *** with the code if it moves again.  Otherwise, they should be trimmed,
 *** based on the files included above.
 */
#include <mach/boolean.h>
#include <mach/thread_switch.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/ipc_kobject.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <mach/policy.h>
#include <kern/syscall_subr.h>
#include <mach/mach_host_server.h>
#include <mach/mach_syscalls.h>
/*
 *** ??? End of lines picked up when code was incorporated
 *** into this file from `kern/syscall_subr.c.'
 */
#include <kern/mk_sp.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/assert.h>
#include <kern/thread.h>
#include <mach/mach_host_server.h>
/*
 *** ??? The next two files supply the prototypes for `thread_set_policy()'
 *** and `thread_policy.'  These routines cannot stay here if they are
 *** exported Mach system calls.
 */
#include <mach/thread_act_server.h>
#include <mach/host_priv_server.h>
#include <sys/kdebug.h>
void
_mk_sp_thread_unblock(
    thread_t        thread)
{
    thread_setrun(thread, TAIL_Q);

    thread->current_quantum = 0;
    thread->computation_metered = 0;
    thread->reason = AST_NONE;

    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
            (int)thread, (int)thread->sched_pri, 0, 0, 0);
}
void
_mk_sp_thread_done(
    thread_t        old_thread,
    thread_t        new_thread,
    processor_t     processor)
{
    /*
     * A running thread is being taken off a processor:
     */
    clock_get_uptime(&processor->last_dispatch);
    if (!(old_thread->state & TH_IDLE)) {
        /*
         * Compute remainder of current quantum.
         */
        if (    first_quantum(processor)                            &&
                processor->quantum_end > processor->last_dispatch       )
            old_thread->current_quantum =
                (processor->quantum_end - processor->last_dispatch);
        else
            old_thread->current_quantum = 0;

        /*
         * For non-realtime threads treat a tiny
         * remaining quantum as an expired quantum
         * but include what's left next time.
         */
        if (!(old_thread->sched_mode & TH_MODE_REALTIME)) {
            if (old_thread->current_quantum < min_std_quantum) {
                old_thread->reason |= AST_QUANTUM;
                old_thread->current_quantum += std_quantum;
            }
        }
        else
        if (old_thread->current_quantum == 0)
            old_thread->reason |= AST_QUANTUM;

        /*
         * If we are doing a direct handoff then
         * give the remainder of our quantum to
         * the next thread.
         */
        if ((old_thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) {
            new_thread->current_quantum = old_thread->current_quantum;
            old_thread->reason |= AST_QUANTUM;
            old_thread->current_quantum = 0;
        }

        old_thread->last_switch = processor->last_dispatch;

        old_thread->computation_metered +=
            (old_thread->last_switch - old_thread->computation_epoch);
    }
}
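/*
 * Worked example (values are illustrative only, not from the original
 * source): if a non-realtime thread blocks with 20 us left of its
 * quantum and min_std_quantum is larger than that, the remainder is
 * treated as an expired quantum (AST_QUANTUM) and the 20 us is credited
 * on top of the fresh std_quantum the thread receives next time it runs.
 */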
void
_mk_sp_thread_begin(
    thread_t        thread,
    processor_t     processor)
{
    /*
     * The designated thread is beginning execution:
     */
    if (!(thread->state & TH_IDLE)) {
        if (thread->current_quantum == 0)
            thread->current_quantum =
                (thread->sched_mode & TH_MODE_REALTIME)?
                    thread->realtime.computation: std_quantum;

        processor->quantum_end =
            (processor->last_dispatch + thread->current_quantum);
        timer_call_enter1(&processor->quantum_timer,
                            thread, processor->quantum_end);

        processor->slice_quanta =
            (thread->sched_mode & TH_MODE_TIMESHARE)?
                processor->processor_set->set_quanta: 1;

        thread->last_switch = processor->last_dispatch;

        thread->computation_epoch = thread->last_switch;
    }
    else {
        timer_call_cancel(&processor->quantum_timer);

        processor->slice_quanta = 1;
    }
}
void
_mk_sp_thread_dispatch(
    thread_t        thread)
{
    /*
     * If the quantum expired, requeue behind threads of like
     * priority; otherwise, requeue at the head.
     */
    if (thread->reason & AST_QUANTUM)
        thread_setrun(thread, TAIL_Q);
    else
        thread_setrun(thread, HEAD_Q);

    thread->reason = AST_NONE;
}
/*
 *	thread_policy_common:
 *
 *	Set scheduling policy & priority for thread.
 */
kern_return_t
thread_policy_common(
    thread_t        thread,
    integer_t       policy,
    integer_t       priority)
{
    spl_t           s;

    if (    thread == THREAD_NULL       ||
            invalid_policy(policy)          )
        return(KERN_INVALID_ARGUMENT);

    s = splsched();
    thread_lock(thread);

    if (    !(thread->sched_mode & TH_MODE_REALTIME)    &&
            !(thread->safe_mode & TH_MODE_REALTIME)         ) {
        if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
            if (policy == POLICY_TIMESHARE)
                thread->sched_mode |= TH_MODE_TIMESHARE;
            else
                thread->sched_mode &= ~TH_MODE_TIMESHARE;
        }
        else {
            if (policy == POLICY_TIMESHARE)
                thread->safe_mode |= TH_MODE_TIMESHARE;
            else
                thread->safe_mode &= ~TH_MODE_TIMESHARE;
        }

        /*
         * Rebase the requested priority relative to the
         * band it falls in, then re-anchor it at the task
         * priority.
         */
        if (priority >= thread->max_priority)
            priority = thread->max_priority - thread->task_priority;
        else
        if (priority >= MINPRI_KERNEL)
            priority -= MINPRI_KERNEL;
        else
        if (priority >= MINPRI_SYSTEM)
            priority -= MINPRI_SYSTEM;
        else
            priority -= BASEPRI_DEFAULT;

        priority += thread->task_priority;

        if (priority > thread->max_priority)
            priority = thread->max_priority;
        else
        if (priority < MINPRI)
            priority = MINPRI;

        thread->importance = priority - thread->task_priority;

        set_priority(thread, priority);
    }

    thread_unlock(thread);
    splx(s);

    return (KERN_SUCCESS);
}
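/*
 * Worked example (symbolic, not from the original source): a priority
 * passed in at or above MINPRI_KERNEL is first made relative to the
 * kernel band (priority -= MINPRI_KERNEL) and then re-anchored at the
 * task priority (priority += thread->task_priority).  A request of
 * MINPRI_KERNEL + 2 thus becomes task_priority + 2, clamped to the
 * range [MINPRI, thread->max_priority].
 */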
/*
 *	thread_set_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given thread. Policy can be any policy implemented by the
 *	processor set, whether enabled or not.
 */
kern_return_t
thread_set_policy(
    thread_act_t            thr_act,
    processor_set_t         pset,
    policy_t                policy,
    policy_base_t           base,
    mach_msg_type_number_t  base_count,
    policy_limit_t          limit,
    mach_msg_type_number_t  limit_count)
{
    thread_t                thread;
    int                     max, bas;
    kern_return_t           result = KERN_SUCCESS;

    if (    thr_act == THR_ACT_NULL         ||
            pset == PROCESSOR_SET_NULL          )
        return (KERN_INVALID_ARGUMENT);

    thread = act_lock_thread(thr_act);
    if (thread == THREAD_NULL) {
        act_unlock_thread(thr_act);

        return(KERN_INVALID_ARGUMENT);
    }

    if (pset != thread->processor_set) {
        act_unlock_thread(thr_act);

        return(KERN_FAILURE);
    }

    switch (policy) {

    case POLICY_RR:
    {
        policy_rr_base_t        rr_base = (policy_rr_base_t) base;
        policy_rr_limit_t       rr_limit = (policy_rr_limit_t) limit;

        if (    base_count != POLICY_RR_BASE_COUNT      ||
                limit_count != POLICY_RR_LIMIT_COUNT        ) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        bas = rr_base->base_priority;
        max = rr_limit->max_priority;
        if (invalid_pri(bas) || invalid_pri(max)) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        break;
    }

    case POLICY_FIFO:
    {
        policy_fifo_base_t      fifo_base = (policy_fifo_base_t) base;
        policy_fifo_limit_t     fifo_limit = (policy_fifo_limit_t) limit;

        if (    base_count != POLICY_FIFO_BASE_COUNT    ||
                limit_count != POLICY_FIFO_LIMIT_COUNT      ) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        bas = fifo_base->base_priority;
        max = fifo_limit->max_priority;
        if (invalid_pri(bas) || invalid_pri(max)) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        break;
    }

    case POLICY_TIMESHARE:
    {
        policy_timeshare_base_t     ts_base = (policy_timeshare_base_t) base;
        policy_timeshare_limit_t    ts_limit =
                                        (policy_timeshare_limit_t) limit;

        if (    base_count != POLICY_TIMESHARE_BASE_COUNT   ||
                limit_count != POLICY_TIMESHARE_LIMIT_COUNT     ) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        bas = ts_base->base_priority;
        max = ts_limit->max_priority;
        if (invalid_pri(bas) || invalid_pri(max)) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        break;
    }

    default:
        result = KERN_INVALID_POLICY;
    }

    if (result != KERN_SUCCESS) {
        act_unlock_thread(thr_act);

        return(result);
    }

    result = thread_policy_common(thread, policy, bas);
    act_unlock_thread(thr_act);

    return(result);
}
/*
 *	thread_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given thread. Policy must be a policy which is enabled for the
 *	processor set. Change contained threads if requested.
 */
kern_return_t
thread_policy(
    thread_act_t            thr_act,
    policy_t                policy,
    policy_base_t           base,
    mach_msg_type_number_t  count,
    boolean_t               set_limit)
{
    thread_t                thread;
    processor_set_t         pset;
    kern_return_t           result = KERN_SUCCESS;
    policy_limit_t          limit;
    int                     limcount;
    policy_rr_limit_data_t          rr_limit;
    policy_fifo_limit_data_t        fifo_limit;
    policy_timeshare_limit_data_t   ts_limit;

    if (thr_act == THR_ACT_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread = act_lock_thread(thr_act);
    pset = thread->processor_set;
    if (    thread == THREAD_NULL   ||
            pset == PROCESSOR_SET_NULL      ){
        act_unlock_thread(thr_act);

        return(KERN_INVALID_ARGUMENT);
    }

    if (    invalid_policy(policy)                                      ||
            ((POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO) & policy) == 0    ) {
        act_unlock_thread(thr_act);

        return(KERN_INVALID_POLICY);
    }

    if (set_limit) {
        /*
         * Set scheduling limits to base priority.
         */
        switch (policy) {

        case POLICY_RR:
        {
            policy_rr_base_t rr_base;

            if (count != POLICY_RR_BASE_COUNT) {
                result = KERN_INVALID_ARGUMENT;
                break;
            }

            limcount = POLICY_RR_LIMIT_COUNT;
            rr_base = (policy_rr_base_t) base;
            rr_limit.max_priority = rr_base->base_priority;
            limit = (policy_limit_t) &rr_limit;

            break;
        }

        case POLICY_FIFO:
        {
            policy_fifo_base_t fifo_base;

            if (count != POLICY_FIFO_BASE_COUNT) {
                result = KERN_INVALID_ARGUMENT;
                break;
            }

            limcount = POLICY_FIFO_LIMIT_COUNT;
            fifo_base = (policy_fifo_base_t) base;
            fifo_limit.max_priority = fifo_base->base_priority;
            limit = (policy_limit_t) &fifo_limit;

            break;
        }

        case POLICY_TIMESHARE:
        {
            policy_timeshare_base_t ts_base;

            if (count != POLICY_TIMESHARE_BASE_COUNT) {
                result = KERN_INVALID_ARGUMENT;
                break;
            }

            limcount = POLICY_TIMESHARE_LIMIT_COUNT;
            ts_base = (policy_timeshare_base_t) base;
            ts_limit.max_priority = ts_base->base_priority;
            limit = (policy_limit_t) &ts_limit;

            break;
        }

        default:
            result = KERN_INVALID_POLICY;
            break;
        }

    }
    else {
        /*
         * Use current scheduling limits. Ensure that the
         * new base priority will not exceed current limits.
         */
        switch (policy) {

        case POLICY_RR:
        {
            policy_rr_base_t rr_base;

            if (count != POLICY_RR_BASE_COUNT) {
                result = KERN_INVALID_ARGUMENT;
                break;
            }

            limcount = POLICY_RR_LIMIT_COUNT;
            rr_base = (policy_rr_base_t) base;
            if (rr_base->base_priority > thread->max_priority) {
                result = KERN_POLICY_LIMIT;
                break;
            }

            rr_limit.max_priority = thread->max_priority;
            limit = (policy_limit_t) &rr_limit;

            break;
        }

        case POLICY_FIFO:
        {
            policy_fifo_base_t fifo_base;

            if (count != POLICY_FIFO_BASE_COUNT) {
                result = KERN_INVALID_ARGUMENT;
                break;
            }

            limcount = POLICY_FIFO_LIMIT_COUNT;
            fifo_base = (policy_fifo_base_t) base;
            if (fifo_base->base_priority > thread->max_priority) {
                result = KERN_POLICY_LIMIT;
                break;
            }

            fifo_limit.max_priority = thread->max_priority;
            limit = (policy_limit_t) &fifo_limit;

            break;
        }

        case POLICY_TIMESHARE:
        {
            policy_timeshare_base_t ts_base;

            if (count != POLICY_TIMESHARE_BASE_COUNT) {
                result = KERN_INVALID_ARGUMENT;
                break;
            }

            limcount = POLICY_TIMESHARE_LIMIT_COUNT;
            ts_base = (policy_timeshare_base_t) base;
            if (ts_base->base_priority > thread->max_priority) {
                result = KERN_POLICY_LIMIT;
                break;
            }

            ts_limit.max_priority = thread->max_priority;
            limit = (policy_limit_t) &ts_limit;

            break;
        }

        default:
            result = KERN_INVALID_POLICY;
            break;
        }

    }

    act_unlock_thread(thr_act);

    if (result == KERN_SUCCESS)
        result = thread_set_policy(thr_act, pset,
                        policy, base, count, limit, limcount);

    return(result);
}
/*
 *	Define shifts for simulating (5/8)**n
 */
shift_data_t	wait_shift[32] = {
	{1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
	{5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
	{11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
	{16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}};
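/*
 * Illustration (not in the original source): each {shift1,shift2} pair
 * approximates multiplication by (5/8)**n with two shifts.  A positive
 * shift2 means the two terms are added; a negative one means the second
 * term is subtracted:
 *
 *	wait_shift[1] = {1,3}:   (x >> 1) + (x >> 3) = 0.625x  = (5/8)**1 x
 *	wait_shift[2] = {1,-3}:  (x >> 1) - (x >> 3) = 0.375x ~= (5/8)**2 x
 *	                         (exact value 0.390625)
 */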
/*
 *	do_priority_computation:
 *
 *	Calculate new priority for thread based on its base priority plus
 *	accumulated usage.  PRI_SHIFT and PRI_SHIFT_2 convert from
 *	usage to priorities.  SCHED_SHIFT converts for the scaling
 *	of the sched_usage field by SCHED_SCALE.  This scaling comes
 *	from the multiplication by sched_load (thread_timer_delta)
 *	in sched.h.  sched_load is calculated as a scaled overload
 *	factor in compute_mach_factor (mach_factor.c).
 */
#ifdef	PRI_SHIFT_2
#if	PRI_SHIFT_2 > 0
#define do_priority_computation(thread, pri)						\
	MACRO_BEGIN														\
	(pri) = (thread)->priority		/* start with base priority */	\
	    - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT))		\
	    - ((thread)->sched_usage >> (PRI_SHIFT_2 + SCHED_SHIFT));	\
	if ((pri) < MINPRI_STANDARD)									\
		(pri) = MINPRI_STANDARD;									\
	else															\
	if ((pri) > MAXPRI_STANDARD)									\
		(pri) = MAXPRI_STANDARD;									\
	MACRO_END
#else	/* PRI_SHIFT_2 */
#define do_priority_computation(thread, pri)						\
	MACRO_BEGIN														\
	(pri) = (thread)->priority		/* start with base priority */	\
	    - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT))		\
	    + ((thread)->sched_usage >> (SCHED_SHIFT - PRI_SHIFT_2));	\
	if ((pri) < MINPRI_STANDARD)									\
		(pri) = MINPRI_STANDARD;									\
	else															\
	if ((pri) > MAXPRI_STANDARD)									\
		(pri) = MAXPRI_STANDARD;									\
	MACRO_END
#endif	/* PRI_SHIFT_2 */
#else	/* defined(PRI_SHIFT_2) */
#define do_priority_computation(thread, pri)						\
	MACRO_BEGIN														\
	(pri) = (thread)->priority		/* start with base priority */	\
	    - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT));		\
	if ((pri) < MINPRI_STANDARD)									\
		(pri) = MINPRI_STANDARD;									\
	else															\
	if ((pri) > MAXPRI_STANDARD)									\
		(pri) = MAXPRI_STANDARD;									\
	MACRO_END
#endif	/* defined(PRI_SHIFT_2) */
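/*
 * Worked example (hypothetical values, for illustration only): with a
 * base priority of 31 and shift constants such that
 * sched_usage >> (PRI_SHIFT + SCHED_SHIFT) evaluates to 6, the last
 * form above yields pri = 31 - 6 = 25, which is then clamped into the
 * [MINPRI_STANDARD, MAXPRI_STANDARD] band.  More accumulated usage
 * means a larger subtraction, hence a lower timesharing priority.
 */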
void
set_priority(
    register thread_t   thread,
    register int        priority)
{
    thread->priority = priority;
    compute_priority(thread, FALSE);
}
/*
 *	compute_priority:
 *
 *	Reset the current scheduled priority of the
 *	thread according to its base priority if the
 *	thread has not been promoted or depressed.
 *
 *	If the thread is timesharing, adjust according
 *	to recent cpu usage.
 *
 *	The thread *must* be locked by the caller.
 */
void
compute_priority(
    register thread_t   thread,
    boolean_t           override_depress)
{
    register int        priority;

    if (    !(thread->sched_mode & TH_MODE_PROMOTED)    &&
            (!(thread->sched_mode & TH_MODE_ISDEPRESSED)    ||
              override_depress                          )       ) {
        if (thread->sched_mode & TH_MODE_TIMESHARE)
            do_priority_computation(thread, priority);
        else
            priority = thread->priority;

        set_sched_pri(thread, priority);
    }
}
/*
 *	compute_my_priority:
 *
 *	Version of compute priority for current thread.
 *	Caller must have thread locked and thread must
 *	be timesharing and not depressed.
 *
 *	Only used for priority updates.
 */
void
compute_my_priority(
    register thread_t   thread)
{
    register int        priority;

    do_priority_computation(thread, priority);
    assert(thread->runq == RUN_QUEUE_NULL);
    thread->sched_pri = priority;
}
/*
 *	update_priority
 *
 *	Cause the priority computation of a thread that has been
 *	sleeping or suspended to "catch up" with the system.  Thread
 *	*MUST* be locked by caller.  If thread is running, then this
 *	can only be called by the thread on itself.
 */
void
update_priority(
    register thread_t       thread)
{
    register unsigned int   ticks;
    register shift_t        shiftp;

    ticks = sched_tick - thread->sched_stamp;

    /*
     *	If asleep for more than 30 seconds forget all
     *	cpu_usage, else catch up on missed aging.
     *	5/8 ** n is approximated by the two shifts
     *	in the wait_shift array.
     */
    thread->sched_stamp += ticks;
    thread_timer_delta(thread);
    if (ticks > 30) {
        thread->cpu_usage = 0;
        thread->sched_usage = 0;
    }
    else {
        thread->cpu_usage += thread->cpu_delta;
        thread->sched_usage += thread->sched_delta;

        shiftp = &wait_shift[ticks];
        if (shiftp->shift2 > 0) {
            thread->cpu_usage =
                (thread->cpu_usage >> shiftp->shift1) +
                (thread->cpu_usage >> shiftp->shift2);
            thread->sched_usage =
                (thread->sched_usage >> shiftp->shift1) +
                (thread->sched_usage >> shiftp->shift2);
        }
        else {
            thread->cpu_usage =
                (thread->cpu_usage >> shiftp->shift1) -
                (thread->cpu_usage >> -(shiftp->shift2));
            thread->sched_usage =
                (thread->sched_usage >> shiftp->shift1) -
                (thread->sched_usage >> -(shiftp->shift2));
        }
    }

    thread->cpu_delta = 0;
    thread->sched_delta = 0;

    /*
     *	Check for fail-safe release.
     */
    if (    (thread->sched_mode & TH_MODE_FAILSAFE)     &&
            thread->sched_stamp >= thread->safe_release     ) {
        if (!(thread->safe_mode & TH_MODE_TIMESHARE)) {
            if (thread->safe_mode & TH_MODE_REALTIME) {
                thread->priority = BASEPRI_REALTIME;

                thread->sched_mode |= TH_MODE_REALTIME;
            }

            thread->sched_mode &= ~TH_MODE_TIMESHARE;

            if (!(thread->sched_mode & TH_MODE_ISDEPRESSED))
                set_sched_pri(thread, thread->priority);
        }

        thread->safe_mode = 0;
        thread->sched_mode &= ~TH_MODE_FAILSAFE;
    }

    /*
     *	Recompute scheduled priority if appropriate.
     */
    if (    (thread->sched_mode & TH_MODE_TIMESHARE)    &&
            !(thread->sched_mode & TH_MODE_PROMOTED)    &&
            !(thread->sched_mode & TH_MODE_ISDEPRESSED)     ) {
        register int        new_pri;

        do_priority_computation(thread, new_pri);
        if (new_pri != thread->sched_pri) {
            run_queue_t     runq;

            runq = rem_runq(thread);
            thread->sched_pri = new_pri;
            if (runq != RUN_QUEUE_NULL)
                thread_setrun(thread, TAIL_Q);
        }
    }
}
/*
 *	thread_switch_continue:
 *
 *	Continuation routine for a thread switch.
 *
 *	Just need to arrange the return value gets sent out correctly and that
 *	we cancel the timer or the depression called for by the options to the
 *	thread_switch call.
 */
void
_mk_sp_thread_switch_continue(void)
{
    register thread_t   self = current_thread();
    int                 wait_result = self->wait_result;
    int                 option = self->saved.swtch.option;

    if (option == SWITCH_OPTION_WAIT && wait_result != THREAD_TIMED_OUT)
        thread_cancel_timer();
    else
    if (option == SWITCH_OPTION_DEPRESS)
        _mk_sp_thread_depress_abort(self, FALSE);

    thread_syscall_return(KERN_SUCCESS);
    /*NOTREACHED*/
}
/*
 *	thread_switch:
 *
 *	Context switch.  User may supply thread hint.
 *
 *	Fixed priority threads that call this get what they asked for
 *	even if that violates priority order.
 */
kern_return_t
_mk_sp_thread_switch(
    thread_act_t            hint_act,
    int                     option,
    mach_msg_timeout_t      option_time)
{
    register thread_t       self = current_thread();
    register processor_t    myprocessor;
    spl_t                   s;

    /*
     *	Check and use thr_act hint if appropriate.  It is not
     *	appropriate to give a hint that shares the current shuttle.
     */
    if (hint_act != THR_ACT_NULL) {
        register thread_t   thread = act_lock_thread(hint_act);

        if (    thread != THREAD_NULL           &&
                thread != self                  &&
                thread->top_act == hint_act         ) {
            s = splsched();
            thread_lock(thread);

            /*
             *	Check if the thread is in the right pset. Then
             *	pull it off its run queue.  If it
             *	doesn't come, then it's not eligible.
             */
            if (    thread->processor_set == self->processor_set    &&
                    rem_runq(thread) != RUN_QUEUE_NULL                  ) {
                thread_unlock(thread);

                act_unlock_thread(hint_act);
                act_deallocate(hint_act);

                if (option == SWITCH_OPTION_WAIT)
                    assert_wait_timeout(option_time, THREAD_ABORTSAFE);
                else
                if (option == SWITCH_OPTION_DEPRESS)
                    _mk_sp_thread_depress_ms(option_time);

                self->saved.swtch.option = option;

                thread_run(self, _mk_sp_thread_switch_continue, thread);
                /* NOTREACHED */
            }

            thread_unlock(thread);
            splx(s);
        }

        act_unlock_thread(hint_act);
        act_deallocate(hint_act);
    }

    /*
     *	No handoff hint supplied, or hint was wrong.  Call thread_block() in
     *	hopes of running something else.  If nothing else is runnable,
     *	thread_block will detect this.  WARNING: thread_switch with no
     *	option will not do anything useful if the thread calling it is the
     *	highest priority thread (can easily happen with a collection
     *	of timesharing threads).
     */
    mp_disable_preemption();
    myprocessor = current_processor();
    if (    option != SWITCH_OPTION_NONE                ||
            myprocessor->processor_set->runq.count > 0  ||
            myprocessor->runq.count > 0                     ) {
        mp_enable_preemption();

        if (option == SWITCH_OPTION_WAIT)
            assert_wait_timeout(option_time, THREAD_ABORTSAFE);
        else
        if (option == SWITCH_OPTION_DEPRESS)
            _mk_sp_thread_depress_ms(option_time);

        self->saved.swtch.option = option;

        thread_block_reason(_mk_sp_thread_switch_continue,
                (option == SWITCH_OPTION_DEPRESS)? AST_YIELD: AST_NONE);
    }
    else
        mp_enable_preemption();

    if (option == SWITCH_OPTION_WAIT)
        thread_cancel_timer();
    else
    if (option == SWITCH_OPTION_DEPRESS)
        _mk_sp_thread_depress_abort(self, FALSE);

    return (KERN_SUCCESS);
}
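/*
 * Usage sketch (illustrative, user-space view): this routine backs the
 * Mach thread_switch() trap.  A polling thread might yield and depress
 * itself for 10 ms with:
 *
 *	#include <mach/mach.h>
 *	#include <mach/thread_switch.h>
 *
 *	thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, 10);
 *
 * MACH_PORT_NULL supplies no handoff hint, so the depress path above
 * simply blocks in favor of any runnable thread.
 */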
/*
 * Depress thread's priority to lowest possible for the specified interval,
 * with a value of zero resulting in no timeout being scheduled.
 */
void
_mk_sp_thread_depress_abstime(
    uint64_t                interval)
{
    register thread_t       self = current_thread();
    uint64_t                deadline;
    spl_t                   s;

    s = splsched();
    thread_lock(self);
    if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
        processor_t     myprocessor = self->last_processor;

        self->sched_pri = DEPRESSPRI;
        myprocessor->current_pri = self->sched_pri;
        self->sched_mode &= ~TH_MODE_PREEMPT;
        self->sched_mode |= TH_MODE_DEPRESS;

        if (interval != 0) {
            clock_absolutetime_interval_to_deadline(interval, &deadline);
            if (!timer_call_enter(&self->depress_timer, deadline))
                self->depress_timer_active++;
        }
    }
    thread_unlock(self);
    splx(s);
}
void
_mk_sp_thread_depress_ms(
    mach_msg_timeout_t      interval)
{
    uint64_t        abstime;

    clock_interval_to_absolutetime_interval(
                        interval, 1000*NSEC_PER_USEC, &abstime);
    _mk_sp_thread_depress_abstime(abstime);
}
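/*
 * For reference: the scale factor 1000*NSEC_PER_USEC is one millisecond
 * expressed in nanoseconds, so the interval argument is interpreted as
 * a count of milliseconds when converted to absolute time above.
 */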
/*
 *	Priority depression expiration.
 */
void
thread_depress_expire(
    timer_call_param_t      p0,
    timer_call_param_t      p1)
{
    thread_t        thread = p0;
    spl_t           s;

    s = splsched();
    wake_lock(thread);
    if (--thread->depress_timer_active == 1) {
        thread_lock(thread);
        thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
        compute_priority(thread, FALSE);
        thread_unlock(thread);
    }
    else
    if (thread->depress_timer_active == 0)
        thread_wakeup_one(&thread->depress_timer_active);
    wake_unlock(thread);
    splx(s);
}
/*
 *	Prematurely abort priority depression if there is one.
 */
kern_return_t
_mk_sp_thread_depress_abort(
    register thread_t       thread,
    boolean_t               abortall)
{
    kern_return_t           result = KERN_NOT_DEPRESSED;
    spl_t                   s;

    s = splsched();
    wake_lock(thread);
    thread_lock(thread);
    if (abortall || !(thread->sched_mode & TH_MODE_POLLDEPRESS)) {
        if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
            thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
            compute_priority(thread, FALSE);
            result = KERN_SUCCESS;
        }

        thread_unlock(thread);

        if (timer_call_cancel(&thread->depress_timer))
            thread->depress_timer_active--;
    }
    else
        thread_unlock(thread);
    wake_unlock(thread);
    splx(s);

    return (result);
}
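/*
 * Design note (summarizing the logic above): unless `abortall' is set,
 * a depression created by _mk_sp_thread_perhaps_yield() (marked with
 * TH_MODE_POLLDEPRESS) is left in place, so ordinary depression aborts
 * do not cut short the penalty applied to polling threads.
 */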
void
_mk_sp_thread_perhaps_yield(
    thread_t            self)
{
    spl_t               s;

    assert(self == current_thread());

    s = splsched();
    thread_lock(self);
    if (!(self->sched_mode & (TH_MODE_REALTIME|TH_MODE_TIMESHARE))) {
        extern uint64_t     max_poll_computation;
        extern int          sched_poll_yield_shift;
        uint64_t            abstime, total_computation;

        clock_get_uptime(&abstime);
        total_computation = abstime - self->computation_epoch;
        total_computation += self->computation_metered;
        if (total_computation >= max_poll_computation) {
            processor_t     myprocessor = current_processor();
            ast_t           preempt;

            if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
                self->sched_pri = DEPRESSPRI;
                myprocessor->current_pri = self->sched_pri;
                self->sched_mode &= ~TH_MODE_PREEMPT;
            }
            self->computation_epoch = abstime;
            self->computation_metered = 0;
            self->sched_mode |= TH_MODE_POLLDEPRESS;
            thread_unlock(self);

            abstime += (total_computation >> sched_poll_yield_shift);
            if (!timer_call_enter(&self->depress_timer, abstime))
                self->depress_timer_active++;
            thread_lock(self);

            if ((preempt = csw_check(self, myprocessor)) != AST_NONE)
                ast_on(preempt);
        }
    }
    thread_unlock(self);
    splx(s);
}