/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *** ??? The following lines were picked up when code was incorporated
 *** into this file from `kern/syscall_subr.c.'  These should be moved
 *** with the code if it moves again.  Otherwise, they should be trimmed,
 *** based on the files included above.
 */
#include <mach/boolean.h>
#include <mach/thread_switch.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/ipc_kobject.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <mach/policy.h>
#include <kern/syscall_subr.h>
#include <mach/mach_host_server.h>
#include <mach/mach_syscalls.h>
/*
 *** ??? End of lines picked up when code was incorporated
 *** into this file from `kern/syscall_subr.c.'
 */
#include <kern/mk_sp.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/assert.h>
#include <kern/thread.h>
#include <mach/mach_host_server.h>
/*
 *** ??? The next two files supply the prototypes for `thread_set_policy()'
 *** and `thread_policy.'  These routines cannot stay here if they are
 *** exported Mach system calls.
 */
#include <mach/thread_act_server.h>
#include <mach/host_priv_server.h>
#include <sys/kdebug.h>
void
_mk_sp_thread_unblock(
	thread_t		thread)
{
	if (!(thread->state & TH_IDLE))
		thread_setrun(thread, TRUE, TAIL_Q);

	thread->current_quantum = 0;
	thread->metered_computation = 0;
	thread->reason = AST_NONE;

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED,MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
				(int)thread, (int)thread->sched_pri, 0, 0, 0);
}
void
_mk_sp_thread_done(
	thread_t		thread)
{
	processor_t		myprocessor = cpu_to_processor(cpu_number());

	/*
	 * A running thread is being taken off a processor:
	 */
	clock_get_uptime(&myprocessor->last_dispatch);
	if (!(thread->state & TH_IDLE)) {
		/*
		 * Compute the remainder of the current quantum.
		 */
		if (	first_quantum(myprocessor)							&&
				myprocessor->quantum_end > myprocessor->last_dispatch	)
			thread->current_quantum =
				(myprocessor->quantum_end - myprocessor->last_dispatch);
		else
			thread->current_quantum = 0;

		/*
		 * For non-realtime threads treat a tiny remaining quantum
		 * as an expired quantum but include what's left next time.
		 */
		if (!(thread->sched_mode & TH_MODE_REALTIME)) {
			if (thread->current_quantum < min_std_quantum) {
				thread->reason |= AST_QUANTUM;
				thread->current_quantum += std_quantum;
			}
		}
		else
		if (thread->current_quantum == 0)
			thread->reason |= AST_QUANTUM;

		thread->metered_computation +=
			(myprocessor->last_dispatch - thread->computation_epoch);
	}
}
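/*
 * Illustrative arithmetic, not from the original source (the time values
 * are assumptions): if a timeshare thread is switched out with 200us of
 * its quantum remaining and min_std_quantum corresponds to 250us, the
 * remainder is too small to be worth redispatching on.  AST_QUANTUM is
 * therefore noted and the 200us is folded into the fresh std_quantum the
 * thread receives the next time it runs:
 *
 *	thread->reason |= AST_QUANTUM;
 *	thread->current_quantum += std_quantum;	[* 200us + std_quantum *]
 */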
void
_mk_sp_thread_begin(
	thread_t		thread)
{
	processor_t		myprocessor = cpu_to_processor(cpu_number());

	/*
	 * The designated thread is beginning execution:
	 */
	if (!(thread->state & TH_IDLE)) {
		/*
		 * Set up the quantum timer and timeslice.
		 */
		if (thread->current_quantum == 0)
			thread->current_quantum =
				(thread->sched_mode & TH_MODE_REALTIME)?
					thread->realtime.computation: std_quantum;

		myprocessor->quantum_end =
				(myprocessor->last_dispatch + thread->current_quantum);
		timer_call_enter1(&myprocessor->quantum_timer,
							thread, myprocessor->quantum_end);

		myprocessor->slice_quanta =
			(thread->sched_mode & TH_MODE_TIMESHARE)?
				myprocessor->processor_set->set_quanta: 1;

		thread->computation_epoch = myprocessor->last_dispatch;
	}
	else {
		timer_call_cancel(&myprocessor->quantum_timer);

		myprocessor->slice_quanta = 1;
	}
}
void
_mk_sp_thread_dispatch(
	thread_t		old_thread)
{
	if (old_thread->reason & AST_QUANTUM)
		thread_setrun(old_thread, FALSE, TAIL_Q);
	else
		thread_setrun(old_thread, FALSE, HEAD_Q);

	old_thread->reason = AST_NONE;
}
/*
 * thread_policy_common:
 *
 * Set scheduling policy & priority for thread.
 */
static kern_return_t
thread_policy_common(
	thread_t		thread,
	integer_t		policy,
	integer_t		priority)
{
	spl_t			s;

	if (	thread == THREAD_NULL		||
			invalid_policy(policy)		)
		return(KERN_INVALID_ARGUMENT);

	s = splsched();
	thread_lock(thread);

	if (	!(thread->sched_mode & TH_MODE_REALTIME)	&&
			!(thread->safe_mode & TH_MODE_REALTIME)		) {
		if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
			if (policy == POLICY_TIMESHARE)
				thread->sched_mode |= TH_MODE_TIMESHARE;
			else
				thread->sched_mode &= ~TH_MODE_TIMESHARE;
		}
		else {
			if (policy == POLICY_TIMESHARE)
				thread->safe_mode |= TH_MODE_TIMESHARE;
			else
				thread->safe_mode &= ~TH_MODE_TIMESHARE;
		}

		if (priority >= thread->max_priority)
			priority = thread->max_priority - thread->task_priority;
		else
		if (priority >= MINPRI_KERNEL)
			priority -= MINPRI_KERNEL;
		else
		if (priority >= MINPRI_SYSTEM)
			priority -= MINPRI_SYSTEM;
		else
			priority -= BASEPRI_DEFAULT;

		priority += thread->task_priority;

		if (priority > thread->max_priority)
			priority = thread->max_priority;

		thread->importance = priority - thread->task_priority;

		/*
		 * Set priorities.  If a depression is in progress,
		 * change the priority to restore.
		 */
		if (thread->depress_priority >= 0)
			thread->depress_priority = priority;
		else {
			thread->priority = priority;
			compute_priority(thread, TRUE);

			/*
			 * If the current thread has changed its
			 * priority let the ast code decide whether
			 * a different thread should run.
			 */
			if (thread == current_thread())
				ast_on(AST_BLOCK);
		}
	}

	thread_unlock(thread);
	splx(s);

	return (KERN_SUCCESS);
}
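/*
 * Worked example for the normalization above (the constant values are
 * illustrative assumptions, not taken from this file): assuming
 * BASEPRI_DEFAULT is 31, a requested priority of 33 that falls below
 * MINPRI_SYSTEM and below the thread's max_priority is rebased as
 *
 *	priority = 33 - BASEPRI_DEFAULT = +2	[* relative request *]
 *	priority += task_priority (31)  = 33	[* rebased on the task *]
 *	importance = 33 - 31            = +2
 *
 * so the caller's absolute request becomes an importance relative to
 * the owning task's priority, clamped by max_priority.
 */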
/*
 *	thread_set_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given thread.  Policy can be any policy implemented by the
 *	processor set, whether enabled or not.
 */
kern_return_t
thread_set_policy(
	thread_act_t			thr_act,
	processor_set_t			pset,
	policy_t				policy,
	policy_base_t			base,
	mach_msg_type_number_t	base_count,
	policy_limit_t			limit,
	mach_msg_type_number_t	limit_count)
{
	thread_t				thread;
	int						max, bas;
	kern_return_t			result = KERN_SUCCESS;

	if (	thr_act == THR_ACT_NULL			||
			pset == PROCESSOR_SET_NULL		)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (thread == THREAD_NULL) {
		act_unlock_thread(thr_act);

		return(KERN_INVALID_ARGUMENT);
	}

	if (pset != thread->processor_set) {
		act_unlock_thread(thr_act);

		return(KERN_FAILURE);
	}

	switch (policy) {

	case POLICY_RR:
	{
		policy_rr_base_t		rr_base = (policy_rr_base_t) base;
		policy_rr_limit_t		rr_limit = (policy_rr_limit_t) limit;

		if (	base_count != POLICY_RR_BASE_COUNT		||
				limit_count != POLICY_RR_LIMIT_COUNT	) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		bas = rr_base->base_priority;
		max = rr_limit->max_priority;
		if (invalid_pri(bas) || invalid_pri(max)) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		break;
	}

	case POLICY_FIFO:
	{
		policy_fifo_base_t		fifo_base = (policy_fifo_base_t) base;
		policy_fifo_limit_t		fifo_limit = (policy_fifo_limit_t) limit;

		if (	base_count != POLICY_FIFO_BASE_COUNT	||
				limit_count != POLICY_FIFO_LIMIT_COUNT	) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		bas = fifo_base->base_priority;
		max = fifo_limit->max_priority;
		if (invalid_pri(bas) || invalid_pri(max)) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		break;
	}

	case POLICY_TIMESHARE:
	{
		policy_timeshare_base_t		ts_base = (policy_timeshare_base_t) base;
		policy_timeshare_limit_t	ts_limit =
						(policy_timeshare_limit_t) limit;

		if (	base_count != POLICY_TIMESHARE_BASE_COUNT	||
				limit_count != POLICY_TIMESHARE_LIMIT_COUNT	) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		bas = ts_base->base_priority;
		max = ts_limit->max_priority;
		if (invalid_pri(bas) || invalid_pri(max)) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		break;
	}

	default:
		result = KERN_INVALID_POLICY;
	}

	if (result != KERN_SUCCESS) {
		act_unlock_thread(thr_act);

		return(result);
	}

	result = thread_policy_common(thread, policy, bas);
	act_unlock_thread(thr_act);

	return(result);
}
/*
 *	thread_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given thread.  Policy must be a policy which is enabled for the
 *	processor set.  Change contained threads if requested.
 */
kern_return_t
thread_policy(
	thread_act_t			thr_act,
	policy_t				policy,
	policy_base_t			base,
	mach_msg_type_number_t	count,
	boolean_t				set_limit)
{
	thread_t				thread;
	processor_set_t			pset;
	kern_return_t			result = KERN_SUCCESS;
	policy_limit_t			limit;
	int						limcount;
	policy_rr_limit_data_t			rr_limit;
	policy_fifo_limit_data_t		fifo_limit;
	policy_timeshare_limit_data_t	ts_limit;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	pset = thread->processor_set;
	if (	thread == THREAD_NULL		||
			pset == PROCESSOR_SET_NULL	){
		act_unlock_thread(thr_act);

		return(KERN_INVALID_ARGUMENT);
	}

	if (	invalid_policy(policy)											||
			((POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO) & policy) == 0	) {
		act_unlock_thread(thr_act);

		return(KERN_INVALID_POLICY);
	}

	if (set_limit) {
		/*
		 * Set scheduling limits to base priority.
		 */
		switch (policy) {

		case POLICY_RR:
		{
			policy_rr_base_t		rr_base;

			if (count != POLICY_RR_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_RR_LIMIT_COUNT;
			rr_base = (policy_rr_base_t) base;
			rr_limit.max_priority = rr_base->base_priority;
			limit = (policy_limit_t) &rr_limit;

			break;
		}

		case POLICY_FIFO:
		{
			policy_fifo_base_t		fifo_base;

			if (count != POLICY_FIFO_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_FIFO_LIMIT_COUNT;
			fifo_base = (policy_fifo_base_t) base;
			fifo_limit.max_priority = fifo_base->base_priority;
			limit = (policy_limit_t) &fifo_limit;

			break;
		}

		case POLICY_TIMESHARE:
		{
			policy_timeshare_base_t		ts_base;

			if (count != POLICY_TIMESHARE_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_TIMESHARE_LIMIT_COUNT;
			ts_base = (policy_timeshare_base_t) base;
			ts_limit.max_priority = ts_base->base_priority;
			limit = (policy_limit_t) &ts_limit;

			break;
		}

		default:
			result = KERN_INVALID_POLICY;
			break;
		}
	}
	else {
		/*
		 * Use current scheduling limits.  Ensure that the
		 * new base priority will not exceed current limits.
		 */
		switch (policy) {

		case POLICY_RR:
		{
			policy_rr_base_t		rr_base;

			if (count != POLICY_RR_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_RR_LIMIT_COUNT;
			rr_base = (policy_rr_base_t) base;
			if (rr_base->base_priority > thread->max_priority) {
				result = KERN_POLICY_LIMIT;
				break;
			}

			rr_limit.max_priority = thread->max_priority;
			limit = (policy_limit_t) &rr_limit;

			break;
		}

		case POLICY_FIFO:
		{
			policy_fifo_base_t		fifo_base;

			if (count != POLICY_FIFO_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_FIFO_LIMIT_COUNT;
			fifo_base = (policy_fifo_base_t) base;
			if (fifo_base->base_priority > thread->max_priority) {
				result = KERN_POLICY_LIMIT;
				break;
			}

			fifo_limit.max_priority = thread->max_priority;
			limit = (policy_limit_t) &fifo_limit;

			break;
		}

		case POLICY_TIMESHARE:
		{
			policy_timeshare_base_t		ts_base;

			if (count != POLICY_TIMESHARE_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_TIMESHARE_LIMIT_COUNT;
			ts_base = (policy_timeshare_base_t) base;
			if (ts_base->base_priority > thread->max_priority) {
				result = KERN_POLICY_LIMIT;
				break;
			}

			ts_limit.max_priority = thread->max_priority;
			limit = (policy_limit_t) &ts_limit;

			break;
		}

		default:
			result = KERN_INVALID_POLICY;
			break;
		}
	}

	act_unlock_thread(thr_act);

	if (result == KERN_SUCCESS)
		result = thread_set_policy(thr_act, pset,
					policy, base, count, limit, limcount);

	return(result);
}
/*
 *	Define shifts for simulating (5/8)**n
 */
shift_data_t	wait_shift[32] = {
	{1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
	{5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
	{11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
	{16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}};
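/*
 * Illustrative arithmetic, not from the original source: entry n of
 * wait_shift approximates multiplication by (5/8)**n using two shifts.
 * A positive shift2 means the shifted terms are added, a negative
 * shift2 means they are subtracted.  Aging usage by one missed tick
 * uses wait_shift[1] = {1,3}:
 *
 *	usage = (usage >> 1) + (usage >> 3);	[* (1/2 + 1/8) = 5/8 exactly *]
 *
 * while two missed ticks use wait_shift[2] = {1,-3}:
 *
 *	usage = (usage >> 1) - (usage >> 3);	[* 0.375 ~= (5/8)**2 = 0.390625 *]
 */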
/*
 *	do_priority_computation:
 *
 *	Calculate new priority for thread based on its base priority plus
 *	accumulated usage.  PRI_SHIFT and PRI_SHIFT_2 convert from
 *	usage to priorities.  SCHED_SHIFT converts for the scaling
 *	of the sched_usage field by SCHED_SCALE.  This scaling comes
 *	from the multiplication by sched_load (thread_timer_delta)
 *	in sched.h.  sched_load is calculated as a scaled overload
 *	factor in compute_mach_factor (mach_factor.c).
 */
#ifdef	PRI_SHIFT_2
#if	PRI_SHIFT_2 > 0
#define do_priority_computation(thread, pri)						\
	MACRO_BEGIN														\
	(pri) = (thread)->priority		/* start with base priority */	\
	    - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT))		\
	    - ((thread)->sched_usage >> (PRI_SHIFT_2 + SCHED_SHIFT));	\
	if ((pri) < MINPRI_STANDARD)									\
		(pri) = MINPRI_STANDARD;									\
	else															\
	if ((pri) > MAXPRI_STANDARD)									\
		(pri) = MAXPRI_STANDARD;									\
	MACRO_END
#else	/* PRI_SHIFT_2 */
#define do_priority_computation(thread, pri)						\
	MACRO_BEGIN														\
	(pri) = (thread)->priority		/* start with base priority */	\
	    - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT))		\
	    + ((thread)->sched_usage >> (SCHED_SHIFT - PRI_SHIFT_2));	\
	if ((pri) < MINPRI_STANDARD)									\
		(pri) = MINPRI_STANDARD;									\
	else															\
	if ((pri) > MAXPRI_STANDARD)									\
		(pri) = MAXPRI_STANDARD;									\
	MACRO_END
#endif	/* PRI_SHIFT_2 */
#else	/* defined(PRI_SHIFT_2) */
#define do_priority_computation(thread, pri)						\
	MACRO_BEGIN														\
	(pri) = (thread)->priority		/* start with base priority */	\
	    - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT));	\
	if ((pri) < MINPRI_STANDARD)									\
		(pri) = MINPRI_STANDARD;									\
	else															\
	if ((pri) > MAXPRI_STANDARD)									\
		(pri) = MAXPRI_STANDARD;									\
	MACRO_END
#endif	/* defined(PRI_SHIFT_2) */
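/*
 * Worked example (the shift values are illustrative assumptions, not
 * constants from this file): with only PRI_SHIFT defined and
 * PRI_SHIFT + SCHED_SHIFT == 18, a thread whose base priority is 31
 * and whose sched_usage is 0x100000 computes
 *
 *	pri = 31 - (0x100000 >> 18) = 31 - 4 = 27
 *
 * which is then clamped to [MINPRI_STANDARD, MAXPRI_STANDARD]; heavier
 * recent usage therefore yields a lower effective priority.
 */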
/*
 *	compute_priority:
 *
 *	Compute the effective priority of the specified thread.
 *	The effective priority computation is as follows:
 *
 *	Take the base priority for this thread and add
 *	to it an increment derived from its cpu_usage.
 *
 *	The thread *must* be locked by the caller.
 */
void
compute_priority(
	register thread_t	thread,
	boolean_t			resched)
{
	register int		pri;

	if (thread->sched_mode & TH_MODE_TIMESHARE) {
		do_priority_computation(thread, pri);
		if (thread->depress_priority < 0)
			set_pri(thread, pri, resched);
		else
			thread->depress_priority = pri;
	}
	else
		set_pri(thread, thread->priority, resched);
}
/*
 *	compute_my_priority:
 *
 *	Version of compute priority for current thread or thread
 *	being manipulated by scheduler (going on or off a runq).
 *	Only used for priority updates.  Policy or priority changes
 *	must call compute_priority above.  Caller must have thread
 *	locked and know it is timesharing and not depressed.
 */
void
compute_my_priority(
	register thread_t	thread)
{
	register int		pri;

	do_priority_computation(thread, pri);
	assert(thread->runq == RUN_QUEUE_NULL);
	thread->sched_pri = pri;
}
/*
 *	update_priority
 *
 *	Cause the priority computation of a thread that has been
 *	sleeping or suspended to "catch up" with the system.  Thread
 *	*MUST* be locked by caller.  If thread is running, then this
 *	can only be called by the thread on itself.
 */
void
update_priority(
	register thread_t		thread)
{
	register unsigned int	ticks;
	register shift_t		shiftp;

	ticks = sched_tick - thread->sched_stamp;
	assert(ticks != 0);

	/*
	 *	If asleep for more than 30 seconds forget all
	 *	cpu_usage, else catch up on missed aging.
	 *	5/8 ** n is approximated by the two shifts
	 *	in the wait_shift array.
	 */
	thread->sched_stamp += ticks;
	thread_timer_delta(thread);
	if (ticks > 30) {
		thread->cpu_usage = 0;
		thread->sched_usage = 0;
	}
	else {
		thread->cpu_usage += thread->cpu_delta;
		thread->sched_usage += thread->sched_delta;

		shiftp = &wait_shift[ticks];
		if (shiftp->shift2 > 0) {
			thread->cpu_usage =
						(thread->cpu_usage >> shiftp->shift1) +
						(thread->cpu_usage >> shiftp->shift2);
			thread->sched_usage =
						(thread->sched_usage >> shiftp->shift1) +
						(thread->sched_usage >> shiftp->shift2);
		}
		else {
			thread->cpu_usage =
						(thread->cpu_usage >> shiftp->shift1) -
						(thread->cpu_usage >> -(shiftp->shift2));
			thread->sched_usage =
						(thread->sched_usage >> shiftp->shift1) -
						(thread->sched_usage >> -(shiftp->shift2));
		}
	}

	thread->cpu_delta = 0;
	thread->sched_delta = 0;

	/*
	 *	Check for fail-safe release.
	 */
	if (	(thread->sched_mode & TH_MODE_FAILSAFE)		&&
			thread->sched_stamp >= thread->safe_release	) {
		if (!(thread->safe_mode & TH_MODE_TIMESHARE)) {
			if (thread->safe_mode & TH_MODE_REALTIME) {
				if (thread->depress_priority < 0)
					thread->priority = BASEPRI_REALTIME;
				else
					thread->depress_priority = BASEPRI_REALTIME;

				thread->sched_mode |= TH_MODE_REALTIME;
			}

			if (	thread->depress_priority < 0			&&
					thread->sched_pri != thread->priority	) {
				run_queue_t		runq;

				runq = rem_runq(thread);
				thread->sched_pri = thread->priority;
				if (runq != RUN_QUEUE_NULL)
					thread_setrun(thread, TRUE, TAIL_Q);
			}

			thread->sched_mode &= ~TH_MODE_TIMESHARE;
		}

		thread->safe_mode = 0;
		thread->sched_mode &= ~TH_MODE_FAILSAFE;
	}

	/*
	 *	Recompute priority if appropriate.
	 */
	if (	(thread->sched_mode & TH_MODE_TIMESHARE)	&&
			thread->depress_priority < 0				) {
		register int		new_pri;

		do_priority_computation(thread, new_pri);
		if (new_pri != thread->sched_pri) {
			run_queue_t		runq;

			runq = rem_runq(thread);
			thread->sched_pri = new_pri;
			if (runq != RUN_QUEUE_NULL)
				thread_setrun(thread, TRUE, TAIL_Q);
		}
	}
}
/*
 *	thread_switch_continue:
 *
 *	Continuation routine for a thread switch.
 *
 *	Just need to arrange the return value gets sent out correctly and that
 *	we cancel the timer or the depression called for by the options to the
 *	thread_switch call.
 */
void
_mk_sp_thread_switch_continue(void)
{
	register thread_t	self = current_thread();
	int					wait_result = self->wait_result;
	int					option = self->saved.swtch.option;

	if (option == SWITCH_OPTION_WAIT && wait_result != THREAD_TIMED_OUT)
		thread_cancel_timer();
	else
	if (option == SWITCH_OPTION_DEPRESS)
		_mk_sp_thread_depress_abort(self, FALSE);

	thread_syscall_return(KERN_SUCCESS);
	/*NOTREACHED*/
}
/*
 *	thread_switch:
 *
 *	Context switch.  User may supply thread hint.
 *
 *	Fixed priority threads that call this get what they asked for
 *	even if that violates priority order.
 */
kern_return_t
_mk_sp_thread_switch(
	thread_act_t			hint_act,
	int						option,
	mach_msg_timeout_t		option_time)
{
	register thread_t		self = current_thread();
	register processor_t	myprocessor;
	spl_t					s;

	/*
	 *	Check and use thr_act hint if appropriate.  It is not
	 *	appropriate to give a hint that shares the current shuttle.
	 */
	if (hint_act != THR_ACT_NULL) {
		register thread_t	thread = act_lock_thread(hint_act);

		if (	thread != THREAD_NULL			&&
				thread != self					&&
				thread->top_act == hint_act		) {
			s = splsched();
			thread_lock(thread);

			/*
			 *	Check if the thread is in the right pset.  Then
			 *	pull it off its run queue.  If it
			 *	doesn't come, then it's not eligible.
			 */
			if (	thread->processor_set == self->processor_set	&&
					rem_runq(thread) != RUN_QUEUE_NULL				) {
				thread_unlock(thread);

				act_unlock_thread(hint_act);
				act_deallocate(hint_act);

				if (option == SWITCH_OPTION_WAIT)
					assert_wait_timeout(option_time, THREAD_ABORTSAFE);
				else
				if (option == SWITCH_OPTION_DEPRESS)
					_mk_sp_thread_depress_ms(option_time);

				self->saved.swtch.option = option;

				thread_run(self, _mk_sp_thread_switch_continue, thread);
				/* NOTREACHED */
			}

			thread_unlock(thread);
			splx(s);
		}

		act_unlock_thread(hint_act);
		act_deallocate(hint_act);
	}

	/*
	 *	No handoff hint supplied, or hint was wrong.  Call thread_block() in
	 *	hopes of running something else.  If nothing else is runnable,
	 *	thread_block will detect this.  WARNING: thread_switch with no
	 *	option will not do anything useful if the thread calling it is the
	 *	highest priority thread (can easily happen with a collection
	 *	of timesharing threads).
	 */
	mp_disable_preemption();
	myprocessor = current_processor();
	if (	option != SWITCH_OPTION_NONE				||
			myprocessor->processor_set->runq.count > 0	||
			myprocessor->runq.count > 0					) {
		mp_enable_preemption();

		if (option == SWITCH_OPTION_WAIT)
			assert_wait_timeout(option_time, THREAD_ABORTSAFE);
		else
		if (option == SWITCH_OPTION_DEPRESS)
			_mk_sp_thread_depress_ms(option_time);

		self->saved.swtch.option = option;

		thread_block(_mk_sp_thread_switch_continue);
	}
	else
		mp_enable_preemption();

	if (option == SWITCH_OPTION_WAIT)
		thread_cancel_timer();
	else
	if (option == SWITCH_OPTION_DEPRESS)
		_mk_sp_thread_depress_abort(self, FALSE);

	return (KERN_SUCCESS);
}
/*
 * Depress thread's priority to lowest possible for the specified interval,
 * with a value of zero resulting in no timeout being scheduled.
 */
void
_mk_sp_thread_depress_abstime(
	uint64_t				interval)
{
	register thread_t		self = current_thread();
	uint64_t				deadline;
	spl_t					s;

	s = splsched();
	wake_lock(self);
	thread_lock(self);
	if (self->depress_priority < 0) {
		self->depress_priority = self->priority;
		self->sched_pri = self->priority = DEPRESSPRI;
		thread_unlock(self);

		if (interval != 0) {
			clock_absolutetime_interval_to_deadline(interval, &deadline);
			if (!timer_call_enter(&self->depress_timer, deadline))
				self->depress_timer_active++;
		}
	}
	else
		thread_unlock(self);
	wake_unlock(self);
	splx(s);
}
void
_mk_sp_thread_depress_ms(
	mach_msg_timeout_t		interval)
{
	uint64_t		abstime;

	clock_interval_to_absolutetime_interval(
						interval, 1000*NSEC_PER_USEC, &abstime);
	_mk_sp_thread_depress_abstime(abstime);
}
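/*
 * Illustrative call, not from the original source: depressing the
 * current thread for 10 ms scales the interval by the millisecond
 * unit (1000 * NSEC_PER_USEC = 1,000,000 ns), so
 *
 *	_mk_sp_thread_depress_ms(10);
 *
 * converts 10 * 1,000,000 = 10,000,000 ns into absolute time units
 * before arming the depression timer.
 */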
/*
 *	Priority depression expiration.
 */
void
thread_depress_expire(
	timer_call_param_t		p0,
	timer_call_param_t		p1)
{
	thread_t		thread = p0;
	spl_t			s;

	s = splsched();
	wake_lock(thread);
	if (--thread->depress_timer_active == 1) {
		thread_lock(thread);
		if (thread->depress_priority >= 0) {
			thread->priority = thread->depress_priority;
			thread->depress_priority = -1;
			compute_priority(thread, TRUE);
		}
		else
		if (thread->depress_priority == -2) {
			/*
			 * Thread was temporarily undepressed by thread_suspend, to
			 * be redepressed in special_handler as it blocks.  We need to
			 * prevent special_handler from redepressing it, since depression
			 * has timed out:
			 */
			thread->depress_priority = -1;
		}
		thread->sched_mode &= ~TH_MODE_POLLDEPRESS;
		thread_unlock(thread);
	}
	else
	if (thread->depress_timer_active == 0)
		thread_wakeup_one(&thread->depress_timer_active);
	wake_unlock(thread);
	splx(s);
}
/*
 *	Prematurely abort priority depression if there is one.
 */
kern_return_t
_mk_sp_thread_depress_abort(
	register thread_t		thread,
	boolean_t				abortall)
{
	kern_return_t		result = KERN_NOT_DEPRESSED;
	spl_t				s;

	s = splsched();
	wake_lock(thread);
	thread_lock(thread);
	if (abortall || !(thread->sched_mode & TH_MODE_POLLDEPRESS)) {
		if (thread->depress_priority >= 0) {
			thread->priority = thread->depress_priority;
			thread->depress_priority = -1;
			compute_priority(thread, TRUE);
			result = KERN_SUCCESS;
		}

		thread->sched_mode &= ~TH_MODE_POLLDEPRESS;
		thread_unlock(thread);

		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}
	else
		thread_unlock(thread);
	wake_unlock(thread);
	splx(s);

	return (result);
}
void
_mk_sp_thread_perhaps_yield(
	thread_t			self)
{
	spl_t				s;

	assert(self == current_thread());

	s = splsched();
	thread_lock(self);
	if (!(self->sched_mode & (TH_MODE_REALTIME|TH_MODE_TIMESHARE))) {
		extern uint64_t		max_poll_computation;
		extern int			sched_poll_yield_shift;
		uint64_t			abstime, total_computation;

		clock_get_uptime(&abstime);
		total_computation = abstime - self->computation_epoch;
		total_computation += self->metered_computation;
		if (total_computation >= max_poll_computation) {
			processor_t		myprocessor;

			thread_unlock(self);
			wake_lock(self);
			thread_lock(self);
			if (self->depress_priority < 0) {
				self->depress_priority = self->priority;
				self->sched_pri = self->priority = DEPRESSPRI;
			}
			self->computation_epoch = abstime;
			self->metered_computation = 0;
			self->sched_mode |= TH_MODE_POLLDEPRESS;
			thread_unlock(self);

			abstime += (total_computation >> sched_poll_yield_shift);
			if (!timer_call_enter(&self->depress_timer, abstime))
				self->depress_timer_active++;
			wake_unlock(self);

			myprocessor = current_processor();
			if (csw_needed(self, myprocessor))
				ast_on(AST_BLOCK);
		}
		else
			thread_unlock(self);
	}
	else
		thread_unlock(self);
	splx(s);
}