/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *** ??? The following lines were picked up when code was incorporated
 *** into this file from `kern/syscall_subr.c.'  These should be moved
 *** with the code if it moves again.  Otherwise, they should be trimmed,
 *** based on the files included above.
 */
#include <mach/boolean.h>
#include <mach/thread_switch.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/ipc_kobject.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <mach/policy.h>
#include <kern/syscall_subr.h>
#include <mach/mach_host_server.h>
#include <mach/mach_syscalls.h>
/*
 *** ??? End of lines picked up when code was incorporated
 *** into this file from `kern/syscall_subr.c.'
 */
#include <kern/mk_sp.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/assert.h>
#include <kern/thread.h>
#include <mach/mach_host_server.h>
/*
 *** ??? The next two files supply the prototypes for `thread_set_policy()'
 *** and `thread_policy.'  These routines cannot stay here if they are
 *** exported Mach system calls.
 */
#include <mach/thread_act_server.h>
#include <mach/host_priv_server.h>
void
_mk_sp_thread_unblock(
    thread_t            thread)
{
    if (thread->state & TH_IDLE)
        return;

    if (thread->sched_mode & TH_MODE_REALTIME) {
        thread->realtime.deadline = mach_absolute_time();
        thread->realtime.deadline += thread->realtime.constraint;
    }

    thread->current_quantum = 0;
    thread->computation_metered = 0;
    thread->reason = AST_NONE;
}
void
_mk_sp_thread_done(
    thread_t            old_thread,
    thread_t            new_thread,
    processor_t         processor)
{
    /*
     * A running thread is being taken off a processor:
     */
    processor->last_dispatch = mach_absolute_time();

    if (old_thread->state & TH_IDLE)
        return;

    /*
     * Compute remainder of current quantum.
     */
    if (first_timeslice(processor) &&
            processor->quantum_end > processor->last_dispatch)
        old_thread->current_quantum =
            (processor->quantum_end - processor->last_dispatch);
    else
        old_thread->current_quantum = 0;

    if (old_thread->sched_mode & TH_MODE_REALTIME) {
        /*
         * Cancel the deadline if the thread has
         * consumed the entire quantum.
         */
        if (old_thread->current_quantum == 0) {
            old_thread->realtime.deadline = UINT64_MAX;
            old_thread->reason |= AST_QUANTUM;
        }
    }
    else {
        /*
         * For non-realtime threads treat a tiny
         * remaining quantum as an expired quantum
         * but include what's left next time.
         */
        if (old_thread->current_quantum < min_std_quantum) {
            old_thread->reason |= AST_QUANTUM;
            old_thread->current_quantum += std_quantum;
        }
    }

    /*
     * If we are doing a direct handoff then
     * give the remainder of our quantum to
     * the next thread.
     */
    if ((old_thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) {
        new_thread->current_quantum = old_thread->current_quantum;
        old_thread->reason |= AST_QUANTUM;
        old_thread->current_quantum = 0;
    }

    old_thread->last_switch = processor->last_dispatch;

    old_thread->computation_metered +=
            (old_thread->last_switch - old_thread->computation_epoch);
}
void
_mk_sp_thread_begin(
    thread_t            thread,
    processor_t         processor)
{
    /*
     * The designated thread is beginning execution:
     */
    if (thread->state & TH_IDLE) {
        timer_call_cancel(&processor->quantum_timer);
        processor->timeslice = 1;

        return;
    }

    if (thread->current_quantum == 0)
        thread_quantum_init(thread);

    processor->quantum_end =
            (processor->last_dispatch + thread->current_quantum);
    timer_call_enter1(&processor->quantum_timer,
            thread, processor->quantum_end);

    processor_timeslice_setup(processor, thread);

    thread->last_switch = processor->last_dispatch;

    thread->computation_epoch = thread->last_switch;
}
void
_mk_sp_thread_dispatch(
    thread_t        thread)
{
    if (thread->reason & AST_QUANTUM)
        thread_setrun(thread, SCHED_TAILQ);
    else
    if (thread->reason & AST_PREEMPT)
        thread_setrun(thread, SCHED_HEADQ);
    else
        thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);

    thread->reason = AST_NONE;
}
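
/*
 * Note (editorial): the dispatch above places a thread that exhausted
 * its quantum at the tail of its run queue, returns a preempted thread
 * to the head, and otherwise reenqueues at the tail while also checking
 * for preemption, so involuntary preemption does not cost a thread its
 * queue position.
 */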
/*
 *  thread_policy_common:
 *
 *  Set scheduling policy & priority for thread.
 */
static kern_return_t
thread_policy_common(
    thread_t        thread,
    integer_t       policy,
    integer_t       priority)
{
    spl_t           s;

    if (thread == THREAD_NULL ||
            invalid_policy(policy))
        return (KERN_INVALID_ARGUMENT);

    s = splsched();
    thread_lock(thread);

    if (!(thread->sched_mode & TH_MODE_REALTIME) &&
            !(thread->safe_mode & TH_MODE_REALTIME)) {
        if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
            integer_t   oldmode = (thread->sched_mode & TH_MODE_TIMESHARE);

            if (policy == POLICY_TIMESHARE && !oldmode) {
                thread->sched_mode |= TH_MODE_TIMESHARE;

                if (thread->state & TH_RUN)
                    pset_share_incr(thread->processor_set);
            }
            else
            if (policy != POLICY_TIMESHARE && oldmode) {
                thread->sched_mode &= ~TH_MODE_TIMESHARE;

                if (thread->state & TH_RUN)
                    pset_share_decr(thread->processor_set);
            }
        }
        else {
            if (policy == POLICY_TIMESHARE)
                thread->safe_mode |= TH_MODE_TIMESHARE;
            else
                thread->safe_mode &= ~TH_MODE_TIMESHARE;
        }

        if (priority >= thread->max_priority)
            priority = thread->max_priority - thread->task_priority;
        else
        if (priority >= MINPRI_KERNEL)
            priority -= MINPRI_KERNEL;
        else
        if (priority >= MINPRI_SYSTEM)
            priority -= MINPRI_SYSTEM;
        else
            priority -= BASEPRI_DEFAULT;

        priority += thread->task_priority;

        if (priority > thread->max_priority)
            priority = thread->max_priority;
        else
        if (priority < MINPRI)
            priority = MINPRI;

        thread->importance = priority - thread->task_priority;

        set_priority(thread, priority);
    }

    thread_unlock(thread);
    splx(s);

    return (KERN_SUCCESS);
}
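
/*
 * Worked example of the rebasing above (illustrative; assumes
 * BASEPRI_DEFAULT is 31 and that the requested value lies below
 * MINPRI_SYSTEM): a request for priority 35 in a task with
 * task_priority == 31 is first taken relative to BASEPRI_DEFAULT,
 * 35 - 31 = 4, then rebased onto the task, 4 + 31 = 35, leaving
 * importance 4.  The same request in a task with task_priority == 20
 * yields 4 + 20 = 24, again with importance 4.
 */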
/*
 *  thread_set_policy
 *
 *  Set scheduling policy and parameters, both base and limit, for
 *  the given thread.  Policy can be any policy implemented by the
 *  processor set, whether enabled or not.
 */
kern_return_t
thread_set_policy(
    thread_act_t            thr_act,
    processor_set_t         pset,
    policy_t                policy,
    policy_base_t           base,
    mach_msg_type_number_t  base_count,
    policy_limit_t          limit,
    mach_msg_type_number_t  limit_count)
{
    thread_t                thread;
    int                     max, bas;
    kern_return_t           result = KERN_SUCCESS;

    if (thr_act == THR_ACT_NULL ||
            pset == PROCESSOR_SET_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread = act_lock_thread(thr_act);
    if (thread == THREAD_NULL) {
        act_unlock_thread(thr_act);

        return (KERN_INVALID_ARGUMENT);
    }

    if (pset != thread->processor_set) {
        act_unlock_thread(thr_act);

        return (KERN_FAILURE);
    }

    switch (policy) {

    case POLICY_RR:
    {
        policy_rr_base_t        rr_base = (policy_rr_base_t) base;
        policy_rr_limit_t       rr_limit = (policy_rr_limit_t) limit;

        if (base_count != POLICY_RR_BASE_COUNT ||
                limit_count != POLICY_RR_LIMIT_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        bas = rr_base->base_priority;
        max = rr_limit->max_priority;
        if (invalid_pri(bas) || invalid_pri(max)) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        break;
    }

    case POLICY_FIFO:
    {
        policy_fifo_base_t      fifo_base = (policy_fifo_base_t) base;
        policy_fifo_limit_t     fifo_limit = (policy_fifo_limit_t) limit;

        if (base_count != POLICY_FIFO_BASE_COUNT ||
                limit_count != POLICY_FIFO_LIMIT_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        bas = fifo_base->base_priority;
        max = fifo_limit->max_priority;
        if (invalid_pri(bas) || invalid_pri(max)) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        break;
    }

    case POLICY_TIMESHARE:
    {
        policy_timeshare_base_t     ts_base = (policy_timeshare_base_t) base;
        policy_timeshare_limit_t    ts_limit =
                                        (policy_timeshare_limit_t) limit;

        if (base_count != POLICY_TIMESHARE_BASE_COUNT ||
                limit_count != POLICY_TIMESHARE_LIMIT_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        bas = ts_base->base_priority;
        max = ts_limit->max_priority;
        if (invalid_pri(bas) || invalid_pri(max)) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        break;
    }

    default:
        result = KERN_INVALID_POLICY;
        break;
    }

    if (result != KERN_SUCCESS) {
        act_unlock_thread(thr_act);

        return (result);
    }

    result = thread_policy_common(thread, policy, bas);
    act_unlock_thread(thr_act);

    return (result);
}
/*
 *  thread_policy
 *
 *  Set scheduling policy and parameters, both base and limit, for
 *  the given thread.  Policy must be a policy which is enabled for the
 *  processor set.  Change contained threads if requested.
 */
kern_return_t
thread_policy(
    thread_act_t            thr_act,
    policy_t                policy,
    policy_base_t           base,
    mach_msg_type_number_t  count,
    boolean_t               set_limit)
{
    thread_t                thread;
    processor_set_t         pset;
    kern_return_t           result = KERN_SUCCESS;
    policy_limit_t          limit;
    int                     limcount;
    policy_rr_limit_data_t          rr_limit;
    policy_fifo_limit_data_t        fifo_limit;
    policy_timeshare_limit_data_t   ts_limit;

    if (thr_act == THR_ACT_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread = act_lock_thread(thr_act);
    pset = thread->processor_set;
    if (thread == THREAD_NULL ||
            pset == PROCESSOR_SET_NULL) {
        act_unlock_thread(thr_act);

        return (KERN_INVALID_ARGUMENT);
    }

    if (invalid_policy(policy) ||
            ((POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO) & policy) == 0) {
        act_unlock_thread(thr_act);

        return (KERN_INVALID_POLICY);
    }

    if (set_limit) {
        /*
         *  Set scheduling limits to base priority.
         */
        switch (policy) {

        case POLICY_RR:
        {
            policy_rr_base_t        rr_base;

            if (count != POLICY_RR_BASE_COUNT) {
                result = KERN_INVALID_ARGUMENT;
                break;
            }

            limcount = POLICY_RR_LIMIT_COUNT;
            rr_base = (policy_rr_base_t) base;
            rr_limit.max_priority = rr_base->base_priority;
            limit = (policy_limit_t) &rr_limit;

            break;
        }

        case POLICY_FIFO:
        {
            policy_fifo_base_t      fifo_base;

            if (count != POLICY_FIFO_BASE_COUNT) {
                result = KERN_INVALID_ARGUMENT;
                break;
            }

            limcount = POLICY_FIFO_LIMIT_COUNT;
            fifo_base = (policy_fifo_base_t) base;
            fifo_limit.max_priority = fifo_base->base_priority;
            limit = (policy_limit_t) &fifo_limit;

            break;
        }

        case POLICY_TIMESHARE:
        {
            policy_timeshare_base_t     ts_base;

            if (count != POLICY_TIMESHARE_BASE_COUNT) {
                result = KERN_INVALID_ARGUMENT;
                break;
            }

            limcount = POLICY_TIMESHARE_LIMIT_COUNT;
            ts_base = (policy_timeshare_base_t) base;
            ts_limit.max_priority = ts_base->base_priority;
            limit = (policy_limit_t) &ts_limit;

            break;
        }

        default:
            result = KERN_INVALID_POLICY;
            break;
        }
    }
    else {
        /*
         *  Use current scheduling limits.  Ensure that the
         *  new base priority will not exceed current limits.
         */
        switch (policy) {

        case POLICY_RR:
        {
            policy_rr_base_t        rr_base;

            if (count != POLICY_RR_BASE_COUNT) {
                result = KERN_INVALID_ARGUMENT;
                break;
            }

            limcount = POLICY_RR_LIMIT_COUNT;
            rr_base = (policy_rr_base_t) base;
            if (rr_base->base_priority > thread->max_priority) {
                result = KERN_POLICY_LIMIT;
                break;
            }

            rr_limit.max_priority = thread->max_priority;
            limit = (policy_limit_t) &rr_limit;

            break;
        }

        case POLICY_FIFO:
        {
            policy_fifo_base_t      fifo_base;

            if (count != POLICY_FIFO_BASE_COUNT) {
                result = KERN_INVALID_ARGUMENT;
                break;
            }

            limcount = POLICY_FIFO_LIMIT_COUNT;
            fifo_base = (policy_fifo_base_t) base;
            if (fifo_base->base_priority > thread->max_priority) {
                result = KERN_POLICY_LIMIT;
                break;
            }

            fifo_limit.max_priority = thread->max_priority;
            limit = (policy_limit_t) &fifo_limit;

            break;
        }

        case POLICY_TIMESHARE:
        {
            policy_timeshare_base_t     ts_base;

            if (count != POLICY_TIMESHARE_BASE_COUNT) {
                result = KERN_INVALID_ARGUMENT;
                break;
            }

            limcount = POLICY_TIMESHARE_LIMIT_COUNT;
            ts_base = (policy_timeshare_base_t) base;
            if (ts_base->base_priority > thread->max_priority) {
                result = KERN_POLICY_LIMIT;
                break;
            }

            ts_limit.max_priority = thread->max_priority;
            limit = (policy_limit_t) &ts_limit;

            break;
        }

        default:
            result = KERN_INVALID_POLICY;
            break;
        }
    }

    act_unlock_thread(thr_act);

    if (result == KERN_SUCCESS)
        result = thread_set_policy(thr_act, pset,
                            policy, base, count, limit, limcount);

    return (result);
}
/*
 *  Define shifts for simulating (5/8)**n.
 */
shift_data_t    wait_shift[32] = {
    {1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
    {5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
    {11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
    {16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}};
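
/*
 * Worked example (illustrative, not part of the original source):
 * update_priority() below indexes this table by the number of missed
 * scheduler ticks n and applies
 *     usage' = (usage >> shift1) + (usage >> shift2),
 * subtracting instead when shift2 is negative.  For n == 1, {1,3}
 * gives usage/2 + usage/8 = 0.625 * usage, which is (5/8)**1 exactly;
 * for n == 2, {1,-3} gives usage/2 - usage/8 = 0.375 * usage, close
 * to (5/8)**2 = 0.390625.
 */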
/*
 *  do_priority_computation:
 *
 *  Calculate new priority for thread based on its base priority plus
 *  accumulated usage.  PRI_SHIFT and PRI_SHIFT_2 convert from
 *  usage to priorities.  SCHED_SHIFT converts for the scaling
 *  of the sched_usage field by SCHED_SCALE.  This scaling comes
 *  from the multiplication by sched_load (thread_timer_delta)
 *  in sched.h.  sched_load is calculated as a scaled overload
 *  factor in compute_mach_factor (mach_factor.c).
 */
#ifdef  PRI_SHIFT_2
#if     PRI_SHIFT_2 > 0
#define do_priority_computation(thread, pri)                            \
    MACRO_BEGIN                                                         \
    (pri) = (thread)->priority      /* start with base priority */     \
        - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT))          \
        - ((thread)->sched_usage >> (PRI_SHIFT_2 + SCHED_SHIFT));       \
    if ((pri) < MINPRI_STANDARD)                                        \
        (pri) = MINPRI_STANDARD;                                        \
    else                                                                \
    if ((pri) > MAXPRI_STANDARD)                                        \
        (pri) = MAXPRI_STANDARD;                                        \
    MACRO_END
#else   /* PRI_SHIFT_2 */
#define do_priority_computation(thread, pri)                            \
    MACRO_BEGIN                                                         \
    (pri) = (thread)->priority      /* start with base priority */     \
        - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT))          \
        + ((thread)->sched_usage >> (SCHED_SHIFT - PRI_SHIFT_2));       \
    if ((pri) < MINPRI_STANDARD)                                        \
        (pri) = MINPRI_STANDARD;                                        \
    else                                                                \
    if ((pri) > MAXPRI_STANDARD)                                        \
        (pri) = MAXPRI_STANDARD;                                        \
    MACRO_END
#endif  /* PRI_SHIFT_2 */
#else   /* defined(PRI_SHIFT_2) */
#define do_priority_computation(thread, pri)                            \
    MACRO_BEGIN                                                         \
    (pri) = (thread)->priority      /* start with base priority */     \
        - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT));         \
    if ((pri) < MINPRI_STANDARD)                                        \
        (pri) = MINPRI_STANDARD;                                        \
    else                                                                \
    if ((pri) > MAXPRI_STANDARD)                                        \
        (pri) = MAXPRI_STANDARD;                                        \
    MACRO_END
#endif  /* defined(PRI_SHIFT_2) */
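
/*
 * Sketch of the computation (illustrative; the actual shift values are
 * machine-dependent configuration): with a base priority of 31 and a
 * sched_usage large enough that the combined shifts subtract 4, the
 * scheduled priority becomes 27, then is clamped into the
 * [MINPRI_STANDARD, MAXPRI_STANDARD] band.  Heavier recent usage thus
 * lowers a timesharing thread's effective priority.
 */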
void
set_priority(
    register thread_t   thread,
    register int        priority)
{
    thread->priority = priority;
    compute_priority(thread, FALSE);
}
/*
 *  compute_priority:
 *
 *  Reset the current scheduled priority of the
 *  thread according to its base priority if the
 *  thread has not been promoted or depressed.
 *
 *  If the thread is timesharing, adjust according
 *  to recent cpu usage.
 *
 *  The thread *must* be locked by the caller.
 */
void
compute_priority(
    register thread_t   thread,
    boolean_t           override_depress)
{
    register int        priority;

    if (!(thread->sched_mode & TH_MODE_PROMOTED) &&
            (!(thread->sched_mode & TH_MODE_ISDEPRESSED) ||
                override_depress)) {
        if (thread->sched_mode & TH_MODE_TIMESHARE)
            do_priority_computation(thread, priority);
        else
            priority = thread->priority;

        set_sched_pri(thread, priority);
    }
}
/*
 *  compute_my_priority:
 *
 *  Version of compute_priority for the current thread.
 *  Caller must have the thread locked, and the thread must
 *  be timesharing and not depressed.
 *
 *  Only used for priority updates.
 */
void
compute_my_priority(
    register thread_t   thread)
{
    register int        priority;

    do_priority_computation(thread, priority);
    assert(thread->runq == RUN_QUEUE_NULL);
    thread->sched_pri = priority;
}
/*
 *  update_priority:
 *
 *  Cause the priority computation of a thread that has been
 *  sleeping or suspended to "catch up" with the system.  Thread
 *  *MUST* be locked by caller.  If thread is running, then this
 *  can only be called by the thread on itself.
 */
void
update_priority(
    register thread_t       thread)
{
    register unsigned int   ticks;
    register shift_t        shiftp;

    ticks = sched_tick - thread->sched_stamp;
    assert(ticks != 0);

    /*
     *  If asleep for more than 30 seconds forget all
     *  cpu_usage, else catch up on missed aging.
     *  5/8 ** n is approximated by the two shifts
     *  in the wait_shift array.
     */
    thread->sched_stamp += ticks;
    thread_timer_delta(thread);
    if (ticks > 30) {
        thread->cpu_usage = 0;
        thread->sched_usage = 0;
    }
    else {
        thread->cpu_usage += thread->cpu_delta;
        thread->sched_usage += thread->sched_delta;

        shiftp = &wait_shift[ticks];
        if (shiftp->shift2 > 0) {
            thread->cpu_usage =
                        (thread->cpu_usage >> shiftp->shift1) +
                        (thread->cpu_usage >> shiftp->shift2);
            thread->sched_usage =
                        (thread->sched_usage >> shiftp->shift1) +
                        (thread->sched_usage >> shiftp->shift2);
        }
        else {
            thread->cpu_usage =
                        (thread->cpu_usage >> shiftp->shift1) -
                        (thread->cpu_usage >> -(shiftp->shift2));
            thread->sched_usage =
                        (thread->sched_usage >> shiftp->shift1) -
                        (thread->sched_usage >> -(shiftp->shift2));
        }
    }

    thread->cpu_delta = 0;
    thread->sched_delta = 0;

    /*
     *  Check for fail-safe release.
     */
    if ((thread->sched_mode & TH_MODE_FAILSAFE) &&
            thread->sched_stamp >= thread->safe_release) {
        if (!(thread->safe_mode & TH_MODE_TIMESHARE)) {
            if (thread->safe_mode & TH_MODE_REALTIME) {
                thread->priority = BASEPRI_RTQUEUES;

                thread->sched_mode |= TH_MODE_REALTIME;
            }

            thread->sched_mode &= ~TH_MODE_TIMESHARE;

            if (thread->state & TH_RUN)
                pset_share_decr(thread->processor_set);

            if (!(thread->sched_mode & TH_MODE_ISDEPRESSED))
                set_sched_pri(thread, thread->priority);
        }

        thread->safe_mode = 0;
        thread->sched_mode &= ~TH_MODE_FAILSAFE;
    }

    /*
     *  Recompute scheduled priority if appropriate.
     */
    if ((thread->sched_mode & TH_MODE_TIMESHARE) &&
            !(thread->sched_mode & TH_MODE_PROMOTED) &&
            !(thread->sched_mode & TH_MODE_ISDEPRESSED)) {
        register int        new_pri;

        do_priority_computation(thread, new_pri);
        if (new_pri != thread->sched_pri) {
            run_queue_t     runq;

            runq = run_queue_remove(thread);
            thread->sched_pri = new_pri;
            if (runq != RUN_QUEUE_NULL)
                thread_setrun(thread, SCHED_TAILQ);
        }
    }
}
/*
 *  thread_switch_continue:
 *
 *  Continuation routine for a thread switch.
 *
 *  Just need to arrange the return value gets sent out correctly and that
 *  we cancel the timer or the depression called for by the options to the
 *  thread_switch call.
 */
void
_mk_sp_thread_switch_continue(void)
{
    register thread_t   self = current_thread();
    int                 wait_result = self->wait_result;
    int                 option = self->saved.swtch.option;

    if (option == SWITCH_OPTION_WAIT && wait_result != THREAD_TIMED_OUT)
        thread_cancel_timer();
    else
    if (option == SWITCH_OPTION_DEPRESS)
        _mk_sp_thread_depress_abort(self, FALSE);

    thread_syscall_return(KERN_SUCCESS);
    /*NOTREACHED*/
}
/*
 *  thread_switch:
 *
 *  Context switch.  User may supply thread hint.
 *
 *  Fixed priority threads that call this get what they asked for
 *  even if that violates priority order.
 */
kern_return_t
_mk_sp_thread_switch(
    thread_act_t            hint_act,
    int                     option,
    mach_msg_timeout_t      option_time)
{
    register thread_t       self = current_thread();
    spl_t                   s;

    /*
     *  Check and use thr_act hint if appropriate.  It is not
     *  appropriate to give a hint that shares the current shuttle.
     */
    if (hint_act != THR_ACT_NULL) {
        register thread_t       thread = act_lock_thread(hint_act);

        if (thread != THREAD_NULL &&
                thread != self &&
                thread->top_act == hint_act) {
            processor_t     processor;

            s = splsched();
            thread_lock(thread);

            /*
             *  Check if the thread is in the right pset,
             *  is not bound to a different processor,
             *  and that realtime is not involved.
             *
             *  Next, pull it off its run queue.  If it
             *  doesn't come, it's not eligible.
             */
            processor = current_processor();
            if (processor->current_pri < BASEPRI_RTQUEUES &&
                thread->sched_pri < BASEPRI_RTQUEUES &&
                thread->processor_set == processor->processor_set &&
                (thread->bound_processor == PROCESSOR_NULL ||
                 thread->bound_processor == processor) &&
                run_queue_remove(thread) != RUN_QUEUE_NULL) {
                /*
                 *  Got it!
                 */
                thread_unlock(thread);

                act_unlock_thread(hint_act);
                act_deallocate(hint_act);

                if (option == SWITCH_OPTION_WAIT)
                    assert_wait_timeout(option_time, THREAD_ABORTSAFE);
                else
                if (option == SWITCH_OPTION_DEPRESS)
                    _mk_sp_thread_depress_ms(option_time);

                self->saved.swtch.option = option;

                thread_run(self, _mk_sp_thread_switch_continue, thread);
                /* NOTREACHED */
            }

            thread_unlock(thread);
            splx(s);
        }

        act_unlock_thread(hint_act);
        act_deallocate(hint_act);
    }

    /*
     *  No handoff hint supplied, or hint was wrong.  Call thread_block() in
     *  hopes of running something else.  If nothing else is runnable,
     *  thread_block will detect this.  WARNING: thread_switch with no
     *  option will not do anything useful if the thread calling it is the
     *  highest priority thread (can easily happen with a collection
     *  of timesharing threads).
     */
    if (option == SWITCH_OPTION_WAIT)
        assert_wait_timeout(option_time, THREAD_ABORTSAFE);
    else
    if (option == SWITCH_OPTION_DEPRESS)
        _mk_sp_thread_depress_ms(option_time);

    self->saved.swtch.option = option;

    thread_block_reason(_mk_sp_thread_switch_continue, AST_YIELD);

    if (option == SWITCH_OPTION_WAIT)
        thread_cancel_timer();
    else
    if (option == SWITCH_OPTION_DEPRESS)
        _mk_sp_thread_depress_abort(self, FALSE);

    return (KERN_SUCCESS);
}
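
/*
 * Usage sketch (illustrative, from user space; not part of this file):
 * the corresponding Mach trap has the form
 *
 *     kern_return_t thread_switch(mach_port_t thread_name, int option,
 *                                 mach_msg_timeout_t option_time);
 *
 * so a polling thread might yield the processor for roughly 10 ms with
 *
 *     thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, 10);
 *
 * passing MACH_PORT_NULL when it has no handoff hint to offer.
 */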
/*
 *  Depress thread's priority to lowest possible for the specified interval,
 *  with an interval of zero resulting in no timeout being scheduled.
 */
void
_mk_sp_thread_depress_abstime(
    uint64_t                interval)
{
    register thread_t       self = current_thread();
    uint64_t                deadline;
    spl_t                   s;

    s = splsched();
    thread_lock(self);
    if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
        processor_t     myprocessor = self->last_processor;

        self->sched_pri = DEPRESSPRI;
        myprocessor->current_pri = self->sched_pri;
        self->sched_mode &= ~TH_MODE_PREEMPT;
        self->sched_mode |= TH_MODE_DEPRESS;

        if (interval != 0) {
            clock_absolutetime_interval_to_deadline(interval, &deadline);
            if (!timer_call_enter(&self->depress_timer, deadline))
                self->depress_timer_active++;
        }
    }
    thread_unlock(self);
    splx(s);
}
void
_mk_sp_thread_depress_ms(
    mach_msg_timeout_t      interval)
{
    uint64_t        abstime;

    clock_interval_to_absolutetime_interval(
                            interval, 1000*NSEC_PER_USEC, &abstime);
    _mk_sp_thread_depress_abstime(abstime);
}
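
/*
 * Note (illustrative): the scale factor 1000*NSEC_PER_USEC is one
 * millisecond expressed in nanoseconds, so an interval of 10 converts
 * to 10,000,000 ns of absolute time before the depression timer fires.
 */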
/*
 *  Priority depression expiration.
 */
void
thread_depress_expire(
    timer_call_param_t      p0,
    timer_call_param_t      p1)
{
    thread_t        thread = p0;
    spl_t           s;

    s = splsched();
    thread_lock(thread);
    if (--thread->depress_timer_active == 1) {
        thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
        compute_priority(thread, FALSE);
    }
    thread_unlock(thread);
    splx(s);
}
/*
 *  Prematurely abort priority depression if there is one.
 */
kern_return_t
_mk_sp_thread_depress_abort(
    register thread_t       thread,
    boolean_t               abortall)
{
    kern_return_t           result = KERN_NOT_DEPRESSED;
    spl_t                   s;

    s = splsched();
    thread_lock(thread);
    if (abortall || !(thread->sched_mode & TH_MODE_POLLDEPRESS)) {
        if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
            thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
            compute_priority(thread, FALSE);
            result = KERN_SUCCESS;
        }

        if (timer_call_cancel(&thread->depress_timer))
            thread->depress_timer_active--;
    }
    thread_unlock(thread);
    splx(s);

    return (result);
}
void
_mk_sp_thread_perhaps_yield(
    thread_t            self)
{
    spl_t               s;

    assert(self == current_thread());

    s = splsched();
    if (!(self->sched_mode & (TH_MODE_REALTIME|TH_MODE_TIMESHARE))) {
        extern uint64_t     max_poll_computation;
        extern int          sched_poll_yield_shift;
        uint64_t            total_computation, abstime;

        abstime = mach_absolute_time();
        total_computation = abstime - self->computation_epoch;
        total_computation += self->computation_metered;
        if (total_computation >= max_poll_computation) {
            processor_t     myprocessor = current_processor();
            ast_t           preempt;

            thread_lock(self);
            if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
                self->sched_pri = DEPRESSPRI;
                myprocessor->current_pri = self->sched_pri;
                self->sched_mode &= ~TH_MODE_PREEMPT;
            }
            self->computation_epoch = abstime;
            self->computation_metered = 0;
            self->sched_mode |= TH_MODE_POLLDEPRESS;

            abstime += (total_computation >> sched_poll_yield_shift);
            if (!timer_call_enter(&self->depress_timer, abstime))
                self->depress_timer_active++;
            thread_unlock(self);

            if ((preempt = csw_check(self, myprocessor)) != AST_NONE)
                ast_on(preempt);
        }
    }
    splx(s);
}