/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *** ??? The following lines were picked up when code was incorporated
 *** into this file from `kern/syscall_subr.c.'  These should be moved
 *** with the code if it moves again.  Otherwise, they should be trimmed,
 *** based on the files included above.
 */
#include <mach/boolean.h>
#include <mach/thread_switch.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/ipc_kobject.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <mach/policy.h>
#include <kern/syscall_subr.h>
#include <mach/mach_host_server.h>
#include <mach/mach_syscalls.h>
/*
 *** ??? End of lines picked up when code was incorporated
 *** into this file from `kern/syscall_subr.c.'
 */
#include <kern/mk_sp.h>
#include <kern/misc_protos.h>

#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/assert.h>
#include <kern/thread.h>
#include <mach/mach_host_server.h>
void	_mk_sp_thread_depress_priority(
	sf_object_t			policy,
	mach_msg_timeout_t		depress_time);
/*
 *** ??? The next two files supply the prototypes for `thread_set_policy()'
 *** and `thread_policy.'  These routines cannot stay here if they are
 *** exported Mach system calls.
 */
#include <mach/thread_act_server.h>
#include <mach/host_priv_server.h>
/*
 * Vector containing standard scheduling policy operations
 */
sp_ops_t	mk_sp_ops = {
	_mk_sp_thread_update_mpri,
	_mk_sp_thread_unblock,
	_mk_sp_thread_done,
	_mk_sp_thread_begin,
	_mk_sp_thread_dispatch,
	_mk_sp_thread_attach,
	_mk_sp_thread_detach,
	_mk_sp_thread_processor,
	_mk_sp_thread_processor_set,
	_mk_sp_thread_setup,
	_mk_sp_swtch_pri,
	_mk_sp_thread_switch,
	_mk_sp_thread_depress_abort,
	_mk_sp_thread_depress_timeout,
	_mk_sp_thread_runnable,
};
kern_return_t	thread_policy_common(
	thread_t			thread,
	integer_t			policy,
	integer_t			data,
	processor_set_t			pset);
/*
 * Standard operations for MK Scheduling Policy
 */

void
_mk_sp_thread_update_mpri(
	sf_object_t			policy,
	thread_t			thread)
{
	if (thread->sched_stamp != sched_tick)
		update_priority(thread);
}
void
_mk_sp_thread_unblock(
	sf_object_t			policy,
	thread_t			thread)
{
	/* indicate thread is now runnable */
	thread->sp_state = MK_SP_RUNNABLE;

	/* place thread at end of appropriate run queue */
	if (!(thread->state & TH_IDLE))
		thread_setrun(thread, TRUE, TAIL_Q);
}
void
_mk_sp_thread_done(
	sf_object_t			policy,
	thread_t			old_thread)
{
	processor_t		myprocessor = cpu_to_processor(cpu_number());

	/*
	 * A running thread is being taken off a processor:
	 *
	 *   - update the thread's `unconsumed_quantum' field
	 *   - update the thread's state field
	 */
	old_thread->unconsumed_quantum = myprocessor->quantum;

	if (old_thread->state & TH_WAIT)
		old_thread->sp_state = MK_SP_BLOCKED;
}
void
_mk_sp_thread_begin(
	sf_object_t			policy,
	thread_t			thread)
{
	processor_t		myprocessor = cpu_to_processor(cpu_number());
	processor_set_t		pset;

	pset = myprocessor->processor_set;

	/*
	 * The designated thread is about to begin execution:
	 *
	 *   - update the processor's `quantum' field
	 */
	/* check for legal thread state */
	assert(thread->sp_state == MK_SP_RUNNABLE);

	if (thread->policy & (POLICY_RR|POLICY_FIFO))
		myprocessor->quantum = thread->unconsumed_quantum;
	else
		myprocessor->quantum = (thread->bound_processor ?
						min_quantum : pset->set_quantum);
}
void
_mk_sp_thread_dispatch(
	sf_object_t			policy,
	thread_t			old_thread)
{
	if (old_thread->sp_state & MK_SP_RUNNABLE) {
		if (old_thread->reason & AST_QUANTUM) {
			thread_setrun(old_thread, FALSE, TAIL_Q);
			old_thread->unconsumed_quantum = min_quantum;
		}
		else
			thread_setrun(old_thread, FALSE, HEAD_Q);
	}

	if (old_thread->sp_state & MK_SP_ATTACHED) {
		/* indicate thread is now runnable */
		old_thread->sp_state = MK_SP_RUNNABLE;

		/* place thread at end of appropriate run queue */
		thread_setrun(old_thread, FALSE, TAIL_Q);
	}
}
/*
 * Thread must already be locked.
 */
void
_mk_sp_thread_attach(
	sf_object_t			policy,
	thread_t			thread)
{
	thread->sp_state = MK_SP_ATTACHED;

	thread->max_priority = thread->priority = BASEPRI_DEFAULT;
	thread->depress_priority = -1;

	thread->cpu_usage = 0;
	thread->sched_usage = 0;
	thread->sched_stamp = 0;

	thread->unconsumed_quantum = min_quantum;

	/* Reflect this policy in thread data structure */
	thread->policy = policy->policy_id;
}
/*
 * Check to make sure that thread is removed from run
 * queues and active execution; and clear pending
 * priority depression.
 *
 * Thread must already be locked.
 */
void
_mk_sp_thread_detach(
	sf_object_t			policy,
	thread_t			thread)
{
	struct run_queue	*rq;

	assert(thread->policy == policy->policy_id);

	/* make sure that the thread is no longer on any run queue */
	if (thread->runq != RUN_QUEUE_NULL) {
		rq = rem_runq(thread);
		if (rq == RUN_QUEUE_NULL) {
			panic("mk_sp_thread_detach: missed thread");
		}
	}

	/* clear pending priority depression */
	if (thread->depress_priority >= 0) {
		thread->priority = thread->depress_priority;
		thread->depress_priority = -1;
		if (thread_call_cancel(&thread->depress_timer))
			thread_call_enter(&thread->depress_timer);
	}

	/* clear the thread's policy field */
	thread->policy = POLICY_NULL;
}
void
_mk_sp_thread_processor(
	sf_object_t			policy,
	thread_t			thread,
	processor_t			processor)
{
}
void
_mk_sp_thread_processor_set(
	sf_object_t			policy,
	thread_t			thread,
	processor_set_t			processor_set)
{
	pset_add_thread(processor_set, thread);
}
void
_mk_sp_thread_setup(
	sf_object_t			policy,
	thread_t			thread)
{
	/*
	 * Determine thread's state.  (It may be an "older" thread
	 * that has just been associated with this policy.)
	 */
	if (thread->state & TH_WAIT)
		thread->sp_state = MK_SP_BLOCKED;

	/* recompute priority */
	thread->sched_stamp = sched_tick;
	compute_priority(thread, TRUE);
}
/*
 * thread_priority_internal:
 *
 * Kernel-internal work function for thread_priority().  Called
 * with thread "properly locked" to ensure synchrony with RPC
 * (see act_lock_thread()).
 */
kern_return_t
thread_priority_internal(
	thread_t		thread,
	int			priority)
{
	kern_return_t		result = KERN_SUCCESS;
	spl_t			s;

	s = splsched();
	thread_lock(thread);

	/*
	 *	Check for violation of max priority
	 */
	if (priority > thread->max_priority)
		priority = thread->max_priority;

	/*
	 *	Set priorities.  If a depression is in progress,
	 *	change the priority to restore.
	 */
	if (thread->depress_priority >= 0)
		thread->depress_priority = priority;
	else {
		thread->priority = priority;
		compute_priority(thread, TRUE);

		/*
		 *	If the current thread has changed its
		 *	priority let the ast code decide whether
		 *	a different thread should run.
		 */
		if (thread == current_thread())
			ast_on(AST_BLOCK);
	}

	thread_unlock(thread);
	splx(s);

	return (result);
}
/*
 * thread_policy_common:
 *
 * Set scheduling policy for thread.  If pset == PROCESSOR_SET_NULL,
 * policy will be checked to make sure it is enabled.
 */
kern_return_t
thread_policy_common(
	thread_t		thread,
	integer_t		policy,
	integer_t		data,
	processor_set_t		pset)
{
	kern_return_t		result = KERN_SUCCESS;
	spl_t			s;

	if (	thread == THREAD_NULL	||
			invalid_policy(policy)	)
		return(KERN_INVALID_ARGUMENT);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Check if changing policy.
	 */
	if (policy != thread->policy) {
		/*
		 *	Changing policy.  Check if new policy is allowed.
		 */
		if (	pset == PROCESSOR_SET_NULL				&&
				(thread->processor_set->policies & policy) == 0	)
			result = KERN_FAILURE;
		else
		if (pset != thread->processor_set)
			result = KERN_FAILURE;
		else {
			/*
			 *	Changing policy.  Calculate new
			 *	priority.
			 */
			thread->policy = policy;
			compute_priority(thread, TRUE);
		}
	}

	thread_unlock(thread);
	splx(s);

	return (result);
}
/*
 *	thread_set_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given thread.  Policy can be any policy implemented by the
 *	processor set, whether enabled or not.
 */
kern_return_t
thread_set_policy(
	thread_act_t			thr_act,
	processor_set_t			pset,
	policy_t			policy,
	policy_base_t			base,
	mach_msg_type_number_t		base_count,
	policy_limit_t			limit,
	mach_msg_type_number_t		limit_count)
{
	thread_t			thread;
	int				max, bas, dat, incr;
	kern_return_t			result = KERN_SUCCESS;

	if (	thr_act == THR_ACT_NULL		||
			pset == PROCESSOR_SET_NULL	)
		return (KERN_INVALID_ARGUMENT);
	thread = act_lock_thread(thr_act);
	if (thread == THREAD_NULL) {
		act_unlock_thread(thr_act);

		return(KERN_INVALID_ARGUMENT);
	}

	if (pset != thread->processor_set) {
		act_unlock_thread(thr_act);

		return(KERN_FAILURE);
	}
	switch (policy) {

	case POLICY_RR:
	{
		policy_rr_base_t		rr_base = (policy_rr_base_t) base;
		policy_rr_limit_t		rr_limit = (policy_rr_limit_t) limit;

		if (	base_count != POLICY_RR_BASE_COUNT	||
				limit_count != POLICY_RR_LIMIT_COUNT	) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		dat = rr_base->quantum;
		bas = rr_base->base_priority;
		max = rr_limit->max_priority;
		if (invalid_pri(bas) || invalid_pri(max)) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		break;
	}
	case POLICY_FIFO:
	{
		policy_fifo_base_t		fifo_base = (policy_fifo_base_t) base;
		policy_fifo_limit_t		fifo_limit = (policy_fifo_limit_t) limit;

		if (	base_count != POLICY_FIFO_BASE_COUNT	||
				limit_count != POLICY_FIFO_LIMIT_COUNT	) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		dat = 0;
		bas = fifo_base->base_priority;
		max = fifo_limit->max_priority;
		if (invalid_pri(bas) || invalid_pri(max)) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		break;
	}
	case POLICY_TIMESHARE:
	{
		policy_timeshare_base_t		ts_base = (policy_timeshare_base_t) base;
		policy_timeshare_limit_t	ts_limit =
						(policy_timeshare_limit_t) limit;

		if (	base_count != POLICY_TIMESHARE_BASE_COUNT	||
				limit_count != POLICY_TIMESHARE_LIMIT_COUNT	) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		dat = 0;
		bas = ts_base->base_priority;
		max = ts_limit->max_priority;
		if (invalid_pri(bas) || invalid_pri(max)) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		break;
	}

	default:
		result = KERN_INVALID_POLICY;
		break;
	}
	if (result != KERN_SUCCESS) {
		act_unlock_thread(thr_act);

		return(result);
	}

	result = thread_priority_internal(thread, bas);
	if (result == KERN_SUCCESS)
		result = thread_policy_common(thread, policy, dat, pset);
	act_unlock_thread(thr_act);

	return(result);
}
/*
 *	thread_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given thread.  Policy must be a policy which is enabled for the
 *	processor set.  Change contained threads if requested.
 */
kern_return_t
thread_policy(
	thread_act_t			thr_act,
	policy_t			policy,
	policy_base_t			base,
	mach_msg_type_number_t		count,
	boolean_t			set_limit)
{
	thread_t			thread;
	processor_set_t			pset;
	kern_return_t			result = KERN_SUCCESS;
	policy_limit_t			limit;
	int				limcount;
	policy_rr_limit_data_t		rr_limit;
	policy_fifo_limit_data_t	fifo_limit;
	policy_timeshare_limit_data_t	ts_limit;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);
	thread = act_lock_thread(thr_act);
	pset = thread->processor_set;
	if (	thread == THREAD_NULL		||
			pset == PROCESSOR_SET_NULL	){
		act_unlock_thread(thr_act);

		return(KERN_INVALID_ARGUMENT);
	}

	if (	invalid_policy(policy)		||
			(pset->policies & policy) == 0	) {
		act_unlock_thread(thr_act);

		return(KERN_INVALID_POLICY);
	}
	if (set_limit) {
		/*
		 *	Set scheduling limits to base priority.
		 */
		switch (policy) {

		case POLICY_RR:
		{
			policy_rr_base_t		rr_base;

			if (count != POLICY_RR_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_RR_LIMIT_COUNT;
			rr_base = (policy_rr_base_t) base;
			rr_limit.max_priority = rr_base->base_priority;
			limit = (policy_limit_t) &rr_limit;

			break;
		}
		case POLICY_FIFO:
		{
			policy_fifo_base_t		fifo_base;

			if (count != POLICY_FIFO_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_FIFO_LIMIT_COUNT;
			fifo_base = (policy_fifo_base_t) base;
			fifo_limit.max_priority = fifo_base->base_priority;
			limit = (policy_limit_t) &fifo_limit;

			break;
		}
		case POLICY_TIMESHARE:
		{
			policy_timeshare_base_t		ts_base;

			if (count != POLICY_TIMESHARE_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_TIMESHARE_LIMIT_COUNT;
			ts_base = (policy_timeshare_base_t) base;
			ts_limit.max_priority = ts_base->base_priority;
			limit = (policy_limit_t) &ts_limit;

			break;
		}

		default:
			result = KERN_INVALID_POLICY;
			break;
		}

	}
	else {
		/*
		 *	Use current scheduling limits.  Ensure that the
		 *	new base priority will not exceed current limits.
		 */
		switch (policy) {

		case POLICY_RR:
		{
			policy_rr_base_t		rr_base;

			if (count != POLICY_RR_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_RR_LIMIT_COUNT;
			rr_base = (policy_rr_base_t) base;
			if (rr_base->base_priority > thread->max_priority) {
				result = KERN_POLICY_LIMIT;
				break;
			}

			rr_limit.max_priority = thread->max_priority;
			limit = (policy_limit_t) &rr_limit;

			break;
		}
		case POLICY_FIFO:
		{
			policy_fifo_base_t		fifo_base;

			if (count != POLICY_FIFO_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_FIFO_LIMIT_COUNT;
			fifo_base = (policy_fifo_base_t) base;
			if (fifo_base->base_priority > thread->max_priority) {
				result = KERN_POLICY_LIMIT;
				break;
			}

			fifo_limit.max_priority = thread->max_priority;
			limit = (policy_limit_t) &fifo_limit;

			break;
		}
		case POLICY_TIMESHARE:
		{
			policy_timeshare_base_t		ts_base;

			if (count != POLICY_TIMESHARE_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_TIMESHARE_LIMIT_COUNT;
			ts_base = (policy_timeshare_base_t) base;
			if (ts_base->base_priority > thread->max_priority) {
				result = KERN_POLICY_LIMIT;
				break;
			}

			ts_limit.max_priority = thread->max_priority;
			limit = (policy_limit_t) &ts_limit;

			break;
		}

		default:
			result = KERN_INVALID_POLICY;
			break;
		}

	}
	act_unlock_thread(thr_act);

	if (result == KERN_SUCCESS)
		result = thread_set_policy(thr_act, pset,
					   policy, base, count, limit, limcount);

	return(result);
}
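/*
 * Illustrative caller (a sketch added for exposition, not part of the
 * original file): a user task reaches thread_policy() through the MIG
 * stub.  The `make_timeshare' helper below is hypothetical; it requests
 * a timesharing base priority of 10 and collapses the limit to the base
 * by passing set_limit == TRUE.
 */
#if	0
kern_return_t
make_timeshare(
	thread_act_t		act)
{
	struct policy_timeshare_base	base;

	base.base_priority = 10;
	return (thread_policy(act, POLICY_TIMESHARE, (policy_base_t) &base,
				POLICY_TIMESHARE_BASE_COUNT, TRUE));
}
#endif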
/*
 *	Define shifts for simulating (5/8)**n
 */
shift_data_t	wait_shift[32] = {
	{1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
	{5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
	{11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
	{16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}};
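/*
 * Worked example (added for illustration): each {shift1,shift2} pair
 * approximates one multiplication by (5/8)**n using two shifts.  A
 * positive shift2 means the two terms are added; a negative one means
 * the second term is subtracted.  For n == 1, wait_shift[1] == {1,3}:
 *
 *	(usage >> 1) + (usage >> 3) == usage * (1/2 + 1/8) == usage * 5/8
 *
 * and for n == 2, wait_shift[2] == {1,-3}:
 *
 *	(usage >> 1) - (usage >> 3) == usage * 0.375 ~= (5/8)**2 == 0.390625
 */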
/*
 *	do_priority_computation:
 *
 *	Calculate new priority for thread based on its base priority plus
 *	accumulated usage.  PRI_SHIFT and PRI_SHIFT_2 convert from
 *	usage to priorities.  SCHED_SHIFT converts for the scaling
 *	of the sched_usage field by SCHED_SCALE.  This scaling comes
 *	from the multiplication by sched_load (thread_timer_delta)
 *	in sched.h.  sched_load is calculated as a scaled overload
 *	factor in compute_mach_factor (mach_factor.c).
 */
#ifdef	PRI_SHIFT_2
#if	PRI_SHIFT_2 > 0
#define do_priority_computation(thread, pri)				\
	MACRO_BEGIN							\
	(pri) = (thread)->priority	/* start with base priority */	\
	    - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT))	\
	    - ((thread)->sched_usage >> (PRI_SHIFT_2 + SCHED_SHIFT));	\
	if ((pri) < MINPRI_STANDARD)					\
		(pri) = MINPRI_STANDARD;				\
	else								\
	if ((pri) > MAXPRI_STANDARD)					\
		(pri) = MAXPRI_STANDARD;				\
	MACRO_END
#else	/* PRI_SHIFT_2 */
#define do_priority_computation(thread, pri)				\
	MACRO_BEGIN							\
	(pri) = (thread)->priority	/* start with base priority */	\
	    - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT))	\
	    + ((thread)->sched_usage >> (SCHED_SHIFT - PRI_SHIFT_2));	\
	if ((pri) < MINPRI_STANDARD)					\
		(pri) = MINPRI_STANDARD;				\
	else								\
	if ((pri) > MAXPRI_STANDARD)					\
		(pri) = MAXPRI_STANDARD;				\
	MACRO_END
#endif	/* PRI_SHIFT_2 */
#else	/* defined(PRI_SHIFT_2) */
#define do_priority_computation(thread, pri)				\
	MACRO_BEGIN							\
	(pri) = (thread)->priority	/* start with base priority */	\
	    - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT));	\
	if ((pri) < MINPRI_STANDARD)					\
		(pri) = MINPRI_STANDARD;				\
	else								\
	if ((pri) > MAXPRI_STANDARD)					\
		(pri) = MAXPRI_STANDARD;				\
	MACRO_END
#endif	/* defined(PRI_SHIFT_2) */
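/*
 * Illustrative arithmetic (added; the shift values are hypothetical,
 * since PRI_SHIFT and SCHED_SHIFT are machine-dependent): if a thread's
 * base priority is 31 and its scaled usage satisfies
 * sched_usage >> (PRI_SHIFT + SCHED_SHIFT) == 6, the simple form above
 * yields pri == 31 - 6 == 25, which is then clamped to the
 * [MINPRI_STANDARD, MAXPRI_STANDARD] range.
 */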
/*
 *	compute_priority:
 *
 *	Compute the effective priority of the specified thread.
 *	The effective priority computation is as follows:
 *
 *	Take the base priority for this thread and add
 *	to it an increment derived from its cpu_usage.
 *
 *	The thread *must* be locked by the caller.
 */
void
compute_priority(
	register thread_t	thread,
	boolean_t		resched)
{
	register int		pri;

	if (thread->policy == POLICY_TIMESHARE) {
		do_priority_computation(thread, pri);
		if (thread->depress_priority < 0)
			set_pri(thread, pri, resched);
		else
			thread->depress_priority = pri;
	}
	else
		set_pri(thread, thread->priority, resched);
}
/*
 *	compute_my_priority:
 *
 *	Version of compute priority for current thread or thread
 *	being manipulated by scheduler (going on or off a runq).
 *	Only used for priority updates.  Policy or priority changes
 *	must call compute_priority above.  Caller must have thread
 *	locked and know it is timesharing and not depressed.
 */
void
compute_my_priority(
	register thread_t	thread)
{
	register int		pri;

	do_priority_computation(thread, pri);
	assert(thread->runq == RUN_QUEUE_NULL);
	thread->sched_pri = pri;
}
/*
 * Instrumentation for update_priority(): records the most recent
 * aging computation for the idled and loaded threads.
 */
struct mk_sp_usage {
	natural_t	cpu_delta, sched_delta;
	natural_t	sched_tick, ticks;
	natural_t	cpu_usage, sched_usage,
			aged_cpu, aged_sched;
	thread_t	thread;
} idled_info, loaded_info;
/*
 *	update_priority
 *
 *	Cause the priority computation of a thread that has been
 *	sleeping or suspended to "catch up" with the system.  Thread
 *	*MUST* be locked by caller.  If thread is running, then this
 *	can only be called by the thread on itself.
 */
void
update_priority(
	register thread_t	thread)
{
	register unsigned int	ticks;
	register shift_t	shiftp;

	ticks = sched_tick - thread->sched_stamp;

	/*
	 *	If asleep for more than 30 seconds forget all
	 *	cpu_usage, else catch up on missed aging.
	 *	5/8 ** n is approximated by the two shifts
	 *	in the wait_shift array.
	 */
	thread->sched_stamp += ticks;
	thread_timer_delta(thread);
	if (ticks > 30) {
		thread->cpu_usage = 0;
		thread->sched_usage = 0;
	}
	else {
		struct mk_sp_usage	*sp_usage = NULL;

		thread->cpu_usage += thread->cpu_delta;
		thread->sched_usage += thread->sched_delta;

		if (thread->state & TH_IDLE)
			sp_usage = &idled_info;
		else
		if (thread == loaded_info.thread)
			sp_usage = &loaded_info;

		if (sp_usage != NULL) {
			sp_usage->cpu_delta = thread->cpu_delta;
			sp_usage->sched_delta = thread->sched_delta;
			sp_usage->sched_tick = thread->sched_stamp;
			sp_usage->ticks = ticks;
			sp_usage->cpu_usage = thread->cpu_usage;
			sp_usage->sched_usage = thread->sched_usage;
			sp_usage->thread = thread;
		}

		shiftp = &wait_shift[ticks];
		if (shiftp->shift2 > 0) {
			thread->cpu_usage =
				(thread->cpu_usage >> shiftp->shift1) +
				(thread->cpu_usage >> shiftp->shift2);
			thread->sched_usage =
				(thread->sched_usage >> shiftp->shift1) +
				(thread->sched_usage >> shiftp->shift2);
		}
		else {
			thread->cpu_usage =
				(thread->cpu_usage >> shiftp->shift1) -
				(thread->cpu_usage >> -(shiftp->shift2));
			thread->sched_usage =
				(thread->sched_usage >> shiftp->shift1) -
				(thread->sched_usage >> -(shiftp->shift2));
		}

		if (sp_usage != NULL) {
			sp_usage->aged_cpu = thread->cpu_usage;
			sp_usage->aged_sched = thread->sched_usage;
		}

		thread->cpu_delta = 0;
		thread->sched_delta = 0;
	}

	/*
	 *	Recompute priority if appropriate.
	 */
	if (	thread->policy == POLICY_TIMESHARE	&&
			thread->depress_priority < 0	) {
		register int		new_pri;
		run_queue_t		runq;

		do_priority_computation(thread, new_pri);
		if (new_pri != thread->sched_pri) {
			runq = rem_runq(thread);
			thread->sched_pri = new_pri;
			if (runq != RUN_QUEUE_NULL)
				thread_setrun(thread, TRUE, TAIL_Q);
		}
	}
}
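/*
 * Worked decay trace (added for illustration): a thread with
 * cpu_usage == 1000 that has missed 3 ticks uses wait_shift[3] == {2,-7}:
 *
 *	(1000 >> 2) - (1000 >> 7) == 250 - 7 == 243 ~= 1000 * (5/8)**3
 *
 * since (5/8)**3 == 0.244 to three places.
 */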
/*
 *	`mk_sp_swtch_pri()' attempts to context switch (logic in
 *	thread_block no-ops the context switch if nothing would happen).
 *	A boolean is returned that indicates whether there is anything
 *	else useful that the processor could do.
 *
 *	This boolean can be used by a thread waiting on a
 *	lock or condition:  If FALSE is returned, the thread is justified
 *	in becoming a resource hog by continuing to spin because there's
 *	nothing else useful that the processor could do.  If TRUE is
 *	returned, the thread should make one more check on the
 *	lock and then be a good citizen and really suspend.
 */
void
_mk_sp_swtch_pri(
	sf_object_t			policy,
	int				pri)
{
	register thread_t	self = current_thread();
	extern natural_t	min_quantum_ms;

	/*
	 *	XXX need to think about depression duration.
	 *	XXX currently using min quantum.
	 */
	_mk_sp_thread_depress_priority(policy, min_quantum_ms);

	thread_block((void (*)(void)) 0);

	_mk_sp_thread_depress_abort(policy, self);
}
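/*
 * Illustrative spin-wait pattern (added; `mutex_try' is a stand-in for
 * whatever locking primitive the caller uses): a user-level client of
 * the swtch_pri trap backs off as the comment above describes, yielding
 * and depressing priority while the lock holder runs.
 */
#if	0
	while (!mutex_try(&lock)) {
		if (swtch_pri(0))	/* other threads are runnable, */
			break;		/* so stop spinning and block  */
	}
#endif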
/*
 *	thread_switch_continue:
 *
 *	Continuation routine for a thread switch.
 *
 *	Just need to arrange the return value gets sent out correctly and that
 *	we cancel the timer or the depression called for by the options to the
 *	thread_switch call.
 */
void
_mk_sp_thread_switch_continue(void)
{
	thread_t	self = current_thread();
	int		wait_result = self->wait_result;
	int		option = self->saved.swtch.option;
	sf_object_t	policy = self->saved.swtch.policy;

	if (option == SWITCH_OPTION_WAIT && wait_result != THREAD_TIMED_OUT)
		thread_cancel_timer();
	else
	if (option == SWITCH_OPTION_DEPRESS)
		_mk_sp_thread_depress_abort(policy, self);

	thread_syscall_return(KERN_SUCCESS);
	/*NOTREACHED*/
}
/*
 *	Context switch.  User may supply thread hint.
 *
 *	Fixed priority threads that call this get what they asked for
 *	even if that violates priority order.
 */
kern_return_t
_mk_sp_thread_switch(
	sf_object_t			policy,
	thread_act_t			hint_act,
	int				option,
	mach_msg_timeout_t		option_time)
{
	register thread_t		self = current_thread();
	register processor_t		myprocessor;

	/*
	 *	Check and use thr_act hint if appropriate.  It is not
	 *	appropriate to give a hint that shares the current shuttle.
	 */
	if (hint_act != THR_ACT_NULL) {
		register thread_t	thread = act_lock_thread(hint_act);

		if (	thread != THREAD_NULL		&&
				thread->top_act == hint_act	) {
			thread_lock(thread);

			/*
			 *	Check if the thread is in the right pset.  Then
			 *	pull it off its run queue.  If it
			 *	doesn't come, then it's not eligible.
			 */
			if (	thread->processor_set == self->processor_set	&&
					rem_runq(thread) != RUN_QUEUE_NULL	) {
				/*
				 *	Hah, got it!!
				 */
				if (thread->policy & (POLICY_FIFO|POLICY_RR)) {
					myprocessor = current_processor();

					myprocessor->quantum = thread->unconsumed_quantum;
					myprocessor->first_quantum = TRUE;
				}
				thread_unlock(thread);

				act_unlock_thread(hint_act);
				act_deallocate(hint_act);

				if (option == SWITCH_OPTION_WAIT)
					assert_wait_timeout(option_time, THREAD_ABORTSAFE);
				else
				if (option == SWITCH_OPTION_DEPRESS)
					_mk_sp_thread_depress_priority(policy, option_time);

				self->saved.swtch.policy = policy;
				self->saved.swtch.option = option;

				thread_run(self, _mk_sp_thread_switch_continue, thread);
				/*NOTREACHED*/
			}

			thread_unlock(thread);
		}

		act_unlock_thread(hint_act);
		act_deallocate(hint_act);
	}
	/*
	 *	No handoff hint supplied, or hint was wrong.  Call thread_block() in
	 *	hopes of running something else.  If nothing else is runnable,
	 *	thread_block will detect this.  WARNING: thread_switch with no
	 *	option will not do anything useful if the thread calling it is the
	 *	highest priority thread (can easily happen with a collection
	 *	of timesharing threads).
	 */
	mp_disable_preemption();
	myprocessor = current_processor();
	if (	option != SWITCH_OPTION_NONE			||
			myprocessor->processor_set->runq.count > 0	||
			myprocessor->runq.count > 0		) {
		myprocessor->first_quantum = FALSE;
		mp_enable_preemption();

		if (option == SWITCH_OPTION_WAIT)
			assert_wait_timeout(option_time, THREAD_ABORTSAFE);
		else
		if (option == SWITCH_OPTION_DEPRESS)
			_mk_sp_thread_depress_priority(policy, option_time);

		self->saved.swtch.policy = policy;
		self->saved.swtch.option = option;

		thread_block(_mk_sp_thread_switch_continue);
	}
	else
		mp_enable_preemption();
	if (option == SWITCH_OPTION_WAIT)
		thread_cancel_timer();
	else
	if (option == SWITCH_OPTION_DEPRESS)
		_mk_sp_thread_depress_abort(policy, self);

	return (KERN_SUCCESS);
}
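/*
 * Illustrative user-space caller (a sketch added for exposition, not
 * part of this file): the Mach thread_switch() trap reaches this
 * routine through the scheduling framework.  Here a waiter yields with
 * its priority depressed for up to 10 ms; MACH_PORT_NULL means "no
 * handoff hint".
 */
#if	0
	(void) thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, 10);
#endif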
/*
 *	mk_sp_thread_depress_priority
 *
 *	Depress thread's priority to lowest possible for specified period.
 *	Intended for use when thread wants a lock but doesn't know which
 *	other thread is holding it.  As with thread_switch, fixed
 *	priority threads get exactly what they asked for.  Users access
 *	this by the SWITCH_OPTION_DEPRESS option to thread_switch.  A time
 *	of zero will result in no timeout being scheduled.
 */
void
_mk_sp_thread_depress_priority(
	sf_object_t			policy,
	mach_msg_timeout_t		interval)
{
	register thread_t	self = current_thread();
	AbsoluteTime		deadline;
	boolean_t		release = FALSE;
	spl_t			s;

	s = splsched();
	thread_lock(self);

	if (self->policy == policy->policy_id) {
		/*
		 *	If we haven't already saved the priority to be restored
		 *	(depress_priority), then save it.
		 */
		if (self->depress_priority < 0)
			self->depress_priority = self->priority;
		else
		if (thread_call_cancel(&self->depress_timer))
			release = TRUE;

		self->sched_pri = self->priority = DEPRESSPRI;

		if (interval != 0) {
			clock_interval_to_deadline(
					interval, 1000*NSEC_PER_USEC, &deadline);
			thread_call_enter_delayed(&self->depress_timer, deadline);
		}
	}

	thread_unlock(self);
	splx(s);

	if (release)
		thread_deallocate(self);
}
/*
 *	mk_sp_thread_depress_timeout:
 *
 *	Timeout routine for priority depression.
 */
void
_mk_sp_thread_depress_timeout(
	sf_object_t			policy,
	register thread_t		thread)
{
	spl_t		s;

	s = splsched();
	thread_lock(thread);
	if (thread->policy == policy->policy_id) {
		/*
		 *	If we lose a race with mk_sp_thread_depress_abort,
		 *	then depress_priority might be -1.
		 */
		if (	thread->depress_priority >= 0				&&
				!thread_call_is_delayed(&thread->depress_timer, NULL)	) {
			thread->priority = thread->depress_priority;
			thread->depress_priority = -1;
			compute_priority(thread, FALSE);
		}
		else
		if (thread->depress_priority == -2) {
			/*
			 *	Thread was temporarily undepressed by thread_suspend, to
			 *	be redepressed in special_handler as it blocks.  We need to
			 *	prevent special_handler from redepressing it, since depression
			 *	has timed out.
			 */
			thread->depress_priority = -1;
		}
	}
	thread_unlock(thread);
	splx(s);
}
/*
 *	mk_sp_thread_depress_abort:
 *
 *	Prematurely abort priority depression if there is one.
 */
kern_return_t
_mk_sp_thread_depress_abort(
	sf_object_t			policy,
	register thread_t		thread)
{
	kern_return_t		result = KERN_SUCCESS;
	boolean_t		release = FALSE;
	spl_t			s;

	s = splsched();
	thread_lock(thread);

	if (thread->policy == policy->policy_id) {
		if (thread->depress_priority >= 0) {
			if (thread_call_cancel(&thread->depress_timer))
				release = TRUE;
			thread->priority = thread->depress_priority;
			thread->depress_priority = -1;
			compute_priority(thread, FALSE);
		}
		else
			result = KERN_NOT_DEPRESSED;
	}

	thread_unlock(thread);
	splx(s);

	if (release)
		thread_deallocate(thread);

	return (result);
}
/*
 *	mk_sp_thread_runnable:
 *
 *	Return TRUE iff policy believes thread is runnable
 */
boolean_t
_mk_sp_thread_runnable(
	sf_object_t			policy,
	thread_t			thread)
{
	return (thread->sp_state == MK_SP_RUNNABLE);
}