/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */

/***
 *** ??? The following lines were picked up when code was incorporated
 *** into this file from `kern/syscall_subr.c.'  These should be moved
 *** with the code if it moves again.  Otherwise, they should be trimmed,
 *** based on the files included above.
 ***/

#include <mach/boolean.h>
#include <mach/thread_switch.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/ipc_kobject.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/spl.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ast.h>
#include <mach/policy.h>

#include <kern/syscall_subr.h>
#include <mach/mach_host_server.h>
#include <mach/mach_syscalls.h>

/***
 *** ??? End of lines picked up when code was incorporated
 *** into this file from `kern/syscall_subr.c.'
 ***/

#include <kern/sf.h>
#include <kern/mk_sp.h>
#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/assert.h>
#include <kern/thread.h>
#include <mach/mach_host_server.h>

/* Forwards */
void	_mk_sp_thread_depress_priority(
			sf_object_t			policy,
			mach_msg_timeout_t	depress_time);

/***
 *** ??? The next two files supply the prototypes for `thread_set_policy()'
 *** and `thread_policy.'  These routines cannot stay here if they are
 *** exported Mach system calls.
 ***/
#include <mach/thread_act_server.h>
#include <mach/host_priv_server.h>

/*
 * Vector containing standard scheduling policy operations
 */
sp_ops_t	mk_sp_ops = {
	_mk_sp_thread_update_mpri,
	_mk_sp_thread_unblock,
	_mk_sp_thread_done,
	_mk_sp_thread_begin,
	_mk_sp_thread_dispatch,
	_mk_sp_thread_attach,
	_mk_sp_thread_detach,
	_mk_sp_thread_processor,
	_mk_sp_thread_processor_set,
	_mk_sp_thread_setup,
	_mk_sp_swtch_pri,
	_mk_sp_thread_switch,
	_mk_sp_thread_depress_abort,
	_mk_sp_thread_depress_timeout,
	_mk_sp_thread_runnable,
};
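
/*
 * The entries above are this policy's callbacks; the scheduling
 * framework (see kern/sf.h, included above) invokes them indirectly on
 * thread state transitions (unblock, begin/done on a processor,
 * dispatch), on attach/detach to the policy, and for the thread_switch,
 * swtch_pri and priority-depression services defined below.
 */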

/* Forwards */
kern_return_t	thread_policy_common(
			thread_t			thread,
			integer_t			policy,
			integer_t			data,
			processor_set_t		pset);

/*
 * Standard operations for MK Scheduling Policy
 */

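/*
 * _mk_sp_thread_update_mpri:
 *
 * Bring the thread's priority aging up to date if it has not yet been
 * updated during the current scheduler tick (see update_priority below).
 */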
sf_return_t
_mk_sp_thread_update_mpri(
	sf_object_t			policy,
	thread_t			thread)
{
	if (thread->sched_stamp != sched_tick)
		update_priority(thread);

	return(SF_SUCCESS);
}

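/*
 * _mk_sp_thread_unblock:
 *
 * Mark a waking thread runnable and, unless it is the idle thread,
 * place it at the tail of the appropriate run queue.
 */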
sf_return_t
_mk_sp_thread_unblock(
	sf_object_t			policy,
	thread_t			thread)
{
	/* indicate thread is now runnable */
	thread->sp_state = MK_SP_RUNNABLE;

	/* place thread at end of appropriate run queue */
	if (!(thread->state & TH_IDLE))
		thread_setrun(thread, TRUE, TAIL_Q);

	return(SF_SUCCESS);
}

sf_return_t
_mk_sp_thread_done(
	sf_object_t			policy,
	thread_t			old_thread)
{
	processor_t		myprocessor = cpu_to_processor(cpu_number());

	/*
	 * A running thread is being taken off a processor:
	 *
	 *  - update the thread's `unconsumed_quantum' field
	 *  - update the thread's state field
	 */

	old_thread->unconsumed_quantum = myprocessor->quantum;

	if (old_thread->state & TH_WAIT)
		old_thread->sp_state = MK_SP_BLOCKED;

	return(SF_SUCCESS);
}

sf_return_t
_mk_sp_thread_begin(
	sf_object_t			policy,
	thread_t			thread)
{
	processor_t			myprocessor = cpu_to_processor(cpu_number());
	processor_set_t		pset;

	pset = myprocessor->processor_set;

	/*
	 * The designated thread is about to begin execution:
	 *
	 *  - update the processor's `quantum' field
	 */

	/* check for legal thread state */
	assert(thread->sp_state == MK_SP_RUNNABLE);

	if (thread->policy & (POLICY_RR|POLICY_FIFO))
		myprocessor->quantum = thread->unconsumed_quantum;
	else
		myprocessor->quantum = (thread->bound_processor ?
										min_quantum : pset->set_quantum);

	return(SF_SUCCESS);
}

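/*
 * _mk_sp_thread_dispatch:
 *
 * Return a thread coming off a processor to a run queue: a thread that
 * exhausted its quantum goes to the tail with a fresh quantum, a
 * preempted thread keeps its place at the head, and a newly attached
 * thread is made runnable and queued at the tail.
 */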
sf_return_t
_mk_sp_thread_dispatch(
	sf_object_t			policy,
	thread_t			old_thread)
{
	if (old_thread->sp_state & MK_SP_RUNNABLE) {
		if (old_thread->reason & AST_QUANTUM) {
			thread_setrun(old_thread, FALSE, TAIL_Q);
			old_thread->unconsumed_quantum = min_quantum;
		}
		else
			thread_setrun(old_thread, FALSE, HEAD_Q);
	}

	if (old_thread->sp_state & MK_SP_ATTACHED) {
		/* indicate thread is now runnable */
		old_thread->sp_state = MK_SP_RUNNABLE;

		/* place thread at end of appropriate run queue */
		thread_setrun(old_thread, FALSE, TAIL_Q);
	}

	return(SF_SUCCESS);
}

/*
 * Thread must already be locked.
 */
sf_return_t
_mk_sp_thread_attach(
	sf_object_t			policy,
	thread_t			thread)
{
	thread->sp_state = MK_SP_ATTACHED;

	thread->max_priority = thread->priority = BASEPRI_DEFAULT;
	thread->depress_priority = -1;

	thread->cpu_usage = 0;
	thread->sched_usage = 0;
	thread->sched_stamp = 0;

	thread->unconsumed_quantum = min_quantum;

	/* Reflect this policy in thread data structure */
	thread->policy = policy->policy_id;

	return(SF_SUCCESS);
}

/*
 * Check to make sure that thread is removed from run
 * queues and active execution; and clear pending
 * priority depression.
 *
 * Thread must already be locked.
 */
sf_return_t
_mk_sp_thread_detach(
	sf_object_t			policy,
	thread_t			thread)
{
	struct run_queue	*rq;

	assert(thread->policy == policy->policy_id);

	/* make sure that the thread is no longer on any run queue */
	if (thread->runq != RUN_QUEUE_NULL) {
		rq = rem_runq(thread);
		if (rq == RUN_QUEUE_NULL) {
			panic("mk_sp_thread_detach: missed thread");
		}
	}

	/* clear pending priority depression */

	if (thread->depress_priority >= 0) {
		thread->priority = thread->depress_priority;
		thread->depress_priority = -1;
		if (thread_call_cancel(&thread->depress_timer))
			thread_call_enter(&thread->depress_timer);
	}

	/* clear the thread's policy field */
	thread->policy = POLICY_NULL;

	return(SF_SUCCESS);
}

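/*
 * _mk_sp_thread_processor:
 *
 * Requests to associate a thread with a particular processor are not
 * handled by this policy; the framework simply gets SF_FAILURE back.
 */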
sf_return_t
_mk_sp_thread_processor(
	sf_object_t			policy,
	thread_t			*thread,
	processor_t			processor)
{
	return(SF_FAILURE);
}

sf_return_t
_mk_sp_thread_processor_set(
	sf_object_t			policy,
	thread_t			thread,
	processor_set_t		processor_set)
{
	pset_add_thread(processor_set, thread);

	return(SF_SUCCESS);
}

sf_return_t
_mk_sp_thread_setup(
	sf_object_t			policy,
	thread_t			thread)
{
	/*
	 * Determine thread's state.  (It may be an "older" thread
	 * that has just been associated with this policy.)
	 */
	if (thread->state & TH_WAIT)
		thread->sp_state = MK_SP_BLOCKED;

	/* recompute priority */
	thread->sched_stamp = sched_tick;
	compute_priority(thread, TRUE);

	return(SF_SUCCESS);
}

/*
 * thread_priority_internal:
 *
 * Kernel-internal work function for thread_priority().  Called
 * with thread "properly locked" to ensure synchrony with RPC
 * (see act_lock_thread()).
 */
kern_return_t
thread_priority_internal(
	thread_t		thread,
	int				priority)
{
	kern_return_t	result = KERN_SUCCESS;
	spl_t			s;

	s = splsched();
	thread_lock(thread);

	/*
	 *	Check for violation of max priority
	 */
	if (priority > thread->max_priority)
		priority = thread->max_priority;

	/*
	 *	Set priorities.  If a depression is in progress,
	 *	change the priority to restore.
	 */
	if (thread->depress_priority >= 0)
		thread->depress_priority = priority;
	else {
		thread->priority = priority;
		compute_priority(thread, TRUE);

		/*
		 *	If the current thread has changed its
		 *	priority let the ast code decide whether
		 *	a different thread should run.
		 */
		if (thread == current_thread())
			ast_on(AST_BLOCK);
	}

	thread_unlock(thread);
	splx(s);

	return (result);
}

/*
 * thread_policy_common:
 *
 * Set scheduling policy for thread.  If pset == PROCESSOR_SET_NULL,
 * policy will be checked to make sure it is enabled.
 */
kern_return_t
thread_policy_common(
	thread_t			thread,
	integer_t			policy,
	integer_t			data,
	processor_set_t		pset)
{
	kern_return_t		result = KERN_SUCCESS;
	register int		temp;
	spl_t				s;

	if (	thread == THREAD_NULL	||
			invalid_policy(policy)	)
		return(KERN_INVALID_ARGUMENT);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Check if changing policy.
	 */
	if (policy != thread->policy) {
		/*
		 *	Changing policy.  Check if new policy is allowed.
		 */
		if (	pset == PROCESSOR_SET_NULL						&&
				(thread->processor_set->policies & policy) == 0	)
			result = KERN_FAILURE;
		else {
			if (pset != thread->processor_set)
				result = KERN_FAILURE;
			else {
				/*
				 *	Changing policy.  Calculate new
				 *	priority.
				 */
				thread->policy = policy;
				compute_priority(thread, TRUE);
			}
		}
	}

	thread_unlock(thread);
	splx(s);

	return (result);
}

/*
 * thread_set_policy
 *
 * Set scheduling policy and parameters, both base and limit, for
 * the given thread.  Policy can be any policy implemented by the
 * processor set, whether enabled or not.
 */
kern_return_t
thread_set_policy(
	thread_act_t			thr_act,
	processor_set_t			pset,
	policy_t				policy,
	policy_base_t			base,
	mach_msg_type_number_t	base_count,
	policy_limit_t			limit,
	mach_msg_type_number_t	limit_count)
{
	thread_t				thread;
	int						max, bas, dat, incr;
	kern_return_t			result = KERN_SUCCESS;

	if (	thr_act == THR_ACT_NULL		||
			pset == PROCESSOR_SET_NULL	)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (thread == THREAD_NULL) {
		act_unlock_thread(thr_act);

		return(KERN_INVALID_ARGUMENT);
	}

	if (pset != thread->processor_set) {
		act_unlock_thread(thr_act);

		return(KERN_FAILURE);
	}

	switch (policy) {

	case POLICY_RR:
	{
		policy_rr_base_t		rr_base = (policy_rr_base_t) base;
		policy_rr_limit_t		rr_limit = (policy_rr_limit_t) limit;

		if (	base_count != POLICY_RR_BASE_COUNT		||
				limit_count != POLICY_RR_LIMIT_COUNT	) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		dat = rr_base->quantum;
		bas = rr_base->base_priority;
		max = rr_limit->max_priority;
		if (invalid_pri(bas) || invalid_pri(max)) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		break;
	}

	case POLICY_FIFO:
	{
		policy_fifo_base_t		fifo_base = (policy_fifo_base_t) base;
		policy_fifo_limit_t		fifo_limit = (policy_fifo_limit_t) limit;

		if (	base_count != POLICY_FIFO_BASE_COUNT	||
				limit_count != POLICY_FIFO_LIMIT_COUNT	) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		dat = 0;
		bas = fifo_base->base_priority;
		max = fifo_limit->max_priority;
		if (invalid_pri(bas) || invalid_pri(max)) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		break;
	}

	case POLICY_TIMESHARE:
	{
		policy_timeshare_base_t		ts_base = (policy_timeshare_base_t) base;
		policy_timeshare_limit_t	ts_limit =
						(policy_timeshare_limit_t) limit;

		if (	base_count != POLICY_TIMESHARE_BASE_COUNT	||
				limit_count != POLICY_TIMESHARE_LIMIT_COUNT	) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		dat = 0;
		bas = ts_base->base_priority;
		max = ts_limit->max_priority;
		if (invalid_pri(bas) || invalid_pri(max)) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		break;
	}

	default:
		result = KERN_INVALID_POLICY;
	}

	if (result != KERN_SUCCESS) {
		act_unlock_thread(thr_act);

		return(result);
	}

	result = thread_priority_internal(thread, bas);
	if (result == KERN_SUCCESS)
		result = thread_policy_common(thread, policy, dat, pset);
	act_unlock_thread(thr_act);

	return(result);
}


/*
 * thread_policy
 *
 * Set scheduling policy and parameters, both base and limit, for
 * the given thread.  Policy must be a policy which is enabled for the
 * processor set.  Change contained threads if requested.
 */
kern_return_t
thread_policy(
	thread_act_t			thr_act,
	policy_t				policy,
	policy_base_t			base,
	mach_msg_type_number_t	count,
	boolean_t				set_limit)
{
	thread_t				thread;
	processor_set_t			pset;
	kern_return_t			result = KERN_SUCCESS;
	policy_limit_t			limit;
	int						limcount;
	policy_rr_limit_data_t			rr_limit;
	policy_fifo_limit_data_t		fifo_limit;
	policy_timeshare_limit_data_t	ts_limit;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (thread == THREAD_NULL) {
		act_unlock_thread(thr_act);

		return(KERN_INVALID_ARGUMENT);
	}

	pset = thread->processor_set;
	if (pset == PROCESSOR_SET_NULL) {
		act_unlock_thread(thr_act);

		return(KERN_INVALID_ARGUMENT);
	}

	if (	invalid_policy(policy)			||
			(pset->policies & policy) == 0	) {
		act_unlock_thread(thr_act);

		return(KERN_INVALID_POLICY);
	}

	if (set_limit) {
		/*
		 *	Set scheduling limits to base priority.
		 */
		switch (policy) {

		case POLICY_RR:
		{
			policy_rr_base_t		rr_base;

			if (count != POLICY_RR_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_RR_LIMIT_COUNT;
			rr_base = (policy_rr_base_t) base;
			rr_limit.max_priority = rr_base->base_priority;
			limit = (policy_limit_t) &rr_limit;

			break;
		}

		case POLICY_FIFO:
		{
			policy_fifo_base_t		fifo_base;

			if (count != POLICY_FIFO_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_FIFO_LIMIT_COUNT;
			fifo_base = (policy_fifo_base_t) base;
			fifo_limit.max_priority = fifo_base->base_priority;
			limit = (policy_limit_t) &fifo_limit;

			break;
		}

		case POLICY_TIMESHARE:
		{
			policy_timeshare_base_t		ts_base;

			if (count != POLICY_TIMESHARE_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_TIMESHARE_LIMIT_COUNT;
			ts_base = (policy_timeshare_base_t) base;
			ts_limit.max_priority = ts_base->base_priority;
			limit = (policy_limit_t) &ts_limit;

			break;
		}

		default:
			result = KERN_INVALID_POLICY;
			break;
		}

	}
	else {
		/*
		 *	Use current scheduling limits.  Ensure that the
		 *	new base priority will not exceed current limits.
		 */
		switch (policy) {

		case POLICY_RR:
		{
			policy_rr_base_t		rr_base;

			if (count != POLICY_RR_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_RR_LIMIT_COUNT;
			rr_base = (policy_rr_base_t) base;
			if (rr_base->base_priority > thread->max_priority) {
				result = KERN_POLICY_LIMIT;
				break;
			}

			rr_limit.max_priority = thread->max_priority;
			limit = (policy_limit_t) &rr_limit;

			break;
		}

		case POLICY_FIFO:
		{
			policy_fifo_base_t		fifo_base;

			if (count != POLICY_FIFO_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_FIFO_LIMIT_COUNT;
			fifo_base = (policy_fifo_base_t) base;
			if (fifo_base->base_priority > thread->max_priority) {
				result = KERN_POLICY_LIMIT;
				break;
			}

			fifo_limit.max_priority = thread->max_priority;
			limit = (policy_limit_t) &fifo_limit;

			break;
		}

		case POLICY_TIMESHARE:
		{
			policy_timeshare_base_t		ts_base;

			if (count != POLICY_TIMESHARE_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_TIMESHARE_LIMIT_COUNT;
			ts_base = (policy_timeshare_base_t) base;
			if (ts_base->base_priority > thread->max_priority) {
				result = KERN_POLICY_LIMIT;
				break;
			}

			ts_limit.max_priority = thread->max_priority;
			limit = (policy_limit_t) &ts_limit;

			break;
		}

		default:
			result = KERN_INVALID_POLICY;
			break;
		}

	}

	act_unlock_thread(thr_act);

	if (result == KERN_SUCCESS)
		result = thread_set_policy(thr_act, pset,
								   policy, base, count, limit, limcount);

	return(result);
}

/*
 * Define shifts for simulating (5/8)**n
 */

shift_data_t	wait_shift[32] = {
	{1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
	{5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
	{11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
	{16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}};
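
/*
 * wait_shift[n] approximates (5/8)**n as a pair of right shifts: a
 * positive second shift is added, a negative one subtracted (see
 * update_priority below).  For example:
 *
 *	n = 0: {1,1}  -> 1/2 + 1/2   = 1.0	((5/8)**0 = 1.0)
 *	n = 1: {1,3}  -> 1/2 + 1/8   = 0.625	((5/8)**1 = 0.625)
 *	n = 2: {1,-3} -> 1/2 - 1/8   = 0.375	((5/8)**2 ~= 0.391)
 *	n = 3: {2,-7} -> 1/4 - 1/128 = 0.242	((5/8)**3 ~= 0.244)
 */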

/*
 * do_priority_computation:
 *
 * Calculate new priority for thread based on its base priority plus
 * accumulated usage.  PRI_SHIFT and PRI_SHIFT_2 convert from
 * usage to priorities.  SCHED_SHIFT converts for the scaling
 * of the sched_usage field by SCHED_SCALE.  This scaling comes
 * from the multiplication by sched_load (thread_timer_delta)
 * in sched.h.  sched_load is calculated as a scaled overload
 * factor in compute_mach_factor (mach_factor.c).
 */
#ifdef	PRI_SHIFT_2
#if	PRI_SHIFT_2 > 0
#define do_priority_computation(thread, pri)						\
	MACRO_BEGIN														\
	(pri) = (thread)->priority	/* start with base priority */		\
	    - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT))		\
	    - ((thread)->sched_usage >> (PRI_SHIFT_2 + SCHED_SHIFT));	\
	if ((pri) < MINPRI_STANDARD)									\
		(pri) = MINPRI_STANDARD;									\
	else															\
	if ((pri) > MAXPRI_STANDARD)									\
		(pri) = MAXPRI_STANDARD;									\
	MACRO_END
#else	/* PRI_SHIFT_2 */
#define do_priority_computation(thread, pri)						\
	MACRO_BEGIN														\
	(pri) = (thread)->priority	/* start with base priority */		\
	    - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT))		\
	    + ((thread)->sched_usage >> (SCHED_SHIFT - PRI_SHIFT_2));	\
	if ((pri) < MINPRI_STANDARD)									\
		(pri) = MINPRI_STANDARD;									\
	else															\
	if ((pri) > MAXPRI_STANDARD)									\
		(pri) = MAXPRI_STANDARD;									\
	MACRO_END
#endif	/* PRI_SHIFT_2 */
#else	/* defined(PRI_SHIFT_2) */
#define do_priority_computation(thread, pri)						\
	MACRO_BEGIN														\
	(pri) = (thread)->priority	/* start with base priority */		\
	    - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT));	\
	if ((pri) < MINPRI_STANDARD)									\
		(pri) = MINPRI_STANDARD;									\
	else															\
	if ((pri) > MAXPRI_STANDARD)									\
		(pri) = MAXPRI_STANDARD;									\
	MACRO_END
#endif	/* defined(PRI_SHIFT_2) */
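
/*
 * Net effect of the three variants above: the usage penalty subtracted
 * from the base priority is (sched_usage >> SCHED_SHIFT) scaled by
 * 1/2**PRI_SHIFT, with a second term of 1/2**PRI_SHIFT_2 added to that
 * factor when PRI_SHIFT_2 is positive, subtracted (as 1/2**(-PRI_SHIFT_2))
 * when it is negative, and omitted when PRI_SHIFT_2 is undefined; the
 * result is clamped to [MINPRI_STANDARD, MAXPRI_STANDARD].
 */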

/*
 * compute_priority:
 *
 * Compute the effective priority of the specified thread.
 * The effective priority computation is as follows:
 *
 * Take the base priority for this thread and subtract from it
 * an amount derived from its accumulated cpu/scheduler usage
 * (timesharing threads only; other policies use the base priority
 * directly).
 *
 * The thread *must* be locked by the caller.
 */
void
compute_priority(
	register thread_t	thread,
	boolean_t			resched)
{
	register int		pri;

	if (thread->policy == POLICY_TIMESHARE) {
		do_priority_computation(thread, pri);
		if (thread->depress_priority < 0)
			set_pri(thread, pri, resched);
		else
			thread->depress_priority = pri;
	}
	else
		set_pri(thread, thread->priority, resched);
}

/*
 * compute_my_priority:
 *
 * Version of compute priority for current thread or thread
 * being manipulated by scheduler (going on or off a runq).
 * Only used for priority updates.  Policy or priority changes
 * must call compute_priority above.  Caller must have thread
 * locked and know it is timesharing and not depressed.
 */
void
compute_my_priority(
	register thread_t	thread)
{
	register int		pri;

	do_priority_computation(thread, pri);
	assert(thread->runq == RUN_QUEUE_NULL);
	thread->sched_pri = pri;
}

#if	DEBUG
struct mk_sp_usage {
	natural_t	cpu_delta, sched_delta;
	natural_t	sched_tick, ticks;
	natural_t	cpu_usage, sched_usage,
				aged_cpu, aged_sched;
	thread_t	thread;
} idled_info, loaded_info;
#endif

/*
 * update_priority
 *
 * Cause the priority computation of a thread that has been
 * sleeping or suspended to "catch up" with the system.  Thread
 * *MUST* be locked by caller.  If thread is running, then this
 * can only be called by the thread on itself.
 */
void
update_priority(
	register thread_t	thread)
{
	register unsigned int	ticks;
	register shift_t		shiftp;

	ticks = sched_tick - thread->sched_stamp;
	assert(ticks != 0);

	/*
	 *	If asleep for more than 30 seconds forget all
	 *	cpu_usage, else catch up on missed aging.
	 *	5/8 ** n is approximated by the two shifts
	 *	in the wait_shift array.
	 */
	thread->sched_stamp += ticks;
	thread_timer_delta(thread);
	if (ticks > 30) {
		thread->cpu_usage = 0;
		thread->sched_usage = 0;
	}
	else {
#if	DEBUG
		struct mk_sp_usage	*sp_usage;
#endif

		thread->cpu_usage += thread->cpu_delta;
		thread->sched_usage += thread->sched_delta;

#if	DEBUG
		if (thread->state & TH_IDLE)
			sp_usage = &idled_info;
		else
		if (thread == loaded_info.thread)
			sp_usage = &loaded_info;
		else
			sp_usage = NULL;

		if (sp_usage != NULL) {
			sp_usage->cpu_delta = thread->cpu_delta;
			sp_usage->sched_delta = thread->sched_delta;
			sp_usage->sched_tick = thread->sched_stamp;
			sp_usage->ticks = ticks;
			sp_usage->cpu_usage = thread->cpu_usage;
			sp_usage->sched_usage = thread->sched_usage;
			sp_usage->thread = thread;
		}
#endif

		shiftp = &wait_shift[ticks];
		if (shiftp->shift2 > 0) {
			thread->cpu_usage =
				(thread->cpu_usage >> shiftp->shift1) +
				(thread->cpu_usage >> shiftp->shift2);
			thread->sched_usage =
				(thread->sched_usage >> shiftp->shift1) +
				(thread->sched_usage >> shiftp->shift2);
		}
		else {
			thread->cpu_usage =
				(thread->cpu_usage >> shiftp->shift1) -
				(thread->cpu_usage >> -(shiftp->shift2));
			thread->sched_usage =
				(thread->sched_usage >> shiftp->shift1) -
				(thread->sched_usage >> -(shiftp->shift2));
		}

#if	DEBUG
		if (sp_usage != NULL) {
			sp_usage->aged_cpu = thread->cpu_usage;
			sp_usage->aged_sched = thread->sched_usage;
		}
#endif
	}
	thread->cpu_delta = 0;
	thread->sched_delta = 0;

	/*
	 *	Recompute priority if appropriate.
	 */
	if (	thread->policy == POLICY_TIMESHARE	&&
			thread->depress_priority < 0		) {
		register int		new_pri;
		run_queue_t			runq;

		do_priority_computation(thread, new_pri);
		if (new_pri != thread->sched_pri) {
			runq = rem_runq(thread);
			thread->sched_pri = new_pri;
			if (runq != RUN_QUEUE_NULL)
				thread_setrun(thread, TRUE, TAIL_Q);
		}
	}
}

/*
 * `_mk_sp_swtch_pri()' attempts a context switch (the logic in
 * thread_block no-ops the context switch if nothing would happen).
 * The swtch_pri trap built on top of it returns a boolean that
 * indicates whether there is anything else runnable.
 *
 * That boolean can be used by a thread waiting on a
 * lock or condition:  if FALSE is returned, the thread is justified
 * in becoming a resource hog by continuing to spin because there's
 * nothing else useful that the processor could do.  If TRUE is
 * returned, the thread should make one more check on the
 * lock and then be a good citizen and really suspend.
 */

void
_mk_sp_swtch_pri(
	sf_object_t			policy,
	int					pri)
{
	register thread_t	self = current_thread();
	extern natural_t	min_quantum_ms;

#ifdef	lint
	pri++;
#endif	/* lint */

	/*
	 *	XXX need to think about depression duration.
	 *	XXX currently using min quantum.
	 */
	_mk_sp_thread_depress_priority(policy, min_quantum_ms);

	thread_block((void (*)(void)) 0);

	_mk_sp_thread_depress_abort(policy, self);
}

/*
 * thread_switch_continue:
 *
 * Continuation routine for a thread switch.
 *
 * Just need to arrange the return value gets sent out correctly and that
 * we cancel the timer or the depression called for by the options to the
 * thread_switch call.
 */
void
_mk_sp_thread_switch_continue(void)
{
	thread_t		self = current_thread();
	int				wait_result = self->wait_result;
	int				option = self->saved.swtch.option;
	sf_object_t		policy = self->saved.swtch.policy;

	if (option == SWITCH_OPTION_WAIT && wait_result != THREAD_TIMED_OUT)
		thread_cancel_timer();
	else if (option == SWITCH_OPTION_DEPRESS)
		_mk_sp_thread_depress_abort(policy, self);
	thread_syscall_return(KERN_SUCCESS);
}

/*
 * thread_switch:
 *
 * Context switch.  User may supply thread hint.
 *
 * Fixed priority threads that call this get what they asked for
 * even if that violates priority order.
 */
kern_return_t
_mk_sp_thread_switch(
	sf_object_t				policy,
	thread_act_t			hint_act,
	int						option,
	mach_msg_timeout_t		option_time)
{
	register thread_t		self = current_thread();
	register processor_t	myprocessor;
	int						s;

	/*
	 *	Check and use thr_act hint if appropriate.  It is not
	 *	appropriate to give a hint that shares the current shuttle.
	 */
	if (hint_act != THR_ACT_NULL) {
		register thread_t	thread = act_lock_thread(hint_act);

		if (	thread != THREAD_NULL			&&
				thread != self					&&
				thread->top_act == hint_act		) {
			s = splsched();
			thread_lock(thread);

			/*
			 *	Check if the thread is in the right pset.  Then
			 *	pull it off its run queue.  If it
			 *	doesn't come, then it's not eligible.
			 */
			if (	thread->processor_set == self->processor_set	&&
					rem_runq(thread) != RUN_QUEUE_NULL				) {
				/*
				 *	Hah, got it!!
				 */
				if (thread->policy & (POLICY_FIFO|POLICY_RR)) {
					myprocessor = current_processor();

					myprocessor->quantum = thread->unconsumed_quantum;
					myprocessor->first_quantum = TRUE;
				}
				thread_unlock(thread);

				act_unlock_thread(hint_act);
				act_deallocate(hint_act);

				if (option == SWITCH_OPTION_WAIT)
					assert_wait_timeout(option_time, THREAD_ABORTSAFE);
				else if (option == SWITCH_OPTION_DEPRESS)
					_mk_sp_thread_depress_priority(policy, option_time);

				self->saved.swtch.policy = policy;
				self->saved.swtch.option = option;

				thread_run(self, _mk_sp_thread_switch_continue, thread);
				splx(s);

				goto out;
			}

			thread_unlock(thread);
			splx(s);
		}

		act_unlock_thread(hint_act);
		act_deallocate(hint_act);
	}

	/*
	 *	No handoff hint supplied, or hint was wrong.  Call thread_block() in
	 *	hopes of running something else.  If nothing else is runnable,
	 *	thread_block will detect this.  WARNING: thread_switch with no
	 *	option will not do anything useful if the thread calling it is the
	 *	highest priority thread (can easily happen with a collection
	 *	of timesharing threads).
	 */
	mp_disable_preemption();
	myprocessor = current_processor();
	if (	option != SWITCH_OPTION_NONE				||
			myprocessor->processor_set->runq.count > 0	||
			myprocessor->runq.count > 0					) {
		myprocessor->first_quantum = FALSE;
		mp_enable_preemption();

		if (option == SWITCH_OPTION_WAIT)
			assert_wait_timeout(option_time, THREAD_ABORTSAFE);
		else if (option == SWITCH_OPTION_DEPRESS)
			_mk_sp_thread_depress_priority(policy, option_time);

		self->saved.swtch.policy = policy;
		self->saved.swtch.option = option;

		thread_block(_mk_sp_thread_switch_continue);
	}
	else
		mp_enable_preemption();

out:
	if (option == SWITCH_OPTION_WAIT)
		thread_cancel_timer();
	else if (option == SWITCH_OPTION_DEPRESS)
		_mk_sp_thread_depress_abort(policy, self);

	return (KERN_SUCCESS);
}

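/*
 * For reference, the user-visible interface that reaches the routine
 * above is the thread_switch() trap (options declared in
 * <mach/thread_switch.h>, included at the top of this file).  An
 * illustrative "yield with depression" call from user space would be:
 *
 *	thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, 10);
 *
 * i.e. no handoff hint, depress the caller's priority, and let the
 * depression time out after roughly 10 ms.
 */
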
/*
 * mk_sp_thread_depress_priority
 *
 * Depress thread's priority to lowest possible for the specified period.
 * Intended for use when the thread wants a lock but doesn't know which
 * other thread is holding it.  As with thread_switch, fixed
 * priority threads get exactly what they asked for.  Users access
 * this by the SWITCH_OPTION_DEPRESS option to thread_switch.  A time
 * of zero will result in no timeout being scheduled.
 */
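/*
 * Reference counting note (as can be read from the code below and from
 * _mk_sp_thread_depress_abort):  arming the delayed depress_timer call
 * takes an extra reference on the thread (ref_count++); when an already
 * armed call is canceled, that reference is either reused for the new
 * arming or dropped with thread_deallocate().
 */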
void
_mk_sp_thread_depress_priority(
	sf_object_t				policy,
	mach_msg_timeout_t		interval)
{
	register thread_t		self = current_thread();
	AbsoluteTime			deadline;
	boolean_t				release = FALSE;
	spl_t					s;

	s = splsched();
	thread_lock(self);

	if (self->policy == policy->policy_id) {
		/*
		 *	If we haven't already saved the priority to be restored
		 *	(depress_priority), then save it.
		 */
		if (self->depress_priority < 0)
			self->depress_priority = self->priority;
		else if (thread_call_cancel(&self->depress_timer))
			release = TRUE;

		self->sched_pri = self->priority = DEPRESSPRI;

		if (interval != 0) {
			clock_interval_to_deadline(
							interval, 1000*NSEC_PER_USEC, &deadline);
			thread_call_enter_delayed(&self->depress_timer, deadline);
			if (!release)
				self->ref_count++;
			else
				release = FALSE;
		}
	}

	thread_unlock(self);
	splx(s);

	if (release)
		thread_deallocate(self);
}

/*
 * mk_sp_thread_depress_timeout:
 *
 * Timeout routine for priority depression.
 */
void
_mk_sp_thread_depress_timeout(
	sf_object_t				policy,
	register thread_t		thread)
{
	spl_t					s;

	s = splsched();
	thread_lock(thread);
	if (thread->policy == policy->policy_id) {
		/*
		 *	If we lose a race with mk_sp_thread_depress_abort,
		 *	then depress_priority might be -1.
		 */
		if (	thread->depress_priority >= 0							&&
				!thread_call_is_delayed(&thread->depress_timer, NULL)	) {
			thread->priority = thread->depress_priority;
			thread->depress_priority = -1;
			compute_priority(thread, FALSE);
		}
		else
		if (thread->depress_priority == -2) {
			/*
			 *	Thread was temporarily undepressed by thread_suspend, to
			 *	be redepressed in special_handler as it blocks.  We need to
			 *	prevent special_handler from redepressing it, since depression
			 *	has timed out:
			 */
			thread->depress_priority = -1;
		}
	}
	thread_unlock(thread);
	splx(s);
}

/*
 * mk_sp_thread_depress_abort:
 *
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
_mk_sp_thread_depress_abort(
	sf_object_t				policy,
	register thread_t		thread)
{
	kern_return_t			result = KERN_SUCCESS;
	boolean_t				release = FALSE;
	spl_t					s;

	s = splsched();
	thread_lock(thread);

	if (thread->policy == policy->policy_id) {
		if (thread->depress_priority >= 0) {
			if (thread_call_cancel(&thread->depress_timer))
				release = TRUE;
			thread->priority = thread->depress_priority;
			thread->depress_priority = -1;
			compute_priority(thread, FALSE);
		}
		else
			result = KERN_NOT_DEPRESSED;
	}

	thread_unlock(thread);
	splx(s);

	if (release)
		thread_deallocate(thread);

	return (result);
}

/*
 * mk_sp_thread_runnable:
 *
 * Return TRUE iff policy believes thread is runnable
 */
boolean_t
_mk_sp_thread_runnable(
	sf_object_t			policy,
	thread_t			thread)
{
	return (thread->sp_state == MK_SP_RUNNABLE);
}