/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */

/***
 *** ??? The following lines were picked up when code was incorporated
 *** into this file from `kern/syscall_subr.c.'  These should be moved
 *** with the code if it moves again.  Otherwise, they should be trimmed,
 *** based on the files included above.
 ***/

#include <mach/boolean.h>
#include <mach/thread_switch.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/ipc_kobject.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/spl.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ast.h>
#include <mach/policy.h>

#include <kern/syscall_subr.h>
#include <mach/mach_host_server.h>
#include <mach/mach_syscalls.h>

/***
 *** ??? End of lines picked up when code was incorporated
 *** into this file from `kern/syscall_subr.c.'
 ***/

#include <kern/mk_sp.h>
#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/assert.h>
#include <kern/thread.h>
#include <mach/mach_host_server.h>

/***
 *** ??? The next two files supply the prototypes for `thread_set_policy()'
 *** and `thread_policy.'  These routines cannot stay here if they are
 *** exported Mach system calls.
 ***/
#include <mach/thread_act_server.h>
#include <mach/host_priv_server.h>

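/*
 * _mk_sp_thread_unblock:
 *
 * A thread is being made runnable after waiting.  Give a realtime
 * thread a fresh deadline, and reset the quantum and computation
 * accounting.  The idle thread needs none of this.
 */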
void
_mk_sp_thread_unblock(
	thread_t		thread)
{
	if (thread->state & TH_IDLE)
		return;

	if (thread->sched_mode & TH_MODE_REALTIME) {
		thread->realtime.deadline = mach_absolute_time();
		thread->realtime.deadline += thread->realtime.constraint;
	}

	thread->current_quantum = 0;
	thread->computation_metered = 0;
	thread->reason = AST_NONE;
}

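/*
 * _mk_sp_thread_done:
 *
 * A running thread is coming off a processor.  Account for the
 * remainder of its quantum, cancel an exhausted realtime deadline,
 * and pass any leftover quantum along on a direct handoff.
 */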
void
_mk_sp_thread_done(
	thread_t		old_thread,
	thread_t		new_thread,
	processor_t		processor)
{
	/*
	 * A running thread is being taken off a processor:
	 */
	processor->last_dispatch = mach_absolute_time();

	if (old_thread->state & TH_IDLE)
		return;

	/*
	 * Compute remainder of current quantum.
	 */
	if (	first_timeslice(processor)							&&
			processor->quantum_end > processor->last_dispatch		)
		old_thread->current_quantum =
			(processor->quantum_end - processor->last_dispatch);
	else
		old_thread->current_quantum = 0;

	if (old_thread->sched_mode & TH_MODE_REALTIME) {
		/*
		 * Cancel the deadline if the thread has
		 * consumed the entire quantum.
		 */
		if (old_thread->current_quantum == 0) {
			old_thread->realtime.deadline = UINT64_MAX;
			old_thread->reason |= AST_QUANTUM;
		}
	}
	else {
		/*
		 * For non-realtime threads treat a tiny
		 * remaining quantum as an expired quantum
		 * but include what's left next time.
		 */
		if (old_thread->current_quantum < min_std_quantum) {
			old_thread->reason |= AST_QUANTUM;
			old_thread->current_quantum += std_quantum;
		}
	}

	/*
	 * If we are doing a direct handoff then
	 * give the remainder of our quantum to
	 * the next thread.
	 */
	if ((old_thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) {
		new_thread->current_quantum = old_thread->current_quantum;
		old_thread->reason |= AST_QUANTUM;
		old_thread->current_quantum = 0;
	}

	old_thread->last_switch = processor->last_dispatch;

	old_thread->computation_metered +=
		(old_thread->last_switch - old_thread->computation_epoch);
}

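/*
 * _mk_sp_thread_begin:
 *
 * Set up quantum timing as the designated thread begins execution
 * on the processor; the quantum timer is cancelled when the idle
 * thread takes over.
 */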
void
_mk_sp_thread_begin(
	thread_t		thread,
	processor_t		processor)
{
	/*
	 * The designated thread is beginning execution:
	 */
	if (thread->state & TH_IDLE) {
		timer_call_cancel(&processor->quantum_timer);
		processor->timeslice = 1;

		return;
	}

	if (thread->current_quantum == 0)
		thread_quantum_init(thread);

	processor->quantum_end =
		(processor->last_dispatch + thread->current_quantum);
	timer_call_enter1(&processor->quantum_timer,
						thread, processor->quantum_end);

	processor_timeslice_setup(processor, thread);

	thread->last_switch = processor->last_dispatch;

	thread->computation_epoch = thread->last_switch;
}

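/*
 * _mk_sp_thread_dispatch:
 *
 * Put a thread back on a run queue as it comes off a processor:
 * at the tail after an expired quantum, at the head after a
 * preemption, otherwise at the tail with a preemption check.
 */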
void
_mk_sp_thread_dispatch(
	thread_t		thread)
{
	if (thread->reason & AST_QUANTUM)
		thread_setrun(thread, SCHED_TAILQ);
	else
	if (thread->reason & AST_PREEMPT)
		thread_setrun(thread, SCHED_HEADQ);
	else
		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);

	thread->reason = AST_NONE;
}

/*
 * thread_policy_common:
 *
 * Set scheduling policy & priority for thread.
 */
static kern_return_t
thread_policy_common(
	thread_t		thread,
	integer_t		policy,
	integer_t		priority)
{
	spl_t			s;

	if (	thread == THREAD_NULL	||
			invalid_policy(policy)		)
		return(KERN_INVALID_ARGUMENT);

	s = splsched();
	thread_lock(thread);

	if (	!(thread->sched_mode & TH_MODE_REALTIME)	&&
			!(thread->safe_mode & TH_MODE_REALTIME)			) {
		if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
			integer_t	oldmode = (thread->sched_mode & TH_MODE_TIMESHARE);

			if (policy == POLICY_TIMESHARE && !oldmode) {
				thread->sched_mode |= TH_MODE_TIMESHARE;

				if (thread->state & TH_RUN)
					pset_share_incr(thread->processor_set);
			}
			else
			if (policy != POLICY_TIMESHARE && oldmode) {
				thread->sched_mode &= ~TH_MODE_TIMESHARE;

				if (thread->state & TH_RUN)
					pset_share_decr(thread->processor_set);
			}
		}
		else {
			if (policy == POLICY_TIMESHARE)
				thread->safe_mode |= TH_MODE_TIMESHARE;
			else
				thread->safe_mode &= ~TH_MODE_TIMESHARE;
		}

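		/*
		 * Re-bias the requested priority into an offset from the
		 * base of the band it falls in (kernel, system, or default),
		 * then re-apply it relative to the task priority, clamped
		 * to the thread's maximum.
		 */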
		if (priority >= thread->max_priority)
			priority = thread->max_priority - thread->task_priority;
		else
		if (priority >= MINPRI_KERNEL)
			priority -= MINPRI_KERNEL;
		else
		if (priority >= MINPRI_SYSTEM)
			priority -= MINPRI_SYSTEM;
		else
			priority -= BASEPRI_DEFAULT;

		priority += thread->task_priority;

		if (priority > thread->max_priority)
			priority = thread->max_priority;
		else
		if (priority < MINPRI)
			priority = MINPRI;

		thread->importance = priority - thread->task_priority;

		set_priority(thread, priority);
	}

	thread_unlock(thread);
	splx(s);

	return (KERN_SUCCESS);
}

/*
 * thread_set_policy
 *
 * Set scheduling policy and parameters, both base and limit, for
 * the given thread.  Policy can be any policy implemented by the
 * processor set, whether enabled or not.
 */
kern_return_t
thread_set_policy(
	thread_act_t			thr_act,
	processor_set_t			pset,
	policy_t				policy,
	policy_base_t			base,
	mach_msg_type_number_t	base_count,
	policy_limit_t			limit,
	mach_msg_type_number_t	limit_count)
{
	thread_t				thread;
	int						max, bas;
	kern_return_t			result = KERN_SUCCESS;

	if (	thr_act == THR_ACT_NULL		||
			pset == PROCESSOR_SET_NULL		)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (thread == THREAD_NULL) {
		act_unlock_thread(thr_act);

		return(KERN_INVALID_ARGUMENT);
	}

	if (pset != thread->processor_set) {
		act_unlock_thread(thr_act);

		return(KERN_FAILURE);
	}

	switch (policy) {

	case POLICY_RR:
	{
		policy_rr_base_t		rr_base = (policy_rr_base_t) base;
		policy_rr_limit_t		rr_limit = (policy_rr_limit_t) limit;

		if (	base_count != POLICY_RR_BASE_COUNT	||
				limit_count != POLICY_RR_LIMIT_COUNT		) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		bas = rr_base->base_priority;
		max = rr_limit->max_priority;
		if (invalid_pri(bas) || invalid_pri(max)) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		break;
	}

	case POLICY_FIFO:
	{
		policy_fifo_base_t		fifo_base = (policy_fifo_base_t) base;
		policy_fifo_limit_t		fifo_limit = (policy_fifo_limit_t) limit;

		if (	base_count != POLICY_FIFO_BASE_COUNT	||
				limit_count != POLICY_FIFO_LIMIT_COUNT		) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		bas = fifo_base->base_priority;
		max = fifo_limit->max_priority;
		if (invalid_pri(bas) || invalid_pri(max)) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		break;
	}

	case POLICY_TIMESHARE:
	{
		policy_timeshare_base_t		ts_base = (policy_timeshare_base_t) base;
		policy_timeshare_limit_t	ts_limit =
										(policy_timeshare_limit_t) limit;

		if (	base_count != POLICY_TIMESHARE_BASE_COUNT	||
				limit_count != POLICY_TIMESHARE_LIMIT_COUNT		) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		bas = ts_base->base_priority;
		max = ts_limit->max_priority;
		if (invalid_pri(bas) || invalid_pri(max)) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		break;
	}

	default:
		result = KERN_INVALID_POLICY;
	}

	if (result != KERN_SUCCESS) {
		act_unlock_thread(thr_act);

		return(result);
	}

	result = thread_policy_common(thread, policy, bas);
	act_unlock_thread(thr_act);

	return(result);
}


/*
 * thread_policy
 *
 * Set scheduling policy and parameters, both base and limit, for
 * the given thread.  Policy must be a policy which is enabled for the
 * processor set.  Change contained threads if requested.
 */
kern_return_t
thread_policy(
	thread_act_t			thr_act,
	policy_t				policy,
	policy_base_t			base,
	mach_msg_type_number_t	count,
	boolean_t				set_limit)
{
	thread_t				thread;
	processor_set_t			pset;
	kern_return_t			result = KERN_SUCCESS;
	policy_limit_t			limit;
	int						limcount;
	policy_rr_limit_data_t			rr_limit;
	policy_fifo_limit_data_t		fifo_limit;
	policy_timeshare_limit_data_t	ts_limit;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (thread == THREAD_NULL) {
		act_unlock_thread(thr_act);

		return(KERN_INVALID_ARGUMENT);
	}

	pset = thread->processor_set;
	if (pset == PROCESSOR_SET_NULL) {
		act_unlock_thread(thr_act);

		return(KERN_INVALID_ARGUMENT);
	}

	if (	invalid_policy(policy)											||
			((POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO) & policy) == 0	) {
		act_unlock_thread(thr_act);

		return(KERN_INVALID_POLICY);
	}

	if (set_limit) {
		/*
		 * Set scheduling limits to base priority.
		 */
		switch (policy) {

		case POLICY_RR:
		{
			policy_rr_base_t	rr_base;

			if (count != POLICY_RR_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_RR_LIMIT_COUNT;
			rr_base = (policy_rr_base_t) base;
			rr_limit.max_priority = rr_base->base_priority;
			limit = (policy_limit_t) &rr_limit;

			break;
		}

		case POLICY_FIFO:
		{
			policy_fifo_base_t	fifo_base;

			if (count != POLICY_FIFO_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_FIFO_LIMIT_COUNT;
			fifo_base = (policy_fifo_base_t) base;
			fifo_limit.max_priority = fifo_base->base_priority;
			limit = (policy_limit_t) &fifo_limit;

			break;
		}

		case POLICY_TIMESHARE:
		{
			policy_timeshare_base_t	ts_base;

			if (count != POLICY_TIMESHARE_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_TIMESHARE_LIMIT_COUNT;
			ts_base = (policy_timeshare_base_t) base;
			ts_limit.max_priority = ts_base->base_priority;
			limit = (policy_limit_t) &ts_limit;

			break;
		}

		default:
			result = KERN_INVALID_POLICY;
			break;
		}

	}
	else {
		/*
		 * Use current scheduling limits.  Ensure that the
		 * new base priority will not exceed current limits.
		 */
		switch (policy) {

		case POLICY_RR:
		{
			policy_rr_base_t	rr_base;

			if (count != POLICY_RR_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_RR_LIMIT_COUNT;
			rr_base = (policy_rr_base_t) base;
			if (rr_base->base_priority > thread->max_priority) {
				result = KERN_POLICY_LIMIT;
				break;
			}

			rr_limit.max_priority = thread->max_priority;
			limit = (policy_limit_t) &rr_limit;

			break;
		}

		case POLICY_FIFO:
		{
			policy_fifo_base_t	fifo_base;

			if (count != POLICY_FIFO_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_FIFO_LIMIT_COUNT;
			fifo_base = (policy_fifo_base_t) base;
			if (fifo_base->base_priority > thread->max_priority) {
				result = KERN_POLICY_LIMIT;
				break;
			}

			fifo_limit.max_priority = thread->max_priority;
			limit = (policy_limit_t) &fifo_limit;

			break;
		}

		case POLICY_TIMESHARE:
		{
			policy_timeshare_base_t	ts_base;

			if (count != POLICY_TIMESHARE_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_TIMESHARE_LIMIT_COUNT;
			ts_base = (policy_timeshare_base_t) base;
			if (ts_base->base_priority > thread->max_priority) {
				result = KERN_POLICY_LIMIT;
				break;
			}

			ts_limit.max_priority = thread->max_priority;
			limit = (policy_limit_t) &ts_limit;

			break;
		}

		default:
			result = KERN_INVALID_POLICY;
			break;
		}

	}

	act_unlock_thread(thr_act);

	if (result == KERN_SUCCESS)
		result = thread_set_policy(thr_act, pset,
					policy, base, count, limit, limcount);

	return(result);
}

/*
 * Define shifts for simulating (5/8)**n
 */

shift_data_t	wait_shift[32] = {
	{1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
	{5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
	{11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
	{16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}};
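/*
 * Each entry {shift1,shift2} approximates multiplication by (5/8)**n:
 * a positive shift2 adds the two right-shifted terms, a negative one
 * subtracts.  For example, wait_shift[1] = {1,3} gives
 * (x >> 1) + (x >> 3) = 0.625x, exactly 5/8, while wait_shift[2] =
 * {1,-3} gives (x >> 1) - (x >> 3) = 0.375x, close to (5/8)**2 =
 * 0.390625.  update_priority() indexes this table by the number of
 * missed scheduler ticks to age cpu usage in a single step.
 */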

/*
 * do_priority_computation:
 *
 * Calculate new priority for thread based on its base priority plus
 * accumulated usage.  PRI_SHIFT and PRI_SHIFT_2 convert from
 * usage to priorities.  SCHED_SHIFT converts for the scaling
 * of the sched_usage field by SCHED_SCALE.  This scaling comes
 * from the multiplication by sched_load (thread_timer_delta)
 * in sched.h.  sched_load is calculated as a scaled overload
 * factor in compute_mach_factor (mach_factor.c).
 */
#ifdef	PRI_SHIFT_2
#if	PRI_SHIFT_2 > 0
#define do_priority_computation(thread, pri)						\
	MACRO_BEGIN														\
	(pri) = (thread)->priority		/* start with base priority */	\
	    - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT))		\
	    - ((thread)->sched_usage >> (PRI_SHIFT_2 + SCHED_SHIFT));	\
	if ((pri) < MINPRI_STANDARD)									\
		(pri) = MINPRI_STANDARD;									\
	else															\
	if ((pri) > MAXPRI_STANDARD)									\
		(pri) = MAXPRI_STANDARD;									\
	MACRO_END
#else	/* PRI_SHIFT_2 */
#define do_priority_computation(thread, pri)						\
	MACRO_BEGIN														\
	(pri) = (thread)->priority		/* start with base priority */	\
	    - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT))		\
	    + ((thread)->sched_usage >> (SCHED_SHIFT - PRI_SHIFT_2));	\
	if ((pri) < MINPRI_STANDARD)									\
		(pri) = MINPRI_STANDARD;									\
	else															\
	if ((pri) > MAXPRI_STANDARD)									\
		(pri) = MAXPRI_STANDARD;									\
	MACRO_END
#endif	/* PRI_SHIFT_2 */
#else	/* defined(PRI_SHIFT_2) */
#define do_priority_computation(thread, pri)						\
	MACRO_BEGIN														\
	(pri) = (thread)->priority		/* start with base priority */	\
	    - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT));		\
	if ((pri) < MINPRI_STANDARD)									\
		(pri) = MINPRI_STANDARD;									\
	else															\
	if ((pri) > MAXPRI_STANDARD)									\
		(pri) = MAXPRI_STANDARD;									\
	MACRO_END
#endif	/* defined(PRI_SHIFT_2) */
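/*
 * Illustrative numbers (the exact shift values depend on the
 * PRI_SHIFT configuration): a timesharing thread with base priority
 * 31 whose scaled sched_usage shifts down to 6 gets scheduled
 * priority 31 - 6 = 25, while a thread with no recent usage keeps
 * its base priority.  The result is always clamped to
 * [MINPRI_STANDARD, MAXPRI_STANDARD].
 */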

void
set_priority(
	register thread_t	thread,
	register int		priority)
{
	thread->priority = priority;
	compute_priority(thread, FALSE);
}

/*
 * compute_priority:
 *
 * Reset the current scheduled priority of the
 * thread according to its base priority if the
 * thread has not been promoted or depressed.
 *
 * If the thread is timesharing, adjust according
 * to recent cpu usage.
 *
 * The thread *must* be locked by the caller.
 */
void
compute_priority(
	register thread_t	thread,
	boolean_t			override_depress)
{
	register int		priority;

	if (	!(thread->sched_mode & TH_MODE_PROMOTED)		&&
			(!(thread->sched_mode & TH_MODE_ISDEPRESSED)	||
				 override_depress							)		) {
		if (thread->sched_mode & TH_MODE_TIMESHARE)
			do_priority_computation(thread, priority);
		else
			priority = thread->priority;

		set_sched_pri(thread, priority);
	}
}

/*
 * compute_my_priority:
 *
 * Version of compute_priority() for the current thread.
 * Caller must have the thread locked, and the thread must
 * be timesharing and not depressed.
 *
 * Only used for priority updates.
 */
void
compute_my_priority(
	register thread_t	thread)
{
	register int		priority;

	do_priority_computation(thread, priority);
	assert(thread->runq == RUN_QUEUE_NULL);
	thread->sched_pri = priority;
}

/*
 * update_priority
 *
 * Cause the priority computation of a thread that has been
 * sleeping or suspended to "catch up" with the system.  Thread
 * *MUST* be locked by caller.  If thread is running, then this
 * can only be called by the thread on itself.
 */
void
update_priority(
	register thread_t	thread)
{
	register unsigned int	ticks;
	register shift_t		shiftp;

	ticks = sched_tick - thread->sched_stamp;
	assert(ticks != 0);

	/*
	 * If asleep for more than 30 seconds forget all
	 * cpu_usage, else catch up on missed aging.
	 * 5/8 ** n is approximated by the two shifts
	 * in the wait_shift array.
	 */
	thread->sched_stamp += ticks;
	thread_timer_delta(thread);
	if (ticks > 30) {
		thread->cpu_usage = 0;
		thread->sched_usage = 0;
	}
	else {
		thread->cpu_usage += thread->cpu_delta;
		thread->sched_usage += thread->sched_delta;

		shiftp = &wait_shift[ticks];
		if (shiftp->shift2 > 0) {
			thread->cpu_usage =
				(thread->cpu_usage >> shiftp->shift1) +
				(thread->cpu_usage >> shiftp->shift2);
			thread->sched_usage =
				(thread->sched_usage >> shiftp->shift1) +
				(thread->sched_usage >> shiftp->shift2);
		}
		else {
			thread->cpu_usage =
				(thread->cpu_usage >> shiftp->shift1) -
				(thread->cpu_usage >> -(shiftp->shift2));
			thread->sched_usage =
				(thread->sched_usage >> shiftp->shift1) -
				(thread->sched_usage >> -(shiftp->shift2));
		}
	}

	thread->cpu_delta = 0;
	thread->sched_delta = 0;

	/*
	 * Check for fail-safe release.
	 */
	if (	(thread->sched_mode & TH_MODE_FAILSAFE)		&&
			thread->sched_stamp >= thread->safe_release		) {
		if (!(thread->safe_mode & TH_MODE_TIMESHARE)) {
			if (thread->safe_mode & TH_MODE_REALTIME) {
				thread->priority = BASEPRI_RTQUEUES;

				thread->sched_mode |= TH_MODE_REALTIME;
			}

			thread->sched_mode &= ~TH_MODE_TIMESHARE;

			if (thread->state & TH_RUN)
				pset_share_decr(thread->processor_set);

			if (!(thread->sched_mode & TH_MODE_ISDEPRESSED))
				set_sched_pri(thread, thread->priority);
		}

		thread->safe_mode = 0;
		thread->sched_mode &= ~TH_MODE_FAILSAFE;
	}

	/*
	 * Recompute scheduled priority if appropriate.
	 */
	if (	(thread->sched_mode & TH_MODE_TIMESHARE)	&&
			!(thread->sched_mode & TH_MODE_PROMOTED)	&&
			!(thread->sched_mode & TH_MODE_ISDEPRESSED)		) {
		register int		new_pri;

		do_priority_computation(thread, new_pri);
		if (new_pri != thread->sched_pri) {
			run_queue_t		runq;

			runq = run_queue_remove(thread);
			thread->sched_pri = new_pri;
			if (runq != RUN_QUEUE_NULL)
				thread_setrun(thread, SCHED_TAILQ);
		}
	}
}

/*
 * thread_switch_continue:
 *
 * Continuation routine for a thread switch.
 *
 * Arrange for the return value to be sent out correctly, and cancel
 * the timer or the depression called for by the options to the
 * thread_switch call.
 */
void
_mk_sp_thread_switch_continue(void)
{
	register thread_t	self = current_thread();
	int					wait_result = self->wait_result;
	int					option = self->saved.swtch.option;

	if (option == SWITCH_OPTION_WAIT && wait_result != THREAD_TIMED_OUT)
		thread_cancel_timer();
	else
	if (option == SWITCH_OPTION_DEPRESS)
		_mk_sp_thread_depress_abort(self, FALSE);

	thread_syscall_return(KERN_SUCCESS);
	/*NOTREACHED*/
}

/*
 * thread_switch:
 *
 * Context switch.  User may supply thread hint.
 *
 * Fixed priority threads that call this get what they asked for
 * even if that violates priority order.
 */
kern_return_t
_mk_sp_thread_switch(
	thread_act_t		hint_act,
	int					option,
	mach_msg_timeout_t	option_time)
{
	register thread_t	self = current_thread();
	int					s;

	/*
	 * Check and use thr_act hint if appropriate.  It is not
	 * appropriate to give a hint that shares the current shuttle.
	 */
	if (hint_act != THR_ACT_NULL) {
		register thread_t	thread = act_lock_thread(hint_act);

		if (	thread != THREAD_NULL			&&
				thread != self					&&
				thread->top_act == hint_act		) {
			processor_t		processor;

			s = splsched();
			thread_lock(thread);

			/*
			 * Check if the thread is in the right pset,
			 * is not bound to a different processor,
			 * and that realtime is not involved.
			 *
			 * Next, pull it off its run queue.  If it
			 * doesn't come, it's not eligible.
			 */
			processor = current_processor();
			if (processor->current_pri < BASEPRI_RTQUEUES			&&
				thread->sched_pri < BASEPRI_RTQUEUES				&&
				thread->processor_set == processor->processor_set	&&
				(thread->bound_processor == PROCESSOR_NULL	||
				 thread->bound_processor == processor)				&&
				run_queue_remove(thread) != RUN_QUEUE_NULL			) {
				/*
				 * Hah, got it!!
				 */
				thread_unlock(thread);

				act_unlock_thread(hint_act);
				act_deallocate(hint_act);

				if (option == SWITCH_OPTION_WAIT)
					assert_wait_timeout(option_time, THREAD_ABORTSAFE);
				else
				if (option == SWITCH_OPTION_DEPRESS)
					_mk_sp_thread_depress_ms(option_time);

				self->saved.swtch.option = option;

				thread_run(self, _mk_sp_thread_switch_continue, thread);
				/* NOTREACHED */
			}

			thread_unlock(thread);
			splx(s);
		}

		act_unlock_thread(hint_act);
		act_deallocate(hint_act);
	}

	/*
	 * No handoff hint supplied, or hint was wrong.  Call thread_block() in
	 * hopes of running something else.  If nothing else is runnable,
	 * thread_block will detect this.  WARNING: thread_switch with no
	 * option will not do anything useful if the thread calling it is the
	 * highest priority thread (can easily happen with a collection
	 * of timesharing threads).
	 */
	if (option == SWITCH_OPTION_WAIT)
		assert_wait_timeout(option_time, THREAD_ABORTSAFE);
	else
	if (option == SWITCH_OPTION_DEPRESS)
		_mk_sp_thread_depress_ms(option_time);

	self->saved.swtch.option = option;

	thread_block_reason(_mk_sp_thread_switch_continue, AST_YIELD);

	if (option == SWITCH_OPTION_WAIT)
		thread_cancel_timer();
	else
	if (option == SWITCH_OPTION_DEPRESS)
		_mk_sp_thread_depress_abort(self, FALSE);

	return (KERN_SUCCESS);
}

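/*
 * Illustrative user-space use of the thread_switch trap that lands
 * here (a sketch, assuming the declarations in <mach/thread_switch.h>;
 * `hint' is a hypothetical send right for the preferred thread):
 *
 *	// Hand off the rest of our quantum to a specific thread:
 *	thread_switch(hint, SWITCH_OPTION_NONE, 0);
 *
 *	// Depress priority for 10 ms while spinning on a contended lock:
 *	thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, 10);
 */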
/*
 * Depress thread's priority to lowest possible for the specified interval,
 * with a value of zero resulting in no timeout being scheduled.
 */
void
_mk_sp_thread_depress_abstime(
	uint64_t			interval)
{
	register thread_t	self = current_thread();
	uint64_t			deadline;
	spl_t				s;

	s = splsched();
	thread_lock(self);
	if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
		processor_t		myprocessor = self->last_processor;

		self->sched_pri = DEPRESSPRI;
		myprocessor->current_pri = self->sched_pri;
		self->sched_mode &= ~TH_MODE_PREEMPT;
		self->sched_mode |= TH_MODE_DEPRESS;

		if (interval != 0) {
			clock_absolutetime_interval_to_deadline(interval, &deadline);
			if (!timer_call_enter(&self->depress_timer, deadline))
				self->depress_timer_active++;
		}
	}
	thread_unlock(self);
	splx(s);
}

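/*
 * Millisecond interface: convert the interval to absolute time
 * units before depressing.
 */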
void
_mk_sp_thread_depress_ms(
	mach_msg_timeout_t		interval)
{
	uint64_t		abstime;

	clock_interval_to_absolutetime_interval(
							interval, 1000*NSEC_PER_USEC, &abstime);
	_mk_sp_thread_depress_abstime(abstime);
}

/*
 * Priority depression expiration.
 */
void
thread_depress_expire(
	timer_call_param_t		p0,
	timer_call_param_t		p1)
{
	thread_t		thread = p0;
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	if (--thread->depress_timer_active == 1) {
		thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
		compute_priority(thread, FALSE);
	}
	thread_unlock(thread);
	splx(s);
}

/*
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
_mk_sp_thread_depress_abort(
	register thread_t	thread,
	boolean_t			abortall)
{
	kern_return_t		result = KERN_NOT_DEPRESSED;
	spl_t				s;

	s = splsched();
	thread_lock(thread);
	if (abortall || !(thread->sched_mode & TH_MODE_POLLDEPRESS)) {
		if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
			thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
			compute_priority(thread, FALSE);
			result = KERN_SUCCESS;
		}

		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}
	thread_unlock(thread);
	splx(s);

	return (result);
}

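/*
 * _mk_sp_thread_perhaps_yield:
 *
 * Invoked from polling loops.  If a fixed-priority thread has been
 * computing longer than max_poll_computation, temporarily depress
 * its priority (TH_MODE_POLLDEPRESS) so other threads can run, and
 * check for a preemption.
 */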
void
_mk_sp_thread_perhaps_yield(
	thread_t		self)
{
	spl_t			s;

	assert(self == current_thread());

	s = splsched();
	if (!(self->sched_mode & (TH_MODE_REALTIME|TH_MODE_TIMESHARE))) {
		extern uint64_t		max_poll_computation;
		extern int			sched_poll_yield_shift;
		uint64_t			total_computation, abstime;

		abstime = mach_absolute_time();
		total_computation = abstime - self->computation_epoch;
		total_computation += self->computation_metered;
		if (total_computation >= max_poll_computation) {
			processor_t		myprocessor = current_processor();
			ast_t			preempt;

			thread_lock(self);
			if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
				self->sched_pri = DEPRESSPRI;
				myprocessor->current_pri = self->sched_pri;
				self->sched_mode &= ~TH_MODE_PREEMPT;
			}
			self->computation_epoch = abstime;
			self->computation_metered = 0;
			self->sched_mode |= TH_MODE_POLLDEPRESS;

			abstime += (total_computation >> sched_poll_yield_shift);
			if (!timer_call_enter(&self->depress_timer, abstime))
				self->depress_timer_active++;
			thread_unlock(self);

			if ((preempt = csw_check(self, myprocessor)) != AST_NONE)
				ast_on(preempt);
		}
	}
	splx(s);
}