/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */

/***
 *** ??? The following lines were picked up when code was incorporated
 *** into this file from `kern/syscall_subr.c.'  These should be moved
 *** with the code if it moves again.  Otherwise, they should be trimmed,
 *** based on the files included above.
 ***/

#include <mach/boolean.h>
#include <mach/thread_switch.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/ipc_kobject.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/spl.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ast.h>
#include <mach/policy.h>

#include <kern/syscall_subr.h>
#include <mach/mach_host_server.h>
#include <mach/mach_syscalls.h>

/***
 *** ??? End of lines picked up when code was incorporated
 *** into this file from `kern/syscall_subr.c.'
 ***/

#include <kern/mk_sp.h>
#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/assert.h>
#include <kern/thread.h>
#include <mach/mach_host_server.h>

/***
 *** ??? The next two files supply the prototypes for `thread_set_policy()'
 *** and `thread_policy.'  These routines cannot stay here if they are
 *** exported Mach system calls.
 ***/
#include <mach/thread_act_server.h>
#include <mach/host_priv_server.h>
#include <sys/kdebug.h>

void
_mk_sp_thread_unblock(
	thread_t			thread)
{
	thread_setrun(thread, TAIL_Q);

	thread->current_quantum = 0;
	thread->computation_metered = 0;
	thread->reason = AST_NONE;

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED,MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
					(int)thread, (int)thread->sched_pri, 0, 0, 0);
}

void
_mk_sp_thread_done(
	thread_t			old_thread,
	thread_t			new_thread,
	processor_t			processor)
{
	/*
	 * A running thread is being taken off a processor:
	 */
	clock_get_uptime(&processor->last_dispatch);
	if (!(old_thread->state & TH_IDLE)) {
		/*
		 * Compute remainder of current quantum.
		 */
		if (	first_quantum(processor)							&&
				processor->quantum_end > processor->last_dispatch		)
			old_thread->current_quantum =
				(processor->quantum_end - processor->last_dispatch);
		else
			old_thread->current_quantum = 0;

		/*
		 * For non-realtime threads treat a tiny
		 * remaining quantum as an expired quantum
		 * but include what's left next time.
		 */
		if (!(old_thread->sched_mode & TH_MODE_REALTIME)) {
			if (old_thread->current_quantum < min_std_quantum) {
				old_thread->reason |= AST_QUANTUM;
				old_thread->current_quantum += std_quantum;
			}
		}
		else
		if (old_thread->current_quantum == 0)
			old_thread->reason |= AST_QUANTUM;

		/*
		 * If we are doing a direct handoff then
		 * give the remainder of our quantum to
		 * the next guy.
		 */
		if ((old_thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) {
			new_thread->current_quantum = old_thread->current_quantum;
			old_thread->reason |= AST_QUANTUM;
			old_thread->current_quantum = 0;
		}

		old_thread->last_switch = processor->last_dispatch;

		old_thread->computation_metered +=
			(old_thread->last_switch - old_thread->computation_epoch);
	}
}

void
_mk_sp_thread_begin(
	thread_t			thread,
	processor_t			processor)
{

	/*
	 * The designated thread is beginning execution:
	 */
	if (!(thread->state & TH_IDLE)) {
		if (thread->current_quantum == 0)
			thread->current_quantum =
				(thread->sched_mode & TH_MODE_REALTIME)?
					thread->realtime.computation: std_quantum;

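		/*
		 * Arm the per-processor quantum timer to expire at the
		 * end of this thread's quantum; the thread itself is
		 * passed as the timer parameter.
		 */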
		processor->quantum_end =
			(processor->last_dispatch + thread->current_quantum);
		timer_call_enter1(&processor->quantum_timer,
							thread, processor->quantum_end);

		processor->slice_quanta =
			(thread->sched_mode & TH_MODE_TIMESHARE)?
				processor->processor_set->set_quanta: 1;

		thread->last_switch = processor->last_dispatch;

		thread->computation_epoch = thread->last_switch;
	}
	else {
		timer_call_cancel(&processor->quantum_timer);

		processor->slice_quanta = 1;
	}
}

void
_mk_sp_thread_dispatch(
	thread_t			thread)
{
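	/*
	 * A thread that used up its quantum goes to the back of its
	 * run queue; otherwise it keeps its place at the front.
	 */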
	if (thread->reason & AST_QUANTUM)
		thread_setrun(thread, TAIL_Q);
	else
		thread_setrun(thread, HEAD_Q);

	thread->reason = AST_NONE;
}

/*
 *	thread_policy_common:
 *
 *	Set scheduling policy & priority for thread.
 */
static kern_return_t
thread_policy_common(
	thread_t		thread,
	integer_t		policy,
	integer_t		priority)
{
	spl_t			s;

	if (	thread == THREAD_NULL		||
			invalid_policy(policy)		)
		return(KERN_INVALID_ARGUMENT);

	s = splsched();
	thread_lock(thread);

	if (	!(thread->sched_mode & TH_MODE_REALTIME)	&&
			!(thread->safe_mode & TH_MODE_REALTIME)		) {
		if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
			if (policy == POLICY_TIMESHARE)
				thread->sched_mode |= TH_MODE_TIMESHARE;
			else
				thread->sched_mode &= ~TH_MODE_TIMESHARE;
		}
		else {
			if (policy == POLICY_TIMESHARE)
				thread->safe_mode |= TH_MODE_TIMESHARE;
			else
				thread->safe_mode &= ~TH_MODE_TIMESHARE;
		}

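		/*
		 * Convert the requested priority into an offset relative
		 * to the base of the band it falls in (kernel, system,
		 * or default), rebase that offset onto the task priority,
		 * then clamp to the thread's allowable range.
		 */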
		if (priority >= thread->max_priority)
			priority = thread->max_priority - thread->task_priority;
		else
		if (priority >= MINPRI_KERNEL)
			priority -= MINPRI_KERNEL;
		else
		if (priority >= MINPRI_SYSTEM)
			priority -= MINPRI_SYSTEM;
		else
			priority -= BASEPRI_DEFAULT;

		priority += thread->task_priority;

		if (priority > thread->max_priority)
			priority = thread->max_priority;
		else
		if (priority < MINPRI)
			priority = MINPRI;

		thread->importance = priority - thread->task_priority;

		set_priority(thread, priority);
	}

	thread_unlock(thread);
	splx(s);

	return (KERN_SUCCESS);
}

/*
 *	thread_set_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given thread.  Policy can be any policy implemented by the
 *	processor set, whether enabled or not.
 */
kern_return_t
thread_set_policy(
	thread_act_t			thr_act,
	processor_set_t			pset,
	policy_t				policy,
	policy_base_t			base,
	mach_msg_type_number_t	base_count,
	policy_limit_t			limit,
	mach_msg_type_number_t	limit_count)
{
	thread_t				thread;
	int						max, bas;
	kern_return_t			result = KERN_SUCCESS;

	if (	thr_act == THR_ACT_NULL			||
			pset == PROCESSOR_SET_NULL		)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (thread == THREAD_NULL) {
		act_unlock_thread(thr_act);

		return(KERN_INVALID_ARGUMENT);
	}

	if (pset != thread->processor_set) {
		act_unlock_thread(thr_act);

		return(KERN_FAILURE);
	}

	switch (policy) {

	case POLICY_RR:
	{
		policy_rr_base_t		rr_base = (policy_rr_base_t) base;
		policy_rr_limit_t		rr_limit = (policy_rr_limit_t) limit;

		if (	base_count != POLICY_RR_BASE_COUNT		||
				limit_count != POLICY_RR_LIMIT_COUNT		) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		bas = rr_base->base_priority;
		max = rr_limit->max_priority;
		if (invalid_pri(bas) || invalid_pri(max)) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		break;
	}

	case POLICY_FIFO:
	{
		policy_fifo_base_t		fifo_base = (policy_fifo_base_t) base;
		policy_fifo_limit_t		fifo_limit = (policy_fifo_limit_t) limit;

		if (	base_count != POLICY_FIFO_BASE_COUNT	||
				limit_count != POLICY_FIFO_LIMIT_COUNT	) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		bas = fifo_base->base_priority;
		max = fifo_limit->max_priority;
		if (invalid_pri(bas) || invalid_pri(max)) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		break;
	}

	case POLICY_TIMESHARE:
	{
		policy_timeshare_base_t		ts_base = (policy_timeshare_base_t) base;
		policy_timeshare_limit_t	ts_limit =
										(policy_timeshare_limit_t) limit;

		if (	base_count != POLICY_TIMESHARE_BASE_COUNT	||
				limit_count != POLICY_TIMESHARE_LIMIT_COUNT	) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		bas = ts_base->base_priority;
		max = ts_limit->max_priority;
		if (invalid_pri(bas) || invalid_pri(max)) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		break;
	}

	default:
		result = KERN_INVALID_POLICY;
	}

	if (result != KERN_SUCCESS) {
		act_unlock_thread(thr_act);

		return(result);
	}

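	/*
	 * Only the base priority is applied here; the limit (max)
	 * above is validated but not otherwise used.
	 */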
	result = thread_policy_common(thread, policy, bas);
	act_unlock_thread(thr_act);

	return(result);
}


/*
 *	thread_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given thread.  Policy must be a policy which is enabled for the
 *	processor set.  Change contained threads if requested.
 */
kern_return_t
thread_policy(
	thread_act_t			thr_act,
	policy_t				policy,
	policy_base_t			base,
	mach_msg_type_number_t	count,
	boolean_t				set_limit)
{
	thread_t				thread;
	processor_set_t			pset;
	kern_return_t			result = KERN_SUCCESS;
	policy_limit_t			limit;
	int						limcount;
	policy_rr_limit_data_t			rr_limit;
	policy_fifo_limit_data_t		fifo_limit;
	policy_timeshare_limit_data_t	ts_limit;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	pset = thread->processor_set;
	if (	thread == THREAD_NULL		||
			pset == PROCESSOR_SET_NULL	) {
		act_unlock_thread(thr_act);

		return(KERN_INVALID_ARGUMENT);
	}

	if (	invalid_policy(policy)											||
			((POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO) & policy) == 0	) {
		act_unlock_thread(thr_act);

		return(KERN_INVALID_POLICY);
	}

	if (set_limit) {
		/*
		 *	Set scheduling limits to base priority.
		 */
		switch (policy) {

		case POLICY_RR:
		{
			policy_rr_base_t	rr_base;

			if (count != POLICY_RR_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_RR_LIMIT_COUNT;
			rr_base = (policy_rr_base_t) base;
			rr_limit.max_priority = rr_base->base_priority;
			limit = (policy_limit_t) &rr_limit;

			break;
		}

		case POLICY_FIFO:
		{
			policy_fifo_base_t	fifo_base;

			if (count != POLICY_FIFO_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_FIFO_LIMIT_COUNT;
			fifo_base = (policy_fifo_base_t) base;
			fifo_limit.max_priority = fifo_base->base_priority;
			limit = (policy_limit_t) &fifo_limit;

			break;
		}

		case POLICY_TIMESHARE:
		{
			policy_timeshare_base_t	ts_base;

			if (count != POLICY_TIMESHARE_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_TIMESHARE_LIMIT_COUNT;
			ts_base = (policy_timeshare_base_t) base;
			ts_limit.max_priority = ts_base->base_priority;
			limit = (policy_limit_t) &ts_limit;

			break;
		}

		default:
			result = KERN_INVALID_POLICY;
			break;
		}

	}
	else {
		/*
		 *	Use current scheduling limits.  Ensure that the
		 *	new base priority will not exceed current limits.
		 */
		switch (policy) {

		case POLICY_RR:
		{
			policy_rr_base_t	rr_base;

			if (count != POLICY_RR_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_RR_LIMIT_COUNT;
			rr_base = (policy_rr_base_t) base;
			if (rr_base->base_priority > thread->max_priority) {
				result = KERN_POLICY_LIMIT;
				break;
			}

			rr_limit.max_priority = thread->max_priority;
			limit = (policy_limit_t) &rr_limit;

			break;
		}

		case POLICY_FIFO:
		{
			policy_fifo_base_t	fifo_base;

			if (count != POLICY_FIFO_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_FIFO_LIMIT_COUNT;
			fifo_base = (policy_fifo_base_t) base;
			if (fifo_base->base_priority > thread->max_priority) {
				result = KERN_POLICY_LIMIT;
				break;
			}

			fifo_limit.max_priority = thread->max_priority;
			limit = (policy_limit_t) &fifo_limit;

			break;
		}

		case POLICY_TIMESHARE:
		{
			policy_timeshare_base_t	ts_base;

			if (count != POLICY_TIMESHARE_BASE_COUNT) {
				result = KERN_INVALID_ARGUMENT;
				break;
			}

			limcount = POLICY_TIMESHARE_LIMIT_COUNT;
			ts_base = (policy_timeshare_base_t) base;
			if (ts_base->base_priority > thread->max_priority) {
				result = KERN_POLICY_LIMIT;
				break;
			}

			ts_limit.max_priority = thread->max_priority;
			limit = (policy_limit_t) &ts_limit;

			break;
		}

		default:
			result = KERN_INVALID_POLICY;
			break;
		}

	}

	act_unlock_thread(thr_act);

	if (result == KERN_SUCCESS)
		result = thread_set_policy(thr_act, pset,
					policy, base, count, limit, limcount);

	return(result);
}

/*
 *	Define shifts for simulating (5/8)**n
 */

shift_data_t	wait_shift[32] = {
	{1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
	{5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
	{11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
	{16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}};
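
/*
 *	Worked example: entry [n] approximates (5/8)**n.  Entry [1] is
 *	{1,3}, i.e. (x >> 1) + (x >> 3) = 0.625x, exactly 5/8.  Entry [3]
 *	is {2,-7}; a negative second shift means that term is subtracted
 *	(see update_priority below), giving (x >> 2) - (x >> 7) = 0.2422x
 *	against an exact (5/8)**3 = 0.2441.
 */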

/*
 *	do_priority_computation:
 *
 *	Calculate new priority for thread based on its base priority plus
 *	accumulated usage.  PRI_SHIFT and PRI_SHIFT_2 convert from
 *	usage to priorities.  SCHED_SHIFT converts for the scaling
 *	of the sched_usage field by SCHED_SCALE.  This scaling comes
 *	from the multiplication by sched_load (thread_timer_delta)
 *	in sched.h.  sched_load is calculated as a scaled overload
 *	factor in compute_mach_factor (mach_factor.c).
 */
#ifdef	PRI_SHIFT_2
#if	PRI_SHIFT_2 > 0
#define do_priority_computation(thread, pri)							\
	MACRO_BEGIN															\
	(pri) = (thread)->priority		/* start with base priority */		\
	    - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT))			\
	    - ((thread)->sched_usage >> (PRI_SHIFT_2 + SCHED_SHIFT));		\
	if ((pri) < MINPRI_STANDARD)										\
		(pri) = MINPRI_STANDARD;										\
	else																\
	if ((pri) > MAXPRI_STANDARD)										\
		(pri) = MAXPRI_STANDARD;										\
	MACRO_END
#else	/* PRI_SHIFT_2 */
#define do_priority_computation(thread, pri)							\
	MACRO_BEGIN															\
	(pri) = (thread)->priority		/* start with base priority */		\
	    - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT))			\
	    + ((thread)->sched_usage >> (SCHED_SHIFT - PRI_SHIFT_2));		\
	if ((pri) < MINPRI_STANDARD)										\
		(pri) = MINPRI_STANDARD;										\
	else																\
	if ((pri) > MAXPRI_STANDARD)										\
		(pri) = MAXPRI_STANDARD;										\
	MACRO_END
#endif	/* PRI_SHIFT_2 */
#else	/* defined(PRI_SHIFT_2) */
#define do_priority_computation(thread, pri)							\
	MACRO_BEGIN															\
	(pri) = (thread)->priority		/* start with base priority */		\
	    - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT));			\
	if ((pri) < MINPRI_STANDARD)										\
		(pri) = MINPRI_STANDARD;										\
	else																\
	if ((pri) > MAXPRI_STANDARD)										\
		(pri) = MAXPRI_STANDARD;										\
	MACRO_END
#endif	/* defined(PRI_SHIFT_2) */
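
/*
 *	Worked example (hypothetical values, simple case with PRI_SHIFT_2
 *	undefined): with a base priority of 31, and a sched_usage whose
 *	shifted value (sched_usage >> (PRI_SHIFT + SCHED_SHIFT)) comes to
 *	5, the computed priority is 31 - 5 = 26, which is then clamped to
 *	the range [MINPRI_STANDARD, MAXPRI_STANDARD].
 */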

void
set_priority(
	register thread_t	thread,
	register int		priority)
{
	thread->priority = priority;
	compute_priority(thread, FALSE);
}

/*
 *	compute_priority:
 *
 *	Reset the current scheduled priority of the
 *	thread according to its base priority if the
 *	thread has not been promoted or depressed.
 *
 *	If the thread is timesharing, adjust according
 *	to recent cpu usage.
 *
 *	The thread *must* be locked by the caller.
 */
void
compute_priority(
	register thread_t	thread,
	boolean_t			override_depress)
{
	register int		priority;

	if (	!(thread->sched_mode & TH_MODE_PROMOTED)			&&
			(!(thread->sched_mode & TH_MODE_ISDEPRESSED)	||
			 override_depress							)		) {
		if (thread->sched_mode & TH_MODE_TIMESHARE)
			do_priority_computation(thread, priority);
		else
			priority = thread->priority;

		set_sched_pri(thread, priority);
	}
}

/*
 *	compute_my_priority:
 *
 *	Version of compute priority for current thread.
 *	Caller must have thread locked and thread must
 *	be timesharing and not depressed.
 *
 *	Only used for priority updates.
 */
void
compute_my_priority(
	register thread_t	thread)
{
	register int		priority;

	do_priority_computation(thread, priority);
	assert(thread->runq == RUN_QUEUE_NULL);
	thread->sched_pri = priority;
}

/*
 *	update_priority
 *
 *	Cause the priority computation of a thread that has been
 *	sleeping or suspended to "catch up" with the system.  Thread
 *	*MUST* be locked by caller.  If thread is running, then this
 *	can only be called by the thread on itself.
 */
void
update_priority(
	register thread_t	thread)
{
	register unsigned int	ticks;
	register shift_t		shiftp;

	ticks = sched_tick - thread->sched_stamp;
	assert(ticks != 0);

	/*
	 *	If asleep for more than 30 seconds forget all
	 *	cpu_usage, else catch up on missed aging.
	 *	5/8 ** n is approximated by the two shifts
	 *	in the wait_shift array.
	 */
	thread->sched_stamp += ticks;
	thread_timer_delta(thread);
	if (ticks > 30) {
		thread->cpu_usage = 0;
		thread->sched_usage = 0;
	}
	else {
		thread->cpu_usage += thread->cpu_delta;
		thread->sched_usage += thread->sched_delta;

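		/*
		 * ticks is in [1, 30] here, so indexing the 32-entry
		 * wait_shift array is safe.
		 */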
		shiftp = &wait_shift[ticks];
		if (shiftp->shift2 > 0) {
			thread->cpu_usage =
				(thread->cpu_usage >> shiftp->shift1) +
				(thread->cpu_usage >> shiftp->shift2);
			thread->sched_usage =
				(thread->sched_usage >> shiftp->shift1) +
				(thread->sched_usage >> shiftp->shift2);
		}
		else {
			thread->cpu_usage =
				(thread->cpu_usage >> shiftp->shift1) -
				(thread->cpu_usage >> -(shiftp->shift2));
			thread->sched_usage =
				(thread->sched_usage >> shiftp->shift1) -
				(thread->sched_usage >> -(shiftp->shift2));
		}
	}

	thread->cpu_delta = 0;
	thread->sched_delta = 0;

	/*
	 *	Check for fail-safe release.
	 */
	if (	(thread->sched_mode & TH_MODE_FAILSAFE)		&&
			thread->sched_stamp >= thread->safe_release		) {
		if (!(thread->safe_mode & TH_MODE_TIMESHARE)) {
			if (thread->safe_mode & TH_MODE_REALTIME) {
				thread->priority = BASEPRI_REALTIME;

				thread->sched_mode |= TH_MODE_REALTIME;
			}

			thread->sched_mode &= ~TH_MODE_TIMESHARE;

			if (!(thread->sched_mode & TH_MODE_ISDEPRESSED))
				set_sched_pri(thread, thread->priority);
		}

		thread->safe_mode = 0;
		thread->sched_mode &= ~TH_MODE_FAILSAFE;
	}

	/*
	 *	Recompute scheduled priority if appropriate.
	 */
	if (	(thread->sched_mode & TH_MODE_TIMESHARE)	&&
			!(thread->sched_mode & TH_MODE_PROMOTED)	&&
			!(thread->sched_mode & TH_MODE_ISDEPRESSED)		) {
		register int		new_pri;

		do_priority_computation(thread, new_pri);
		if (new_pri != thread->sched_pri) {
			run_queue_t		runq;

			runq = rem_runq(thread);
			thread->sched_pri = new_pri;
			if (runq != RUN_QUEUE_NULL)
				thread_setrun(thread, TAIL_Q);
		}
	}
}

/*
 *	_mk_sp_thread_switch_continue:
 *
 *	Continuation routine for a thread switch.
 *
 *	Arrange for the return value to be sent out correctly, and
 *	cancel the timer or the depression called for by the options
 *	to the thread_switch call.
 */
void
_mk_sp_thread_switch_continue(void)
{
	register thread_t	self = current_thread();
	int					wait_result = self->wait_result;
	int					option = self->saved.swtch.option;

	if (option == SWITCH_OPTION_WAIT && wait_result != THREAD_TIMED_OUT)
		thread_cancel_timer();
	else
	if (option == SWITCH_OPTION_DEPRESS)
		_mk_sp_thread_depress_abort(self, FALSE);

	thread_syscall_return(KERN_SUCCESS);
	/*NOTREACHED*/
}

/*
 *	thread_switch:
 *
 *	Context switch.  User may supply thread hint.
 *
 *	Fixed priority threads that call this get what they asked for
 *	even if that violates priority order.
 */
kern_return_t
_mk_sp_thread_switch(
	thread_act_t			hint_act,
	int						option,
	mach_msg_timeout_t		option_time)
{
	register thread_t		self = current_thread();
	register processor_t	myprocessor;
	int						s;

	/*
	 *	Check and use the thread hint if appropriate.  It is not
	 *	appropriate to give a hint that shares the current shuttle.
	 */
	if (hint_act != THR_ACT_NULL) {
		register thread_t		thread = act_lock_thread(hint_act);

		if (	thread != THREAD_NULL			&&
				thread != self					&&
				thread->top_act == hint_act		) {
			s = splsched();
			thread_lock(thread);

			/*
			 *	Check if the thread is in the right pset.  Then
			 *	pull it off its run queue.  If it
			 *	doesn't come, then it's not eligible.
			 */
			if (	thread->processor_set == self->processor_set	&&
					rem_runq(thread) != RUN_QUEUE_NULL				) {
				/*
				 *	Hah, got it!!
				 */
				thread_unlock(thread);

				act_unlock_thread(hint_act);
				act_deallocate(hint_act);

				if (option == SWITCH_OPTION_WAIT)
					assert_wait_timeout(option_time, THREAD_ABORTSAFE);
				else
				if (option == SWITCH_OPTION_DEPRESS)
					_mk_sp_thread_depress_ms(option_time);

				self->saved.swtch.option = option;

				thread_run(self, _mk_sp_thread_switch_continue, thread);
				/* NOTREACHED */
			}

			thread_unlock(thread);
			splx(s);
		}

		act_unlock_thread(hint_act);
		act_deallocate(hint_act);
	}

	/*
	 *	No handoff hint supplied, or hint was wrong.  Call thread_block()
	 *	in hopes of running something else.  If nothing else is runnable,
	 *	thread_block will detect this.  WARNING: thread_switch with no
	 *	option will not do anything useful if the thread calling it is the
	 *	highest priority thread (can easily happen with a collection
	 *	of timesharing threads).
	 */
	mp_disable_preemption();
	myprocessor = current_processor();
	if (	option != SWITCH_OPTION_NONE					||
			myprocessor->processor_set->runq.count > 0		||
			myprocessor->runq.count > 0						) {
		mp_enable_preemption();

		if (option == SWITCH_OPTION_WAIT)
			assert_wait_timeout(option_time, THREAD_ABORTSAFE);
		else
		if (option == SWITCH_OPTION_DEPRESS)
			_mk_sp_thread_depress_ms(option_time);

		self->saved.swtch.option = option;

		thread_block_reason(_mk_sp_thread_switch_continue,
						(option == SWITCH_OPTION_DEPRESS)?
												AST_YIELD: AST_NONE);
	}
	else
		mp_enable_preemption();

out:
	if (option == SWITCH_OPTION_WAIT)
		thread_cancel_timer();
	else
	if (option == SWITCH_OPTION_DEPRESS)
		_mk_sp_thread_depress_abort(self, FALSE);

	return (KERN_SUCCESS);
}

/*
 *	Depress thread's priority to lowest possible for the specified interval,
 *	with a value of zero resulting in no timeout being scheduled.
 */
void
_mk_sp_thread_depress_abstime(
	uint64_t				interval)
{
	register thread_t		self = current_thread();
	uint64_t				deadline;
	spl_t					s;

	s = splsched();
	wake_lock(self);
	thread_lock(self);
	if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
		processor_t		myprocessor = self->last_processor;

		self->sched_pri = DEPRESSPRI;
		myprocessor->current_pri = self->sched_pri;
		self->sched_mode &= ~TH_MODE_PREEMPT;
		self->sched_mode |= TH_MODE_DEPRESS;
		thread_unlock(self);

		if (interval != 0) {
			clock_absolutetime_interval_to_deadline(interval, &deadline);
			if (!timer_call_enter(&self->depress_timer, deadline))
				self->depress_timer_active++;
		}
	}
	else
		thread_unlock(self);
	wake_unlock(self);
	splx(s);
}

void
_mk_sp_thread_depress_ms(
	mach_msg_timeout_t		interval)
{
	uint64_t		abstime;

	clock_interval_to_absolutetime_interval(
							interval, 1000*NSEC_PER_USEC, &abstime);
	_mk_sp_thread_depress_abstime(abstime);
}
1c79356b
A
968
969/*
0b4e3aa0 970 * Priority depression expiration.
1c79356b
A
971 */
972void
0b4e3aa0
A
973thread_depress_expire(
974 timer_call_param_t p0,
975 timer_call_param_t p1)
1c79356b 976{
0b4e3aa0
A
977 thread_t thread = p0;
978 spl_t s;
1c79356b
A
979
980 s = splsched();
0b4e3aa0
A
981 wake_lock(thread);
982 if (--thread->depress_timer_active == 1) {
983 thread_lock(thread);
9bccf70c
A
984 thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
985 compute_priority(thread, FALSE);
0b4e3aa0 986 thread_unlock(thread);
1c79356b 987 }
0b4e3aa0
A
988 else
989 if (thread->depress_timer_active == 0)
990 thread_wakeup_one(&thread->depress_timer_active);
991 wake_unlock(thread);
992 splx(s);
1c79356b
A
993}

/*
 *	Prematurely abort priority depression if there is one.
 */
kern_return_t
_mk_sp_thread_depress_abort(
	register thread_t		thread,
	boolean_t				abortall)
{
	kern_return_t			result = KERN_NOT_DEPRESSED;
	spl_t					s;

	s = splsched();
	wake_lock(thread);
	thread_lock(thread);
	if (abortall || !(thread->sched_mode & TH_MODE_POLLDEPRESS)) {
		if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
			thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
			compute_priority(thread, FALSE);
			result = KERN_SUCCESS;
		}

		thread_unlock(thread);

		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}
	else
		thread_unlock(thread);
	wake_unlock(thread);
	splx(s);

	return (result);
}

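/*
 *	_mk_sp_thread_perhaps_yield:
 *
 *	If the current thread has neither realtime nor timesharing
 *	scheduling and has been computing past max_poll_computation,
 *	depress its priority for a while (TH_MODE_POLLDEPRESS) so
 *	that other threads may run.
 */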
void
_mk_sp_thread_perhaps_yield(
	thread_t			self)
{
	spl_t			s;

	assert(self == current_thread());

	s = splsched();
	if (!(self->sched_mode & (TH_MODE_REALTIME|TH_MODE_TIMESHARE))) {
		extern uint64_t		max_poll_computation;
		extern int			sched_poll_yield_shift;
		uint64_t			abstime, total_computation;

		clock_get_uptime(&abstime);
		total_computation = abstime - self->computation_epoch;
		total_computation += self->computation_metered;
		if (total_computation >= max_poll_computation) {
			processor_t		myprocessor = current_processor();
			ast_t			preempt;

			wake_lock(self);
			thread_lock(self);
			if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
				self->sched_pri = DEPRESSPRI;
				myprocessor->current_pri = self->sched_pri;
				self->sched_mode &= ~TH_MODE_PREEMPT;
			}
			self->computation_epoch = abstime;
			self->computation_metered = 0;
			self->sched_mode |= TH_MODE_POLLDEPRESS;
			thread_unlock(self);

			abstime += (total_computation >> sched_poll_yield_shift);
			if (!timer_call_enter(&self->depress_timer, abstime))
				self->depress_timer_active++;
			wake_unlock(self);

			if ((preempt = csw_check(self, myprocessor)) != AST_NONE)
				ast_on(preempt);
		}
	}
	splx(s);
}