1 | /* |
2 | * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * The contents of this file constitute Original Code as defined in and | |
7 | * are subject to the Apple Public Source License Version 1.1 (the | |
8 | * "License"). You may not use this file except in compliance with the | |
9 | * License. Please obtain a copy of the License at | |
10 | * http://www.apple.com/publicsource and read it before using this file. | |
11 | * | |
12 | * This Original Code and all software distributed under the License are | |
13 | * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
14 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
15 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the | |
17 | * License for the specific language governing rights and limitations | |
18 | * under the License. | |
19 | * | |
20 | * @APPLE_LICENSE_HEADER_END@ | |
21 | */ | |
22 | /* | |
23 | * @OSF_FREE_COPYRIGHT@ | |
24 | */ | |
25 | /* | |
26 | * Mach Operating System | |
27 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University | |
28 | * All Rights Reserved. | |
29 | * | |
30 | * Permission to use, copy, modify and distribute this software and its | |
31 | * documentation is hereby granted, provided that both the copyright | |
32 | * notice and this permission notice appear in all copies of the | |
33 | * software, derivative works or modified versions, and any portions | |
34 | * thereof, and that both notices appear in supporting documentation. | |
35 | * | |
36 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" | |
37 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR | |
38 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | |
39 | * | |
40 | * Carnegie Mellon requests users of this software to return to | |
41 | * | |
42 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU | |
43 | * School of Computer Science | |
44 | * Carnegie Mellon University | |
45 | * Pittsburgh PA 15213-3890 | |
46 | * | |
47 | * any improvements or extensions that they make and grant Carnegie Mellon | |
48 | * the rights to redistribute these changes. | |
49 | */ | |
50 | /* | |
51 | */ | |
52 | /* | |
53 | * File: sched_prim.c | |
54 | * Author: Avadis Tevanian, Jr. | |
55 | * Date: 1986 | |
56 | * | |
57 | * Scheduling primitives | |
58 | * | |
59 | */ | |
60 | ||
61 | #include <debug.h> | |
62 | #include <cpus.h> | |
63 | #include <mach_kdb.h> | |
64 | #include <simple_clock.h> | |
65 | #include <power_save.h> | |
66 | #include <task_swapper.h> | |
67 | ||
68 | #include <ddb/db_output.h> | |
69 | #include <mach/machine.h> | |
70 | #include <machine/machine_routines.h> | |
71 | #include <machine/sched_param.h> | |
72 | #include <kern/ast.h> | |
73 | #include <kern/clock.h> | |
74 | #include <kern/counters.h> | |
75 | #include <kern/cpu_number.h> | |
76 | #include <kern/cpu_data.h> | |
77 | #include <kern/etap_macros.h> | |
78 | #include <kern/lock.h> | |
79 | #include <kern/macro_help.h> | |
80 | #include <kern/machine.h> | |
81 | #include <kern/misc_protos.h> | |
82 | #include <kern/processor.h> | |
83 | #include <kern/queue.h> | |
84 | #include <kern/sched.h> | |
85 | #include <kern/sched_prim.h> | |
86 | #include <kern/syscall_subr.h> | |
87 | #include <kern/task.h> | |
88 | #include <kern/thread.h> | |
89 | #include <kern/thread_swap.h> | |
90 | #include <vm/pmap.h> | |
91 | #include <vm/vm_kern.h> | |
92 | #include <vm/vm_map.h> | |
93 | #include <mach/policy.h> | |
94 | #include <mach/sync_policy.h> | |
95 | #include <kern/sf.h> | |
96 | #include <kern/mk_sp.h> /*** ??? fix so this can be removed ***/ | |
97 | #include <sys/kdebug.h> | |
98 | ||
99 | #if TASK_SWAPPER | |
100 | #include <kern/task_swap.h> | |
101 | extern int task_swap_on; | |
102 | #endif /* TASK_SWAPPER */ | |
103 | ||
104 | extern int hz; | |
105 | ||
106 | #define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */ | |
107 | int default_preemption_rate = DEFAULT_PREEMPTION_RATE; | |
108 | ||
109 | #define NO_KERNEL_PREEMPT 0 | |
110 | #define KERNEL_PREEMPT 1 | |
111 | int kernel_preemption_mode = KERNEL_PREEMPT; | |
112 | ||
113 | int min_quantum; | |
114 | natural_t min_quantum_ms; | |
115 | ||
116 | unsigned sched_tick; | |
117 | ||
118 | #if SIMPLE_CLOCK | |
119 | int sched_usec; | |
120 | #endif /* SIMPLE_CLOCK */ | |
121 | ||
122 | /* Forwards */ | |
123 | void thread_continue(thread_t); | |
124 | ||
125 | void wait_queues_init(void); | |
126 | ||
127 | void set_pri( | |
128 | thread_t thread, | |
129 | int pri, | |
130 | int resched); | |
131 | ||
132 | thread_t choose_pset_thread( | |
133 | processor_t myprocessor, | |
134 | processor_set_t pset); | |
135 | ||
136 | thread_t choose_thread( | |
137 | processor_t myprocessor); | |
138 | ||
139 | int run_queue_enqueue( | |
140 | run_queue_t runq, | |
141 | thread_t thread, | |
142 | boolean_t tail); | |
143 | ||
144 | void idle_thread_continue(void); | |
145 | void do_thread_scan(void); | |
146 | ||
147 | void clear_wait_internal( | |
148 | thread_t thread, | |
149 | int result); | |
150 | ||
151 | #if DEBUG | |
152 | void dump_run_queues( | |
153 | run_queue_t rq); | |
154 | void dump_run_queue_struct( | |
155 | run_queue_t rq); | |
156 | void dump_processor( | |
157 | processor_t p); | |
158 | void dump_processor_set( | |
159 | processor_set_t ps); | |
160 | ||
161 | void checkrq( | |
162 | run_queue_t rq, | |
163 | char *msg); | |
164 | ||
165 | void thread_check( | |
166 | thread_t thread, | |
167 | run_queue_t runq); | |
168 | #endif /*DEBUG*/ | |
169 | ||
170 | boolean_t thread_runnable( | |
171 | thread_t thread); | |
172 | ||
173 | /* | |
174 | * State machine | |
175 | * | |
176 | * states are combinations of: | |
177 | * R running | |
178 | * W waiting (or on wait queue) | |
179 | * N non-interruptible | |
180 | * O swapped out | |
181 | * I being swapped in | |
182 | * | |
183 | * init action | |
184 | * assert_wait thread_block clear_wait swapout swapin | |
185 | * | |
186 | * R RW, RWN R; setrun - - | |
187 | * RN RWN RN; setrun - - | |
188 | * | |
189 | * RW W R - | |
190 | * RWN WN RN - | |
191 | * | |
192 | * W R; setrun WO | |
193 | * WN RN; setrun - | |
194 | * | |
195 | * RO - - R | |
196 | * | |
197 | */ | |
198 | ||
199 | /* | |
200 | * Waiting protocols and implementation: | |
201 | * | |
202 | * Each thread may be waiting for exactly one event; this event | |
203 | * is set using assert_wait(). That thread may be awakened either | |
204 | * by performing a thread_wakeup_prim() on its event, | |
205 | * or by directly waking that thread up with clear_wait(). | |
206 | * | |
207 | * The implementation of wait events uses a hash table. Each | |
 208 | * bucket is a queue of threads having the same hash function | |
209 | * value; the chain for the queue (linked list) is the run queue | |
210 | * field. [It is not possible to be waiting and runnable at the | |
211 | * same time.] | |
212 | * | |
213 | * Locks on both the thread and on the hash buckets govern the | |
214 | * wait event field and the queue chain field. Because wakeup | |
215 | * operations only have the event as an argument, the event hash | |
216 | * bucket must be locked before any thread. | |
217 | * | |
218 | * Scheduling operations may also occur at interrupt level; therefore, | |
219 | * interrupts below splsched() must be prevented when holding | |
220 | * thread or hash bucket locks. | |
221 | * | |
222 | * The wait event hash table declarations are as follows: | |
223 | */ | |
224 | ||
225 | #define NUMQUEUES 59 | |
226 | ||
227 | struct wait_queue wait_queues[NUMQUEUES]; | |
228 | ||
229 | #define wait_hash(event) \ | |
230 | ((((int)(event) < 0)? ~(int)(event): (int)(event)) % NUMQUEUES) | |
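/*
 * Editorial sketch (not part of the original source) of the waiting
 * protocol described above, using only primitives declared in this
 * file.  wait_hash() picks the bucket internally; callers never index
 * wait_queues[] themselves.
 */
#if 0	/* example only; 'object' is a hypothetical wait event */
	/* waiter side */
	assert_wait((event_t)&object, THREAD_UNINT);
	thread_block((void (*)(void)) 0);

	/* waker side -- wakes every thread waiting on the event */
	thread_wakeup((event_t)&object);
	/* or thread_wakeup_one() to wake just one waiter */
#endif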
231 | ||
232 | void | |
233 | sched_init(void) | |
234 | { | |
235 | /* | |
236 | * Calculate the minimum quantum | |
237 | * in ticks. | |
238 | */ | |
239 | if (default_preemption_rate < 1) | |
240 | default_preemption_rate = DEFAULT_PREEMPTION_RATE; | |
241 | min_quantum = hz / default_preemption_rate; | |
242 | ||
243 | /* | |
 244 | * Round the result to the nearest |
 245 | * integral number of ticks (round half up). | |
246 | */ | |
247 | if (((hz * 10) / default_preemption_rate) - (min_quantum * 10) >= 5) | |
248 | min_quantum++; | |
249 | if (min_quantum < 1) | |
250 | min_quantum = 1; | |
251 | ||
252 | min_quantum_ms = (1000 / hz) * min_quantum; | |
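	/*
	 * Worked example (editorial): with hz = 100 and the default
	 * preemption rate of 100/s, min_quantum is 100/100 = 1 tick and
	 * min_quantum_ms is (1000/100) * 1 = 10 ms.  With a rate of
	 * 15/s, 100/15 = 6.67, which rounds up to 7 ticks (70 ms).
	 */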
253 | ||
254 | printf("scheduling quantum is %d ms\n", min_quantum_ms); | |
255 | ||
256 | wait_queues_init(); | |
257 | pset_sys_bootstrap(); /* initialize processor mgmt. */ | |
258 | processor_action(); | |
259 | sched_tick = 0; | |
260 | #if SIMPLE_CLOCK | |
261 | sched_usec = 0; | |
262 | #endif /* SIMPLE_CLOCK */ | |
263 | ast_init(); | |
264 | sf_init(); | |
265 | } | |
266 | ||
267 | void | |
268 | wait_queues_init(void) | |
269 | { | |
270 | register int i; | |
271 | ||
272 | for (i = 0; i < NUMQUEUES; i++) { | |
273 | wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO); | |
274 | } | |
275 | } | |
276 | ||
277 | /* | |
278 | * Thread timeout routine, called when timer expires. | |
279 | */ | |
280 | void | |
281 | thread_timer_expire( | |
282 | timer_call_param_t p0, | |
283 | timer_call_param_t p1) | |
284 | { | |
285 | thread_t thread = p0; | |
286 | spl_t s; | |
287 | ||
288 | s = splsched(); | |
289 | wake_lock(thread); | |
290 | if ( thread->wait_timer_is_set && | |
291 | !timer_call_is_delayed(&thread->wait_timer, NULL) ) { | |
292 | thread->wait_timer_active--; | |
293 | thread->wait_timer_is_set = FALSE; | |
294 | thread_lock(thread); | |
295 | if (thread->active) | |
296 | clear_wait_internal(thread, THREAD_TIMED_OUT); | |
297 | thread_unlock(thread); | |
298 | } | |
299 | else | |
300 | if (--thread->wait_timer_active == 0) | |
301 | thread_wakeup_one(&thread->wait_timer_active); | |
302 | wake_unlock(thread); | |
303 | splx(s); | |
304 | } | |
305 | ||
306 | /* | |
307 | * thread_set_timer: | |
308 | * | |
309 | * Set a timer for the current thread, if the thread | |
310 | * is ready to wait. Must be called between assert_wait() | |
311 | * and thread_block(). | |
312 | */ | |
313 | void | |
314 | thread_set_timer( | |
315 | natural_t interval, | |
316 | natural_t scale_factor) | |
317 | { | |
318 | thread_t thread = current_thread(); | |
319 | AbsoluteTime deadline; | |
320 | spl_t s; | |
321 | ||
322 | s = splsched(); | |
323 | wake_lock(thread); | |
324 | thread_lock(thread); | |
325 | if ((thread->state & TH_WAIT) != 0) { | |
326 | clock_interval_to_deadline(interval, scale_factor, &deadline); | |
327 | timer_call_enter(&thread->wait_timer, deadline); | |
328 | assert(!thread->wait_timer_is_set); | |
329 | thread->wait_timer_active++; | |
330 | thread->wait_timer_is_set = TRUE; | |
331 | } | |
332 | thread_unlock(thread); | |
333 | wake_unlock(thread); | |
334 | splx(s); | |
335 | } | |
336 | ||
337 | void | |
338 | thread_set_timer_deadline( | |
339 | AbsoluteTime deadline) | |
340 | { | |
341 | thread_t thread = current_thread(); | |
342 | spl_t s; | |
343 | ||
344 | s = splsched(); | |
345 | wake_lock(thread); | |
346 | thread_lock(thread); | |
347 | if ((thread->state & TH_WAIT) != 0) { | |
348 | timer_call_enter(&thread->wait_timer, deadline); | |
349 | assert(!thread->wait_timer_is_set); | |
350 | thread->wait_timer_active++; | |
351 | thread->wait_timer_is_set = TRUE; | |
352 | } | |
353 | thread_unlock(thread); | |
354 | wake_unlock(thread); | |
355 | splx(s); | |
356 | } | |
357 | ||
358 | void | |
359 | thread_cancel_timer(void) | |
360 | { | |
361 | thread_t thread = current_thread(); | |
362 | spl_t s; | |
363 | ||
364 | s = splsched(); | |
365 | wake_lock(thread); | |
366 | if (thread->wait_timer_is_set) { | |
367 | if (timer_call_cancel(&thread->wait_timer)) | |
368 | thread->wait_timer_active--; | |
369 | thread->wait_timer_is_set = FALSE; | |
370 | } | |
371 | wake_unlock(thread); | |
372 | splx(s); | |
373 | } | |
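/*
 * Editorial usage sketch for the wait-timer routines above (compare
 * assert_wait_timeout() below): the timer must be armed between
 * assert_wait() and thread_block(), and cancelled once the thread
 * resumes.  'object' and 'msecs' are hypothetical.
 */
#if 0	/* example only */
	assert_wait((event_t)&object, THREAD_UNINT);
	thread_set_timer(msecs, 1000*NSEC_PER_USEC);
	if (thread_block((void (*)(void)) 0) == THREAD_TIMED_OUT) {
		/* no wakeup arrived before the deadline */
	}
	thread_cancel_timer();
#endif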
374 | ||
375 | /* | |
376 | * thread_depress_timeout: | |
377 | * | |
378 | * Timeout routine for priority depression. | |
379 | */ | |
380 | void | |
381 | thread_depress_timeout( | |
382 | thread_call_param_t p0, | |
383 | thread_call_param_t p1) | |
384 | { | |
385 | thread_t thread = p0; | |
386 | sched_policy_t *policy; | |
387 | spl_t s; | |
388 | ||
389 | s = splsched(); | |
390 | thread_lock(thread); | |
391 | policy = policy_id_to_sched_policy(thread->policy); | |
392 | thread_unlock(thread); | |
393 | splx(s); | |
394 | ||
395 | if (policy != SCHED_POLICY_NULL) | |
396 | policy->sp_ops.sp_thread_depress_timeout(policy, thread); | |
397 | ||
398 | thread_deallocate(thread); | |
399 | } | |
400 | ||
401 | /* | |
402 | * Set up thread timeout element when thread is created. | |
403 | */ | |
404 | void | |
405 | thread_timer_setup( | |
406 | thread_t thread) | |
407 | { | |
408 | timer_call_setup(&thread->wait_timer, thread_timer_expire, thread); | |
409 | thread->wait_timer_is_set = FALSE; | |
410 | thread->wait_timer_active = 1; | |
411 | thread->ref_count++; | |
412 | ||
413 | thread_call_setup(&thread->depress_timer, thread_depress_timeout, thread); | |
414 | } | |
415 | ||
416 | void | |
417 | thread_timer_terminate(void) | |
418 | { | |
419 | thread_t thread = current_thread(); | |
420 | spl_t s; | |
421 | ||
422 | s = splsched(); | |
423 | wake_lock(thread); | |
424 | if (thread->wait_timer_is_set) { | |
425 | if (timer_call_cancel(&thread->wait_timer)) | |
426 | thread->wait_timer_active--; | |
427 | thread->wait_timer_is_set = FALSE; | |
428 | } | |
429 | ||
430 | thread->wait_timer_active--; | |
431 | ||
432 | while (thread->wait_timer_active > 0) { | |
433 | assert_wait((event_t)&thread->wait_timer_active, THREAD_UNINT); | |
434 | wake_unlock(thread); | |
435 | splx(s); | |
436 | ||
437 | thread_block((void (*)(void)) 0); | |
438 | ||
439 | s = splsched(); | |
440 | wake_lock(thread); | |
441 | } | |
442 | ||
443 | wake_unlock(thread); | |
444 | splx(s); | |
445 | ||
446 | thread_deallocate(thread); | |
447 | } | |
448 | ||
449 | /* | |
450 | * Routine: thread_go_locked | |
451 | * Purpose: | |
452 | * Start a thread running. | |
453 | * Conditions: | |
454 | * thread lock held, IPC locks may be held. | |
455 | * thread must have been pulled from wait queue under same lock hold. | |
456 | */ | |
457 | void | |
458 | thread_go_locked( | |
459 | thread_t thread, | |
460 | int result) | |
461 | { | |
462 | int state; | |
463 | sched_policy_t *policy; | |
464 | sf_return_t sfr; | |
465 | ||
466 | assert(thread->at_safe_point == FALSE); | |
467 | assert(thread->wait_event == NO_EVENT); | |
468 | assert(thread->wait_queue == WAIT_QUEUE_NULL); | |
469 | ||
470 | if (thread->state & TH_WAIT) { | |
471 | ||
472 | thread->state &= ~(TH_WAIT|TH_UNINT); | |
473 | if (!(thread->state & TH_RUN)) { | |
474 | thread->state |= TH_RUN; | |
475 | #if THREAD_SWAPPER | |
476 | if (thread->state & TH_SWAPPED_OUT) | |
477 | thread_swapin(thread->top_act, FALSE); | |
478 | else | |
479 | #endif /* THREAD_SWAPPER */ | |
480 | { | |
481 | policy = &sched_policy[thread->policy]; | |
482 | sfr = policy->sp_ops.sp_thread_unblock(policy, thread); | |
483 | assert(sfr == SF_SUCCESS); | |
484 | } | |
485 | } | |
486 | thread->wait_result = result; | |
487 | } | |
488 | ||
489 | ||
490 | /* | |
491 | * The next few lines are a major hack. Hopefully this will get us | |
492 | * around all of the scheduling framework hooha. We can't call | |
493 | * sp_thread_unblock yet because we could still be finishing up the | |
494 | * durn two stage block on another processor and thread_setrun | |
495 | * could be called by s_t_u and we'll really be messed up then. | |
496 | */ | |
497 | /* Don't mess with this if we are still swapped out */ | |
498 | if (!(thread->state & TH_SWAPPED_OUT)) | |
499 | thread->sp_state = MK_SP_RUNNABLE; | |
500 | ||
501 | } | |
502 | ||
503 | void | |
504 | thread_mark_wait_locked( | |
505 | thread_t thread, | |
506 | int interruptible) | |
507 | { | |
508 | ||
509 | assert(thread == current_thread()); | |
510 | ||
511 | thread->wait_result = -1; /* JMM - Needed for non-assert kernel */ | |
512 | thread->state |= (interruptible && thread->interruptible) ? | |
513 | TH_WAIT : (TH_WAIT | TH_UNINT); | |
514 | thread->at_safe_point = (interruptible == THREAD_ABORTSAFE) && (thread->interruptible); | |
515 | thread->sleep_stamp = sched_tick; | |
516 | } | |
517 | ||
518 | ||
519 | ||
520 | /* | |
521 | * Routine: assert_wait_timeout | |
522 | * Purpose: | |
523 | * Assert that the thread intends to block, | |
 524 | * waiting for a timeout (no user-known event). | |
525 | */ | |
526 | unsigned int assert_wait_timeout_event; | |
527 | ||
528 | void | |
529 | assert_wait_timeout( | |
530 | mach_msg_timeout_t msecs, | |
531 | int interruptible) | |
532 | { | |
533 | spl_t s; | |
534 | ||
535 | assert_wait((event_t)&assert_wait_timeout_event, interruptible); | |
536 | thread_set_timer(msecs, 1000*NSEC_PER_USEC); | |
537 | } | |
538 | ||
539 | /* | |
540 | * Check to see if an assert wait is possible, without actually doing one. | |
541 | * This is used by debug code in locks and elsewhere to verify that it is | |
542 | * always OK to block when trying to take a blocking lock (since waiting | |
543 | * for the actual assert_wait to catch the case may make it hard to detect | |
 544 | * this case). | |
545 | */ | |
546 | boolean_t | |
547 | assert_wait_possible(void) | |
548 | { | |
549 | ||
550 | thread_t thread; | |
551 | extern unsigned int debug_mode; | |
552 | ||
553 | #if DEBUG | |
554 | if(debug_mode) return TRUE; /* Always succeed in debug mode */ | |
555 | #endif | |
556 | ||
557 | thread = current_thread(); | |
558 | ||
559 | return (thread == NULL || wait_queue_assert_possible(thread)); | |
560 | } | |
561 | ||
562 | /* | |
563 | * assert_wait: | |
564 | * | |
565 | * Assert that the current thread is about to go to | |
566 | * sleep until the specified event occurs. | |
567 | */ | |
568 | void | |
569 | assert_wait( | |
570 | event_t event, | |
571 | int interruptible) | |
572 | { | |
573 | register wait_queue_t wq; | |
574 | register int index; | |
575 | ||
576 | assert(event != NO_EVENT); | |
577 | assert(assert_wait_possible()); | |
578 | ||
579 | index = wait_hash(event); | |
580 | wq = &wait_queues[index]; | |
581 | wait_queue_assert_wait(wq, | |
582 | event, | |
583 | interruptible); | |
584 | } | |
585 | ||
586 | ||
587 | /* | |
588 | * thread_[un]stop(thread) | |
589 | * Once a thread has blocked interruptibly (via assert_wait) prevent | |
590 | * it from running until thread_unstop. | |
591 | * | |
592 | * If someone else has already stopped the thread, wait for the | |
593 | * stop to be cleared, and then stop it again. | |
594 | * | |
595 | * Return FALSE if interrupted. | |
596 | * | |
597 | * NOTE: thread_hold/thread_suspend should be called on the activation | |
598 | * before calling thread_stop. TH_SUSP is only recognized when | |
599 | * a thread blocks and only prevents clear_wait/thread_wakeup | |
600 | * from restarting an interruptible wait. The wake_active flag is | |
601 | * used to indicate that someone is waiting on the thread. | |
602 | */ | |
603 | boolean_t | |
604 | thread_stop( | |
605 | thread_t thread) | |
606 | { | |
607 | spl_t s; | |
608 | ||
609 | s = splsched(); | |
610 | wake_lock(thread); | |
611 | ||
612 | while (thread->state & TH_SUSP) { | |
613 | int wait_result; |
614 | ||
615 | thread->wake_active = TRUE; |
616 | assert_wait((event_t)&thread->wake_active, THREAD_ABORTSAFE); | |
617 | wake_unlock(thread); | |
618 | splx(s); | |
619 | ||
620 | wait_result = thread_block((void (*)(void)) 0); |
621 | if (wait_result != THREAD_AWAKENED) | |
622 | return (FALSE); |
623 | ||
624 | s = splsched(); | |
625 | wake_lock(thread); | |
626 | } | |
627 | thread_lock(thread); | |
628 | thread->state |= TH_SUSP; | |
629 | thread_unlock(thread); | |
630 | ||
631 | wake_unlock(thread); | |
632 | splx(s); | |
633 | ||
634 | return (TRUE); | |
635 | } | |
636 | ||
637 | /* | |
638 | * Clear TH_SUSP and if the thread has been stopped and is now runnable, | |
639 | * put it back on the run queue. | |
640 | */ | |
641 | void | |
642 | thread_unstop( | |
643 | thread_t thread) | |
644 | { | |
645 | sched_policy_t *policy; | |
646 | sf_return_t sfr; | |
647 | spl_t s; | |
648 | ||
649 | s = splsched(); | |
650 | wake_lock(thread); | |
651 | thread_lock(thread); | |
652 | ||
653 | if ((thread->state & (TH_RUN|TH_WAIT|TH_SUSP/*|TH_UNINT*/)) == TH_SUSP) { | |
654 | thread->state = (thread->state & ~TH_SUSP) | TH_RUN; | |
655 | #if THREAD_SWAPPER | |
656 | if (thread->state & TH_SWAPPED_OUT) | |
657 | thread_swapin(thread->top_act, FALSE); | |
658 | else | |
659 | #endif /* THREAD_SWAPPER */ | |
660 | { | |
661 | policy = &sched_policy[thread->policy]; | |
662 | sfr = policy->sp_ops.sp_thread_unblock(policy, thread); | |
663 | assert(sfr == SF_SUCCESS); | |
664 | } | |
665 | } | |
666 | else | |
667 | if (thread->state & TH_SUSP) { | |
668 | thread->state &= ~TH_SUSP; | |
669 | ||
670 | if (thread->wake_active) { | |
671 | thread->wake_active = FALSE; | |
672 | thread_unlock(thread); | |
673 | wake_unlock(thread); | |
674 | splx(s); | |
675 | thread_wakeup((event_t)&thread->wake_active); | |
676 | ||
677 | return; | |
678 | } | |
679 | } | |
680 | ||
681 | thread_unlock(thread); | |
682 | wake_unlock(thread); | |
683 | splx(s); | |
684 | } | |
685 | ||
686 | /* | |
687 | * Wait for the thread's RUN bit to clear | |
688 | */ | |
689 | boolean_t | |
690 | thread_wait( | |
691 | thread_t thread) | |
692 | { | |
693 | spl_t s; | |
694 | ||
695 | s = splsched(); | |
696 | wake_lock(thread); | |
697 | ||
698 | while (thread->state & (TH_RUN/*|TH_UNINT*/)) { | |
699 | int wait_result; |
700 | ||
701 | if (thread->last_processor != PROCESSOR_NULL) |
702 | cause_ast_check(thread->last_processor); | |
703 | ||
704 | thread->wake_active = TRUE; | |
705 | assert_wait((event_t)&thread->wake_active, THREAD_ABORTSAFE); | |
706 | wake_unlock(thread); | |
707 | splx(s); | |
708 | ||
709 | wait_result = thread_block((void (*)(void))0); |
710 | if (wait_result != THREAD_AWAKENED) | |
711 | return FALSE; | |
712 | |
713 | s = splsched(); | |
714 | wake_lock(thread); | |
715 | } | |
716 | wake_unlock(thread); |
717 | splx(s); | |
 718 | return TRUE; |
719 | } |
720 | ||
721 | ||
722 | /* | |
723 | * thread_stop_wait(thread) | |
724 | * Stop the thread then wait for it to block interruptibly | |
725 | */ | |
726 | boolean_t | |
727 | thread_stop_wait( | |
728 | thread_t thread) | |
729 | { | |
730 | if (thread_stop(thread)) { | |
731 | if (thread_wait(thread)) | |
732 | return (TRUE); | |
733 | ||
734 | thread_unstop(thread); | |
735 | } | |
736 | ||
737 | return (FALSE); | |
738 | } | |
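/*
 * Editorial sketch of the stop protocol: per the NOTE above
 * thread_stop(), the activation should be held before stopping and
 * released afterwards (thread_hold()/thread_release() are assumed
 * here, as referred to in that comment).
 */
#if 0	/* example only */
	thread_hold(thread->top_act);
	if (thread_stop_wait(thread)) {
		/* thread is stopped and blocked interruptibly: inspect it */
		thread_unstop(thread);
	}
	thread_release(thread->top_act);
#endif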
739 | ||
740 | ||
741 | /* | |
742 | * Routine: clear_wait_internal | |
743 | * | |
744 | * Clear the wait condition for the specified thread. | |
745 | * Start the thread executing if that is appropriate. | |
746 | * Arguments: | |
747 | * thread thread to awaken | |
748 | * result Wakeup result the thread should see | |
749 | * Conditions: | |
750 | * At splsched | |
751 | * the thread is locked. | |
752 | */ | |
753 | void | |
754 | clear_wait_internal( | |
755 | thread_t thread, | |
756 | int result) | |
757 | { | |
758 | /* | |
759 | * If the thread isn't in a wait queue, just set it running. Otherwise, | |
760 | * try to remove it from the queue and, if successful, then set it | |
761 | * running. NEVER interrupt an uninterruptible thread. | |
762 | */ | |
763 | if (!((result == THREAD_INTERRUPTED) && (thread->state & TH_UNINT))) { | |
764 | if (wait_queue_assert_possible(thread) || | |
765 | (wait_queue_remove(thread) == KERN_SUCCESS)) { | |
766 | thread_go_locked(thread, result); | |
767 | } | |
768 | } | |
769 | } | |
770 | ||
771 | ||
772 | /* | |
773 | * clear_wait: | |
774 | * | |
775 | * Clear the wait condition for the specified thread. Start the thread | |
776 | * executing if that is appropriate. | |
777 | * | |
778 | * parameters: | |
779 | * thread thread to awaken | |
780 | * result Wakeup result the thread should see | |
781 | */ | |
782 | void | |
783 | clear_wait( | |
784 | thread_t thread, | |
785 | int result) | |
786 | { | |
787 | spl_t s; | |
788 | ||
789 | s = splsched(); | |
790 | thread_lock(thread); | |
791 | clear_wait_internal(thread, result); | |
792 | thread_unlock(thread); | |
793 | splx(s); | |
794 | } | |
795 | ||
796 | ||
797 | /* | |
798 | * thread_wakeup_prim: | |
799 | * | |
800 | * Common routine for thread_wakeup, thread_wakeup_with_result, | |
801 | * and thread_wakeup_one. | |
802 | * | |
803 | */ | |
804 | void | |
805 | thread_wakeup_prim( | |
806 | event_t event, | |
807 | boolean_t one_thread, | |
808 | int result) | |
809 | { | |
810 | register wait_queue_t wq; | |
811 | register int index; | |
812 | ||
813 | index = wait_hash(event); | |
814 | wq = &wait_queues[index]; | |
815 | if (one_thread) | |
816 | wait_queue_wakeup_one(wq, event, result); | |
817 | else | |
818 | wait_queue_wakeup_all(wq, event, result); | |
819 | } | |
820 | ||
821 | /* | |
822 | * thread_bind: | |
823 | * | |
824 | * Force a thread to execute on the specified processor. | |
825 | * If the thread is currently executing, it may wait until its | |
826 | * time slice is up before switching onto the specified processor. | |
827 | * | |
828 | * A processor of PROCESSOR_NULL causes the thread to be unbound. | |
829 | * xxx - DO NOT export this to users. | |
830 | */ | |
831 | void | |
832 | thread_bind( | |
833 | register thread_t thread, | |
834 | processor_t processor) | |
835 | { | |
836 | spl_t s; | |
837 | ||
838 | s = splsched(); | |
839 | thread_lock(thread); | |
840 | thread_bind_locked(thread, processor); | |
841 | thread_unlock(thread); | |
842 | splx(s); | |
843 | } | |
844 | ||
845 | /* | |
846 | * Select a thread for this processor (the current processor) to run. | |
847 | * May select the current thread, which must already be locked. | |
848 | */ | |
849 | thread_t | |
850 | thread_select( | |
851 | register processor_t myprocessor) | |
852 | { | |
853 | register thread_t thread; | |
854 | processor_set_t pset; | |
855 | register run_queue_t runq = &myprocessor->runq; | |
856 | boolean_t other_runnable; | |
857 | sched_policy_t *policy; | |
858 | ||
859 | /* | |
860 | * Check for other non-idle runnable threads. | |
861 | */ | |
862 | myprocessor->first_quantum = TRUE; | |
863 | pset = myprocessor->processor_set; | |
864 | thread = current_thread(); | |
865 | ||
866 | #if 0 /* CHECKME! */ | |
867 | thread->unconsumed_quantum = myprocessor->quantum; | |
868 | #endif | |
869 | ||
870 | simple_lock(&runq->lock); | |
871 | simple_lock(&pset->runq.lock); | |
872 | ||
873 | other_runnable = runq->count > 0 || pset->runq.count > 0; | |
874 | ||
875 | if ( thread->state == TH_RUN && | |
876 | (!other_runnable || | |
877 | (runq->highq < thread->sched_pri && | |
878 | pset->runq.highq < thread->sched_pri)) && | |
879 | thread->processor_set == pset && | |
880 | (thread->bound_processor == PROCESSOR_NULL || | |
881 | thread->bound_processor == myprocessor) ) { | |
882 | ||
883 | /* I am the highest priority runnable (non-idle) thread */ | |
884 | simple_unlock(&pset->runq.lock); | |
885 | simple_unlock(&runq->lock); | |
886 | ||
887 | /* Update the thread's meta-priority */ | |
888 | policy = policy_id_to_sched_policy(thread->policy); | |
889 | assert(policy != SCHED_POLICY_NULL); | |
890 | (void)policy->sp_ops.sp_thread_update_mpri(policy, thread); | |
891 | } | |
892 | else | |
893 | if (other_runnable) { | |
894 | simple_unlock(&pset->runq.lock); | |
895 | simple_unlock(&runq->lock); | |
896 | thread = choose_thread(myprocessor); | |
897 | } | |
898 | else { | |
899 | simple_unlock(&pset->runq.lock); | |
900 | simple_unlock(&runq->lock); | |
901 | ||
902 | /* | |
903 | * Nothing is runnable, so set this processor idle if it | |
904 | * was running. If it was in an assignment or shutdown, | |
905 | * leave it alone. Return its idle thread. | |
906 | */ | |
907 | simple_lock(&pset->idle_lock); | |
908 | if (myprocessor->state == PROCESSOR_RUNNING) { | |
909 | myprocessor->state = PROCESSOR_IDLE; | |
910 | /* | |
911 | * XXX Until it goes away, put master on end of queue, others | |
912 | * XXX on front so master gets used last. | |
913 | */ | |
914 | if (myprocessor == master_processor) | |
915 | queue_enter(&(pset->idle_queue), myprocessor, | |
916 | processor_t, processor_queue); | |
917 | else | |
918 | queue_enter_first(&(pset->idle_queue), myprocessor, | |
919 | processor_t, processor_queue); | |
920 | ||
921 | pset->idle_count++; | |
922 | } | |
923 | simple_unlock(&pset->idle_lock); | |
924 | ||
925 | thread = myprocessor->idle_thread; | |
926 | } | |
927 | ||
928 | return (thread); | |
929 | } | |
930 | ||
931 | ||
932 | /* | |
933 | * Stop running the current thread and start running the new thread. | |
934 | * If continuation is non-zero, and the current thread is blocked, | |
935 | * then it will resume by executing continuation on a new stack. | |
936 | * Returns TRUE if the hand-off succeeds. | |
937 | * The reason parameter == AST_QUANTUM if the thread blocked | |
938 | * because its quantum expired. | |
939 | * Assumes splsched. | |
940 | */ | |
941 | ||
942 | ||
943 | static thread_t | |
944 | __current_thread(void) | |
945 | { | |
946 | return (current_thread()); | |
947 | } | |
948 | ||
949 | boolean_t | |
950 | thread_invoke( | |
951 | register thread_t old_thread, | |
952 | register thread_t new_thread, | |
953 | int reason, | |
954 | void (*continuation)(void)) | |
955 | { | |
956 | sched_policy_t *policy; | |
957 | sf_return_t sfr; | |
958 | void (*lcont)(void); | |
959 | ||
960 | /* | |
961 | * Mark thread interruptible. | |
962 | */ | |
963 | thread_lock(new_thread); | |
964 | new_thread->state &= ~TH_UNINT; | |
965 | ||
966 | if (cpu_data[cpu_number()].preemption_level != 1) | |
967 | panic("thread_invoke: preemption_level %d\n", | |
968 | cpu_data[cpu_number()].preemption_level); | |
969 | ||
970 | ||
971 | assert(thread_runnable(new_thread)); | |
972 | ||
973 | assert(old_thread->continuation == (void (*)(void))0); | |
974 | ||
975 | if ((old_thread->sched_mode & TH_MODE_REALTIME) && (!old_thread->stack_privilege)) { | |
976 | old_thread->stack_privilege = old_thread->kernel_stack; | |
977 | } | |
978 | ||
979 | if (continuation != (void (*)()) 0) { | |
980 | switch (new_thread->state & TH_STACK_STATE) { | |
981 | case TH_STACK_HANDOFF: | |
982 | ||
983 | /* | |
984 | * If the old thread has stack privilege, we can't give | |
985 | * his stack away. So go and get him one and treat this | |
986 | * as a traditional context switch. | |
987 | */ | |
988 | if (old_thread->stack_privilege == current_stack()) | |
989 | goto get_new_stack; | |
990 | ||
991 | /* | |
992 | * Make the whole handoff/dispatch atomic to match the | |
993 | * non-handoff case. | |
994 | */ | |
995 | disable_preemption(); | |
996 | ||
997 | /* | |
998 | * Set up ast context of new thread and switch to its timer. | |
999 | */ | |
1000 | new_thread->state &= ~(TH_STACK_HANDOFF|TH_UNINT); | |
1001 | new_thread->last_processor = current_processor(); | |
1002 | ast_context(new_thread->top_act, cpu_number()); | |
1003 | timer_switch(&new_thread->system_timer); | |
1004 | thread_unlock(new_thread); | |
1005 | ||
1006 | old_thread->continuation = continuation; | |
1007 | stack_handoff(old_thread, new_thread); | |
1008 | ||
1009 | wake_lock(old_thread); | |
1010 | thread_lock(old_thread); | |
1011 | act_machine_sv_free(old_thread->top_act); | |
1012 | ||
1013 | /* | |
1014 | * inline thread_dispatch but don't free stack | |
1015 | */ | |
1016 | ||
1017 | switch (old_thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_IDLE)) { | |
1018 | sched_policy_t *policy; | |
1019 | sf_return_t sfr; | |
1020 | ||
1021 | case TH_RUN | TH_UNINT: | |
1022 | case TH_RUN: | |
1023 | /* | |
1024 | * No reason to stop. Put back on a run queue. | |
1025 | */ | |
1026 | old_thread->state |= TH_STACK_HANDOFF; | |
1027 | ||
1028 | /* Get pointer to scheduling policy "object" */ | |
1029 | policy = &sched_policy[old_thread->policy]; | |
1030 | ||
1031 | /* Leave enqueueing thread up to scheduling policy */ | |
1032 | sfr = policy->sp_ops.sp_thread_dispatch(policy, old_thread); | |
1033 | assert(sfr == SF_SUCCESS); | |
1034 | break; | |
1035 | ||
1036 | case TH_RUN | TH_WAIT | TH_UNINT: | |
1037 | case TH_RUN | TH_WAIT: | |
1038 | old_thread->sleep_stamp = sched_tick; | |
1039 | /* fallthrough */ | |
1040 | ||
1041 | case TH_WAIT: /* this happens! */ | |
1042 | /* | |
1043 | * Waiting | |
1044 | */ | |
1045 | old_thread->state |= TH_STACK_HANDOFF; | |
1046 | old_thread->state &= ~TH_RUN; | |
1047 | if (old_thread->state & TH_TERMINATE) | |
1048 | thread_reaper_enqueue(old_thread); | |
1049 | ||
1050 | if (old_thread->wake_active) { | |
1051 | old_thread->wake_active = FALSE; | |
1052 | thread_unlock(old_thread); | |
1053 | wake_unlock(old_thread); | |
1054 | thread_wakeup((event_t)&old_thread->wake_active); | |
1055 | wake_lock(old_thread); | |
1056 | thread_lock(old_thread); | |
1057 | } | |
1058 | break; | |
1059 | ||
1060 | case TH_RUN | TH_IDLE: | |
1061 | /* | |
1062 | * Drop idle thread -- it is already in | |
1063 | * idle_thread_array. | |
1064 | */ | |
1065 | old_thread->state |= TH_STACK_HANDOFF; | |
1066 | break; | |
1067 | ||
1068 | default: | |
1069 | panic("State 0x%x \n",old_thread->state); | |
1070 | } | |
1071 | ||
1072 | /* Get pointer to scheduling policy "object" */ | |
1073 | policy = &sched_policy[old_thread->policy]; | |
1074 | ||
1075 | /* Indicate to sched policy that old thread has stopped execution */ | |
1076 | /*** ??? maybe use a macro -- rkc, 1/4/96 ***/ | |
1077 | sfr = policy->sp_ops.sp_thread_done(policy, old_thread); | |
1078 | assert(sfr == SF_SUCCESS); | |
1079 | thread_unlock(old_thread); | |
1080 | wake_unlock(old_thread); | |
1081 | thread_lock(new_thread); | |
1082 | ||
1083 | assert(thread_runnable(new_thread)); | |
1084 | ||
1085 | /* Get pointer to scheduling policy "object" */ | |
1086 | policy = &sched_policy[new_thread->policy]; | |
1087 | ||
1088 | /* Indicate to sched policy that new thread has started execution */ | |
1089 | /*** ??? maybe use a macro ***/ | |
1090 | sfr = policy->sp_ops.sp_thread_begin(policy, new_thread); | |
1091 | assert(sfr == SF_SUCCESS); | |
1092 | ||
1093 | lcont = new_thread->continuation; | |
1094 | new_thread->continuation = (void(*)(void))0; | |
1095 | ||
1096 | thread_unlock(new_thread); | |
1097 | enable_preemption(); | |
1098 | ||
1099 | counter_always(c_thread_invoke_hits++); | |
1100 | ||
1101 | if (new_thread->funnel_state & TH_FN_REFUNNEL) { | |
1102 | kern_return_t save_wait_result; | |
1103 | new_thread->funnel_state = 0; | |
1104 | save_wait_result = new_thread->wait_result; | |
1105 | KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, new_thread->funnel_lock, 2, 0, 0, 0); | |
1106 | //mutex_lock(new_thread->funnel_lock); | |
1107 | funnel_lock(new_thread->funnel_lock); | |
1108 | KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, new_thread->funnel_lock, 2, 0, 0, 0); | |
1109 | new_thread->funnel_state = TH_FN_OWNED; | |
1110 | new_thread->wait_result = save_wait_result; | |
1111 | } | |
1112 | (void) spllo(); | |
1113 | ||
1114 | assert(lcont); | |
1115 | call_continuation(lcont); | |
1116 | /*NOTREACHED*/ | |
1117 | return TRUE; | |
1118 | ||
1119 | case TH_STACK_COMING_IN: | |
1120 | /* | |
1121 | * waiting for a stack | |
1122 | */ | |
1123 | thread_swapin(new_thread); | |
1124 | thread_unlock(new_thread); | |
1125 | counter_always(c_thread_invoke_misses++); | |
1126 | return FALSE; | |
1127 | ||
1128 | case 0: | |
1129 | /* | |
1130 | * already has a stack - can't handoff | |
1131 | */ | |
1132 | if (new_thread == old_thread) { | |
1133 | ||
1134 | /* same thread but with continuation */ | |
1135 | counter(++c_thread_invoke_same); | |
1136 | thread_unlock(new_thread); | |
1137 | ||
1138 | if (old_thread->funnel_state & TH_FN_REFUNNEL) { | |
1139 | kern_return_t save_wait_result; | |
1140 | ||
1141 | old_thread->funnel_state = 0; | |
1142 | save_wait_result = old_thread->wait_result; | |
1143 | KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, old_thread->funnel_lock, 3, 0, 0, 0); | |
1144 | funnel_lock(old_thread->funnel_lock); | |
1145 | KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, old_thread->funnel_lock, 3, 0, 0, 0); | |
1146 | old_thread->funnel_state = TH_FN_OWNED; | |
1147 | old_thread->wait_result = save_wait_result; | |
1148 | } | |
1149 | (void) spllo(); | |
1150 | call_continuation(continuation); | |
1151 | /*NOTREACHED*/ | |
1152 | } | |
1153 | break; | |
1154 | } | |
1155 | } else { | |
1156 | /* | |
1157 | * check that the new thread has a stack | |
1158 | */ | |
1159 | if (new_thread->state & TH_STACK_STATE) { | |
1160 | get_new_stack: | |
1161 | /* has no stack. if not already waiting for one try to get one */ | |
1162 | if ((new_thread->state & TH_STACK_COMING_IN) || | |
1163 | /* not already waiting. nonblocking try to get one */ | |
1164 | !stack_alloc_try(new_thread, thread_continue)) | |
1165 | { | |
1166 | /* couldn't get one. schedule new thread to get a stack and | |
1167 | return failure so we can try another thread. */ | |
1168 | thread_swapin(new_thread); | |
1169 | thread_unlock(new_thread); | |
1170 | counter_always(c_thread_invoke_misses++); | |
1171 | return FALSE; | |
1172 | } | |
1173 | } else if (old_thread == new_thread) { | |
1174 | counter(++c_thread_invoke_same); | |
1175 | thread_unlock(new_thread); | |
1176 | return TRUE; | |
1177 | } | |
1178 | ||
 1179 | /* new thread now has a stack. it has been set up to resume in | |
 1180 | thread_continue so it can dispatch the old thread, deal with | |
 1181 | funnelling and then go to its true continuation point */ |
1182 | } | |
1183 | ||
1184 | new_thread->state &= ~(TH_STACK_HANDOFF | TH_UNINT); | |
1185 | ||
1186 | /* | |
1187 | * Set up ast context of new thread and switch to its timer. | |
1188 | */ | |
1189 | new_thread->last_processor = current_processor(); | |
1190 | ast_context(new_thread->top_act, cpu_number()); | |
1191 | timer_switch(&new_thread->system_timer); | |
1192 | assert(thread_runnable(new_thread)); | |
1193 | ||
1194 | /* | |
1195 | * N.B. On return from the call to switch_context, 'old_thread' | |
1196 | * points at the thread that yielded to us. Unfortunately, at | |
1197 | * this point, there are no simple_locks held, so if we are preempted | |
1198 | * before the call to thread_dispatch blocks preemption, it is | |
1199 | * possible for 'old_thread' to terminate, leaving us with a | |
1200 | * stale thread pointer. | |
1201 | */ | |
1202 | disable_preemption(); | |
1203 | ||
1204 | thread_unlock(new_thread); | |
1205 | ||
1206 | counter_always(c_thread_invoke_csw++); | |
1207 | current_task()->csw++; | |
1208 | ||
1209 | ||
1210 | thread_lock(old_thread); | |
1211 | old_thread->reason = reason; | |
1212 | assert(old_thread->runq == RUN_QUEUE_NULL); | |
1213 | ||
1214 | if (continuation != (void (*)(void))0) | |
1215 | old_thread->continuation = continuation; | |
1216 | ||
1217 | /* Indicate to sched policy that old thread has stopped execution */ | |
1218 | policy = &sched_policy[old_thread->policy]; | |
1219 | /*** ??? maybe use a macro -- ***/ | |
1220 | sfr = policy->sp_ops.sp_thread_done(policy, old_thread); | |
1221 | assert(sfr == SF_SUCCESS); | |
1222 | thread_unlock(old_thread); | |
1223 | ||
1224 | /* | |
1225 | * switch_context is machine-dependent. It does the | |
1226 | * machine-dependent components of a context-switch, like | |
1227 | * changing address spaces. It updates active_threads. | |
1228 | */ | |
1229 | old_thread = switch_context(old_thread, continuation, new_thread); | |
1230 | ||
1231 | /* Now on new thread's stack. Set a local variable to refer to it. */ | |
1232 | new_thread = __current_thread(); | |
1233 | assert(old_thread != new_thread); | |
1234 | ||
1235 | assert(thread_runnable(new_thread)); | |
1236 | ||
1237 | thread_lock(new_thread); | |
1238 | assert(thread_runnable(new_thread)); | |
1239 | /* Indicate to sched policy that new thread has started execution */ | |
1240 | policy = &sched_policy[new_thread->policy]; | |
1241 | /*** ??? maybe use a macro -- rkc, 1/4/96 ***/ | |
1242 | sfr = policy->sp_ops.sp_thread_begin(policy, new_thread); | |
1243 | assert(sfr == SF_SUCCESS); | |
1244 | thread_unlock(new_thread); | |
1245 | ||
1246 | /* | |
1247 | * We're back. Now old_thread is the thread that resumed | |
1248 | * us, and we have to dispatch it. | |
1249 | */ | |
1250 | /* CHECKME! */ | |
1251 | // Code from OSF in Grenoble deleted the following fields. They were | |
1252 | // used in HPPA and 386 code, but not in the PPC for other than | |
1253 | // just setting and resetting. They didn't delete these lines from | |
1254 | // the MACH_RT builds, though, causing compile errors. I'm going | |
1255 | // to make a wild guess and assume we can just delete these. | |
1256 | #if 0 | |
1257 | if (old_thread->preempt == TH_NOT_PREEMPTABLE) { | |
1258 | /* | |
1259 | * Mark that we have been really preempted | |
1260 | */ | |
1261 | old_thread->preempt = TH_PREEMPTED; | |
1262 | } | |
1263 | #endif | |
1264 | thread_dispatch(old_thread); | |
1265 | enable_preemption(); | |
1266 | ||
1267 | /* if we get here and 'continuation' is set that means the | |
1268 | * switch_context() path returned and did not call out | |
1269 | * to the continuation. we will do it manually here */ | |
1270 | if (continuation) { | |
1271 | call_continuation(continuation); | |
1272 | /* NOTREACHED */ | |
1273 | } | |
1274 | ||
1275 | return TRUE; | |
1276 | } | |
1277 | ||
1278 | /* | |
1279 | * thread_continue: | |
1280 | * | |
 1281 | * Called when launching a new thread, at splsched(). | |
1282 | */ | |
1283 | void | |
1284 | thread_continue( | |
1285 | register thread_t old_thread) | |
1286 | { | |
1287 | register thread_t self; | |
1288 | register void (*continuation)(); | |
1289 | sched_policy_t *policy; | |
1290 | sf_return_t sfr; | |
1291 | ||
1292 | self = current_thread(); | |
1293 | ||
1294 | /* | |
1295 | * We must dispatch the old thread and then | |
1296 | * call the current thread's continuation. | |
1297 | * There might not be an old thread, if we are | |
1298 | * the first thread to run on this processor. | |
1299 | */ | |
1300 | if (old_thread != THREAD_NULL) { | |
1301 | thread_dispatch(old_thread); | |
1302 | ||
1303 | thread_lock(self); | |
1304 | ||
1305 | /* Get pointer to scheduling policy "object" */ | |
1306 | policy = &sched_policy[self->policy]; | |
1307 | ||
1308 | /* Indicate to sched policy that new thread has started execution */ | |
1309 | /*** ??? maybe use a macro -- rkc, 1/4/96 ***/ | |
1310 | sfr = policy->sp_ops.sp_thread_begin(policy,self); | |
1311 | assert(sfr == SF_SUCCESS); | |
1312 | } else { | |
1313 | thread_lock(self); | |
1314 | } | |
1315 | ||
1316 | continuation = self->continuation; | |
1317 | self->continuation = (void (*)(void))0; | |
1318 | thread_unlock(self); | |
1319 | ||
1320 | /* | |
1321 | * N.B. - the following is necessary, since thread_invoke() | |
1322 | * inhibits preemption on entry and reenables before it | |
1323 | * returns. Unfortunately, the first time a newly-created | |
1324 | * thread executes, it magically appears here, and never | |
1325 | * executes the enable_preemption() call in thread_invoke(). | |
1326 | */ | |
1327 | enable_preemption(); | |
1328 | ||
1329 | if (self->funnel_state & TH_FN_REFUNNEL) { | |
1330 | kern_return_t save_wait_result; | |
1331 | self->funnel_state = 0; | |
1332 | save_wait_result = self->wait_result; | |
1333 | KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, self->funnel_lock, 4, 0, 0, 0); | |
1334 | funnel_lock(self->funnel_lock); | |
1335 | KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, self->funnel_lock, 4, 0, 0, 0); | |
1336 | self->wait_result = save_wait_result; | |
1337 | self->funnel_state = TH_FN_OWNED; | |
1338 | } | |
1339 | spllo(); | |
1340 | ||
1341 | assert(continuation); | |
1342 | (*continuation)(); | |
1343 | /*NOTREACHED*/ | |
1344 | } | |
1345 | ||
1346 | #if MACH_LDEBUG || MACH_KDB | |
1347 | ||
1348 | #define THREAD_LOG_SIZE 300 | |
1349 | ||
1350 | struct t64 { | |
1351 | unsigned long h; | |
1352 | unsigned long l; | |
1353 | }; | |
1354 | ||
1355 | struct { | |
1356 | struct t64 stamp; | |
1357 | thread_t thread; | |
1358 | long info1; | |
1359 | long info2; | |
1360 | long info3; | |
1361 | char * action; | |
1362 | } thread_log[THREAD_LOG_SIZE]; | |
1363 | ||
1364 | int thread_log_index; | |
1365 | ||
1366 | void check_thread_time(long n); | |
1367 | ||
1368 | ||
1369 | int check_thread_time_crash; | |
1370 | ||
1371 | #if 0 | |
1372 | void | |
1373 | check_thread_time(long us) | |
1374 | { | |
1375 | struct t64 temp; | |
1376 | ||
1377 | if (!check_thread_time_crash) | |
1378 | return; | |
1379 | ||
1380 | temp = thread_log[0].stamp; | |
1381 | cyctm05_diff (&thread_log[1].stamp, &thread_log[0].stamp, &temp); | |
1382 | ||
 1383 | if (temp.l >= us && thread_log[1].info1 != 0x49) /* HACK!!! */ | |
1384 | panic ("check_thread_time"); | |
1385 | } | |
1386 | #endif | |
1387 | ||
1388 | void | |
1389 | log_thread_action(char * action, long info1, long info2, long info3) | |
1390 | { | |
1391 | int i; | |
1392 | spl_t x; | |
1393 | static unsigned int tstamp; | |
1394 | ||
1395 | x = splhigh(); | |
1396 | ||
1397 | for (i = THREAD_LOG_SIZE-1; i > 0; i--) { | |
1398 | thread_log[i] = thread_log[i-1]; | |
1399 | } | |
1400 | ||
1401 | thread_log[0].stamp.h = 0; | |
1402 | thread_log[0].stamp.l = tstamp++; | |
1403 | thread_log[0].thread = current_thread(); | |
1404 | thread_log[0].info1 = info1; | |
1405 | thread_log[0].info2 = info2; | |
1406 | thread_log[0].info3 = info3; | |
1407 | thread_log[0].action = action; | |
1408 | /* strcpy (&thread_log[0].action[0], action);*/ | |
1409 | ||
1410 | splx(x); | |
1411 | } | |
1412 | #endif /* MACH_LDEBUG || MACH_KDB */ | |
1413 | ||
1414 | #if MACH_KDB | |
1415 | #include <ddb/db_output.h> | |
1416 | void db_show_thread_log(void); | |
1417 | ||
1418 | void | |
1419 | db_show_thread_log(void) | |
1420 | { | |
1421 | int i; | |
1422 | ||
1423 | db_printf ("%s %s %s %s %s %s\n", " Thread ", " Info1 ", " Info2 ", | |
1424 | " Info3 ", " Timestamp ", "Action"); | |
1425 | ||
1426 | for (i = 0; i < THREAD_LOG_SIZE; i++) { | |
1427 | db_printf ("%08x %08x %08x %08x %08x/%08x %s\n", | |
1428 | thread_log[i].thread, | |
1429 | thread_log[i].info1, | |
1430 | thread_log[i].info2, | |
1431 | thread_log[i].info3, | |
1432 | thread_log[i].stamp.h, | |
1433 | thread_log[i].stamp.l, | |
1434 | thread_log[i].action); | |
1435 | } | |
1436 | } | |
1437 | #endif /* MACH_KDB */ | |
1438 | ||
1439 | /* | |
1440 | * thread_block_reason: | |
1441 | * | |
1442 | * Block the current thread. If the thread is runnable | |
1443 | * then someone must have woken it up between its request | |
1444 | * to sleep and now. In this case, it goes back on a | |
1445 | * run queue. | |
1446 | * | |
1447 | * If a continuation is specified, then thread_block will | |
1448 | * attempt to discard the thread's kernel stack. When the | |
1449 | * thread resumes, it will execute the continuation function | |
1450 | * on a new kernel stack. | |
1451 | */ | |
1452 | counter(mach_counter_t c_thread_block_calls = 0;) | |
1453 | ||
1454 | int | |
1455 | thread_block_reason( | |
1456 | void (*continuation)(void), | |
1457 | int reason) | |
1458 | { | |
1459 | register thread_t thread = current_thread(); | |
1460 | register processor_t myprocessor; | |
1461 | register thread_t new_thread; | |
1462 | spl_t s; | |
1463 | ||
1464 | counter(++c_thread_block_calls); | |
1465 | ||
1466 | check_simple_locks(); | |
1467 | ||
1468 | machine_clock_assist(); | |
1469 | ||
1470 | s = splsched(); | |
1471 | ||
1472 | if ((thread->funnel_state & TH_FN_OWNED) && !(reason & AST_PREEMPT)) { | |
1473 | thread->funnel_state = TH_FN_REFUNNEL; | |
1474 | KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE, thread->funnel_lock, 2, 0, 0, 0); | |
1475 | funnel_unlock(thread->funnel_lock); | |
1476 | } | |
1477 | ||
1478 | myprocessor = current_processor(); | |
1479 | ||
1480 | thread_lock(thread); | |
1481 | if (thread->state & TH_ABORT) | |
1482 | clear_wait_internal(thread, THREAD_INTERRUPTED); | |
1483 | ||
1484 | /* Unconditionally remove either | both */ | |
1485 | ast_off(AST_QUANTUM|AST_BLOCK|AST_URGENT); | |
1486 | ||
1487 | new_thread = thread_select(myprocessor); | |
1488 | assert(new_thread); | |
1489 | assert(thread_runnable(new_thread)); | |
1490 | thread_unlock(thread); | |
1491 | while (!thread_invoke(thread, new_thread, reason, continuation)) { | |
1492 | thread_lock(thread); | |
1493 | new_thread = thread_select(myprocessor); | |
1494 | assert(new_thread); | |
1495 | assert(thread_runnable(new_thread)); | |
1496 | thread_unlock(thread); | |
1497 | } | |
1498 | ||
1499 | if (thread->funnel_state & TH_FN_REFUNNEL) { | |
1500 | kern_return_t save_wait_result; | |
1501 | ||
1502 | save_wait_result = thread->wait_result; | |
1503 | thread->funnel_state = 0; | |
1504 | KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, thread->funnel_lock, 5, 0, 0, 0); | |
1505 | funnel_lock(thread->funnel_lock); | |
1506 | KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, thread->funnel_lock, 5, 0, 0, 0); | |
1507 | thread->funnel_state = TH_FN_OWNED; | |
1508 | thread->wait_result = save_wait_result; | |
1509 | } | |
1510 | ||
1511 | splx(s); | |
1512 | ||
1513 | return thread->wait_result; | |
1514 | } | |
1515 | ||
1516 | /* | |
1517 | * thread_block: | |
1518 | * | |
1519 | * Now calls thread_block_reason() which forwards the | |
 1520 | * reason parameter to thread_invoke() so it can | |
1521 | * do the right thing if the thread's quantum expired. | |
1522 | */ | |
1523 | int | |
1524 | thread_block( | |
1525 | void (*continuation)(void)) | |
1526 | { | |
1527 | return thread_block_reason(continuation, 0); | |
1528 | } | |
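/*
 * Editorial sketch of the two blocking styles, assuming an
 * assert_wait() has already been done.  Without a continuation the
 * kernel stack is kept and thread_block() returns the wait result;
 * with a continuation the stack may be discarded and the thread
 * resumes in the continuation, which never returns.
 */
#if 0	/* example only; my_continuation is hypothetical */
	/* style 1: keep the stack, inspect the wait result */
	wait_result = thread_block((void (*)(void)) 0);

	/* style 2: allow the stack to be discarded */
	thread_block(my_continuation);
	/*NOTREACHED*/
#endif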
1529 | ||
1530 | /* | |
1531 | * thread_run: | |
1532 | * | |
1533 | * Switch directly from the current thread to a specified | |
1534 | * thread. Both the current and new threads must be | |
1535 | * runnable. | |
1536 | * | |
1537 | * Assumption: | |
1538 | * at splsched. | |
1539 | */ | |
1540 | int | |
1541 | thread_run( | |
1542 | thread_t old_thread, | |
1543 | void (*continuation)(void), | |
1544 | thread_t new_thread) | |
1545 | { | |
1546 | while (!thread_invoke(old_thread, new_thread, 0, continuation)) { | |
1547 | register processor_t myprocessor = current_processor(); | |
1548 | thread_lock(old_thread); | |
1549 | new_thread = thread_select(myprocessor); | |
1550 | thread_unlock(old_thread); | |
1551 | } | |
1552 | return old_thread->wait_result; | |
1553 | } | |
1554 | ||
1555 | /* | |
1556 | * Dispatches a running thread that is not on a runq. | |
1557 | * Called at splsched. | |
1558 | */ | |
1559 | void | |
1560 | thread_dispatch( | |
1561 | register thread_t thread) | |
1562 | { | |
1563 | sched_policy_t *policy; | |
1564 | sf_return_t sfr; | |
1565 | ||
1566 | /* | |
1567 | * If we are discarding the thread's stack, we must do it | |
1568 | * before the thread has a chance to run. | |
1569 | */ | |
1570 | wake_lock(thread); | |
1571 | thread_lock(thread); | |
1572 | ||
1573 | #ifndef i386 | |
1574 | /* no continuations on i386 for now */ | |
1575 | if (thread->continuation != (void (*)())0) { | |
1576 | assert((thread->state & TH_STACK_STATE) == 0); | |
1577 | thread->state |= TH_STACK_HANDOFF; | |
1578 | stack_free(thread); | |
1579 | if (thread->top_act) { | |
1580 | act_machine_sv_free(thread->top_act); | |
1581 | } | |
1582 | } | |
1583 | #endif | |
1584 | ||
1585 | switch (thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_IDLE)) { | |
1586 | ||
1587 | case TH_RUN | TH_UNINT: | |
1588 | case TH_RUN: | |
1589 | /* | |
1590 | * No reason to stop. Put back on a run queue. | |
1591 | */ | |
1592 | /* Leave enqueueing thread up to scheduling policy */ | |
1593 | policy = &sched_policy[thread->policy]; | |
1594 | /*** ??? maybe use a macro ***/ | |
1595 | sfr = policy->sp_ops.sp_thread_dispatch(policy, thread); | |
1596 | assert(sfr == SF_SUCCESS); | |
1597 | break; | |
1598 | ||
1599 | case TH_RUN | TH_WAIT | TH_UNINT: | |
1600 | case TH_RUN | TH_WAIT: | |
1601 | thread->sleep_stamp = sched_tick; | |
1602 | /* fallthrough */ | |
1603 | case TH_WAIT: /* this happens! */ | |
1604 | ||
1605 | /* | |
1606 | * Waiting | |
1607 | */ | |
1608 | thread->state &= ~TH_RUN; | |
1609 | if (thread->state & TH_TERMINATE) | |
1610 | thread_reaper_enqueue(thread); | |
1611 | ||
1612 | if (thread->wake_active) { | |
1613 | thread->wake_active = FALSE; | |
1614 | thread_unlock(thread); | |
1615 | wake_unlock(thread); | |
1616 | thread_wakeup((event_t)&thread->wake_active); | |
1617 | return; | |
1618 | } | |
1619 | break; | |
1620 | ||
1621 | case TH_RUN | TH_IDLE: | |
1622 | /* | |
1623 | * Drop idle thread -- it is already in | |
1624 | * idle_thread_array. | |
1625 | */ | |
1626 | break; | |
1627 | ||
1628 | default: | |
1629 | panic("State 0x%x \n",thread->state); | |
1630 | } | |
1631 | thread_unlock(thread); | |
1632 | wake_unlock(thread); | |
1633 | } | |
1634 | ||
1635 | /* | |
1636 | * Enqueue thread on run queue. Thread must be locked, | |
1637 | * and not already be on a run queue. | |
1638 | */ | |
1639 | int | |
1640 | run_queue_enqueue( | |
1641 | register run_queue_t rq, | |
1642 | register thread_t thread, | |
1643 | boolean_t tail) | |
1644 | { | |
1645 | register int whichq; | |
1646 | int oldrqcount; | |
1647 | ||
1648 | whichq = thread->sched_pri; | |
1649 | assert(whichq >= MINPRI && whichq <= MAXPRI); | |
1650 | ||
1651 | simple_lock(&rq->lock); /* lock the run queue */ | |
1652 | assert(thread->runq == RUN_QUEUE_NULL); | |
1653 | if (tail) | |
1654 | enqueue_tail(&rq->queues[whichq], (queue_entry_t)thread); | |
1655 | else | |
1656 | enqueue_head(&rq->queues[whichq], (queue_entry_t)thread); | |
1657 | ||
1658 | setbit(MAXPRI - whichq, rq->bitmap); | |
1659 | if (whichq > rq->highq) | |
1660 | rq->highq = whichq; | |
1661 | ||
1662 | oldrqcount = rq->count++; | |
1663 | thread->runq = rq; | |
1664 | thread->whichq = whichq; | |
1665 | #if DEBUG | |
1666 | thread_check(thread, rq); | |
1667 | #endif /* DEBUG */ | |
1668 | simple_unlock(&rq->lock); | |
1669 | ||
1670 | return (oldrqcount); | |
1671 | } | |
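/*
 * Worked example (editorial): enqueueing a thread with sched_pri 31
 * links it onto rq->queues[31], sets bit (MAXPRI - 31) in rq->bitmap
 * so the highest occupied queue can be found with a bit scan, and
 * raises rq->highq to 31 if that exceeds the previous high-water mark.
 */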
1672 | ||
1673 | /* | |
1674 | * thread_setrun: | |
1675 | * | |
1676 | * Make thread runnable; dispatch directly onto an idle processor | |
1677 | * if possible. Else put on appropriate run queue (processor | |
 1678 | * if bound, else processor set). Caller must have lock on thread. | |
1679 | * This is always called at splsched. | |
1680 | * The tail parameter, if TRUE || TAIL_Q, indicates that the | |
1681 | * thread should be placed at the tail of the runq. If | |
1682 | * FALSE || HEAD_Q the thread will be placed at the head of the | |
1683 | * appropriate runq. | |
1684 | */ | |
1685 | void | |
1686 | thread_setrun( | |
1687 | register thread_t new_thread, | |
1688 | boolean_t may_preempt, | |
1689 | boolean_t tail) | |
1690 | { | |
1691 | register processor_t processor; | |
1692 | register run_queue_t runq; | |
1693 | register processor_set_t pset; | |
1694 | thread_t thread; | |
1695 | ast_t ast_flags = AST_BLOCK; | |
1696 | ||
1697 | mp_disable_preemption(); | |
1698 | ||
1699 | assert(!(new_thread->state & TH_SWAPPED_OUT)); | |
1700 | assert(thread_runnable(new_thread)); | |
1701 | ||
1702 | /* | |
1703 | * Update priority if needed. | |
1704 | */ | |
1705 | if (new_thread->sched_stamp != sched_tick) | |
1706 | update_priority(new_thread); | |
1707 | ||
1708 | if (new_thread->policy & (POLICY_FIFO|POLICY_RR)) { | |
1709 | if ( new_thread->sched_pri >= (MAXPRI_KERNBAND - 2) && | |
1710 | kernel_preemption_mode == KERNEL_PREEMPT ) | |
1711 | ast_flags |= AST_URGENT; | |
1712 | } | |
1713 | ||
1714 | assert(new_thread->runq == RUN_QUEUE_NULL); | |
1715 | ||
1716 | /* | |
1717 | * Try to dispatch the thread directly onto an idle processor. | |
1718 | */ | |
1719 | if ((processor = new_thread->bound_processor) == PROCESSOR_NULL) { | |
1720 | /* | |
1721 | * Not bound, any processor in the processor set is ok. | |
1722 | */ | |
1723 | pset = new_thread->processor_set; | |
1724 | if (pset->idle_count > 0) { | |
1725 | simple_lock(&pset->idle_lock); | |
1726 | if (pset->idle_count > 0) { | |
1727 | processor = (processor_t) queue_first(&pset->idle_queue); | |
1728 | queue_remove(&(pset->idle_queue), processor, processor_t, | |
1729 | processor_queue); | |
1730 | pset->idle_count--; | |
1731 | processor->next_thread = new_thread; | |
1732 | processor->state = PROCESSOR_DISPATCHING; | |
1733 | simple_unlock(&pset->idle_lock); | |
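 | /* wake the chosen processor, unless it is the one we are running on */ | |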
1734 | if(processor->slot_num != cpu_number()) | |
1735 | machine_signal_idle(processor); | |
1736 | mp_enable_preemption(); | |
1737 | return; | |
1738 | } | |
1739 | simple_unlock(&pset->idle_lock); | |
1740 | } | |
1741 | ||
1742 | ||
1743 | /* | |
1744 | * Preempt check | |
1745 | */ | |
1746 | runq = &pset->runq; | |
1747 | thread = current_thread(); | |
1748 | processor = current_processor(); | |
1749 | if ( may_preempt && | |
1750 | pset == processor->processor_set && | |
1751 | thread->sched_pri < new_thread->sched_pri ) { | |
1752 | /* | |
1753 | * XXX if we have a non-empty local runq or are | |
1754 | * XXX running a bound thread, ought to check for | |
1755 | * XXX another cpu running lower-pri thread to preempt. | |
1756 | */ | |
1757 | /* | |
1758 | * Turn off first_quantum to allow csw. | |
1759 | */ | |
1760 | processor->first_quantum = FALSE; | |
1761 | ||
1762 | ast_on(ast_flags); | |
1763 | } | |
1764 | ||
1765 | /* | |
1766 | * Enqueue the thread on the run queue in either case; if we | 
1767 | * are preempting, the context switch will pick it up from there. | |
1768 | */ | |
1769 | run_queue_enqueue(runq, new_thread, tail); | |
1770 | } | |
1771 | else { | |
1772 | /* | |
1773 | * Bound, can only run on bound processor. Have to lock | |
1774 | * processor here because it may not be the current one. | |
1775 | */ | |
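 | /* | |
 | * If the processor looks idle, confirm that under both the | |
 | * processor lock and the pset idle lock before handing it | |
 | * the thread directly. | |
 | */ | |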
1776 | if (processor->state == PROCESSOR_IDLE) { | |
1777 | simple_lock(&processor->lock); | |
1778 | pset = processor->processor_set; | |
1779 | simple_lock(&pset->idle_lock); | |
1780 | if (processor->state == PROCESSOR_IDLE) { | |
1781 | queue_remove(&pset->idle_queue, processor, | |
1782 | processor_t, processor_queue); | |
1783 | pset->idle_count--; | |
1784 | processor->next_thread = new_thread; | |
1785 | processor->state = PROCESSOR_DISPATCHING; | |
1786 | simple_unlock(&pset->idle_lock); | |
1787 | simple_unlock(&processor->lock); | |
1788 | if(processor->slot_num != cpu_number()) | |
1789 | machine_signal_idle(processor); | |
1790 | mp_enable_preemption(); | |
1791 | return; | |
1792 | } | |
1793 | simple_unlock(&pset->idle_lock); | |
1794 | simple_unlock(&processor->lock); | |
1795 | } | |
1796 | ||
1797 | /* | |
1798 | * Cause ast on processor if processor is on line, and the | |
1799 | * currently executing thread is not bound to that processor | |
1800 | * (bound threads have implicit priority over non-bound threads). | |
1801 | * We also avoid sending the AST to the idle thread (if it got | |
1802 | * scheduled in the window between the 'if' above and here), | |
1803 | * since the idle_thread is bound. | |
1804 | */ | |
1805 | runq = &processor->runq; | |
1806 | thread = current_thread(); | |
1807 | if (processor == current_processor()) { | |
1808 | if ( thread->bound_processor == PROCESSOR_NULL || | |
1809 | thread->sched_pri < new_thread->sched_pri ) { | |
1810 | processor->first_quantum = FALSE; | |
1811 | ast_on(ast_flags); | |
1812 | } | |
1813 | ||
1814 | run_queue_enqueue(runq, new_thread, tail); | |
1815 | } | |
1816 | else { | |
1817 | thread = cpu_data[processor->slot_num].active_thread; | |
1818 | if ( run_queue_enqueue(runq, new_thread, tail) == 0 && | |
1819 | processor->state != PROCESSOR_OFF_LINE && | |
1820 | thread && thread->bound_processor != processor ) | |
1821 | cause_ast_check(processor); | |
1822 | } | |
1823 | } | |
1824 | ||
1825 | mp_enable_preemption(); | |
1826 | } | |
1827 | ||
1828 | /* | |
1829 | * set_pri: | |
1830 | * | |
1831 | * Set the priority of the specified thread to the specified | |
1832 | * priority. This may cause the thread to change queues. | |
1833 | * | |
1834 | * The thread *must* be locked by the caller. | |
1835 | */ | |
1836 | void | |
1837 | set_pri( | |
1838 | thread_t thread, | |
1839 | int pri, | |
1840 | boolean_t resched) | |
1841 | { | |
1842 | register struct run_queue *rq; | |
1843 | ||
1844 | rq = rem_runq(thread); | |
1845 | assert(thread->runq == RUN_QUEUE_NULL); | |
1846 | thread->sched_pri = pri; | |
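 | /* | |
 | * If the thread was on a run queue, requeue it at its new priority: | |
 | * via thread_setrun (which may preempt) when resched is set, | |
 | * otherwise directly back onto the run queue it came from. | |
 | */ | |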
1847 | if (rq != RUN_QUEUE_NULL) { | |
1848 | if (resched) | |
1849 | thread_setrun(thread, TRUE, TAIL_Q); | |
1850 | else | |
1851 | run_queue_enqueue(rq, thread, TAIL_Q); | |
1852 | } | |
1853 | } | |
1854 | ||
1855 | /* | |
1856 | * rem_runq: | |
1857 | * | |
1858 | * Remove a thread from its run queue. | |
1859 | * The run queue that the thread was on is returned | 
1860 | * (or RUN_QUEUE_NULL if not on a run queue). Thread *must* be locked | |
1861 | * before calling this routine. Unusual locking protocol on runq | |
1862 | * field in thread structure makes this code interesting; see thread.h. | |
1863 | */ | |
1864 | run_queue_t | |
1865 | rem_runq( | |
1866 | thread_t thread) | |
1867 | { | |
1868 | register struct run_queue *rq; | |
1869 | ||
1870 | rq = thread->runq; | |
1871 | /* | |
1872 | * If rq is RUN_QUEUE_NULL, the thread will stay out of the | |
1873 | * run_queues because the caller locked the thread. Otherwise | |
1874 | * the thread is on a runq, but could leave. | |
1875 | */ | |
1876 | if (rq != RUN_QUEUE_NULL) { | |
1877 | simple_lock(&rq->lock); | |
1878 | if (rq == thread->runq) { | |
1879 | /* | |
1880 | * Thread is in a runq and we have a lock on | |
1881 | * that runq. | |
1882 | */ | |
1883 | #if DEBUG | |
1884 | thread_check(thread, rq); | |
1885 | #endif /* DEBUG */ | |
1886 | remqueue(&rq->queues[0], (queue_entry_t)thread); | |
1887 | rq->count--; | |
1888 | ||
1889 | if (queue_empty(rq->queues + thread->sched_pri)) { | |
1890 | /* update run queue status */ | |
1891 | if (thread->sched_pri != IDLEPRI) | |
1892 | clrbit(MAXPRI - thread->sched_pri, rq->bitmap); | |
1893 | rq->highq = MAXPRI - ffsbit(rq->bitmap); | |
1894 | } | |
1895 | thread->runq = RUN_QUEUE_NULL; | |
1896 | simple_unlock(&rq->lock); | |
1897 | } | |
1898 | else { | |
1899 | /* | |
1900 | * The thread left the runq before we could | |
1901 | * lock the runq. It is not on a runq now, and | |
1902 | * can't move again because this routine's | |
1903 | * caller locked the thread. | |
1904 | */ | |
1905 | assert(thread->runq == RUN_QUEUE_NULL); | |
1906 | simple_unlock(&rq->lock); | |
1907 | rq = RUN_QUEUE_NULL; | |
1908 | } | |
1909 | } | |
1910 | ||
1911 | return (rq); | |
1912 | } | |
1913 | ||
1914 | ||
1915 | /* | |
1916 | * choose_thread: | |
1917 | * | |
1918 | * Choose a thread to execute. The thread chosen is removed | |
1919 | * from its run queue. Note that this requires only that the runq | |
1920 | * lock be held. | |
1921 | * | |
1922 | * Strategy: | |
1923 | * Check processor runq first; if anything found, run it. | |
1924 | * Else check pset runq; if nothing found, return idle thread. | |
1925 | * | |
1926 | * Second line of strategy is implemented by choose_pset_thread. | |
1927 | * This is only called on processor startup and when thread_block | |
1928 | * thinks there's something in the processor runq. | |
1929 | */ | |
1930 | thread_t | |
1931 | choose_thread( | |
1932 | processor_t myprocessor) | |
1933 | { | |
1934 | thread_t thread; | |
1935 | register queue_t q; | |
1936 | register run_queue_t runq; | |
1937 | processor_set_t pset; | |
1938 | ||
1939 | runq = &myprocessor->runq; | |
1940 | pset = myprocessor->processor_set; | |
1941 | ||
1942 | simple_lock(&runq->lock); | |
1943 | if (runq->count > 0 && runq->highq >= pset->runq.highq) { | |
1944 | q = runq->queues + runq->highq; | |
1945 | #if MACH_ASSERT | |
1946 | if (!queue_empty(q)) { | |
1947 | #endif /*MACH_ASSERT*/ | |
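 | /* dequeue the first thread from the highest-priority non-empty local queue */ | |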
1948 | thread = (thread_t)q->next; | |
1949 | ((queue_entry_t)thread)->next->prev = q; | |
1950 | q->next = ((queue_entry_t)thread)->next; | |
1951 | thread->runq = RUN_QUEUE_NULL; | |
1952 | runq->count--; | |
1953 | if (queue_empty(q)) { | |
1954 | if (runq->highq != IDLEPRI) | |
1955 | clrbit(MAXPRI - runq->highq, runq->bitmap); | |
1956 | runq->highq = MAXPRI - ffsbit(runq->bitmap); | |
1957 | } | |
1958 | simple_unlock(&runq->lock); | |
1959 | return (thread); | |
1960 | #if MACH_ASSERT | |
1961 | } | |
1962 | panic("choose_thread"); | |
1963 | #endif /*MACH_ASSERT*/ | |
1964 | /*NOTREACHED*/ | |
1965 | } | |
1966 | ||
1967 | simple_unlock(&runq->lock); | |
1968 | simple_lock(&pset->runq.lock); | |
1969 | return (choose_pset_thread(myprocessor, pset)); | |
1970 | } | |
1971 | ||
1972 | ||
1973 | /* | |
1974 | * choose_pset_thread: choose a thread from processor_set runq or | |
1975 | * set processor idle and choose its idle thread. | |
1976 | * | |
1977 | * Caller must be at splsched and have a lock on the runq. This | |
1978 | * lock is released by this routine. myprocessor is always the current | |
1979 | * processor, and pset must be its processor set. | |
1980 | * This routine chooses and removes a thread from the runq if there | |
1981 | * is one (and returns it), else it sets the processor idle and | |
1982 | * returns its idle thread. | |
1983 | */ | |
1984 | thread_t | |
1985 | choose_pset_thread( | |
1986 | register processor_t myprocessor, | |
1987 | processor_set_t pset) | |
1988 | { | |
1989 | register run_queue_t runq; | |
1990 | register thread_t thread; | |
1991 | register queue_t q; | |
1992 | ||
1993 | runq = &pset->runq; | |
1994 | if (runq->count > 0) { | |
1995 | q = runq->queues + runq->highq; | |
1996 | #if MACH_ASSERT | |
1997 | if (!queue_empty(q)) { | |
1998 | #endif /*MACH_ASSERT*/ | |
1999 | thread = (thread_t)q->next; | |
2000 | ((queue_entry_t)thread)->next->prev = q; | |
2001 | q->next = ((queue_entry_t)thread)->next; | |
2002 | thread->runq = RUN_QUEUE_NULL; | |
2003 | runq->count--; | |
2004 | if (queue_empty(q)) { | |
2005 | if (runq->highq != IDLEPRI) | |
2006 | clrbit(MAXPRI - runq->highq, runq->bitmap); | |
2007 | runq->highq = MAXPRI - ffsbit(runq->bitmap); | |
2008 | } | |
2009 | simple_unlock(&runq->lock); | |
2010 | return (thread); | |
2011 | #if MACH_ASSERT | |
2012 | } | |
2013 | panic("choose_pset_thread"); | |
2014 | #endif /*MACH_ASSERT*/ | |
2015 | /*NOTREACHED*/ | |
2016 | } | |
2017 | simple_unlock(&runq->lock); | |
2018 | ||
2019 | /* | |
2020 | * Nothing is runnable, so set this processor idle if it | |
2021 | * was running. If it was in an assignment or shutdown, | |
2022 | * leave it alone. Return its idle thread. | |
2023 | */ | |
2024 | simple_lock(&pset->idle_lock); | |
2025 | if (myprocessor->state == PROCESSOR_RUNNING) { | |
2026 | myprocessor->state = PROCESSOR_IDLE; | |
2027 | /* | |
2028 | * XXX Until it goes away, put master on end of queue, others | |
2029 | * XXX on front so master gets used last. | |
2030 | */ | |
2031 | if (myprocessor == master_processor) | |
2032 | queue_enter(&(pset->idle_queue), myprocessor, | |
2033 | processor_t, processor_queue); | |
2034 | else | |
2035 | queue_enter_first(&(pset->idle_queue), myprocessor, | |
2036 | processor_t, processor_queue); | |
2037 | ||
2038 | pset->idle_count++; | |
2039 | } | |
2040 | simple_unlock(&pset->idle_lock); | |
2041 | ||
2042 | return (myprocessor->idle_thread); | |
2043 | } | |
2044 | ||
2045 | /* | |
2046 | * no_dispatch_count counts the number of times a processor goes non-idle | 
2047 | * without being dispatched.  This should be very rare. | |
2048 | */ | |
2049 | int no_dispatch_count = 0; | |
2050 | ||
2051 | /* | |
2052 | * This is the idle thread, which just looks for other threads | |
2053 | * to execute. | |
2054 | */ | |
2055 | void | |
2056 | idle_thread_continue(void) | |
2057 | { | |
2058 | register processor_t myprocessor; | |
2059 | register volatile thread_t *threadp; | |
2060 | register volatile int *gcount; | |
2061 | register volatile int *lcount; | |
2062 | register thread_t new_thread; | |
2063 | register int state; | |
2064 | register processor_set_t pset; | |
2065 | int mycpu; | |
2066 | ||
2067 | mycpu = cpu_number(); | |
2068 | myprocessor = current_processor(); | |
2069 | threadp = (volatile thread_t *) &myprocessor->next_thread; | |
2070 | lcount = (volatile int *) &myprocessor->runq.count; | |
2071 | ||
2072 | for (;;) { | |
2073 | #ifdef MARK_CPU_IDLE | |
2074 | MARK_CPU_IDLE(mycpu); | |
2075 | #endif /* MARK_CPU_IDLE */ | |
2076 | ||
2077 | gcount = (volatile int *)&myprocessor->processor_set->runq.count; | |
2078 | ||
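 | /* | |
 | * Wait until a thread is handed off to this processor or work | |
 | * appears on the local or global run queue, servicing | |
 | * non-scheduling ASTs or idling the machine in the meantime. | |
 | */ | |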
2079 | (void)splsched(); | |
2080 | while ( (*threadp == (volatile thread_t)THREAD_NULL) && | |
2081 | (*gcount == 0) && (*lcount == 0) ) { | |
2082 | ||
2083 | /* check for ASTs while we wait */ | |
2084 | ||
2085 | if (need_ast[mycpu] &~ (AST_SCHEDULING|AST_URGENT|AST_BSD|AST_BSD_INIT)) { | |
2086 | /* don't allow scheduling ASTs */ | |
2087 | need_ast[mycpu] &= ~(AST_SCHEDULING|AST_URGENT|AST_BSD|AST_BSD_INIT); | |
2088 | ast_taken(FALSE, AST_ALL, TRUE); /* back at spllo */ | |
2089 | } | |
2090 | else | |
2091 | #ifdef __ppc__ | |
2092 | machine_idle(); | |
2093 | #else | |
2094 | (void)spllo(); | |
2095 | #endif | |
2096 | machine_clock_assist(); | |
2097 | ||
2098 | (void)splsched(); | |
2099 | } | |
2100 | ||
2101 | #ifdef MARK_CPU_ACTIVE | |
2102 | (void)spllo(); | |
2103 | MARK_CPU_ACTIVE(mycpu); | |
2104 | (void)splsched(); | |
2105 | #endif /* MARK_CPU_ACTIVE */ | |
2106 | ||
2107 | /* | |
2108 | * This is not a switch statement to avoid the | |
2109 | * bounds checking code in the common case. | |
2110 | */ | |
2111 | pset = myprocessor->processor_set; | |
2112 | simple_lock(&pset->idle_lock); | |
2113 | retry: | |
2114 | state = myprocessor->state; | |
2115 | if (state == PROCESSOR_DISPATCHING) { | |
2116 | /* | |
2117 | * Common case -- cpu dispatched. | |
2118 | */ | |
2119 | new_thread = *threadp; | |
2120 | *threadp = (volatile thread_t) THREAD_NULL; | |
2121 | myprocessor->state = PROCESSOR_RUNNING; | |
2122 | simple_unlock(&pset->idle_lock); | |
2123 | ||
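 | /* | |
 | * If a higher-priority thread reached a run queue while we were | |
 | * being dispatched, put the handed-off thread back (at the head) | |
 | * and block again; otherwise run it. | |
 | */ | |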
2124 | thread_lock(new_thread); | |
2125 | simple_lock(&myprocessor->runq.lock); | |
2126 | simple_lock(&pset->runq.lock); | |
2127 | if ( myprocessor->runq.highq > new_thread->sched_pri || | |
2128 | pset->runq.highq > new_thread->sched_pri ) { | |
2129 | simple_unlock(&pset->runq.lock); | |
2130 | simple_unlock(&myprocessor->runq.lock); | |
2131 | ||
2132 | if (new_thread->bound_processor != PROCESSOR_NULL) | |
2133 | run_queue_enqueue(&myprocessor->runq, new_thread, HEAD_Q); | |
2134 | else | |
2135 | run_queue_enqueue(&pset->runq, new_thread, HEAD_Q); | |
2136 | thread_unlock(new_thread); | |
2137 | ||
2138 | counter(c_idle_thread_block++); | |
2139 | thread_block(idle_thread_continue); | |
2140 | } | |
2141 | else { | |
2142 | simple_unlock(&pset->runq.lock); | |
2143 | simple_unlock(&myprocessor->runq.lock); | |
2144 | ||
2145 | /* | |
2146 | * set up quantum for new thread. | |
2147 | */ | |
2148 | if (new_thread->policy & (POLICY_RR|POLICY_FIFO)) | |
2149 | myprocessor->quantum = new_thread->unconsumed_quantum; | |
2150 | else | |
2151 | myprocessor->quantum = pset->set_quantum; | |
2152 | thread_unlock(new_thread); | |
2153 | ||
2154 | myprocessor->first_quantum = TRUE; | |
2155 | counter(c_idle_thread_handoff++); | |
2156 | thread_run(myprocessor->idle_thread, | |
2157 | idle_thread_continue, new_thread); | |
2158 | } | |
2159 | } | |
2160 | else | |
2161 | if (state == PROCESSOR_IDLE) { | |
2162 | if (myprocessor->state != PROCESSOR_IDLE) { | |
2163 | /* | |
2164 | * Something happened, try again. | |
2165 | */ | |
2166 | goto retry; | |
2167 | } | |
2168 | /* | |
2169 | * Processor was not dispatched (Rare). | |
2170 | * Set it running again. | |
2171 | */ | |
2172 | no_dispatch_count++; | |
2173 | pset->idle_count--; | |
2174 | queue_remove(&pset->idle_queue, myprocessor, | |
2175 | processor_t, processor_queue); | |
2176 | myprocessor->state = PROCESSOR_RUNNING; | |
2177 | simple_unlock(&pset->idle_lock); | |
2178 | ||
2179 | counter(c_idle_thread_block++); | |
2180 | thread_block(idle_thread_continue); | |
2181 | } | |
2182 | else | |
2183 | if ( state == PROCESSOR_ASSIGN || | |
2184 | state == PROCESSOR_SHUTDOWN ) { | |
2185 | /* | |
2186 | * Changing processor sets, or going off-line. | |
2187 | * Release next_thread if there is one. Actual | |
2188 | * thread to run is on a runq. | |
2189 | */ | |
2190 | if ((new_thread = (thread_t)*threadp) != THREAD_NULL) { | |
2191 | *threadp = (volatile thread_t) THREAD_NULL; | |
2192 | simple_unlock(&pset->idle_lock); | |
2193 | thread_lock(new_thread); | |
2194 | thread_setrun(new_thread, FALSE, TAIL_Q); | |
2195 | thread_unlock(new_thread); | |
2196 | } else | |
2197 | simple_unlock(&pset->idle_lock); | |
2198 | ||
2199 | counter(c_idle_thread_block++); | |
2200 | thread_block(idle_thread_continue); | |
2201 | } | |
2202 | else { | |
2203 | simple_unlock(&pset->idle_lock); | |
2204 | printf("Bad processor state %d (Cpu %d)\n", | |
2205 | cpu_state(mycpu), mycpu); | |
2206 | panic("idle_thread"); | |
2207 | ||
2208 | } | |
2209 | ||
2210 | (void)spllo(); | |
2211 | } | |
2212 | } | |
2213 | ||
2214 | void | |
2215 | idle_thread(void) | |
2216 | { | |
2217 | thread_t self = current_thread(); | |
2218 | spl_t s; | |
2219 | ||
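 | /* the idle thread keeps its kernel stack and is never swapped */ | |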
2220 | stack_privilege(self); | |
2221 | thread_swappable(current_act(), FALSE); | |
2222 | ||
2223 | s = splsched(); | |
2224 | thread_lock(self); | |
2225 | ||
2226 | self->priority = IDLEPRI; | |
2227 | self->sched_pri = self->priority; | |
2228 | ||
2229 | thread_unlock(self); | |
2230 | splx(s); | |
2231 | ||
2232 | counter(c_idle_thread_block++); | |
2233 | thread_block((void(*)(void))0); | |
2234 | idle_thread_continue(); | |
2235 | /*NOTREACHED*/ | |
2236 | } | |
2237 | ||
2238 | static AbsoluteTime sched_tick_interval, sched_tick_deadline; | |
2239 | ||
2240 | /* | |
2241 | * sched_tick_thread | |
2242 | * | |
2243 | * Update the priorities of all threads periodically. | |
2244 | */ | |
2245 | void | |
2246 | sched_tick_thread_continue(void) | |
2247 | { | |
2248 | AbsoluteTime abstime; | |
2249 | #if SIMPLE_CLOCK | |
2250 | int new_usec; | |
2251 | #endif /* SIMPLE_CLOCK */ | |
2252 | ||
2253 | clock_get_uptime(&abstime); | |
2254 | ||
2255 | sched_tick++; /* age usage one more time */ | |
2256 | #if SIMPLE_CLOCK | |
2257 | /* | |
2258 | * Compensate for clock drift. sched_usec is an | |
2259 | * exponential average of the number of microseconds in | |
2260 | * a second. It decays in the same fashion as cpu_usage. | |
2261 | */ | |
2262 | new_usec = sched_usec_elapsed(); | |
2263 | sched_usec = (5*sched_usec + 3*new_usec)/8; | |
2264 | #endif /* SIMPLE_CLOCK */ | |
2265 | ||
2266 | /* | |
2267 | * Compute the scheduler load factors. | |
2268 | */ | |
2269 | compute_mach_factor(); | |
2270 | ||
2271 | /* | |
2272 | * Scan the run queues for runnable threads that need to | |
2273 | * have their priorities recalculated. | |
2274 | */ | |
2275 | do_thread_scan(); | |
2276 | ||
2277 | clock_deadline_for_periodic_event(sched_tick_interval, abstime, | |
2278 | &sched_tick_deadline); | |
2279 | ||
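 | /* sleep until the timer fires at the next tick deadline */ | |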
2280 | assert_wait((event_t)sched_tick_thread_continue, THREAD_INTERRUPTIBLE); | |
2281 | thread_set_timer_deadline(sched_tick_deadline); | |
2282 | thread_block(sched_tick_thread_continue); | |
2283 | /*NOTREACHED*/ | |
2284 | } | |
2285 | ||
2286 | void | |
2287 | sched_tick_thread(void) | |
2288 | { | |
2289 | thread_t self = current_thread(); | |
2290 | natural_t rate; | |
2291 | spl_t s; | |
2292 | ||
2293 | stack_privilege(self); | |
2294 | thread_swappable(self->top_act, FALSE); | |
2295 | ||
2296 | s = splsched(); | |
2297 | thread_lock(self); | |
2298 | ||
2299 | self->priority = MAXPRI_STANDARD; | |
2300 | self->sched_pri = self->priority; | |
2301 | ||
2302 | thread_unlock(self); | |
2303 | splx(s); | |
2304 | ||
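 | /* | |
 | * The tick period works out to (1000 >> SCHED_TICK_SHIFT) | |
 | * milliseconds, so usage is aged (1 << SCHED_TICK_SHIFT) | |
 | * times per second. | |
 | */ | |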
2305 | rate = (1000 >> SCHED_TICK_SHIFT); | |
2306 | clock_interval_to_absolutetime_interval(rate, USEC_PER_SEC, | |
2307 | &sched_tick_interval); | |
2308 | clock_get_uptime(&sched_tick_deadline); | |
2309 | ||
2310 | thread_block(sched_tick_thread_continue); | |
2311 | /*NOTREACHED*/ | |
2312 | } | |
2313 | ||
2314 | #define MAX_STUCK_THREADS 128 | |
2315 | ||
2316 | /* | |
2317 | * do_thread_scan: scan for stuck threads. A thread is stuck if | |
2318 | * it is runnable but its priority is so low that it has not | |
2319 | * run for several seconds. Its priority should be higher, but | |
2320 | * won't be until it runs and calls update_priority. The scanner | |
2321 | * finds these threads and does the updates. | |
2322 | * | |
2323 | * Scanner runs in two passes. Pass one squirrels likely | |
2324 | * thread ids away in an array (takes out references for them). | |
2325 | * Pass two does the priority updates. This is necessary because | |
2326 | * the run queue lock is required for the candidate scan, but | |
2327 | * cannot be held during updates [set_pri will deadlock]. | |
2328 | * | |
2329 | * Array length should be enough so that restart isn't necessary, | |
2330 | * but restart logic is included.  Both pset and processor runqs are scanned. | 
2331 | * | |
2332 | */ | |
2333 | thread_t stuck_threads[MAX_STUCK_THREADS]; | |
2334 | int stuck_count = 0; | |
2335 | ||
2336 | /* | |
2337 | * do_runq_scan is the guts of pass 1. It scans a runq for | |
2338 | * stuck threads. A boolean is returned indicating whether | |
2339 | * a retry is needed. | |
2340 | */ | |
2341 | boolean_t | |
2342 | do_runq_scan( | |
2343 | run_queue_t runq) | |
2344 | { | |
2345 | register queue_t q; | |
2346 | register thread_t thread; | |
2347 | register int count; | |
2348 | spl_t s; | |
2349 | boolean_t result = FALSE; | |
2350 | ||
2351 | s = splsched(); | |
2352 | simple_lock(&runq->lock); | |
2353 | if ((count = runq->count) > 0) { | |
2354 | q = runq->queues + runq->highq; | |
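 | /* | |
 | * Scan from the highest-priority queue downward; count limits | |
 | * the walk to the number of threads recorded on this runq. | |
 | */ | |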
2355 | while (count > 0) { | |
2356 | queue_iterate(q, thread, thread_t, links) { | |
2357 | if ( !(thread->state & (TH_WAIT|TH_SUSP)) && | |
2358 | thread->policy == POLICY_TIMESHARE ) { | |
2359 | if (thread->sched_stamp != sched_tick) { | |
2360 | /* | |
2361 | * Stuck, save its id for later. | |
2362 | */ | |
2363 | if (stuck_count == MAX_STUCK_THREADS) { | |
2364 | /* | |
2365 | * No more room; force a rescan once these candidates are processed. | |
2366 | */ | |
2367 | simple_unlock(&runq->lock); | |
2368 | splx(s); | |
2369 | ||
2370 | return (TRUE); | |
2371 | } | |
2372 | ||
2373 | /* | |
2374 | * Inline version of thread_reference | |
2375 | * XXX - lock ordering problem here: | |
2376 | * thread locks should be taken before runq | |
2377 | * locks: just try and get the thread's locks | |
2378 | * and ignore this thread if we fail, we might | |
2379 | * have better luck next time. | |
2380 | */ | |
2381 | if (simple_lock_try(&thread->lock)) { | |
2382 | thread->ref_count++; | |
2383 | thread_unlock(thread); | |
2384 | stuck_threads[stuck_count++] = thread; | |
2385 | } | |
2386 | else | |
2387 | result = TRUE; | |
2388 | } | |
2389 | } | |
2390 | ||
2391 | count--; | |
2392 | } | |
2393 | ||
2394 | q--; | |
2395 | } | |
2396 | } | |
2397 | simple_unlock(&runq->lock); | |
2398 | splx(s); | |
2399 | ||
2400 | return (result); | |
2401 | } | |
2402 | ||
2403 | boolean_t thread_scan_enabled = TRUE; | |
2404 | ||
2405 | void | |
2406 | do_thread_scan(void) | |
2407 | { | |
2408 | register boolean_t restart_needed = FALSE; | |
2409 | register thread_t thread; | |
2410 | register processor_set_t pset = &default_pset; | |
2411 | register processor_t processor; | |
2412 | spl_t s; | |
2413 | ||
2414 | if (!thread_scan_enabled) | |
2415 | return; | |
2416 | ||
2417 | do { | |
2418 | restart_needed = do_runq_scan(&pset->runq); | |
2419 | if (!restart_needed) { | |
2420 | simple_lock(&pset->processors_lock); | |
2421 | processor = (processor_t)queue_first(&pset->processors); | |
2422 | while (!queue_end(&pset->processors, (queue_entry_t)processor)) { | |
2423 | if ((restart_needed = do_runq_scan(&processor->runq))) | 
2424 | break; | |
2425 | ||
2426 | processor = (processor_t)queue_next(&processor->processors); | |
2427 | } | |
2428 | simple_unlock(&pset->processors_lock); | |
2429 | } | |
2430 | ||
2431 | /* | |
2432 | * Ok, we now have a collection of candidates -- fix them. | |
2433 | */ | |
2434 | while (stuck_count > 0) { | |
2435 | thread = stuck_threads[--stuck_count]; | |
2436 | stuck_threads[stuck_count] = THREAD_NULL; | |
2437 | s = splsched(); | |
2438 | thread_lock(thread); | |
2439 | if (thread->policy == POLICY_TIMESHARE) { | |
2440 | if ( !(thread->state & (TH_WAIT|TH_SUSP)) && | |
2441 | thread->sched_stamp != sched_tick ) | |
2442 | update_priority(thread); | |
2443 | } | |
2444 | thread_unlock(thread); | |
2445 | splx(s); | |
2446 | thread_deallocate(thread); | |
2447 | } | |
2448 | ||
2449 | } while (restart_needed); | |
2450 | } | |
2451 | ||
2452 | /* | |
2453 | * Just in case someone doesn't use the macro | |
2454 | */ | |
2455 | #undef thread_wakeup | |
2456 | void | |
2457 | thread_wakeup( | |
2458 | event_t x); | |
2459 | ||
2460 | void | |
2461 | thread_wakeup( | |
2462 | event_t x) | |
2463 | { | |
2464 | thread_wakeup_with_result(x, THREAD_AWAKENED); | |
2465 | } | |
2466 | ||
2467 | boolean_t | |
2468 | thread_runnable( | |
2469 | thread_t thread) | |
2470 | { | |
2471 | sched_policy_t *policy; | |
2472 | ||
2473 | /* Ask sched policy if thread is runnable */ | |
2474 | policy = policy_id_to_sched_policy(thread->policy); | |
2475 | ||
2476 | return ((policy != SCHED_POLICY_NULL)? | |
2477 | policy->sp_ops.sp_thread_runnable(policy, thread) : FALSE); | |
2478 | } | |
2479 | ||
2480 | #if DEBUG | |
2481 | ||
2482 | void | |
2483 | dump_processor_set( | |
2484 | processor_set_t ps) | |
2485 | { | |
2486 | printf("processor_set: %08x\n",ps); | |
2487 | printf("idle_queue: %08x %08x, idle_count: 0x%x\n", | |
2488 | ps->idle_queue.next,ps->idle_queue.prev,ps->idle_count); | |
2489 | printf("processors: %08x %08x, processor_count: 0x%x\n", | |
2490 | ps->processors.next,ps->processors.prev,ps->processor_count); | |
2491 | printf("tasks: %08x %08x, task_count: 0x%x\n", | |
2492 | ps->tasks.next,ps->tasks.prev,ps->task_count); | |
2493 | printf("threads: %08x %08x, thread_count: 0x%x\n", | |
2494 | ps->threads.next,ps->threads.prev,ps->thread_count); | |
2495 | printf("ref_count: 0x%x, active: %x\n", | |
2496 | ps->ref_count,ps->active); | |
2497 | printf("pset_self: %08x, pset_name_self: %08x\n",ps->pset_self, ps->pset_name_self); | |
2498 | printf("max_priority: 0x%x, policies: 0x%x, set_quantum: 0x%x\n", | |
2499 | ps->max_priority, ps->policies, ps->set_quantum); | |
2500 | } | |
2501 | ||
2502 | #define processor_state(s) (((s)>PROCESSOR_SHUTDOWN)?"*unknown*":states[s]) | |
2503 | ||
2504 | void | |
2505 | dump_processor( | |
2506 | processor_t p) | |
2507 | { | |
2508 | char *states[]={"OFF_LINE","RUNNING","IDLE","DISPATCHING", | |
2509 | "ASSIGN","SHUTDOWN"}; | |
2510 | ||
2511 | printf("processor: %08x\n",p); | |
2512 | printf("processor_queue: %08x %08x\n", | |
2513 | p->processor_queue.next,p->processor_queue.prev); | |
2514 | printf("state: %8s, next_thread: %08x, idle_thread: %08x\n", | |
2515 | processor_state(p->state), p->next_thread, p->idle_thread); | |
2516 | printf("quantum: %u, first_quantum: %x, last_quantum: %u\n", | |
2517 | p->quantum, p->first_quantum, p->last_quantum); | |
2518 | printf("processor_set: %08x, processor_set_next: %08x\n", | |
2519 | p->processor_set, p->processor_set_next); | |
2520 | printf("processors: %08x %08x\n", p->processors.next,p->processors.prev); | |
2521 | printf("processor_self: %08x, slot_num: 0x%x\n", p->processor_self, p->slot_num); | |
2522 | } | |
2523 | ||
2524 | void | |
2525 | dump_run_queue_struct( | |
2526 | run_queue_t rq) | |
2527 | { | |
2528 | char dump_buf[80]; | |
2529 | int i; | |
2530 | ||
2531 | for( i=0; i < NRQS; ) { | |
2532 | int j; | |
2533 | ||
2534 | printf("%6s",(i==0)?"runq:":""); | |
2535 | for( j=0; (j<8) && (i < NRQS); j++,i++ ) { | |
2536 | if( rq->queues[i].next == &rq->queues[i] ) | |
2537 | printf( " --------"); | |
2538 | else | |
2539 | printf(" %08x",rq->queues[i].next); | |
2540 | } | |
2541 | printf("\n"); | |
2542 | } | |
2543 | for( i=0; i < NRQBM; ) { | |
2544 | register unsigned int mask; | |
2545 | char *d=dump_buf; | |
2546 | ||
2547 | mask = ~0; | |
2548 | mask ^= (mask>>1); | |
2549 | ||
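 | /* | |
 | * mask starts with only the most significant bit set, so each | |
 | * bitmap word prints MSB first: 'r' = bit set, 'e' = clear. | |
 | */ | |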
2550 | do { | |
2551 | *d++ = ((rq->bitmap[i]&mask)?'r':'e'); | |
2552 | mask >>=1; | |
2553 | } while( mask ); | |
2554 | *d = '\0'; | |
2555 | printf("%8s%s\n",((i==0)?"bitmap:":""),dump_buf); | |
2556 | i++; | |
2557 | } | |
2558 | printf("highq: 0x%x, count: %u\n", rq->highq, rq->count); | |
2559 | } | |
2560 | ||
2561 | void | |
2562 | dump_run_queues( | |
2563 | run_queue_t runq) | |
2564 | { | |
2565 | register queue_t q1; | |
2566 | register int i; | |
2567 | register queue_entry_t e; | |
2568 | ||
2569 | q1 = runq->queues; | |
2570 | for (i = 0; i < NRQS; i++) { | |
2571 | if (q1->next != q1) { | |
2572 | int t_cnt; | |
2573 | ||
2574 | printf("[%u]",i); | |
2575 | for (t_cnt=0, e = q1->next; e != q1; e = e->next) { | |
2576 | printf("\t0x%08x",e); | |
2577 | if( (t_cnt = (t_cnt + 1) % 4) == 0 ) | |
2578 | printf("\n"); | |
2579 | } | |
2580 | if( t_cnt ) | |
2581 | printf("\n"); | |
2582 | } | |
2583 | /* else | |
2584 | printf("[%u]\t<empty>\n",i); | |
2585 | */ | |
2586 | q1++; | |
2587 | } | |
2588 | } | |
2589 | ||
2590 | void | |
2591 | checkrq( | |
2592 | run_queue_t rq, | |
2593 | char *msg) | |
2594 | { | |
2595 | register queue_t q1; | |
2596 | register int i, j; | |
2597 | register queue_entry_t e; | |
2598 | register int highq; | |
2599 | ||
2600 | highq = -1;	/* highest non-empty priority seen */ | |
2601 | j = 0; | |
2602 | q1 = rq->queues; | |
2603 | for (i = MAXPRI; i >= 0; i--) { | |
2604 | if (q1->next == q1) { | |
2605 | if (q1->prev != q1) { | |
2606 | panic("checkrq: empty at %s", msg); | |
2607 | } | |
2608 | } | |
2609 | else { | |
2610 | if (highq == -1) | |
2611 | highq = i; | |
2612 | ||
2613 | for (e = q1->next; e != q1; e = e->next) { | |
2614 | j++; | |
2615 | if (e->next->prev != e) | |
2616 | panic("checkrq-2 at %s", msg); | |
2617 | if (e->prev->next != e) | |
2618 | panic("checkrq-3 at %s", msg); | |
2619 | } | |
2620 | } | |
2621 | q1++; | |
2622 | } | |
2623 | if (j != rq->count) | |
2624 | panic("checkrq: count wrong at %s", msg); | |
2625 | if (rq->count != 0 && highq > rq->highq) | |
2626 | panic("checkrq: highq wrong at %s", msg); | |
2627 | } | |
2628 | ||
2629 | void | |
2630 | thread_check( | |
2631 | register thread_t thread, | |
2632 | register run_queue_t rq) | |
2633 | { | |
2634 | register int whichq = thread->sched_pri; | |
2635 | register queue_entry_t queue, entry; | |
2636 | ||
2637 | if (whichq < MINPRI || whichq > MAXPRI) | |
2638 | panic("thread_check: bad pri"); | |
2639 | ||
2640 | if (whichq != thread->whichq) | |
2641 | panic("thread_check: whichq"); | |
2642 | ||
2643 | queue = &rq->queues[whichq]; | |
2644 | entry = queue_first(queue); | |
2645 | while (!queue_end(queue, entry)) { | |
2646 | if (entry == (queue_entry_t)thread) | |
2647 | return; | |
2648 | ||
2649 | entry = queue_next(entry); | |
2650 | } | |
2651 | ||
2652 | panic("thread_check: not found"); | |
2653 | } | |
2654 | ||
2655 | #endif /* DEBUG */ | |
2656 | ||
2657 | #if MACH_KDB | |
2658 | #include <ddb/db_output.h> | |
2659 | #define printf kdbprintf | |
2660 | extern int db_indent; | |
2661 | void db_sched(void); | |
2662 | ||
2663 | void | |
2664 | db_sched(void) | |
2665 | { | |
2666 | iprintf("Scheduling Statistics:\n"); | |
2667 | db_indent += 2; | |
2668 | iprintf("Thread invocations: csw %d same %d\n", | |
2669 | c_thread_invoke_csw, c_thread_invoke_same); | |
2670 | #if MACH_COUNTERS | |
2671 | iprintf("Thread block: calls %d\n", | |
2672 | c_thread_block_calls); | |
2673 | iprintf("Idle thread:\n\thandoff %d block %d no_dispatch %d\n", | |
2674 | c_idle_thread_handoff, | |
2675 | c_idle_thread_block, no_dispatch_count); | |
2676 | iprintf("Sched thread blocks: %d\n", c_sched_thread_block); | |
2677 | #endif /* MACH_COUNTERS */ | |
2678 | db_indent -= 2; | |
2679 | } | |
2680 | #endif /* MACH_KDB */ |