/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 *	Author:	Bryan Ford, University of Utah CSS
 *
 *	Thread management routines
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/alert.h>
#include <mach_prof.h>
#include <mach/rpc.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/ast.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/profile.h>
#include <kern/machine.h>
#include <kern/spl.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/processor.h>
#include <kern/timer.h>

void    act_abort(thread_t);
void    act_set_apc(thread_t);
void    install_special_handler_locked(thread_t);
void    special_handler_continue(void);

/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
    thread_t            thread)
{
    kern_return_t       result = KERN_SUCCESS;

    thread_mtx_lock(thread);

    if (thread->active) {
        thread->active = FALSE;

        act_abort(thread);

        if (thread->started)
            clear_wait(thread, THREAD_INTERRUPTED);
        else {
            clear_wait(thread, THREAD_AWAKENED);
            thread->started = TRUE;
        }
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    if (thread != current_thread() && result == KERN_SUCCESS)
        thread_wait(thread);

    return (result);
}

/*
 * Terminate a thread.
 */
kern_return_t
thread_terminate(
    thread_t            thread)
{
    kern_return_t       result;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if ( thread->task == kernel_task &&
         thread != current_thread() )
        return (KERN_FAILURE);

    result = thread_terminate_internal(thread);

    /*
     * If a kernel thread is terminating itself, force an AST here.
     * Kernel threads don't normally pass through the AST checking
     * code - and all threads finish their own termination in the
     * special handler APC.
     */
    if (thread->task == kernel_task) {
        ml_set_interrupts_enabled(FALSE);
        ast_taken(AST_APC, TRUE);
        panic("thread_terminate");
    }

    return (result);
}

/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread, a count of
 * suspends is maintained.
 *
 * Called with thread mutex held.
 */
void
thread_hold(
    register thread_t   thread)
{
    if (thread->suspend_count++ == 0) {
        install_special_handler(thread);
        if (thread->started)
            thread_wakeup_one(&thread->suspend_count);
    }
}

/*
 * Decrement internal suspension count, setting thread
 * runnable when count falls to zero.
 *
 * Called with thread mutex held.
 */
void
thread_release(
    register thread_t   thread)
{
    if ( thread->suspend_count > 0 &&
         --thread->suspend_count == 0 ) {
        if (thread->started)
            thread_wakeup_one(&thread->suspend_count);
        else {
            clear_wait(thread, THREAD_AWAKENED);
            thread->started = TRUE;
        }
    }
}

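/*
 *	thread_suspend:
 *
 *	Suspend execution of the specified thread by bumping its
 *	user-level stop count.  The first suspension also takes the
 *	internal suspend count and installs the special handler so
 *	the target parks itself; the caller then waits for a remote
 *	target via thread_wait().
 */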
kern_return_t
thread_suspend(
    register thread_t   thread)
{
    thread_t            self = current_thread();
    kern_return_t       result = KERN_SUCCESS;

    if (thread == THREAD_NULL || thread->task == kernel_task)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        if ( thread->user_stop_count++ == 0 &&
             thread->suspend_count++ == 0 ) {
            install_special_handler(thread);
            if (thread != self)
                thread_wakeup_one(&thread->suspend_count);
        }
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    if (thread != self && result == KERN_SUCCESS)
        thread_wait(thread);

    return (result);
}

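/*
 *	thread_resume:
 *
 *	Undo a prior thread_suspend().  When the user stop count
 *	returns to zero the internal suspend count is dropped and
 *	the thread is allowed to run again.
 */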
kern_return_t
thread_resume(
    register thread_t   thread)
{
    kern_return_t       result = KERN_SUCCESS;

    if (thread == THREAD_NULL || thread->task == kernel_task)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread->user_stop_count > 0) {
            if ( --thread->user_stop_count == 0 &&
                 --thread->suspend_count == 0 ) {
                if (thread->started)
                    thread_wakeup_one(&thread->suspend_count);
                else {
                    clear_wait(thread, THREAD_AWAKENED);
                    thread->started = TRUE;
                }
            }
        }
        else
            result = KERN_FAILURE;
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}
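
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * in-kernel caller pairing thread_suspend() with thread_resume() so the
 * target's user stop count returns to its previous value.  Compiled out;
 * example_with_thread_stopped() is a made-up name.
 */
#if 0
static kern_return_t
example_with_thread_stopped(
    thread_t            thread)
{
    kern_return_t       kr;

    kr = thread_suspend(thread);
    if (kr != KERN_SUCCESS)
        return (kr);

    /* ... examine or adjust the suspended target here ... */

    return (thread_resume(thread));
}
#endif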

/*
 *	thread_depress_abort:
 *
 *	Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort(
    register thread_t   thread)
{
    kern_return_t       result;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active)
        result = thread_depress_abort_internal(thread);
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}


/*
 * Indicate that the activation should run its
 * special handler to detect a condition.
 *
 * Called with thread mutex held.
 */
void
act_abort(
    thread_t    thread)
{
    spl_t       s = splsched();

    thread_lock(thread);

    if (!(thread->state & TH_ABORT)) {
        thread->state |= TH_ABORT;
        install_special_handler_locked(thread);
    }
    else
        thread->state &= ~TH_ABORT_SAFELY;

    thread_unlock(thread);
    splx(s);
}

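/*
 *	thread_abort:
 *
 *	Unconditionally abort the specified thread: mark it aborted
 *	and interrupt any wait it is currently blocked in.
 */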
kern_return_t
thread_abort(
    register thread_t   thread)
{
    kern_return_t       result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        act_abort(thread);
        clear_wait(thread, THREAD_INTERRUPTED);
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}

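/*
 *	thread_abort_safely:
 *
 *	If the target is waiting at an abort-safe point, interrupt the
 *	wait immediately; otherwise mark the thread for a safe abort
 *	and install the special handler to deliver it later.
 */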
kern_return_t
thread_abort_safely(
    thread_t        thread)
{
    kern_return_t   result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        spl_t       s = splsched();

        thread_lock(thread);
        if (!thread->at_safe_point ||
                clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
            if (!(thread->state & TH_ABORT)) {
                thread->state |= (TH_ABORT|TH_ABORT_SAFELY);
                install_special_handler_locked(thread);
            }
        }
        thread_unlock(thread);
        splx(s);
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}

/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>

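/*
 *	thread_info:
 *
 *	Return information about the specified thread for the
 *	requested flavor, provided the thread is still active.
 */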
kern_return_t
thread_info(
    thread_t                thread,
    thread_flavor_t         flavor,
    thread_info_t           thread_info_out,
    mach_msg_type_number_t  *thread_info_count)
{
    kern_return_t           result;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active)
        result = thread_info_internal(
                        thread, flavor, thread_info_out, thread_info_count);
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}

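/*
 *	thread_get_state:
 *
 *	Fetch the machine-dependent state of the specified thread.
 *	A remote target is held and stopped around the copy so the
 *	register image read is consistent.  Called with nothing
 *	locked; returns the same way.
 */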
kern_return_t
thread_get_state(
    register thread_t       thread,
    int                     flavor,
    thread_state_t          state,          /* pointer to OUT array */
    mach_msg_type_number_t  *state_count)   /*IN/OUT*/
{
    kern_return_t       result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread)) {
                thread_mtx_lock(thread);
                result = machine_thread_get_state(
                                        thread, flavor, state, state_count);
                thread_unstop(thread);
            }
            else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        }
        else
            result = machine_thread_get_state(
                                    thread, flavor, state, state_count);
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}

/*
 * Change thread's machine-dependent state.  Called with nothing
 * locked.  Returns same way.
 */
kern_return_t
thread_set_state(
    register thread_t       thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  state_count)
{
    kern_return_t       result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread)) {
                thread_mtx_lock(thread);
                result = machine_thread_set_state(
                                        thread, flavor, state, state_count);
                thread_unstop(thread);
            }
            else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        }
        else
            result = machine_thread_set_state(
                                    thread, flavor, state, state_count);
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}
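
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller using the get/modify/set pattern with the routines above.  The
 * state layout is machine-dependent; MACHINE_THREAD_STATE is used only
 * as a placeholder flavor and example_patch_thread_state() is a made-up
 * name.  Compiled out.
 */
#if 0
static kern_return_t
example_patch_thread_state(
    thread_t                thread)
{
    thread_state_data_t     state;
    mach_msg_type_number_t  count = THREAD_STATE_MAX;
    kern_return_t           kr;

    kr = thread_get_state(thread, MACHINE_THREAD_STATE,
                            (thread_state_t)state, &count);
    if (kr != KERN_SUCCESS)
        return (kr);

    /* ... edit the machine-dependent register image here ... */

    return (thread_set_state(thread, MACHINE_THREAD_STATE,
                            (thread_state_t)state, count));
}
#endif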


/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/* Initialize (or re-initialize) a thread state.  Called from execve
 * with nothing locked, returns same way.
 */
kern_return_t
thread_state_initialize(
    register thread_t   thread)
{
    kern_return_t       result = KERN_SUCCESS;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (thread->active) {
        if (thread != current_thread()) {
            thread_hold(thread);

            thread_mtx_unlock(thread);

            if (thread_stop(thread)) {
                thread_mtx_lock(thread);
                result = machine_thread_state_initialize( thread );
                thread_unstop(thread);
            }
            else {
                thread_mtx_lock(thread);
                result = KERN_ABORTED;
            }

            thread_release(thread);
        }
        else
            result = machine_thread_state_initialize( thread );
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(thread);

    return (result);
}


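/*
 *	thread_dup:
 *
 *	Duplicate the current thread's machine-dependent state into
 *	the target thread, stopping the target around the copy.
 */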
kern_return_t
thread_dup(
    register thread_t   target)
{
    thread_t            self = current_thread();
    kern_return_t       result = KERN_SUCCESS;

    if (target == THREAD_NULL || target == self)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(target);

    if (target->active) {
        thread_hold(target);

        thread_mtx_unlock(target);

        if (thread_stop(target)) {
            thread_mtx_lock(target);
            result = machine_thread_dup(self, target);
            thread_unstop(target);
        }
        else {
            thread_mtx_lock(target);
            result = KERN_ABORTED;
        }

        thread_release(target);
    }
    else
        result = KERN_TERMINATED;

    thread_mtx_unlock(target);

    return (result);
}


/*
 *	thread_setstatus:
 *
 *	Set the status of the specified thread.
 *	Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
    register thread_t       thread,
    int                     flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  count)
{
    return (thread_set_state(thread, flavor, tstate, count));
}

/*
 *	thread_getstatus:
 *
 *	Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
    register thread_t       thread,
    int                     flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  *count)
{
    return (thread_get_state(thread, flavor, tstate, count));
}

/*
 *	install_special_handler:
 *
 *	Install the special returnhandler that handles suspension and
 *	termination, if it hasn't been installed already.
 *
 *	Called with the thread mutex held.
 */
void
install_special_handler(
    thread_t        thread)
{
    spl_t       s = splsched();

    thread_lock(thread);
    install_special_handler_locked(thread);
    thread_unlock(thread);
    splx(s);
}

/*
 *	install_special_handler_locked:
 *
 *	Do the work of installing the special_handler.
 *
 *	Called with the thread mutex and scheduling lock held.
 */
void
install_special_handler_locked(
    thread_t            thread)
{
    ReturnHandler       **rh;

    /* The work handler must always be the last ReturnHandler on the list,
       because it can do tricky things like detach the thr_act. */
    for (rh = &thread->handlers; *rh; rh = &(*rh)->next)
        continue;

    if (rh != &thread->special_handler.next)
        *rh = &thread->special_handler;

    /*
     * Temporarily undepress, so target has
     * a chance to do locking required to
     * block itself in special_handler().
     */
    if (thread->sched_mode & TH_MODE_ISDEPRESSED)
        compute_priority(thread, TRUE);

    thread_ast_set(thread, AST_APC);

    if (thread == current_thread())
        ast_propagate(thread->ast);
    else {
        processor_t     processor = thread->last_processor;

        if ( processor != PROCESSOR_NULL &&
             processor->state == PROCESSOR_RUNNING &&
             processor->active_thread == thread )
            cause_ast_check(processor);
    }
}

/*
 * Activation control support routines internal to this file:
 */

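/*
 *	act_execute_returnhandlers:
 *
 *	Run the ReturnHandlers queued on the current thread, popping
 *	each one with the thread locked and invoking it with no locks
 *	held.  Entered from the AST_APC path.
 */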
void
act_execute_returnhandlers(void)
{
    thread_t    thread = current_thread();

    thread_ast_clear(thread, AST_APC);
    spllo();

    for (;;) {
        ReturnHandler   *rh;

        thread_mtx_lock(thread);

        (void)splsched();
        thread_lock(thread);

        rh = thread->handlers;
        if (rh != NULL) {
            thread->handlers = rh->next;

            thread_unlock(thread);
            spllo();

            thread_mtx_unlock(thread);

            /* Execute it */
            (*rh->handler)(rh, thread);
        }
        else
            break;
    }

    thread_unlock(thread);
    spllo();

    thread_mtx_unlock(thread);
}

/*
 * special_handler_continue
 *
 * Continuation routine for the special handler blocks.  It checks
 * to see whether there have been any new suspensions.  If so, it
 * installs the special handler again.  Otherwise, it checks to see
 * if the current depression needs to be re-instated (it may have
 * been temporarily removed in order to get to this point in a hurry).
 */
void
special_handler_continue(void)
{
    thread_t        thread = current_thread();

    thread_mtx_lock(thread);

    if (thread->suspend_count > 0)
        install_special_handler(thread);
    else {
        spl_t           s = splsched();

        thread_lock(thread);
        if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
            processor_t     myprocessor = thread->last_processor;

            thread->sched_pri = DEPRESSPRI;
            myprocessor->current_pri = thread->sched_pri;
            thread->sched_mode &= ~TH_MODE_PREEMPT;
        }
        thread_unlock(thread);
        splx(s);
    }

    thread_mtx_unlock(thread);

    thread_exception_return();
    /*NOTREACHED*/
}

/*
 * special_handler - handles suspension, termination.  Called
 * with nothing locked.  Returns (if it returns) the same way.
 */
void
special_handler(
    __unused ReturnHandler  *rh,
    thread_t                thread)
{
    spl_t       s;

    thread_mtx_lock(thread);

    s = splsched();
    thread_lock(thread);
    thread->state &= ~(TH_ABORT|TH_ABORT_SAFELY);   /* clear any aborts */
    thread_unlock(thread);
    splx(s);

    /*
     * If we're suspended, go to sleep and wait for someone to wake us up.
     */
    if (thread->active) {
        if (thread->suspend_count > 0) {
            if (thread->handlers == NULL) {
                assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
                thread_mtx_unlock(thread);
                thread_block((thread_continue_t)special_handler_continue);
                /*NOTREACHED*/
            }

            thread_mtx_unlock(thread);

            special_handler_continue();
            /*NOTREACHED*/
        }
    }
    else {
        thread_mtx_unlock(thread);

        thread_terminate_self();
        /*NOTREACHED*/
    }

    thread_mtx_unlock(thread);
}

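/*
 *	act_set_state / act_get_state:
 *
 *	Wrappers around thread_set_state() and thread_get_state()
 *	that additionally reject the calling thread as a target.
 */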
kern_return_t
act_set_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  count)
{
    if (thread == current_thread())
        return (KERN_INVALID_ARGUMENT);

    return (thread_set_state(thread, flavor, state, count));
}

kern_return_t
act_get_state(
    thread_t                thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  *count)
{
    if (thread == current_thread())
        return (KERN_INVALID_ARGUMENT);

    return (thread_get_state(thread, flavor, state, count));
}

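/*
 *	act_set_astbsd:
 *
 *	Post an AST_BSD to the specified thread, propagating it
 *	immediately for the current thread or poking the processor
 *	a remote target was last seen running on.
 */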
void
act_set_astbsd(
    thread_t    thread)
{
    spl_t       s = splsched();

    if (thread == current_thread()) {
        thread_ast_set(thread, AST_BSD);
        ast_propagate(thread->ast);
    }
    else {
        processor_t     processor;

        thread_lock(thread);
        thread_ast_set(thread, AST_BSD);
        processor = thread->last_processor;
        if ( processor != PROCESSOR_NULL &&
             processor->state == PROCESSOR_RUNNING &&
             processor->active_thread == thread )
            cause_ast_check(processor);
        thread_unlock(thread);
    }

    splx(s);
}

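/*
 *	act_set_apc:
 *
 *	Post an AST_APC to the specified thread so that its queued
 *	ReturnHandlers are run, using the same propagation scheme as
 *	act_set_astbsd() above.
 */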
void
act_set_apc(
    thread_t    thread)
{
    spl_t       s = splsched();

    if (thread == current_thread()) {
        thread_ast_set(thread, AST_APC);
        ast_propagate(thread->ast);
    }
    else {
        processor_t     processor;

        thread_lock(thread);
        thread_ast_set(thread, AST_APC);
        processor = thread->last_processor;
        if ( processor != PROCESSOR_NULL &&
             processor->state == PROCESSOR_RUNNING &&
             processor->active_thread == thread )
            cause_ast_check(processor);
        thread_unlock(thread);
    }

    splx(s);
}