[apple/xnu.git] / osfmk / kern / syscall_subr.c (xnu-4570.41.2)

/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#include <mach/boolean.h>
#include <mach/thread_switch.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/counters.h>
#include <kern/ipc_kobject.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/spl.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/policy_internal.h>

#include <mach/policy.h>

#include <kern/syscall_subr.h>
#include <mach/mach_host_server.h>
#include <mach/mach_syscalls.h>
#include <sys/kdebug.h>
#include <kern/ast.h>

#ifdef MACH_BSD
extern void workqueue_thread_yielded(void);
extern sched_call_t workqueue_get_sched_callback(void);
#endif /* MACH_BSD */

extern wait_result_t thread_handoff_reason(thread_t thread, ast_t reason);

/* Called from commpage to take a delayed preemption when exiting
 * the "Preemption Free Zone" (PFZ).
 */
kern_return_t
pfz_exit(
__unused struct pfz_exit_args *args)
{
	/* For now, nothing special to do.  We'll pick up the ASTs on kernel exit. */

	return (KERN_SUCCESS);
}


/*
 * swtch and swtch_pri both attempt to context switch (logic in
 * thread_block no-ops the context switch if nothing would happen).
 * A boolean is returned that indicates whether there is anything
 * else runnable.  That's no excuse to spin, though.
 */

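/*
 * Continuation for swtch: once the thread_block in swtch() resumes us,
 * recompute the "anything else runnable" hint and return it directly to
 * user space via thread_syscall_return().
 */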
static void
swtch_continue(void)
{
	processor_t	myprocessor;
	boolean_t	result;

	disable_preemption();
	myprocessor = current_processor();
	result = SCHED(thread_should_yield)(myprocessor, current_thread());
	enable_preemption();

	thread_syscall_return(result);
	/*NOTREACHED*/
}

boolean_t
swtch(
	__unused struct swtch_args *args)
{
	processor_t	myprocessor;
	boolean_t	result;

	disable_preemption();
	myprocessor = current_processor();
	if (!SCHED(thread_should_yield)(myprocessor, current_thread())) {
		mp_enable_preemption();

		return (FALSE);
	}
	enable_preemption();

	counter(c_swtch_block++);

	thread_block_reason((thread_continue_t)swtch_continue, NULL, AST_YIELD);

	disable_preemption();
	myprocessor = current_processor();
	result = SCHED(thread_should_yield)(myprocessor, current_thread());
	enable_preemption();

	return (result);
}

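/*
 * Continuation for swtch_pri: identical to swtch_continue, except that it
 * first aborts the priority depression swtch_pri set up before blocking.
 */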
static void
swtch_pri_continue(void)
{
	processor_t	myprocessor;
	boolean_t	result;

	thread_depress_abort_internal(current_thread());

	disable_preemption();
	myprocessor = current_processor();
	result = SCHED(thread_should_yield)(myprocessor, current_thread());
	mp_enable_preemption();

	thread_syscall_return(result);
	/*NOTREACHED*/
}

boolean_t
swtch_pri(
__unused struct swtch_pri_args *args)
{
	processor_t	myprocessor;
	boolean_t	result;

	disable_preemption();
	myprocessor = current_processor();
	if (!SCHED(thread_should_yield)(myprocessor, current_thread())) {
		mp_enable_preemption();

		return (FALSE);
	}
	enable_preemption();

	counter(c_swtch_pri_block++);

	thread_depress_abstime(thread_depress_time);

	thread_block_reason((thread_continue_t)swtch_pri_continue, NULL, AST_YIELD);

	thread_depress_abort_internal(current_thread());

	disable_preemption();
	myprocessor = current_processor();
	result = SCHED(thread_should_yield)(myprocessor, current_thread());
	enable_preemption();

	return (result);
}

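/*
 * Temporarily suppress the workqueue scheduler callback around a
 * thread_switch-induced block, so that the block is not reported to the
 * workqueue subsystem; callers re-enable the callback once they resume.
 */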
static boolean_t
thread_switch_disable_workqueue_sched_callback(void)
{
	sched_call_t callback = workqueue_get_sched_callback();
	return thread_disable_sched_call(current_thread(), callback) != NULL;
}

static void
thread_switch_enable_workqueue_sched_callback(void)
{
	sched_call_t callback = workqueue_get_sched_callback();
	thread_reenable_sched_call(current_thread(), callback);
}

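/*
 * Continuation for thread_switch: consult the state stashed in
 * self->saved.swtch to undo any priority depression and re-enable the
 * workqueue callback before returning to user space.
 */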
static void
thread_switch_continue(void)
{
	thread_t self = current_thread();
	int option = self->saved.swtch.option;
	boolean_t reenable_workq_callback = self->saved.swtch.reenable_workq_callback;

	if (option == SWITCH_OPTION_DEPRESS || option == SWITCH_OPTION_OSLOCK_DEPRESS)
		thread_depress_abort_internal(self);

	if (reenable_workq_callback)
		thread_switch_enable_workqueue_sched_callback();

	thread_syscall_return(KERN_SUCCESS);
	/*NOTREACHED*/
}

/*
 * thread_switch:
 *
 * Context switch.  User may supply thread hint.
 */
kern_return_t
thread_switch(
	struct thread_switch_args *args)
{
	thread_t thread = THREAD_NULL;
	thread_t self = current_thread();
	mach_port_name_t thread_name = args->thread_name;
	int option = args->option;
	mach_msg_timeout_t option_time = args->option_time;
	uint32_t scale_factor = NSEC_PER_MSEC;
	boolean_t reenable_workq_callback = FALSE;
	boolean_t depress_option = FALSE;
	boolean_t wait_option = FALSE;

	/*
	 * Validate and process option.
	 */
	switch (option) {

	case SWITCH_OPTION_NONE:
		workqueue_thread_yielded();
		break;
	case SWITCH_OPTION_WAIT:
		wait_option = TRUE;
		workqueue_thread_yielded();
		break;
	case SWITCH_OPTION_DEPRESS:
		depress_option = TRUE;
		workqueue_thread_yielded();
		break;
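	/*
	 * For SWITCH_OPTION_DISPATCH_CONTENTION, option_time is interpreted in
	 * microseconds (scale_factor switches to NSEC_PER_USEC below); the wait
	 * options above use the default millisecond scale.
	 */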
	case SWITCH_OPTION_DISPATCH_CONTENTION:
		scale_factor = NSEC_PER_USEC;
		wait_option = TRUE;
		if (thread_switch_disable_workqueue_sched_callback())
			reenable_workq_callback = TRUE;
		break;
	case SWITCH_OPTION_OSLOCK_DEPRESS:
		depress_option = TRUE;
		if (thread_switch_disable_workqueue_sched_callback())
			reenable_workq_callback = TRUE;
		break;
	case SWITCH_OPTION_OSLOCK_WAIT:
		wait_option = TRUE;
		if (thread_switch_disable_workqueue_sched_callback())
			reenable_workq_callback = TRUE;
		break;
	default:
		return (KERN_INVALID_ARGUMENT);
	}

	/*
	 * Translate the port name if supplied.
	 */
	if (thread_name != MACH_PORT_NULL) {
		ipc_port_t port;

		if (ipc_port_translate_send(self->task->itk_space,
		                            thread_name, &port) == KERN_SUCCESS) {
			ip_reference(port);
			ip_unlock(port);

			thread = convert_port_to_thread(port);
			ip_release(port);

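			/* A handoff to yourself is treated as supplying no hint at all. */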
			if (thread == self) {
				thread_deallocate(thread);
				thread = THREAD_NULL;
			}
		}
	}

	if (option == SWITCH_OPTION_OSLOCK_DEPRESS || option == SWITCH_OPTION_OSLOCK_WAIT) {
		if (thread != THREAD_NULL) {

			if (thread->task != self->task) {
				/*
				 * OSLock boosting only applies to other threads
				 * in your same task (even if you have a port for
				 * a thread in another task)
				 */

				thread_deallocate(thread);
				thread = THREAD_NULL;
			} else {
				/*
				 * Attempt to kick the lock owner up to our same IO throttling tier.
				 * If the thread is currently blocked in throttle_lowpri_io(),
				 * it will immediately break out.
				 *
				 * TODO: SFI break out?
				 */
				int new_policy = proc_get_effective_thread_policy(self, TASK_POLICY_IO);

				set_thread_iotier_override(thread, new_policy);
			}
		}
	}

	/*
	 * Try to handoff if supplied.
	 */
	if (thread != THREAD_NULL) {
		spl_t s = splsched();

		/* This may return a different thread if the target is pushing on something */
		thread_t pulled_thread = thread_run_queue_remove_for_handoff(thread);

		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SWITCH) | DBG_FUNC_NONE,
		                      thread_tid(thread), thread->state,
		                      pulled_thread ? TRUE : FALSE, 0, 0);

		if (pulled_thread != THREAD_NULL) {
			/* We can't be dropping the last ref here */
			thread_deallocate_safe(thread);

			if (wait_option)
				assert_wait_timeout((event_t)assert_wait_timeout, THREAD_ABORTSAFE,
				                    option_time, scale_factor);
			else if (depress_option)
				thread_depress_ms(option_time);

			self->saved.swtch.option = option;
			self->saved.swtch.reenable_workq_callback = reenable_workq_callback;

			thread_run(self, (thread_continue_t)thread_switch_continue, NULL, pulled_thread);
			/* NOTREACHED */
			panic("returned from thread_run!");
		}

		splx(s);

		thread_deallocate(thread);
	}

	if (wait_option)
		assert_wait_timeout((event_t)assert_wait_timeout, THREAD_ABORTSAFE, option_time, scale_factor);
	else if (depress_option)
		thread_depress_ms(option_time);

	self->saved.swtch.option = option;
	self->saved.swtch.reenable_workq_callback = reenable_workq_callback;

	thread_block_reason((thread_continue_t)thread_switch_continue, NULL, AST_YIELD);

	if (depress_option)
		thread_depress_abort_internal(self);

	if (reenable_workq_callback)
		thread_switch_enable_workqueue_sched_callback();

	return (KERN_SUCCESS);
}

/* Returns a +1 thread reference */
thread_t
port_name_to_thread_for_ulock(mach_port_name_t thread_name)
{
	thread_t thread = THREAD_NULL;
	thread_t self = current_thread();

	/*
	 * Translate the port name if supplied.
	 */
	if (thread_name != MACH_PORT_NULL) {
		ipc_port_t port;

		if (ipc_port_translate_send(self->task->itk_space,
		                            thread_name, &port) == KERN_SUCCESS) {
			ip_reference(port);
			ip_unlock(port);

			thread = convert_port_to_thread(port);
			ip_release(port);

			if (thread == THREAD_NULL) {
				return thread;
			}

			if ((thread == self) || (thread->task != self->task)) {
				thread_deallocate(thread);
				thread = THREAD_NULL;
			}
		}
	}

	return thread;
}

/* This function is called after an assert_wait(), therefore it must not
 * cause another wait until after the thread_run() or thread_block()
 *
 * Consumes a ref on thread
 */
wait_result_t
thread_handoff(thread_t thread)
{
	thread_t deallocate_thread = THREAD_NULL;
	thread_t self = current_thread();

	/*
	 * Try to handoff if supplied.
	 */
	if (thread != THREAD_NULL) {
		spl_t s = splsched();

		thread_t pulled_thread = thread_run_queue_remove_for_handoff(thread);

		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SWITCH) | DBG_FUNC_NONE,
		                      thread_tid(thread), thread->state,
		                      pulled_thread ? TRUE : FALSE, 0, 0);

		if (pulled_thread != THREAD_NULL) {
			/* We can't be dropping the last ref here */
			thread_deallocate_safe(thread);

			int result = thread_run(self, THREAD_CONTINUE_NULL, NULL, pulled_thread);

			splx(s);
			return result;
		}

		splx(s);

		deallocate_thread = thread;
		thread = THREAD_NULL;
	}

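	/*
	 * No handoff target (or it couldn't be pulled from its run queue):
	 * block normally, then drop the leftover reference if we took one.
	 */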
	int result = thread_block(THREAD_CONTINUE_NULL);
	if (deallocate_thread != THREAD_NULL) {
		thread_deallocate(deallocate_thread);
	}

	return result;
}

/*
 * Depress thread's priority to lowest possible for the specified interval,
 * with a value of zero resulting in no timeout being scheduled.
 */
void
thread_depress_abstime(
	uint64_t interval)
{
	thread_t self = current_thread();
	uint64_t deadline;
	spl_t s;

	s = splsched();
	thread_lock(self);
	if (!(self->sched_flags & TH_SFLAG_DEPRESSED_MASK)) {
		processor_t myprocessor = self->last_processor;

		self->sched_pri = DEPRESSPRI;

		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
		                      (uintptr_t)thread_tid(self),
		                      self->base_pri,
		                      self->sched_pri,
		                      0, /* eventually, 'reason' */
		                      0);

		myprocessor->current_pri = self->sched_pri;
		myprocessor->current_perfctl_class = thread_get_perfcontrol_class(self);
		self->sched_flags |= TH_SFLAG_DEPRESS;

		if (interval != 0) {
			clock_absolutetime_interval_to_deadline(interval, &deadline);
			if (!timer_call_enter(&self->depress_timer, deadline, TIMER_CALL_USER_CRITICAL))
				self->depress_timer_active++;
		}
	}
	thread_unlock(self);
	splx(s);
}

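/*
 * Convenience wrapper: depress for an interval given in milliseconds.
 */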
void
thread_depress_ms(
	mach_msg_timeout_t interval)
{
	uint64_t abstime;

	clock_interval_to_absolutetime_interval(
		interval, NSEC_PER_MSEC, &abstime);
	thread_depress_abstime(abstime);
}

/*
 * Priority depression expiration.
 */
void
thread_depress_expire(
	void *p0,
	__unused void *p1)
{
	thread_t thread = p0;
	spl_t s;

	s = splsched();
	thread_lock(thread);
	if (--thread->depress_timer_active == 0) {
		thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK;
		thread_recompute_sched_pri(thread, FALSE);
	}
	thread_unlock(thread);
	splx(s);
}

/*
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort_internal(
	thread_t thread)
{
	kern_return_t result = KERN_NOT_DEPRESSED;
	spl_t s;

	s = splsched();
	thread_lock(thread);
	if (!(thread->sched_flags & TH_SFLAG_POLLDEPRESS)) {
		if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
			thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK;
			thread_recompute_sched_pri(thread, FALSE);
			result = KERN_SUCCESS;
		}

		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}
	thread_unlock(thread);
	splx(s);

	return (result);
}

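/*
 * Invoked on fixed-priority (TH_MODE_FIXED) threads: once the thread has
 * accumulated max_poll_computation of CPU time, depress its priority
 * (TH_SFLAG_POLLDEPRESS) and arm the depress timer, keeping a polling
 * thread from monopolizing its processor.
 */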
void
thread_poll_yield(
	thread_t self)
{
	spl_t s;

	assert(self == current_thread());

	s = splsched();
	if (self->sched_mode == TH_MODE_FIXED) {
		uint64_t total_computation, abstime;

		abstime = mach_absolute_time();
		total_computation = abstime - self->computation_epoch;
		total_computation += self->computation_metered;
		if (total_computation >= max_poll_computation) {
			processor_t myprocessor = current_processor();
			ast_t preempt;

			thread_lock(self);
			if (!(self->sched_flags & TH_SFLAG_DEPRESSED_MASK)) {
				self->sched_pri = DEPRESSPRI;

				KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
				                      (uintptr_t)thread_tid(self),
				                      self->base_pri,
				                      self->sched_pri,
				                      0, /* eventually, 'reason' */
				                      0);

				myprocessor->current_pri = self->sched_pri;
				myprocessor->current_perfctl_class = thread_get_perfcontrol_class(self);
			}
			self->computation_epoch = abstime;
			self->computation_metered = 0;
			self->sched_flags |= TH_SFLAG_POLLDEPRESS;

			abstime += (total_computation >> sched_poll_yield_shift);
			if (!timer_call_enter(&self->depress_timer, abstime, TIMER_CALL_USER_CRITICAL))
				self->depress_timer_active++;

			if ((preempt = csw_check(myprocessor, AST_NONE)) != AST_NONE)
				ast_on(preempt);

			thread_unlock(self);
		}
	}
	splx(s);
}

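/*
 * Kernel-internal yield: depress priority for up to 'ms' milliseconds and
 * block, aborting any remaining depression on the way out.
 */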
void
thread_yield_internal(
	mach_msg_timeout_t ms)
{
	processor_t myprocessor;

	disable_preemption();
	myprocessor = current_processor();
	if (!SCHED(thread_should_yield)(myprocessor, current_thread())) {
		mp_enable_preemption();

		return;
	}
	enable_preemption();

	thread_depress_ms(ms);

	thread_block_reason(THREAD_CONTINUE_NULL, NULL, AST_YIELD);

	thread_depress_abort_internal(current_thread());
}

/*
 * This yields to a possible non-urgent preemption pending on the current processor.
 *
 * This is useful when doing a long computation in the kernel without returning to userspace.
 *
 * As opposed to other yielding mechanisms, this does not drop the priority of the current thread.
 */
void
thread_yield_to_preemption()
{
	/*
	 * ast_pending() should ideally be called with interrupts disabled, but
	 * the check here is fine because csw_check() will do the right thing.
	 */
	ast_t *pending_ast = ast_pending();
	ast_t ast = AST_NONE;
	processor_t p;

	if (*pending_ast & AST_PREEMPT) {
		thread_t self = current_thread();

		spl_t s = splsched();

		p = current_processor();
		thread_lock(self);
		ast = csw_check(p, AST_YIELD);
		ast_on(ast);
		thread_unlock(self);

		if (ast != AST_NONE) {
			(void)thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
		}

		splx(s);
	}
}