/*
- * Copyright (c) 2000-2007 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <mach/mach_host_server.h>
#include <mach/mach_syscalls.h>
+
+#ifdef MACH_BSD
+extern void workqueue_thread_yielded(void);
+#endif /* MACH_BSD */
+
+
+/* Called from commpage to take a delayed preemption when exiting
+ * the "Preemption Free Zone" (PFZ).
+ */
+kern_return_t
+pfz_exit(
+__unused struct pfz_exit_args *args)
+{
+ /* For now, nothing special to do. We'll pick up the ASTs on kernel exit. */
+
+ return (KERN_SUCCESS);
+}
+
+
/*
* swtch and swtch_pri both attempt to context switch (logic in
* thread_block no-ops the context switch if nothing would happen).
* A boolean is returned that indicates whether there is anything
* else runnable.
*/
disable_preemption();
myprocessor = current_processor();
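+ /* Check for other runnable work through the scheduler's SCHED() dispatch
+ * rather than reading the processor run queue directly (the indirection is
+ * assumed to support pluggable scheduler implementations).
+ */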
- result = myprocessor->runq.count > 0 || rt_runq.count > 0;
+ result = !SCHED(processor_queue_empty)(myprocessor) || rt_runq.count > 0;
enable_preemption();
thread_syscall_return(result);
disable_preemption();
myprocessor = current_processor();
- if (myprocessor->runq.count == 0 && rt_runq.count == 0) {
+ if (SCHED(processor_queue_empty)(myprocessor) && rt_runq.count == 0) {
mp_enable_preemption();
return (FALSE);
disable_preemption();
myprocessor = current_processor();
- result = myprocessor->runq.count > 0 || rt_runq.count > 0;
+ result = !SCHED(processor_queue_empty)(myprocessor) || rt_runq.count > 0;
enable_preemption();
return (result);
disable_preemption();
myprocessor = current_processor();
- result = myprocessor->runq.count > 0 || rt_runq.count > 0;
+ result = !SCHED(processor_queue_empty)(myprocessor) || rt_runq.count > 0;
mp_enable_preemption();
thread_syscall_return(result);
disable_preemption();
myprocessor = current_processor();
- if (myprocessor->runq.count == 0 && rt_runq.count == 0) {
+ if (SCHED(processor_queue_empty)(myprocessor) && rt_runq.count == 0) {
mp_enable_preemption();
return (FALSE);
counter(c_swtch_pri_block++);
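+ /* Depress for thread_depress_time rather than the fixed std_quantum;
+ * presumably because the standard quantum is specific to the traditional
+ * scheduler.
+ */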
- thread_depress_abstime(std_quantum);
+ thread_depress_abstime(thread_depress_time);
thread_block_reason((thread_continue_t)swtch_pri_continue, NULL, AST_YIELD);
disable_preemption();
myprocessor = current_processor();
- result = myprocessor->runq.count > 0 || rt_runq.count > 0;
+ result = !SCHED(processor_queue_empty)(myprocessor) || rt_runq.count > 0;
enable_preemption();
return (result);
return (KERN_INVALID_ARGUMENT);
}
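+ /* Let the BSD pthread workqueue layer account for this voluntary yield
+ * (assumed to be a no-op for threads that do not belong to a workqueue).
+ */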
+ workqueue_thread_yielded();
+
/*
* Translate the port name if supplied.
*/
ip_unlock(port);
thread = convert_port_to_thread(port);
- ipc_port_release(port);
+ ip_release(port);
if (thread == self) {
(void)thread_deallocate_internal(thread);
thread->sched_pri < BASEPRI_RTQUEUES &&
(thread->bound_processor == PROCESSOR_NULL ||
thread->bound_processor == processor) &&
- run_queue_remove(thread) ) {
+ thread_run_queue_remove(thread) ) {
/*
* Hah, got it!!
*/
s = splsched();
thread_lock(self);
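+ /* Depression state is now kept in sched_flags (TH_SFLAG_*) instead of
+ * being overloaded onto sched_mode.
+ */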
- if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
+ if (!(self->sched_flags & TH_SFLAG_DEPRESSED_MASK)) {
processor_t myprocessor = self->last_processor;
self->sched_pri = DEPRESSPRI;
myprocessor->current_pri = self->sched_pri;
- self->sched_mode |= TH_MODE_DEPRESS;
+ self->sched_flags |= TH_SFLAG_DEPRESS;
if (interval != 0) {
clock_absolutetime_interval_to_deadline(interval, &deadline);
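+ /* timer_call_enter() now takes a flags argument; TIMER_CALL_CRITICAL is
+ * assumed to keep the depress timer from being deferred or coalesced.
+ */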
- if (!timer_call_enter(&self->depress_timer, deadline))
+ if (!timer_call_enter(&self->depress_timer, deadline, TIMER_CALL_CRITICAL))
self->depress_timer_active++;
}
}
s = splsched();
thread_lock(thread);
if (--thread->depress_timer_active == 0) {
- thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
- compute_priority(thread, FALSE);
+ thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK;
+ SCHED(compute_priority)(thread, FALSE);
}
thread_unlock(thread);
splx(s);
s = splsched();
thread_lock(thread);
- if (!(thread->sched_mode & TH_MODE_POLLDEPRESS)) {
- if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
- thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
- compute_priority(thread, FALSE);
+ if (!(thread->sched_flags & TH_SFLAG_POLLDEPRESS)) {
+ if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
+ thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK;
+ SCHED(compute_priority)(thread, FALSE);
result = KERN_SUCCESS;
}
assert(self == current_thread());
s = splsched();
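+ /* sched_mode is now a distinct enum; only fixed-priority threads (neither
+ * realtime nor timeshare, matching the old test) get poll depression.
+ */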
- if (!(self->sched_mode & (TH_MODE_REALTIME|TH_MODE_TIMESHARE))) {
+ if (self->sched_mode == TH_MODE_FIXED) {
uint64_t total_computation, abstime;
abstime = mach_absolute_time();
ast_t preempt;
thread_lock(self);
- if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
+ if (!(self->sched_flags & TH_SFLAG_DEPRESSED_MASK)) {
self->sched_pri = DEPRESSPRI;
myprocessor->current_pri = self->sched_pri;
}
self->computation_epoch = abstime;
self->computation_metered = 0;
- self->sched_mode |= TH_MODE_POLLDEPRESS;
+ self->sched_flags |= TH_SFLAG_POLLDEPRESS;
abstime += (total_computation >> sched_poll_yield_shift);
- if (!timer_call_enter(&self->depress_timer, abstime))
+ if (!timer_call_enter(&self->depress_timer, abstime, TIMER_CALL_CRITICAL))
self->depress_timer_active++;
thread_unlock(self);
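+ /* csw_check() now takes just the processor; it is assumed to look up the
+ * running thread itself.
+ */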
- if ((preempt = csw_check(self, myprocessor)) != AST_NONE)
+ if ((preempt = csw_check(myprocessor)) != AST_NONE)
ast_on(preempt);
}
}
disable_preemption();
myprocessor = current_processor();
- if (myprocessor->runq.count == 0 && rt_runq.count == 0) {
+ if (SCHED(processor_queue_empty)(myprocessor) && rt_runq.count == 0) {
mp_enable_preemption();
return;