}
static inline void
-enable_ints_and_unlock(void)
+enable_ints_and_unlock(spl_t s)
{
thread_call_unlock();
- (void)spllo();
+ splx(s);
}
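The unlock helper now restores whatever interrupt level the caller saved, via splx(), instead of unconditionally dropping to spllo(). The matching lock helper is not shown in this hunk, but the call sites below imply its new shape; here is a sketch under that assumption (splsched() as the level-raising primitive is a guess, and thread_call_lock_spin() is assumed to be the counterpart of the thread_call_unlock() seen above):

static inline spl_t
disable_ints_and_lock(void)
{
	spl_t s;

	s = splsched();            /* raise spl, remembering the previous level (assumed primitive) */
	thread_call_lock_spin();   /* take the thread_call spinlock */

	return s;                  /* callers keep this and pass it back to enable_ints_and_unlock() */
}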
kern_return_t result;
thread_t thread;
int i;
+ spl_t s;
i = sizeof (thread_call_data_t);
thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_KERNEL], THREAD_CALL_PRIORITY_KERNEL, 1, TRUE);
thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_HIGH], THREAD_CALL_PRIORITY_HIGH, THREAD_CALL_THREAD_MIN, FALSE);
- disable_ints_and_lock();
+ s = disable_ints_and_lock();
queue_init(&thread_call_internal_queue);
for (
thread_call_daemon_awake = TRUE;
- enable_ints_and_unlock();
+ enable_ints_and_unlock(s);
result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, NULL, BASEPRI_PREEMPT + 1, &thread);
if (result != KERN_SUCCESS)
timer_call_enter_with_leeway(&group->delayed_timer, NULL,
call->tc_soft_deadline, leeway,
TIMER_CALL_SYS_CRITICAL|TIMER_CALL_LEEWAY,
- ((call->tc_soft_deadline & 0x1) == 0x1));
+ ((call->tc_flags & THREAD_CALL_RATELIMITED) == THREAD_CALL_RATELIMITED));
}
/*
else
deadline += slop;
- /* Bit 0 of the "soft" deadline indicates that
- * this particular callout requires rate-limiting
- * behaviour. Maintain the invariant deadline >= soft_deadline
- */
- deadline |= 1;
if (ratelimited) {
- call->tc_soft_deadline |= 0x1ULL;
+ call->tc_flags |= TIMER_CALL_RATELIMITED;
} else {
- call->tc_soft_deadline &= ~0x1ULL;
+ call->tc_flags &= ~TIMER_CALL_RATELIMITED;
}
+
call->tc_call.param1 = param1;
call->ttd = (sdeadline > abstime) ? (sdeadline - abstime) : 0;
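Rate limiting used to be encoded by OR-ing bit 0 into the soft deadline (and into the hard deadline, to preserve deadline >= soft_deadline); the patch moves that state into tc_flags. Note the set/clear here uses TIMER_CALL_RATELIMITED while the tests elsewhere in the patch use THREAD_CALL_RATELIMITED, so the two names presumably carry the same bit value in this source. An illustrative before/after of how a reader decodes the call (not part of the patch):

/* Old encoding: bit 0 of the soft deadline doubled as the flag, so the
 * timestamp presumably had to be masked before use. */
boolean_t rl_old   = (call->tc_soft_deadline & 0x1) == 0x1;
uint64_t  soft_old = call->tc_soft_deadline & ~0x1ULL;

/* New encoding: tc_soft_deadline is a plain timestamp and the rate-limit
 * state lives with the other tc_flags bits. */
boolean_t rl_new   = (call->tc_flags & THREAD_CALL_RATELIMITED) != 0;
uint64_t  soft_new = call->tc_soft_deadline;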
* if the client has so requested.
*/
static void
-thread_call_finish(thread_call_t call)
+thread_call_finish(thread_call_t call, spl_t *s)
{
boolean_t dowake = FALSE;
panic("Someone waiting on a thread call that is scheduled for free: %p\n", call->tc_call.func);
}
- enable_ints_and_unlock();
+ enable_ints_and_unlock(*s);
zfree(thread_call_zone, call);
- (void)disable_ints_and_lock();
+ *s = disable_ints_and_lock();
}
}
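thread_call_finish() now receives the caller's saved interrupt level by pointer because it may drop the lock around zfree() and then re-acquire it; the level returned by that re-acquisition has to replace the caller's copy, otherwise the caller's final enable_ints_and_unlock() would restore a stale spl. The calling contract, condensed from the worker-thread hunk below:

	spl_t s = disable_ints_and_lock();
	/* ... */
	if (canwait) {
		/* May unlock temporarily to free the call; updates s if it does. */
		thread_call_finish(call, &s);
	}
	/* ... */
	enable_ints_and_unlock(s);    /* s always matches the most recent acquisition */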
{
thread_t self = current_thread();
boolean_t canwait;
+ spl_t s;
if ((thread_get_tag_internal(self) & THREAD_TAG_CALLOUT) == 0)
(void)thread_set_tag_internal(self, THREAD_TAG_CALLOUT);
panic("thread_terminate() returned?");
}
- (void)disable_ints_and_lock();
+ s = disable_ints_and_lock();
thread_sched_call(self, group->sched_call);
} else
canwait = FALSE;
- enable_ints_and_unlock();
+ enable_ints_and_unlock(s);
KERNEL_DEBUG_CONSTANT(
MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
pl, (void *)VM_KERNEL_UNSLIDE(func), param0, param1);
}
- (void)thread_funnel_set(self->funnel_lock, FALSE); /* XXX */
-
- (void) disable_ints_and_lock();
+ s = disable_ints_and_lock();
if (canwait) {
/* Frees if so desired */
- thread_call_finish(call);
+ thread_call_finish(call, &s);
}
}
panic("kcall worker unable to assert wait?");
}
- enable_ints_and_unlock();
+ enable_ints_and_unlock(s);
thread_block_parameter((thread_continue_t)thread_call_thread, group);
} else {
wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_UNINT, 0); /* Interrupted means to exit */
- enable_ints_and_unlock();
+ enable_ints_and_unlock(s);
thread_block_parameter((thread_continue_t)thread_call_thread, group);
/* NOTREACHED */
}
}
- enable_ints_and_unlock();
+ enable_ints_and_unlock(s);
thread_terminate(self);
/* NOTREACHED */
int i;
kern_return_t kr;
thread_call_group_t group;
+ spl_t s;
- (void)disable_ints_and_lock();
+ s = disable_ints_and_lock();
/* Starting at zero happens to be high-priority first. */
for (i = 0; i < THREAD_CALL_GROUP_COUNT; i++) {
while (thread_call_group_should_add_thread(group)) {
group->active_count++;
- enable_ints_and_unlock();
+ enable_ints_and_unlock(s);
kr = thread_call_thread_create(group);
if (kr != KERN_SUCCESS) {
* We can try again later.
*/
delay(10000); /* 10 ms */
- (void)disable_ints_and_lock();
+ s = disable_ints_and_lock();
goto out;
}
- (void)disable_ints_and_lock();
+ s = disable_ints_and_lock();
}
}
thread_call_daemon_awake = FALSE;
wait_queue_assert_wait(&daemon_wqueue, NO_EVENT, THREAD_UNINT, 0);
- enable_ints_and_unlock();
+ enable_ints_and_unlock(s);
thread_block_parameter((thread_continue_t)thread_call_daemon_continue, NULL);
/* NOTREACHED */
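The daemon loop follows the same discipline: every time it re-takes the lock after dropping it around thread_call_thread_create() or the 10 ms retry delay, it re-saves the returned level into s, so the value finally handed to enable_ints_and_unlock() always corresponds to the lock currently held. A condensed sketch of the loop's locking shape (control flow paraphrased from the hunk above; group iteration omitted):

	s = disable_ints_and_lock();
	while (thread_call_group_should_add_thread(group)) {
		group->active_count++;

		enable_ints_and_unlock(s);            /* drop across the blocking create */
		kr = thread_call_thread_create(group);
		if (kr != KERN_SUCCESS) {
			delay(10000);                 /* 10 ms, then retry later */
			s = disable_ints_and_lock();  /* re-save before bailing out */
			goto out;
		}
		s = disable_ints_and_lock();          /* re-save on the normal path too */
	}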
while (!queue_end(&group->delayed_queue, qe(call))) {
if (call->tc_soft_deadline <= timestamp) {
- /* Bit 0 of the "soft" deadline indicates that
- * this particular callout is rate-limited
- * and hence shouldn't be processed before its
- * hard deadline. Rate limited timers aren't
- * skipped when a forcible reevaluation is in progress.
- */
- if ((call->tc_soft_deadline & 0x1) &&
+ if ((call->tc_flags & THREAD_CALL_RATELIMITED) &&
(CE(call)->deadline > timestamp) &&
(ml_timer_forced_evaluation() == FALSE)) {
break;
thread_call_isactive(thread_call_t call)
{
boolean_t active;
+ spl_t s;
- disable_ints_and_lock();
+ s = disable_ints_and_lock();
active = (call->tc_submit_count > call->tc_finish_count);
- enable_ints_and_unlock();
+ enable_ints_and_unlock(s);
return active;
}