#include <mach/mach_types.h>
#include <kern/clock.h>
+#include <kern/smp.h>
#include <kern/processor.h>
#include <kern/timer_call.h>
#include <kern/timer_queue.h>
lck_attr_t timer_longterm_lck_attr;
lck_grp_attr_t timer_longterm_lck_grp_attr;
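+/*
+ * The lck_attr/lck_grp_attr declarations above back the long-term timer
+ * queue lock.  (Presumably they are initialized alongside the long-term
+ * queue itself, e.g. in timer_longterm_init(); that code is outside this
+ * excerpt.)
+ */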
-
+/* Timer queue lock must be acquired with interrupts disabled (under splclock()) */
+#if __SMP__
#define timer_queue_lock_spin(queue) \
    lck_mtx_lock_spin_always(&queue->lock_data)
#define timer_queue_unlock(queue) \
    lck_mtx_unlock_always(&queue->lock_data)
-
+#else
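+/*
+ * Uniprocessor: with interrupts disabled there is no other CPU to race
+ * with, so the queue lock operations reduce to no-ops.
+ */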
+#define timer_queue_lock_spin(queue) (void)1
+#define timer_queue_unlock(queue) (void)1
+#endif
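+/*
+ * Illustrative usage sketch (editorial addition, not part of this change;
+ * names are placeholders):
+ *
+ *     spl_t s = splclock();            // mask clock interrupts
+ *     timer_queue_lock_spin(queue);    // spin-acquire (no-op on !__SMP__)
+ *     ... manipulate the mpqueue ...
+ *     timer_queue_unlock(queue);
+ *     splx(s);                         // restore previous interrupt level
+ */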
#define QUEUE(x) ((queue_t)(x))
#define MPQUEUE(x) ((mpqueue_head_t *)(x))
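+/*
+ * Rationale for the scrubbing below: TIMER_KDEBUG_TRACE() arguments land
+ * in the kdebug buffers, which user space can read, so raw kernel
+ * pointers would leak the KASLR slide.  VM_KERNEL_UNSLIDE_OR_PERM()
+ * removes the slide from kernel-text addresses and substitutes the
+ * address-permutation token for other (e.g. heap) pointers;
+ * VM_KERNEL_UNSLIDE() is used where the value is known to be a text
+ * address, such as a timer callout function.
+ */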
#if TIMER_ASSERT
TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
    DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
-   call,
+   VM_KERNEL_UNSLIDE_OR_PERM(call),
    call->async_dequeue,
-   TCE(call)->queue,
+   VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
    0x1c, 0);
timer_call_enqueue_deadline_unlocked_async1++;
#endif
#if TIMER_ASSERT
TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
    DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
-   call,
+   VM_KERNEL_UNSLIDE_OR_PERM(call),
    call->async_dequeue,
-   TCE(call)->queue,
+   VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
    0, 0);
#endif
if (old_queue != NULL) {
#if TIMER_ASSERT
TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
    DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
-   call,
+   VM_KERNEL_UNSLIDE_OR_PERM(call),
    call->async_dequeue,
-   TCE(call)->queue,
+   VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
    0x1c, 0);
timer_call_dequeue_unlocked_async1++;
#endif
TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
    DECR_TIMER_ENTER | DBG_FUNC_START,
-   call,
-   param1, deadline, flags, 0);
+   VM_KERNEL_UNSLIDE_OR_PERM(call),
+   VM_KERNEL_UNSLIDE_OR_PERM(param1), deadline, flags, 0);
urgency = (flags & TIMER_CALL_URGENCY_MASK);
TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
    DECR_TIMER_ENTER | DBG_FUNC_END,
-   call,
+   VM_KERNEL_UNSLIDE_OR_PERM(call),
    (old_queue != NULL), deadline, queue->count, 0);
splx(s);
TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
    DECR_TIMER_CANCEL | DBG_FUNC_START,
-   call,
+   VM_KERNEL_UNSLIDE_OR_PERM(call),
    TCE(call)->deadline, call->soft_deadline, call->flags, 0);
old_queue = timer_call_dequeue_unlocked(call);
}
TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
    DECR_TIMER_CANCEL | DBG_FUNC_END,
-   call,
-   old_queue,
+   VM_KERNEL_UNSLIDE_OR_PERM(call),
+   VM_KERNEL_UNSLIDE_OR_PERM(old_queue),
    TCE(call)->deadline - mach_absolute_time(),
    TCE(call)->deadline - TCE(call)->entry_time, 0);
splx(s);
#if TIMER_ASSERT
TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
    DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
-   call,
+   VM_KERNEL_UNSLIDE_OR_PERM(call),
    call->async_dequeue,
-   TCE(call)->queue,
+   VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
    0x2b, 0);
#endif
timer_queue_unlock(queue);
TCOAL_DEBUG(0xDDDD0000, queue->earliest_soft_deadline, call->soft_deadline, 0, 0, 0);
TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
    DECR_TIMER_EXPIRE | DBG_FUNC_NONE,
-   call,
+   VM_KERNEL_UNSLIDE_OR_PERM(call),
    call->soft_deadline,
    TCE(call)->deadline,
    TCE(call)->entry_time, 0);
TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
    DECR_TIMER_CALLOUT | DBG_FUNC_START,
-   call, VM_KERNEL_UNSLIDE(func), param0, param1, 0);
+   VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(func),
+   VM_KERNEL_UNSLIDE_OR_PERM(param0),
+   VM_KERNEL_UNSLIDE_OR_PERM(param1),
+   0);
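+/*
+ * func remains VM_KERNEL_UNSLIDE()'d since it is a text address; param0
+ * and param1 are caller-supplied and may point at heap, hence _OR_PERM.
+ */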
#if CONFIG_DTRACE
DTRACE_TMR7(callout__start, timer_call_func_t, func,
TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
    DECR_TIMER_CALLOUT | DBG_FUNC_END,
-   call, VM_KERNEL_UNSLIDE(func), param0, param1, 0);
+   VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(func),
+   VM_KERNEL_UNSLIDE_OR_PERM(param0),
+   VM_KERNEL_UNSLIDE_OR_PERM(param1),
+   0);
call = NULL;
timer_queue_lock_spin(queue);
} else {
#ifdef TIMER_ASSERT
TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
    DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
-   call,
-   TCE(call)->queue,
-   call->lock.interlock.lock_data,
+   VM_KERNEL_UNSLIDE_OR_PERM(call),
+   VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
+   VM_KERNEL_UNSLIDE_OR_PERM(call->lock.interlock.lock_data),
    0x2b, 0);
#endif
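+/*
+ * The entry lock could not be taken while the queue lock is held
+ * (lock-order inversion), so this entry is skipped here and dequeued
+ * asynchronously rather than spinning.
+ */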
timer_queue_migrate_lock_skips++;
    call->soft_deadline,
    TCE(call)->deadline,
    TCE(call)->entry_time,
-   TCE(call)->func,
+   VM_KERNEL_UNSLIDE(TCE(call)->func),
    0);
call = TIMER_CALL(queue_next(qe(call)));
} while (!queue_end(&queue->head, qe(call)));
#ifdef TIMER_ASSERT
TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
    DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
-   call,
-   TCE(call)->queue,
-   call->lock.interlock.lock_data,
+   VM_KERNEL_UNSLIDE_OR_PERM(call),
+   VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
+   VM_KERNEL_UNSLIDE_OR_PERM(call->lock.interlock.lock_data),
    0x2c, 0);
#endif
timer_call_entry_dequeue_async(call);
if (deadline < now)
    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_OVERDUE | DBG_FUNC_NONE,
-       call,
+       VM_KERNEL_UNSLIDE_OR_PERM(call),
        deadline,
        now,
        threshold,
#endif
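+/*
+ * Escalation: a long-term timer whose deadline has come within the scan
+ * threshold is dequeued from the long-term queue and re-armed on a
+ * processor queue so it can fire on time.
+ */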
TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
    DECR_TIMER_ESCALATE | DBG_FUNC_NONE,
-   call,
+   VM_KERNEL_UNSLIDE_OR_PERM(call),
    TCE(call)->deadline,
    TCE(call)->entry_time,
-   TCE(call)->func,
+   VM_KERNEL_UNSLIDE(TCE(call)->func),
    0);
tlp->escalates++;
timer_call_entry_dequeue(call);
TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
    DECR_TIMER_UPDATE | DBG_FUNC_START,
-   &tlp->queue,
+   VM_KERNEL_UNSLIDE_OR_PERM(&tlp->queue),
    tlp->threshold.deadline,
    tlp->threshold.preempted,
    tlp->queue.count, 0);
TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
    DECR_TIMER_UPDATE | DBG_FUNC_END,
-   &tlp->queue,
+   VM_KERNEL_UNSLIDE_OR_PERM(&tlp->queue),
    tlp->threshold.deadline,
    tlp->threshold.scans,
    tlp->queue.count, 0);