#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <libkern/OSAtomic.h>
+#include <kern/kern_types.h>
+#include <kern/timer_call.h>
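+/* kern/timer_call.h supplies the timer_call types and prototypes that
+ * were previously hand-copied into this file (removed below). */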
#include <kern/thread_call.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <vm/pmap.h>
#include <vm/vm_map.h> /* All the bits we care about are guarded by MACH_KERNEL_PRIVATE :-( */
+
/*
* pid/proc
*/
lck_mtx_t cpu_lock;
lck_mtx_t mod_lock;
-cpu_t *cpu_list;
+dtrace_cpu_t *cpu_list;
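+/* dtrace_cpu_t is DTrace's private per-CPU structure; the illumos name
+ * cpu_t is avoided here, presumably to steer clear of xnu's own CPU types. */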
cpu_core_t *cpu_core; /* XXX TLB lockdown? */
/*
return kauth_cred_issuser(cr); /* XXX TODO: HAS_PRIVILEGE(cr, priv); */
}
+/* XXX Get around const poisoning using structure assigns */
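+/* (kauth_cred_getgid()/kauth_cred_getuid() take a non-const kauth_cred_t;
+ * copying *cr into a stack local sheds the const qualifier without a cast.) */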
gid_t
-crgetgid(const cred_t *cr) { return cr->cr_groups[0]; }
+crgetgid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getgid(&copy_cr); }
uid_t
-crgetuid(const cred_t *cr) { return cr->cr_uid; }
+crgetuid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getuid(&copy_cr); }
/*
* "cyclic"
*/
-/* osfmk/kern/timer_call.h */
-typedef void *call_entry_param_t;
-typedef void (*call_entry_func_t)(
- call_entry_param_t param0,
- call_entry_param_t param1);
-
-typedef struct call_entry {
- queue_chain_t q_link;
- call_entry_func_t func;
- call_entry_param_t param0;
- call_entry_param_t param1;
- uint64_t deadline;
- enum {
- IDLE,
- PENDING,
- DELAYED } state;
-} call_entry_data_t;
-
-
-typedef struct call_entry *timer_call_t;
-typedef void *timer_call_param_t;
-typedef void (*timer_call_func_t)(
- timer_call_param_t param0,
- timer_call_param_t param1);
-
-extern void
-timer_call_setup(
- timer_call_t call,
- timer_call_func_t func,
- timer_call_param_t param0);
-
-extern boolean_t
-timer_call_enter1(
- timer_call_t call,
- timer_call_param_t param1,
- uint64_t deadline);
-
-extern boolean_t
-timer_call_cancel(
- timer_call_t call);
-
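+/* A cyclic is emulated by a timer_call that re-arms itself from its own
+ * callout; deadline tracks the next absolute-time firing. */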
typedef struct wrap_timer_call {
cyc_handler_t hdlr;
cyc_time_t when;
uint64_t deadline;
- struct call_entry call;
+ struct timer_call call;
} wrap_timer_call_t;
#define WAKEUP_REAPER 0x7FFFFFFFFFFFFFFFLL
(*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );
clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
- timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline );
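+ /* The four-argument timer_call_enter1() takes urgency flags:
+ * TIMER_CALL_SYS_CRITICAL exempts the callout from timer coalescing and
+ * TIMER_CALL_LOCAL defeats migration off the arming CPU (see
+ * osfmk/kern/timer_call.h). */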
+ timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL );
/* Did timer_call_remove_cyclic request a wakeup call when this timer call was re-armed? */
if (wrapTC->when.cyt_interval == WAKEUP_REAPER)
wrapTC->deadline = now;
clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
- timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline );
+ timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL );
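+ /* The cyclic_id_t handed back is simply the wrapper pointer;
+ * timer_call_remove_cyclic() recovers the embedded timer_call from it. */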
return (cyclic_id_t)wrapTC;
}
* and clock drift on later invocations is not a worry.
*/
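+ /* (Assumes nanos has already been converted to mach absolute-time units
+ * upstream; absolute-time ticks are not nanoseconds on all hardware.) */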
uint64_t deadline = mach_absolute_time() + nanos;
-
+ /* DRK: consider using a lower priority callout here */
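+ /* thread_call_enter_delayed() expects an absolute deadline, not an
+ * interval. */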
thread_call_enter_delayed(call, deadline);
return call;
ASSERT(kaddr + size >= kaddr);
- if (ml_at_interrupt_context() || /* Avoid possible copyio page fault on int stack, which panics! */
- 0 != recover || /* Avoid reentrancy into copyio facility. */
- uaddr + size < uaddr || /* Avoid address wrap. */
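+ /* The interrupt-context and copyio-recovery guards removed above are
+ * presumed to be enforced by the machine-specific
+ * dtrace_copyio_preflight() now (an assumption; the arch-level sources
+ * are not part of this hunk). */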
+ if ( uaddr + size < uaddr || /* Avoid address wrap. */
KERN_FAILURE == dtrace_copyio_preflight(uaddr)) /* Machine specific setup/constraints. */
{
DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
return( DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT) ? TRUE : FALSE );
}
-void
-dtrace_vpanic(const char *format, va_list alist)
-{
- vuprintf( format, alist );
- panic("dtrace_vpanic");
-}
-
#define TOTTY 0x02
extern int prf(const char *, va_list, int, struct tty *); /* bsd/kern/subr_prf.h */