* Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
-
-/*
- * APPLE NOTE: This file is compiled even if dtrace is unconfig'd. A symbol
- * from this file (_dtrace_register_anon_DOF) always needs to be exported for
- * an external kext to link against.
- */
-
-#if CONFIG_DTRACE
-
-#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from mach/ppc/thread_status.h */
#include <kern/thread.h>
-#include <mach/thread_status.h>
-#include <stdarg.h>
-#include <string.h>
-#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/proc.h>
-#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
-#include <libkern/OSAtomic.h>
+#include <machine/atomic.h>
+#include <libkern/OSKextLibPrivate.h>
#include <kern/kern_types.h>
#include <kern/timer_call.h>
#include <kern/thread_call.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
-#include <kern/queue.h>
#include <miscfs/devfs/devfs.h>
#include <kern/kalloc.h>
#include <mach/vm_param.h>
#include <mach/mach_vm.h>
#include <mach/task.h>
-#include <vm/pmap.h>
#include <vm/vm_map.h> /* All the bits we care about are guarded by MACH_KERNEL_PRIVATE :-( */
/* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
#define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */
+KALLOC_HEAP_DEFINE(KHEAP_DTRACE, "dtrace", KHEAP_ID_DEFAULT);
+
+void
+dtrace_sprlock(proc_t *p)
+{
+ lck_mtx_lock(&p->p_dtrace_sprlock);
+}
+
+void
+dtrace_sprunlock(proc_t *p)
+{
+ lck_mtx_unlock(&p->p_dtrace_sprlock);
+}
+
/* Not called from probe context */
-proc_t *
+proc_t *
sprlock(pid_t pid)
{
proc_t* p;
task_suspend_internal(p->task);
- proc_lock(p);
-
- lck_mtx_lock(&p->p_dtrace_sprlock);
+ dtrace_sprlock(p);
return p;
}
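
/*
 * A minimal usage sketch (the caller below is hypothetical, not part of this
 * file): sprlock() pairs the proc lookup, task suspension and acquisition of
 * p_dtrace_sprlock, and sprunlock() undoes them, so callers bracket any work
 * on a traced process with the two.
 */
#if 0
static void
example_with_stopped_proc(pid_t pid)
{
	proc_t *p = sprlock(pid);	/* target task suspended, sprlock held */
	if (p != PROC_NULL) {
		/* ... inspect or patch the now-quiescent process ... */
		sprunlock(p);		/* drop the lock and resume the task */
	}
}
#endif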
sprunlock(proc_t *p)
{
if (p != PROC_NULL) {
- lck_mtx_unlock(&p->p_dtrace_sprlock);
-
- proc_unlock(p);
+ dtrace_sprunlock(p);
task_resume_internal(p->task);
if (map) {
ret = vm_map_read_user( map, (vm_map_address_t)a, buf, (vm_size_t)len);
vm_map_deallocate(map);
- } else
+ } else {
ret = KERN_TERMINATED;
-
+ }
+
return (int)ret;
}
vm_map_t map = get_task_map_reference(task);
if (map) {
/* Find the memory permissions. */
- uint32_t nestingDepth=999999;
+ uint32_t nestingDepth = 999999;
vm_region_submap_short_info_data_64_t info;
mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
mach_vm_address_t address = (mach_vm_address_t)a;
mach_vm_size_t sizeOfRegion = (mach_vm_size_t)len;
-
+
ret = mach_vm_region_recurse(map, &address, &sizeOfRegion, &nestingDepth, (vm_region_recurse_info_t)&info, &count);
- if (ret != KERN_SUCCESS)
+ if (ret != KERN_SUCCESS) {
goto done;
+ }
vm_prot_t reprotect;
if (info.max_protection & VM_PROT_WRITE) {
/* The memory is not currently writable, but can be made writable. */
- ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect | VM_PROT_WRITE);
+ ret = mach_vm_protect(map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, (reprotect & ~VM_PROT_EXECUTE) | VM_PROT_WRITE);
} else {
/*
* The memory is not currently writable, and cannot be made writable. We need to COW this memory.
*
* Strange, we can't just say "reprotect | VM_PROT_COPY", that fails.
*/
- ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
+ ret = mach_vm_protect(map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
}
- if (ret != KERN_SUCCESS)
+ if (ret != KERN_SUCCESS) {
goto done;
-
+ }
} else {
/* The memory was already writable. */
reprotect = VM_PROT_NONE;
}
ret = vm_map_write_user( map,
- buf,
- (vm_map_address_t)a,
- (vm_size_t)len);
+ buf,
+ (vm_map_address_t)a,
+ (vm_size_t)len);
+
+ dtrace_flush_caches();
- if (ret != KERN_SUCCESS)
+ if (ret != KERN_SUCCESS) {
goto done;
+ }
if (reprotect != VM_PROT_NONE) {
ASSERT(reprotect & VM_PROT_EXECUTE);
- ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect);
+ ret = mach_vm_protect(map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect);
}
done:
vm_map_deallocate(map);
- } else
+ } else {
ret = KERN_TERMINATED;
+ }
return (int)ret;
}
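
/*
 * Sketch of the intended round trip (names and values are illustrative): a
 * provider in the style of fasttrap reads the original instruction with
 * uread(), then patches it with uwrite(); uwrite() takes care of temporarily
 * making the page writable (or forcing a COW copy), flushing caches, and
 * restoring the original protections.
 */
#if 0
static int
example_arm_probe(proc_t *p, user_addr_t addr, uint32_t patch)
{
	uint32_t saved;

	if (uread(p, &saved, sizeof(saved), addr) != KERN_SUCCESS) {
		return -1;
	}
	if (uwrite(p, &patch, sizeof(patch), addr) != KERN_SUCCESS) {
		return -1;
	}
	return 0;	/* 'saved' would be kept so the instruction can be restored */
}
#endif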
/*
* dtrace_CRED() can be called from probe context. We cannot simply call kauth_cred_get() since
* that function may try to resolve a lazy credential binding, which entails taking the proc_lock.
- */
+ */
cred_t *
dtrace_CRED(void)
{
struct uthread *uthread = get_bsdthread_info(current_thread());
- if (uthread == NULL)
+ if (uthread == NULL) {
return NULL;
- else
+ } else {
return uthread->uu_ucred; /* May return NOCRED which is defined to be 0 */
+ }
}
-#define HAS_ALLPRIVS(cr) priv_isfullset(&CR_OEPRIV(cr))
-#define HAS_PRIVILEGE(cr, pr) ((pr) == PRIV_ALL ? \
- HAS_ALLPRIVS(cr) : \
- PRIV_ISASSERT(&CR_OEPRIV(cr), pr))
-
-int PRIV_POLICY_CHOICE(void* cred, int priv, int all)
+int
+PRIV_POLICY_CHOICE(void* cred, int priv, int all)
{
#pragma unused(priv, all)
return kauth_cred_issuser(cred); /* XXX TODO: How is this different from PRIV_POLICY_ONLY? */
}
-int
+int
PRIV_POLICY_ONLY(void *cr, int priv, int boolean)
{
#pragma unused(priv, boolean)
return kauth_cred_issuser(cr); /* XXX TODO: HAS_PRIVILEGE(cr, priv); */
}
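
/*
 * Both helpers collapse to "is the credential root?" on Darwin; the privilege
 * argument is ignored. A sketch of a probe-context check (the function and the
 * privilege constant passed are illustrative placeholders):
 */
#if 0
static boolean_t
example_priv_check(void)
{
	cred_t *cr = dtrace_CRED();

	return (cr != NULL && PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) ? B_TRUE : B_FALSE;
}
#endif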
-/* XXX Get around const poisoning using structure assigns */
-gid_t
-crgetgid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getgid(&copy_cr); }
-
uid_t
-crgetuid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getuid(&copy_cr); }
+crgetuid(const cred_t *cr)
+{
+ cred_t copy_cr = *cr; return kauth_cred_getuid(&copy_cr);
+}
/*
* "cyclic"
typedef struct wrap_timer_call {
/* node attributes */
- cyc_handler_t hdlr;
- cyc_time_t when;
- uint64_t deadline;
- int cpuid;
- boolean_t suspended;
- struct timer_call call;
+ cyc_handler_t hdlr;
+ cyc_time_t when;
+ uint64_t deadline;
+ int cpuid;
+ boolean_t suspended;
+ struct timer_call call;
/* next item in the linked list */
LIST_ENTRY(wrap_timer_call) entries;
} wrap_timer_call_t;
-#define WAKEUP_REAPER 0x7FFFFFFFFFFFFFFFLL
-#define NEARLY_FOREVER 0x7FFFFFFFFFFFFFFELL
+#define WAKEUP_REAPER 0x7FFFFFFFFFFFFFFFLL
+#define NEARLY_FOREVER 0x7FFFFFFFFFFFFFFELL
typedef struct cyc_list {
cyc_omni_handler_t cyl_omni;
wrap_timer_call_t cyl_wrap_by_cpus[];
#if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
-} __attribute__ ((aligned (8))) cyc_list_t;
+} __attribute__ ((aligned(8))) cyc_list_t;
#else
} cyc_list_t;
#endif
void dtrace_cpu_state_changed(int, boolean_t);
void
-dtrace_install_cpu_hooks(void) {
+dtrace_install_cpu_hooks(void)
+{
dtrace_cpu_state_changed_hook = dtrace_cpu_state_changed;
}
void
-dtrace_cpu_state_changed(int cpuid, boolean_t is_running) {
+dtrace_cpu_state_changed(int cpuid, boolean_t is_running)
+{
#pragma unused(cpuid)
- wrap_timer_call_t *wrapTC = NULL;
- boolean_t suspend = (is_running ? FALSE : TRUE);
- dtrace_icookie_t s;
+ wrap_timer_call_t *wrapTC = NULL;
+ boolean_t suspend = (is_running ? FALSE : TRUE);
+ dtrace_icookie_t s;
/* Ensure that we're not going to leave the CPU */
s = dtrace_interrupt_disable();
if (suspend) {
assert(!wrapTC->suspended);
/* If this fails, we'll panic anyway, so let's do this now. */
- if (!timer_call_cancel(&wrapTC->call))
+ if (!timer_call_cancel(&wrapTC->call)) {
panic("timer_call_set_suspend() failed to cancel a timer call");
+ }
wrapTC->suspended = TRUE;
} else {
/* Rearm the timer, but ensure it was suspended first. */
assert(wrapTC->suspended);
clock_deadline_for_periodic_event(wrapTC->when.cyt_interval, mach_absolute_time(),
- &wrapTC->deadline);
+ &wrapTC->deadline);
timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
- TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
+ TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
wrapTC->suspended = FALSE;
}
-
}
/* Restore the previous interrupt state. */
(*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );
- clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
+ clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline));
timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL );
}
uint64_t now;
dtrace_icookie_t s;
- timer_call_setup( &(wrapTC->call), _timer_call_apply_cyclic, NULL );
+ timer_call_setup( &(wrapTC->call), _timer_call_apply_cyclic, NULL );
wrapTC->hdlr = *handler;
wrapTC->when = *when;
now = mach_absolute_time();
wrapTC->deadline = now;
- clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
+ clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline));
/* Insert the timer to the list of the running timers on this CPU, and start it. */
s = dtrace_interrupt_disable();
- wrapTC->cpuid = cpu_number();
- LIST_INSERT_HEAD(&cpu_list[wrapTC->cpuid].cpu_cyc_list, wrapTC, entries);
- timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
- TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
- wrapTC->suspended = FALSE;
+ wrapTC->cpuid = cpu_number();
+ LIST_INSERT_HEAD(&cpu_list[wrapTC->cpuid].cpu_cyc_list, wrapTC, entries);
+ timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
+ TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
+ wrapTC->suspended = FALSE;
dtrace_interrupt_enable(s);
return (cyclic_id_t)wrapTC;
assert(wrapTC);
assert(cpu_number() == wrapTC->cpuid);
- if (!timer_call_cancel(&wrapTC->call))
+ if (!timer_call_cancel(&wrapTC->call)) {
panic("timer_call_remove_cyclic() failed to cancel a timer call");
+ }
- LIST_REMOVE(wrapTC, entries);
+ LIST_REMOVE(wrapTC, entries);
}
static void *
timer_call_get_cyclic_arg(wrap_timer_call_t *wrapTC)
{
- return (wrapTC ? wrapTC->hdlr.cyh_arg : NULL);
+ return wrapTC ? wrapTC->hdlr.cyh_arg : NULL;
}
cyclic_id_t
cyclic_timer_add(cyc_handler_t *handler, cyc_time_t *when)
{
wrap_timer_call_t *wrapTC = _MALLOC(sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK);
- if (NULL == wrapTC)
+ if (NULL == wrapTC) {
return CYCLIC_NONE;
- else
+ } else {
return timer_call_add_cyclic( wrapTC, handler, when );
+ }
}
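
/*
 * Sketch of arming a periodic callback on the current CPU through this path.
 * The wrapper function, rate, and the assumption that cyt_interval is given in
 * nanoseconds (converted to mach absolute time internally) are illustrative,
 * not taken from this file.
 */
#if 0
static cyclic_id_t
example_start_tick(void (*func)(void *), void *arg)
{
	cyc_handler_t hdlr = {
		.cyh_func = func,
		.cyh_arg  = arg,
	};
	cyc_time_t when = {
		.cyt_when     = 0,
		.cyt_interval = NANOSEC / 100,	/* ~100Hz, assumed nanoseconds */
	};

	/* cyclic_timer_remove() must later run on the same CPU that armed the timer. */
	return cyclic_timer_add(&hdlr, &when);
}
#endif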
-void
+void
cyclic_timer_remove(cyclic_id_t cyclic)
{
ASSERT( cyclic != CYCLIC_NONE );
cyclic_add_omni(cyc_omni_handler_t *omni)
{
cyc_list_t *cyc_list =
- _MALLOC(sizeof(cyc_list_t) + NCPU * sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK);
+ _MALLOC(sizeof(cyc_list_t) + NCPU * sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK);
- if (NULL == cyc_list)
+ if (NULL == cyc_list) {
return NULL;
+ }
cyc_list->cyl_omni = *omni;
} wrap_thread_call_t;
/*
- * _cyclic_apply will run on some thread under kernel_task. That's OK for the
+ * _cyclic_apply will run on some thread under kernel_task. That's OK for the
* cleaner and the deadman, but too distant in time and place for the profile provider.
*/
static void
(*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );
- clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
+ clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline));
(void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );
/* Did cyclic_remove request a wakeup call when this thread call was re-armed? */
- if (wrapTC->when.cyt_interval == WAKEUP_REAPER)
+ if (wrapTC->when.cyt_interval == WAKEUP_REAPER) {
thread_wakeup((event_t)wrapTC);
+ }
}
cyclic_id_t
uint64_t now;
wrap_thread_call_t *wrapTC = _MALLOC(sizeof(wrap_thread_call_t), M_TEMP, M_ZERO | M_WAITOK);
- if (NULL == wrapTC)
+ if (NULL == wrapTC) {
return CYCLIC_NONE;
+ }
wrapTC->TChdl = thread_call_allocate( _cyclic_apply, NULL );
wrapTC->hdlr = *handler;
now = mach_absolute_time();
wrapTC->deadline = now;
- clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
+ clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline));
(void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );
return (cyclic_id_t)wrapTC;
ASSERT(ret == THREAD_AWAKENED);
}
- if (thread_call_free(wrapTC->TChdl))
+ if (thread_call_free(wrapTC->TChdl)) {
_FREE(wrapTC, M_TEMP);
- else {
+ } else {
/* Gut this cyclic and move on ... */
wrapTC->hdlr.cyh_func = noop_cyh_func;
wrapTC->when.cyt_interval = NEARLY_FOREVER;
}
}
-/*
- * ddi
- */
-void
-ddi_report_dev(dev_info_t *devi)
-{
-#pragma unused(devi)
-}
-
-kern_return_t _dtrace_register_anon_DOF(char *, uchar_t *, uint_t);
-
-kern_return_t
-_dtrace_register_anon_DOF(char *name, uchar_t *data, uint_t nelements)
+int
+ddi_driver_major(dev_info_t *devi)
{
-#pragma unused(name, data, nelements)
- return KERN_FAILURE;
+ return (int)major(CAST_DOWN_EXPLICIT(int, devi));
}
-int
-ddi_driver_major(dev_info_t *devi) { return (int)major(CAST_DOWN_EXPLICIT(int,devi)); }
-
int
ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type,
minor_t minor_num, const char *node_type, int flag)
#pragma unused(spec_type,node_type,flag)
dev_t dev = makedev( ddi_driver_major(dip), minor_num );
- if (NULL == devfs_make_node( dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, name, 0 ))
+ if (NULL == devfs_make_node( dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, name, 0 )) {
return DDI_FAILURE;
- else
+ } else {
return DDI_SUCCESS;
-}
+ }
+}
void
ddi_remove_minor_node(dev_info_t *dip, char *name)
}
minor_t
-getminor ( dev_t d )
+getminor( dev_t d )
{
return (minor_t) minor(d);
}
-dev_t
-makedevice(major_t major, minor_t minor)
-{
- return makedev( major, minor );
-}
-
-int ddi_getprop(dev_t dev, dev_info_t *dip, int flags, const char *name, int defvalue)
-{
-#pragma unused(dev, dip, flags, name)
-
- return defvalue;
-}
-
-/*
- * Kernel Debug Interface
- */
-int
-kdi_dtrace_set(kdi_dtrace_set_t ignore)
-{
-#pragma unused(ignore)
- return 0; /* Success */
-}
-
extern void Debugger(const char*);
void
-debug_enter(char *c) { Debugger(c); }
+debug_enter(char *c)
+{
+ Debugger(c);
+}
/*
* kmem
*/
void *
-dt_kmem_alloc(size_t size, int kmflag)
+dt_kmem_alloc_site(size_t size, int kmflag, vm_allocation_site_t *site)
{
#pragma unused(kmflag)
/*
* We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
- * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock.
+ * Requests larger than 8K with M_NOWAIT fail in kalloc_ext.
*/
-#if defined(DTRACE_MEMORY_ZONES)
- return dtrace_alloc(size);
-#else
- return kalloc(size);
-#endif
+ return kalloc_ext(KHEAP_DTRACE, size, Z_WAITOK, site).addr;
}
void *
-dt_kmem_zalloc(size_t size, int kmflag)
+dt_kmem_zalloc_site(size_t size, int kmflag, vm_allocation_site_t *site)
{
#pragma unused(kmflag)
/*
* We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
- * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock.
+ * Requests larger than 8K with M_NOWAIT fail in kalloc_ext.
*/
-#if defined(DTRACE_MEMORY_ZONES)
- void* buf = dtrace_alloc(size);
-#else
- void* buf = kalloc(size);
-#endif
-
- if(!buf)
- return NULL;
-
- bzero(buf, size);
-
- return buf;
+ return kalloc_ext(KHEAP_DTRACE, size, Z_WAITOK | Z_ZERO, site).addr;
}
void
dt_kmem_free(void *buf, size_t size)
{
-#pragma unused(size)
- /*
- * DTrace relies on this, its doing a lot of NULL frees.
- * A null free causes the debug builds to panic.
- */
- if (buf == NULL) return;
-
- ASSERT(size > 0);
-
-#if defined(DTRACE_MEMORY_ZONES)
- dtrace_free(buf, size);
-#else
- kfree(buf, size);
-#endif
+ kheap_free(KHEAP_DTRACE, buf, size);
}
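
/*
 * All DTrace variable-size allocations now land in the dedicated KHEAP_DTRACE
 * kalloc heap. A sketch of a caller, assuming the dt_kmem_zalloc()/dt_kmem_free()
 * convenience wrappers declared in dtrace_glue.h (the size is illustrative):
 */
#if 0
static void *
example_scratch_buffer(size_t sz)
{
	void *buf = dt_kmem_zalloc(sz, KM_SLEEP);	/* KM_* flags are ignored; always Z_WAITOK */

	if (buf == NULL) {
		return NULL;
	}
	/* ... fill buf ... */
	return buf;	/* eventually released with dt_kmem_free(buf, sz) */
}
#endif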
/*
- * aligned kmem allocator
+ * aligned dt_kmem allocator
* align should be a power of two
*/
-void* dt_kmem_alloc_aligned(size_t size, size_t align, int kmflag)
+void*
+dt_kmem_alloc_aligned_site(size_t size, size_t align, int kmflag, vm_allocation_site_t *site)
{
void *mem, **addr_to_free;
intptr_t mem_aligned;
* the address to free and the total size of the buffer.
*/
hdr_size = sizeof(size_t) + sizeof(void*);
- mem = dt_kmem_alloc(size + align + hdr_size, kmflag);
- if (mem == NULL)
+ mem = dt_kmem_alloc_site(size + align + hdr_size, kmflag, site);
+ if (mem == NULL) {
return NULL;
+ }
mem_aligned = (intptr_t) (((intptr_t) mem + align + hdr_size) & ~(align - 1));
return (void*) mem_aligned;
}
-void* dt_kmem_zalloc_aligned(size_t size, size_t align, int kmflag)
+void*
+dt_kmem_zalloc_aligned_site(size_t size, size_t align, int kmflag, vm_allocation_site_t *s)
{
void* buf;
- buf = dt_kmem_alloc_aligned(size, align, kmflag);
+ buf = dt_kmem_alloc_aligned_site(size, align, kmflag, s);
- if(!buf)
+ if (!buf) {
return NULL;
+ }
bzero(buf, size);
return buf;
}
-void dt_kmem_free_aligned(void* buf, size_t size)
+void
+dt_kmem_free_aligned(void* buf, size_t size)
{
#pragma unused(size)
intptr_t ptr = (intptr_t) buf;
void **addr_to_free = (void**) (ptr - sizeof(void*));
size_t *size_to_free = (size_t*) (ptr - (sizeof(size_t) + sizeof(void*)));
- if (buf == NULL)
+ if (buf == NULL) {
return;
+ }
dt_kmem_free(*addr_to_free, *size_to_free);
}
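
/*
 * Layout produced by dt_kmem_alloc_aligned_site() (widths are illustrative):
 * the raw allocation is over-sized by align plus a small header, and the two
 * header words sit immediately below the pointer handed back to the caller so
 * the free path can find them again:
 *
 *   mem (raw allocation)                              mem_aligned (returned)
 *   |<- padding ->|<- total size ->|<- original ptr ->|<- caller's buffer ... ->|
 *
 * dt_kmem_free_aligned() walks back from the aligned pointer, reads both
 * header words, and frees the original allocation with its recorded size.
 */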
*/
kmem_cache_t *
kmem_cache_create(
- const char *name, /* descriptive name for this cache */
- size_t bufsize, /* size of the objects it manages */
- size_t align, /* required object alignment */
- int (*constructor)(void *, void *, int), /* object constructor */
- void (*destructor)(void *, void *), /* object destructor */
- void (*reclaim)(void *), /* memory reclaim callback */
- void *private, /* pass-thru arg for constr/destr/reclaim */
- vmem_t *vmp, /* vmem source for slab allocation */
- int cflags) /* cache creation flags */
+ const char *name, /* descriptive name for this cache */
+ size_t bufsize, /* size of the objects it manages */
+ size_t align, /* required object alignment */
+ int (*constructor)(void *, void *, int), /* object constructor */
+ void (*destructor)(void *, void *), /* object destructor */
+ void (*reclaim)(void *), /* memory reclaim callback */
+ void *private, /* pass-thru arg for constr/destr/reclaim */
+ vmem_t *vmp, /* vmem source for slab allocation */
+ int cflags) /* cache creation flags */
{
#pragma unused(name,align,constructor,destructor,reclaim,private,vmp,cflags)
return (kmem_cache_t *)bufsize; /* A cookie that tracks the single object size. */
}
-
+
void *
kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
{
#pragma unused(cp)
}
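
/*
 * kmem_cache_create() does not build a real cache: the returned pointer is just
 * the object size smuggled through a cookie, and the alloc/free paths turn it
 * back into plain allocations of that size. A hypothetical caller (the object
 * type is only an example):
 */
#if 0
static void
example_cache_roundtrip(void)
{
	kmem_cache_t *cache = kmem_cache_create("example_cache", sizeof(dtrace_dynvar_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
	dtrace_dynvar_t *dv = kmem_cache_alloc(cache, KM_SLEEP);

	/* ... use dv ... */
	kmem_cache_free(cache, dv);
	kmem_cache_destroy(cache);
}
#endif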
-/*
- * taskq
- */
-extern void thread_call_setup(thread_call_t, thread_call_func_t, thread_call_param_t); /* XXX MACH_KERNEL_PRIVATE */
-
-static void
-_taskq_apply( task_func_t func, thread_call_param_t arg )
-{
- func( (void *)arg );
-}
-
-taskq_t *
-taskq_create(const char *name, int nthreads, pri_t pri, int minalloc,
- int maxalloc, uint_t flags)
-{
-#pragma unused(name,nthreads,pri,minalloc,maxalloc,flags)
-
- return (taskq_t *)thread_call_allocate( (thread_call_func_t)_taskq_apply, NULL );
-}
-
-taskqid_t
-taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
-{
-#pragma unused(flags)
- thread_call_setup( (thread_call_t) tq, (thread_call_func_t)_taskq_apply, (thread_call_param_t)func );
- thread_call_enter1( (thread_call_t) tq, (thread_call_param_t)arg );
- return (taskqid_t) tq /* for lack of anything better */;
-}
-
-void
-taskq_destroy(taskq_t *tq)
-{
- thread_call_cancel( (thread_call_t) tq );
- thread_call_free( (thread_call_t) tq );
-}
-
-pri_t maxclsyspri;
-
/*
* vmem (Solaris "slab" allocator) used by DTrace solely to hand out resource ids
*/
/* By passing around blist *handles*, the underlying blist can be resized as needed. */
struct blist_hdl {
- blist_t blist;
+ blist_t blist;
};
-vmem_t *
+vmem_t *
vmem_create(const char *name, void *base, size_t size, size_t quantum, void *ignore5,
- void *ignore6, vmem_t *source, size_t qcache_max, int vmflag)
+ void *ignore6, vmem_t *source, size_t qcache_max, int vmflag)
{
#pragma unused(name,quantum,ignore5,ignore6,source,qcache_max,vmflag)
blist_t bl;
struct blist_hdl *p = _MALLOC(sizeof(struct blist_hdl), M_TEMP, M_WAITOK);
-
+
ASSERT(quantum == 1);
ASSERT(NULL == ignore5);
ASSERT(NULL == ignore6);
ASSERT(NULL == source);
ASSERT(0 == qcache_max);
+ ASSERT(size <= INT32_MAX);
ASSERT(vmflag & VMC_IDENTIFIER);
-
+
size = MIN(128, size); /* Clamp to 128 initially, since the underlying data structure is pre-allocated */
-
- p->blist = bl = blist_create( size );
- blist_free(bl, 0, size);
- if (base) blist_alloc( bl, (daddr_t)(uintptr_t)base ); /* Chomp off initial ID(s) */
-
+
+ p->blist = bl = blist_create((daddr_t)size);
+ blist_free(bl, 0, (daddr_t)size);
+ if (base) {
+ blist_alloc( bl, (daddr_t)(uintptr_t)base ); /* Chomp off initial ID(s) */
+ }
return (vmem_t *)p;
}
-
+
void *
vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
{
struct blist_hdl *q = (struct blist_hdl *)vmp;
blist_t bl = q->blist;
daddr_t p;
-
+
p = blist_alloc(bl, (daddr_t)size);
-
- if ((daddr_t)-1 == p) {
+
+ if (p == SWAPBLK_NONE) {
blist_resize(&bl, (bl->bl_blocks) << 1, 1);
q->blist = bl;
p = blist_alloc(bl, (daddr_t)size);
- if ((daddr_t)-1 == p)
+ if (p == SWAPBLK_NONE) {
panic("vmem_alloc: failure after blist_resize!");
+ }
}
-
+
return (void *)(uintptr_t)p;
}
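
/*
 * The "arena" is nothing more than a resizable bitmap of IDs; vmem_alloc()
 * doubles it on exhaustion. A sketch in the style of dtrace's probe-id arena
 * (name and bounds are illustrative); a non-NULL base pre-allocates the IDs
 * below it so, for example, 0 is never handed out:
 */
#if 0
static void
example_id_arena(void)
{
	vmem_t *arena = vmem_create("example_ids", (void *)(uintptr_t)1, UINT32_MAX, 1,
	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
	uint32_t id = (uint32_t)(uintptr_t)vmem_alloc(arena, 1, VM_BESTFIT | VM_SLEEP);

	/* ... use id ... */
	vmem_free(arena, (void *)(uintptr_t)id, 1);
	vmem_destroy(arena);
}
#endif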
vmem_free(vmem_t *vmp, void *vaddr, size_t size)
{
struct blist_hdl *p = (struct blist_hdl *)vmp;
-
+
blist_free( p->blist, (daddr_t)(uintptr_t)vaddr, (daddr_t)size );
}
vmem_destroy(vmem_t *vmp)
{
struct blist_hdl *p = (struct blist_hdl *)vmp;
-
+
blist_destroy( p->blist );
- _FREE( p, sizeof(struct blist_hdl) );
+ _FREE( p, sizeof(struct blist_hdl));
}
/*
*/
/*
- * dtrace_gethrestime() provides the "walltimestamp", a value that is anchored at
+ * dtrace_gethrestime() provides the "walltimestamp", a value that is anchored at
* January 1, 1970. Because it can be called from probe context, it must take no locks.
*/
hrtime_t
dtrace_gethrestime(void)
{
- clock_sec_t secs;
- clock_nsec_t nanosecs;
- uint64_t secs64, ns64;
-
+ clock_sec_t secs;
+ clock_nsec_t nanosecs;
+ uint64_t secs64, ns64;
+
clock_get_calendar_nanotime_nowait(&secs, &nanosecs);
secs64 = (uint64_t)secs;
ns64 = (uint64_t)nanosecs;
* denominator in a fraction.
*/
- if ( sTimebaseInfo.denom == 0 ) {
+ if (sTimebaseInfo.denom == 0) {
(void) clock_timebase_info(&sTimebaseInfo);
}
* Provided the final result is representable in 64 bits the following maneuver will
* deliver that result without intermediate overflow.
*/
- if (sTimebaseInfo.denom == sTimebaseInfo.numer)
+ if (sTimebaseInfo.denom == sTimebaseInfo.numer) {
return elapsed;
- else if (sTimebaseInfo.denom == 1)
+ } else if (sTimebaseInfo.denom == 1) {
return elapsed * (uint64_t)sTimebaseInfo.numer;
- else {
+ } else {
/* Decompose elapsed = eta32 * 2^32 + eps32: */
uint64_t eta32 = elapsed >> 32;
uint64_t eps32 = elapsed & 0x00000000ffffffffLL;
uint64_t lambda64 = numer * eps32;
/* Divide the constituents by denom: */
- uint64_t q32 = mu64/denom;
+ uint64_t q32 = mu64 / denom;
uint64_t r32 = mu64 - (q32 * denom); /* mu64 % denom */
- return (q32 << 32) + ((r32 << 32) + lambda64)/denom;
+ return (q32 << 32) + ((r32 << 32) + lambda64) / denom;
}
}
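
/*
 * The maneuver above is long division in base 2^32. Writing
 * elapsed = eta32 * 2^32 + eps32, with mu64 the 32x32-bit product
 * numer * eta32 = q32 * denom + r32 and lambda64 = numer * eps32:
 *
 *   elapsed * numer / denom
 *       = (mu64 * 2^32 + lambda64) / denom
 *       = q32 * 2^32 + (r32 * 2^32 + lambda64) / denom
 *
 * mu64 and lambda64 are products of 32-bit values, so they fit in 64 bits, and
 * with the small numer/denom pairs the timebase actually reports the remaining
 * addend (r32 << 32) + lambda64 stays in range as well, which is the proviso
 * stated above about the final result being representable in 64 bits.
 */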
hrtime_t
dtrace_gethrtime(void)
{
- static uint64_t start = 0;
-
- if (start == 0)
+ static uint64_t start = 0;
+
+ if (start == 0) {
start = mach_absolute_time();
-
- return dtrace_abs_to_nano(mach_absolute_time() - start);
+ }
+
+ return dtrace_abs_to_nano(mach_absolute_time() - start);
}
/*
uint32_t
dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
{
- if (OSCompareAndSwap( (UInt32)cmp, (UInt32)new, (volatile UInt32 *)target ))
+ if (OSCompareAndSwap((UInt32)cmp, (UInt32)new, (volatile UInt32 *)target )) {
return cmp;
- else
+ } else {
return ~cmp; /* Must return something *other* than cmp */
+ }
}
void *
dtrace_casptr(void *target, void *cmp, void *new)
{
- if (OSCompareAndSwapPtr( cmp, new, (void**)target ))
+ if (OSCompareAndSwapPtr( cmp, new, (void**)target )) {
return cmp;
- else
+ } else {
return (void *)(~(uintptr_t)cmp); /* Must return something *other* than cmp */
+ }
}
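
/*
 * On failure these return a value guaranteed to differ from 'cmp' (rather than
 * the witnessed value), so callers detect success by comparing the return value
 * against 'cmp'. A typical lock-free retry loop (names are illustrative):
 */
#if 0
static void
example_atomic_increment(uint32_t *counter)
{
	uint32_t old, new;

	do {
		old = *counter;
		new = old + 1;
	} while (dtrace_cas32(counter, old, new) != old);
}
#endif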
/*
* MP coordination
*/
static void
-dtrace_sync_func(void) {}
+dtrace_sync_func(void)
+{
+}
/*
* dtrace_sync() is not called from probe context.
extern kern_return_t dtrace_copyio_preflight(addr64_t);
extern kern_return_t dtrace_copyio_postflight(addr64_t);
-
+
static int
dtrace_copycheck(user_addr_t uaddr, uintptr_t kaddr, size_t size)
{
ASSERT(kaddr + size >= kaddr);
- if ( uaddr + size < uaddr || /* Avoid address wrap. */
- KERN_FAILURE == dtrace_copyio_preflight(uaddr)) /* Machine specific setup/constraints. */
- {
+ if (uaddr + size < uaddr || /* Avoid address wrap. */
+ KERN_FAILURE == dtrace_copyio_preflight(uaddr)) { /* Machine specific setup/constraints. */
DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
- return (0);
+ return 0;
}
- return (1);
+ return 1;
}
void
dtrace_copyin(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
{
#pragma unused(flags)
-
+
if (dtrace_copycheck( src, dst, len )) {
if (copyin((const user_addr_t)src, (char *)dst, (vm_size_t)len)) {
DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
dtrace_copyinstr(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
{
#pragma unused(flags)
-
+
size_t actual;
-
+
if (dtrace_copycheck( src, dst, len )) {
/* copyin as many as 'len' bytes. */
int error = copyinstr((const user_addr_t)src, (char *)dst, (vm_size_t)len, &actual);
dtrace_copyout(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
{
#pragma unused(flags)
-
+
if (dtrace_copycheck( dst, src, len )) {
if (copyout((const void *)src, dst, (vm_size_t)len)) {
DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
dtrace_copyoutstr(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
{
#pragma unused(flags)
-
+
size_t actual;
if (dtrace_copycheck( dst, src, len )) {
-
/*
* ENAMETOOLONG is returned when 'len' bytes have been copied out but the NUL terminator was
* not encountered. We raise CPU_DTRACE_BADADDR in that case.
extern const int copysize_limit_panic;
+int
+dtrace_copy_maxsize(void)
+{
+ return copysize_limit_panic;
+}
+
+
int
dtrace_buffer_copyout(const void *kaddr, user_addr_t uaddr, vm_size_t nbytes)
{
+ int maxsize = dtrace_copy_maxsize();
/*
* Partition the copyout in copysize_limit_panic-sized chunks
*/
- while (nbytes >= (vm_size_t)copysize_limit_panic) {
- if (copyout(kaddr, uaddr, copysize_limit_panic) != 0)
- return (EFAULT);
+ while (nbytes >= (vm_size_t)maxsize) {
+ if (copyout(kaddr, uaddr, maxsize) != 0) {
+ return EFAULT;
+ }
- nbytes -= copysize_limit_panic;
- uaddr += copysize_limit_panic;
- kaddr += copysize_limit_panic;
+ nbytes -= maxsize;
+ uaddr += maxsize;
+ kaddr += maxsize;
}
if (nbytes > 0) {
- if (copyout(kaddr, uaddr, nbytes) != 0)
- return (EFAULT);
+ if (copyout(kaddr, uaddr, nbytes) != 0) {
+ return EFAULT;
+ }
}
- return (0);
+ return 0;
}
uint8_t
}
DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
- return(ret);
+ return ret;
}
uint16_t
}
DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
- return(ret);
+ return ret;
}
uint32_t
}
DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
- return(ret);
+ return ret;
}
uint64_t
}
DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
- return(ret);
+ return ret;
}
/*
return 0;
}
-void
-fuword8_noerr(user_addr_t uaddr, uint8_t *value)
-{
- if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t))) {
- *value = 0;
- }
-}
-
-void
-fuword16_noerr(user_addr_t uaddr, uint16_t *value)
-{
- if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t))) {
- *value = 0;
- }
-}
-
void
fuword32_noerr(user_addr_t uaddr, uint32_t *value)
{
return 0;
}
-int
-suword16(user_addr_t addr, uint16_t value)
-{
- if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
- return -1;
- }
-
- return 0;
-}
-
-int
-suword8(user_addr_t addr, uint8_t value)
-{
- if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
- return -1;
- }
-
- return 0;
-}
-
-
/*
* Miscellaneous
*/
{
DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
- return( DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT) ? TRUE : FALSE );
+ return DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT) ? TRUE : FALSE;
}
#define TOTTY 0x02
}
/* Not called from probe context */
-void cmn_err( int level, const char *format, ... )
+void
+cmn_err( int level, const char *format, ... )
{
#pragma unused(level)
va_list alist;
uprintf("\n");
}
-/*
- * History:
- * 2002-01-24 gvdl Initial implementation of strstr
- */
-
-__private_extern__ const char *
-strstr(const char *in, const char *str)
-{
- char c;
- size_t len;
- if (!in || !str)
- return in;
-
- c = *str++;
- if (!c)
- return (const char *) in; // Trivial empty string case
-
- len = strlen(str);
- do {
- char sc;
-
- do {
- sc = *in++;
- if (!sc)
- return (char *) 0;
- } while (sc != c);
- } while (strncmp(in, str, len) != 0);
-
- return (const char *) (in - 1);
-}
-
const void*
bsearch(const void *key, const void *base0, size_t nmemb, size_t size, int (*compar)(const void *, const void *))
{
for (lim = nmemb; lim != 0; lim >>= 1) {
p = base + (lim >> 1) * size;
cmp = (*compar)(key, p);
- if (cmp == 0)
+ if (cmp == 0) {
return p;
- if (cmp > 0) { /* key > p: move right */
+ }
+ if (cmp > 0) { /* key > p: move right */
base = (const char *)p + size;
lim--;
- } /* else move left */
+ } /* else move left */
}
- return (NULL);
+ return NULL;
}
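
/*
 * A usage sketch for this bsearch (comparator, table and lookup helper are
 * hypothetical): the comparator must order the key against an element exactly
 * as the array is sorted, returning <0, 0 or >0.
 */
#if 0
static int
cmp_uintptr(const void *key, const void *elem)
{
	uintptr_t a = *(const uintptr_t *)key;
	uintptr_t b = *(const uintptr_t *)elem;
	return (a > b) - (a < b);
}

static const uintptr_t *
example_lookup_addr(const uintptr_t *table, size_t nelems, uintptr_t target)
{
	return bsearch(&target, table, nelems, sizeof(*table), cmp_uintptr);
}
#endif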
/*
int depth = 0;
int on_intr;
- if ((on_intr = CPU_ON_INTR(CPU)) != 0)
+ if ((on_intr = CPU_ON_INTR(CPU)) != 0) {
stacktop = (struct frame *)dtrace_get_cpu_int_stack_top();
- else
+ } else {
stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + kernel_stack_size);
+ }
minfp = fp;
/*
* Hop from interrupt stack to thread stack.
*/
- vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread());
+ vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread());
- minfp = (struct frame *)kstack_base;
- stacktop = (struct frame *)(kstack_base + kernel_stack_size);
+ minfp = (struct frame *)kstack_base;
+ stacktop = (struct frame *)(kstack_base + kernel_stack_size);
on_intr = 0;
continue;
minfp = fp;
}
- if (depth <= aframes)
- return (0);
+ if (depth <= aframes) {
+ return 0;
+ }
+
+ return depth - aframes;
+}
- return (depth - aframes);
+int
+dtrace_addr_in_module(void* addr, struct modctl *ctl)
+{
+ return OSKextKextForAddress(addr) == (void*)ctl->mod_address;
}
/*
* Unconsidered
*/
void
-dtrace_vtime_enable(void) {}
+dtrace_vtime_enable(void)
+{
+}
void
-dtrace_vtime_disable(void) {}
-
-#else /* else ! CONFIG_DTRACE */
-
-#include <sys/types.h>
-#include <mach/vm_types.h>
-#include <mach/kmod.h>
-
-/*
- * This exists to prevent build errors when dtrace is unconfigured.
- */
-
-kern_return_t _dtrace_register_anon_DOF(char *, unsigned char *, uint32_t);
-
-kern_return_t _dtrace_register_anon_DOF(char *arg1, unsigned char *arg2, uint32_t arg3) {
-#pragma unused(arg1, arg2, arg3)
-
- return KERN_FAILURE;
+dtrace_vtime_disable(void)
+{
}
-
-#endif /* CONFIG_DTRACE */