/*
- * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2011-2018 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
-/* all thread states code */
+/*
+ * Profile Every Thread (PET) provides a profile of all threads on the system
+ * when a timer fires. PET supports the "record waiting threads" mode in
+ * Instruments, and used to be called All Thread States (ATS). New tools should
+ * adopt the lightweight PET mode, which provides the same information, but with
+ * much less overhead.
+ *
+ * When traditional (non-lightweight) PET is active, a migrating timer call
+ * causes the PET thread to wake up. The timer handler also issues a broadcast
+ * IPI to the other CPUs, to provide a (somewhat) synchronized set of on-core
+ * samples. This is provided for backwards compatibility with clients that
+ * expect on-core samples, from when PET's timer was based on the on-core timers.
+ * Because PET sampling can take on the order of milliseconds, the PET thread
+ * will enter a new timer deadline after it finishes sampling. This perturbs the
+ * timer cadence by the duration of PET sampling, but it leaves the system to
+ * work on non-profiling tasks for the duration of the timer period.
+ *
+ * Lightweight PET samples the system less-intrusively than normal PET
+ * mode. Instead of iterating tasks and threads on each sample, it increments
+ * a global generation count, `kppet_gencount`, which is checked as threads are
+ * context switched on-core. If the thread's local generation count is older
+ * than the global generation, the thread samples itself.
+ *
+ * | |
+ * thread A +--+---------|
+ * | |
+ * thread B |--+---------------|
+ * | |
+ * thread C | | |-------------------------------------
+ * | | |
+ * thread D | | | |-------------------------------
+ * | | | |
+ * +--+---------+-----+--------------------------------> time
+ * | | |
+ * | +-----+--- threads sampled when they come on-core in
+ * | kperf_pet_switch_context
+ * |
+ * +--- PET timer fire, sample on-core threads A and B,
+ * increment kppet_gencount
+ */
+
#include <mach/mach_types.h>
-#include <IOKit/IOTypes.h>
-#include <IOKit/IOLocks.h>
#include <sys/errno.h>
-#include <chud/chud_xnu.h>
-
+#include <kperf/kperf.h>
#include <kperf/buffer.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>
-#include <kperf/filter.h>
#include <kperf/pet.h>
-#include <kperf/timetrigger.h>
+#include <kperf/kptimer.h>
+
+#include <kern/task.h>
+#include <kern/kalloc.h>
+#if defined(__x86_64__)
+#include <i386/mp.h>
+#endif /* defined(__x86_64__) */
+
+static LCK_MTX_DECLARE(kppet_mtx, &kperf_lck_grp);
+
+static struct {
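+ /*
+ * The action to take when a thread is sampled; 0 means PET is off.
+ */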
+ unsigned int g_actionid;
+ /*
+ * The idle rate controls how many sampling periods to skip if a thread
+ * is idle.
+ */
+ uint32_t g_idle_rate;
+ bool g_setup:1;
+ bool g_lightweight:1;
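+ /*
+ * The sample buffer used by the PET thread; only allocated while an
+ * action is configured.
+ */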
+ struct kperf_sample *g_sample;
+
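+ /*
+ * The dedicated kernel thread that performs PET sampling.
+ */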
+ thread_t g_sample_thread;
+
+ /*
+ * Used by the PET thread to manage which threads and tasks to sample.
+ */
+ thread_t *g_threads;
+ unsigned int g_nthreads;
+ size_t g_threads_size;
+
+ task_t *g_tasks;
+ unsigned int g_ntasks;
+ size_t g_tasks_size;
+} kppet = {
+ .g_actionid = 0,
+ .g_idle_rate = KPERF_PET_DEFAULT_IDLE_RATE,
+};
+
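+/*
+ * Whether lightweight PET is active, and the generation count that threads
+ * compare their `kperf_pet_gen` against; both are read from the context
+ * switch path.
+ */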
+bool kppet_lightweight_active = false;
+_Atomic uint32_t kppet_gencount = 0;
+
+static uint64_t kppet_sample_tasks(uint32_t idle_rate);
+static void kppet_thread(void * param, wait_result_t wr);
-/* timer id to call back on */
-static unsigned pet_timerid = 0;
+static void
+kppet_lock_assert_owned(void)
+{
+ lck_mtx_assert(&kppet_mtx, LCK_MTX_ASSERT_OWNED);
+}
-/* aciton ID to call
- * We also use this as the sync point for waiting, for no good reason
- */
-static unsigned pet_actionid = 0;
+static void
+kppet_lock(void)
+{
+ lck_mtx_lock(&kppet_mtx);
+}
-/* the actual thread pointer */
-static thread_t pet_thread = NULL;
+static void
+kppet_unlock(void)
+{
+ lck_mtx_unlock(&kppet_mtx);
+}
-/* Lock on which to synchronise */
-static IOLock *pet_lock = NULL;
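+/*
+ * The lightweight PET hook, called with interrupts disabled as a thread comes
+ * on-CPU.  If the thread's `kperf_pet_gen` is behind the global
+ * `kppet_gencount`, the thread samples itself into the per-CPU interrupt
+ * sample buffer.
+ */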
+void
+kppet_on_cpu(thread_t thread, thread_continue_t continuation,
+ uintptr_t *starting_fp)
+{
+ assert(thread != NULL);
+ assert(ml_get_interrupts_enabled() == FALSE);
+
+ uint32_t actionid = kppet.g_actionid;
+ if (actionid == 0) {
+ return;
+ }
+
+ if (thread->kperf_pet_gen != atomic_load(&kppet_gencount)) {
+ BUF_VERB(PERF_PET_SAMPLE_THREAD | DBG_FUNC_START,
+ atomic_load_explicit(&kppet_gencount,
+ memory_order_relaxed), thread->kperf_pet_gen);
+
+ task_t task = get_threadtask(thread);
+ struct kperf_context ctx = {
+ .cur_thread = thread,
+ .cur_task = task,
+ .cur_pid = task_pid(task),
+ .starting_fp = starting_fp,
+ };
+ /*
+ * Use a per-CPU interrupt buffer, since this is only called
+ * while interrupts are disabled, from the scheduler.
+ */
+ struct kperf_sample *sample = kperf_intr_sample_buffer();
+ if (!sample) {
+ BUF_VERB(PERF_PET_SAMPLE_THREAD | DBG_FUNC_END, 1);
+ return;
+ }
+
+ unsigned int flags = SAMPLE_FLAG_NON_INTERRUPT | SAMPLE_FLAG_PEND_USER;
+ if (continuation != NULL) {
+ flags |= SAMPLE_FLAG_CONTINUATION;
+ }
+ kperf_sample(sample, &ctx, actionid, flags);
+
+ BUF_VERB(PERF_PET_SAMPLE_THREAD | DBG_FUNC_END);
+ } else {
+ BUF_VERB(PERF_PET_SAMPLE_THREAD,
+ os_atomic_load(&kppet_gencount, relaxed), thread->kperf_pet_gen);
+ }
+}
-/* where to sample data to */
-static struct kperf_sample pet_sample_buf;
+#pragma mark - state transitions
-/* sample an actual, honest to god thread! */
+/*
+ * Lazily initialize PET. The PET thread never exits once PET has been used
+ * once.
+ */
static void
-pet_sample_thread( thread_t thread )
+kppet_setup(void)
{
- struct kperf_context ctx;
- task_t task;
+ if (kppet.g_setup) {
+ return;
+ }
+
+ kern_return_t kr = kernel_thread_start(kppet_thread, NULL,
+ &kppet.g_sample_thread);
+ if (kr != KERN_SUCCESS) {
+ panic("kperf: failed to create PET thread %d", kr);
+ }
+
+ thread_set_thread_name(kppet.g_sample_thread, "kperf-pet-sampling");
+ kppet.g_setup = true;
+}
+
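+/*
+ * Configure PET to sample with `actionid`.  The first non-zero configuration
+ * starts the PET thread and allocates its sample buffer; configuring with 0
+ * frees the buffers again.
+ */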
+void
+kppet_config(unsigned int actionid)
+{
+ /*
+ * Resetting kperf shouldn't get the PET thread started.
+ */
+ if (actionid == 0 && !kppet.g_setup) {
+ return;
+ }
+
+ kppet_setup();
+
+ kppet_lock();
+
+ kppet.g_actionid = actionid;
+
+ if (actionid > 0) {
+ if (!kppet.g_sample) {
+ kppet.g_sample = kalloc_tag(sizeof(*kppet.g_sample),
+ VM_KERN_MEMORY_DIAG);
+ }
+ } else {
+ if (kppet.g_tasks) {
+ assert(kppet.g_tasks_size != 0);
+ kfree(kppet.g_tasks, kppet.g_tasks_size);
+ kppet.g_tasks = NULL;
+ kppet.g_tasks_size = 0;
+ kppet.g_ntasks = 0;
+ }
+ if (kppet.g_threads) {
+ assert(kppet.g_threads_size != 0);
+ kfree(kppet.g_threads, kppet.g_threads_size);
+ kppet.g_threads = NULL;
+ kppet.g_threads_size = 0;
+ kppet.g_nthreads = 0;
+ }
+ if (kppet.g_sample != NULL) {
+ kfree(kppet.g_sample, sizeof(*kppet.g_sample));
+ kppet.g_sample = NULL;
+ }
+ }
- /* work out the context */
- ctx.cur_thread = thread;
- ctx.cur_pid = -1;
+ kppet_unlock();
+}
- task = chudxnu_task_for_thread(thread);
- if(task)
- ctx.cur_pid = chudxnu_pid_for_task(task);
+void
+kppet_reset(void)
+{
+ kppet_config(0);
+ kppet_set_idle_rate(KPERF_PET_DEFAULT_IDLE_RATE);
+ kppet_set_lightweight_pet(0);
+}
- /* do the actual sample */
- kperf_sample( &pet_sample_buf, &ctx, pet_actionid, false );
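+/*
+ * Wake the PET thread to begin a sampling pass; called when the PET timer
+ * fires.
+ */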
+void
+kppet_wake_thread(void)
+{
+ thread_wakeup(&kppet);
}
-/* given a list of threads, preferably stopped, sample 'em! */
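+/*
+ * The body of the PET thread: sleep until woken, sample every task, then hand
+ * the duration of the sampling pass to kptimer_pet_enter() to schedule the
+ * next deadline.
+ */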
+__attribute__((noreturn))
static void
-pet_sample_thread_list( mach_msg_type_number_t threadc, thread_array_t threadv )
+kppet_thread(void * __unused param, wait_result_t __unused wr)
{
- unsigned int i;
+ kppet_lock();
+
+ for (;;) {
+ BUF_INFO(PERF_PET_IDLE);
+
+ do {
+ (void)lck_mtx_sleep(&kppet_mtx, LCK_SLEEP_DEFAULT, &kppet,
+ THREAD_UNINT);
+ } while (kppet.g_actionid == 0);
- for( i = 0; i < threadc; i++ )
- {
- thread_t thread = threadv[i];
+ BUF_INFO(PERF_PET_RUN);
- if( !thread )
- /* XXX? */
- continue;
+ uint64_t sampledur_abs = kppet_sample_tasks(kppet.g_idle_rate);
- pet_sample_thread( thread );
+ kptimer_pet_enter(sampledur_abs);
}
}
-/* given a task (preferably stopped), sample all the threads in it */
+#pragma mark - sampling
+
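+/*
+ * Sample a single thread of `task`.  Threads that have not run (are not
+ * dirty) since their last sample only capture a full callstack once every
+ * `idle_rate` passes; the rest of the time an empty callstack is recorded.
+ */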
static void
-pet_sample_task( task_t task )
+kppet_sample_thread(int pid, task_t task, thread_t thread, uint32_t idle_rate)
{
- mach_msg_type_number_t threadc;
- thread_array_t threadv;
- kern_return_t kr;
-
- kr = chudxnu_task_threads(task, &threadv, &threadc);
- if( kr != KERN_SUCCESS )
- {
- BUF_INFO2(PERF_PET_ERROR, ERR_THREAD, kr);
- return;
+ kppet_lock_assert_owned();
+
+ uint32_t sample_flags = SAMPLE_FLAG_IDLE_THREADS |
+ SAMPLE_FLAG_THREAD_ONLY;
+
+ BUF_VERB(PERF_PET_SAMPLE_THREAD | DBG_FUNC_START);
+
+ struct kperf_context ctx = {
+ .cur_thread = thread,
+ .cur_task = task,
+ .cur_pid = pid,
+ };
+
+ boolean_t thread_dirty = kperf_thread_get_dirty(thread);
+
+ /*
+ * Clean a dirty thread, and skip the callstack sample if the thread was not
+ * dirty and has skipped fewer than `idle_rate` samples.
+ */
+ if (thread_dirty) {
+ kperf_thread_set_dirty(thread, FALSE);
+ } else if ((thread->kperf_pet_cnt % idle_rate) != 0) {
+ sample_flags |= SAMPLE_FLAG_EMPTY_CALLSTACK;
}
+ thread->kperf_pet_cnt++;
- pet_sample_thread_list( threadc, threadv );
+ kperf_sample(kppet.g_sample, &ctx, kppet.g_actionid, sample_flags);
+ kperf_sample_user(&kppet.g_sample->usample, &ctx, kppet.g_actionid,
+ sample_flags);
- chudxnu_free_thread_list(&threadv, &threadc);
+ BUF_VERB(PERF_PET_SAMPLE_THREAD | DBG_FUNC_END);
}
-/* given a list of tasks, sample all the threads in 'em */
-static void
-pet_sample_task_list( int taskc, task_array_t taskv )
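+/*
+ * Take a reference on each thread of `task` and store it in the `g_threads`
+ * array, growing the array as needed.  Returns KERN_FAILURE if the task is
+ * inactive or has no threads.
+ */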
+static kern_return_t
+kppet_threads_prepare(task_t task)
{
- int i;
-
- for( i = 0; i < taskc; i++ )
- {
- kern_return_t kr;
- task_t task = taskv[i];
-
- /* FIXME: necessary? old code did this, our hacky
- * filtering code does, too
+ kppet_lock_assert_owned();
+
+ vm_size_t threads_size_needed;
+
+ for (;;) {
+ task_lock(task);
+
+ if (!task->active) {
+ task_unlock(task);
+ return KERN_FAILURE;
+ }
+
+ /*
+ * With the task locked, figure out if enough space has been allocated to
+ * contain all of the thread references.
*/
- if(!task) {
- continue;
+ threads_size_needed = task->thread_count * sizeof(thread_t);
+ if (threads_size_needed <= kppet.g_threads_size) {
+ break;
}
-
- /* try and stop any task other than the kernel task */
- if( task != kernel_task )
- {
- kr = task_suspend( task );
-
- /* try the next task */
- if( kr != KERN_SUCCESS )
- continue;
+
+ /*
+ * Otherwise, allocate more and try again.
+ */
+ task_unlock(task);
+
+ if (kppet.g_threads_size != 0) {
+ kfree(kppet.g_threads, kppet.g_threads_size);
+ }
+
+ assert(threads_size_needed > 0);
+ kppet.g_threads_size = threads_size_needed;
+
+ kppet.g_threads = kalloc_tag(kppet.g_threads_size, VM_KERN_MEMORY_DIAG);
+ if (kppet.g_threads == NULL) {
+ kppet.g_threads_size = 0;
+ return KERN_RESOURCE_SHORTAGE;
}
-
- /* sample it */
- pet_sample_task( task );
+ }
- /* if it wasn't the kernel, resume it */
- if( task != kernel_task )
- task_resume(task);
+ thread_t thread;
+ kppet.g_nthreads = 0;
+ queue_iterate(&(task->threads), thread, thread_t, task_threads) {
+ thread_reference_internal(thread);
+ kppet.g_threads[kppet.g_nthreads++] = thread;
}
+
+ task_unlock(task);
+
+ return (kppet.g_nthreads > 0) ? KERN_SUCCESS : KERN_FAILURE;
}
+/*
+ * Sample a `task`, using `idle_rate` to control whether idle threads need to be
+ * re-sampled.
+ *
+ * The task must be referenced.
+ */
static void
-pet_sample_all_tasks(void)
+kppet_sample_task(task_t task, uint32_t idle_rate)
{
- task_array_t taskv = NULL;
- mach_msg_type_number_t taskc = 0;
- kern_return_t kr;
-
- kr = chudxnu_all_tasks(&taskv, &taskc);
-
- if( kr != KERN_SUCCESS )
- {
- BUF_INFO2(PERF_PET_ERROR, ERR_TASK, kr);
+ kppet_lock_assert_owned();
+ assert(task != kernel_task);
+ if (task == kernel_task) {
return;
}
- pet_sample_task_list( taskc, taskv );
- chudxnu_free_task_list(&taskv, &taskc);
-}
+ BUF_VERB(PERF_PET_SAMPLE_TASK | DBG_FUNC_START);
-static void
-pet_sample_pid_filter(void)
-{
- task_t *taskv = NULL;
- int *pidv, pidc, i;
- vm_size_t asize;
-
- kperf_filter_pid_list( &pidc, &pidv );
- if( pidc == 0 )
- {
- BUF_INFO2(PERF_PET_ERROR, ERR_PID, 0);
+ int pid = task_pid(task);
+ if (kperf_action_has_task(kppet.g_actionid)) {
+ struct kperf_context ctx = {
+ .cur_task = task,
+ .cur_pid = pid,
+ };
+
+ kperf_sample(kppet.g_sample, &ctx, kppet.g_actionid,
+ SAMPLE_FLAG_TASK_ONLY);
+ }
+
+ if (!kperf_action_has_thread(kppet.g_actionid)) {
+ BUF_VERB(PERF_PET_SAMPLE_TASK | DBG_FUNC_END);
return;
}
- asize = pidc * sizeof(task_t);
- taskv = kalloc( asize );
+ /*
+ * Suspend the task to see an atomic snapshot of all its threads. This
+ * is expensive and disruptive.
+ */
+ kern_return_t kr = task_suspend_internal(task);
+ if (kr != KERN_SUCCESS) {
+ BUF_VERB(PERF_PET_SAMPLE_TASK | DBG_FUNC_END, 1);
+ return;
+ }
- if( taskv == NULL )
+ kr = kppet_threads_prepare(task);
+ if (kr != KERN_SUCCESS) {
+ BUF_INFO(PERF_PET_ERROR, ERR_THREAD, kr);
goto out;
-
- /* convert the pid list into a task list */
- for( i = 0; i < pidc; i++ )
- {
- int pid = pidv[i];
- if( pid == -1 )
- taskv[i] = NULL;
- else
- taskv[i] = chudxnu_task_for_pid(pid);
}
- /* now sample the task list */
- pet_sample_task_list( pidc, taskv );
+ for (unsigned int i = 0; i < kppet.g_nthreads; i++) {
+ thread_t thread = kppet.g_threads[i];
+ assert(thread != THREAD_NULL);
- kfree(taskv, asize);
+ kppet_sample_thread(pid, task, thread, idle_rate);
+
+ thread_deallocate(kppet.g_threads[i]);
+ }
out:
- kperf_filter_free_pid_list( &pidc, &pidv );
+ task_resume_internal(task);
+
+ BUF_VERB(PERF_PET_SAMPLE_TASK | DBG_FUNC_END, kppet.g_nthreads);
}
-/* do the pet sample */
-static void
-pet_work_unit(void)
+/*
+ * Store and reference all tasks on the system, so they can be safely inspected
+ * outside the `tasks_threads_lock`.
+ */
+static kern_return_t
+kppet_tasks_prepare(void)
{
- int pid_filter;
+ kppet_lock_assert_owned();
+
+ vm_size_t size_needed = 0;
- /* check if we're filtering on pid */
- pid_filter = kperf_filter_on_pid();
+ for (;;) {
+ lck_mtx_lock(&tasks_threads_lock);
- if( pid_filter )
- {
- BUF_INFO1(PERF_PET_SAMPLE | DBG_FUNC_START, 1);
- pet_sample_pid_filter();
+ /*
+ * With the lock held, break out of the lock/unlock loop if
+ * there's enough space to store all the tasks.
+ */
+ size_needed = tasks_count * sizeof(task_t);
+ if (size_needed <= kppet.g_tasks_size) {
+ break;
+ }
+
+ /*
+ * Otherwise, allocate more memory outside of the lock.
+ */
+ lck_mtx_unlock(&tasks_threads_lock);
+
+ if (size_needed > kppet.g_tasks_size) {
+ if (kppet.g_tasks_size != 0) {
+ kfree(kppet.g_tasks, kppet.g_tasks_size);
+ }
+
+ assert(size_needed > 0);
+ kppet.g_tasks_size = size_needed;
+
+ kppet.g_tasks = kalloc_tag(kppet.g_tasks_size, VM_KERN_MEMORY_DIAG);
+ if (!kppet.g_tasks) {
+ kppet.g_tasks_size = 0;
+ return KERN_RESOURCE_SHORTAGE;
+ }
+ }
}
- else
- {
- /* otherwise filter everything */
- BUF_INFO1(PERF_PET_SAMPLE | DBG_FUNC_START, 0);
- pet_sample_all_tasks();
+
+ task_t task = TASK_NULL;
+ kppet.g_ntasks = 0;
+ queue_iterate(&tasks, task, task_t, tasks) {
+ bool eligible_task = task != kernel_task;
+ if (eligible_task) {
+ task_reference_internal(task);
+ kppet.g_tasks[kppet.g_ntasks++] = task;
+ }
}
- BUF_INFO1(PERF_PET_SAMPLE | DBG_FUNC_END, 0);
+ lck_mtx_unlock(&tasks_threads_lock);
+ return KERN_SUCCESS;
}
-/* sleep indefinitely */
-static void
-pet_idle(void)
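+/*
+ * Sample every task referenced by kppet_tasks_prepare() and return the
+ * duration of the sampling pass in absolute time units.
+ */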
+static uint64_t
+kppet_sample_tasks(uint32_t idle_rate)
{
- IOLockLock(pet_lock);
- IOLockSleep(pet_lock, &pet_actionid, THREAD_UNINT);
- IOLockUnlock(pet_lock);
-}
+ kppet_lock_assert_owned();
+ assert(kppet.g_actionid > 0);
-/* loop between sampling and waiting */
-static void
-pet_thread_loop( __unused void *param, __unused wait_result_t wr )
-{
- BUF_INFO1(PERF_PET_THREAD, 1);
-
- while(1)
- {
- BUF_INFO1(PERF_PET_IDLE, 0);
- pet_idle();
+ uint64_t start_abs = mach_absolute_time();
- BUF_INFO1(PERF_PET_RUN, 0);
- pet_work_unit();
+ BUF_INFO(PERF_PET_SAMPLE | DBG_FUNC_START);
- /* re-program the timer */
- kperf_timer_pet_set( pet_timerid );
+ kern_return_t kr = kppet_tasks_prepare();
+ if (kr != KERN_SUCCESS) {
+ BUF_INFO(PERF_PET_ERROR, ERR_TASK, kr);
+ BUF_INFO(PERF_PET_SAMPLE | DBG_FUNC_END);
+ return mach_absolute_time() - start_abs;
+ }
- /* FIXME: break here on a condition? */
+ for (unsigned int i = 0; i < kppet.g_ntasks; i++) {
+ task_t task = kppet.g_tasks[i];
+ assert(task != TASK_NULL);
+ kppet_sample_task(task, idle_rate);
+ task_deallocate(task);
+ kppet.g_tasks[i] = TASK_NULL;
}
-}
-/* make sure the thread takes a new period value */
-void
-kperf_pet_timer_config( unsigned timerid, unsigned actionid )
-{
- /* hold the lock so pet thread doesn't run while we do this */
- IOLockLock(pet_lock);
+ BUF_INFO(PERF_PET_SAMPLE | DBG_FUNC_END, kppet.g_ntasks);
+ kppet.g_ntasks = 0;
+ return mach_absolute_time() - start_abs;
+}
- BUF_INFO1(PERF_PET_THREAD, 3);
+#pragma mark - sysctl accessors
- /* set values */
- pet_timerid = timerid;
- pet_actionid = actionid;
+int
+kppet_get_idle_rate(void)
+{
+ return kppet.g_idle_rate;
+}
- /* done */
- IOLockUnlock(pet_lock);
+int
+kppet_set_idle_rate(int new_idle_rate)
+{
+ kppet.g_idle_rate = new_idle_rate;
+ return 0;
}
-/* make the thread run! */
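+/*
+ * Recompute whether the lightweight PET context switch hook should be armed,
+ * based on whether kperf is sampling and lightweight mode is enabled.
+ */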
void
-kperf_pet_thread_go(void)
+kppet_lightweight_active_update(void)
{
- /* Make the thread go */
- IOLockWakeup(pet_lock, &pet_actionid, FALSE);
+ kppet_lightweight_active = (kperf_is_sampling() && kppet.g_lightweight);
+ kperf_on_cpu_update();
}
-
-/* wait for the pet thread to finish a run */
-void
-kperf_pet_thread_wait(void)
+int
+kppet_get_lightweight_pet(void)
{
- /* acquire the lock to ensure the thread is parked. */
- IOLockLock(pet_lock);
- IOLockUnlock(pet_lock);
+ return kppet.g_lightweight;
}
-/* keep the pet thread around while we run */
int
-kperf_pet_init(void)
+kppet_set_lightweight_pet(int on)
{
- kern_return_t rc;
- thread_t t;
-
- if( pet_thread != NULL )
- return 0;
-
- /* make the sync poing */
- pet_lock = IOLockAlloc();
- if( pet_lock == NULL )
- return ENOMEM;
-
- /* create the thread */
- BUF_INFO1(PERF_PET_THREAD, 0);
- rc = kernel_thread_start( pet_thread_loop, NULL, &t );
- if( rc != KERN_SUCCESS )
- {
- IOLockFree( pet_lock );
- pet_lock = NULL;
- return ENOMEM;
+ if (kperf_is_sampling()) {
+ return EBUSY;
}
- /* OK! */
+ kppet.g_lightweight = (on == 1);
+ kppet_lightweight_active_update();
return 0;
}