/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* Manage timers */

#include <mach/mach_types.h>
#include <kern/cpu_data.h> /* current_thread() */
#include <kern/kalloc.h>
#include <stdatomic.h>
#include <sys/errno.h>
#include <sys/vm.h>
#include <sys/ktrace.h>

#include <machine/machine_routines.h>
#if defined(__x86_64__)
#include <i386/mp.h>
#endif /* defined(__x86_64__) */

#include <kperf/kperf.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/action.h>
#include <kperf/kperf_timer.h>
#include <kperf/kperf_arch.h>
#include <kperf/pet.h>
#include <kperf/sample.h>

/* the list of timers */
struct kperf_timer *kperf_timerv = NULL;
unsigned int kperf_timerc = 0;

static unsigned int pet_timer_id = 999;

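/*
 * A timer's action_state packs the action ID to sample in its low byte and
 * sets KPERF_TMR_ACTIVE while the timer's handler is running; for example, a
 * value of 0x103 means action 3 is configured and the handler is active.
 */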
#define KPERF_TMR_ACTION_MASK (0xff)
#define KPERF_TMR_ACTION(action_state) ((action_state) & KPERF_TMR_ACTION_MASK)
#define KPERF_TMR_ACTIVE (0x100)

/* maximum number of timers we can construct */
#define TIMER_MAX (16)

static uint64_t min_period_abstime;
static uint64_t min_period_bg_abstime;
static uint64_t min_period_pet_abstime;
static uint64_t min_period_pet_bg_abstime;

static uint64_t
kperf_timer_min_period_abstime(void)
{
	if (ktrace_background_active()) {
		return min_period_bg_abstime;
	} else {
		return min_period_abstime;
	}
}

static uint64_t
kperf_timer_min_pet_period_abstime(void)
{
	if (ktrace_background_active()) {
		return min_period_pet_bg_abstime;
	} else {
		return min_period_pet_abstime;
	}
}

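/* arm a timer's timer call to fire one period after `now` */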
static void
kperf_timer_schedule(struct kperf_timer *timer, uint64_t now)
{
	BUF_INFO(PERF_TM_SCHED, timer->period);

	/* if we re-programmed the timer to zero, just drop it */
	if (timer->period == 0) {
		return;
	}

	/* calculate deadline */
	uint64_t deadline = now + timer->period;

	/* re-schedule the timer, making sure we don't apply slop */
	timer_call_enter(&timer->tcall, deadline, TIMER_CALL_SYS_CRITICAL);
}

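/*
 * Sample the current CPU, either because a timer fired locally or in response
 * to an IPI. The system_sample flag adds the system-wide samplers to the
 * sample, and only_system restricts it to just those samplers.
 */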
static void
kperf_sample_cpu(struct kperf_timer *timer, bool system_sample,
    bool only_system)
{
	assert(timer != NULL);

	/* Always cut a tracepoint to show a sample event occurred */
	BUF_DATA(PERF_TM_HNDLR | DBG_FUNC_START, 0);

	int ncpu = cpu_number();

	struct kperf_sample *intbuf = kperf_intr_sample_buffer();
#if DEVELOPMENT || DEBUG
	intbuf->sample_time = mach_absolute_time();
#endif /* DEVELOPMENT || DEBUG */

	/* On a timer, we can see the "real" current thread */
	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);
	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
		.trigger_type = TRIGGER_TYPE_TIMER,
		.trigger_id = (unsigned int)(timer - kperf_timerv),
	};

	if (ctx.trigger_id == pet_timer_id && ncpu < machine_info.logical_cpu_max) {
		kperf_tid_on_cpus[ncpu] = thread_tid(ctx.cur_thread);
	}

	/* make sure sampling is on */
	unsigned int status = kperf_sampling_status();
	if (status == KPERF_SAMPLING_OFF) {
		BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_OFF);
		return;
	} else if (status == KPERF_SAMPLING_SHUTDOWN) {
		BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_SHUTDOWN);
		return;
	}

	/* call the action -- kernel-only from interrupt, pend user */
	int r = kperf_sample(intbuf, &ctx, timer->actionid,
	    SAMPLE_FLAG_PEND_USER | (system_sample ? SAMPLE_FLAG_SYSTEM : 0) |
	    (only_system ? SAMPLE_FLAG_ONLY_SYSTEM : 0));

	/* end tracepoint is informational */
	BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, r);

	(void)atomic_fetch_and_explicit(&timer->pending_cpus,
	    ~(UINT64_C(1) << ncpu), memory_order_relaxed);
}

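/* handle the IPI sent by the timer handler to sample the other CPUs */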
void
kperf_ipi_handler(void *param)
{
	kperf_sample_cpu((struct kperf_timer *)param, false, false);
}

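/*
 * Runs when a timer's timer call fires: marks the timer active, broadcasts an
 * IPI if the action needs per-thread samples from other CPUs, samples this
 * CPU, and rearms the timer (the PET thread rearms the PET timer instead).
 */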
static void
kperf_timer_handler(void *param0, __unused void *param1)
{
	struct kperf_timer *timer = param0;
	unsigned int ntimer = (unsigned int)(timer - kperf_timerv);
	unsigned int ncpus = machine_info.logical_cpu_max;
	bool system_only_self = true;

	uint32_t action_state = atomic_fetch_or(&timer->action_state,
	    KPERF_TMR_ACTIVE);

	uint32_t actionid = KPERF_TMR_ACTION(action_state);
	if (actionid == 0) {
		goto deactivate;
	}

#if DEVELOPMENT || DEBUG
	timer->fire_time = mach_absolute_time();
#endif /* DEVELOPMENT || DEBUG */

	/* don't IPI the other CPUs if sampling is shutting down */
	if (kperf_sampling_status() == KPERF_SAMPLING_SHUTDOWN) {
		goto deactivate;
	}

	BUF_DATA(PERF_TM_FIRE, ntimer, ntimer == pet_timer_id, timer->period,
	    actionid);

	if (ntimer == pet_timer_id) {
		kperf_pet_fire_before();

		/* clean up the thread-on-CPUs cache */
		bzero(kperf_tid_on_cpus, ncpus * sizeof(*kperf_tid_on_cpus));
	}

	/*
	 * IPI other cores only if the action has non-system samplers.
	 */
	if (kperf_action_has_non_system(actionid)) {
		/*
		 * If the core that's handling the timer is not scheduling
		 * threads, only run system samplers.
		 */
		system_only_self = kperf_mp_broadcast_other_running(timer);
	}
	kperf_sample_cpu(timer, true, system_only_self);

	/* release the PET thread? */
	if (ntimer == pet_timer_id) {
		/* PET mode is responsible for rearming the timer */
		kperf_pet_fire_after();
	} else {
		/*
		 * FIXME: Get the current time from elsewhere. The next
		 * timer's period now includes the time taken to reach this
		 * point. This causes a bias towards longer sampling periods
		 * than requested.
		 */
		kperf_timer_schedule(timer, mach_absolute_time());
	}

deactivate:
	atomic_fetch_and(&timer->action_state, ~KPERF_TMR_ACTIVE);
}

/* program the timer from the PET thread */
void
kperf_timer_pet_rearm(uint64_t elapsed_ticks)
{
	struct kperf_timer *timer = NULL;
	uint64_t period = 0;
	uint64_t deadline;

	/*
	 * If the pet_timer_id is invalid, it has been disabled, so this should
	 * do nothing.
	 */
	if (pet_timer_id >= kperf_timerc) {
		return;
	}

	unsigned int status = kperf_sampling_status();
	/* do not reprogram the timer if sampling is off or shutting down */
	if (status == KPERF_SAMPLING_OFF) {
		BUF_INFO(PERF_PET_END, SAMPLE_OFF);
		return;
	} else if (status == KPERF_SAMPLING_SHUTDOWN) {
		BUF_INFO(PERF_PET_END, SAMPLE_SHUTDOWN);
		return;
	}

	timer = &(kperf_timerv[pet_timer_id]);

	/* if we re-programmed the timer to zero, just drop it */
	if (timer->period == 0) {
		return;
	}

	/* subtract the time the PET sample took, being careful not to underflow */
	if (timer->period > elapsed_ticks) {
		period = timer->period - elapsed_ticks;
	}

	/* make sure we don't set the next PET sample to happen too soon */
	if (period < min_period_pet_abstime) {
		period = min_period_pet_abstime;
	}

	/*
	 * The PET thread may have run for a while, so take the time again
	 * rather than reusing a stale timestamp.
	 */
	deadline = mach_absolute_time() + period;

	BUF_INFO(PERF_PET_SCHED, timer->period, period, elapsed_ticks, deadline);

	/* re-schedule the timer, making sure we don't apply slop */
	timer_call_enter(&timer->tcall, deadline, TIMER_CALL_SYS_CRITICAL);
}

/* turn on all the timers */
void
kperf_timer_go(void)
{
	/* get the PET thread going */
	if (pet_timer_id < kperf_timerc) {
		kperf_pet_config(kperf_timerv[pet_timer_id].actionid);
	}

	uint64_t now = mach_absolute_time();

	for (unsigned int i = 0; i < kperf_timerc; i++) {
		struct kperf_timer *timer = &kperf_timerv[i];
		if (timer->period == 0) {
			continue;
		}

		atomic_store(&timer->action_state,
		    timer->actionid & KPERF_TMR_ACTION_MASK);
		kperf_timer_schedule(timer, now);
	}
}

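/*
 * Turn off all the timers and wait for any handlers still running on other
 * CPUs to finish.
 */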
void
kperf_timer_stop(void)
{
	/*
	 * Determine which timers are running and store them in a bitset, while
	 * cancelling their timer call.
	 */
	uint64_t running_timers = 0;
	for (unsigned int i = 0; i < kperf_timerc; i++) {
		struct kperf_timer *timer = &kperf_timerv[i];
		if (timer->period == 0) {
			continue;
		}

		uint32_t action_state = atomic_fetch_and(&timer->action_state,
		    ~KPERF_TMR_ACTION_MASK);
		if (action_state & KPERF_TMR_ACTIVE) {
			bit_set(running_timers, i);
		}

		timer_call_cancel(&timer->tcall);
	}

	/*
	 * Wait for any running timers to finish their critical sections.
	 */
	for (unsigned int i = lsb_first(running_timers); i < kperf_timerc;
	    i = lsb_next(running_timers, i)) {
		while (atomic_load(&kperf_timerv[i].action_state) != 0) {
			delay(10);
		}
	}

	if (pet_timer_id < kperf_timerc) {
		/* wait for PET to stop, too */
		kperf_pet_config(0);
	}
}

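/* get and set the timer that drives PET (profile every thread) sampling */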
unsigned int
kperf_timer_get_petid(void)
{
	return pet_timer_id;
}

int
kperf_timer_set_petid(unsigned int timerid)
{
	if (timerid < kperf_timerc) {
		uint64_t min_period;

		min_period = kperf_timer_min_pet_period_abstime();
		if (kperf_timerv[timerid].period < min_period) {
			kperf_timerv[timerid].period = min_period;
		}
		kperf_pet_config(kperf_timerv[timerid].actionid);
	} else {
		/* clear the PET trigger if it's a bogus ID */
		kperf_pet_config(0);
	}

	pet_timer_id = timerid;

	return 0;
}

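/*
 * Timer periods are expressed in absolute-time units; non-zero periods are
 * clamped to the minimums computed in kperf_timer_set_count().
 */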
int
kperf_timer_get_period(unsigned int timerid, uint64_t *period_abstime)
{
	if (timerid >= kperf_timerc) {
		return EINVAL;
	}

	*period_abstime = kperf_timerv[timerid].period;
	return 0;
}

int
kperf_timer_set_period(unsigned int timerid, uint64_t period_abstime)
{
	uint64_t min_period;

	if (timerid >= kperf_timerc) {
		return EINVAL;
	}

	if (pet_timer_id == timerid) {
		min_period = kperf_timer_min_pet_period_abstime();
	} else {
		min_period = kperf_timer_min_period_abstime();
	}

	if (period_abstime > 0 && period_abstime < min_period) {
		period_abstime = min_period;
	}

	kperf_timerv[timerid].period = period_abstime;

	/* FIXME: re-program running timers? */

	return 0;
}

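/* get and set the action ID a timer samples with when it fires */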
int
kperf_timer_get_action(unsigned int timerid, uint32_t *action)
{
	if (timerid >= kperf_timerc) {
		return EINVAL;
	}

	*action = kperf_timerv[timerid].actionid;
	return 0;
}

int
kperf_timer_set_action(unsigned int timerid, uint32_t action)
{
	if (timerid >= kperf_timerc) {
		return EINVAL;
	}

	kperf_timerv[timerid].actionid = action;
	return 0;
}

unsigned int
kperf_timer_get_count(void)
{
	return kperf_timerc;
}

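/* restore timers and PET to their default, disabled state */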
void
kperf_timer_reset(void)
{
	kperf_timer_set_petid(999);
	kperf_set_pet_idle_rate(KPERF_PET_DEFAULT_IDLE_RATE);
	kperf_set_lightweight_pet(0);
	for (unsigned int i = 0; i < kperf_timerc; i++) {
		kperf_timerv[i].period = 0;
		kperf_timerv[i].actionid = 0;
		atomic_store_explicit(&kperf_timerv[i].pending_cpus, 0,
		    memory_order_relaxed);
	}
}

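/*
 * Grow the timers array to hold `count` entries, preserving any existing
 * configuration; shrinking the array is not supported.
 */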
extern int
kperf_timer_set_count(unsigned int count)
{
	struct kperf_timer *new_timerv = NULL, *old_timerv = NULL;
	unsigned int old_count;

	if (min_period_abstime == 0) {
		nanoseconds_to_absolutetime(KP_MIN_PERIOD_NS, &min_period_abstime);
		nanoseconds_to_absolutetime(KP_MIN_PERIOD_BG_NS, &min_period_bg_abstime);
		nanoseconds_to_absolutetime(KP_MIN_PERIOD_PET_NS, &min_period_pet_abstime);
		nanoseconds_to_absolutetime(KP_MIN_PERIOD_PET_BG_NS,
		    &min_period_pet_bg_abstime);
		assert(min_period_abstime > 0);
	}

	if (count == kperf_timerc) {
		return 0;
	}
	if (count > TIMER_MAX) {
		return EINVAL;
	}

	/* TODO: allow shrinking? */
	if (count < kperf_timerc) {
		return EINVAL;
	}

	/*
	 * Make sure kperf is initialized when creating the array for the first
	 * time.
	 */
	if (kperf_timerc == 0) {
		int r;

		/* main kperf */
		if ((r = kperf_init())) {
			return r;
		}
	}

	/*
	 * Shut down any running timers since we will be messing with the timer
	 * call structures.
	 */
	kperf_timer_stop();

	/* create a new array */
	new_timerv = kalloc_tag(count * sizeof(struct kperf_timer),
	    VM_KERN_MEMORY_DIAG);
	if (new_timerv == NULL) {
		return ENOMEM;
	}
	old_timerv = kperf_timerv;
	old_count = kperf_timerc;

	if (old_timerv != NULL) {
		bcopy(kperf_timerv, new_timerv,
		    kperf_timerc * sizeof(struct kperf_timer));
	}

	/* zero the new entries */
	bzero(&(new_timerv[kperf_timerc]),
	    (count - old_count) * sizeof(struct kperf_timer));

	/* (re-)set up the timer call info for all entries */
	for (unsigned int i = 0; i < count; i++) {
		timer_call_setup(&new_timerv[i].tcall, kperf_timer_handler,
		    &new_timerv[i]);
	}

	kperf_timerv = new_timerv;
	kperf_timerc = count;

	if (old_timerv != NULL) {
		kfree(old_timerv, old_count * sizeof(struct kperf_timer));
	}

	return 0;
}