/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* Manage timers */

#include <mach/mach_types.h>
#include <kern/cpu_data.h> /* current_thread() */
#include <kern/kalloc.h>
#include <stdatomic.h>
#include <sys/errno.h>
#include <sys/vm.h>
#include <sys/ktrace.h>

#include <machine/machine_routines.h>
#if defined(__x86_64__)
#include <i386/mp.h>
#endif /* defined(__x86_64__) */

#include <kperf/kperf.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/action.h>
#include <kperf/kperf_timer.h>
#include <kperf/kperf_arch.h>
#include <kperf/pet.h>
#include <kperf/sample.h>

/* the list of timers */
struct kperf_timer *kperf_timerv = NULL;
unsigned int kperf_timerc = 0;

static unsigned int pet_timer_id = 999;

/* maximum number of timers we can construct */
#define TIMER_MAX (16)

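/*
 * Minimum allowed timer periods, in absolute time.  They are initialized
 * from the KP_MIN_PERIOD_*_NS constants the first time the timer array is
 * configured; the accessors below return the background variants while
 * ktrace is in background mode.
 */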
static uint64_t min_period_abstime;
static uint64_t min_period_bg_abstime;
static uint64_t min_period_pet_abstime;
static uint64_t min_period_pet_bg_abstime;

static uint64_t
kperf_timer_min_period_abstime(void)
{
	if (ktrace_background_active()) {
		return min_period_bg_abstime;
	} else {
		return min_period_abstime;
	}
}

static uint64_t
kperf_timer_min_pet_period_abstime(void)
{
	if (ktrace_background_active()) {
		return min_period_pet_bg_abstime;
	} else {
		return min_period_pet_abstime;
	}
}

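/*
 * Arm the timer call to fire one period after `now`.  A timer with a zero
 * period is left disarmed.
 */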
static void
kperf_timer_schedule(struct kperf_timer *timer, uint64_t now)
{
	BUF_INFO(PERF_TM_SCHED, timer->period);

	/* if the timer's period was reprogrammed to zero, drop it */
	if (timer->period == 0) {
		return;
	}

	/* calculate the deadline */
	uint64_t deadline = now + timer->period;

	/* re-schedule the timer, making sure we don't apply slop */
	timer_call_enter(&timer->tcall, deadline, TIMER_CALL_SYS_CRITICAL);
}

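/*
 * Sample the current CPU on behalf of `timer`, either directly from its
 * handler or from an IPI that handler broadcast.  `system_sample` adds the
 * system samplers to the sample; `only_system` restricts the sample to them.
 */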
static void
kperf_sample_cpu(struct kperf_timer *timer, bool system_sample,
		bool only_system)
{
	struct kperf_context ctx;

	assert(timer != NULL);

	/* Always cut a tracepoint to show a sample event occurred */
	BUF_DATA(PERF_TM_HNDLR | DBG_FUNC_START, 0);

	int ncpu = cpu_number();

	struct kperf_sample *intbuf = kperf_intr_sample_buffer();

	/* On a timer, we can see the "real" current thread */
	ctx.cur_thread = current_thread();
	ctx.cur_pid = task_pid(get_threadtask(ctx.cur_thread));

	/* who fired */
	ctx.trigger_type = TRIGGER_TYPE_TIMER;
	ctx.trigger_id = (unsigned int)(timer - kperf_timerv);

	if (ctx.trigger_id == pet_timer_id && ncpu < machine_info.logical_cpu_max) {
		kperf_tid_on_cpus[ncpu] = thread_tid(ctx.cur_thread);
	}

	/* make sure sampling is on */
	unsigned int status = kperf_sampling_status();
	if (status == KPERF_SAMPLING_OFF) {
		BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_OFF);
		return;
	} else if (status == KPERF_SAMPLING_SHUTDOWN) {
		BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_SHUTDOWN);
		return;
	}

	/* call the action -- kernel-only from interrupt, pend user */
	int r = kperf_sample(intbuf, &ctx, timer->actionid,
			SAMPLE_FLAG_PEND_USER | (system_sample ? SAMPLE_FLAG_SYSTEM : 0) |
			(only_system ? SAMPLE_FLAG_ONLY_SYSTEM : 0));

	/* end tracepoint is informational */
	BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, r);

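	/* this CPU's sample is done; clear its bit in the timer's pending_cpus mask */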
	(void)atomic_fetch_and_explicit(&timer->pending_cpus,
			~(UINT64_C(1) << ncpu), memory_order_relaxed);
}

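/* IPI handler: sample this CPU on behalf of the broadcasting timer */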
void
kperf_ipi_handler(void *param)
{
	kperf_sample_cpu((struct kperf_timer *)param, false, false);
}

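/*
 * Timer call handler, invoked when a timer's deadline passes.  It broadcasts
 * an IPI to the other CPUs if the action has non-system samplers, samples
 * the current CPU, and re-arms the timer.  The PET timer is instead re-armed
 * by the PET thread, in kperf_timer_pet_rearm.
 */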
static void
kperf_timer_handler(void *param0, __unused void *param1)
{
	struct kperf_timer *timer = param0;
	unsigned int ntimer = (unsigned int)(timer - kperf_timerv);
	unsigned int ncpus = machine_info.logical_cpu_max;
	bool system_only_self = true;

	if (timer->actionid == 0) {
		return;
	}

	timer->active = 1;

	/* don't IPI the other CPUs if sampling is shutting down */
	if (kperf_sampling_status() == KPERF_SAMPLING_SHUTDOWN) {
		goto deactivate;
	}

	BUF_DATA(PERF_TM_FIRE, ntimer, ntimer == pet_timer_id, timer->period,
			timer->actionid);

	if (ntimer == pet_timer_id) {
		kperf_pet_fire_before();

		/* clean up the thread-on-CPUs cache */
		bzero(kperf_tid_on_cpus, ncpus * sizeof(*kperf_tid_on_cpus));
	}

	/*
	 * IPI other cores only if the action has non-system samplers.
	 */
	if (kperf_sample_has_non_system(timer->actionid)) {
		/*
		 * If the core that's handling the timer is not scheduling
		 * threads, only run system samplers.
		 */
		system_only_self = kperf_mp_broadcast_other_running(timer);
	}
	kperf_sample_cpu(timer, true, system_only_self);

	/* release the PET thread? */
	if (ntimer == pet_timer_id) {
		/* PET mode is responsible for rearming the timer */
		kperf_pet_fire_after();
	} else {
		/*
		 * FIXME: Get the current time from elsewhere.  The next
		 * timer's period now includes the time taken to reach this
		 * point.  This causes a bias towards longer sampling periods
		 * than requested.
		 */
		kperf_timer_schedule(timer, mach_absolute_time());
	}

deactivate:
	timer->active = 0;
}

/* program the timer from the PET thread */
void
kperf_timer_pet_rearm(uint64_t elapsed_ticks)
{
	struct kperf_timer *timer = NULL;
	uint64_t period = 0;
	uint64_t deadline;

	/*
	 * If the pet_timer_id is invalid, it has been disabled, so this should
	 * do nothing.
	 */
	if (pet_timer_id >= kperf_timerc) {
		return;
	}

	unsigned int status = kperf_sampling_status();
	/* do not reprogram the timer if sampling is off or shutting down */
	if (status == KPERF_SAMPLING_OFF) {
		BUF_INFO(PERF_PET_END, SAMPLE_OFF);
		return;
	} else if (status == KPERF_SAMPLING_SHUTDOWN) {
		BUF_INFO(PERF_PET_END, SAMPLE_SHUTDOWN);
		return;
	}

	timer = &(kperf_timerv[pet_timer_id]);

	/* if the timer's period was reprogrammed to zero, drop it */
	if (!timer->period) {
		return;
	}

	/* subtract the time the PET sample took, being careful not to underflow */
	if (timer->period > elapsed_ticks) {
		period = timer->period - elapsed_ticks;
	}

	/* make sure we don't set the next PET sample to happen too soon */
	if (period < min_period_pet_abstime) {
		period = min_period_pet_abstime;
	}

	/*
	 * The PET thread may have taken a while, so read the current time
	 * again before computing the deadline.
	 */
	deadline = mach_absolute_time() + period;

	BUF_INFO(PERF_PET_SCHED, timer->period, period, elapsed_ticks, deadline);

	/* re-schedule the timer, making sure we don't apply slop */
	timer_call_enter(&timer->tcall, deadline, TIMER_CALL_SYS_CRITICAL);

	return;
}

/* turn on all the timers */
void
kperf_timer_go(void)
{
	/* get the PET thread going */
	if (pet_timer_id < kperf_timerc) {
		kperf_pet_config(kperf_timerv[pet_timer_id].actionid);
	}

	uint64_t now = mach_absolute_time();

	for (unsigned int i = 0; i < kperf_timerc; i++) {
		if (kperf_timerv[i].period == 0) {
			continue;
		}

		kperf_timer_schedule(&(kperf_timerv[i]), now);
	}
}

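/*
 * Cancel all timer calls, waiting for any running handlers to finish, and
 * shut down PET.
 */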
void
kperf_timer_stop(void)
{
	for (unsigned int i = 0; i < kperf_timerc; i++) {
		if (kperf_timerv[i].period == 0) {
			continue;
		}

		/* wait for the timer to stop */
		while (kperf_timerv[i].active) {
			;
		}

		timer_call_cancel(&kperf_timerv[i].tcall);
	}

	/* wait for PET to stop, too */
	kperf_pet_config(0);
}

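/*
 * Get or set the ID of the timer that drives PET.  Setting an out-of-range
 * ID disables the PET trigger.
 */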
unsigned int
kperf_timer_get_petid(void)
{
	return pet_timer_id;
}

int
kperf_timer_set_petid(unsigned int timerid)
{
	if (timerid < kperf_timerc) {
		uint64_t min_period;

		min_period = kperf_timer_min_pet_period_abstime();
		if (kperf_timerv[timerid].period < min_period) {
			kperf_timerv[timerid].period = min_period;
		}
		kperf_pet_config(kperf_timerv[timerid].actionid);
	} else {
		/* clear the PET trigger if it's a bogus ID */
		kperf_pet_config(0);
	}

	pet_timer_id = timerid;

	return 0;
}

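/*
 * Get or set a timer's period, in absolute time.  A non-zero period below
 * the allowed minimum is clamped up to it.
 */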
int
kperf_timer_get_period(unsigned int timerid, uint64_t *period_abstime)
{
	if (timerid >= kperf_timerc) {
		return EINVAL;
	}

	*period_abstime = kperf_timerv[timerid].period;
	return 0;
}

int
kperf_timer_set_period(unsigned int timerid, uint64_t period_abstime)
{
	uint64_t min_period;

	if (timerid >= kperf_timerc) {
		return EINVAL;
	}

	if (pet_timer_id == timerid) {
		min_period = kperf_timer_min_pet_period_abstime();
	} else {
		min_period = kperf_timer_min_period_abstime();
	}

	if (period_abstime > 0 && period_abstime < min_period) {
		period_abstime = min_period;
	}

	kperf_timerv[timerid].period = period_abstime;

	/* FIXME: re-program running timers? */

	return 0;
}

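/* get or set the action a timer samples with -- action 0 disables the timer */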
int
kperf_timer_get_action(unsigned int timerid, uint32_t *action)
{
	if (timerid >= kperf_timerc) {
		return EINVAL;
	}

	*action = kperf_timerv[timerid].actionid;
	return 0;
}

int
kperf_timer_set_action(unsigned int timerid, uint32_t action)
{
	if (timerid >= kperf_timerc) {
		return EINVAL;
	}

	kperf_timerv[timerid].actionid = action;
	return 0;
}

unsigned int
kperf_timer_get_count(void)
{
	return kperf_timerc;
}

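/* return all timers to their default, disabled state */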
void
kperf_timer_reset(void)
{
	kperf_timer_set_petid(999);
	kperf_set_pet_idle_rate(KPERF_PET_DEFAULT_IDLE_RATE);
	kperf_set_lightweight_pet(0);
	for (unsigned int i = 0; i < kperf_timerc; i++) {
		kperf_timerv[i].period = 0;
		kperf_timerv[i].actionid = 0;
		kperf_timerv[i].pending_cpus = 0;
	}
}

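/*
 * Grow the timer array to hold `count` timers; shrinking is not supported.
 * A typical configuration sequence (e.g. driven by kperf's sysctl
 * interface) might be:
 *
 *	kperf_timer_set_count(1);
 *	kperf_timer_set_period(0, period_abstime);
 *	kperf_timer_set_action(0, actionid);
 *	kperf_timer_go();
 */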
extern int
kperf_timer_set_count(unsigned int count)
{
	struct kperf_timer *new_timerv = NULL, *old_timerv = NULL;
	unsigned int old_count;

	if (min_period_abstime == 0) {
		nanoseconds_to_absolutetime(KP_MIN_PERIOD_NS, &min_period_abstime);
		nanoseconds_to_absolutetime(KP_MIN_PERIOD_BG_NS, &min_period_bg_abstime);
		nanoseconds_to_absolutetime(KP_MIN_PERIOD_PET_NS, &min_period_pet_abstime);
		nanoseconds_to_absolutetime(KP_MIN_PERIOD_PET_BG_NS,
				&min_period_pet_bg_abstime);
		assert(min_period_abstime > 0);
	}

	if (count == kperf_timerc) {
		return 0;
	}
	if (count > TIMER_MAX) {
		return EINVAL;
	}

	/* TODO: allow shrinking? */
	if (count < kperf_timerc) {
		return EINVAL;
	}

	/*
	 * Make sure kperf is initialized when creating the array for the first
	 * time.
	 */
	if (kperf_timerc == 0) {
		int r;

		/* main kperf */
		if ((r = kperf_init())) {
			return r;
		}
	}

	/*
	 * Shut down any running timers since we will be messing with the timer
	 * call structures.
	 */
	kperf_timer_stop();

	/* create a new array */
	new_timerv = kalloc_tag(count * sizeof(struct kperf_timer),
			VM_KERN_MEMORY_DIAG);
	if (new_timerv == NULL) {
		return ENOMEM;
	}
	old_timerv = kperf_timerv;
	old_count = kperf_timerc;

	if (old_timerv != NULL) {
		bcopy(kperf_timerv, new_timerv,
				kperf_timerc * sizeof(struct kperf_timer));
	}

	/* zero the new entries */
	bzero(&(new_timerv[kperf_timerc]),
			(count - old_count) * sizeof(struct kperf_timer));

	/* (re-)setup the timer call info for all entries */
	for (unsigned int i = 0; i < count; i++) {
		timer_call_setup(&new_timerv[i].tcall, kperf_timer_handler,
				&new_timerv[i]);
	}

	kperf_timerv = new_timerv;
	kperf_timerc = count;

	if (old_timerv != NULL) {
		kfree(old_timerv, old_count * sizeof(struct kperf_timer));
	}

	return 0;
}