/* osfmk/kperf/kperf_timer.c (apple/xnu, xnu-3789.1.32) */
/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* Manage timers */

#include <mach/mach_types.h>
#include <kern/cpu_data.h> /* current_thread() */
#include <kern/kalloc.h>
#include <sys/errno.h>
#include <sys/vm.h>
#include <sys/ktrace.h>

#include <machine/machine_routines.h>
#if defined(__x86_64__)
#include <i386/mp.h>
#endif /* defined(__x86_64__) */

#include <kperf/kperf.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/action.h>
#include <kperf/kperf_timer.h>
#include <kperf/kperf_arch.h>
#include <kperf/pet.h>
#include <kperf/sample.h>

/* the list of timers */
struct kperf_timer *kperf_timerv = NULL;
unsigned int kperf_timerc = 0;

/* out-of-range sentinel (always checked against kperf_timerc) meaning PET is disabled */
static unsigned int pet_timer_id = 999;

/* maximum number of timers we can construct */
#define TIMER_MAX (16)

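/*
 * Floors on timer periods.  Coarser floors apply while a background ktrace
 * session owns the tools (see the helpers below) -- presumably to bound the
 * sampling overhead a background session can impose.
 */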
#if defined(__x86_64__)

#define MIN_PERIOD_NS        (20 * NSEC_PER_USEC)
#define MIN_PERIOD_BG_NS     (10 * NSEC_PER_MSEC)
#define MIN_PERIOD_PET_NS    (2 * NSEC_PER_MSEC)
#define MIN_PERIOD_PET_BG_NS (10 * NSEC_PER_MSEC)

#else /* defined(__x86_64__) */
#error "unsupported architecture"
#endif /* defined(__x86_64__) */

static uint64_t min_period_abstime;
static uint64_t min_period_bg_abstime;
static uint64_t min_period_pet_abstime;
static uint64_t min_period_pet_bg_abstime;

static uint64_t
kperf_timer_min_period_abstime(void)
{
	if (ktrace_background_active()) {
		return min_period_bg_abstime;
	} else {
		return min_period_abstime;
	}
}

static uint64_t
kperf_timer_min_pet_period_abstime(void)
{
	if (ktrace_background_active()) {
		return min_period_pet_bg_abstime;
	} else {
		return min_period_pet_abstime;
	}
}

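/*
 * Arm a one-shot timer_call for `timer`.  These timers do not auto-repeat:
 * each firing of kperf_timer_handler() schedules the next deadline itself
 * (or defers that to the PET thread).
 */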
static void
kperf_timer_schedule(struct kperf_timer *timer, uint64_t now)
{
	BUF_INFO(PERF_TM_SCHED, timer->period);

	/* if we re-programmed the timer to zero, just drop it */
	if (timer->period == 0) {
		return;
	}

	/* calculate deadline */
	uint64_t deadline = now + timer->period;

	/* re-schedule the timer, making sure we don't apply slop */
	timer_call_enter(&timer->tcall, deadline, TIMER_CALL_SYS_CRITICAL);
}

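/*
 * Per-CPU sample handler, run in interrupt context on each CPU reached by
 * kperf_mp_broadcast_running() when a timer fires.  It samples whatever
 * thread is currently on-core into the per-CPU interrupt sample buffer.
 */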
void
kperf_ipi_handler(void *param)
{
	struct kperf_context ctx;
	struct kperf_timer *timer = param;

	assert(timer != NULL);

	/* Always cut a tracepoint to show a sample event occurred */
	BUF_DATA(PERF_TM_HNDLR | DBG_FUNC_START, 0);

	int ncpu = cpu_number();

	struct kperf_sample *intbuf = kperf_intr_sample_buffer();

	/* On a timer, we can see the "real" current thread */
	ctx.cur_thread = current_thread();
	ctx.cur_pid = task_pid(get_threadtask(ctx.cur_thread));

	/* who fired */
	ctx.trigger_type = TRIGGER_TYPE_TIMER;
	ctx.trigger_id = (unsigned int)(timer - kperf_timerv);

	if (ctx.trigger_id == pet_timer_id && ncpu < machine_info.logical_cpu_max) {
		kperf_thread_on_cpus[ncpu] = ctx.cur_thread;
	}

	/* make sure sampling is on */
	unsigned int status = kperf_sampling_status();
	if (status == KPERF_SAMPLING_OFF) {
		BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_OFF);
		return;
	} else if (status == KPERF_SAMPLING_SHUTDOWN) {
		BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_SHUTDOWN);
		return;
	}

	/* call the action -- kernel-only from interrupt, pend user */
	int r = kperf_sample(intbuf, &ctx, timer->actionid, SAMPLE_FLAG_PEND_USER);

	/* end tracepoint is informational */
	BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, r);

#if defined(__x86_64__)
	/* clear this CPU's bit in the timer's pending-broadcast mask */
	(void)atomic_bit_clear(&(timer->pending_cpus), ncpu, __ATOMIC_RELAXED);
#endif /* defined(__x86_64__) */
}

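/*
 * timer_call callback for a kperf timer.  Fans the sample out to all CPUs,
 * then either re-arms itself or, for the PET timer, leaves re-arming to the
 * PET thread (see kperf_timer_pet_rearm below).
 */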
static void
kperf_timer_handler(void *param0, __unused void *param1)
{
	struct kperf_timer *timer = param0;
	unsigned int ntimer = (unsigned int)(timer - kperf_timerv);
	unsigned int ncpus = machine_info.logical_cpu_max;

	timer->active = 1;

	/* don't bother IPIing the other CPUs if sampling is shutting down */
	if (kperf_sampling_status() == KPERF_SAMPLING_SHUTDOWN) {
		goto deactivate;
	}

	BUF_DATA(PERF_TM_FIRE, ntimer, ntimer == pet_timer_id, timer->period,
	         timer->actionid);

	if (ntimer == pet_timer_id) {
		kperf_pet_fire_before();

		/* clean up the thread-on-CPUs cache */
		bzero(kperf_thread_on_cpus, ncpus * sizeof(*kperf_thread_on_cpus));
	}

	/* ping all CPUs */
	kperf_mp_broadcast_running(timer);

	/* release the PET thread? */
	if (ntimer == pet_timer_id) {
		/* PET mode is responsible for rearming the timer */
		kperf_pet_fire_after();
	} else {
		/*
		 * FIXME: Get the current time from elsewhere. The next
		 * timer's period now includes the time taken to reach this
		 * point. This causes a bias towards longer sampling periods
		 * than requested.
		 */
		kperf_timer_schedule(timer, mach_absolute_time());
	}

deactivate:
	timer->active = 0;
}

/* re-program the PET timer; called from the PET thread */
void
kperf_timer_pet_rearm(uint64_t elapsed_ticks)
{
	struct kperf_timer *timer = NULL;
	uint64_t period = 0;
	uint64_t deadline;

	/*
	 * If pet_timer_id is invalid, PET has been disabled, so this should
	 * do nothing.
	 */
	if (pet_timer_id >= kperf_timerc) {
		return;
	}

	unsigned int status = kperf_sampling_status();
	/* do not reprogram the timer if it has been shut down or sampling is off */
	if (status == KPERF_SAMPLING_OFF) {
		BUF_INFO(PERF_PET_END, SAMPLE_OFF);
		return;
	} else if (status == KPERF_SAMPLING_SHUTDOWN) {
		BUF_INFO(PERF_PET_END, SAMPLE_SHUTDOWN);
		return;
	}

	timer = &(kperf_timerv[pet_timer_id]);

	/* if we re-programmed the timer to zero, just drop it */
	if (!timer->period) {
		return;
	}

	/* subtract the time the PET sample took, being careful not to underflow */
	if (timer->period > elapsed_ticks) {
		period = timer->period - elapsed_ticks;
	}

	/* make sure we don't set the next PET sample to happen too soon */
	if (period < min_period_pet_abstime) {
		period = min_period_pet_abstime;
	}

	/*
	 * The PET thread may have run for a while, so take the time again
	 * rather than reusing a stale timestamp.
	 */
	deadline = mach_absolute_time() + period;

	BUF_INFO(PERF_PET_SCHED, timer->period, period, elapsed_ticks, deadline);

	/* re-schedule the timer, making sure we don't apply slop */
	timer_call_enter(&(timer->tcall), deadline, TIMER_CALL_SYS_CRITICAL);
}

/* turn on all the timers */
void
kperf_timer_go(void)
{
	/* get the PET thread going */
	if (pet_timer_id < kperf_timerc) {
		kperf_pet_config(kperf_timerv[pet_timer_id].actionid);
	}

	uint64_t now = mach_absolute_time();

	for (unsigned int i = 0; i < kperf_timerc; i++) {
		if (kperf_timerv[i].period == 0) {
			continue;
		}

		kperf_timer_schedule(&(kperf_timerv[i]), now);
	}
}

void
kperf_timer_stop(void)
{
	for (unsigned int i = 0; i < kperf_timerc; i++) {
		if (kperf_timerv[i].period == 0) {
			continue;
		}

		/* spin until any in-flight handler for this timer finishes */
		while (kperf_timerv[i].active) {
			;
		}

		timer_call_cancel(&(kperf_timerv[i].tcall));
	}

	/* wait for PET to stop, too */
	kperf_pet_config(0);
}

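/*
 * The PET ("profile every thread") trigger is selected by timer ID: setting
 * an in-range ID makes that timer drive PET; any out-of-range ID disables it.
 */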
unsigned int
kperf_timer_get_petid(void)
{
	return pet_timer_id;
}

int
kperf_timer_set_petid(unsigned int timerid)
{
	if (timerid < kperf_timerc) {
		uint64_t min_period;

		min_period = kperf_timer_min_pet_period_abstime();
		if (kperf_timerv[timerid].period < min_period) {
			kperf_timerv[timerid].period = min_period;
		}
		kperf_pet_config(kperf_timerv[timerid].actionid);
	} else {
		/* clear the PET trigger if it's a bogus ID */
		kperf_pet_config(0);
	}

	pet_timer_id = timerid;

	return 0;
}

int
kperf_timer_get_period(unsigned int timerid, uint64_t *period_abstime)
{
	if (timerid >= kperf_timerc) {
		return EINVAL;
	}

	*period_abstime = kperf_timerv[timerid].period;
	return 0;
}

int
kperf_timer_set_period(unsigned int timerid, uint64_t period_abstime)
{
	uint64_t min_period;

	if (timerid >= kperf_timerc) {
		return EINVAL;
	}

	if (pet_timer_id == timerid) {
		min_period = kperf_timer_min_pet_period_abstime();
	} else {
		min_period = kperf_timer_min_period_abstime();
	}

	/* clamp non-zero periods to the floor; zero disables the timer */
	if (period_abstime > 0 && period_abstime < min_period) {
		period_abstime = min_period;
	}

	kperf_timerv[timerid].period = period_abstime;

	/* FIXME: re-program running timers? */

	return 0;
}

int
kperf_timer_get_action(unsigned int timerid, uint32_t *action)
{
	if (timerid >= kperf_timerc) {
		return EINVAL;
	}

	*action = kperf_timerv[timerid].actionid;
	return 0;
}

int
kperf_timer_set_action(unsigned int timerid, uint32_t action)
{
	if (timerid >= kperf_timerc) {
		return EINVAL;
	}

	kperf_timerv[timerid].actionid = action;
	return 0;
}

unsigned int
kperf_timer_get_count(void)
{
	return kperf_timerc;
}

void
kperf_timer_reset(void)
{
	/* 999 is the out-of-range sentinel, so this disables PET */
	kperf_timer_set_petid(999);
	kperf_set_pet_idle_rate(KPERF_PET_DEFAULT_IDLE_RATE);
	kperf_set_lightweight_pet(0);
	for (unsigned int i = 0; i < kperf_timerc; i++) {
		kperf_timerv[i].period = 0;
		kperf_timerv[i].actionid = 0;
#if defined(__x86_64__)
		kperf_timerv[i].pending_cpus = 0;
#endif /* defined(__x86_64__) */
	}
}

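/*
 * Resize the timer array.  Lazily converts the minimum-period constants to
 * absolute time on first use, initializes kperf itself when going from zero
 * timers, and stops all timers before swapping in the new array.  Shrinking
 * is currently rejected (see the TODO below).
 */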
extern int
kperf_timer_set_count(unsigned int count)
{
	struct kperf_timer *new_timerv = NULL, *old_timerv = NULL;
	unsigned int old_count;

	if (min_period_abstime == 0) {
		nanoseconds_to_absolutetime(MIN_PERIOD_NS, &min_period_abstime);
		nanoseconds_to_absolutetime(MIN_PERIOD_BG_NS, &min_period_bg_abstime);
		nanoseconds_to_absolutetime(MIN_PERIOD_PET_NS, &min_period_pet_abstime);
		nanoseconds_to_absolutetime(MIN_PERIOD_PET_BG_NS,
		                            &min_period_pet_bg_abstime);
		assert(min_period_abstime > 0);
	}

	if (count == kperf_timerc) {
		return 0;
	}
	if (count > TIMER_MAX) {
		return EINVAL;
	}

	/* TODO: allow shrinking? */
	if (count < kperf_timerc) {
		return EINVAL;
	}

	/*
	 * Make sure kperf is initialized when creating the array for the first
	 * time.
	 */
	if (kperf_timerc == 0) {
		int r;

		/* main kperf */
		if ((r = kperf_init())) {
			return r;
		}
	}

	/*
	 * Shut down any running timers since we will be messing with the timer
	 * call structures.
	 */
	kperf_timer_stop();

	/* create a new array */
	new_timerv = kalloc_tag(count * sizeof(struct kperf_timer),
	                        VM_KERN_MEMORY_DIAG);
	if (new_timerv == NULL) {
		return ENOMEM;
	}
	old_timerv = kperf_timerv;
	old_count = kperf_timerc;

	if (old_timerv != NULL) {
		bcopy(kperf_timerv, new_timerv,
		      kperf_timerc * sizeof(struct kperf_timer));
	}

	/* zero the new entries */
	bzero(&(new_timerv[kperf_timerc]),
	      (count - old_count) * sizeof(struct kperf_timer));

	/* (re-)setup the timer call info for all entries */
	for (unsigned int i = 0; i < count; i++) {
		timer_call_setup(&(new_timerv[i].tcall), kperf_timer_handler, &(new_timerv[i]));
	}

	kperf_timerv = new_timerv;
	kperf_timerc = count;

	if (old_timerv != NULL) {
		kfree(old_timerv, old_count * sizeof(struct kperf_timer));
	}

	return 0;
}
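
/*
 * Sketch of a typical configuration sequence using the interfaces above,
 * from a context that may block (illustrative only -- the real callers are
 * presumably kperf's sysctl handlers, which are not shown here):
 *
 *	kperf_timer_set_count(1);		// allocate one timer
 *	kperf_timer_set_period(0, period);	// period in mach absolute time
 *	kperf_timer_set_action(0, actionid);	// action configured elsewhere
 *	kperf_timer_go();			// arm the timer_calls
 *	...
 *	kperf_timer_stop();			// quiesce and cancel
 *	kperf_timer_reset();			// back to defaults
 */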