/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* Manage time triggers */

#include <mach/mach_types.h>
#include <kern/cpu_data.h> /* current_thread() */
#include <kern/kalloc.h>
#include <sys/errno.h>

#include <machine/machine_routines.h>

#include <chud/chud_xnu.h>

#include <kperf/kperf.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/action.h>
#include <kperf/timetrigger.h>
#include <kperf/kperf_arch.h>
#include <kperf/pet.h>
#include <kperf/sample.h>

/* make up for arm signal deficiencies */
void kperf_signal_handler(void);

/* represents a periodic timer */
struct time_trigger
{
	struct timer_call tcall;
	uint64_t period;
	unsigned actionid;
	volatile unsigned active;

#ifdef USE_SIMPLE_SIGNALS
	/* firing accounting */
	uint64_t fire_count;
	uint64_t last_cpu_fire[MAX_CPUS];
#endif
};

/* the list of timers */
static unsigned timerc = 0;
static struct time_trigger *timerv;
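/* an out-of-range value (>= timerc) means no PET timer is configured */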
static unsigned pet_timer = 999;

/* maximum number of timers we can construct */
#define TIMER_MAX 16

/* minimum interval for a timer (10 usec in nsec) */
#define MIN_TIMER_NS (10000)
/* minimum interval for the pet timer (2 msec in nsec) */
#define MIN_PET_TIMER_NS (2000000)

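/* arm the underlying timer_call to fire one period after 'now'; a zero
 * period means the timer has been disabled, so it is simply not re-armed.
 */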
static void
kperf_timer_schedule( struct time_trigger *trigger, uint64_t now )
{
	uint64_t deadline;

	BUF_INFO1(PERF_TM_SCHED, trigger->period);

	/* if we re-programmed the timer to zero, just drop it */
	if( !trigger->period )
		return;

	/* calculate deadline */
	deadline = now + trigger->period;

	/* re-schedule the timer, making sure we don't apply slop */
	timer_call_enter( &trigger->tcall, deadline, TIMER_CALL_SYS_CRITICAL);
}

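/* per-CPU sample handler: runs in interrupt context on each CPU (via the
 * broadcast IPI, or via the simple-signal fallback below) and samples
 * whatever thread is currently running there.
 */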
static void
kperf_ipi_handler( void *param )
{
	int r;
	int ncpu;
	struct kperf_sample *intbuf = NULL;
	struct kperf_context ctx;
	struct time_trigger *trigger = param;
	task_t task = NULL;

	/* Always cut a tracepoint to show a sample event occurred */
	BUF_DATA1(PERF_TM_HNDLR | DBG_FUNC_START, 0);

	/* In an interrupt, get the interrupt buffer for this CPU */
	intbuf = kperf_intr_sample_buffer();

	/* On a timer, we can see the "real" current thread */
	ctx.cur_pid = 0; /* remove this? */
	ctx.cur_thread = current_thread();

	task = chudxnu_task_for_thread(ctx.cur_thread);
	if (task)
		ctx.cur_pid = chudxnu_pid_for_task(task);

	/* who fired */
	ctx.trigger_type = TRIGGER_TYPE_TIMER;
	ctx.trigger_id = (unsigned)(trigger-timerv); /* compute the timer number */

	ncpu = chudxnu_cpu_number();
	if (ctx.trigger_id == pet_timer && ncpu < machine_info.logical_cpu_max)
		kperf_thread_on_cpus[ncpu] = ctx.cur_thread;

	/* check sampling is on */
	if( kperf_sampling_status() == KPERF_SAMPLING_OFF ) {
		BUF_INFO1(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_OFF);
		return;
	} else if( kperf_sampling_status() == KPERF_SAMPLING_SHUTDOWN ) {
		BUF_INFO1(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_SHUTDOWN);
		return;
	}

	/* call the action -- kernel-only from interrupt, pend user */
	r = kperf_sample( intbuf, &ctx, trigger->actionid, SAMPLE_FLAG_PEND_USER );

	/* end tracepoint is informational */
	BUF_INFO1(PERF_TM_HNDLR | DBG_FUNC_END, r);
}

#ifdef USE_SIMPLE_SIGNALS
/* if we can't pass a (function, arg) pair through a signal properly,
 * we do it the simple way. When a timer fires, we increment a counter
 * in the time trigger and broadcast a generic signal to all cores. Cores
 * search the time trigger list for any triggers for which their last seen
 * firing counter is lower than the current one.
 */
void
kperf_signal_handler(void)
{
	int i, cpu;
	struct time_trigger *tr = NULL;

	OSMemoryBarrier();

	cpu = chudxnu_cpu_number();
	for( i = 0; i < (int) timerc; i++ )
	{
		tr = &timerv[i];
		if( tr->fire_count <= tr->last_cpu_fire[cpu] )
			continue; /* this trigger hasn't fired */

		/* fire the trigger! */
		tr->last_cpu_fire[cpu] = tr->fire_count;
		kperf_ipi_handler( tr );
	}
}
#else
void
kperf_signal_handler(void)
{
	// so we can link...
}
#endif

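/* timer_call callback: mark the trigger active, fan the sample out to all
 * CPUs, then either hand off to the PET thread (which re-arms the timer
 * itself when it finishes) or re-arm the periodic timer directly.
 */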
static void
kperf_timer_handler( void *param0, __unused void *param1 )
{
	struct time_trigger *trigger = param0;
	unsigned ntimer = (unsigned)(trigger - timerv);
	unsigned ncpus = machine_info.logical_cpu_max;

	trigger->active = 1;

	/* don't bother IPIing the other CPUs if we are all shutting down */
	if( kperf_sampling_status() == KPERF_SAMPLING_SHUTDOWN )
		goto deactivate;

	/* clean-up the thread-on-CPUs cache */
	bzero(kperf_thread_on_cpus, ncpus * sizeof(*kperf_thread_on_cpus));

	/* ping all CPUs */
#ifndef USE_SIMPLE_SIGNALS
	kperf_mp_broadcast( kperf_ipi_handler, trigger );
#else
	trigger->fire_count++;
	OSMemoryBarrier();
	kperf_mp_signal();
#endif

	/* release the pet thread? */
	if( ntimer == pet_timer )
	{
		/* timer re-enabled when thread done */
		kperf_pet_thread_go();
	}
	else
	{
		/* re-enable the timer
		 * FIXME: get the current time from elsewhere
		 */
		uint64_t now = mach_absolute_time();
		kperf_timer_schedule( trigger, now );
	}

deactivate:
	trigger->active = 0;
}

/* program the timer from the pet thread */
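/*
 * Illustrative arithmetic (the numbers are made up): with a 10 msec period
 * and a PET pass that consumed 3 msec, the next deadline lands 7 msec from
 * now; a pass that ran longer than the period degrades to the
 * MIN_PET_TIMER_NS floor (2 msec) rather than underflowing.
 */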
int
kperf_timer_pet_set( unsigned timer, uint64_t elapsed_ticks )
{
	static uint64_t pet_min_ticks = 0;

	uint64_t now;
	struct time_trigger *trigger = NULL;
	uint64_t period = 0;
	uint64_t deadline;

	/* compute ns -> ticks */
	if( pet_min_ticks == 0 )
		nanoseconds_to_absolutetime(MIN_PET_TIMER_NS, &pet_min_ticks);

	if( timer != pet_timer )
		panic( "PET setting with bogus ID\n" );

	if( timer >= timerc )
		return EINVAL;

	if( kperf_sampling_status() == KPERF_SAMPLING_OFF ) {
		BUF_INFO1(PERF_PET_END, SAMPLE_OFF);
		return 0;
	}

	// don't reprogram the timer if it's been shut down
	if( kperf_sampling_status() == KPERF_SAMPLING_SHUTDOWN ) {
		BUF_INFO1(PERF_PET_END, SAMPLE_SHUTDOWN);
		return 0;
	}

	/* CHECKME: the PET pass may have taken long enough that it is
	 * worth reading the current time again here.
	 */
	now = mach_absolute_time();
	trigger = &timerv[timer];

	/* if we re-programmed the timer to zero, just drop it */
	if( !trigger->period )
		return 0;

	/* subtract the time the pet sample took, being careful not to underflow */
	if ( trigger->period > elapsed_ticks )
		period = trigger->period - elapsed_ticks;

	/* make sure we don't set the next PET sample to happen too soon */
	if ( period < pet_min_ticks )
		period = pet_min_ticks;

	/* calculate deadline */
	deadline = now + period;

	BUF_INFO(PERF_PET_SCHED, trigger->period, period, elapsed_ticks, deadline);

	/* re-schedule the timer, making sure we don't apply slop */
	timer_call_enter( &trigger->tcall, deadline, TIMER_CALL_SYS_CRITICAL);

	return 0;
}

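/*
 * Typical configuration sequence, as a rough sketch only (the real callers
 * live in kperf's sysctl/control layer, and the values here are made up):
 *
 *	kperf_timer_set_count(1);
 *	kperf_timer_set_period(0, period_ticks);
 *	kperf_timer_set_action(0, actionid);
 *	kperf_timer_go();
 */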
/* turn on all the timers */
extern int
kperf_timer_go(void)
{
	unsigned i;
	uint64_t now = mach_absolute_time();

	for( i = 0; i < timerc; i++ )
	{
		if( timerv[i].period == 0 )
			continue;

		kperf_timer_schedule( &timerv[i], now );
	}

	return 0;
}

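/* cancel all armed timers, waiting for any in-flight handlers to drain */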
extern int
kperf_timer_stop(void)
{
	unsigned i;

	for( i = 0; i < timerc; i++ )
	{
		if( timerv[i].period == 0 )
			continue;

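		/* spin until any handler currently running on this timer finishes */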
		while (timerv[i].active)
			;

		timer_call_cancel( &timerv[i].tcall );
	}

	/* wait for PET to stop, too */
	kperf_pet_thread_wait();

	return 0;
}

unsigned
kperf_timer_get_petid(void)
{
	return pet_timer;
}

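/* select which timer drives PET (profile-every-thread) sampling;
 * an out-of-range ID clears the PET configuration.
 */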
int
kperf_timer_set_petid(unsigned timerid)
{
	struct time_trigger *trigger = NULL;

	/* they can program whatever... */
	pet_timer = timerid;

	/* clear the PET configuration if it's a bogus ID */
	if( pet_timer >= timerc )
	{
		kperf_pet_timer_config( 0, 0 );

		return 0;
	}

	/* update the values */
	trigger = &timerv[pet_timer];
	kperf_pet_timer_config( pet_timer, trigger->actionid );

	return 0;
}

int
kperf_timer_get_period( unsigned timer, uint64_t *period )
{
	if( timer >= timerc )
		return EINVAL;

	*period = timerv[timer].period;

	return 0;
}

int
kperf_timer_set_period( unsigned timer, uint64_t period )
{
	static uint64_t min_timer_ticks = 0;

	if( timer >= timerc )
		return EINVAL;

	/* compute ns -> ticks */
	if( min_timer_ticks == 0 )
		nanoseconds_to_absolutetime(MIN_TIMER_NS, &min_timer_ticks);

	/* clamp a non-zero period to the minimum timer interval */
	if( period && (period < min_timer_ticks) )
		period = min_timer_ticks;
316670eb A |
383 | |
384 | timerv[timer].period = period; | |
385 | ||
386 | /* FIXME: re-program running timers? */ | |
387 | ||
388 | return 0; | |
389 | } | |
390 | ||
39236c6e A |
391 | int |
392 | kperf_timer_get_action( unsigned timer, uint32_t *action ) | |
393 | { | |
394 | if( timer >= timerc ) | |
395 | return EINVAL; | |
396 | ||
397 | *action = timerv[timer].actionid; | |
398 | ||
399 | return 0; | |
400 | } | |
401 | ||
402 | int | |
403 | kperf_timer_set_action( unsigned timer, uint32_t action ) | |
404 | { | |
405 | if( timer >= timerc ) | |
406 | return EINVAL; | |
407 | ||
408 | timerv[timer].actionid = action; | |
409 | ||
410 | return 0; | |
411 | } | |
412 | ||
316670eb A |
413 | unsigned |
414 | kperf_timer_get_count(void) | |
415 | { | |
416 | return timerc; | |
417 | } | |
418 | ||
419 | static void | |
420 | setup_timer_call( struct time_trigger *trigger ) | |
421 | { | |
422 | timer_call_setup( &trigger->tcall, kperf_timer_handler, trigger ); | |
423 | } | |
424 | ||
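/* grow (never shrink) the timer array to 'count' entries, preserving any
 * existing configuration; the first call also brings up kperf and the PET
 * thread.
 */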
extern int
kperf_timer_set_count(unsigned count)
{
	struct time_trigger *new_timerv = NULL, *old_timerv = NULL;
	unsigned old_count, i;

	/* easy no-op */
	if( count == timerc )
		return 0;

	/* TODO: allow shrinking? */
	if( count < timerc )
		return EINVAL;

	/* cap it for good measure */
	if( count > TIMER_MAX )
		return EINVAL;

	/* creating the timer array for the first time. create a few
	 * more things, too.
	 */
	if( timerc == 0 )
	{
		int r;

		/* main kperf */
		r = kperf_init();
		if( r )
			return r;

		/* get the PET thread going */
		r = kperf_pet_init();
		if( r )
			return r;
	}

	/* first shut down any running timers since we will be messing
	 * with the timer call structures
	 */
	if( kperf_timer_stop() )
		return EBUSY;

	/* create a new array */
	new_timerv = kalloc( count * sizeof(*new_timerv) );
	if( new_timerv == NULL )
		return ENOMEM;

	old_timerv = timerv;
	old_count = timerc;

	if( old_timerv != NULL )
		bcopy( timerv, new_timerv, timerc * sizeof(*timerv) );

	/* zero the new entries */
	bzero( &new_timerv[timerc], (count - old_count) * sizeof(*new_timerv) );

	/* (re-)setup the timer call info for all entries */
	for( i = 0; i < count; i++ )
		setup_timer_call( &new_timerv[i] );

	timerv = new_timerv;
	timerc = count;

	if( old_timerv != NULL )
		kfree( old_timerv, old_count * sizeof(*timerv) );

	return 0;
}