/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <kern/cpu_data.h> /* current_thread() */
#include <kern/kalloc.h>
#include <sys/errno.h>
#include <sys/ktrace.h>

#include <machine/machine_routines.h>
#if defined(__x86_64__)
#include <i386/mp.h>
#endif /* defined(__x86_64__) */

#include <kperf/kperf.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/action.h>
#include <kperf/kperf_timer.h>
#include <kperf/kperf_arch.h>
#include <kperf/pet.h>
#include <kperf/sample.h>

/* the list of timers */
struct kperf_timer *kperf_timerv = NULL;
unsigned int kperf_timerc = 0;

static unsigned int pet_timer_id = 999;

/* maximum number of timers we can construct */
#define TIMER_MAX (16)

#if defined(__x86_64__)

#define MIN_PERIOD_NS        (20 * NSEC_PER_USEC)
#define MIN_PERIOD_BG_NS     (10 * NSEC_PER_MSEC)
#define MIN_PERIOD_PET_NS    (2 * NSEC_PER_MSEC)
#define MIN_PERIOD_PET_BG_NS (10 * NSEC_PER_MSEC)

#else /* defined(__x86_64__) */
#error "unsupported architecture"
#endif /* defined(__x86_64__) */

static uint64_t min_period_abstime;
static uint64_t min_period_bg_abstime;
static uint64_t min_period_pet_abstime;
static uint64_t min_period_pet_bg_abstime;

static uint64_t
kperf_timer_min_period_abstime(void)
{
	if (ktrace_background_active()) {
		return min_period_bg_abstime;
	} else {
		return min_period_abstime;
	}
}

static uint64_t
kperf_timer_min_pet_period_abstime(void)
{
	if (ktrace_background_active()) {
		return min_period_pet_bg_abstime;
	} else {
		return min_period_pet_abstime;
	}
}

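/*
 * Note: when ktrace is in background mode, the effective minimum periods jump
 * to the *_BG_NS values above (10ms, i.e. at most 100Hz), so a background
 * tool samples far less aggressively than a foreground one. Comparisons are
 * done in Mach absolute time units; the nanosecond constants are converted
 * once, in kperf_timer_set_count() below.
 */
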
static void
kperf_timer_schedule(struct kperf_timer *timer, uint64_t now)
{
	BUF_INFO(PERF_TM_SCHED, timer->period);

	/* if we re-programmed the timer to zero, just drop it */
	if (timer->period == 0) {
		return;
	}

	/* calculate deadline */
	uint64_t deadline = now + timer->period;

	/* re-schedule the timer, making sure we don't apply slop */
	timer_call_enter(&timer->tcall, deadline, TIMER_CALL_SYS_CRITICAL);
}

void
kperf_ipi_handler(void *param)
{
	struct kperf_context ctx;
	struct kperf_timer *timer = param;

	assert(timer != NULL);

	/* Always cut a tracepoint to show a sample event occurred */
	BUF_DATA(PERF_TM_HNDLR | DBG_FUNC_START, 0);

	int ncpu = cpu_number();

	struct kperf_sample *intbuf = kperf_intr_sample_buffer();

	/* On a timer, we can see the "real" current thread */
	ctx.cur_thread = current_thread();
	ctx.cur_pid = task_pid(get_threadtask(ctx.cur_thread));

	/* who fired */
	ctx.trigger_type = TRIGGER_TYPE_TIMER;
	ctx.trigger_id = (unsigned int)(timer - kperf_timerv);

	if (ctx.trigger_id == pet_timer_id && ncpu < machine_info.logical_cpu_max) {
		kperf_thread_on_cpus[ncpu] = ctx.cur_thread;
	}

	/* make sure sampling is on */
	unsigned int status = kperf_sampling_status();
	if (status == KPERF_SAMPLING_OFF) {
		BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_OFF);
		return;
	} else if (status == KPERF_SAMPLING_SHUTDOWN) {
		BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_SHUTDOWN);
		return;
	}

	/* call the action -- kernel-only from interrupt, pend user */
	int r = kperf_sample(intbuf, &ctx, timer->actionid, SAMPLE_FLAG_PEND_USER);

	/* end tracepoint is informational */
	BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, r);

#if defined(__x86_64__)
	(void)atomic_bit_clear(&(timer->pending_cpus), ncpu, __ATOMIC_RELAXED);
#endif /* defined(__x86_64__) */
}

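/*
 * On x86_64, the sender side (kperf_mp_broadcast_running(), defined in the
 * arch-specific code) is assumed to set this CPU's bit in
 * timer->pending_cpus before the IPI arrives; the atomic_bit_clear() above
 * is the matching acknowledgement once the sample has been taken. A minimal
 * sketch of that assumed pairing, with a hypothetical IPI primitive:
 *
 *     atomic_bit_set(&timer->pending_cpus, ncpu, __ATOMIC_RELAXED);
 *     send_sample_ipi(ncpu);    // hypothetical; names are illustrative
 */
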
void
kperf_timer_handler(void *param0, __unused void *param1)
{
	struct kperf_timer *timer = param0;
	unsigned int ntimer = (unsigned int)(timer - kperf_timerv);
	unsigned int ncpus = machine_info.logical_cpu_max;

	timer->active = 1;

	/* along the lines of do not ipi if we are all shutting down */
	if (kperf_sampling_status() == KPERF_SAMPLING_SHUTDOWN) {
		goto deactivate;
	}

	BUF_DATA(PERF_TM_FIRE, ntimer, ntimer == pet_timer_id, timer->period,
	    timer->actionid);

	if (ntimer == pet_timer_id) {
		kperf_pet_fire_before();

		/* clean-up the thread-on-CPUs cache */
		bzero(kperf_thread_on_cpus, ncpus * sizeof(*kperf_thread_on_cpus));
	}

	/* ping all CPUs */
	kperf_mp_broadcast_running(timer);

	/* release the pet thread? */
	if (ntimer == pet_timer_id) {
		/* PET mode is responsible for rearming the timer */
		kperf_pet_fire_after();
	} else {
		/*
		 * FIXME: Get the current time from elsewhere. The next
		 * timer's period now includes the time taken to reach this
		 * point. This causes a bias towards longer sampling periods
		 * than requested.
		 */
		kperf_timer_schedule(timer, mach_absolute_time());
	}

deactivate:
	timer->active = 0;
}

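/*
 * Illustration of the FIXME above: with a requested period of P ticks and t
 * ticks spent between the timer firing and the mach_absolute_time() call,
 * the timer actually fires every P + t ticks, so the observed sample rate is
 * slightly lower than requested.
 */
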
/* program the timer from the PET thread */
void
kperf_timer_pet_rearm(uint64_t elapsed_ticks)
{
	struct kperf_timer *timer = NULL;
	uint64_t period = 0;
	uint64_t deadline;

	/*
	 * If the pet_timer_id is invalid, it has been disabled, so this should
	 * do nothing.
	 */
	if (pet_timer_id >= kperf_timerc) {
		return;
	}

	unsigned int status = kperf_sampling_status();
	/* do not reprogram the timer if it has been shutdown or sampling is off */
	if (status == KPERF_SAMPLING_OFF) {
		BUF_INFO(PERF_PET_END, SAMPLE_OFF);
		return;
	} else if (status == KPERF_SAMPLING_SHUTDOWN) {
		BUF_INFO(PERF_PET_END, SAMPLE_SHUTDOWN);
		return;
	}

	timer = &(kperf_timerv[pet_timer_id]);

	/* if we re-programmed the timer to zero, just drop it */
	if (!timer->period) {
		return;
	}

	/* subtract the time the pet sample took being careful not to underflow */
	if (timer->period > elapsed_ticks) {
		period = timer->period - elapsed_ticks;
	}

	/* make sure we don't set the next PET sample to happen too soon */
	if (period < min_period_pet_abstime) {
		period = min_period_pet_abstime;
	}

	/* we probably took so long in the PET thread, it makes sense to take
	 * the time again.
	 */
	deadline = mach_absolute_time() + period;

	BUF_INFO(PERF_PET_SCHED, timer->period, period, elapsed_ticks, deadline);

	/* re-schedule the timer, making sure we don't apply slop */
	timer_call_enter(&(timer->tcall), deadline, TIMER_CALL_SYS_CRITICAL);
}

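/*
 * Worked example with illustrative numbers: if timer->period corresponds to
 * 10ms and the PET pass took 3ms worth of ticks, the next deadline lands 7ms
 * out. If the pass took 10ms or more, period stays 0 and is then clamped up
 * to min_period_pet_abstime (2ms on x86_64, from MIN_PERIOD_PET_NS), so PET
 * can never rearm itself immediately.
 */
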
/* turn on all the timers */
void
kperf_timer_go(void)
{
	/* get the PET thread going */
	if (pet_timer_id < kperf_timerc) {
		kperf_pet_config(kperf_timerv[pet_timer_id].actionid);
	}

	uint64_t now = mach_absolute_time();

	for (unsigned int i = 0; i < kperf_timerc; i++) {
		if (kperf_timerv[i].period == 0) {
			continue;
		}

		kperf_timer_schedule(&(kperf_timerv[i]), now);
	}
}

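/*
 * A sketch of the typical configuration sequence (in practice this is driven
 * through the kperf sysctl interface rather than called directly; action 1
 * is assumed to be configured already):
 *
 *     uint64_t period;
 *     nanoseconds_to_absolutetime(1 * NSEC_PER_MSEC, &period);
 *     kperf_timer_set_count(1);
 *     kperf_timer_set_period(0, period);
 *     kperf_timer_set_action(0, 1);
 *     kperf_timer_go();
 */
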
void
kperf_timer_stop(void)
{
	for (unsigned int i = 0; i < kperf_timerc; i++) {
		if (kperf_timerv[i].period == 0) {
			continue;
		}

		/* wait for the timer to stop */
		while (kperf_timerv[i].active);

		timer_call_cancel(&(kperf_timerv[i].tcall));
	}

	/* wait for PET to stop, too */
	kperf_pet_config(0);
}

unsigned int
kperf_timer_get_petid(void)
{
	return pet_timer_id;
}

int
kperf_timer_set_petid(unsigned int timerid)
{
	if (timerid < kperf_timerc) {
		uint64_t min_period;

		min_period = kperf_timer_min_pet_period_abstime();
		if (kperf_timerv[timerid].period < min_period) {
			kperf_timerv[timerid].period = min_period;
		}
		kperf_pet_config(kperf_timerv[timerid].actionid);
	} else {
		/* clear the PET trigger if it's a bogus ID */
		kperf_pet_config(0);
	}

	pet_timer_id = timerid;

	return 0;
}

int
kperf_timer_get_period(unsigned int timerid, uint64_t *period_abstime)
{
	if (timerid >= kperf_timerc) {
		return EINVAL;
	}

	*period_abstime = kperf_timerv[timerid].period;
	return 0;
}

int
kperf_timer_set_period(unsigned int timerid, uint64_t period_abstime)
{
	uint64_t min_period;

	if (timerid >= kperf_timerc) {
		return EINVAL;
	}

	if (pet_timer_id == timerid) {
		min_period = kperf_timer_min_pet_period_abstime();
	} else {
		min_period = kperf_timer_min_period_abstime();
	}

	if (period_abstime > 0 && period_abstime < min_period) {
		period_abstime = min_period;
	}

	kperf_timerv[timerid].period = period_abstime;

	/* FIXME: re-program running timers? */

	return 0;
}

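/*
 * Example of the clamping above: on x86_64, requesting a 5us period on a
 * non-PET timer is raised to MIN_PERIOD_NS (20us):
 *
 *     uint64_t abstime;
 *     nanoseconds_to_absolutetime(5 * NSEC_PER_USEC, &abstime);
 *     kperf_timer_set_period(0, abstime);    // stored period is >= 20us
 *
 * A period of 0 passes through untouched, since 0 means "disabled".
 */
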
int
kperf_timer_get_action(unsigned int timerid, uint32_t *action)
{
	if (timerid >= kperf_timerc) {
		return EINVAL;
	}

	*action = kperf_timerv[timerid].actionid;
	return 0;
}

int
kperf_timer_set_action(unsigned int timerid, uint32_t action)
{
	if (timerid >= kperf_timerc) {
		return EINVAL;
	}

	kperf_timerv[timerid].actionid = action;
	return 0;
}

unsigned int
kperf_timer_get_count(void)
{
	return kperf_timerc;
}

void
kperf_timer_reset(void)
{
	kperf_timer_set_petid(999);
	kperf_set_pet_idle_rate(KPERF_PET_DEFAULT_IDLE_RATE);
	kperf_set_lightweight_pet(0);
	for (unsigned int i = 0; i < kperf_timerc; i++) {
		kperf_timerv[i].period = 0;
		kperf_timerv[i].actionid = 0;
#if defined(__x86_64__)
		kperf_timerv[i].pending_cpus = 0;
#endif /* defined(__x86_64__) */
	}
}

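/*
 * Note that kperf_timer_reset() only clears configuration; the timer array
 * itself stays allocated. kperf_timer_set_count() below is the only place
 * the array is (re)sized.
 */
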
int
kperf_timer_set_count(unsigned int count)
{
	struct kperf_timer *new_timerv = NULL, *old_timerv = NULL;
	unsigned int old_count;

	if (min_period_abstime == 0) {
		nanoseconds_to_absolutetime(MIN_PERIOD_NS, &min_period_abstime);
		nanoseconds_to_absolutetime(MIN_PERIOD_BG_NS, &min_period_bg_abstime);
		nanoseconds_to_absolutetime(MIN_PERIOD_PET_NS, &min_period_pet_abstime);
		nanoseconds_to_absolutetime(MIN_PERIOD_PET_BG_NS,
		    &min_period_pet_bg_abstime);
		assert(min_period_abstime > 0);
	}

	if (count == kperf_timerc) {
		return 0;
	}
	if (count > TIMER_MAX) {
		return EINVAL;
	}

	/* TODO: allow shrinking? */
	if (count < kperf_timerc) {
		return EINVAL;
	}

	/*
	 * Make sure kperf is initialized when creating the array for the first
	 * time.
	 */
	if (kperf_timerc == 0) {
		int r;

		if ((r = kperf_init())) {
			return r;
		}
	}

	/*
	 * Shut down any running timers since we will be messing with the timer
	 * call structures.
	 */
	kperf_timer_stop();

	/* create a new array */
	new_timerv = kalloc_tag(count * sizeof(struct kperf_timer),
	    VM_KERN_MEMORY_DIAG);
	if (new_timerv == NULL) {
		return ENOMEM;
	}
	old_timerv = kperf_timerv;
	old_count = kperf_timerc;

	if (old_timerv != NULL) {
		bcopy(kperf_timerv, new_timerv,
		    kperf_timerc * sizeof(struct kperf_timer));
	}

	/* zero the new entries */
	bzero(&(new_timerv[kperf_timerc]),
	    (count - old_count) * sizeof(struct kperf_timer));

	/* (re-)setup the timer call info for all entries */
	for (unsigned int i = 0; i < count; i++) {
		timer_call_setup(&(new_timerv[i].tcall), kperf_timer_handler,
		    &(new_timerv[i]));
	}

	kperf_timerv = new_timerv;
	kperf_timerc = count;

	if (old_timerv != NULL) {
		kfree(old_timerv, old_count * sizeof(struct kperf_timer));
	}

	return 0;
}

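/*
 * Example of the sizing rules above: after kperf_timer_set_count(4), a later
 * kperf_timer_set_count(2) fails with EINVAL (shrinking is still a TODO),
 * kperf_timer_set_count(4) is a no-op returning 0, and any count above
 * TIMER_MAX (16) is rejected with EINVAL.
 */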