/*
 * Copyright (c) 2018 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28 | ||
29 | #include <stdint.h> | |
30 | ||
31 | #include <kern/thread.h> | |
32 | ||
33 | #include <kperf/action.h> | |
34 | #include <kperf/buffer.h> | |
35 | #include <kperf/kperf.h> | |
36 | #include <kperf/lazy.h> | |
37 | #include <kperf/sample.h> | |
38 | ||
unsigned int kperf_lazy_wait_action = 0;
unsigned int kperf_lazy_cpu_action = 0;
uint64_t kperf_lazy_wait_time_threshold = 0;
uint64_t kperf_lazy_cpu_time_threshold = 0;

void
kperf_lazy_reset(void)
{
	kperf_lazy_wait_action = 0;
	kperf_lazy_wait_time_threshold = 0;
	kperf_lazy_cpu_action = 0;
	kperf_lazy_cpu_time_threshold = 0;
	kperf_on_cpu_update();
}

void
kperf_lazy_off_cpu(thread_t thread)
{
	/* try to lazily sample the CPU if the thread was pre-empted */
	if ((thread->reason & AST_SCHEDULING) != 0) {
		kperf_lazy_cpu_sample(thread, 0, 0);
	}
}

void
kperf_lazy_make_runnable(thread_t thread, bool in_interrupt)
{
	assert(thread->last_made_runnable_time != THREAD_NOT_RUNNABLE);
	/* ignore threads that race between waiting and waking up */
	if (thread->last_run_time > thread->last_made_runnable_time) {
		return;
	}

	uint64_t wait_time = thread_get_last_wait_duration(thread);
	if (wait_time > kperf_lazy_wait_time_threshold) {
		BUF_DATA(PERF_LZ_MKRUNNABLE, (uintptr_t)thread_tid(thread),
		    thread->sched_pri, in_interrupt ? 1 : 0);
	}
}

void
kperf_lazy_wait_sample(thread_t thread, thread_continue_t continuation,
    uintptr_t *starting_fp)
{
	/* ignore idle threads */
	if (thread->last_made_runnable_time == THREAD_NOT_RUNNABLE) {
		return;
	}
	/* ignore invalid made runnable times */
	if (thread->last_made_runnable_time < thread->last_run_time) {
		return;
	}

	/* take a sample if thread was waiting for longer than threshold */
	uint64_t wait_time = thread_get_last_wait_duration(thread);
	if (wait_time > kperf_lazy_wait_time_threshold) {
		uint64_t time_now = mach_absolute_time();
		timer_update(&thread->runnable_timer, time_now);
		timer_update(&thread->system_timer, time_now);

		uint64_t runnable_time = timer_grab(&thread->runnable_timer);
		uint64_t running_time = timer_grab(&thread->user_timer) +
		    timer_grab(&thread->system_timer);

		BUF_DATA(PERF_LZ_WAITSAMPLE, wait_time, runnable_time, running_time);

		task_t task = get_threadtask(thread);
		struct kperf_context ctx = {
			.cur_thread = thread,
			.cur_task = task,
			.cur_pid = task_pid(task),
			.trigger_type = TRIGGER_TYPE_LAZY_WAIT,
			.starting_fp = starting_fp,
		};

		struct kperf_sample *sample = kperf_intr_sample_buffer();
		if (!sample) {
			return;
		}

		unsigned int flags = SAMPLE_FLAG_PEND_USER;
		flags |= continuation ? SAMPLE_FLAG_CONTINUATION : 0;
		flags |= !ml_at_interrupt_context() ? SAMPLE_FLAG_NON_INTERRUPT : 0;

		kperf_sample(sample, &ctx, kperf_lazy_wait_action, flags);
	}
}

void
kperf_lazy_cpu_sample(thread_t thread, unsigned int flags, bool interrupt)
{
	assert(ml_get_interrupts_enabled() == FALSE);
	if (!thread) {
		thread = current_thread();
	}

	/* take a sample if this CPU's last sample time is beyond the threshold */
	processor_t processor = current_processor();
	uint64_t time_now = mach_absolute_time();
	uint64_t since_last_sample = time_now - processor->kperf_last_sample_time;
	if (since_last_sample > kperf_lazy_cpu_time_threshold) {
		processor->kperf_last_sample_time = time_now;
		timer_update(&thread->runnable_timer, time_now);
		timer_update(&thread->system_timer, time_now);

		uint64_t runnable_time = timer_grab(&thread->runnable_timer);
		uint64_t running_time = timer_grab(&thread->user_timer) +
		    timer_grab(&thread->system_timer);

		BUF_DATA(PERF_LZ_CPUSAMPLE, running_time, runnable_time,
		    thread->sched_pri, interrupt ? 1 : 0);

		task_t task = get_threadtask(thread);
		struct kperf_context ctx = {
			.cur_thread = thread,
			.cur_task = task,
			.cur_pid = task_pid(task),
			.trigger_type = TRIGGER_TYPE_LAZY_CPU,
			.starting_fp = 0,
		};

		struct kperf_sample *sample = kperf_intr_sample_buffer();
		if (!sample) {
			return;
		}

		kperf_sample(sample, &ctx, kperf_lazy_cpu_action,
		    SAMPLE_FLAG_PEND_USER | flags);
	}
}

/*
 * Accessors for configuration.
 */

int
kperf_lazy_get_wait_action(void)
{
	return kperf_lazy_wait_action;
}

int
kperf_lazy_set_wait_action(int action_id)
{
	if (action_id < 0 || (unsigned int)action_id > kperf_action_get_count()) {
		return 1;
	}

	kperf_lazy_wait_action = action_id;
	kperf_on_cpu_update();
	return 0;
}

uint64_t
kperf_lazy_get_wait_time_threshold(void)
{
	return kperf_lazy_wait_time_threshold;
}

int
kperf_lazy_set_wait_time_threshold(uint64_t threshold)
{
	kperf_lazy_wait_time_threshold = threshold;
	return 0;
}

int
kperf_lazy_get_cpu_action(void)
{
	return kperf_lazy_cpu_action;
}

int
kperf_lazy_set_cpu_action(int action_id)
{
	if (action_id < 0 || (unsigned int)action_id > kperf_action_get_count()) {
		return 1;
	}

	kperf_lazy_cpu_action = action_id;
	return 0;
}

uint64_t
kperf_lazy_get_cpu_time_threshold(void)
{
	return kperf_lazy_cpu_time_threshold;
}

int
kperf_lazy_set_cpu_time_threshold(uint64_t threshold)
{
	kperf_lazy_cpu_time_threshold = threshold;
	return 0;
}