/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* Sample thread data */

#include <kern/debug.h> /* panic */
#include <kern/thread.h> /* thread_* */
#include <kern/timer.h> /* timer_data_t */
#include <kern/policy_internal.h> /* TASK_POLICY_* */
#include <mach/mach_types.h>

#include <kperf/kperf.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/thread_samplers.h>
#include <kperf/ast.h>

extern boolean_t stackshot_thread_is_idle_worker_unsafe(thread_t thread);

/*
 * XXX Deprecated, use thread scheduling sampler instead.
 *
 * Taken from AppleProfileGetRunModeOfThread and CHUD. Still here for
 * backwards compatibility.
 */

#define KPERF_TI_RUNNING   (1U << 0)
#define KPERF_TI_RUNNABLE  (1U << 1)
#define KPERF_TI_WAIT      (1U << 2)
#define KPERF_TI_UNINT     (1U << 3)
#define KPERF_TI_SUSP      (1U << 4)
#define KPERF_TI_TERMINATE (1U << 5)
#define KPERF_TI_IDLE      (1U << 6)

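/*
 * Encode the thread's run mode as a 32-bit word: the low 16 bits hold the
 * KPERF_TI_* flags that apply to the thread and the high 16 bits hold their
 * bitwise complement.  For example, a thread that is on-core and runnable has
 * flags 0x0003 and reports 0xfffc0003.
 */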
static uint32_t
kperf_thread_info_runmode_legacy(thread_t thread)
{
	uint32_t kperf_state = 0;
	int sched_state = thread->state;
	processor_t last_processor = thread->last_processor;

	if ((last_processor != PROCESSOR_NULL) && (thread == last_processor->active_thread)) {
		kperf_state |= KPERF_TI_RUNNING;
	}
	if (sched_state & TH_RUN) {
		kperf_state |= KPERF_TI_RUNNABLE;
	}
	if (sched_state & TH_WAIT) {
		kperf_state |= KPERF_TI_WAIT;
	}
	if (sched_state & TH_UNINT) {
		kperf_state |= KPERF_TI_UNINT;
	}
	if (sched_state & TH_SUSP) {
		kperf_state |= KPERF_TI_SUSP;
	}
	if (sched_state & TH_TERMINATE) {
		kperf_state |= KPERF_TI_TERMINATE;
	}
	if (sched_state & TH_IDLE) {
		kperf_state |= KPERF_TI_IDLE;
	}

	/* if no state bits apply (seen on desktop), report only "not idle" via the inverted high half */
	if (kperf_state == 0) {
		return (TH_IDLE << 16);
	}

	/* high two bytes are inverted mask, low two bytes are normal */
	return (((~kperf_state & 0xffff) << 16) | (kperf_state & 0xffff));
}

void
kperf_thread_info_sample(struct kperf_thread_info *ti, struct kperf_context *context)
{
	thread_t cur_thread = context->cur_thread;

	BUF_INFO(PERF_TI_SAMPLE, (uintptr_t)thread_tid(cur_thread));

	ti->kpthi_pid = context->cur_pid;
	ti->kpthi_tid = thread_tid(cur_thread);
	ti->kpthi_dq_addr = thread_dispatchqaddr(cur_thread);
	ti->kpthi_runmode = kperf_thread_info_runmode_legacy(cur_thread);

	BUF_VERB(PERF_TI_SAMPLE | DBG_FUNC_END);
}

void
kperf_thread_info_log(struct kperf_thread_info *ti)
{
	BUF_DATA(PERF_TI_DATA, ti->kpthi_pid, ti->kpthi_tid /* K64-only */,
	         ti->kpthi_dq_addr, ti->kpthi_runmode);
}

/*
 * Scheduling information reports inputs and outputs of the scheduler state for
 * a thread.
 */

void
kperf_thread_scheduling_sample(struct kperf_thread_scheduling *thsc,
                               struct kperf_context *context)
{
	assert(thsc != NULL);
	assert(context != NULL);

	thread_t thread = context->cur_thread;

	BUF_INFO(PERF_TI_SCHEDSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread));

	thsc->kpthsc_user_time = timer_grab(&(thread->user_timer));
	uint64_t system_time = timer_grab(&(thread->system_timer));

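	/*
	 * Without precise user/kernel time accounting, the two timers do not
	 * reliably separate user time from system time, so report the combined
	 * total as user time.
	 */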
	if (thread->precise_user_kernel_time) {
		thsc->kpthsc_system_time = system_time;
	} else {
		thsc->kpthsc_user_time += system_time;
		thsc->kpthsc_system_time = 0;
	}

	thsc->kpthsc_state = thread->state;
	thsc->kpthsc_base_priority = thread->base_pri;
	thsc->kpthsc_sched_priority = thread->sched_pri;
	thsc->kpthsc_effective_qos = thread->effective_policy.thep_qos;
	thsc->kpthsc_requested_qos = thread->requested_policy.thrp_qos;
	thsc->kpthsc_requested_qos_override = thread->requested_policy.thrp_qos_override;
	thsc->kpthsc_effective_latency_qos = thread->effective_policy.thep_latency_qos;

	BUF_INFO(PERF_TI_SCHEDSAMPLE | DBG_FUNC_END);
}

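/*
 * The scheduling tracepoint emits user time, system time, and a packed word:
 * base priority at bit 48, scheduled priority at bit 32, the low byte of the
 * thread state at bit 24, effective QoS at bit 6, requested QoS at bit 3, and
 * the requested QoS override in the low bits.  The effective latency QoS is
 * shifted into the top bits of the final argument.  On 32-bit kernels the same
 * fields are split across two events.
 */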
void
kperf_thread_scheduling_log(struct kperf_thread_scheduling *thsc)
{
	assert(thsc != NULL);
#if defined(__LP64__)
	BUF_DATA(PERF_TI_SCHEDDATA, thsc->kpthsc_user_time,
	         thsc->kpthsc_system_time,
	         (((uint64_t)thsc->kpthsc_base_priority) << 48)
	         | ((uint64_t)thsc->kpthsc_sched_priority << 32)
	         | ((uint64_t)(thsc->kpthsc_state & 0xff) << 24)
	         | (thsc->kpthsc_effective_qos << 6)
	         | (thsc->kpthsc_requested_qos << 3)
	         | thsc->kpthsc_requested_qos_override,
	         ((uint64_t)thsc->kpthsc_effective_latency_qos << 61));
#else
	BUF_DATA(PERF_TI_SCHEDDATA1_32, UPPER_32(thsc->kpthsc_user_time),
	         LOWER_32(thsc->kpthsc_user_time),
	         UPPER_32(thsc->kpthsc_system_time),
	         LOWER_32(thsc->kpthsc_system_time));
	BUF_DATA(PERF_TI_SCHEDDATA2_32, (((uint32_t)thsc->kpthsc_base_priority) << 16)
	         | thsc->kpthsc_sched_priority,
	         ((thsc->kpthsc_state & 0xff) << 24)
	         | (thsc->kpthsc_effective_qos << 6)
	         | (thsc->kpthsc_requested_qos << 3)
	         | thsc->kpthsc_requested_qos_override,
	         (uint32_t)thsc->kpthsc_effective_latency_qos << 29);
#endif /* defined(__LP64__) */
}

/*
 * The snapshot sampler collects miscellaneous thread information, maintaining
 * parity with what stackshot reports.
 */

#define KPERF_THREAD_SNAPSHOT_DARWIN_BG  (1U << 0)
#define KPERF_THREAD_SNAPSHOT_PASSIVE_IO (1U << 1)
#define KPERF_THREAD_SNAPSHOT_GFI        (1U << 2)
#define KPERF_THREAD_SNAPSHOT_IDLE_WQ    (1U << 3)
/* max is 1U << 7 */

void
kperf_thread_snapshot_sample(struct kperf_thread_snapshot *thsn,
                             struct kperf_context *context)
{
	assert(thsn != NULL);
	assert(context != NULL);

	thread_t thread = context->cur_thread;

	BUF_INFO(PERF_TI_SNAPSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread));

	thsn->kpthsn_last_made_runnable_time = thread->last_made_runnable_time;

	thsn->kpthsn_flags = 0;
	if (thread->effective_policy.thep_darwinbg) {
		thsn->kpthsn_flags |= KPERF_THREAD_SNAPSHOT_DARWIN_BG;
	}
	if (proc_get_effective_thread_policy(thread, TASK_POLICY_PASSIVE_IO)) {
		thsn->kpthsn_flags |= KPERF_THREAD_SNAPSHOT_PASSIVE_IO;
	}
	if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) {
		thsn->kpthsn_flags |= KPERF_THREAD_SNAPSHOT_GFI;
	}
	if (stackshot_thread_is_idle_worker_unsafe(thread)) {
		thsn->kpthsn_flags |= KPERF_THREAD_SNAPSHOT_IDLE_WQ;
	}

	thsn->kpthsn_suspend_count = thread->suspend_count;
	thsn->kpthsn_io_tier = proc_get_effective_thread_policy(thread, TASK_POLICY_IO);

	BUF_VERB(PERF_TI_SNAPSAMPLE | DBG_FUNC_END);
}

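/*
 * The snapshot tracepoint packs the flags into the low byte, the suspend count
 * starting at bit 8, and the I/O tier starting at bit 24 of its first
 * argument, followed by the last-made-runnable timestamp.
 */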
void
kperf_thread_snapshot_log(struct kperf_thread_snapshot *thsn)
{
	assert(thsn != NULL);
#if defined(__LP64__)
	BUF_DATA(PERF_TI_SNAPDATA, thsn->kpthsn_flags | ((uint32_t)(thsn->kpthsn_suspend_count) << 8)
	         | (thsn->kpthsn_io_tier << 24),
	         thsn->kpthsn_last_made_runnable_time);
#else
	BUF_DATA(PERF_TI_SNAPDATA_32, thsn->kpthsn_flags | ((uint32_t)(thsn->kpthsn_suspend_count) << 8)
	         | (thsn->kpthsn_io_tier << 24),
	         UPPER_32(thsn->kpthsn_last_made_runnable_time),
	         LOWER_32(thsn->kpthsn_last_made_runnable_time));
#endif /* defined(__LP64__) */
}

/*
 * Dispatch information only contains the dispatch queue serial number from
 * libdispatch.
 *
 * It's a separate sampler because queue data must be copied in from user space.
 */

void
kperf_thread_dispatch_sample(struct kperf_thread_dispatch *thdi,
                             struct kperf_context *context)
{
	assert(thdi != NULL);
	assert(context != NULL);

	thread_t thread = context->cur_thread;

	BUF_INFO(PERF_TI_DISPSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread));

	task_t task = thread->task;
	boolean_t task_64 = task_has_64BitAddr(task);
	size_t user_addr_size = task_64 ? 8 : 4;

	assert(thread->task != kernel_task);
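	/*
	 * thread_dispatchqaddr returns the user space address of the TSD slot
	 * holding a pointer to the thread's current dispatch queue.  Chase that
	 * pointer with copyin, then read the queue's serial number at the
	 * task's dispatch queue serial number offset.
	 */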
	uint64_t user_dq_key_addr = thread_dispatchqaddr(thread);
	if (user_dq_key_addr == 0) {
		goto error;
	}

	uint64_t user_dq_addr;
	if ((copyin((user_addr_t)user_dq_key_addr,
	            (char *)&user_dq_addr,
	            user_addr_size) != 0) ||
	    (user_dq_addr == 0))
	{
		goto error;
	}

	uint64_t user_dq_serialno_addr =
	        user_dq_addr + get_task_dispatchqueue_serialno_offset(task);

	if (copyin((user_addr_t)user_dq_serialno_addr,
	           (char *)&(thdi->kpthdi_dq_serialno),
	           user_addr_size) == 0)
	{
		goto out;
	}

error:
	thdi->kpthdi_dq_serialno = 0;

out:
	BUF_VERB(PERF_TI_DISPSAMPLE | DBG_FUNC_END);
}

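/*
 * Dispatch sampling requires copyin, which is not safe from every sampling
 * context (e.g. interrupt handlers), so it can be deferred by pending an AST
 * on the target thread and sampling when the AST is handled.
 */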
int
kperf_thread_dispatch_pend(struct kperf_context *context)
{
	return kperf_ast_pend(context->cur_thread, T_KPERF_AST_DISPATCH);
}

void
kperf_thread_dispatch_log(struct kperf_thread_dispatch *thdi)
{
	assert(thdi != NULL);
#if defined(__LP64__)
	BUF_DATA(PERF_TI_DISPDATA, thdi->kpthdi_dq_serialno);
#else
	BUF_DATA(PERF_TI_DISPDATA_32, UPPER_32(thdi->kpthdi_dq_serialno),
	         LOWER_32(thdi->kpthdi_dq_serialno));
#endif /* defined(__LP64__) */
}