[apple/xnu.git] / osfmk / kern / kpc_thread.c (xnu-2422.1.72)
/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/assert.h>
#include <kern/locks.h>
#include <sys/errno.h>

#include <kern/kpc.h>

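/*
 * Overview: this file maintains per-thread performance counter (PMC)
 * totals. While counting is enabled, the PMCs are sampled at each context
 * switch and the delta since the previous switch is credited to the thread
 * being switched out. Per-thread counter buffers are allocated lazily from
 * an AST (see kpc_thread_ast_handler at the end of this file) rather than
 * on the context-switch path.
 */
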
/* global for whether to read PMCs on context switch */
int kpc_threads_counting;

/* current config and number of counters in that config */
static uint32_t kpc_thread_classes = 0;
static uint32_t kpc_thread_classes_count = 0;

static lck_grp_attr_t *kpc_thread_lckgrp_attr = NULL;
static lck_grp_t *kpc_thread_lckgrp = NULL;
static lck_mtx_t kpc_thread_lock;

void kpc_thread_init(void);

void
kpc_thread_init(void)
{
    kpc_thread_lckgrp_attr = lck_grp_attr_alloc_init();
    kpc_thread_lckgrp = lck_grp_alloc_init("kpc", kpc_thread_lckgrp_attr);
    lck_mtx_init(&kpc_thread_lock, kpc_thread_lckgrp, LCK_ATTR_NULL);
}

uint32_t
kpc_get_thread_counting(void)
{
    uint32_t kpc_thread_classes_tmp;
    int kpc_threads_counting_tmp;

    /* Make sure we get a consistent snapshot of these values */
    lck_mtx_lock(&kpc_thread_lock);

    kpc_thread_classes_tmp = kpc_thread_classes;
    kpc_threads_counting_tmp = kpc_threads_counting;

    lck_mtx_unlock(&kpc_thread_lock);

    if( kpc_threads_counting_tmp )
        return kpc_thread_classes_tmp;
    else
        return 0;
}

int
kpc_set_thread_counting(uint32_t classes)
{
    uint32_t count;

    lck_mtx_lock(&kpc_thread_lock);

    count = kpc_get_counter_count(classes);

    if( (classes == 0)
        || (count == 0) )
    {
        /* shut down */
        kpc_threads_counting = FALSE;
    }
    else
    {
        /* stash the config */
        kpc_thread_classes = classes;

        /* work out the size */
        kpc_thread_classes_count = count;
        assert(kpc_thread_classes_count <= KPC_MAX_COUNTERS);

        /* enable switch */
        kpc_threads_counting = TRUE;

        /* and schedule an AST for this thread... */
        if( !current_thread()->kpc_buf )
        {
            current_thread()->t_chud |= T_KPC_ALLOC;
            act_set_kperf(current_thread());
        }
    }

    lck_mtx_unlock(&kpc_thread_lock);

    return 0;
}
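
/*
 * Usage sketch (assumes the counter-class mask macros from <kern/kpc.h>,
 * e.g. KPC_CLASS_CONFIGURABLE_MASK): a caller such as the kpc sysctl
 * interface would enable per-thread counting with
 *
 *     kpc_set_thread_counting(KPC_CLASS_CONFIGURABLE_MASK);
 *
 * and disable it again by passing 0 (or any class mask that maps to zero
 * counters).
 */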

/* snapshot current PMCs and update counters in the current thread */
static void
kpc_update_thread_counters( thread_t thread )
{
    uint32_t i;
    uint64_t *tmp = NULL;
    cpu_data_t *cpu = NULL;

    /* TODO: Fix this...*/
#if defined (__x86_64__)
    cpu = current_cpu_datap();
#else
#error architecture not yet supported
#endif

    /* 1. stash current PMCs into latest CPU block */
    kpc_get_cpu_counters( FALSE, kpc_thread_classes,
                          NULL, cpu->cpu_kpc_buf[1] );

    /* 2. apply delta to old thread */
    if( thread->kpc_buf )
        for( i = 0; i < kpc_thread_classes_count; i++ )
            thread->kpc_buf[i] += cpu->cpu_kpc_buf[1][i] - cpu->cpu_kpc_buf[0][i];


    /* schedule any necessary allocations */
    if( !current_thread()->kpc_buf )
    {
        current_thread()->t_chud |= T_KPC_ALLOC;
        act_set_kperf(current_thread());
    }

    /* 3. switch the PMC block pointers */
    tmp = cpu->cpu_kpc_buf[1];
    cpu->cpu_kpc_buf[1] = cpu->cpu_kpc_buf[0];
    cpu->cpu_kpc_buf[0] = tmp;
}
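
/*
 * Note on the pointer swap above: cpu_kpc_buf[0] holds the counter values
 * captured at the previous context switch, while cpu_kpc_buf[1] is used as
 * scratch for the fresh read. Once the delta has been credited to the
 * outgoing thread, the two pointers are exchanged so that the new snapshot
 * becomes the baseline for the next switch on this CPU.
 */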

void
kpc_switch_context( thread_t old, thread_t new __unused )
{
    kpc_update_thread_counters( old );
}

/* get counter values for a thread */
int
kpc_get_curthread_counters(uint32_t *inoutcount, uint64_t *buf)
{
    thread_t thread = current_thread();
    boolean_t enabled;

    /* buffer too small :( */
    if( *inoutcount < kpc_thread_classes_count )
        return EINVAL;

    /* copy data and actual size */
    if( !thread->kpc_buf )
        return EINVAL;

    enabled = ml_set_interrupts_enabled(FALSE);

    /* snap latest version of counters for this thread */
    kpc_update_thread_counters( current_thread() );

    /* copy out */
    memcpy( buf, thread->kpc_buf,
            kpc_thread_classes_count * sizeof(*buf) );
    *inoutcount = kpc_thread_classes_count;

    ml_set_interrupts_enabled(enabled);

    return 0;
}
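
/*
 * Example caller (sketch; error handling elided, KPC_MAX_COUNTERS as used
 * in the assert above). On success, buf[0..count-1] holds the thread's
 * accumulated values and count is set to the number of counters copied:
 *
 *     uint64_t buf[KPC_MAX_COUNTERS];
 *     uint32_t count = KPC_MAX_COUNTERS;
 *     int ret = kpc_get_curthread_counters(&count, buf);
 */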


void
kpc_thread_create(thread_t thread)
{
    /* nothing to do if we're not counting */
    if(!kpc_threads_counting)
        return;

    /* give the new thread a counterbuf */
    thread->kpc_buf = kpc_counterbuf_alloc();
}

void
kpc_thread_destroy(thread_t thread)
{
    uint64_t *buf = NULL;

    /* usual case: no kpc buf, just return */
    if( !thread->kpc_buf )
        return;

    /* otherwise, don't leak */
    buf = thread->kpc_buf;
    thread->kpc_buf = NULL;
    kpc_counterbuf_free(buf);
}

/* ast callback on a thread */
void
kpc_thread_ast_handler( thread_t thread )
{
    /* see if we want an alloc */
    if( thread->t_chud & T_KPC_ALLOC )
        thread->kpc_buf = kpc_counterbuf_alloc();
}
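
/*
 * The buffer allocation is deferred to this AST handler, which runs in the
 * thread's own context: both kpc_set_thread_counting() and the context-switch
 * path in kpc_update_thread_counters() only mark the thread with T_KPC_ALLOC
 * and request the AST via act_set_kperf() when they find a thread without a
 * kpc_buf.
 */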