/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <kern/thread.h>
#include <kern/thread_act.h>
#include <ppc/exception.h>
#include <ppc/savearea.h>
#include <ppc/hw_perfmon.h>
#include <ppc/hw_perfmon_mmcr.h>

decl_simple_lock_data(,hw_perfmon_lock)
static task_t hw_perfmon_owner = TASK_NULL;
static int hw_perfmon_thread_count = 0;

/* Notes:
 * -supervisor/user level filtering is unnecessary because of the way PMCs and MMCRs are context switched
 *  (they can only count user events anyway)
 * -marked filtering is unnecessary because each thread has its own virtualized set of PMCs and MMCRs
 * -a virtual counter PMI is passed up to user space as a breakpoint exception
 */

int perfmon_init(void)
{
    simple_lock_init(&hw_perfmon_lock, FALSE);
    return KERN_SUCCESS;
}

/* PMC Facility Owner:
 * TASK_NULL   - no one owns it
 * kernel_task - owned by hw_perfmon
 * other task  - owned by another task
 */

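/*
 * perfmon_acquire_facility: attempt to claim the PMC facility for a task.
 * A task that already owns the facility reacquires it trivially; an
 * unowned facility is granted immediately; a kernel-owned facility with
 * no enabled perfmon threads may be taken over; any other request fails
 * with KERN_RESOURCE_SHORTAGE.
 */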
int perfmon_acquire_facility(task_t task)
{
    kern_return_t retval = KERN_SUCCESS;

    simple_lock(&hw_perfmon_lock);

    if(hw_perfmon_owner==task) {
#ifdef HWPERFMON_DEBUG
        kprintf("perfmon_acquire_facility - ACQUIRED: already owner\n");
#endif
        retval = KERN_SUCCESS;
        /* already own it */
    } else if(hw_perfmon_owner==TASK_NULL) { /* no one owns it */
        hw_perfmon_owner = task;
        hw_perfmon_thread_count = 0;
#ifdef HWPERFMON_DEBUG
        kprintf("perfmon_acquire_facility - ACQUIRED: no current owner - made new owner\n");
#endif
        retval = KERN_SUCCESS;
    } else { /* someone already owns it */
        if(hw_perfmon_owner==kernel_task) {
            if(hw_perfmon_thread_count==0) { /* kernel owns it but no threads using it */
                hw_perfmon_owner = task;
                hw_perfmon_thread_count = 0;
#ifdef HWPERFMON_DEBUG
                kprintf("perfmon_acquire_facility - ACQUIRED: kernel is current owner but no threads using it\n");
#endif
                retval = KERN_SUCCESS;
            } else {
#ifdef HWPERFMON_DEBUG
                kprintf("perfmon_acquire_facility - DENIED: kernel is current owner and facility in use\n");
#endif
                retval = KERN_RESOURCE_SHORTAGE;
            }
        } else { /* non-kernel owner */
#ifdef HWPERFMON_DEBUG
            kprintf("perfmon_acquire_facility - DENIED: another active task owns the facility\n");
#endif
            retval = KERN_RESOURCE_SHORTAGE;
        }
    }

    simple_unlock(&hw_perfmon_lock);
    return retval;
}

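/*
 * perfmon_release_facility: give up ownership of the PMC facility. Only
 * the current owner may release it, and the kernel task will not release
 * it while enabled perfmon threads remain.
 */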
int perfmon_release_facility(task_t task)
{
    kern_return_t retval = KERN_SUCCESS;
    task_t old_perfmon_owner;

    simple_lock(&hw_perfmon_lock);
    old_perfmon_owner = hw_perfmon_owner; /* snapshot the owner under the lock */

    if(task!=hw_perfmon_owner) {
        retval = KERN_NO_ACCESS;
    } else {
        if(old_perfmon_owner==kernel_task) {
            if(hw_perfmon_thread_count>0) {
#ifdef HWPERFMON_DEBUG
                kprintf("perfmon_release_facility - NOT RELEASED: kernel task is owner and has active perfmon threads\n");
#endif
                retval = KERN_NO_ACCESS;
            } else {
#ifdef HWPERFMON_DEBUG
                kprintf("perfmon_release_facility - RELEASED: kernel task was owner\n");
#endif
                hw_perfmon_owner = TASK_NULL;
                retval = KERN_SUCCESS;
            }
        } else {
#ifdef HWPERFMON_DEBUG
            kprintf("perfmon_release_facility - RELEASED: user task was owner\n");
#endif
            hw_perfmon_owner = TASK_NULL;
            retval = KERN_SUCCESS;
        }
    }

    simple_unlock(&hw_perfmon_lock);
    return retval;
}

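/*
 * perfmon_enable: enable the virtualized perfmon facility for one thread.
 * The facility is acquired on behalf of kernel_task and the thread is
 * added to the facility's thread count; the thread's saved MMCRs and PMCs
 * are zeroed, with counting left disabled (and restricted to user mode)
 * until perfmon_start_counters() is called.
 */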
int perfmon_enable(thread_act_t thr_act)
{
    struct savearea *sv = thr_act->mact.pcb;
    kern_return_t retval = KERN_SUCCESS;
    int curPMC;

    if(thr_act->mact.specFlags & perfMonitor) {
        return KERN_SUCCESS; /* already enabled */
    } else if(perfmon_acquire_facility(kernel_task)!=KERN_SUCCESS) {
        return KERN_RESOURCE_SHORTAGE; /* facility is in use */
    } else { /* kernel_task owns the facility and this thread has not yet been counted */
        simple_lock(&hw_perfmon_lock);
        hw_perfmon_thread_count++;
        simple_unlock(&hw_perfmon_lock);
    }

    sv->save_mmcr1 = 0;
    sv->save_mmcr2 = 0;

    switch(machine_slot[0].cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_750:
        case CPU_SUBTYPE_POWERPC_7400:
        case CPU_SUBTYPE_POWERPC_7450:
            {
                ppc32_mmcr0_reg_t mmcr0_reg;

                mmcr0_reg.value = 0;
                mmcr0_reg.field.disable_counters_always = TRUE;
                mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */
                sv->save_mmcr0 = mmcr0_reg.value;
            }
            break;
        case CPU_SUBTYPE_POWERPC_970:
            {
                ppc64_mmcr0_reg_t mmcr0_reg;

                mmcr0_reg.value = 0;
                mmcr0_reg.field.disable_counters_always = TRUE;
                mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */
                sv->save_mmcr0 = mmcr0_reg.value;
            }
            break;
        default:
            retval = KERN_FAILURE;
            break;
    }

    if(retval==KERN_SUCCESS) {
        for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
            sv->save_pmc[curPMC] = 0;
            thr_act->mact.pmcovfl[curPMC] = 0;
        }
        thr_act->mact.perfmonFlags = 0;
        thr_act->mact.specFlags |= perfMonitor; /* enable perf monitor facility for this thread */
        if(thr_act==current_act()) {
            per_proc_info[cpu_number()].spcFlags |= perfMonitor; /* update per_proc */
        }
    } else { /* unsupported processor: back out this thread's use of the facility */
        simple_lock(&hw_perfmon_lock);
        hw_perfmon_thread_count--;
        simple_unlock(&hw_perfmon_lock);
        perfmon_release_facility(kernel_task); /* will release if hw_perfmon_thread_count is 0 */
    }

#ifdef HWPERFMON_DEBUG
    kprintf("perfmon_enable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

    return retval;
}

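/*
 * perfmon_disable: tear down a thread's perfmon state: drop the thread
 * from the facility's thread count, release the facility if this was the
 * last enabled thread, and clear the saved MMCRs, PMCs and overflow
 * counters.
 */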
int perfmon_disable(thread_act_t thr_act)
{
    struct savearea *sv = thr_act->mact.pcb;
    int curPMC;

    if(!(thr_act->mact.specFlags & perfMonitor)) {
        return KERN_NO_ACCESS; /* not enabled */
    } else {
        simple_lock(&hw_perfmon_lock);
        hw_perfmon_thread_count--;
        simple_unlock(&hw_perfmon_lock);
        perfmon_release_facility(kernel_task); /* will release if hw_perfmon_thread_count is 0 */
    }

    thr_act->mact.specFlags &= ~perfMonitor; /* disable perf monitor facility for this thread */
    if(thr_act==current_act()) {
        per_proc_info[cpu_number()].spcFlags &= ~perfMonitor; /* update per_proc */
    }
    sv->save_mmcr0 = 0;
    sv->save_mmcr1 = 0;
    sv->save_mmcr2 = 0;

    thr_act->mact.perfmonFlags = 0;
    for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
        sv->save_pmc[curPMC] = 0;
        thr_act->mact.pmcovfl[curPMC] = 0;
    }

#ifdef HWPERFMON_DEBUG
    kprintf("perfmon_disable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

    return KERN_SUCCESS;
}

int perfmon_clear_counters(thread_act_t thr_act)
{
    struct savearea *sv = thr_act->mact.pcb;
    int curPMC;

#ifdef HWPERFMON_DEBUG
    kprintf("perfmon_clear_counters (CPU%d)\n", cpu_number());
#endif

    /* clear thread copy */
    for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
        sv->save_pmc[curPMC] = 0;
        thr_act->mact.pmcovfl[curPMC] = 0;
    }

    return KERN_SUCCESS;
}

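/*
 * perfmon_write_counters: load new 64-bit virtual counter values into the
 * thread's saved state. The hardware PMCs are 32 bits wide with the high
 * bit serving as the overflow/PMI trigger, so only the low 31 bits of
 * each value live in the saved PMC; the rest is carried in a per-thread
 * software overflow count:
 *
 *     save_pmc[n] = pmcs[n] & 0x7FFFFFFF;          (hardware half)
 *     pmcovfl[n]  = (pmcs[n] >> 31) & 0xFFFFFFFF;  (software half)
 */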
int perfmon_write_counters(thread_act_t thr_act, uint64_t *pmcs)
{
    struct savearea *sv = thr_act->mact.pcb;
    int curPMC;

#ifdef HWPERFMON_DEBUG
    kprintf("perfmon_write_counters (CPU%d): mmcr0 = %016llX, pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv->save_mmcr0, pmcs[PMC_1], pmcs[PMC_2], pmcs[PMC_3], pmcs[PMC_4], pmcs[PMC_5], pmcs[PMC_6], pmcs[PMC_7], pmcs[PMC_8]);
#endif

    /* update thread copy */
    for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
        sv->save_pmc[curPMC] = pmcs[curPMC] & 0x7FFFFFFF;
        thr_act->mact.pmcovfl[curPMC] = (pmcs[curPMC]>>31) & 0xFFFFFFFF;
    }

    return KERN_SUCCESS;
}

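/*
 * perfmon_read_counters: reassemble the full 64-bit virtual counter
 * values from the two saved halves:
 *
 *     pmcs[n] = ((uint64_t)pmcovfl[n] << 31) | (save_pmc[n] & 0x7FFFFFFF);
 *
 * Counters that do not exist on the running processor read as zero.
 */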
int perfmon_read_counters(thread_act_t thr_act, uint64_t *pmcs)
{
    struct savearea *sv = thr_act->mact.pcb;
    int curPMC;

    /* retrieve from thread copy */
    for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
        pmcs[curPMC] = thr_act->mact.pmcovfl[curPMC];
        pmcs[curPMC] = pmcs[curPMC]<<31;
        pmcs[curPMC] |= (sv->save_pmc[curPMC] & 0x7FFFFFFF);
    }

    /* zero any unused counters on this platform */
    switch(machine_slot[0].cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_750:
        case CPU_SUBTYPE_POWERPC_7400:
        case CPU_SUBTYPE_POWERPC_7450:
            pmcs[PMC_7] = 0;
            pmcs[PMC_8] = 0;
            break;
        default:
            break;
    }

#ifdef HWPERFMON_DEBUG
    kprintf("perfmon_read_counters (CPU%d): mmcr0 = %016llX pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv->save_mmcr0, pmcs[PMC_1], pmcs[PMC_2], pmcs[PMC_3], pmcs[PMC_4], pmcs[PMC_5], pmcs[PMC_6], pmcs[PMC_7], pmcs[PMC_8]);
#endif

    return KERN_SUCCESS;
}

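/*
 * perfmon_start_counters: clear the master disable bit in the thread's
 * saved MMCR0 so counting resumes the next time the thread's context is
 * loaded. PMI delivery is enabled only where it works: it is left off on
 * the 750 and 7400 families (see the errata note below) and turned on,
 * with counting frozen at each PMI, on the 7450 and 970.
 */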
int perfmon_start_counters(thread_act_t thr_act)
{
    struct savearea *sv = thr_act->mact.pcb;
    kern_return_t retval = KERN_SUCCESS;

    switch(machine_slot[0].cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_750:
        case CPU_SUBTYPE_POWERPC_7400:
            {
                ppc32_mmcr0_reg_t mmcr0_reg;

                mmcr0_reg.value = sv->save_mmcr0;
                mmcr0_reg.field.disable_counters_always = FALSE;
                /* XXXXX PMI broken on 750, 750CX, 750FX, 7400 and 7410 v1.2 and earlier XXXXX */
                mmcr0_reg.field.on_pmi_stop_counting = FALSE;
                mmcr0_reg.field.enable_pmi = FALSE;
                mmcr0_reg.field.enable_pmi_on_pmc1 = FALSE;
                mmcr0_reg.field.enable_pmi_on_pmcn = FALSE;
                sv->save_mmcr0 = mmcr0_reg.value;
            }
            break;
        case CPU_SUBTYPE_POWERPC_7450:
            {
                ppc32_mmcr0_reg_t mmcr0_reg;

                mmcr0_reg.value = sv->save_mmcr0;
                mmcr0_reg.field.disable_counters_always = FALSE;
                mmcr0_reg.field.on_pmi_stop_counting = TRUE;
                mmcr0_reg.field.enable_pmi = TRUE;
                mmcr0_reg.field.enable_pmi_on_pmc1 = TRUE;
                mmcr0_reg.field.enable_pmi_on_pmcn = TRUE;
                sv->save_mmcr0 = mmcr0_reg.value;
            }
            break;
        case CPU_SUBTYPE_POWERPC_970:
            {
                ppc64_mmcr0_reg_t mmcr0_reg;

                mmcr0_reg.value = sv->save_mmcr0;
                mmcr0_reg.field.disable_counters_always = FALSE;
                mmcr0_reg.field.on_pmi_stop_counting = TRUE;
                mmcr0_reg.field.enable_pmi = TRUE;
                mmcr0_reg.field.enable_pmi_on_pmc1 = TRUE;
                mmcr0_reg.field.enable_pmi_on_pmcn = TRUE;
                sv->save_mmcr0 = mmcr0_reg.value;
            }
            break;
        default:
            retval = KERN_FAILURE;
            break;
    }

#ifdef HWPERFMON_DEBUG
    kprintf("perfmon_start_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

    return retval;
}

int perfmon_stop_counters(thread_act_t thr_act)
{
    struct savearea *sv = thr_act->mact.pcb;
    kern_return_t retval = KERN_SUCCESS;

    switch(machine_slot[0].cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_750:
        case CPU_SUBTYPE_POWERPC_7400:
        case CPU_SUBTYPE_POWERPC_7450:
            {
                ppc32_mmcr0_reg_t mmcr0_reg;

                mmcr0_reg.value = sv->save_mmcr0;
                mmcr0_reg.field.disable_counters_always = TRUE;
                sv->save_mmcr0 = mmcr0_reg.value;
            }
            break;
        case CPU_SUBTYPE_POWERPC_970:
            {
                ppc64_mmcr0_reg_t mmcr0_reg;

                mmcr0_reg.value = sv->save_mmcr0;
                mmcr0_reg.field.disable_counters_always = TRUE;
                sv->save_mmcr0 = mmcr0_reg.value;
            }
            break;
        default:
            retval = KERN_FAILURE;
            break;
    }

#ifdef HWPERFMON_DEBUG
    kprintf("perfmon_stop_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

    return retval;
}

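/*
 * perfmon_set_event: program the event selector for one PMC in the
 * thread's saved MMCRs. On all supported processors the selectors for
 * PMC1 and PMC2 live in MMCR0 and those for the higher-numbered PMCs
 * live in MMCR1; the number of programmable PMCs varies (4 on the
 * 750/7400, 6 on the 7450, 8 on the 970).
 */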
int perfmon_set_event(thread_act_t thr_act, int pmc, int event)
{
    struct savearea *sv = thr_act->mact.pcb;
    kern_return_t retval = KERN_SUCCESS;

#ifdef HWPERFMON_DEBUG
    kprintf("perfmon_set_event (before) (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc, event, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

    switch(machine_slot[0].cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_750:
        case CPU_SUBTYPE_POWERPC_7400:
            {
                ppc32_mmcr0_reg_t mmcr0_reg;
                ppc32_mmcr1_reg_t mmcr1_reg;

                mmcr0_reg.value = sv->save_mmcr0;
                mmcr1_reg.value = sv->save_mmcr1;

                switch(pmc) {
                    case PMC_1:
                        mmcr0_reg.field.pmc1_event = event;
                        sv->save_mmcr0 = mmcr0_reg.value;
                        break;
                    case PMC_2:
                        mmcr0_reg.field.pmc2_event = event;
                        sv->save_mmcr0 = mmcr0_reg.value;
                        break;
                    case PMC_3:
                        mmcr1_reg.field.pmc3_event = event;
                        sv->save_mmcr1 = mmcr1_reg.value;
                        break;
                    case PMC_4:
                        mmcr1_reg.field.pmc4_event = event;
                        sv->save_mmcr1 = mmcr1_reg.value;
                        break;
                    default:
                        retval = KERN_FAILURE;
                        break;
                }
            }
            break;
        case CPU_SUBTYPE_POWERPC_7450:
            {
                ppc32_mmcr0_reg_t mmcr0_reg;
                ppc32_mmcr1_reg_t mmcr1_reg;

                mmcr0_reg.value = sv->save_mmcr0;
                mmcr1_reg.value = sv->save_mmcr1;

                switch(pmc) {
                    case PMC_1:
                        mmcr0_reg.field.pmc1_event = event;
                        sv->save_mmcr0 = mmcr0_reg.value;
                        break;
                    case PMC_2:
                        mmcr0_reg.field.pmc2_event = event;
                        sv->save_mmcr0 = mmcr0_reg.value;
                        break;
                    case PMC_3:
                        mmcr1_reg.field.pmc3_event = event;
                        sv->save_mmcr1 = mmcr1_reg.value;
                        break;
                    case PMC_4:
                        mmcr1_reg.field.pmc4_event = event;
                        sv->save_mmcr1 = mmcr1_reg.value;
                        break;
                    case PMC_5:
                        mmcr1_reg.field.pmc5_event = event;
                        sv->save_mmcr1 = mmcr1_reg.value;
                        break;
                    case PMC_6:
                        mmcr1_reg.field.pmc6_event = event;
                        sv->save_mmcr1 = mmcr1_reg.value;
                        break;
                    default:
                        retval = KERN_FAILURE;
                        break;
                }
            }
            break;
        case CPU_SUBTYPE_POWERPC_970:
            {
                ppc64_mmcr0_reg_t mmcr0_reg;
                ppc64_mmcr1_reg_t mmcr1_reg;

                mmcr0_reg.value = sv->save_mmcr0;
                mmcr1_reg.value = sv->save_mmcr1;

                switch(pmc) {
                    case PMC_1:
                        mmcr0_reg.field.pmc1_event = event;
                        sv->save_mmcr0 = mmcr0_reg.value;
                        break;
                    case PMC_2:
                        mmcr0_reg.field.pmc2_event = event;
                        sv->save_mmcr0 = mmcr0_reg.value;
                        break;
                    case PMC_3:
                        mmcr1_reg.field.pmc3_event = event;
                        sv->save_mmcr1 = mmcr1_reg.value;
                        break;
                    case PMC_4:
                        mmcr1_reg.field.pmc4_event = event;
                        sv->save_mmcr1 = mmcr1_reg.value;
                        break;
                    case PMC_5:
                        mmcr1_reg.field.pmc5_event = event;
                        sv->save_mmcr1 = mmcr1_reg.value;
                        break;
                    case PMC_6:
                        mmcr1_reg.field.pmc6_event = event;
                        sv->save_mmcr1 = mmcr1_reg.value;
                        break;
                    case PMC_7:
                        mmcr1_reg.field.pmc7_event = event;
                        sv->save_mmcr1 = mmcr1_reg.value;
                        break;
                    case PMC_8:
                        mmcr1_reg.field.pmc8_event = event;
                        sv->save_mmcr1 = mmcr1_reg.value;
                        break;
                    default:
                        retval = KERN_FAILURE;
                        break;
                }
            }
            break;
        default:
            retval = KERN_FAILURE;
            break;
    }

#ifdef HWPERFMON_DEBUG
    kprintf("perfmon_set_event (after) (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc, event, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

    return retval;
}

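/*
 * perfmon_set_event_func: select which functional unit feeds the 970's
 * performance monitor by routing the unit-select fields of the supplied
 * descriptor into the TTMn and debug-lane select fields of MMCR1. Only
 * the 970 has this routing; all other processors fail with KERN_FAILURE.
 */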
int perfmon_set_event_func(thread_act_t thr_act, uint32_t f)
{
    struct savearea *sv = thr_act->mact.pcb;
    kern_return_t retval = KERN_SUCCESS;

#ifdef HWPERFMON_DEBUG
    kprintf("perfmon_set_event_func - func=%s\n",
            f==PPC_PERFMON_FUNC_FPU ? "FPU" :
            f==PPC_PERFMON_FUNC_ISU ? "ISU" :
            f==PPC_PERFMON_FUNC_IFU ? "IFU" :
            f==PPC_PERFMON_FUNC_VMX ? "VMX" :
            f==PPC_PERFMON_FUNC_IDU ? "IDU" :
            f==PPC_PERFMON_FUNC_GPS ? "GPS" :
            f==PPC_PERFMON_FUNC_LSU0 ? "LSU0" :
            f==PPC_PERFMON_FUNC_LSU1A ? "LSU1A" :
            f==PPC_PERFMON_FUNC_LSU1B ? "LSU1B" :
            f==PPC_PERFMON_FUNC_SPECA ? "SPECA" :
            f==PPC_PERFMON_FUNC_SPECB ? "SPECB" :
            f==PPC_PERFMON_FUNC_SPECC ? "SPECC" :
            "UNKNOWN");
#endif /* HWPERFMON_DEBUG */

    switch(machine_slot[0].cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_750:
        case CPU_SUBTYPE_POWERPC_7400:
        case CPU_SUBTYPE_POWERPC_7450:
            retval = KERN_FAILURE; /* event functional unit selection only applies to the 970 */
            break;
        case CPU_SUBTYPE_POWERPC_970:
            {
                ppc64_mmcr1_reg_t mmcr1_reg;
                ppc_func_unit_t func_unit;

                func_unit.value = f;
                mmcr1_reg.value = sv->save_mmcr1;

                mmcr1_reg.field.ttm0_select = func_unit.field.TTM0SEL;
                mmcr1_reg.field.ttm1_select = func_unit.field.TTM1SEL;
                mmcr1_reg.field.ttm2_select = 0; /* not used */
                mmcr1_reg.field.ttm3_select = func_unit.field.TTM3SEL;
                mmcr1_reg.field.speculative_event = func_unit.field.SPECSEL;
                mmcr1_reg.field.lane0_select = func_unit.field.TD_CP_DBGxSEL;
                mmcr1_reg.field.lane1_select = func_unit.field.TD_CP_DBGxSEL;
                mmcr1_reg.field.lane2_select = func_unit.field.TD_CP_DBGxSEL;
                mmcr1_reg.field.lane3_select = func_unit.field.TD_CP_DBGxSEL;

                sv->save_mmcr1 = mmcr1_reg.value;
            }
            break;
        default:
            retval = KERN_FAILURE;
            break;
    }

    return retval;
}

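/*
 * perfmon_set_threshold: set the threshold field in the thread's saved
 * MMCR0. The raw field holds 0-63; on the 7400/7450 a multiplier bit in
 * MMCR2 scales it by 2x or 32x, so requested values are rounded down to
 * a representable multiple and clamped at 32*63. The 750 and 970 run at
 * an effective 1x (the 970's multiplier lives in HID1, which is not
 * context switched), so their values are simply clamped to 63.
 */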
int perfmon_set_threshold(thread_act_t thr_act, int threshold)
{
    struct savearea *sv = thr_act->mact.pcb;
    kern_return_t retval = KERN_SUCCESS;

    switch(machine_slot[0].cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_750:
            {
                ppc32_mmcr0_reg_t mmcr0_reg;

                mmcr0_reg.value = sv->save_mmcr0;

                if(threshold>63) { /* no multiplier on 750 */
                    int newThreshold = 63;
#ifdef HWPERFMON_DEBUG
                    kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
                    threshold = newThreshold;
                }
                mmcr0_reg.field.threshold_value = threshold;

                sv->save_mmcr0 = mmcr0_reg.value;
            }
            break;

        case CPU_SUBTYPE_POWERPC_7400:
        case CPU_SUBTYPE_POWERPC_7450:
            {
                ppc32_mmcr0_reg_t mmcr0_reg;
                ppc32_mmcr2_reg_t mmcr2_reg;

                mmcr0_reg.value = sv->save_mmcr0;
                mmcr2_reg.value = sv->save_mmcr2;

                if(threshold<=(2*63)) { /* 2x multiplier */
                    if(threshold%2 != 0) {
                        int newThreshold = 2*(threshold/2);
#ifdef HWPERFMON_DEBUG
                        kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 2x multiplier - using threshold of %d instead\n", threshold, newThreshold);
#endif
                        threshold = newThreshold;
                    }
                    mmcr2_reg.field.threshold_multiplier = 0;
                    mmcr0_reg.field.threshold_value = threshold/2; /* the MMCR0 field holds the pre-multiplier count */
                } else if(threshold<=(32*63)) { /* 32x multiplier */
                    if(threshold%32 != 0) {
                        int newThreshold = 32*(threshold/32);
#ifdef HWPERFMON_DEBUG
                        kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 32x multiplier - using threshold of %d instead\n", threshold, newThreshold);
#endif
                        threshold = newThreshold;
                    }
                    mmcr2_reg.field.threshold_multiplier = 1;
                    mmcr0_reg.field.threshold_value = threshold/32; /* the MMCR0 field holds the pre-multiplier count */
                } else {
                    int newThreshold = 32*63;
#ifdef HWPERFMON_DEBUG
                    kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
                    threshold = newThreshold;
                    mmcr2_reg.field.threshold_multiplier = 1;
                    mmcr0_reg.field.threshold_value = 63;
                }

                sv->save_mmcr0 = mmcr0_reg.value;
                sv->save_mmcr2 = mmcr2_reg.value;
            }
            break;
        case CPU_SUBTYPE_POWERPC_970:
            {
                ppc64_mmcr0_reg_t mmcr0_reg;

                mmcr0_reg.value = sv->save_mmcr0;

                if(threshold>63) { /* multiplier is in HID1 on 970 - not context switching HID1 so always 1x */
                    int newThreshold = 63;
#ifdef HWPERFMON_DEBUG
                    kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
                    threshold = newThreshold;
                }
                mmcr0_reg.field.threshold_value = threshold;

                sv->save_mmcr0 = mmcr0_reg.value;
            }
            break;
        default:
            retval = KERN_FAILURE;
            break;
    }

#ifdef HWPERFMON_DEBUG
    kprintf("perfmon_set_threshold - threshold=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", threshold, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

    return retval;
}

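/*
 * perfmon_set_tbsel: set the timebase bit selector (TBSEL, values 0-3)
 * in the thread's saved MMCR0, choosing which timebase bit the
 * performance monitor watches for time-based transition events.
 */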
int perfmon_set_tbsel(thread_act_t thr_act, int tbsel)
{
    struct savearea *sv = thr_act->mact.pcb;
    kern_return_t retval = KERN_SUCCESS;

    switch(machine_slot[0].cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_750:
        case CPU_SUBTYPE_POWERPC_7400:
        case CPU_SUBTYPE_POWERPC_7450:
            {
                ppc32_mmcr0_reg_t mmcr0_reg;

                mmcr0_reg.value = sv->save_mmcr0;
                switch(tbsel) {
                    case 0x0:
                    case 0x1:
                    case 0x2:
                    case 0x3:
                        mmcr0_reg.field.timebase_bit_selector = tbsel;
                        break;
                    default:
                        retval = KERN_FAILURE;
                        break;
                }
                sv->save_mmcr0 = mmcr0_reg.value;
            }
            break;
        case CPU_SUBTYPE_POWERPC_970:
            {
                ppc64_mmcr0_reg_t mmcr0_reg;

                mmcr0_reg.value = sv->save_mmcr0;
                switch(tbsel) {
                    case 0x0:
                    case 0x1:
                    case 0x2:
                    case 0x3:
                        mmcr0_reg.field.timebase_bit_selector = tbsel;
                        break;
                    default:
                        retval = KERN_FAILURE;
                        break;
                }
                sv->save_mmcr0 = mmcr0_reg.value;
            }
            break;
        default:
            retval = KERN_FAILURE;
            break;
    }

#ifdef HWPERFMON_DEBUG
    kprintf("perfmon_set_tbsel - tbsel=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", tbsel, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

    return retval;
}

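/*
 * perfmon_control: top-level dispatcher for the perfmon system call.
 * Arguments arrive in the caller's saved registers:
 *
 *     r3 - target thread's port name
 *     r4 - action (PPC_PERFMON_*)
 *     r5 - PMC number (for SET_EVENT)
 *     r6 - value (event, threshold, tbsel, functional unit, or flag)
 *     r7 - user address of a uint64_t[MAX_CPUPMC_COUNT] counter array
 *
 * ENABLE and DISABLE are accepted unconditionally; every other action
 * requires the facility to already be enabled for the target thread.
 * The STOP/CLEAR/WRITE/READ/START counter actions are bit flags and may
 * be combined in one call. The target thread is suspended while its
 * state is changed (unless it is the caller), and the result is returned
 * to the caller in r3.
 */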
int perfmon_control(struct savearea *ssp)
{
    mach_port_t thr_port = CAST_DOWN(mach_port_t, ssp->save_r3);
    int action = (int)ssp->save_r4;
    int pmc = (int)ssp->save_r5;
    int val = (int)ssp->save_r6;
    uint64_t *usr_pmcs_p = CAST_DOWN(uint64_t *, ssp->save_r7);
    thread_act_t thr_act = THREAD_NULL;
    uint64_t kern_pmcs[MAX_CPUPMC_COUNT];
    kern_return_t retval = KERN_SUCCESS;
    int error;
    boolean_t oldlevel;

    thr_act = (thread_act_t) port_name_to_act(thr_port); // convert user space thread port name to a thread_act_t
    if(!thr_act) {
        ssp->save_r3 = KERN_INVALID_ARGUMENT;
        return 1; /* Return and check for ASTs... */
    }

    if(thr_act!=current_act()) {
        thread_suspend(thr_act);
    }

#ifdef HWPERFMON_DEBUG
    // kprintf("perfmon_control: action=0x%x pmc=%d val=%d pmcs=0x%x\n", action, pmc, val, usr_pmcs_p);
#endif

    oldlevel = ml_set_interrupts_enabled(FALSE);

    /* individual actions which do not require the perfmon facility to be enabled */
    if(action==PPC_PERFMON_DISABLE) {
        retval = perfmon_disable(thr_act);
    }
    else if(action==PPC_PERFMON_ENABLE) {
        retval = perfmon_enable(thr_act);
    }

    else { /* individual actions which do require the perfmon facility to be enabled */
        if(!(thr_act->mact.specFlags & perfMonitor)) { /* perfmon not enabled */
#ifdef HWPERFMON_DEBUG
            kprintf("perfmon_control: ERROR - perfmon not enabled for this thread\n");
#endif
            retval = KERN_NO_ACCESS;
            goto perfmon_return;
        }

        if(action==PPC_PERFMON_SET_EVENT) {
            retval = perfmon_set_event(thr_act, pmc, val);
        }
        else if(action==PPC_PERFMON_SET_THRESHOLD) {
            retval = perfmon_set_threshold(thr_act, val);
        }
        else if(action==PPC_PERFMON_SET_TBSEL) {
            retval = perfmon_set_tbsel(thr_act, val);
        }
        else if(action==PPC_PERFMON_SET_EVENT_FUNC) {
            retval = perfmon_set_event_func(thr_act, val);
        }
        else if(action==PPC_PERFMON_ENABLE_PMI_BRKPT) {
            if(val) {
                thr_act->mact.perfmonFlags |= PERFMONFLAG_BREAKPOINT_FOR_PMI;
            } else {
                thr_act->mact.perfmonFlags &= ~PERFMONFLAG_BREAKPOINT_FOR_PMI;
            }
            retval = KERN_SUCCESS;
        }

        /* combinable actions */
        else {
            if(action & PPC_PERFMON_STOP_COUNTERS) {
                error = perfmon_stop_counters(thr_act);
                if(error!=KERN_SUCCESS) {
                    retval = error;
                    goto perfmon_return;
                }
            }
            if(action & PPC_PERFMON_CLEAR_COUNTERS) {
                error = perfmon_clear_counters(thr_act);
                if(error!=KERN_SUCCESS) {
                    retval = error;
                    goto perfmon_return;
                }
            }
            if(action & PPC_PERFMON_WRITE_COUNTERS) {
                if((error = copyin((void *)usr_pmcs_p, (void *)kern_pmcs, MAX_CPUPMC_COUNT*sizeof(uint64_t))) != 0) {
                    retval = error;
                    goto perfmon_return;
                }
                error = perfmon_write_counters(thr_act, kern_pmcs);
                if(error!=KERN_SUCCESS) {
                    retval = error;
                    goto perfmon_return;
                }
            }
            if(action & PPC_PERFMON_READ_COUNTERS) {
                error = perfmon_read_counters(thr_act, kern_pmcs);
                if(error!=KERN_SUCCESS) {
                    retval = error;
                    goto perfmon_return;
                }
                if((error = copyout((void *)kern_pmcs, (void *)usr_pmcs_p, MAX_CPUPMC_COUNT*sizeof(uint64_t))) != 0) {
                    retval = error;
                    goto perfmon_return;
                }
            }
            if(action & PPC_PERFMON_START_COUNTERS) {
                error = perfmon_start_counters(thr_act);
                if(error!=KERN_SUCCESS) {
                    retval = error;
                    goto perfmon_return;
                }
            }
        }
    }

perfmon_return:
    ml_set_interrupts_enabled(oldlevel);

#ifdef HWPERFMON_DEBUG
    kprintf("perfmon_control (CPU%d): mmcr0 = %016llX, pmc1=%X pmc2=%X pmc3=%X pmc4=%X pmc5=%X pmc6=%X pmc7=%X pmc8=%X\n", cpu_number(), ssp->save_mmcr0, ssp->save_pmc[PMC_1], ssp->save_pmc[PMC_2], ssp->save_pmc[PMC_3], ssp->save_pmc[PMC_4], ssp->save_pmc[PMC_5], ssp->save_pmc[PMC_6], ssp->save_pmc[PMC_7], ssp->save_pmc[PMC_8]);
#endif

    if(thr_act!=current_act()) {
        thread_resume(thr_act);
    }

#ifdef HWPERFMON_DEBUG
    if(retval!=KERN_SUCCESS) {
        kprintf("perfmon_control - ERROR: retval=%d\n", retval);
    }
#endif /* HWPERFMON_DEBUG */

    ssp->save_r3 = retval;
    return 1; /* Return and check for ASTs... */
}

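/*
 * perfmon_handle_pmi: service a performance monitor interrupt on behalf
 * of the current thread. Each saved PMC with its overflow bit (bit 31)
 * set has its software overflow count bumped and is reset to zero; if an
 * overflow count has reached 0xFFFFFFFF and the thread asked for
 * breakpoint-on-PMI, the event is instead surfaced to user space as an
 * EXC_BREAKPOINT/EXC_PPC_PERFMON exception. On the 7450 and 970 counting
 * and PMI are then re-enabled in the saved MMCR0, since those processors
 * freeze the counters when a PMI is taken.
 */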
int perfmon_handle_pmi(struct savearea *ssp)
{
    int curPMC;
    kern_return_t retval = KERN_SUCCESS;
    thread_act_t thr_act = current_act();

#ifdef HWPERFMON_DEBUG
    kprintf("perfmon_handle_pmi: interrupt taken\n");
#endif

    if(!(thr_act->mact.specFlags & perfMonitor)) { /* perfmon not enabled */
#ifdef HWPERFMON_DEBUG
        kprintf("perfmon_handle_pmi: ERROR - perfmon not enabled for this thread\n");
#endif
        return KERN_FAILURE;
    }

    for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
        if(thr_act->mact.pcb->save_pmc[curPMC] & 0x80000000) {
            if(thr_act->mact.pmcovfl[curPMC]==0xFFFFFFFF && (thr_act->mact.perfmonFlags & PERFMONFLAG_BREAKPOINT_FOR_PMI)) {
                doexception(EXC_BREAKPOINT, EXC_PPC_PERFMON, (unsigned int)ssp->save_srr0); // pass up a breakpoint exception
                return KERN_SUCCESS;
            } else {
                thr_act->mact.pmcovfl[curPMC]++;
                thr_act->mact.pcb->save_pmc[curPMC] = 0;
            }
        }
    }

    if(retval==KERN_SUCCESS) {
        switch(machine_slot[0].cpu_subtype) {
            case CPU_SUBTYPE_POWERPC_7450:
                {
                    ppc32_mmcr0_reg_t mmcr0_reg;

                    mmcr0_reg.value = thr_act->mact.pcb->save_mmcr0;
                    mmcr0_reg.field.disable_counters_always = FALSE;
                    mmcr0_reg.field.enable_pmi = TRUE;
                    thr_act->mact.pcb->save_mmcr0 = mmcr0_reg.value;
                }
                retval = KERN_SUCCESS;
                break;
            case CPU_SUBTYPE_POWERPC_970:
                {
                    ppc64_mmcr0_reg_t mmcr0_reg;

                    mmcr0_reg.value = thr_act->mact.pcb->save_mmcr0;
                    mmcr0_reg.field.disable_counters_always = FALSE;
                    mmcr0_reg.field.enable_pmi = TRUE;
                    thr_act->mact.pcb->save_mmcr0 = mmcr0_reg.value;
                }
                retval = KERN_SUCCESS;
                break;
            default:
                retval = KERN_FAILURE;
                break;
        }
    }

    return retval;
}