]> git.saurik.com Git - apple/xnu.git/blob - osfmk/ppc/hw_perfmon.c
609a68ec3e74d8cf41712260432e3bed2b10cbc8
[apple/xnu.git] / osfmk / ppc / hw_perfmon.c
1 /*
2 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <kern/thread.h>
24 #include <ppc/exception.h>
25 #include <ppc/savearea.h>
26 #include <ppc/hw_perfmon.h>
27 #include <ppc/hw_perfmon_mmcr.h>
28
29 decl_simple_lock_data(,hw_perfmon_lock)
30 static task_t hw_perfmon_owner = TASK_NULL;
31 static int hw_perfmon_thread_count = 0;
32
33 /* Notes:
34 * -supervisor/user level filtering is unnecessary because of the way PMCs and MMCRs are context switched
35 * (can only count user events anyway)
 * -marked filtering is unnecessary because each thread has its own virtualized set of PMCs and MMCRs
37 * -virtual counter PMI is passed up as a breakpoint exception
38 */
39
40 int perfmon_init(void)
41 {
42 simple_lock_init(&hw_perfmon_lock, FALSE);
43 return KERN_SUCCESS;
44 }
45
46 /* PMC Facility Owner:
47 * TASK_NULL - no one owns it
48 * kernel_task - owned by hw_perfmon
49 * other task - owned by another task
50 */
51
52 int perfmon_acquire_facility(task_t task)
53 {
54 kern_return_t retval = KERN_SUCCESS;
55
56 simple_lock(&hw_perfmon_lock);
57
58 if(hw_perfmon_owner==task) {
59 #ifdef HWPERFMON_DEBUG
60 kprintf("perfmon_acquire_facility - ACQUIRED: already owner\n");
61 #endif
62 retval = KERN_SUCCESS;
63 /* already own it */
64 } else if(hw_perfmon_owner==TASK_NULL) { /* no one owns it */
65 hw_perfmon_owner = task;
66 hw_perfmon_thread_count = 0;
67 #ifdef HWPERFMON_DEBUG
68 kprintf("perfmon_acquire_facility - ACQUIRED: no current owner - made new owner\n");
69 #endif
70 retval = KERN_SUCCESS;
71 } else { /* someone already owns it */
72 if(hw_perfmon_owner==kernel_task) {
73 if(hw_perfmon_thread_count==0) { /* kernel owns it but no threads using it */
74 hw_perfmon_owner = task;
75 hw_perfmon_thread_count = 0;
76 #ifdef HWPERFMON_DEBUG
77 kprintf("perfmon_acquire_facility - ACQUIRED: kernel is current owner but no threads using it\n");
78 #endif
79 retval = KERN_SUCCESS;
80 } else {
81 #ifdef HWPERFMON_DEBUG
82 kprintf("perfmon_acquire_facility - DENIED: kernel is current owner and facility in use\n");
83 #endif
84 retval = KERN_RESOURCE_SHORTAGE;
85 }
86 } else { /* non-kernel owner */
87 #ifdef HWPERFMON_DEBUG
88 kprintf("perfmon_acquire_facility - DENIED: another active task owns the facility\n");
89 #endif
90 retval = KERN_RESOURCE_SHORTAGE;
91 }
92 }
93
94 simple_unlock(&hw_perfmon_lock);
95 return retval;
96 }
97
98 int perfmon_release_facility(task_t task)
99 {
100 kern_return_t retval = KERN_SUCCESS;
101 task_t old_perfmon_owner = hw_perfmon_owner;
102
103 simple_lock(&hw_perfmon_lock);
104
105 if(task!=hw_perfmon_owner) {
106 retval = KERN_NO_ACCESS;
107 } else {
108 if(old_perfmon_owner==kernel_task) {
109 if(hw_perfmon_thread_count>0) {
110 #ifdef HWPERFMON_DEBUG
111 kprintf("perfmon_release_facility - NOT RELEASED: kernel task is owner and has active perfmon threads\n");
112 #endif
113 retval = KERN_NO_ACCESS;
114 } else {
115 #ifdef HWPERFMON_DEBUG
116 kprintf("perfmon_release_facility - RELEASED: kernel task was owner\n");
117 #endif
118 hw_perfmon_owner = TASK_NULL;
119 retval = KERN_SUCCESS;
120 }
121 } else {
122 #ifdef HWPERFMON_DEBUG
123 kprintf("perfmon_release_facility - RELEASED: user task was owner\n");
124 #endif
125 hw_perfmon_owner = TASK_NULL;
126 retval = KERN_SUCCESS;
127 }
128 }
129
130 simple_unlock(&hw_perfmon_lock);
131 return retval;
132 }
133
134 int perfmon_enable(thread_t thread)
135 {
136 struct savearea *sv = thread->machine.pcb;
137 kern_return_t kr;
138 kern_return_t retval = KERN_SUCCESS;
139 int curPMC;
140
141 if(thread->machine.specFlags & perfMonitor) {
142 return KERN_SUCCESS; /* already enabled */
143 } else if(perfmon_acquire_facility(kernel_task)!=KERN_SUCCESS) {
144 return KERN_RESOURCE_SHORTAGE; /* facility is in use */
145 } else { /* kernel_task owns the faciltity and this thread has not yet been counted */
146 simple_lock(&hw_perfmon_lock);
147 hw_perfmon_thread_count++;
148 simple_unlock(&hw_perfmon_lock);
149 }
150
151 sv->save_mmcr1 = 0;
152 sv->save_mmcr2 = 0;
153
154 switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
155 case CPU_SUBTYPE_POWERPC_750:
156 case CPU_SUBTYPE_POWERPC_7400:
157 case CPU_SUBTYPE_POWERPC_7450:
158 {
159 ppc32_mmcr0_reg_t mmcr0_reg;
160
161 mmcr0_reg.value = 0;
162 mmcr0_reg.field.disable_counters_always = TRUE;
163 mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */
164 sv->save_mmcr0 = mmcr0_reg.value;
165 }
166 break;
167 case CPU_SUBTYPE_POWERPC_970:
168 {
169 ppc64_mmcr0_reg_t mmcr0_reg;
170
171 mmcr0_reg.value = 0;
172 mmcr0_reg.field.disable_counters_always = TRUE;
173 mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */
174 sv->save_mmcr0 = mmcr0_reg.value;
175 }
176 break;
177 default:
178 retval = KERN_FAILURE;
179 break;
180 }
181
182 if(retval==KERN_SUCCESS) {
183 for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
184 sv->save_pmc[curPMC] = 0;
185 thread->machine.pmcovfl[curPMC] = 0;
186 }
187 thread->machine.perfmonFlags = 0;
188 thread->machine.specFlags |= perfMonitor; /* enable perf monitor facility for this thread */
189 if(thread==current_thread()) {
190 getPerProc()->spcFlags |= perfMonitor; /* update per_proc */
191 }
192 }
193
194 #ifdef HWPERFMON_DEBUG
195 kprintf("perfmon_enable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
196 #endif
197
198 return retval;
199 }
200
201 int perfmon_disable(thread_t thread)
202 {
203 struct savearea *sv = thread->machine.pcb;
204 int curPMC;
205
206 if(!(thread->machine.specFlags & perfMonitor)) {
207 return KERN_NO_ACCESS; /* not enabled */
208 } else {
209 simple_lock(&hw_perfmon_lock);
210 hw_perfmon_thread_count--;
211 simple_unlock(&hw_perfmon_lock);
212 perfmon_release_facility(kernel_task); /* will release if hw_perfmon_thread_count is 0 */
213 }
214
215 thread->machine.specFlags &= ~perfMonitor; /* disable perf monitor facility for this thread */
216 if(thread==current_thread()) {
217 PerProcTable[cpu_number()].ppe_vaddr->spcFlags &= ~perfMonitor; /* update per_proc */
218 }
219 sv->save_mmcr0 = 0;
220 sv->save_mmcr1 = 0;
221 sv->save_mmcr2 = 0;
222
223 for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
224 sv->save_pmc[curPMC] = 0;
225 thread->machine.pmcovfl[curPMC] = 0;
226 thread->machine.perfmonFlags = 0;
227 }
228
229 #ifdef HWPERFMON_DEBUG
230 kprintf("perfmon_disable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
231 #endif
232
233 return KERN_SUCCESS;
234 }
235
236 int perfmon_clear_counters(thread_t thread)
237 {
238 struct savearea *sv = thread->machine.pcb;
239 int curPMC;
240
241 #ifdef HWPERFMON_DEBUG
242 kprintf("perfmon_clear_counters (CPU%d)\n", cpu_number());
243 #endif
244
245 /* clear thread copy */
246 for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
247 sv->save_pmc[curPMC] = 0;
248 thread->machine.pmcovfl[curPMC] = 0;
249 }
250
251 return KERN_SUCCESS;
252 }
253
254 int perfmon_write_counters(thread_t thread, uint64_t *pmcs)
255 {
256 struct savearea *sv = thread->machine.pcb;
257 int curPMC;
258
259 #ifdef HWPERFMON_DEBUG
260 kprintf("perfmon_write_counters (CPU%d): mmcr0 = %016llX, pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv->save_mmcr0, pmcs[PMC_1], pmcs[PMC_2], pmcs[PMC_3], pmcs[PMC_4], pmcs[PMC_5], pmcs[PMC_6], pmcs[PMC_7], pmcs[PMC_8]);
261 #endif
262
263 /* update thread copy */
264 for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
265 sv->save_pmc[curPMC] = pmcs[curPMC] & 0x7FFFFFFF;
266 thread->machine.pmcovfl[curPMC] = (pmcs[curPMC]>>31) & 0xFFFFFFFF;
267 }
268
269 return KERN_SUCCESS;
270 }
271
272 int perfmon_read_counters(thread_t thread, uint64_t *pmcs)
273 {
274 struct savearea *sv = thread->machine.pcb;
275 int curPMC;
276
277 /* retrieve from thread copy */
278 for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
279 pmcs[curPMC] = thread->machine.pmcovfl[curPMC];
280 pmcs[curPMC] = pmcs[curPMC]<<31;
281 pmcs[curPMC] |= (sv->save_pmc[curPMC] & 0x7FFFFFFF);
282 }
283
284 /* zero any unused counters on this platform */
285 switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
286 case CPU_SUBTYPE_POWERPC_750:
287 case CPU_SUBTYPE_POWERPC_7400:
288 case CPU_SUBTYPE_POWERPC_7450:
289 pmcs[PMC_7] = 0;
290 pmcs[PMC_8] = 0;
291 break;
292 default:
293 break;
294 }
295
296 #ifdef HWPERFMON_DEBUG
297 kprintf("perfmon_read_counters (CPU%d): mmcr0 = %016llX pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv->save_mmcr0, pmcs[PMC_1], pmcs[PMC_2], pmcs[PMC_3], pmcs[PMC_4], pmcs[PMC_5], pmcs[PMC_6], pmcs[PMC_7], pmcs[PMC_8]);
298 #endif
299
300 return KERN_SUCCESS;
301 }
302
303 int perfmon_start_counters(thread_t thread)
304 {
305 struct savearea *sv = thread->machine.pcb;
306 kern_return_t retval = KERN_SUCCESS;
307
308 switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
309 case CPU_SUBTYPE_POWERPC_750:
310 case CPU_SUBTYPE_POWERPC_7400:
311 {
312 ppc32_mmcr0_reg_t mmcr0_reg;
313 mmcr0_reg.value = sv->save_mmcr0;
314 mmcr0_reg.field.disable_counters_always = FALSE;
315 /* XXXXX PMI broken on 750, 750CX, 750FX, 7400 and 7410 v1.2 and earlier XXXXX */
316 mmcr0_reg.field.on_pmi_stop_counting = FALSE;
317 mmcr0_reg.field.enable_pmi = FALSE;
318 mmcr0_reg.field.enable_pmi_on_pmc1 = FALSE;
319 mmcr0_reg.field.enable_pmi_on_pmcn = FALSE;
320 sv->save_mmcr0 = mmcr0_reg.value;
321 }
322 break;
323 case CPU_SUBTYPE_POWERPC_7450:
324 {
325 ppc32_mmcr0_reg_t mmcr0_reg;
326 mmcr0_reg.value = sv->save_mmcr0;
327 mmcr0_reg.field.disable_counters_always = FALSE;
328 mmcr0_reg.field.on_pmi_stop_counting = TRUE;
329 mmcr0_reg.field.enable_pmi = TRUE;
330 mmcr0_reg.field.enable_pmi_on_pmc1 = TRUE;
331 mmcr0_reg.field.enable_pmi_on_pmcn = TRUE;
332 sv->save_mmcr0 = mmcr0_reg.value;
333 }
334 break;
335 case CPU_SUBTYPE_POWERPC_970:
336 {
337 ppc64_mmcr0_reg_t mmcr0_reg;
338 mmcr0_reg.value = sv->save_mmcr0;
339 mmcr0_reg.field.disable_counters_always = FALSE;
340 mmcr0_reg.field.on_pmi_stop_counting = TRUE;
341 mmcr0_reg.field.enable_pmi = TRUE;
342 mmcr0_reg.field.enable_pmi_on_pmc1 = TRUE;
343 mmcr0_reg.field.enable_pmi_on_pmcn = TRUE;
344 sv->save_mmcr0 = mmcr0_reg.value;
345 }
346 break;
347 default:
348 retval = KERN_FAILURE;
349 break;
350 }
351
352 #ifdef HWPERFMON_DEBUG
353 kprintf("perfmon_start_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
354 #endif
355
356 return retval;
357 }
358
359 int perfmon_stop_counters(thread_t thread)
360 {
361 struct savearea *sv = thread->machine.pcb;
362 kern_return_t retval = KERN_SUCCESS;
363
364 switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
365 case CPU_SUBTYPE_POWERPC_750:
366 case CPU_SUBTYPE_POWERPC_7400:
367 case CPU_SUBTYPE_POWERPC_7450:
368 {
369 ppc32_mmcr0_reg_t mmcr0_reg;
370 mmcr0_reg.value = sv->save_mmcr0;
371 mmcr0_reg.field.disable_counters_always = TRUE;
372 sv->save_mmcr0 = mmcr0_reg.value;
373 }
374 break;
375 case CPU_SUBTYPE_POWERPC_970:
376 {
377 ppc64_mmcr0_reg_t mmcr0_reg;
378 mmcr0_reg.value = sv->save_mmcr0;
379 mmcr0_reg.field.disable_counters_always = TRUE;
380 sv->save_mmcr0 = mmcr0_reg.value;
381 }
382 break;
383 default:
384 retval = KERN_FAILURE;
385 break;
386 }
387
388 #ifdef HWPERFMON_DEBUG
389 kprintf("perfmon_stop_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
390 #endif
391
392 return retval;
393 }
394
/*
 * perfmon_set_event - program the event selector for one virtualized PMC.
 *
 * Writes the event number into the appropriate field of the thread's saved
 * MMCR0/MMCR1 image: PMC1/PMC2 selectors live in MMCR0, PMC3 and above in
 * MMCR1.  Which PMCs exist depends on the CPU family: 750/7400 have PMC1-4,
 * 7450 has PMC1-6, 970 has PMC1-8 (with the 64-bit register layout).
 *
 * Returns KERN_SUCCESS, or KERN_FAILURE for an out-of-range PMC or an
 * unsupported processor.  The event value itself is not validated here.
 */
int perfmon_set_event(thread_t thread, int pmc, int event)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event b4 (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc, event, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
	case CPU_SUBTYPE_POWERPC_750:
	case CPU_SUBTYPE_POWERPC_7400:
		{
			ppc32_mmcr0_reg_t mmcr0_reg;
			ppc32_mmcr1_reg_t mmcr1_reg;

			mmcr0_reg.value = sv->save_mmcr0;
			mmcr1_reg.value = sv->save_mmcr1;

			/* 750/7400: only PMC1-4 exist */
			switch(pmc) {
			case PMC_1:
				mmcr0_reg.field.pmc1_event = event;
				sv->save_mmcr0 = mmcr0_reg.value;
				break;
			case PMC_2:
				mmcr0_reg.field.pmc2_event = event;
				sv->save_mmcr0 = mmcr0_reg.value;
				break;
			case PMC_3:
				mmcr1_reg.field.pmc3_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_4:
				mmcr1_reg.field.pmc4_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			default:
				retval = KERN_FAILURE;
				break;
			}
		}
		break;
	case CPU_SUBTYPE_POWERPC_7450:
		{
			ppc32_mmcr0_reg_t mmcr0_reg;
			ppc32_mmcr1_reg_t mmcr1_reg;

			mmcr0_reg.value = sv->save_mmcr0;
			mmcr1_reg.value = sv->save_mmcr1;

			/* 7450: PMC1-6 exist; PMC3-6 selectors are in MMCR1 */
			switch(pmc) {
			case PMC_1:
				mmcr0_reg.field.pmc1_event = event;
				sv->save_mmcr0 = mmcr0_reg.value;
				break;
			case PMC_2:
				mmcr0_reg.field.pmc2_event = event;
				sv->save_mmcr0 = mmcr0_reg.value;
				break;
			case PMC_3:
				mmcr1_reg.field.pmc3_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_4:
				mmcr1_reg.field.pmc4_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_5:
				mmcr1_reg.field.pmc5_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_6:
				mmcr1_reg.field.pmc6_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			default:
				retval = KERN_FAILURE;
				break;
			}
		}
		break;
	case CPU_SUBTYPE_POWERPC_970:
		{
			/* 970 uses the 64-bit MMCR layouts; PMC1-8 exist */
			ppc64_mmcr0_reg_t mmcr0_reg;
			ppc64_mmcr1_reg_t mmcr1_reg;

			mmcr0_reg.value = sv->save_mmcr0;
			mmcr1_reg.value = sv->save_mmcr1;

			switch(pmc) {
			case PMC_1:
				mmcr0_reg.field.pmc1_event = event;
				sv->save_mmcr0 = mmcr0_reg.value;
				break;
			case PMC_2:
				mmcr0_reg.field.pmc2_event = event;
				sv->save_mmcr0 = mmcr0_reg.value;
				break;
			case PMC_3:
				mmcr1_reg.field.pmc3_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_4:
				mmcr1_reg.field.pmc4_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_5:
				mmcr1_reg.field.pmc5_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_6:
				mmcr1_reg.field.pmc6_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_7:
				mmcr1_reg.field.pmc7_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_8:
				mmcr1_reg.field.pmc8_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			default:
				retval = KERN_FAILURE;
				break;
			}
		}
		break;
	default:
		retval = KERN_FAILURE;
		break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc, event, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
534
535 int perfmon_set_event_func(thread_t thread, uint32_t f)
536 {
537 struct savearea *sv = thread->machine.pcb;
538 kern_return_t retval = KERN_SUCCESS;
539
540 #ifdef HWPERFMON_DEBUG
541 kprintf("perfmon_set_event_func - func=%s\n",
542 f==PPC_PERFMON_FUNC_FPU ? "FUNC" :
543 f==PPC_PERFMON_FUNC_ISU ? "ISU" :
544 f==PPC_PERFMON_FUNC_IFU ? "IFU" :
545 f==PPC_PERFMON_FUNC_VMX ? "VMX" :
546 f==PPC_PERFMON_FUNC_IDU ? "IDU" :
547 f==PPC_PERFMON_FUNC_GPS ? "GPS" :
548 f==PPC_PERFMON_FUNC_LSU0 ? "LSU0" :
549 f==PPC_PERFMON_FUNC_LSU1A ? "LSU1A" :
550 f==PPC_PERFMON_FUNC_LSU1B ? "LSU1B" :
551 f==PPC_PERFMON_FUNC_SPECA ? "SPECA" :
552 f==PPC_PERFMON_FUNC_SPECB ? "SPECB" :
553 f==PPC_PERFMON_FUNC_SPECC ? "SPECC" :
554 "UNKNOWN");
555 #endif /* HWPERFMON_DEBUG */
556
557 switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
558 case CPU_SUBTYPE_POWERPC_750:
559 case CPU_SUBTYPE_POWERPC_7400:
560 case CPU_SUBTYPE_POWERPC_7450:
561 retval = KERN_FAILURE; /* event functional unit only applies to 970 */
562 break;
563 case CPU_SUBTYPE_POWERPC_970:
564 {
565 ppc64_mmcr1_reg_t mmcr1_reg;
566 ppc_func_unit_t func_unit;
567
568 func_unit.value = f;
569 mmcr1_reg.value = sv->save_mmcr1;
570
571 mmcr1_reg.field.ttm0_select = func_unit.field.TTM0SEL;
572 mmcr1_reg.field.ttm1_select = func_unit.field.TTM1SEL;
573 mmcr1_reg.field.ttm2_select = 0; /* not used */
574 mmcr1_reg.field.ttm3_select = func_unit.field.TTM3SEL;
575 mmcr1_reg.field.speculative_event = func_unit.field.SPECSEL;
576 mmcr1_reg.field.lane0_select = func_unit.field.TD_CP_DBGxSEL;
577 mmcr1_reg.field.lane1_select = func_unit.field.TD_CP_DBGxSEL;
578 mmcr1_reg.field.lane2_select = func_unit.field.TD_CP_DBGxSEL;
579 mmcr1_reg.field.lane3_select = func_unit.field.TD_CP_DBGxSEL;
580
581 sv->save_mmcr1 = mmcr1_reg.value;
582 }
583 break;
584 default:
585 retval = KERN_FAILURE;
586 break;
587 }
588
589 return retval;
590 }
591
592 int perfmon_set_threshold(thread_t thread, int threshold)
593 {
594 struct savearea *sv = thread->machine.pcb;
595 kern_return_t retval = KERN_SUCCESS;
596
597 switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
598 case CPU_SUBTYPE_POWERPC_750:
599 {
600 ppc32_mmcr0_reg_t mmcr0_reg;
601
602 mmcr0_reg.value = sv->save_mmcr0;
603
604 if(threshold>63) { /* no multiplier on 750 */
605 int newThreshold = 63;
606 #ifdef HWPERFMON_DEBUG
607 kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
608 #endif
609 threshold = newThreshold;
610 }
611 mmcr0_reg.field.threshold_value = threshold;
612
613 sv->save_mmcr0 = mmcr0_reg.value;
614 }
615 break;
616
617 case CPU_SUBTYPE_POWERPC_7400:
618 case CPU_SUBTYPE_POWERPC_7450:
619 {
620 ppc32_mmcr0_reg_t mmcr0_reg;
621 ppc32_mmcr2_reg_t mmcr2_reg;
622
623 mmcr0_reg.value = sv->save_mmcr0;
624 mmcr2_reg.value = sv->save_mmcr2;
625
626 if(threshold<=(2*63)) { /* 2x multiplier */
627 if(threshold%2 != 0) {
628 int newThreshold = 2*(threshold/2);
629 #ifdef HWPERFMON_DEBUG
630 kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 2x multiplier - using threshold of %d instead\n", threshold, newThreshold);
631 #endif
632 threshold = newThreshold;
633 }
634 mmcr2_reg.field.threshold_multiplier = 0;
635 } else if(threshold<=(32*63)) { /* 32x multiplier */
636 if(threshold%32 != 0) {
637 int newThreshold = 32*(threshold/32);
638 #ifdef HWPERFMON_DEBUG
639 kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 32x multiplier - using threshold of %d instead\n", threshold, newThreshold);
640 #endif
641 threshold = newThreshold;
642 }
643 mmcr2_reg.field.threshold_multiplier = 1;
644 } else {
645 int newThreshold = 32*63;
646 #ifdef HWPERFMON_DEBUG
647 kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
648 #endif
649 threshold = newThreshold;
650 mmcr2_reg.field.threshold_multiplier = 1;
651 }
652 mmcr0_reg.field.threshold_value = threshold;
653
654 sv->save_mmcr0 = mmcr0_reg.value;
655 sv->save_mmcr2 = mmcr2_reg.value;
656
657 }
658 break;
659 case CPU_SUBTYPE_POWERPC_970:
660 {
661 ppc64_mmcr0_reg_t mmcr0_reg;
662
663 mmcr0_reg.value = sv->save_mmcr0;
664
665 if(threshold>63) { /* multiplier is in HID1 on 970 - not context switching HID1 so always 1x */
666 int newThreshold = 63;
667 #ifdef HWPERFMON_DEBUG
668 kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
669 #endif
670 threshold = newThreshold;
671 }
672 mmcr0_reg.field.threshold_value = threshold;
673
674 sv->save_mmcr0 = mmcr0_reg.value;
675 }
676 break;
677 default:
678 retval = KERN_FAILURE;
679 break;
680 }
681
682 #ifdef HWPERFMON_DEBUG
683 kprintf("perfmon_set_threshold - threshold=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", threshold, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
684 #endif
685
686 return retval;
687 }
688
689 int perfmon_set_tbsel(thread_t thread, int tbsel)
690 {
691 struct savearea *sv = thread->machine.pcb;
692 kern_return_t retval = KERN_SUCCESS;
693
694 switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
695 case CPU_SUBTYPE_POWERPC_750:
696 case CPU_SUBTYPE_POWERPC_7400:
697 case CPU_SUBTYPE_POWERPC_7450:
698 {
699 ppc32_mmcr0_reg_t mmcr0_reg;
700
701 mmcr0_reg.value = sv->save_mmcr0;
702 switch(tbsel) {
703 case 0x0:
704 case 0x1:
705 case 0x2:
706 case 0x3:
707 mmcr0_reg.field.timebase_bit_selector = tbsel;
708 break;
709 default:
710 retval = KERN_FAILURE;
711 }
712 sv->save_mmcr0 = mmcr0_reg.value;
713 }
714 break;
715 case CPU_SUBTYPE_POWERPC_970:
716 {
717 ppc64_mmcr0_reg_t mmcr0_reg;
718
719 mmcr0_reg.value = sv->save_mmcr0;
720 switch(tbsel) {
721 case 0x0:
722 case 0x1:
723 case 0x2:
724 case 0x3:
725 mmcr0_reg.field.timebase_bit_selector = tbsel;
726 break;
727 default:
728 retval = KERN_FAILURE;
729 }
730 sv->save_mmcr0 = mmcr0_reg.value;
731 }
732 break;
733 default:
734 retval = KERN_FAILURE;
735 break;
736 }
737
738 #ifdef HWPERFMON_DEBUG
739 kprintf("perfmon_set_tbsel - tbsel=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", tbsel, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
740 #endif
741
742 return retval;
743 }
744
745 int perfmon_control(struct savearea *ssp)
746 {
747 mach_port_t thr_port = CAST_DOWN(mach_port_t, ssp->save_r3);
748 int action = (int)ssp->save_r4;
749 int pmc = (int)ssp->save_r5;
750 int val = (int)ssp->save_r6;
751 uint64_t *usr_pmcs_p = CAST_DOWN(uint64_t *, ssp->save_r7);
752 thread_t thread = THREAD_NULL;
753 uint64_t kern_pmcs[MAX_CPUPMC_COUNT];
754 kern_return_t retval = KERN_SUCCESS;
755 int error;
756 boolean_t oldlevel;
757
758 thread = (thread_t) port_name_to_thread(thr_port); // convert user space thread port name to a thread_t
759 if(!thread) {
760 ssp->save_r3 = KERN_INVALID_ARGUMENT;
761 return 1; /* Return and check for ASTs... */
762 }
763
764 if(thread!=current_thread()) {
765 thread_suspend(thread);
766 }
767
768 #ifdef HWPERFMON_DEBUG
769 // kprintf("perfmon_control: action=0x%x pmc=%d val=%d pmcs=0x%x\n", action, pmc, val, usr_pmcs_p);
770 #endif
771
772 oldlevel = ml_set_interrupts_enabled(FALSE);
773
774 /* individual actions which do not require perfmon facility to be enabled */
775 if(action==PPC_PERFMON_DISABLE) {
776 retval = perfmon_disable(thread);
777 }
778 else if(action==PPC_PERFMON_ENABLE) {
779 retval = perfmon_enable(thread);
780 }
781
782 else { /* individual actions which do require perfmon facility to be enabled */
783 if(!(thread->machine.specFlags & perfMonitor)) { /* perfmon not enabled */
784 #ifdef HWPERFMON_DEBUG
785 kprintf("perfmon_control: ERROR - perfmon not enabled for this thread\n");
786 #endif
787 retval = KERN_NO_ACCESS;
788 goto perfmon_return;
789 }
790
791 if(action==PPC_PERFMON_SET_EVENT) {
792 retval = perfmon_set_event(thread, pmc, val);
793 }
794 else if(action==PPC_PERFMON_SET_THRESHOLD) {
795 retval = perfmon_set_threshold(thread, val);
796 }
797 else if(action==PPC_PERFMON_SET_TBSEL) {
798 retval = perfmon_set_tbsel(thread, val);
799 }
800 else if(action==PPC_PERFMON_SET_EVENT_FUNC) {
801 retval = perfmon_set_event_func(thread, val);
802 }
803 else if(action==PPC_PERFMON_ENABLE_PMI_BRKPT) {
804 if(val) {
805 thread->machine.perfmonFlags |= PERFMONFLAG_BREAKPOINT_FOR_PMI;
806 } else {
807 thread->machine.perfmonFlags &= ~PERFMONFLAG_BREAKPOINT_FOR_PMI;
808 }
809 retval = KERN_SUCCESS;
810 }
811
812 /* combinable actions */
813 else {
814 if(action & PPC_PERFMON_STOP_COUNTERS) {
815 error = perfmon_stop_counters(thread);
816 if(error!=KERN_SUCCESS) {
817 retval = error;
818 goto perfmon_return;
819 }
820 }
821 if(action & PPC_PERFMON_CLEAR_COUNTERS) {
822 error = perfmon_clear_counters(thread);
823 if(error!=KERN_SUCCESS) {
824 retval = error;
825 goto perfmon_return;
826 }
827 }
828 if(action & PPC_PERFMON_WRITE_COUNTERS) {
829 if(error = copyin(CAST_USER_ADDR_T(usr_pmcs_p), (void *)kern_pmcs, MAX_CPUPMC_COUNT*sizeof(uint64_t))) {
830 retval = error;
831 goto perfmon_return;
832 }
833 error = perfmon_write_counters(thread, kern_pmcs);
834 if(error!=KERN_SUCCESS) {
835 retval = error;
836 goto perfmon_return;
837 }
838 }
839 if(action & PPC_PERFMON_READ_COUNTERS) {
840 error = perfmon_read_counters(thread, kern_pmcs);
841 if(error!=KERN_SUCCESS) {
842 retval = error;
843 goto perfmon_return;
844 }
845 if(error = copyout((void *)kern_pmcs, CAST_USER_ADDR_T(usr_pmcs_p), MAX_CPUPMC_COUNT*sizeof(uint64_t))) {
846 retval = error;
847 goto perfmon_return;
848 }
849 }
850 if(action & PPC_PERFMON_START_COUNTERS) {
851 error = perfmon_start_counters(thread);
852 if(error!=KERN_SUCCESS) {
853 retval = error;
854 goto perfmon_return;
855 }
856 }
857 }
858 }
859
860 perfmon_return:
861 ml_set_interrupts_enabled(oldlevel);
862
863 #ifdef HWPERFMON_DEBUG
864 kprintf("perfmon_control (CPU%d): mmcr0 = %016llX, pmc1=%X pmc2=%X pmc3=%X pmc4=%X pmc5=%X pmc6=%X pmc7=%X pmc8=%X\n", cpu_number(), ssp->save_mmcr0, ssp->save_pmc[PMC_1], ssp->save_pmc[PMC_2], ssp->save_pmc[PMC_3], ssp->save_pmc[PMC_4], ssp->save_pmc[PMC_5], ssp->save_pmc[PMC_6], ssp->save_pmc[PMC_7], ssp->save_pmc[PMC_8]);
865 #endif
866
867 if(thread!=current_thread()) {
868 thread_resume(thread);
869 }
870
871 #ifdef HWPERFMON_DEBUG
872 if(retval!=KERN_SUCCESS) {
873 kprintf("perfmon_control - ERROR: retval=%d\n", retval);
874 }
875 #endif /* HWPERFMON_DEBUG */
876
877 ssp->save_r3 = retval;
878 return 1; /* Return and check for ASTs... */
879 }
880
/*
 * perfmon_handle_pmi - service a performance monitor interrupt for the
 * current thread.
 *
 * For each saved PMC whose bit 0x80000000 is set (counter crossed into the
 * negative/overflow range), either bump the software overflow accumulator
 * and reset the counter image, or - when the accumulator is saturated at
 * 0xFFFFFFFF and the thread asked for PMI breakpoints - deliver the virtual
 * counter PMI to user space as an EXC_BREAKPOINT/EXC_PPC_PERFMON exception.
 * Afterwards, re-arm counting and PMI in the saved MMCR0 (7450 and 970
 * layouts only).
 *
 * Returns KERN_SUCCESS, or KERN_FAILURE if perfmon is not enabled on the
 * current thread or the CPU subtype is not PMI-capable here.
 */
int perfmon_handle_pmi(struct savearea *ssp)
{
	int curPMC;
	kern_return_t retval = KERN_SUCCESS;
	thread_t thread = current_thread();

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_handle_pmi: got rupt\n");
#endif

	if(!(thread->machine.specFlags & perfMonitor)) { /* perfmon not enabled */
#ifdef HWPERFMON_DEBUG
		kprintf("perfmon_handle_pmi: ERROR - perfmon not enabled for this thread\n");
#endif
		return KERN_FAILURE;
	}

	/* scan all PMCs for the overflow bit (bit 0x80000000 of the 32-bit counter) */
	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		if(thread->machine.pcb->save_pmc[curPMC] & 0x80000000) {
			if(thread->machine.pmcovfl[curPMC]==0xFFFFFFFF && (thread->machine.perfmonFlags & PERFMONFLAG_BREAKPOINT_FOR_PMI)) {
				/* overflow accumulator saturated and breakpoints requested:
				 * surface the virtual PMI to user space */
				doexception(EXC_BREAKPOINT, EXC_PPC_PERFMON, (unsigned int)ssp->save_srr0); // pass up a breakpoint exception
				return KERN_SUCCESS;
			} else {
				/* fold the overflow into software state and restart the counter */
				thread->machine.pmcovfl[curPMC]++;
				thread->machine.pcb->save_pmc[curPMC] = 0;
			}
		}
	}

	if(retval==KERN_SUCCESS) {
		/* re-enable counting and PMI in the saved MMCR0 image; the PMI
		 * itself froze the counters (on_pmi_stop_counting was set when
		 * counting started) */
		switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = thread->machine.pcb->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = FALSE;
				mmcr0_reg.field.enable_pmi = TRUE;
				thread->machine.pcb->save_mmcr0 = mmcr0_reg.value;
			}
			retval = KERN_SUCCESS;
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = thread->machine.pcb->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = FALSE;
				mmcr0_reg.field.enable_pmi = TRUE;
				thread->machine.pcb->save_mmcr0 = mmcr0_reg.value;
			}
			retval = KERN_SUCCESS;
			break;
		default:
			retval = KERN_FAILURE;
			break;
		}
	}

	return retval;
}