/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <kern/thread.h>
#include <ppc/exception.h>
#include <ppc/savearea.h>
#include <ppc/hw_perfmon.h>
#include <ppc/hw_perfmon_mmcr.h>
decl_simple_lock_data(,hw_perfmon_lock)
static task_t hw_perfmon_owner = TASK_NULL;
static int hw_perfmon_thread_count = 0;
/* Notes:
 * -supervisor/user level filtering is unnecessary because of the way PMCs and MMCRs are context switched
 *  (can only count user events anyway)
 * -marked filtering is unnecessary because each thread has its own virtualized set of PMCs and MMCRs
 * -virtual counter PMI is passed up as a breakpoint exception
 */
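
/*
 * Sketch of the virtualization (inferred from perfmon_read_counters and
 * perfmon_write_counters below, not an authoritative spec): each thread keeps
 * the low 31 bits of a counter in its savearea PMC image (sv->save_pmc[n])
 * and accumulates overflows in thread->machine.pmcovfl[n]; bit 0x80000000 of
 * the hardware PMC is the overflow/PMI trigger bit.
 */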
int perfmon_init(void)
{
	simple_lock_init(&hw_perfmon_lock, FALSE);
	return KERN_SUCCESS;
}
/* PMC Facility Owner:
 * TASK_NULL - no one owns it
 * kernel_task - owned by hw_perfmon
 * other task - owned by another task
 */
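
/*
 * Illustrative ownership transitions (a sketch inferred from the two functions
 * below): TASK_NULL -> task on any acquire; kernel_task -> task is allowed
 * only while hw_perfmon_thread_count is 0; any other owner makes the acquire
 * fail with KERN_RESOURCE_SHORTAGE, and only the current owner may release.
 */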
int perfmon_acquire_facility(task_t task)
{
	kern_return_t retval = KERN_SUCCESS;

	simple_lock(&hw_perfmon_lock);

	if(hw_perfmon_owner==task) {
#ifdef HWPERFMON_DEBUG
		kprintf("perfmon_acquire_facility - ACQUIRED: already owner\n");
#endif
		retval = KERN_SUCCESS;
	} else if(hw_perfmon_owner==TASK_NULL) { /* no one owns it */
		hw_perfmon_owner = task;
		hw_perfmon_thread_count = 0;
#ifdef HWPERFMON_DEBUG
		kprintf("perfmon_acquire_facility - ACQUIRED: no current owner - made new owner\n");
#endif
		retval = KERN_SUCCESS;
	} else { /* someone already owns it */
		if(hw_perfmon_owner==kernel_task) {
			if(hw_perfmon_thread_count==0) { /* kernel owns it but no threads using it */
				hw_perfmon_owner = task;
				hw_perfmon_thread_count = 0;
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_acquire_facility - ACQUIRED: kernel is current owner but no threads using it\n");
#endif
				retval = KERN_SUCCESS;
			} else { /* kernel owns it and threads are using it */
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_acquire_facility - DENIED: kernel is current owner and facility in use\n");
#endif
				retval = KERN_RESOURCE_SHORTAGE;
			}
		} else { /* non-kernel owner */
#ifdef HWPERFMON_DEBUG
			kprintf("perfmon_acquire_facility - DENIED: another active task owns the facility\n");
#endif
			retval = KERN_RESOURCE_SHORTAGE;
		}
	}

	simple_unlock(&hw_perfmon_lock);
	return retval;
}
int perfmon_release_facility(task_t task)
{
	kern_return_t retval = KERN_SUCCESS;
	task_t old_perfmon_owner = hw_perfmon_owner;

	simple_lock(&hw_perfmon_lock);

	if(task!=hw_perfmon_owner) {
		retval = KERN_NO_ACCESS;
	} else {
		if(old_perfmon_owner==kernel_task) {
			if(hw_perfmon_thread_count>0) {
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_release_facility - NOT RELEASED: kernel task is owner and has active perfmon threads\n");
#endif
				retval = KERN_NO_ACCESS;
			} else {
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_release_facility - RELEASED: kernel task was owner\n");
#endif
				hw_perfmon_owner = TASK_NULL;
				retval = KERN_SUCCESS;
			}
		} else {
#ifdef HWPERFMON_DEBUG
			kprintf("perfmon_release_facility - RELEASED: user task was owner\n");
#endif
			hw_perfmon_owner = TASK_NULL;
			retval = KERN_SUCCESS;
		}
	}

	simple_unlock(&hw_perfmon_lock);
	return retval;
}
int perfmon_enable(thread_t thread)
{
	struct savearea *sv = thread->machine.pcb;
	int curPMC;
	kern_return_t retval = KERN_SUCCESS;

	if(thread->machine.specFlags & perfMonitor) {
		return KERN_SUCCESS; /* already enabled */
	} else if(perfmon_acquire_facility(kernel_task)!=KERN_SUCCESS) {
		return KERN_RESOURCE_SHORTAGE; /* facility is in use */
	} else { /* kernel_task owns the facility and this thread has not yet been counted */
		simple_lock(&hw_perfmon_lock);
		hw_perfmon_thread_count++;
		simple_unlock(&hw_perfmon_lock);
	}

	sv->save_mmcr1 = 0;
	sv->save_mmcr2 = 0;

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = 0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = 0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

	if(retval==KERN_SUCCESS) {
		for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
			sv->save_pmc[curPMC] = 0;
			thread->machine.pmcovfl[curPMC] = 0;
		}
		thread->machine.perfmonFlags = 0;
		thread->machine.specFlags |= perfMonitor; /* enable perf monitor facility for this thread */
		if(thread==current_thread()) {
			getPerProc()->spcFlags |= perfMonitor; /* update per_proc */
		}
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_enable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
int perfmon_disable(thread_t thread)
{
	struct savearea *sv = thread->machine.pcb;
	int curPMC;

	if(!(thread->machine.specFlags & perfMonitor)) {
		return KERN_NO_ACCESS; /* not enabled */
	}

	simple_lock(&hw_perfmon_lock);
	hw_perfmon_thread_count--;
	simple_unlock(&hw_perfmon_lock);
	perfmon_release_facility(kernel_task); /* will release if hw_perfmon_thread_count is 0 */

	thread->machine.specFlags &= ~perfMonitor; /* disable perf monitor facility for this thread */
	if(thread==current_thread()) {
		PerProcTable[cpu_number()].ppe_vaddr->spcFlags &= ~perfMonitor; /* update per_proc */
	}
	sv->save_mmcr0 = 0;
	sv->save_mmcr1 = 0;
	sv->save_mmcr2 = 0;

	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		sv->save_pmc[curPMC] = 0;
		thread->machine.pmcovfl[curPMC] = 0;
		thread->machine.perfmonFlags = 0;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_disable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return KERN_SUCCESS;
}
int perfmon_clear_counters(thread_t thread)
{
	struct savearea *sv = thread->machine.pcb;
	int curPMC;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_clear_counters (CPU%d)\n", cpu_number());
#endif

	/* clear thread copy */
	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		sv->save_pmc[curPMC] = 0;
		thread->machine.pmcovfl[curPMC] = 0;
	}

	return KERN_SUCCESS;
}
int perfmon_write_counters(thread_t thread, uint64_t *pmcs)
{
	struct savearea *sv = thread->machine.pcb;
	int curPMC;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_write_counters (CPU%d): mmcr0 = %016llX, pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv->save_mmcr0, pmcs[PMC_1], pmcs[PMC_2], pmcs[PMC_3], pmcs[PMC_4], pmcs[PMC_5], pmcs[PMC_6], pmcs[PMC_7], pmcs[PMC_8]);
#endif

	/* update thread copy */
	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		sv->save_pmc[curPMC] = pmcs[curPMC] & 0x7FFFFFFF;
		thread->machine.pmcovfl[curPMC] = (pmcs[curPMC]>>31) & 0xFFFFFFFF;
	}

	return KERN_SUCCESS;
}
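
/*
 * Worked example of the split above (illustrative values, not from the
 * original source): a caller-supplied 64-bit count of 0x180000001 is stored
 * as save_pmc = 0x00000001 (low 31 bits) and pmcovfl = 0x3 (bits 62:31);
 * perfmon_read_counters() below reassembles the same 64-bit value via
 * (pmcovfl << 31) | save_pmc.
 */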
int perfmon_read_counters(thread_t thread, uint64_t *pmcs)
{
	struct savearea *sv = thread->machine.pcb;
	int curPMC;

	/* retrieve from thread copy */
	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		pmcs[curPMC] = thread->machine.pmcovfl[curPMC];
		pmcs[curPMC] = pmcs[curPMC]<<31;
		pmcs[curPMC] |= (sv->save_pmc[curPMC] & 0x7FFFFFFF);
	}

	/* zero any unused counters on this platform */
	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			pmcs[PMC_7] = 0;
			pmcs[PMC_8] = 0;
			break;
		default:
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_read_counters (CPU%d): mmcr0 = %016llX pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv->save_mmcr0, pmcs[PMC_1], pmcs[PMC_2], pmcs[PMC_3], pmcs[PMC_4], pmcs[PMC_5], pmcs[PMC_6], pmcs[PMC_7], pmcs[PMC_8]);
#endif

	return KERN_SUCCESS;
}
int perfmon_start_counters(thread_t thread)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = FALSE;
				/* XXXXX PMI broken on 750, 750CX, 750FX, 7400 and 7410 v1.2 and earlier XXXXX */
				mmcr0_reg.field.on_pmi_stop_counting = FALSE;
				mmcr0_reg.field.enable_pmi = FALSE;
				mmcr0_reg.field.enable_pmi_on_pmc1 = FALSE;
				mmcr0_reg.field.enable_pmi_on_pmcn = FALSE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = FALSE;
				mmcr0_reg.field.on_pmi_stop_counting = TRUE;
				mmcr0_reg.field.enable_pmi = TRUE;
				mmcr0_reg.field.enable_pmi_on_pmc1 = TRUE;
				mmcr0_reg.field.enable_pmi_on_pmcn = TRUE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;
				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = FALSE;
				mmcr0_reg.field.on_pmi_stop_counting = TRUE;
				mmcr0_reg.field.enable_pmi = TRUE;
				mmcr0_reg.field.enable_pmi_on_pmc1 = TRUE;
				mmcr0_reg.field.enable_pmi_on_pmcn = TRUE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_start_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
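
/*
 * Note (a sketch based on the cases above and on perfmon_handle_pmi below):
 * on the 750/7400-class parts the PMI is left disabled because of the erratum
 * flagged above, so counter overflow is not delivered by interrupt; on the
 * 7450 and 970 a PMI freezes the counters, and perfmon_handle_pmi folds the
 * overflow into pmcovfl[] and re-arms them.
 */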
int perfmon_stop_counters(thread_t thread)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;
				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_stop_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
int perfmon_set_event(thread_t thread, int pmc, int event)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event b4 (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc, event, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				ppc32_mmcr1_reg_t mmcr1_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr1_reg.value = sv->save_mmcr1;

				switch(pmc) {
					case PMC_1:
						mmcr0_reg.field.pmc1_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_2:
						mmcr0_reg.field.pmc2_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_3:
						mmcr1_reg.field.pmc3_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_4:
						mmcr1_reg.field.pmc4_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					default:
						retval = KERN_FAILURE;
						break;
				}
			}
			break;
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				ppc32_mmcr1_reg_t mmcr1_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr1_reg.value = sv->save_mmcr1;

				switch(pmc) {
					case PMC_1:
						mmcr0_reg.field.pmc1_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_2:
						mmcr0_reg.field.pmc2_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_3:
						mmcr1_reg.field.pmc3_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_4:
						mmcr1_reg.field.pmc4_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_5:
						mmcr1_reg.field.pmc5_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_6:
						mmcr1_reg.field.pmc6_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					default:
						retval = KERN_FAILURE;
						break;
				}
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;
				ppc64_mmcr1_reg_t mmcr1_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr1_reg.value = sv->save_mmcr1;

				switch(pmc) {
					case PMC_1:
						mmcr0_reg.field.pmc1_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_2:
						mmcr0_reg.field.pmc2_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_3:
						mmcr1_reg.field.pmc3_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_4:
						mmcr1_reg.field.pmc4_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_5:
						mmcr1_reg.field.pmc5_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_6:
						mmcr1_reg.field.pmc6_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_7:
						mmcr1_reg.field.pmc7_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_8:
						mmcr1_reg.field.pmc8_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					default:
						retval = KERN_FAILURE;
						break;
				}
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc, event, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
int perfmon_set_event_func(thread_t thread, uint32_t f)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event_func - func=%s\n",
		f==PPC_PERFMON_FUNC_FPU ? "FPU" :
		f==PPC_PERFMON_FUNC_ISU ? "ISU" :
		f==PPC_PERFMON_FUNC_IFU ? "IFU" :
		f==PPC_PERFMON_FUNC_VMX ? "VMX" :
		f==PPC_PERFMON_FUNC_IDU ? "IDU" :
		f==PPC_PERFMON_FUNC_GPS ? "GPS" :
		f==PPC_PERFMON_FUNC_LSU0 ? "LSU0" :
		f==PPC_PERFMON_FUNC_LSU1A ? "LSU1A" :
		f==PPC_PERFMON_FUNC_LSU1B ? "LSU1B" :
		f==PPC_PERFMON_FUNC_SPECA ? "SPECA" :
		f==PPC_PERFMON_FUNC_SPECB ? "SPECB" :
		f==PPC_PERFMON_FUNC_SPECC ? "SPECC" :
		"UNKNOWN");
#endif /* HWPERFMON_DEBUG */

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			retval = KERN_FAILURE; /* event functional unit only applies to 970 */
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr1_reg_t mmcr1_reg;
				ppc_func_unit_t func_unit;

				func_unit.value = f;
				mmcr1_reg.value = sv->save_mmcr1;

				mmcr1_reg.field.ttm0_select = func_unit.field.TTM0SEL;
				mmcr1_reg.field.ttm1_select = func_unit.field.TTM1SEL;
				mmcr1_reg.field.ttm2_select = 0; /* not used */
				mmcr1_reg.field.ttm3_select = func_unit.field.TTM3SEL;
				mmcr1_reg.field.speculative_event = func_unit.field.SPECSEL;
				mmcr1_reg.field.lane0_select = func_unit.field.TD_CP_DBGxSEL;
				mmcr1_reg.field.lane1_select = func_unit.field.TD_CP_DBGxSEL;
				mmcr1_reg.field.lane2_select = func_unit.field.TD_CP_DBGxSEL;
				mmcr1_reg.field.lane3_select = func_unit.field.TD_CP_DBGxSEL;

				sv->save_mmcr1 = mmcr1_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

	return retval;
}
int perfmon_set_threshold(thread_t thread, int threshold)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;

				if(threshold>63) { /* no multiplier on 750 */
					int newThreshold = 63;
#ifdef HWPERFMON_DEBUG
					kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
					threshold = newThreshold;
				}
				mmcr0_reg.field.threshold_value = threshold;

				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				ppc32_mmcr2_reg_t mmcr2_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr2_reg.value = sv->save_mmcr2;

				if(threshold<=(2*63)) { /* 2x multiplier */
					if(threshold%2 != 0) {
						int newThreshold = 2*(threshold/2);
#ifdef HWPERFMON_DEBUG
						kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 2x multiplier - using threshold of %d instead\n", threshold, newThreshold);
#endif
						threshold = newThreshold;
					}
					mmcr2_reg.field.threshold_multiplier = 0;
				} else if(threshold<=(32*63)) { /* 32x multiplier */
					if(threshold%32 != 0) {
						int newThreshold = 32*(threshold/32);
#ifdef HWPERFMON_DEBUG
						kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 32x multiplier - using threshold of %d instead\n", threshold, newThreshold);
#endif
						threshold = newThreshold;
					}
					mmcr2_reg.field.threshold_multiplier = 1;
				} else {
					int newThreshold = 32*63;
#ifdef HWPERFMON_DEBUG
					kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
					threshold = newThreshold;
					mmcr2_reg.field.threshold_multiplier = 1;
				}
				mmcr0_reg.field.threshold_value = threshold;

				sv->save_mmcr0 = mmcr0_reg.value;
				sv->save_mmcr2 = mmcr2_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;

				if(threshold>63) { /* multiplier is in HID1 on 970 - not context switching HID1 so always 1x */
					int newThreshold = 63;
#ifdef HWPERFMON_DEBUG
					kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
					threshold = newThreshold;
				}
				mmcr0_reg.field.threshold_value = threshold;

				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_threshold - threshold=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", threshold, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
int perfmon_set_tbsel(thread_t thread, int tbsel)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;

				switch(tbsel) {
					case 0x0:
					case 0x1:
					case 0x2:
					case 0x3:
						mmcr0_reg.field.timebase_bit_selector = tbsel;
						break;
					default:
						retval = KERN_FAILURE;
				}
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;

				switch(tbsel) {
					case 0x0:
					case 0x1:
					case 0x2:
					case 0x3:
						mmcr0_reg.field.timebase_bit_selector = tbsel;
						break;
					default:
						retval = KERN_FAILURE;
				}
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_tbsel - tbsel=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", tbsel, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
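
/*
 * Hypothetical call sequence for the dispatcher below (an illustrative sketch
 * only; user space reaches these entry points through the perfmon_control()
 * trap, with the thread port in r3 and the action code in r4):
 *
 *	perfmon_enable(thread);
 *	perfmon_set_event(thread, PMC_1, event);
 *	perfmon_set_threshold(thread, threshold);
 *	perfmon_start_counters(thread);
 *	...
 *	perfmon_stop_counters(thread);
 *	perfmon_read_counters(thread, pmcs);
 *	perfmon_disable(thread);
 */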
int perfmon_control(struct savearea *ssp)
{
	mach_port_t thr_port = CAST_DOWN(mach_port_t, ssp->save_r3);
	int action = (int)ssp->save_r4;
	int pmc = (int)ssp->save_r5;
	int val = (int)ssp->save_r6;
	uint64_t *usr_pmcs_p = CAST_DOWN(uint64_t *, ssp->save_r7);
	thread_t thread = THREAD_NULL;
	uint64_t kern_pmcs[MAX_CPUPMC_COUNT];
	kern_return_t retval = KERN_SUCCESS;
	int error;
	boolean_t oldlevel;

	thread = (thread_t) port_name_to_thread(thr_port); // convert user space thread port name to a thread_t
	if(thread==THREAD_NULL) {
		ssp->save_r3 = KERN_INVALID_ARGUMENT;
		return 1; /* Return and check for ASTs... */
	}

	if(thread!=current_thread()) {
		thread_suspend(thread);
	}

#ifdef HWPERFMON_DEBUG
	// kprintf("perfmon_control: action=0x%x pmc=%d val=%d pmcs=0x%x\n", action, pmc, val, usr_pmcs_p);
#endif

	oldlevel = ml_set_interrupts_enabled(FALSE);

	/* individual actions which do not require perfmon facility to be enabled */
	if(action==PPC_PERFMON_DISABLE) {
		retval = perfmon_disable(thread);
	}
	else if(action==PPC_PERFMON_ENABLE) {
		retval = perfmon_enable(thread);
	}

	else { /* individual actions which do require perfmon facility to be enabled */
		if(!(thread->machine.specFlags & perfMonitor)) { /* perfmon not enabled */
#ifdef HWPERFMON_DEBUG
			kprintf("perfmon_control: ERROR - perfmon not enabled for this thread\n");
#endif
			retval = KERN_NO_ACCESS;
			goto perfmon_return;
		}

		if(action==PPC_PERFMON_SET_EVENT) {
			retval = perfmon_set_event(thread, pmc, val);
		}
		else if(action==PPC_PERFMON_SET_THRESHOLD) {
			retval = perfmon_set_threshold(thread, val);
		}
		else if(action==PPC_PERFMON_SET_TBSEL) {
			retval = perfmon_set_tbsel(thread, val);
		}
		else if(action==PPC_PERFMON_SET_EVENT_FUNC) {
			retval = perfmon_set_event_func(thread, val);
		}
		else if(action==PPC_PERFMON_ENABLE_PMI_BRKPT) {
			if(val) {
				thread->machine.perfmonFlags |= PERFMONFLAG_BREAKPOINT_FOR_PMI;
			} else {
				thread->machine.perfmonFlags &= ~PERFMONFLAG_BREAKPOINT_FOR_PMI;
			}
			retval = KERN_SUCCESS;
		}

		/* combinable actions */
		else {
			if(action & PPC_PERFMON_STOP_COUNTERS) {
				error = perfmon_stop_counters(thread);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_CLEAR_COUNTERS) {
				error = perfmon_clear_counters(thread);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_WRITE_COUNTERS) {
				if((error = copyin(CAST_USER_ADDR_T(usr_pmcs_p), (void *)kern_pmcs, MAX_CPUPMC_COUNT*sizeof(uint64_t)))) {
					retval = error;
					goto perfmon_return;
				}
				error = perfmon_write_counters(thread, kern_pmcs);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_READ_COUNTERS) {
				error = perfmon_read_counters(thread, kern_pmcs);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
				if((error = copyout((void *)kern_pmcs, CAST_USER_ADDR_T(usr_pmcs_p), MAX_CPUPMC_COUNT*sizeof(uint64_t)))) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_START_COUNTERS) {
				error = perfmon_start_counters(thread);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
		}
	}

perfmon_return:
	ml_set_interrupts_enabled(oldlevel);

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_control (CPU%d): mmcr0 = %016llX, pmc1=%X pmc2=%X pmc3=%X pmc4=%X pmc5=%X pmc6=%X pmc7=%X pmc8=%X\n", cpu_number(), ssp->save_mmcr0, ssp->save_pmc[PMC_1], ssp->save_pmc[PMC_2], ssp->save_pmc[PMC_3], ssp->save_pmc[PMC_4], ssp->save_pmc[PMC_5], ssp->save_pmc[PMC_6], ssp->save_pmc[PMC_7], ssp->save_pmc[PMC_8]);
#endif

	if(thread!=current_thread()) {
		thread_resume(thread);
	}

#ifdef HWPERFMON_DEBUG
	if(retval!=KERN_SUCCESS) {
		kprintf("perfmon_control - ERROR: retval=%d\n", retval);
	}
#endif /* HWPERFMON_DEBUG */

	ssp->save_r3 = retval;
	return 1; /* Return and check for ASTs... */
}
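
/*
 * PMI handler (behavior summarized from the code below): a hardware PMC that
 * reaches bit 0x80000000 raises a PMI; the handler folds the overflow into
 * the thread's pmcovfl[] word, zeroes the hardware counter, and re-arms the
 * 7450/970 counters. Only when an overflow word saturates at 0xFFFFFFFF and
 * PERFMONFLAG_BREAKPOINT_FOR_PMI is set is the event passed up as an
 * EXC_BREAKPOINT/EXC_PPC_PERFMON exception.
 */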
int perfmon_handle_pmi(struct savearea *ssp)
{
	int curPMC;
	kern_return_t retval = KERN_SUCCESS;
	thread_t thread = current_thread();

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_handle_pmi: got rupt\n");
#endif

	if(!(thread->machine.specFlags & perfMonitor)) { /* perfmon not enabled */
#ifdef HWPERFMON_DEBUG
		kprintf("perfmon_handle_pmi: ERROR - perfmon not enabled for this thread\n");
#endif
		return KERN_FAILURE;
	}

	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		if(thread->machine.pcb->save_pmc[curPMC] & 0x80000000) {
			if(thread->machine.pmcovfl[curPMC]==0xFFFFFFFF && (thread->machine.perfmonFlags & PERFMONFLAG_BREAKPOINT_FOR_PMI)) {
				doexception(EXC_BREAKPOINT, EXC_PPC_PERFMON, (unsigned int)ssp->save_srr0); // pass up a breakpoint exception
				return KERN_SUCCESS;
			} else {
				thread->machine.pmcovfl[curPMC]++;
				thread->machine.pcb->save_pmc[curPMC] = 0;
			}
		}
	}

	if(retval==KERN_SUCCESS) {
		switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
			case CPU_SUBTYPE_POWERPC_7450:
				{
					ppc32_mmcr0_reg_t mmcr0_reg;

					mmcr0_reg.value = thread->machine.pcb->save_mmcr0;
					mmcr0_reg.field.disable_counters_always = FALSE;
					mmcr0_reg.field.enable_pmi = TRUE;
					thread->machine.pcb->save_mmcr0 = mmcr0_reg.value;
				}
				retval = KERN_SUCCESS;
				break;
			case CPU_SUBTYPE_POWERPC_970:
				{
					ppc64_mmcr0_reg_t mmcr0_reg;

					mmcr0_reg.value = thread->machine.pcb->save_mmcr0;
					mmcr0_reg.field.disable_counters_always = FALSE;
					mmcr0_reg.field.enable_pmi = TRUE;
					thread->machine.pcb->save_mmcr0 = mmcr0_reg.value;
				}
				retval = KERN_SUCCESS;
				break;
			default:
				retval = KERN_FAILURE;
				break;
		}
	}

	return retval;
}