/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

#include <kern/thread.h>
#include <ppc/exception.h>
#include <ppc/savearea.h>
#include <ppc/hw_perfmon.h>
#include <ppc/hw_perfmon_mmcr.h>

decl_simple_lock_data(,hw_perfmon_lock)
static task_t hw_perfmon_owner = TASK_NULL;
static int hw_perfmon_thread_count = 0;

/*
 * -supervisor/user level filtering is unnecessary because of the way PMCs and MMCRs are context switched
 *  (can only count user events anyway)
 * -marked filtering is unnecessary because each thread has its own virtualized set of PMCs and MMCRs
 * -virtual counter PMI is passed up as a breakpoint exception
 */

int perfmon_init(void)
{
	simple_lock_init(&hw_perfmon_lock, FALSE);
	return KERN_SUCCESS;
}

/* PMC Facility Owner:
 * TASK_NULL	- no one owns it
 * kernel_task	- owned by hw_perfmon
 * other task	- owned by another task
 */

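/*
 * Illustrative summary (added note, not in the original source) of the
 * ownership transitions implemented by perfmon_acquire_facility() and
 * perfmon_release_facility() below:
 *
 *   TASK_NULL   --acquire(task)--> task       (granted)
 *   kernel_task --acquire(task)--> task       (granted only if hw_perfmon_thread_count==0)
 *   other task  --acquire(task)--> unchanged  (KERN_RESOURCE_SHORTAGE)
 *   owner       --release(owner)-> TASK_NULL  (kernel_task releases only with no active threads)
 */
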
int perfmon_acquire_facility(task_t task)
{
	kern_return_t retval = KERN_SUCCESS;

	simple_lock(&hw_perfmon_lock);

	if(hw_perfmon_owner==task) {
#ifdef HWPERFMON_DEBUG
		kprintf("perfmon_acquire_facility - ACQUIRED: already owner\n");
#endif
		retval = KERN_SUCCESS;
	} else if(hw_perfmon_owner==TASK_NULL) { /* no one owns it */
		hw_perfmon_owner = task;
		hw_perfmon_thread_count = 0;
#ifdef HWPERFMON_DEBUG
		kprintf("perfmon_acquire_facility - ACQUIRED: no current owner - made new owner\n");
#endif
		retval = KERN_SUCCESS;
	} else { /* someone already owns it */
		if(hw_perfmon_owner==kernel_task) {
			if(hw_perfmon_thread_count==0) { /* kernel owns it but no threads using it */
				hw_perfmon_owner = task;
				hw_perfmon_thread_count = 0;
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_acquire_facility - ACQUIRED: kernel is current owner but no threads using it\n");
#endif
				retval = KERN_SUCCESS;
			} else { /* kernel owns it and facility is in use */
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_acquire_facility - DENIED: kernel is current owner and facility in use\n");
#endif
				retval = KERN_RESOURCE_SHORTAGE;
			}
		} else { /* non-kernel owner */
#ifdef HWPERFMON_DEBUG
			kprintf("perfmon_acquire_facility - DENIED: another active task owns the facility\n");
#endif
			retval = KERN_RESOURCE_SHORTAGE;
		}
	}

	simple_unlock(&hw_perfmon_lock);
	return retval;
}

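/*
 * Sketch (illustrative only; it mirrors what perfmon_enable()/perfmon_disable()
 * below actually do): the kernel task acquires the facility once on behalf of
 * all perfmon threads, and hw_perfmon_thread_count tracks how many threads are
 * using it so perfmon_release_facility() knows when it can really let go.
 */
#if 0
	if(perfmon_acquire_facility(kernel_task)==KERN_SUCCESS) {
		simple_lock(&hw_perfmon_lock);
		hw_perfmon_thread_count++;		/* one more thread using the PMCs */
		simple_unlock(&hw_perfmon_lock);
	}
	/* ... thread profiles for a while ... */
	simple_lock(&hw_perfmon_lock);
	hw_perfmon_thread_count--;
	simple_unlock(&hw_perfmon_lock);
	perfmon_release_facility(kernel_task);	/* refuses until the count reaches 0 */
#endif
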
int perfmon_release_facility(task_t task)
{
	kern_return_t retval = KERN_SUCCESS;
	task_t old_perfmon_owner = hw_perfmon_owner;

	simple_lock(&hw_perfmon_lock);

	if(task!=hw_perfmon_owner) {
		retval = KERN_NO_ACCESS;
	} else {
		if(old_perfmon_owner==kernel_task) {
			if(hw_perfmon_thread_count>0) {
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_release_facility - NOT RELEASED: kernel task is owner and has active perfmon threads\n");
#endif
				retval = KERN_NO_ACCESS;
			} else {
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_release_facility - RELEASED: kernel task was owner\n");
#endif
				hw_perfmon_owner = TASK_NULL;
				retval = KERN_SUCCESS;
			}
		} else {
#ifdef HWPERFMON_DEBUG
			kprintf("perfmon_release_facility - RELEASED: user task was owner\n");
#endif
			hw_perfmon_owner = TASK_NULL;
			retval = KERN_SUCCESS;
		}
	}

	simple_unlock(&hw_perfmon_lock);
	return retval;
}

int perfmon_enable(thread_t thread)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;
	int curPMC;

	if(thread->machine.specFlags & perfMonitor) {
		return KERN_SUCCESS; /* already enabled */
	} else if(perfmon_acquire_facility(kernel_task)!=KERN_SUCCESS) {
		return KERN_RESOURCE_SHORTAGE; /* facility is in use */
	} else { /* kernel_task owns the facility and this thread has not yet been counted */
		simple_lock(&hw_perfmon_lock);
		hw_perfmon_thread_count++;
		simple_unlock(&hw_perfmon_lock);
	}

	sv->save_mmcr1 = 0;
	sv->save_mmcr2 = 0;

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = 0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = 0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

	if(retval==KERN_SUCCESS) {
		for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
			sv->save_pmc[curPMC] = 0;
			thread->machine.pmcovfl[curPMC] = 0;
		}
		thread->machine.perfmonFlags = 0;
		thread->machine.specFlags |= perfMonitor; /* enable perf monitor facility for this thread */
		if(thread==current_thread()) {
			getPerProc()->spcFlags |= perfMonitor; /* update per_proc */
		}
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_enable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}

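/*
 * Sketch of the .value/.field overlay pattern used throughout this file (a
 * simplified assumption of the real ppc32_mmcr0_reg_t layout, which lives in
 * hw_perfmon_mmcr.h): the union lets the code flip individual MMCR0 control
 * bits by name and then store the raw 32-bit image back into the thread's
 * save area.
 */
#if 0
typedef union {
	uint32_t value;					/* raw MMCR0 image */
	struct {
		unsigned disable_counters_always : 1;	/* freeze all counters */
		unsigned disable_counters_supervisor : 1; /* freeze while in supervisor state */
		unsigned other_control_bits : 30;	/* remaining MMCR0 fields (elided here) */
	} field;
} mmcr0_sketch_t;
#endif
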
int perfmon_disable(thread_t thread)
{
	struct savearea *sv = thread->machine.pcb;
	int curPMC;

	if(!(thread->machine.specFlags & perfMonitor)) {
		return KERN_NO_ACCESS; /* not enabled */
	}

	simple_lock(&hw_perfmon_lock);
	hw_perfmon_thread_count--;
	simple_unlock(&hw_perfmon_lock);
	perfmon_release_facility(kernel_task); /* will release if hw_perfmon_thread_count is 0 */

	thread->machine.specFlags &= ~perfMonitor; /* disable perf monitor facility for this thread */
	if(thread==current_thread()) {
		PerProcTable[cpu_number()].ppe_vaddr->spcFlags &= ~perfMonitor; /* update per_proc */
	}
	sv->save_mmcr0 = 0;
	sv->save_mmcr1 = 0;
	sv->save_mmcr2 = 0;

	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		sv->save_pmc[curPMC] = 0;
		thread->machine.pmcovfl[curPMC] = 0;
	}
	thread->machine.perfmonFlags = 0;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_disable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return KERN_SUCCESS;
}

int perfmon_clear_counters(thread_t thread)
{
	struct savearea *sv = thread->machine.pcb;
	int curPMC;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_clear_counters (CPU%d)\n", cpu_number());
#endif

	/* clear thread copy */
	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		sv->save_pmc[curPMC] = 0;
		thread->machine.pmcovfl[curPMC] = 0;
	}

	return KERN_SUCCESS;
}

int perfmon_write_counters(thread_t thread, uint64_t *pmcs)
{
	struct savearea *sv = thread->machine.pcb;
	int curPMC;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_write_counters (CPU%d): mmcr0 = %016llX, pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv->save_mmcr0, pmcs[PMC_1], pmcs[PMC_2], pmcs[PMC_3], pmcs[PMC_4], pmcs[PMC_5], pmcs[PMC_6], pmcs[PMC_7], pmcs[PMC_8]);
#endif

	/* update thread copy */
	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		sv->save_pmc[curPMC] = pmcs[curPMC] & 0x7FFFFFFF;
		thread->machine.pmcovfl[curPMC] = (pmcs[curPMC]>>31) & 0xFFFFFFFF;
	}

	return KERN_SUCCESS;
}

int perfmon_read_counters(thread_t thread, uint64_t *pmcs)
{
	struct savearea *sv = thread->machine.pcb;
	int curPMC;

	/* retrieve from thread copy */
	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		pmcs[curPMC] = thread->machine.pmcovfl[curPMC];
		pmcs[curPMC] = pmcs[curPMC]<<31;
		pmcs[curPMC] |= (sv->save_pmc[curPMC] & 0x7FFFFFFF);
	}

	/* zero any unused counters on this platform */
	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			pmcs[PMC_7] = 0;
			pmcs[PMC_8] = 0;
			break;
		default:
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_read_counters (CPU%d): mmcr0 = %016llX pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv->save_mmcr0, pmcs[PMC_1], pmcs[PMC_2], pmcs[PMC_3], pmcs[PMC_4], pmcs[PMC_5], pmcs[PMC_6], pmcs[PMC_7], pmcs[PMC_8]);
#endif

	return KERN_SUCCESS;
}

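/*
 * Minimal sketch (hypothetical helper, not part of this file) of the packing
 * that perfmon_read_counters() performs inline above; it is the exact inverse
 * of the split done in perfmon_write_counters().
 */
#if 0
static uint64_t pmc_pack(uint32_t ovfl, uint32_t pmc31)
{
	return (((uint64_t)ovfl) << 31) | (pmc31 & 0x7FFFFFFF);
}
#endif
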
int perfmon_start_counters(thread_t thread)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = FALSE;
				/* XXXXX PMI broken on 750, 750CX, 750FX, 7400 and 7410 v1.2 and earlier XXXXX */
				mmcr0_reg.field.on_pmi_stop_counting = FALSE;
				mmcr0_reg.field.enable_pmi = FALSE;
				mmcr0_reg.field.enable_pmi_on_pmc1 = FALSE;
				mmcr0_reg.field.enable_pmi_on_pmcn = FALSE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = FALSE;
				mmcr0_reg.field.on_pmi_stop_counting = TRUE;
				mmcr0_reg.field.enable_pmi = TRUE;
				mmcr0_reg.field.enable_pmi_on_pmc1 = TRUE;
				mmcr0_reg.field.enable_pmi_on_pmcn = TRUE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;
				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = FALSE;
				mmcr0_reg.field.on_pmi_stop_counting = TRUE;
				mmcr0_reg.field.enable_pmi = TRUE;
				mmcr0_reg.field.enable_pmi_on_pmc1 = TRUE;
				mmcr0_reg.field.enable_pmi_on_pmcn = TRUE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_start_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}

int perfmon_stop_counters(thread_t thread)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;
				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_stop_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}

int perfmon_set_event(thread_t thread, int pmc, int event)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event b4 (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc, event, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				ppc32_mmcr1_reg_t mmcr1_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr1_reg.value = sv->save_mmcr1;

				switch(pmc) {
					case PMC_1:
						mmcr0_reg.field.pmc1_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_2:
						mmcr0_reg.field.pmc2_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_3:
						mmcr1_reg.field.pmc3_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_4:
						mmcr1_reg.field.pmc4_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					default:
						retval = KERN_FAILURE;
						break;
				}
			}
			break;
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				ppc32_mmcr1_reg_t mmcr1_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr1_reg.value = sv->save_mmcr1;

				switch(pmc) {
					case PMC_1:
						mmcr0_reg.field.pmc1_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_2:
						mmcr0_reg.field.pmc2_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_3:
						mmcr1_reg.field.pmc3_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_4:
						mmcr1_reg.field.pmc4_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_5:
						mmcr1_reg.field.pmc5_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_6:
						mmcr1_reg.field.pmc6_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					default:
						retval = KERN_FAILURE;
						break;
				}
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;
				ppc64_mmcr1_reg_t mmcr1_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr1_reg.value = sv->save_mmcr1;

				switch(pmc) {
					case PMC_1:
						mmcr0_reg.field.pmc1_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_2:
						mmcr0_reg.field.pmc2_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_3:
						mmcr1_reg.field.pmc3_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_4:
						mmcr1_reg.field.pmc4_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_5:
						mmcr1_reg.field.pmc5_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_6:
						mmcr1_reg.field.pmc6_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_7:
						mmcr1_reg.field.pmc7_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_8:
						mmcr1_reg.field.pmc8_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					default:
						retval = KERN_FAILURE;
						break;
				}
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc, event, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}

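/*
 * Usage sketch (illustrative; the event number 5 is an arbitrary placeholder):
 * route an event onto the second counter -- the CPU-subtype switch above picks
 * whether that PMC is backed by an MMCR0 or an MMCR1 field.
 */
#if 0
	retval = perfmon_set_event(thread, PMC_2, 5);
#endif
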
int perfmon_set_event_func(thread_t thread, uint32_t f)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event_func - func=%s\n",
		f==PPC_PERFMON_FUNC_FPU ? "FPU" :
		f==PPC_PERFMON_FUNC_ISU ? "ISU" :
		f==PPC_PERFMON_FUNC_IFU ? "IFU" :
		f==PPC_PERFMON_FUNC_VMX ? "VMX" :
		f==PPC_PERFMON_FUNC_IDU ? "IDU" :
		f==PPC_PERFMON_FUNC_GPS ? "GPS" :
		f==PPC_PERFMON_FUNC_LSU0 ? "LSU0" :
		f==PPC_PERFMON_FUNC_LSU1A ? "LSU1A" :
		f==PPC_PERFMON_FUNC_LSU1B ? "LSU1B" :
		f==PPC_PERFMON_FUNC_SPECA ? "SPECA" :
		f==PPC_PERFMON_FUNC_SPECB ? "SPECB" :
		f==PPC_PERFMON_FUNC_SPECC ? "SPECC" :
		"UNKNOWN");
#endif /* HWPERFMON_DEBUG */

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			retval = KERN_FAILURE; /* event functional unit only applies to 970 */
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr1_reg_t mmcr1_reg;
				ppc_func_unit_t func_unit;

				func_unit.value = f;
				mmcr1_reg.value = sv->save_mmcr1;

				mmcr1_reg.field.ttm0_select = func_unit.field.TTM0SEL;
				mmcr1_reg.field.ttm1_select = func_unit.field.TTM1SEL;
				mmcr1_reg.field.ttm2_select = 0; /* not used */
				mmcr1_reg.field.ttm3_select = func_unit.field.TTM3SEL;
				mmcr1_reg.field.speculative_event = func_unit.field.SPECSEL;
				mmcr1_reg.field.lane0_select = func_unit.field.TD_CP_DBGxSEL;
				mmcr1_reg.field.lane1_select = func_unit.field.TD_CP_DBGxSEL;
				mmcr1_reg.field.lane2_select = func_unit.field.TD_CP_DBGxSEL;
				mmcr1_reg.field.lane3_select = func_unit.field.TD_CP_DBGxSEL;

				sv->save_mmcr1 = mmcr1_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

	return retval;
}

int perfmon_set_threshold(thread_t thread, int threshold)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;

				if(threshold>63) { /* no multiplier on 750 */
					int newThreshold = 63;
#ifdef HWPERFMON_DEBUG
					kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
					threshold = newThreshold;
				}
				mmcr0_reg.field.threshold_value = threshold;

				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				ppc32_mmcr2_reg_t mmcr2_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr2_reg.value = sv->save_mmcr2;

				if(threshold<=(2*63)) { /* 2x multiplier */
					if(threshold%2 != 0) {
						int newThreshold = 2*(threshold/2);
#ifdef HWPERFMON_DEBUG
						kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 2x multiplier - using threshold of %d instead\n", threshold, newThreshold);
#endif
						threshold = newThreshold;
					}
					mmcr2_reg.field.threshold_multiplier = 0;
				} else if(threshold<=(32*63)) { /* 32x multiplier */
					if(threshold%32 != 0) {
						int newThreshold = 32*(threshold/32);
#ifdef HWPERFMON_DEBUG
						kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 32x multiplier - using threshold of %d instead\n", threshold, newThreshold);
#endif
						threshold = newThreshold;
					}
					mmcr2_reg.field.threshold_multiplier = 1;
				} else {
					int newThreshold = 32*63;
#ifdef HWPERFMON_DEBUG
					kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
					threshold = newThreshold;
					mmcr2_reg.field.threshold_multiplier = 1;
				}
				mmcr0_reg.field.threshold_value = threshold;

				sv->save_mmcr0 = mmcr0_reg.value;
				sv->save_mmcr2 = mmcr2_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;

				if(threshold>63) { /* multiplier is in HID1 on 970 - not context switching HID1 so always 1x */
					int newThreshold = 63;
#ifdef HWPERFMON_DEBUG
					kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
					threshold = newThreshold;
				}
				mmcr0_reg.field.threshold_value = threshold;

				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_threshold - threshold=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", threshold, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}

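/*
 * Worked example (added): on a 7400/7450, threshold=100 falls in the 2x range
 * (100 <= 2*63 == 126) and is already even, so it is programmed as-is with
 * threshold_multiplier=0; threshold=200 falls in the 32x range (200 <= 32*63)
 * and rounds down to 32*(200/32) == 192 with threshold_multiplier=1.
 */
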
int perfmon_set_tbsel(thread_t thread, int tbsel)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;

				switch(tbsel) {
					case 0x0:
					case 0x1:
					case 0x2:
					case 0x3:
						mmcr0_reg.field.timebase_bit_selector = tbsel;
						break;
					default:
						retval = KERN_FAILURE;
				}
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;

				switch(tbsel) {
					case 0x0:
					case 0x1:
					case 0x2:
					case 0x3:
						mmcr0_reg.field.timebase_bit_selector = tbsel;
						break;
					default:
						retval = KERN_FAILURE;
				}
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_tbsel - tbsel=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", tbsel, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}

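/*
 * Background note (added): the time base selector is a 2-bit MMCR0 field that
 * picks which time base bit drives time-based counting events, which is why
 * only the four encodings 0x0-0x3 are accepted above.
 */
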
int perfmon_control(struct savearea *ssp)
{
	mach_port_t thr_port = CAST_DOWN(mach_port_t, ssp->save_r3);
	int action = (int)ssp->save_r4;
	int pmc = (int)ssp->save_r5;
	int val = (int)ssp->save_r6;
	uint64_t *usr_pmcs_p = CAST_DOWN(uint64_t *, ssp->save_r7);
	thread_t thread = THREAD_NULL;
	uint64_t kern_pmcs[MAX_CPUPMC_COUNT];
	kern_return_t retval = KERN_SUCCESS;
	int error;
	boolean_t oldlevel;

	thread = (thread_t) port_name_to_thread(thr_port); // convert user space thread port name to a thread_t
	if(thread==THREAD_NULL) {
		ssp->save_r3 = KERN_INVALID_ARGUMENT;
		return 1; /* Return and check for ASTs... */
	}

	if(thread!=current_thread()) {
		thread_suspend(thread);
	}

#ifdef HWPERFMON_DEBUG
	// kprintf("perfmon_control: action=0x%x pmc=%d val=%d pmcs=0x%x\n", action, pmc, val, usr_pmcs_p);
#endif

	oldlevel = ml_set_interrupts_enabled(FALSE);

	/* individual actions which do not require perfmon facility to be enabled */
	if(action==PPC_PERFMON_DISABLE) {
		retval = perfmon_disable(thread);
	}
	else if(action==PPC_PERFMON_ENABLE) {
		retval = perfmon_enable(thread);
	}

	else { /* individual actions which do require perfmon facility to be enabled */
		if(!(thread->machine.specFlags & perfMonitor)) { /* perfmon not enabled */
#ifdef HWPERFMON_DEBUG
			kprintf("perfmon_control: ERROR - perfmon not enabled for this thread\n");
#endif
			retval = KERN_NO_ACCESS;
			goto perfmon_return;
		}

		if(action==PPC_PERFMON_SET_EVENT) {
			retval = perfmon_set_event(thread, pmc, val);
		}
		else if(action==PPC_PERFMON_SET_THRESHOLD) {
			retval = perfmon_set_threshold(thread, val);
		}
		else if(action==PPC_PERFMON_SET_TBSEL) {
			retval = perfmon_set_tbsel(thread, val);
		}
		else if(action==PPC_PERFMON_SET_EVENT_FUNC) {
			retval = perfmon_set_event_func(thread, val);
		}
		else if(action==PPC_PERFMON_ENABLE_PMI_BRKPT) {
			if(val) {
				thread->machine.perfmonFlags |= PERFMONFLAG_BREAKPOINT_FOR_PMI;
			} else {
				thread->machine.perfmonFlags &= ~PERFMONFLAG_BREAKPOINT_FOR_PMI;
			}
			retval = KERN_SUCCESS;
		}

		/* combinable actions */
		else {
			if(action & PPC_PERFMON_STOP_COUNTERS) {
				error = perfmon_stop_counters(thread);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_CLEAR_COUNTERS) {
				error = perfmon_clear_counters(thread);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_WRITE_COUNTERS) {
				if(error = copyin(CAST_USER_ADDR_T(usr_pmcs_p), (void *)kern_pmcs, MAX_CPUPMC_COUNT*sizeof(uint64_t))) {
					retval = error;
					goto perfmon_return;
				}
				error = perfmon_write_counters(thread, kern_pmcs);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_READ_COUNTERS) {
				error = perfmon_read_counters(thread, kern_pmcs);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
				if(error = copyout((void *)kern_pmcs, CAST_USER_ADDR_T(usr_pmcs_p), MAX_CPUPMC_COUNT*sizeof(uint64_t))) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_START_COUNTERS) {
				error = perfmon_start_counters(thread);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
		}
	}

perfmon_return:
	ml_set_interrupts_enabled(oldlevel);

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_control (CPU%d): mmcr0 = %016llX, pmc1=%X pmc2=%X pmc3=%X pmc4=%X pmc5=%X pmc6=%X pmc7=%X pmc8=%X\n", cpu_number(), ssp->save_mmcr0, ssp->save_pmc[PMC_1], ssp->save_pmc[PMC_2], ssp->save_pmc[PMC_3], ssp->save_pmc[PMC_4], ssp->save_pmc[PMC_5], ssp->save_pmc[PMC_6], ssp->save_pmc[PMC_7], ssp->save_pmc[PMC_8]);
#endif

	if(thread!=current_thread()) {
		thread_resume(thread);
	}

#ifdef HWPERFMON_DEBUG
	if(retval!=KERN_SUCCESS) {
		kprintf("perfmon_control - ERROR: retval=%d\n", retval);
	}
#endif /* HWPERFMON_DEBUG */

	ssp->save_r3 = retval;
	return 1; /* Return and check for ASTs... */
}

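/*
 * Note (added): perfmon_control() is entered from the PPC trap path with its
 * arguments still in the save area -- r3 = thread port name, r4 = action,
 * r5 = PMC number, r6 = value, r7 = user-space buffer of MAX_CPUPMC_COUNT
 * uint64_t counters -- and always returns 1 so the caller checks for ASTs.
 */
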
int perfmon_handle_pmi(struct savearea *ssp)
{
	int curPMC;
	kern_return_t retval = KERN_SUCCESS;
	thread_t thread = current_thread();

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_handle_pmi: got rupt\n");
#endif

	if(!(thread->machine.specFlags & perfMonitor)) { /* perfmon not enabled */
#ifdef HWPERFMON_DEBUG
		kprintf("perfmon_handle_pmi: ERROR - perfmon not enabled for this thread\n");
#endif
		return KERN_FAILURE;
	}

	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		if(thread->machine.pcb->save_pmc[curPMC] & 0x80000000) {
			if(thread->machine.pmcovfl[curPMC]==0xFFFFFFFF && (thread->machine.perfmonFlags & PERFMONFLAG_BREAKPOINT_FOR_PMI)) {
				doexception(EXC_BREAKPOINT, EXC_PPC_PERFMON, (unsigned int)ssp->save_srr0); // pass up a breakpoint exception
				return KERN_SUCCESS;
			}
			else {
				thread->machine.pmcovfl[curPMC]++;
				thread->machine.pcb->save_pmc[curPMC] = 0;
			}
		}
	}

	if(retval==KERN_SUCCESS) {
		switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
			case CPU_SUBTYPE_POWERPC_7450:
				{
					ppc32_mmcr0_reg_t mmcr0_reg;

					mmcr0_reg.value = thread->machine.pcb->save_mmcr0;
					mmcr0_reg.field.disable_counters_always = FALSE;
					mmcr0_reg.field.enable_pmi = TRUE;
					thread->machine.pcb->save_mmcr0 = mmcr0_reg.value;
				}
				retval = KERN_SUCCESS;
				break;
			case CPU_SUBTYPE_POWERPC_970:
				{
					ppc64_mmcr0_reg_t mmcr0_reg;

					mmcr0_reg.value = thread->machine.pcb->save_mmcr0;
					mmcr0_reg.field.disable_counters_always = FALSE;
					mmcr0_reg.field.enable_pmi = TRUE;
					thread->machine.pcb->save_mmcr0 = mmcr0_reg.value;
				}
				retval = KERN_SUCCESS;
				break;
			default:
				retval = KERN_FAILURE;
				break;
		}
	}

	return retval;
}

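/*
 * Flow summary (added) for the virtual PMI above: when a hardware PMC
 * overflows (bit 31 set), the handler absorbs the carry into the per-thread
 * pmcovfl[] word and restarts the hardware counter at zero; only when the
 * overflow word itself has saturated at 0xFFFFFFFF -- and the thread opted in
 * via PERFMONFLAG_BREAKPOINT_FOR_PMI -- is the event delivered to user space
 * as an EXC_BREAKPOINT/EXC_PPC_PERFMON exception.
 */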
;