/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/std_types.h>
#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/perfmon.h>
#include <i386/proc_reg.h>
#include <i386/cpu_threads.h>
#include <i386/cpuid.h>
#include <i386/lock.h>
#include <vm/vm_kern.h>
#include <kern/task.h>
#define DBG(x...)	kprintf(x)
static decl_simple_lock_data(,pmc_lock)
static task_t		pmc_owner = TASK_NULL;
static int		pmc_thread_count = 0;
static boolean_t	pmc_inited = FALSE;
/* PMC Facility Owner:
 *	TASK_NULL	- no one owns it
 *	kernel_task	- owned by the kernel's pmc clients
 *	other task	- owned by another (user) task
 */
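/*
 * A minimal usage sketch for an in-kernel client (illustrative only;
 * my_ovf_func is a hypothetical handler, not something defined here):
 *
 *	if (pmc_acquire(kernel_task) == KERN_SUCCESS &&
 *	    pmc_reserve(0) == KERN_SUCCESS) {
 *		(void) pmc_set_ovf_func(0, my_ovf_func);
 *		... program and sample the counter ...
 *		(void) pmc_free(0);
 *		(void) pmc_release(kernel_task);
 *	}
 */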
/*
 * Table of ESCRs and addresses associated with performance counters/CCCRs.
 * See Intel SDM Vol 3, Table 15-4 (section 15.9):
 */
static uint16_t pmc_escr_addr_table[18][8] = {
	[MSR_BPU_COUNTER0] = {
		[MSR_BSU_ESCR0]		= 0x3a0,
		[MSR_FSB_ESCR0]		= 0x3a2,
		[MSR_MOB_ESCR0]		= 0x3aa,
		[MSR_PMH_ESCR0]		= 0x3ac,
		[MSR_BPU_ESCR0]		= 0x3b2,
		[MSR_ITLB_ESCR0]	= 0x3b6,
	},
	[MSR_BPU_COUNTER1] = {
		[MSR_BSU_ESCR0]		= 0x3a0,
		[MSR_FSB_ESCR0]		= 0x3a2,
		[MSR_MOB_ESCR0]		= 0x3aa,
		[MSR_PMH_ESCR0]		= 0x3ac,
		[MSR_BPU_ESCR0]		= 0x3b2,
		[MSR_ITLB_ESCR0]	= 0x3b6,
	},
	[MSR_BPU_COUNTER2] = {
		[MSR_BSU_ESCR1]		= 0x3a1,
		[MSR_FSB_ESCR1]		= 0x3a3,
		[MSR_MOB_ESCR1]		= 0x3ab,
		[MSR_PMH_ESCR1]		= 0x3ad,
		[MSR_BPU_ESCR1]		= 0x3b3,
		[MSR_ITLB_ESCR1]	= 0x3b7,
	},
	[MSR_BPU_COUNTER3] = {
		[MSR_BSU_ESCR1]		= 0x3a1,
		[MSR_FSB_ESCR1]		= 0x3a3,
		[MSR_MOB_ESCR1]		= 0x3ab,
		[MSR_PMH_ESCR1]		= 0x3ad,
		[MSR_BPU_ESCR1]		= 0x3b3,
		[MSR_ITLB_ESCR1]	= 0x3b7,
		[MSR_IX_ESCR1]		= 0x3c9,
	},
	[MSR_MS_COUNTER0] = {
		[MSR_MS_ESCR1]		= 0x3c1,
		[MSR_TBPU_ESCR1]	= 0x3c3,
		[MSR_TC_ESCR1]		= 0x3c5,
	},
	[MSR_MS_COUNTER1] = {
		[MSR_MS_ESCR1]		= 0x3c1,
		[MSR_TBPU_ESCR1]	= 0x3c3,
		[MSR_TC_ESCR1]		= 0x3c5,
	},
	[MSR_MS_COUNTER2] = {
		[MSR_MS_ESCR1]		= 0x3c1,
		[MSR_TBPU_ESCR1]	= 0x3c3,
		[MSR_TC_ESCR1]		= 0x3c5,
	},
	[MSR_MS_COUNTER3] = {
		[MSR_MS_ESCR1]		= 0x3c1,
		[MSR_TBPU_ESCR1]	= 0x3c3,
		[MSR_TC_ESCR1]		= 0x3c5,
	},
	[MSR_FLAME_COUNTER0] = {
		[MSR_FIRM_ESCR0]	= 0x3a4,
		[MSR_FLAME_ESCR0]	= 0x3a6,
		[MSR_DAC_ESCR0]		= 0x3a8,
		[MSR_SAT_ESCR0]		= 0x3ae,
		[MSR_U2L_ESCR0]		= 0x3b0,
	},
	[MSR_FLAME_COUNTER1] = {
		[MSR_FIRM_ESCR0]	= 0x3a4,
		[MSR_FLAME_ESCR0]	= 0x3a6,
		[MSR_DAC_ESCR0]		= 0x3a8,
		[MSR_SAT_ESCR0]		= 0x3ae,
		[MSR_U2L_ESCR0]		= 0x3b0,
	},
	[MSR_FLAME_COUNTER2] = {
		[MSR_FIRM_ESCR1]	= 0x3a5,
		[MSR_FLAME_ESCR1]	= 0x3a7,
		[MSR_DAC_ESCR1]		= 0x3a9,
		[MSR_SAT_ESCR1]		= 0x3af,
		[MSR_U2L_ESCR1]		= 0x3b1,
	},
	[MSR_FLAME_COUNTER3] = {
		[MSR_FIRM_ESCR1]	= 0x3a5,
		[MSR_FLAME_ESCR1]	= 0x3a7,
		[MSR_DAC_ESCR1]		= 0x3a9,
		[MSR_SAT_ESCR1]		= 0x3af,
		[MSR_U2L_ESCR1]		= 0x3b1,
	},
	[MSR_IQ_COUNTER0] = {
		[MSR_CRU_ESCR0]		= 0x3b8,
		[MSR_CRU_ESCR2]		= 0x3cc,
		[MSR_CRU_ESCR4]		= 0x3e0,
		[MSR_IQ_ESCR0]		= 0x3ba,
		[MSR_RAT_ESCR0]		= 0x3bc,
		[MSR_SSU_ESCR0]		= 0x3be,
		[MSR_AFL_ESCR0]		= 0x3ca,
	},
	[MSR_IQ_COUNTER1] = {
		[MSR_CRU_ESCR0]		= 0x3b8,
		[MSR_CRU_ESCR2]		= 0x3cc,
		[MSR_CRU_ESCR4]		= 0x3e0,
		[MSR_IQ_ESCR0]		= 0x3ba,
		[MSR_RAT_ESCR0]		= 0x3bc,
		[MSR_SSU_ESCR0]		= 0x3be,
		[MSR_AFL_ESCR0]		= 0x3ca,
	},
	[MSR_IQ_COUNTER2] = {
		[MSR_CRU_ESCR1]		= 0x3b9,
		[MSR_CRU_ESCR3]		= 0x3cd,
		[MSR_CRU_ESCR5]		= 0x3e1,
		[MSR_IQ_ESCR1]		= 0x3bb,
		[MSR_RAT_ESCR1]		= 0x3bd,
		[MSR_AFL_ESCR1]		= 0x3cb,
	},
	[MSR_IQ_COUNTER3] = {
		[MSR_CRU_ESCR1]		= 0x3b9,
		[MSR_CRU_ESCR3]		= 0x3cd,
		[MSR_CRU_ESCR5]		= 0x3e1,
		[MSR_IQ_ESCR1]		= 0x3bb,
		[MSR_RAT_ESCR1]		= 0x3bd,
		[MSR_AFL_ESCR1]		= 0x3cb,
	},
	[MSR_IQ_COUNTER4] = {
		[MSR_CRU_ESCR0]		= 0x3b8,
		[MSR_CRU_ESCR2]		= 0x3cc,
		[MSR_CRU_ESCR4]		= 0x3e0,
		[MSR_IQ_ESCR0]		= 0x3ba,
		[MSR_RAT_ESCR0]		= 0x3bc,
		[MSR_SSU_ESCR0]		= 0x3be,
		[MSR_AFL_ESCR0]		= 0x3ca,
	},
	[MSR_IQ_COUNTER5] = {
		[MSR_CRU_ESCR1]		= 0x3b9,
		[MSR_CRU_ESCR3]		= 0x3cd,
		[MSR_CRU_ESCR5]		= 0x3e1,
		[MSR_IQ_ESCR1]		= 0x3bb,
		[MSR_RAT_ESCR1]		= 0x3bd,
		[MSR_AFL_ESCR1]		= 0x3cb,
	},
};
#define PMC_ESCR_ADDR(id, esid)	pmc_escr_addr_table[id][esid]
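/*
 * For example, PMC_ESCR_ADDR(MSR_FLAME_COUNTER0, MSR_DAC_ESCR0) yields
 * 0x3a8 (the DAC_ESCR0 MSR), while counter/ESCR-select pairs with no
 * table entry yield 0.
 */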
typedef struct {
	pmc_id_t	id_max;			/* Maximum counter id */
	pmc_machine_t	machine_type;		/* P6 or P4/Xeon */
	uint32_t	msr_counter_base;	/* First counter MSR */
	uint32_t	msr_control_base;	/* First control MSR */
	union {
		struct {
			boolean_t	reserved[2];
			pmc_ovf_func_t	*ovf_func[2];
		} P6;
		struct {
			boolean_t	reserved[2];
			pmc_ovf_func_t	*ovf_func[2];
			uint32_t	msr_global_ctrl;
			uint32_t	msr_global_ovf_ctrl;
			uint32_t	msr_global_status;
		} Core;
		struct {
			boolean_t	reserved[18];
			pmc_ovf_func_t	*ovf_func[18];
			pmc_cccr_t	cccr_shadow[18];	/* Last cccr set */
			pmc_counter_t	counter_shadow[18];	/* Last counter set */
			uint32_t	ovfs_unexpected[18];	/* Unexpected intrs */
		} P4;
	};
} pmc_table_t;
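/*
 * The machine_type field selects which arm of the union is meaningful:
 * P4.* state is used only when machine_type == pmc_P4_Xeon, P6.* only
 * for pmc_P6, and Core.* (including the global control/status MSRs)
 * only for pmc_Core.
 */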
static pmc_machine_t
_pmc_machine_type(void)
{
	i386_cpu_info_t	*infop = cpuid_info();

	if (strncmp(infop->cpuid_vendor, CPUID_VID_INTEL,
		    sizeof(CPUID_VID_INTEL)) != 0)
		return pmc_none;

	if (!pmc_is_available())
		return pmc_none;

	switch (infop->cpuid_family) {
	case 0x6:
		switch (infop->cpuid_model) {
void
pmc_p4_intr(void *state)
{
	pmc_table_t	*pmc_table = (pmc_table_t *) x86_core()->pmc;
	pmc_id_t	id;
	pmc_cccr_t	cccr;
	uint32_t	cccr_addr;
	int		my_logical_cpu = cpu_to_logical_cpu(cpu_number());

	/*
	 * Scan through table for reserved counters with overflow and
	 * with a registered overflow function.
	 */
	for (id = 0; id <= pmc_table->id_max; id++) {
		if (!pmc_table->P4.reserved[id])
			continue;
		cccr_addr = pmc_table->msr_control_base + id;
		cccr.u_u64 = rdmsr64(cccr_addr);

		pmc_table->P4.cccr_shadow[id] = cccr;
		pmc_table->P4.counter_shadow[id].u64 =
			rdmsr64(pmc_table->msr_counter_base + id);

		if (cccr.u_htt.ovf == 0)
			continue;
		if ((cccr.u_htt.ovf_pmi_t0 == 1 && my_logical_cpu == 0) ||
		    (cccr.u_htt.ovf_pmi_t1 == 1 && my_logical_cpu == 1)) {
			if (pmc_table->P4.ovf_func[id]) {
				(*pmc_table->P4.ovf_func[id])(id, state);
				/* func expected to clear overflow */
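				/*
				 * A minimal sketch of such a function
				 * (my_ovf_func is a hypothetical client
				 * handler registered via pmc_set_ovf_func()):
				 *
				 *	void
				 *	my_ovf_func(pmc_id_t id, __unused void *state)
				 *	{
				 *		pmc_cccr_t my_cccr;
				 *
				 *		(void) pmc_cccr_read(id, &my_cccr);
				 *		my_cccr.u_htt.ovf = 0;
				 *		(void) pmc_cccr_write(id, &my_cccr);
				 *	}
				 */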
				continue;
			}
		}
		/* Clear overflow for unexpected interrupt */
		cccr.u_htt.ovf = 0;
		wrmsr64(cccr_addr, cccr.u_u64);
		pmc_table->P4.ovfs_unexpected[id]++;
	}
}
void
pmc_p6_intr(void *state)
{
	pmc_table_t	*pmc_table = (pmc_table_t *) x86_core()->pmc;
	pmc_id_t	id;

	/*
	 * Can't determine which counter has overflow
	 * so call all registered functions.
	 */
	for (id = 0; id <= pmc_table->id_max; id++)
		if (pmc_table->P6.reserved[id] && pmc_table->P6.ovf_func[id])
			(*pmc_table->P6.ovf_func[id])(id, state);
}
void
pmc_core_intr(void *state)
{
	pmc_table_t	*pmc_table = (pmc_table_t *) x86_core()->pmc;
	pmc_id_t	id;
	pmc_global_status_t	ovf_status;

	ovf_status.u64 = rdmsr64(pmc_table->Core.msr_global_status);

	/*
	 * Scan through table for reserved counters with overflow and
	 * with a registered overflow function.
	 */
	for (id = 0; id <= pmc_table->id_max; id++) {
		if (!pmc_table->Core.reserved[id])
			continue;
		if ((id == 0 && ovf_status.fld.PMC0_overflow) ||
		    (id == 1 && ovf_status.fld.PMC1_overflow)) {
			if (pmc_table->Core.ovf_func[id]) {
				(*pmc_table->Core.ovf_func[id])(id, state);
				/* func expected to clear overflow */
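				/*
				 * On pmc_Core the registered function would
				 * typically clear the counter's overflow bit
				 * by writing it to Core.msr_global_ovf_ctrl
				 * (MSR_PERF_GLOBAL_OVF_CTRL).
				 */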
			}
		}
	}
}

void *
pmc_alloc(void)
{
	kern_return_t	ret;
	pmc_table_t	*pmc_table;
	pmc_machine_t	pmc_type;

	if (!pmc_inited) {
		simple_lock_init(&pmc_lock, 0);
		pmc_inited = TRUE;
	}

	pmc_type = _pmc_machine_type();
	if (pmc_type == pmc_none) {
		return NULL;
	}

	ret = kmem_alloc(kernel_map,
			 (void *) &pmc_table, sizeof(pmc_table_t));
	if (ret != KERN_SUCCESS)
		panic("pmc_init() kmem_alloc returned %d\n", ret);
	bzero((void *)pmc_table, sizeof(pmc_table_t));

	pmc_table->machine_type = pmc_type;
	switch (pmc_type) {
	case pmc_P4_Xeon:
		pmc_table->id_max = 17;
		pmc_table->msr_counter_base = MSR_COUNTER_ADDR(0);
		pmc_table->msr_control_base = MSR_CCCR_ADDR(0);
		lapic_set_pmi_func(&pmc_p4_intr);
		break;
	case pmc_Core:
		pmc_table->id_max = 1;
		pmc_table->msr_counter_base = MSR_IA32_PMC(0);
		pmc_table->msr_control_base = MSR_IA32_PERFEVTSEL(0);
		pmc_table->Core.msr_global_ctrl = MSR_PERF_GLOBAL_CTRL;
		pmc_table->Core.msr_global_ovf_ctrl = MSR_PERF_GLOBAL_OVF_CTRL;
		pmc_table->Core.msr_global_status = MSR_PERF_GLOBAL_STATUS;
		lapic_set_pmi_func(&pmc_core_intr);
		break;
	case pmc_P6:
		pmc_table->id_max = 1;
		pmc_table->msr_counter_base = MSR_P6_COUNTER_ADDR(0);
		pmc_table->msr_control_base = MSR_P6_PES_ADDR(0);
		lapic_set_pmi_func(&pmc_p6_intr);
		break;
	}

	DBG("pmc_alloc() type=%d msr_counter_base=%p msr_control_base=%p\n",
	    pmc_table->machine_type,
	    (void *) pmc_table->msr_counter_base,
	    (void *) pmc_table->msr_control_base);

	return (void *) pmc_table;
}
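/*
 * pmc_alloc() is presumably invoked once per core during CPU bring-up;
 * the table it returns is stored in x86_core()->pmc, which is where the
 * PMI handlers above and pmc_table_valid() below find it.
 */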
static inline pmc_table_t *
pmc_table_valid(pmc_id_t id)
{
	x86_core_t	*my_core = x86_core();
	pmc_table_t	*pmc;

	assert(my_core != NULL);

	pmc = (pmc_table_t *) my_core->pmc;
	if ((pmc == NULL) ||
	    (id > pmc->id_max) ||
	    (pmc->machine_type == pmc_P4_Xeon && !pmc->P4.reserved[id]) ||
	    (pmc->machine_type == pmc_P6 && !pmc->P6.reserved[id]) ||
	    (pmc->machine_type == pmc_Core && !pmc->Core.reserved[id]))
		return NULL;
	return pmc;
}
kern_return_t
pmc_machine_type(pmc_machine_t *type)
{
	x86_core_t	*my_core = x86_core();
	pmc_table_t	*pmc_table;

	assert(my_core != NULL);

	pmc_table = (pmc_table_t *) my_core->pmc;
	if (pmc_table == NULL)
		return KERN_FAILURE;

	*type = pmc_table->machine_type;

	return KERN_SUCCESS;
}
kern_return_t
pmc_reserve(pmc_id_t id)
{
	x86_core_t	*my_core = x86_core();
	pmc_table_t	*pmc_table;

	assert(my_core != NULL);

	pmc_table = (pmc_table_t *) my_core->pmc;
	if (pmc_table == NULL)
		return KERN_FAILURE;
	if (id > pmc_table->id_max)
		return KERN_INVALID_ARGUMENT;

	switch (pmc_table->machine_type) {
	case pmc_P4_Xeon:
		if (pmc_table->P4.reserved[id])
			return KERN_FAILURE;
		pmc_table->P4.reserved[id] = TRUE;
		break;
	case pmc_P6:
		if (pmc_table->P6.reserved[id])
			return KERN_FAILURE;
		pmc_table->P6.reserved[id] = TRUE;
		break;
	case pmc_Core:
		if (pmc_table->Core.reserved[id])
			return KERN_FAILURE;
		pmc_table->Core.reserved[id] = TRUE;
		pmc_global_ctrl_t	ctrl;
		ctrl.u64 = rdmsr64(pmc_table->Core.msr_global_ctrl);
		if (id == 0)
			ctrl.fld.PMC0_enable = 1;
		else
			ctrl.fld.PMC1_enable = 1;
		wrmsr64(pmc_table->Core.msr_global_ctrl, ctrl.u64);
		break;
	default:
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}
boolean_t
pmc_is_reserved(pmc_id_t id)
{
	return pmc_table_valid(id) != NULL;
}
kern_return_t
pmc_free(pmc_id_t id)
{
	pmc_table_t	*pmc_table = pmc_table_valid(id);

	if (pmc_table == NULL)
		return KERN_INVALID_ARGUMENT;

	pmc_cccr_write(id, 0x0ULL);
	switch (pmc_table->machine_type) {
	case pmc_P4_Xeon:
		pmc_table->P4.reserved[id] = FALSE;
		pmc_table->P4.ovf_func[id] = NULL;
		break;
	case pmc_P6:
		pmc_table->P6.reserved[id] = FALSE;
		pmc_table->P6.ovf_func[id] = NULL;
		break;
	case pmc_Core:
		pmc_table->Core.reserved[id] = FALSE;
		pmc_table->Core.ovf_func[id] = NULL;
		pmc_global_ctrl_t	ctrl;
		ctrl.u64 = rdmsr64(pmc_table->Core.msr_global_ctrl);
		if (id == 0)
			ctrl.fld.PMC0_enable = 0;
		else
			ctrl.fld.PMC1_enable = 0;
		wrmsr64(pmc_table->Core.msr_global_ctrl, ctrl.u64);
		break;
	default:
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}
kern_return_t
pmc_counter_read(pmc_id_t id, pmc_counter_t *val)
{
	pmc_table_t	*pmc_table = pmc_table_valid(id);

	if (pmc_table == NULL)
		return KERN_INVALID_ARGUMENT;

	*(uint64_t *)val = rdmsr64(pmc_table->msr_counter_base + id);

	return KERN_SUCCESS;
}
kern_return_t
pmc_counter_write(pmc_id_t id, pmc_counter_t *val)
{
	pmc_table_t	*pmc_table = pmc_table_valid(id);

	if (pmc_table == NULL)
		return KERN_INVALID_ARGUMENT;

	wrmsr64(pmc_table->msr_counter_base + id, *(uint64_t *)val);

	return KERN_SUCCESS;
}
kern_return_t
pmc_cccr_read(pmc_id_t id, pmc_cccr_t *cccr)
{
	pmc_table_t	*pmc_table = pmc_table_valid(id);

	if (pmc_table == NULL)
		return KERN_INVALID_ARGUMENT;

	if (pmc_table->machine_type != pmc_P4_Xeon)
		return KERN_FAILURE;

	*(uint64_t *)cccr = rdmsr64(pmc_table->msr_control_base + id);

	return KERN_SUCCESS;
}
kern_return_t
pmc_cccr_write(pmc_id_t id, pmc_cccr_t *cccr)
{
	pmc_table_t	*pmc_table = pmc_table_valid(id);

	if (pmc_table == NULL)
		return KERN_INVALID_ARGUMENT;

	if (pmc_table->machine_type != pmc_P4_Xeon)
		return KERN_FAILURE;

	wrmsr64(pmc_table->msr_control_base + id, *(uint64_t *)cccr);

	return KERN_SUCCESS;
}
kern_return_t
pmc_evtsel_read(pmc_id_t id, pmc_evtsel_t *evtsel)
{
	pmc_table_t	*pmc_table = pmc_table_valid(id);

	if (pmc_table == NULL)
		return KERN_INVALID_ARGUMENT;

	if (!(pmc_table->machine_type == pmc_P6 ||
	      pmc_table->machine_type == pmc_Core))
		return KERN_FAILURE;

	evtsel->u64 = rdmsr64(pmc_table->msr_control_base + id);

	return KERN_SUCCESS;
}
kern_return_t
pmc_evtsel_write(pmc_id_t id, pmc_evtsel_t *evtsel)
{
	pmc_table_t	*pmc_table = pmc_table_valid(id);

	if (pmc_table == NULL)
		return KERN_INVALID_ARGUMENT;

	if (!(pmc_table->machine_type == pmc_P6 ||
	      pmc_table->machine_type == pmc_Core))
		return KERN_FAILURE;

	wrmsr64(pmc_table->msr_control_base + id, evtsel->u64);

	return KERN_SUCCESS;
}
kern_return_t
pmc_escr_read(pmc_id_t id, pmc_escr_id_t esid, pmc_escr_t *escr)
{
	uint32_t	addr;
	pmc_table_t	*pmc_table = pmc_table_valid(id);

	if (pmc_table == NULL)
		return KERN_INVALID_ARGUMENT;

	if (pmc_table->machine_type != pmc_P4_Xeon)
		return KERN_FAILURE;

	if (esid > PMC_ESID_MAX)
		return KERN_INVALID_ARGUMENT;

	addr = PMC_ESCR_ADDR(id, esid);
	if (addr == 0)
		return KERN_INVALID_ARGUMENT;

	*(uint64_t *)escr = rdmsr64(addr);

	return KERN_SUCCESS;
}
kern_return_t
pmc_escr_write(pmc_id_t id, pmc_escr_id_t esid, pmc_escr_t *escr)
{
	uint32_t	addr;
	pmc_table_t	*pmc_table = pmc_table_valid(id);

	if (pmc_table == NULL)
		return KERN_INVALID_ARGUMENT;

	if (pmc_table->machine_type != pmc_P4_Xeon)
		return KERN_FAILURE;

	if (esid > PMC_ESID_MAX)
		return KERN_INVALID_ARGUMENT;

	addr = PMC_ESCR_ADDR(id, esid);
	if (addr == 0)
		return KERN_INVALID_ARGUMENT;

	wrmsr64(addr, *(uint64_t *)escr);

	return KERN_SUCCESS;
}
kern_return_t
pmc_set_ovf_func(pmc_id_t id, pmc_ovf_func_t func)
{
	pmc_table_t	*pmc_table = pmc_table_valid(id);

	if (pmc_table == NULL)
		return KERN_INVALID_ARGUMENT;

	switch (pmc_table->machine_type) {
	case pmc_P4_Xeon:
		pmc_table->P4.ovf_func[id] = func;
		break;
	case pmc_P6:
		pmc_table->P6.ovf_func[id] = func;
		break;
	case pmc_Core:
		pmc_table->Core.ovf_func[id] = func;
		break;
	default:
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}
kern_return_t
pmc_acquire(task_t task)
{
	kern_return_t	retval = KERN_SUCCESS;

	simple_lock(&pmc_lock);

	if (pmc_owner == task) {
		DBG("ACQUIRED: already owner\n");
		retval = KERN_SUCCESS;
	} else if (pmc_owner == TASK_NULL) {	/* no one owns it */
		pmc_owner = task;
		pmc_thread_count = 0;
		DBG("ACQUIRED: no current owner - made new owner\n");
		retval = KERN_SUCCESS;
	} else {				/* someone already owns it */
		if (pmc_owner == kernel_task) {
			if (pmc_thread_count == 0) {
				/* kernel owns it but no threads using it */
				pmc_owner = task;
				pmc_thread_count = 0;
				DBG("ACQUIRED: owned by kernel, no threads\n");
				retval = KERN_SUCCESS;
			} else {
				DBG("DENIED: owned by kernel, in use\n");
				retval = KERN_RESOURCE_SHORTAGE;
			}
		} else {			/* non-kernel owner */
			DBG("DENIED: owned by another task\n");
			retval = KERN_RESOURCE_SHORTAGE;
		}
	}

	simple_unlock(&pmc_lock);
	return retval;
}
kern_return_t
pmc_release(task_t task)
{
	kern_return_t	retval = KERN_SUCCESS;
	task_t		old_pmc_owner = pmc_owner;

	simple_lock(&pmc_lock);

	if (task != pmc_owner) {
		retval = KERN_NO_ACCESS;
	} else {
		if (old_pmc_owner == kernel_task) {
			if (pmc_thread_count > 0) {
				DBG("NOT RELEASED: owned by kernel, in use\n");
				retval = KERN_NO_ACCESS;
			} else {
				DBG("RELEASED: was owned by kernel\n");
				pmc_owner = TASK_NULL;
				retval = KERN_SUCCESS;
			}
		} else {
			DBG("RELEASED: was owned by user\n");
			pmc_owner = TASK_NULL;
			retval = KERN_SUCCESS;
		}
	}

	simple_unlock(&pmc_lock);
	return retval;
}