/* osfmk/i386/perfmon.c (xnu-1228.0.2) */
/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/std_types.h>
#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/perfmon.h>
#include <i386/proc_reg.h>
#include <i386/cpu_threads.h>
#include <i386/mp.h>
#include <i386/cpuid.h>
#include <i386/lock.h>
#include <vm/vm_kern.h>
#include <kern/task.h>

#if DEBUG
#define DBG(x...)   kprintf(x)
#else
#define DBG(x...)
#endif

static decl_simple_lock_data(,pmc_lock)
static task_t    pmc_owner = TASK_NULL;
static int       pmc_thread_count = 0;
static boolean_t pmc_inited = FALSE;

/* PMC Facility Owner:
 * TASK_NULL   - no one owns it
 * kernel_task - owned by the kernel's own pmc clients
 * other task  - owned by another task
 */

/*
 * Table of ESCRs and addresses associated with performance counters/CCCRs.
 * See Intel SDM Vol 3, Table 15-4 (section 15.9):
 */
static uint16_t pmc_escr_addr_table[18][8] = {
    [MSR_BPU_COUNTER0] {
        [MSR_BSU_ESCR0]   0x3a0,
        [MSR_FSB_ESCR0]   0x3a2,
        [MSR_MOB_ESCR0]   0x3aa,
        [MSR_PMH_ESCR0]   0x3ac,
        [MSR_BPU_ESCR0]   0x3b2,
        [MSR_IS_ESCR0]    0x3b4,
        [MSR_ITLB_ESCR0]  0x3b6,
        [MSR_IX_ESCR0]    0x3c8,
    },
    [MSR_BPU_COUNTER1] {
        [MSR_BSU_ESCR0]   0x3a0,
        [MSR_FSB_ESCR0]   0x3a2,
        [MSR_MOB_ESCR0]   0x3aa,
        [MSR_PMH_ESCR0]   0x3ac,
        [MSR_BPU_ESCR0]   0x3b2,
        [MSR_IS_ESCR0]    0x3b4,
        [MSR_ITLB_ESCR0]  0x3b6,
        [MSR_IX_ESCR0]    0x3c8,
    },
    [MSR_BPU_COUNTER2] {
        [MSR_BSU_ESCR1]   0x3a1,
        [MSR_FSB_ESCR1]   0x3a3,
        [MSR_MOB_ESCR1]   0x3ab,
        [MSR_PMH_ESCR1]   0x3ad,
        [MSR_BPU_ESCR1]   0x3b3,
        [MSR_IS_ESCR1]    0x3b5,
        [MSR_ITLB_ESCR1]  0x3b7,
        [MSR_IX_ESCR1]    0x3c9,
    },
    [MSR_BPU_COUNTER3] {
        [MSR_BSU_ESCR1]   0x3a1,
        [MSR_FSB_ESCR1]   0x3a3,
        [MSR_MOB_ESCR1]   0x3ab,
        [MSR_PMH_ESCR1]   0x3ad,
        [MSR_BPU_ESCR1]   0x3b3,
        [MSR_IS_ESCR1]    0x3b5,
        [MSR_ITLB_ESCR1]  0x3b7,
        [MSR_IX_ESCR1]    0x3c9,
    },
    [MSR_MS_COUNTER0] {
        [MSR_MS_ESCR1]    0x3c1,
        [MSR_TBPU_ESCR1]  0x3c3,
        [MSR_TC_ESCR1]    0x3c5,
    },
    [MSR_MS_COUNTER1] {
        [MSR_MS_ESCR1]    0x3c1,
        [MSR_TBPU_ESCR1]  0x3c3,
        [MSR_TC_ESCR1]    0x3c5,
    },
    [MSR_MS_COUNTER2] {
        [MSR_MS_ESCR1]    0x3c1,
        [MSR_TBPU_ESCR1]  0x3c3,
        [MSR_TC_ESCR1]    0x3c5,
    },
    [MSR_MS_COUNTER3] {
        [MSR_MS_ESCR1]    0x3c1,
        [MSR_TBPU_ESCR1]  0x3c3,
        [MSR_TC_ESCR1]    0x3c5,
    },
    [MSR_FLAME_COUNTER0] {
        [MSR_FIRM_ESCR0]  0x3a4,
        [MSR_FLAME_ESCR0] 0x3a6,
        [MSR_DAC_ESCR0]   0x3a8,
        [MSR_SAT_ESCR0]   0x3ae,
        [MSR_U2L_ESCR0]   0x3b0,
    },
    [MSR_FLAME_COUNTER1] {
        [MSR_FIRM_ESCR0]  0x3a4,
        [MSR_FLAME_ESCR0] 0x3a6,
        [MSR_DAC_ESCR0]   0x3a8,
        [MSR_SAT_ESCR0]   0x3ae,
        [MSR_U2L_ESCR0]   0x3b0,
    },
    [MSR_FLAME_COUNTER2] {
        [MSR_FIRM_ESCR1]  0x3a5,
        [MSR_FLAME_ESCR1] 0x3a7,
        [MSR_DAC_ESCR1]   0x3a9,
        [MSR_SAT_ESCR1]   0x3af,
        [MSR_U2L_ESCR1]   0x3b1,
    },
    [MSR_FLAME_COUNTER3] {
        [MSR_FIRM_ESCR1]  0x3a5,
        [MSR_FLAME_ESCR1] 0x3a7,
        [MSR_DAC_ESCR1]   0x3a9,
        [MSR_SAT_ESCR1]   0x3af,
        [MSR_U2L_ESCR1]   0x3b1,
    },
    [MSR_IQ_COUNTER0] {
        [MSR_CRU_ESCR0]   0x3b8,
        [MSR_CRU_ESCR2]   0x3cc,
        [MSR_CRU_ESCR4]   0x3e0,
        [MSR_IQ_ESCR0]    0x3ba,
        [MSR_RAT_ESCR0]   0x3bc,
        [MSR_SSU_ESCR0]   0x3be,
        [MSR_AFL_ESCR0]   0x3ca,
    },
    [MSR_IQ_COUNTER1] {
        [MSR_CRU_ESCR0]   0x3b8,
        [MSR_CRU_ESCR2]   0x3cc,
        [MSR_CRU_ESCR4]   0x3e0,
        [MSR_IQ_ESCR0]    0x3ba,
        [MSR_RAT_ESCR0]   0x3bc,
        [MSR_SSU_ESCR0]   0x3be,
        [MSR_AFL_ESCR0]   0x3ca,
    },
    [MSR_IQ_COUNTER2] {
        [MSR_CRU_ESCR1]   0x3b9,
        [MSR_CRU_ESCR3]   0x3cd,
        [MSR_CRU_ESCR5]   0x3e1,
        [MSR_IQ_ESCR1]    0x3bb,
        [MSR_RAT_ESCR1]   0x3bd,
        [MSR_AFL_ESCR1]   0x3cb,
    },
    [MSR_IQ_COUNTER3] {
        [MSR_CRU_ESCR1]   0x3b9,
        [MSR_CRU_ESCR3]   0x3cd,
        [MSR_CRU_ESCR5]   0x3e1,
        [MSR_IQ_ESCR1]    0x3bb,
        [MSR_RAT_ESCR1]   0x3bd,
        [MSR_AFL_ESCR1]   0x3cb,
    },
    [MSR_IQ_COUNTER4] {
        [MSR_CRU_ESCR0]   0x3b8,
        [MSR_CRU_ESCR2]   0x3cc,
        [MSR_CRU_ESCR4]   0x3e0,
        [MSR_IQ_ESCR0]    0x3ba,
        [MSR_RAT_ESCR0]   0x3bc,
        [MSR_SSU_ESCR0]   0x3be,
        [MSR_AFL_ESCR0]   0x3ca,
    },
    [MSR_IQ_COUNTER5] {
        [MSR_CRU_ESCR1]   0x3b9,
        [MSR_CRU_ESCR3]   0x3cd,
        [MSR_CRU_ESCR5]   0x3e1,
        [MSR_IQ_ESCR1]    0x3bb,
        [MSR_RAT_ESCR1]   0x3bd,
        [MSR_AFL_ESCR1]   0x3cb,
    },
};
#define PMC_ESCR_ADDR(id,esid)  pmc_escr_addr_table[id][esid]

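/*
 * For example, PMC_ESCR_ADDR(MSR_IQ_COUNTER0, MSR_CRU_ESCR0) yields 0x3b8,
 * the MSR address of CRU_ESCR0.  Counter/ESCR pairings not listed above
 * default to 0, which pmc_escr_read()/pmc_escr_write() below reject as
 * invalid.
 */
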
typedef struct {
    pmc_id_t       id_max;              /* Maximum counter id */
    pmc_machine_t  machine_type;        /* P6, Core or P4/Xeon */
    uint32_t       msr_counter_base;    /* First counter MSR */
    uint32_t       msr_control_base;    /* First control MSR */
    union {
        struct {
            boolean_t       reserved[2];
            pmc_ovf_func_t  *ovf_func[2];
        } P6;
        struct {
            boolean_t       reserved[2];
            pmc_ovf_func_t  *ovf_func[2];
            uint32_t        msr_global_ctrl;
            uint32_t        msr_global_ovf_ctrl;
            uint32_t        msr_global_status;
        } Core;
        struct {
            boolean_t       reserved[18];
            pmc_ovf_func_t  *ovf_func[18];
#ifdef DEBUG
            pmc_cccr_t      cccr_shadow[18];     /* Last cccr set */
            pmc_counter_t   counter_shadow[18];  /* Last counter set */
            uint32_t        ovfs_unexpected[18]; /* Unexpected intrs */
#endif
        } P4;
    };
} pmc_table_t;

static pmc_machine_t
_pmc_machine_type(void)
{
    i386_cpu_info_t *infop = cpuid_info();

    if (strncmp(infop->cpuid_vendor, CPUID_VID_INTEL,
                sizeof(CPUID_VID_INTEL)) != 0)
        return pmc_none;

    if (!pmc_is_available())
        return pmc_none;

    switch (infop->cpuid_family) {
    case 0x6:
        switch (infop->cpuid_model) {
        case 15:
            return pmc_Core;
        default:
            return pmc_P6;
        }
    case 0xf:
        return pmc_P4_Xeon;
    default:
        return pmc_unknown;
    }
}

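/*
 * For instance, a family-0x6, model-15 part (Core 2 / Merom) is reported as
 * pmc_Core; earlier family-6 parts fall back to pmc_P6, and NetBurst
 * (family 0xf) parts to pmc_P4_Xeon.
 */
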
static void
pmc_p4_intr(void *state)
{
    pmc_table_t *pmc_table = (pmc_table_t *) x86_core()->pmc;
    uint32_t    cccr_addr;
    pmc_cccr_t  cccr;
    pmc_id_t    id;
    int         my_logical_cpu = cpu_to_logical_cpu(cpu_number());

    /*
     * Scan through table for reserved counters with overflow and
     * with a registered overflow function.
     */
    for (id = 0; id <= pmc_table->id_max; id++) {
        if (!pmc_table->P4.reserved[id])
            continue;
        cccr_addr = pmc_table->msr_control_base + id;
        cccr.u_u64 = rdmsr64(cccr_addr);
#ifdef DEBUG
        pmc_table->P4.cccr_shadow[id] = cccr;
        pmc_table->P4.counter_shadow[id].u64 =
            rdmsr64(pmc_table->msr_counter_base + id);
#endif
        if (cccr.u_htt.ovf == 0)
            continue;
        if ((cccr.u_htt.ovf_pmi_t0 == 1 && my_logical_cpu == 0) ||
            (cccr.u_htt.ovf_pmi_t1 == 1 && my_logical_cpu == 1)) {
            if (pmc_table->P4.ovf_func[id]) {
                (*pmc_table->P4.ovf_func[id])(id, state);
                /* func expected to clear overflow */
                continue;
            }
        }
        /* Unexpected overflow interrupt: count it (DEBUG) but leave it set */
#ifdef DEBUG
        pmc_table->P4.ovfs_unexpected[id]++;
#endif
    }
}

static void
pmc_p6_intr(void *state)
{
    pmc_table_t *pmc_table = (pmc_table_t *) x86_core()->pmc;
    pmc_id_t    id;

    /*
     * Can't determine which counter has overflowed,
     * so call all registered functions.
     */
    for (id = 0; id <= pmc_table->id_max; id++)
        if (pmc_table->P6.reserved[id] && pmc_table->P6.ovf_func[id])
            (*pmc_table->P6.ovf_func[id])(id, state);
}

static void
pmc_core_intr(void *state)
{
    pmc_table_t         *pmc_table = (pmc_table_t *) x86_core()->pmc;
    pmc_id_t            id;
    pmc_global_status_t ovf_status;

    ovf_status.u64 = rdmsr64(pmc_table->Core.msr_global_status);
    /*
     * Scan through table for reserved counters with overflow and
     * with a registered overflow function.
     */
    for (id = 0; id <= pmc_table->id_max; id++) {
        if (!pmc_table->Core.reserved[id])
            continue;
        if ((id == 0 && ovf_status.fld.PMC0_overflow) ||
            (id == 1 && ovf_status.fld.PMC1_overflow)) {
            if (pmc_table->Core.ovf_func[id]) {
                (*pmc_table->Core.ovf_func[id])(id, state);
                /* func expected to clear overflow */
                continue;
            }
        }
    }
}
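
/*
 * Note: on Core, a registered ovf_func is expected to clear its own
 * PMCx_overflow bit, e.g. by writing the corresponding bit of
 * pmc_table->Core.msr_global_ovf_ctrl (MSR_PERF_GLOBAL_OVF_CTRL); see the
 * usage sketch after pmc_set_ovf_func() below.
 */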

void *
pmc_alloc(void)
{
    int             ret;
    pmc_table_t     *pmc_table;
    pmc_machine_t   pmc_type;

    if (!pmc_inited) {
        simple_lock_init(&pmc_lock, 0);
        pmc_inited = TRUE;
    }

    pmc_type = _pmc_machine_type();
    if (pmc_type == pmc_none) {
        return NULL;
    }

    ret = kmem_alloc(kernel_map,
                     (void *) &pmc_table, sizeof(pmc_table_t));
    if (ret != KERN_SUCCESS)
        panic("pmc_alloc() kmem_alloc returned %d\n", ret);
    bzero((void *)pmc_table, sizeof(pmc_table_t));

    pmc_table->machine_type = pmc_type;
    switch (pmc_type) {
    case pmc_P4_Xeon:
        pmc_table->id_max = 17;
        pmc_table->msr_counter_base = MSR_COUNTER_ADDR(0);
        pmc_table->msr_control_base = MSR_CCCR_ADDR(0);
        lapic_set_pmi_func(&pmc_p4_intr);
        break;
    case pmc_Core:
        pmc_table->id_max = 1;
        pmc_table->msr_counter_base = MSR_IA32_PMC(0);
        pmc_table->msr_control_base = MSR_IA32_PERFEVTSEL(0);
        pmc_table->Core.msr_global_ctrl = MSR_PERF_GLOBAL_CTRL;
        pmc_table->Core.msr_global_ovf_ctrl = MSR_PERF_GLOBAL_OVF_CTRL;
        pmc_table->Core.msr_global_status = MSR_PERF_GLOBAL_STATUS;
        lapic_set_pmi_func(&pmc_core_intr);
        break;
    case pmc_P6:
        pmc_table->id_max = 1;
        pmc_table->msr_counter_base = MSR_P6_COUNTER_ADDR(0);
        pmc_table->msr_control_base = MSR_P6_PES_ADDR(0);
        lapic_set_pmi_func(&pmc_p6_intr);
        break;
    default:
        break;
    }
    DBG("pmc_alloc() type=%d msr_counter_base=%p msr_control_base=%p\n",
        pmc_table->machine_type,
        (void *) pmc_table->msr_counter_base,
        (void *) pmc_table->msr_control_base);
    return (void *) pmc_table;
}
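
/*
 * The caller is expected to hang the returned table off its per-core
 * structure; the interrupt handlers above and pmc_table_valid() below read
 * it back via x86_core()->pmc.  A minimal sketch of that wiring, assuming a
 * suitable per-core startup context, would be:
 *
 *      x86_core()->pmc = pmc_alloc();
 */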


static inline pmc_table_t *
pmc_table_valid(pmc_id_t id)
{
    x86_core_t  *my_core = x86_core();
    pmc_table_t *pmc;

    assert(my_core != NULL);

    pmc = (pmc_table_t *) my_core->pmc;
    if ((pmc == NULL) ||
        (id > pmc->id_max) ||
        (pmc->machine_type == pmc_P4_Xeon && !pmc->P4.reserved[id]) ||
        (pmc->machine_type == pmc_P6 && !pmc->P6.reserved[id]) ||
        (pmc->machine_type == pmc_Core && !pmc->Core.reserved[id]))
        return NULL;
    return pmc;
}

int
pmc_machine_type(pmc_machine_t *type)
{
    x86_core_t  *my_core = x86_core();
    pmc_table_t *pmc_table;

    assert(my_core != NULL);

    pmc_table = (pmc_table_t *) my_core->pmc;
    if (pmc_table == NULL)
        return KERN_FAILURE;

    *type = pmc_table->machine_type;

    return KERN_SUCCESS;
}

int
pmc_reserve(pmc_id_t id)
{
    x86_core_t  *my_core = x86_core();
    pmc_table_t *pmc_table;

    assert(my_core != NULL);

    pmc_table = (pmc_table_t *) my_core->pmc;
    if (pmc_table == NULL)
        return KERN_FAILURE;
    if (id > pmc_table->id_max)
        return KERN_INVALID_ARGUMENT;
    switch (pmc_table->machine_type) {
    case pmc_P4_Xeon:
        if (pmc_table->P4.reserved[id])
            return KERN_FAILURE;
        pmc_table->P4.reserved[id] = TRUE;
        return KERN_SUCCESS;
    case pmc_P6:
        if (pmc_table->P6.reserved[id])
            return KERN_FAILURE;
        pmc_table->P6.reserved[id] = TRUE;
        return KERN_SUCCESS;
    case pmc_Core:
        if (pmc_table->Core.reserved[id])
            return KERN_FAILURE;
        pmc_table->Core.reserved[id] = TRUE;
        pmc_global_ctrl_t ctrl;
        ctrl.u64 = rdmsr64(pmc_table->Core.msr_global_ctrl);
        if (id == 0)
            ctrl.fld.PMC0_enable = 1;
        else
            ctrl.fld.PMC1_enable = 1;
        wrmsr64(pmc_table->Core.msr_global_ctrl, ctrl.u64);
        return KERN_SUCCESS;
    default:
        return KERN_FAILURE;
    }
}
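
/*
 * Note that reservations are per-core: pmc_reserve()/pmc_free() act on the
 * table hanging off x86_core() of the CPU they run on, so a client that
 * wants to count on several cores must reserve (and program) the counter on
 * each of them.
 */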

boolean_t
pmc_is_reserved(pmc_id_t id)
{
    return pmc_table_valid(id) != NULL;
}

int
pmc_free(pmc_id_t id)
{
    pmc_table_t *pmc_table = pmc_table_valid(id);
    pmc_cccr_t  cccr_zero;

    if (pmc_table == NULL)
        return KERN_INVALID_ARGUMENT;

    cccr_zero.u_u64 = 0x0ULL;
    pmc_cccr_write(id, &cccr_zero);    /* clears the CCCR; no-op unless P4/Xeon */
    switch (pmc_table->machine_type) {
    case pmc_P4_Xeon:
        pmc_table->P4.reserved[id] = FALSE;
        pmc_table->P4.ovf_func[id] = NULL;
        break;
    case pmc_P6:
        pmc_table->P6.reserved[id] = FALSE;
        pmc_table->P6.ovf_func[id] = NULL;
        break;
    case pmc_Core:
        pmc_table->Core.reserved[id] = FALSE;
        pmc_table->Core.ovf_func[id] = NULL;
        pmc_global_ctrl_t ctrl;
        ctrl.u64 = rdmsr64(pmc_table->Core.msr_global_ctrl);
        if (id == 0)
            ctrl.fld.PMC0_enable = 0;
        else
            ctrl.fld.PMC1_enable = 0;
        wrmsr64(pmc_table->Core.msr_global_ctrl, ctrl.u64);
        break;
    default:
        return KERN_INVALID_ARGUMENT;
    }

    return KERN_SUCCESS;
}

int
pmc_counter_read(pmc_id_t id, pmc_counter_t *val)
{
    pmc_table_t *pmc_table = pmc_table_valid(id);

    if (pmc_table == NULL)
        return KERN_INVALID_ARGUMENT;

    *(uint64_t *)val = rdmsr64(pmc_table->msr_counter_base + id);

    return KERN_SUCCESS;
}

int
pmc_counter_write(pmc_id_t id, pmc_counter_t *val)
{
    pmc_table_t *pmc_table = pmc_table_valid(id);

    if (pmc_table == NULL)
        return KERN_INVALID_ARGUMENT;

    wrmsr64(pmc_table->msr_counter_base + id, *(uint64_t *)val);

    return KERN_SUCCESS;
}

int
pmc_cccr_read(pmc_id_t id, pmc_cccr_t *cccr)
{
    pmc_table_t *pmc_table = pmc_table_valid(id);

    if (pmc_table == NULL)
        return KERN_INVALID_ARGUMENT;

    if (pmc_table->machine_type != pmc_P4_Xeon)
        return KERN_FAILURE;

    *(uint64_t *)cccr = rdmsr64(pmc_table->msr_control_base + id);

    return KERN_SUCCESS;
}

int
pmc_cccr_write(pmc_id_t id, pmc_cccr_t *cccr)
{
    pmc_table_t *pmc_table = pmc_table_valid(id);

    if (pmc_table == NULL)
        return KERN_INVALID_ARGUMENT;

    if (pmc_table->machine_type != pmc_P4_Xeon)
        return KERN_FAILURE;

    wrmsr64(pmc_table->msr_control_base + id, *(uint64_t *)cccr);

    return KERN_SUCCESS;
}

int
pmc_evtsel_read(pmc_id_t id, pmc_evtsel_t *evtsel)
{
    pmc_table_t *pmc_table = pmc_table_valid(id);

    if (pmc_table == NULL)
        return KERN_INVALID_ARGUMENT;

    if (!(pmc_table->machine_type == pmc_P6 ||
          pmc_table->machine_type == pmc_Core))
        return KERN_FAILURE;

    evtsel->u64 = rdmsr64(pmc_table->msr_control_base + id);

    return KERN_SUCCESS;
}

int
pmc_evtsel_write(pmc_id_t id, pmc_evtsel_t *evtsel)
{
    pmc_table_t *pmc_table = pmc_table_valid(id);

    if (pmc_table == NULL)
        return KERN_INVALID_ARGUMENT;

    if (!(pmc_table->machine_type == pmc_P6 ||
          pmc_table->machine_type == pmc_Core))
        return KERN_FAILURE;

    wrmsr64(pmc_table->msr_control_base + id, evtsel->u64);

    return KERN_SUCCESS;
}

int
pmc_escr_read(pmc_id_t id, pmc_escr_id_t esid, pmc_escr_t *escr)
{
    uint32_t    addr;
    pmc_table_t *pmc_table = pmc_table_valid(id);

    if (pmc_table == NULL)
        return KERN_INVALID_ARGUMENT;

    if (pmc_table->machine_type != pmc_P4_Xeon)
        return KERN_FAILURE;

    if (esid > PMC_ESID_MAX)
        return KERN_INVALID_ARGUMENT;

    addr = PMC_ESCR_ADDR(id, esid);
    if (addr == 0)
        return KERN_INVALID_ARGUMENT;

    *(uint64_t *)escr = rdmsr64(addr);

    return KERN_SUCCESS;
}

int
pmc_escr_write(pmc_id_t id, pmc_escr_id_t esid, pmc_escr_t *escr)
{
    uint32_t    addr;
    pmc_table_t *pmc_table = pmc_table_valid(id);

    if (pmc_table == NULL)
        return KERN_FAILURE;

    if (pmc_table->machine_type != pmc_P4_Xeon)
        return KERN_FAILURE;

    if (esid > PMC_ESID_MAX)
        return KERN_INVALID_ARGUMENT;

    addr = PMC_ESCR_ADDR(id, esid);
    if (addr == 0)
        return KERN_INVALID_ARGUMENT;

    wrmsr64(addr, *(uint64_t *)escr);

    return KERN_SUCCESS;
}

int
pmc_set_ovf_func(pmc_id_t id, pmc_ovf_func_t func)
{
    pmc_table_t *pmc_table = pmc_table_valid(id);

    if (pmc_table == NULL)
        return KERN_INVALID_ARGUMENT;

    switch (pmc_table->machine_type) {
    case pmc_P4_Xeon:
        pmc_table->P4.ovf_func[id] = func;
        break;
    case pmc_P6:
        pmc_table->P6.ovf_func[id] = func;
        break;
    case pmc_Core:
        pmc_table->Core.ovf_func[id] = func;
        break;
    default:
        return KERN_INVALID_ARGUMENT;
    }

    return KERN_SUCCESS;
}
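
/*
 * Usage sketch (illustrative only, for an in-kernel client on a Core-family
 * CPU): reserve counter 0, register an overflow handler, zero the counter,
 * and program PERFEVTSEL0.  The handler name and raw event encoding below
 * are examples, not values taken from this file: 0x53003c (event 0x3c,
 * umask 0, USR+OS, PMI on overflow, enable) follows the architectural
 * PERFEVTSEL layout, and the handler signature assumes pmc_ovf_func_t is
 * the (pmc_id_t, void *) callback type invoked by the interrupt paths above.
 */
#if 0   /* example only, not compiled */
static void
example_cycles_ovf(pmc_id_t id, __unused void *state)
{
    /* Acknowledge: clear this counter's overflow bit so PMIs keep coming */
    wrmsr64(MSR_PERF_GLOBAL_OVF_CTRL, 1ULL << id);
}

static void
example_start_cycle_counting(void)
{
    pmc_counter_t   zero;
    pmc_evtsel_t    evtsel;

    if (pmc_acquire(kernel_task) != KERN_SUCCESS ||
        pmc_reserve(0) != KERN_SUCCESS)
        return;

    (void) pmc_set_ovf_func(0, example_cycles_ovf);

    zero.u64 = 0ULL;
    (void) pmc_counter_write(0, &zero);

    evtsel.u64 = 0x53003cULL;            /* unhalted core cycles, USR+OS, PMI, EN */
    (void) pmc_evtsel_write(0, &evtsel); /* counting starts here */
}
#endif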

int
pmc_acquire(task_t task)
{
    kern_return_t retval = KERN_SUCCESS;

    if (!pmc_inited)
        return KERN_FAILURE;

    simple_lock(&pmc_lock);

    if (pmc_owner == task) {
        DBG("pmc_acquire - "
            "ACQUIRED: already owner\n");
        retval = KERN_SUCCESS;
        /* already own it */
    } else if (pmc_owner == TASK_NULL) { /* no one owns it */
        pmc_owner = task;
        pmc_thread_count = 0;
        DBG("pmc_acquire - "
            "ACQUIRED: no current owner - made new owner\n");
        retval = KERN_SUCCESS;
    } else { /* someone already owns it */
        if (pmc_owner == kernel_task) {
            if (pmc_thread_count == 0) {
                /* kernel owns it but no threads using it */
                pmc_owner = task;
                pmc_thread_count = 0;
                DBG("pmc_acquire - "
                    "ACQUIRED: owned by kernel, no threads\n");
                retval = KERN_SUCCESS;
            } else {
                DBG("pmc_acquire - "
                    "DENIED: owned by kernel, in use\n");
                retval = KERN_RESOURCE_SHORTAGE;
            }
        } else { /* non-kernel owner */
            DBG("pmc_acquire - "
                "DENIED: owned by another task\n");
            retval = KERN_RESOURCE_SHORTAGE;
        }
    }

    simple_unlock(&pmc_lock);
    return retval;
}
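
/*
 * Ownership protocol, by example: with no owner, pmc_acquire(T) makes T the
 * owner; while kernel_task owns the facility and pmc_thread_count is
 * non-zero, other tasks get KERN_RESOURCE_SHORTAGE; once the count drops to
 * zero, another task's pmc_acquire() takes ownership over.  pmc_release()
 * succeeds only when called by the current owner (KERN_NO_ACCESS otherwise).
 */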

int
pmc_release(task_t task)
{
    kern_return_t retval = KERN_SUCCESS;
    task_t        old_pmc_owner;

    if (!pmc_inited)
        return KERN_FAILURE;

    simple_lock(&pmc_lock);
    old_pmc_owner = pmc_owner;    /* snapshot under the lock */

    if (task != pmc_owner) {
        retval = KERN_NO_ACCESS;
    } else {
        if (old_pmc_owner == kernel_task) {
            if (pmc_thread_count > 0) {
                DBG("pmc_release - "
                    "NOT RELEASED: owned by kernel, in use\n");
                retval = KERN_NO_ACCESS;
            } else {
                DBG("pmc_release - "
                    "RELEASED: was owned by kernel\n");
                pmc_owner = TASK_NULL;
                retval = KERN_SUCCESS;
            }
        } else {
            DBG("pmc_release - "
                "RELEASED: was owned by user\n");
            pmc_owner = TASK_NULL;
            retval = KERN_SUCCESS;
        }
    }

    simple_unlock(&pmc_lock);
    return retval;
}