osfmk/kperf/kperf_kpc.c (apple/xnu, xnu-7195.101.1)
/*
 * Copyright (c) 2013 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Sample KPC data into kperf and manage shared context-switch and AST handlers
 */

#include <kperf/kperf.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/pet.h>
#include <kperf/kperf_kpc.h>
#include <kern/kpc.h> /* kpc_cswitch_context, kpc_threads_counting */

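/*
 * Descriptive note (not in the original source): run the shared AST
 * handlers for kpc and kperf on the given thread, then clear the thread's
 * pending kperf AST state.
 */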
void
kperf_kpc_thread_ast(thread_t thread)
{
	kpc_thread_ast_handler(thread);
	kperf_thread_ast_handler(thread);
	thread->kperf_ast = 0;
}

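/*
 * Descriptive note (not in the original source): sample the current
 * thread's counter values into kpcd, zeroing the counts if the per-thread
 * counters aren't available.  When sample_config is set, the PMC
 * configuration registers are captured as well.
 */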
void
kperf_kpc_thread_sample(struct kpcdata *kpcd, int sample_config)
{
	BUF_INFO(PERF_KPC_THREAD_SAMPLE | DBG_FUNC_START, sample_config);

	kpcd->running = kpc_get_running();
	/* let kpc_get_curthread_counters set the correct count */
	kpcd->counterc = KPC_MAX_COUNTERS;
	if (kpc_get_curthread_counters(&kpcd->counterc,
	    kpcd->counterv)) {
		/* if thread counters aren't ready, default to 0 */
		memset(kpcd->counterv, 0,
		    sizeof(uint64_t) * kpcd->counterc);
	}
	/* help out Instruments by sampling KPC's config */
	if (!sample_config) {
		kpcd->configc = 0;
	} else {
		kpcd->configc = kpc_get_config_count(kpcd->running);
		kpc_get_config(kpcd->running, kpcd->configv);
	}

	BUF_INFO(PERF_KPC_THREAD_SAMPLE | DBG_FUNC_END, kpcd->running, kpcd->counterc);
}

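/*
 * Descriptive note (not in the original source): sample the current CPU's
 * counter values into kpcd, optionally capturing the PMC configuration
 * registers when sample_config is set.  Callers are presumably expected to
 * pair this with kperf_kpc_cpu_log() to emit the captured values.
 */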
void
kperf_kpc_cpu_sample(struct kpcdata *kpcd, int sample_config)
{
	BUF_INFO(PERF_KPC_CPU_SAMPLE | DBG_FUNC_START, sample_config);

	kpcd->running = kpc_get_running();
	kpcd->counterc = kpc_get_cpu_counters(0, kpcd->running,
	    &kpcd->curcpu,
	    kpcd->counterv);
	if (!sample_config) {
		kpcd->configc = 0;
	} else {
		kpcd->configc = kpc_get_config_count(kpcd->running);
		kpc_get_config(kpcd->running, kpcd->configv);
	}

	BUF_INFO(PERF_KPC_CPU_SAMPLE | DBG_FUNC_END, kpcd->running, kpcd->counterc);
}

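/*
 * Descriptive note (not in the original source): emit the sampled
 * configuration registers to the trace buffer, packing four 64-bit values
 * per event on LP64 and two values split into 32-bit halves otherwise.
 */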
void
kperf_kpc_config_log(const struct kpcdata *kpcd)
{
	BUF_DATA(PERF_KPC_CONFIG,
	    kpcd->running,
	    kpcd->counterc,
	    kpc_get_counter_count(KPC_CLASS_FIXED_MASK),
	    kpcd->configc);

#if __LP64__
	unsigned int max = (kpcd->configc + 3) / 4;
	for (unsigned int i = 0; i < max; i++) {
		uint32_t flag = (i == 0) ? DBG_FUNC_START : ((i == (max - 1)) ? DBG_FUNC_END : DBG_FUNC_NONE);
		BUF_DATA(PERF_KPC_CFG_REG | flag,
		    kpcd->configv[0 + i * 4], kpcd->configv[1 + i * 4],
		    kpcd->configv[2 + i * 4], kpcd->configv[3 + i * 4]);
	}
#else /* __LP64__ */
	unsigned int max = (kpcd->configc + 1) / 2;
	for (unsigned int i = 0; i < max; i++) {
		uint32_t flag = (i == 0) ? DBG_FUNC_START : ((i == (max - 1)) ? DBG_FUNC_END : DBG_FUNC_NONE);
		BUF_DATA(PERF_KPC_CFG_REG32 | flag,
		    kpcd->configv[i * 2] >> 32ULL,
		    kpcd->configv[i * 2] & 0xffffffffULL,
		    kpcd->configv[i * 2 + 1] >> 32ULL,
		    kpcd->configv[i * 2 + 1] & 0xffffffffULL);
	}
#endif /* !__LP64__ */
}

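/*
 * Descriptive note (not in the original source): shared helper that emits
 * the sampled counter values, using the 64-bit trace code on LP64 and the
 * 32-bit variant (each count split into two trace arguments) otherwise.
 */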
static void
kperf_kpc_log(uint32_t code, uint32_t code32, const struct kpcdata *kpcd)
{
#if __LP64__
#pragma unused(code32)
	unsigned int max = (kpcd->counterc + 3) / 4;
	/* and the actual counts with one 64-bit argument each */
	for (unsigned int i = 0; i < max; i++) {
		uint32_t flag = (i == 0) ? DBG_FUNC_START : ((i == (max - 1)) ? DBG_FUNC_END : DBG_FUNC_NONE);
		BUF_DATA(code | flag,
		    kpcd->counterv[0 + i * 4],
		    kpcd->counterv[1 + i * 4],
		    kpcd->counterv[2 + i * 4],
		    kpcd->counterv[3 + i * 4]);
	}
#else /* __LP64__ */
#pragma unused(code)
	unsigned int max = (kpcd->counterc + 1) / 2;
	/* and the actual counts with two 32-bit trace arguments each */
	for (unsigned int i = 0; i < max; i++) {
		uint32_t flag = (i == 0) ? DBG_FUNC_START : ((i == (max - 1)) ? DBG_FUNC_END : DBG_FUNC_NONE);
		BUF_DATA(code32 | flag,
		    (kpcd->counterv[0 + i * 2] >> 32ULL),
		    kpcd->counterv[0 + i * 2] & 0xffffffffULL,
		    (kpcd->counterv[1 + i * 2] >> 32ULL),
		    kpcd->counterv[1 + i * 2] & 0xffffffffULL);
	}
#endif /* !__LP64__ */
}

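/*
 * Descriptive note (not in the original source): log CPU counter samples
 * under the PERF_KPC_DATA trace codes.
 */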
void
kperf_kpc_cpu_log(const struct kpcdata *kpcd)
{
	kperf_kpc_log(PERF_KPC_DATA, PERF_KPC_DATA32, kpcd);
}

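/*
 * Descriptive note (not in the original source): log per-thread counter
 * samples under the PERF_KPC_DATA_THREAD trace codes.
 */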
void
kperf_kpc_thread_log(const struct kpcdata *kpcd)
{
	kperf_kpc_log(PERF_KPC_DATA_THREAD, PERF_KPC_DATA_THREAD32, kpcd);
}