apple/xnu: osfmk/arm/cpuid.c
/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#include <pexpert/pexpert.h>
#include <arm/cpuid.h>
#include <arm/cpuid_internal.h>
#include <vm/vm_page.h>
#include "proc_reg.h"

#include <libkern/section_keywords.h>

/* Temporary types to aid decoding;
 * everything is little-endian. */

typedef struct {
	uint32_t
	    Ctype1:3,  /* 2:0 */
	    Ctype2:3,  /* 5:3 */
	    Ctype3:3,  /* 8:6 */
	    Ctypes:15, /* 23:9 - Don't Care */
	    LoC:3,     /* 26:24 - Level of Coherency */
	    LoU:3,     /* 29:27 - Level of Unification */
	    RAZ:2;     /* 31:30 - Read-As-Zero */
} arm_cache_clidr_t;

typedef union {
	arm_cache_clidr_t bits;
	uint32_t          value;
} arm_cache_clidr_info_t;
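
/*
 * Note: each Ctype<n> field above encodes the type of the cache at that
 * level, per the ARM Architecture Reference Manual: 0x0 = no cache,
 * 0x1 = instruction only, 0x2 = data only, 0x3 = separate I and D,
 * 0x4 = unified. do_cacheid() below tests for 0x4 (and 0x2) when probing
 * the L2/L3 caches.
 */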


typedef struct {
	uint32_t
	    LineSize:3, /* 2:0 - Number of words in cache line */
	    Assoc:10,   /* 12:3 - Associativity of cache */
	    NumSets:15, /* 27:13 - Number of sets in cache */
	    c_type:4;   /* 31:28 - Cache type */
} arm_cache_ccsidr_t;


typedef union {
	arm_cache_ccsidr_t bits;
	uint32_t           value;
} arm_cache_ccsidr_info_t;
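
/*
 * Note: the CCSIDR fields above hold encoded values, per the ARM
 * Architecture Reference Manual: LineSize is log2(words per line) - 2,
 * while Assoc and NumSets hold (associativity - 1) and (number of
 * sets - 1) respectively. This is why do_cacheid() adds one and shifts
 * when deriving the cache geometry below.
 */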

/* Statics */

static SECURITY_READ_ONLY_LATE(arm_cpu_info_t) cpuid_cpu_info;
static SECURITY_READ_ONLY_LATE(cache_info_t) cpuid_cache_info;

/* Code */

__private_extern__
void
do_cpuid(void)
{
	cpuid_cpu_info.value = machine_read_midr();
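	/*
	 * The MIDR value read above is decoded through the arm_info
	 * bitfields; the architecturally defined layout is: implementer
	 * [31:24], variant [23:20], architecture [19:16], primary part
	 * number [15:4], revision [3:0].
	 */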
#if (__ARM_ARCH__ == 8)

#if defined(HAS_APPLE_PAC)
	cpuid_cpu_info.arm_info.arm_arch = CPU_ARCH_ARMv8E;
#else /* defined(HAS_APPLE_PAC) */
	cpuid_cpu_info.arm_info.arm_arch = CPU_ARCH_ARMv8;
#endif /* defined(HAS_APPLE_PAC) */

#elif (__ARM_ARCH__ == 7)
#ifdef __ARM_SUB_ARCH__
	cpuid_cpu_info.arm_info.arm_arch = __ARM_SUB_ARCH__;
#else /* __ARM_SUB_ARCH__ */
	cpuid_cpu_info.arm_info.arm_arch = CPU_ARCH_ARMv7;
#endif /* __ARM_SUB_ARCH__ */
#else /* (__ARM_ARCH__ != 7) && (__ARM_ARCH__ != 8) */
	/* 1176 architecture lives in the extended feature register */
	if (cpuid_cpu_info.arm_info.arm_arch == CPU_ARCH_EXTENDED) {
		arm_isa_feat1_reg isa = machine_read_isa_feat1();

		/*
		 * If ISA feature register 1 [15:12] == 0x2, this chip
		 * supports sign extension instructions, which indicates ARMv6.
		 */
		if (isa.field.sign_zero_ext_support == 0x2) {
			cpuid_cpu_info.arm_info.arm_arch = CPU_ARCH_ARMv6;
		}
	}
#endif /* (__ARM_ARCH__ != 7) && (__ARM_ARCH__ != 8) */
}

arm_cpu_info_t *
cpuid_info(void)
{
	return &cpuid_cpu_info;
}

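/*
 * Map the MIDR implementer and part number onto a CPUFAMILY_* constant
 * (the same values user space sees via the hw.cpufamily sysctl).
 */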
int
cpuid_get_cpufamily(void)
{
	int cpufamily = 0;

	switch (cpuid_info()->arm_info.arm_implementor) {
	case CPU_VID_ARM:
		switch (cpuid_info()->arm_info.arm_part) {
		case CPU_PART_CORTEXA9:
			cpufamily = CPUFAMILY_ARM_14;
			break;
		case CPU_PART_CORTEXA8:
			cpufamily = CPUFAMILY_ARM_13;
			break;
		case CPU_PART_CORTEXA7:
			cpufamily = CPUFAMILY_ARM_15;
			break;
		case CPU_PART_1136JFS:
		case CPU_PART_1176JZFS:
			cpufamily = CPUFAMILY_ARM_11;
			break;
		case CPU_PART_926EJS:
		case CPU_PART_920T:
			cpufamily = CPUFAMILY_ARM_9;
			break;
		default:
			cpufamily = CPUFAMILY_UNKNOWN;
			break;
		}
		break;

	case CPU_VID_INTEL:
		cpufamily = CPUFAMILY_ARM_XSCALE;
		break;

	case CPU_VID_APPLE:
		switch (cpuid_info()->arm_info.arm_part) {
		case CPU_PART_SWIFT:
			cpufamily = CPUFAMILY_ARM_SWIFT;
			break;
		case CPU_PART_CYCLONE:
			cpufamily = CPUFAMILY_ARM_CYCLONE;
			break;
		case CPU_PART_TYPHOON:
		case CPU_PART_TYPHOON_CAPRI:
			cpufamily = CPUFAMILY_ARM_TYPHOON;
			break;
		case CPU_PART_TWISTER:
		case CPU_PART_TWISTER_ELBA_MALTA:
			cpufamily = CPUFAMILY_ARM_TWISTER;
			break;
		case CPU_PART_HURRICANE:
		case CPU_PART_HURRICANE_MYST:
			cpufamily = CPUFAMILY_ARM_HURRICANE;
			break;
		case CPU_PART_MONSOON:
		case CPU_PART_MISTRAL:
			cpufamily = CPUFAMILY_ARM_MONSOON_MISTRAL;
			break;
		case CPU_PART_VORTEX:
		case CPU_PART_TEMPEST:
		case CPU_PART_TEMPEST_M9:
		case CPU_PART_VORTEX_ARUBA:
		case CPU_PART_TEMPEST_ARUBA:
			cpufamily = CPUFAMILY_ARM_VORTEX_TEMPEST;
			break;
		default:
			cpufamily = CPUFAMILY_UNKNOWN;
			break;
		}
		break;

	default:
		cpufamily = CPUFAMILY_UNKNOWN;
		break;
	}

	return cpufamily;
}

void
do_debugid(void)
{
	machine_do_debugid();
}

arm_debug_info_t *
arm_debug_info(void)
{
	return machine_arm_debug_info();
}

void
do_mvfpid(void)
{
	return machine_do_mvfpid();
}

arm_mvfp_info_t *
arm_mvfp_info(void)
{
	return machine_arm_mvfp_info();
}

void
do_cacheid(void)
{
	arm_cache_clidr_info_t arm_cache_clidr_info;
	arm_cache_ccsidr_info_t arm_cache_ccsidr_info;

	arm_cache_clidr_info.value = machine_read_clidr();


	/* Select L1 data/unified cache */

	machine_write_csselr(CSSELR_L1, CSSELR_DATA_UNIFIED);
	arm_cache_ccsidr_info.value = machine_read_ccsidr();

	cpuid_cache_info.c_unified = (arm_cache_clidr_info.bits.Ctype1 == 0x4) ? 1 : 0;

	switch (arm_cache_ccsidr_info.bits.c_type) {
	case 0x1:
		cpuid_cache_info.c_type = CACHE_WRITE_ALLOCATION;
		break;
	case 0x2:
		cpuid_cache_info.c_type = CACHE_READ_ALLOCATION;
		break;
	case 0x4:
		cpuid_cache_info.c_type = CACHE_WRITE_BACK;
		break;
	case 0x8:
		cpuid_cache_info.c_type = CACHE_WRITE_THROUGH;
		break;
	default:
		cpuid_cache_info.c_type = CACHE_UNKNOWN;
	}

	cpuid_cache_info.c_linesz = 4 * (1 << (arm_cache_ccsidr_info.bits.LineSize + 2));
	cpuid_cache_info.c_assoc = (arm_cache_ccsidr_info.bits.Assoc + 1);

	/* I cache size */
	cpuid_cache_info.c_isize = (arm_cache_ccsidr_info.bits.NumSets + 1) * cpuid_cache_info.c_linesz * cpuid_cache_info.c_assoc;

	/* D cache size */
	cpuid_cache_info.c_dsize = (arm_cache_ccsidr_info.bits.NumSets + 1) * cpuid_cache_info.c_linesz * cpuid_cache_info.c_assoc;
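
	/*
	 * Worked example of the decoding above (illustrative values, not
	 * tied to any particular SoC): LineSize == 2 gives 4 * (1 << 4) =
	 * 64-byte lines; Assoc == 7 and NumSets == 127 mean an 8-way cache
	 * with 128 sets, i.e. 128 * 64 * 8 = 64KB. Note that c_isize and
	 * c_dsize are both derived from the same CCSIDR read (the L1
	 * data/unified selection above).
	 */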


	if ((arm_cache_clidr_info.bits.Ctype3 == 0x4) ||
	    (arm_cache_clidr_info.bits.Ctype2 == 0x4) || (arm_cache_clidr_info.bits.Ctype2 == 0x2)) {
		if (arm_cache_clidr_info.bits.Ctype3 == 0x4) {
			/* Select L3 (LLC) if the SoC is new enough to have that.
			 * This will be the second-level cache for the highest-performing ACC. */
			machine_write_csselr(CSSELR_L3, CSSELR_DATA_UNIFIED);
		} else {
			/* Select L2 data cache */
			machine_write_csselr(CSSELR_L2, CSSELR_DATA_UNIFIED);
		}
		arm_cache_ccsidr_info.value = machine_read_ccsidr();

		cpuid_cache_info.c_linesz = 4 * (1 << (arm_cache_ccsidr_info.bits.LineSize + 2));
		cpuid_cache_info.c_assoc = (arm_cache_ccsidr_info.bits.Assoc + 1);
		cpuid_cache_info.c_l2size = (arm_cache_ccsidr_info.bits.NumSets + 1) * cpuid_cache_info.c_linesz * cpuid_cache_info.c_assoc;
		cpuid_cache_info.c_inner_cache_size = cpuid_cache_info.c_dsize;
		cpuid_cache_info.c_bulksize_op = cpuid_cache_info.c_l2size;

		/* capri has a 2MB L2 cache, unlike every other SoC up to this
		 * point, which had a 1MB L2 cache; so to get the same performance
		 * gain from coloring, we have to double the number of colors.
		 * Note that in general (and in fact as it's implemented in
		 * i386/cpuid.c), the number of colors is calculated as the
		 * cache line size * the number of sets divided by the page
		 * size. Also note that for H8 devices and up, the page size
		 * will be 16KB instead of 4KB, which will reduce the number of
		 * colors required. Thus, this is really a temporary solution
		 * for capri specifically that we may want to generalize later:
		 *
		 * TODO: Are there any special considerations for our unusual
		 * cache geometries (3MB)?
		 */
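		/*
		 * Worked example (illustrative numbers only): a 2MB, 16-way
		 * L2 with 64-byte lines has 2048 sets, so with 4KB pages this
		 * yields (2048 * 64) / 4096 = 32 colors, and with 16KB pages
		 * only 8 colors.
		 */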
		vm_cache_geometry_colors = ((arm_cache_ccsidr_info.bits.NumSets + 1) * cpuid_cache_info.c_linesz) / PAGE_SIZE;
		kprintf(" vm_cache_geometry_colors: %d\n", vm_cache_geometry_colors);
	} else {
		cpuid_cache_info.c_l2size = 0;

		cpuid_cache_info.c_inner_cache_size = cpuid_cache_info.c_dsize;
		cpuid_cache_info.c_bulksize_op = cpuid_cache_info.c_dsize;
	}

	kprintf("%s() - %u bytes %s cache (I:%u D:%u (%s)), %u-way assoc, %u bytes/line\n",
	    __FUNCTION__,
	    cpuid_cache_info.c_dsize + cpuid_cache_info.c_isize,
	    ((cpuid_cache_info.c_type == CACHE_WRITE_BACK) ? "WB" :
	    (cpuid_cache_info.c_type == CACHE_WRITE_THROUGH ? "WT" : "Unknown")),
	    cpuid_cache_info.c_isize,
	    cpuid_cache_info.c_dsize,
	    (cpuid_cache_info.c_unified) ? "unified" : "separate",
	    cpuid_cache_info.c_assoc,
	    cpuid_cache_info.c_linesz);
}

cache_info_t *
cache_info(void)
{
	return &cpuid_cache_info;
}