/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
32 #include <pexpert/pexpert.h>
33 #include <arm/cpuid.h>
34 #include <arm/cpuid_internal.h>
35 #include <vm/vm_page.h>
38 #include <libkern/section_keywords.h>
/* Temporary types to aid decoding,
 * Everything in Little Endian */

/*
 * Bitfield view of the ARM CLIDR (Cache Level ID Register).
 * NOTE(review): the struct/union headers and the Ctype1..Ctype3 members were
 * lost in extraction; they are restored here because do_cacheid() below reads
 * .bits.Ctype1, .bits.Ctype2 and .bits.Ctype3. Bitfields are allocated
 * low-bit-first on this (little-endian) target.
 */
typedef struct {
	uint32_t
	    Ctype1:3,  /* 2:0   - Cache type at level 1 */
	    Ctype2:3,  /* 5:3   - Cache type at level 2 */
	    Ctype3:3,  /* 8:6   - Cache type at level 3 */
	    Ctypes:15, /* 6:23  - Don't Care */
	    LoC:3,     /* 26-24 - Level of Coherency */
	    LoU:3,     /* 29:27 - Level of Unification */
	    RAZ:2;     /* 31:30 - Read-As-Zero */
} arm_cache_clidr_t;

/* Union so the raw 32-bit register value can be decoded via the bitfields. */
typedef union {
	arm_cache_clidr_t bits;
	uint32_t value;
} arm_cache_clidr_info_t;

/*
 * Bitfield view of the ARM CCSIDR (Cache Size ID Register) for the cache
 * currently selected via CSSELR.
 */
typedef struct {
	uint32_t
	    LineSize:3, /* 2:0   - Number of words in cache line */
	    Assoc:10,   /* 12:3  - Associativity of cache */
	    NumSets:15, /* 27:13 - Number of sets in cache */
	    c_type:4;   /* 31:28 - Cache type */
} arm_cache_ccsidr_t;

/* Union so the raw 32-bit register value can be decoded via the bitfields. */
typedef union {
	arm_cache_ccsidr_t bits;
	uint32_t value;
} arm_cache_ccsidr_info_t;
76 static SECURITY_READ_ONLY_LATE(arm_cpu_info_t
) cpuid_cpu_info
;
77 static SECURITY_READ_ONLY_LATE(cache_info_t
) cpuid_cache_info
;
85 cpuid_cpu_info
.value
= machine_read_midr();
86 #if (__ARM_ARCH__ == 8)
88 #if defined(HAS_APPLE_PAC)
89 cpuid_cpu_info
.arm_info
.arm_arch
= CPU_ARCH_ARMv8E
;
90 #else /* defined(HAS_APPLE_PAC) */
91 cpuid_cpu_info
.arm_info
.arm_arch
= CPU_ARCH_ARMv8
;
92 #endif /* defined(HAS_APPLE_PAC) */
94 #elif (__ARM_ARCH__ == 7)
95 #ifdef __ARM_SUB_ARCH__
96 cpuid_cpu_info
.arm_info
.arm_arch
= __ARM_SUB_ARCH__
;
97 #else /* __ARM_SUB_ARCH__ */
98 cpuid_cpu_info
.arm_info
.arm_arch
= CPU_ARCH_ARMv7
;
99 #endif /* __ARM_SUB_ARCH__ */
100 #else /* (__ARM_ARCH__ != 7) && (__ARM_ARCH__ != 8) */
101 /* 1176 architecture lives in the extended feature register */
102 if (cpuid_cpu_info
.arm_info
.arm_arch
== CPU_ARCH_EXTENDED
) {
103 arm_isa_feat1_reg isa
= machine_read_isa_feat1();
106 * if isa feature register 1 [15:12] == 0x2, this chip
107 * supports sign extention instructions, which indicate ARMv6
109 if (isa
.field
.sign_zero_ext_support
== 0x2) {
110 cpuid_cpu_info
.arm_info
.arm_arch
= CPU_ARCH_ARMv6
;
113 #endif /* (__ARM_ARCH__ != 7) && (__ARM_ARCH__ != 8) */
119 return &cpuid_cpu_info
;
123 cpuid_get_cpufamily(void)
127 switch (cpuid_info()->arm_info
.arm_implementor
) {
129 switch (cpuid_info()->arm_info
.arm_part
) {
130 case CPU_PART_CORTEXA9
:
131 cpufamily
= CPUFAMILY_ARM_14
;
133 case CPU_PART_CORTEXA8
:
134 cpufamily
= CPUFAMILY_ARM_13
;
136 case CPU_PART_CORTEXA7
:
137 cpufamily
= CPUFAMILY_ARM_15
;
139 case CPU_PART_1136JFS
:
140 case CPU_PART_1176JZFS
:
141 cpufamily
= CPUFAMILY_ARM_11
;
143 case CPU_PART_926EJS
:
145 cpufamily
= CPUFAMILY_ARM_9
;
148 cpufamily
= CPUFAMILY_UNKNOWN
;
154 cpufamily
= CPUFAMILY_ARM_XSCALE
;
158 switch (cpuid_info()->arm_info
.arm_part
) {
159 case CPU_PART_TYPHOON
:
160 case CPU_PART_TYPHOON_CAPRI
:
161 cpufamily
= CPUFAMILY_ARM_TYPHOON
;
163 case CPU_PART_TWISTER
:
164 case CPU_PART_TWISTER_ELBA_MALTA
:
165 cpufamily
= CPUFAMILY_ARM_TWISTER
;
167 case CPU_PART_HURRICANE
:
168 case CPU_PART_HURRICANE_MYST
:
169 cpufamily
= CPUFAMILY_ARM_HURRICANE
;
171 case CPU_PART_MONSOON
:
172 case CPU_PART_MISTRAL
:
173 cpufamily
= CPUFAMILY_ARM_MONSOON_MISTRAL
;
175 case CPU_PART_VORTEX
:
176 case CPU_PART_TEMPEST
:
177 case CPU_PART_TEMPEST_M9
:
178 case CPU_PART_VORTEX_ARUBA
:
179 case CPU_PART_TEMPEST_ARUBA
:
180 cpufamily
= CPUFAMILY_ARM_VORTEX_TEMPEST
;
182 case CPU_PART_LIGHTNING
:
183 case CPU_PART_THUNDER
:
184 #ifndef RC_HIDE_XNU_FIRESTORM
185 case CPU_PART_THUNDER_M10
:
187 cpufamily
= CPUFAMILY_ARM_LIGHTNING_THUNDER
;
189 #ifndef RC_HIDE_XNU_FIRESTORM
190 case CPU_PART_FIRESTORM
:
191 case CPU_PART_ICESTORM
:
192 case CPU_PART_FIRESTORM_TONGA
:
193 case CPU_PART_ICESTORM_TONGA
:
194 cpufamily
= CPUFAMILY_ARM_FIRESTORM_ICESTORM
;
198 cpufamily
= CPUFAMILY_UNKNOWN
;
204 cpufamily
= CPUFAMILY_UNKNOWN
;
212 cpuid_get_cpusubfamily(void)
214 int cpusubfamily
= CPUSUBFAMILY_UNKNOWN
;
216 if (cpuid_info()->arm_info
.arm_implementor
!= CPU_VID_APPLE
) {
220 switch (cpuid_info()->arm_info
.arm_part
) {
221 case CPU_PART_TYPHOON
:
222 case CPU_PART_TWISTER
:
223 case CPU_PART_HURRICANE
:
224 case CPU_PART_MONSOON
:
225 case CPU_PART_MISTRAL
:
226 case CPU_PART_VORTEX
:
227 case CPU_PART_TEMPEST
:
228 case CPU_PART_LIGHTNING
:
229 case CPU_PART_THUNDER
:
230 #ifndef RC_HIDE_XNU_FIRESTORM
231 case CPU_PART_FIRESTORM
:
232 case CPU_PART_ICESTORM
:
234 cpusubfamily
= CPUSUBFAMILY_ARM_HP
;
236 case CPU_PART_TYPHOON_CAPRI
:
237 case CPU_PART_TWISTER_ELBA_MALTA
:
238 case CPU_PART_HURRICANE_MYST
:
239 case CPU_PART_VORTEX_ARUBA
:
240 case CPU_PART_TEMPEST_ARUBA
:
241 #ifndef RC_HIDE_XNU_FIRESTORM
242 case CPU_PART_FIRESTORM_TONGA
:
243 case CPU_PART_ICESTORM_TONGA
:
245 cpusubfamily
= CPUSUBFAMILY_ARM_HG
;
247 case CPU_PART_TEMPEST_M9
:
248 #ifndef RC_HIDE_XNU_FIRESTORM
249 case CPU_PART_THUNDER_M10
:
251 cpusubfamily
= CPUSUBFAMILY_ARM_M
;
254 cpusubfamily
= CPUFAMILY_UNKNOWN
;
264 machine_do_debugid();
270 return machine_arm_debug_info();
276 return machine_do_mvfpid();
283 return machine_arm_mvfp_info();
289 arm_cache_clidr_info_t arm_cache_clidr_info
;
290 arm_cache_ccsidr_info_t arm_cache_ccsidr_info
;
292 arm_cache_clidr_info
.value
= machine_read_clidr();
295 /* Select L1 data/unified cache */
297 machine_write_csselr(CSSELR_L1
, CSSELR_DATA_UNIFIED
);
298 arm_cache_ccsidr_info
.value
= machine_read_ccsidr();
300 cpuid_cache_info
.c_unified
= (arm_cache_clidr_info
.bits
.Ctype1
== 0x4) ? 1 : 0;
302 switch (arm_cache_ccsidr_info
.bits
.c_type
) {
304 cpuid_cache_info
.c_type
= CACHE_WRITE_ALLOCATION
;
307 cpuid_cache_info
.c_type
= CACHE_READ_ALLOCATION
;
310 cpuid_cache_info
.c_type
= CACHE_WRITE_BACK
;
313 cpuid_cache_info
.c_type
= CACHE_WRITE_THROUGH
;
316 cpuid_cache_info
.c_type
= CACHE_UNKNOWN
;
319 cpuid_cache_info
.c_linesz
= 4 * (1 << (arm_cache_ccsidr_info
.bits
.LineSize
+ 2));
320 cpuid_cache_info
.c_assoc
= (arm_cache_ccsidr_info
.bits
.Assoc
+ 1);
323 cpuid_cache_info
.c_isize
= (arm_cache_ccsidr_info
.bits
.NumSets
+ 1) * cpuid_cache_info
.c_linesz
* cpuid_cache_info
.c_assoc
;
326 cpuid_cache_info
.c_dsize
= (arm_cache_ccsidr_info
.bits
.NumSets
+ 1) * cpuid_cache_info
.c_linesz
* cpuid_cache_info
.c_assoc
;
329 if ((arm_cache_clidr_info
.bits
.Ctype3
== 0x4) ||
330 (arm_cache_clidr_info
.bits
.Ctype2
== 0x4) || (arm_cache_clidr_info
.bits
.Ctype2
== 0x2)) {
331 if (arm_cache_clidr_info
.bits
.Ctype3
== 0x4) {
332 /* Select L3 (LLC) if the SoC is new enough to have that.
333 * This will be the second-level cache for the highest-performing ACC. */
334 machine_write_csselr(CSSELR_L3
, CSSELR_DATA_UNIFIED
);
336 /* Select L2 data cache */
337 machine_write_csselr(CSSELR_L2
, CSSELR_DATA_UNIFIED
);
339 arm_cache_ccsidr_info
.value
= machine_read_ccsidr();
341 cpuid_cache_info
.c_linesz
= 4 * (1 << (arm_cache_ccsidr_info
.bits
.LineSize
+ 2));
342 cpuid_cache_info
.c_assoc
= (arm_cache_ccsidr_info
.bits
.Assoc
+ 1);
343 cpuid_cache_info
.c_l2size
= (arm_cache_ccsidr_info
.bits
.NumSets
+ 1) * cpuid_cache_info
.c_linesz
* cpuid_cache_info
.c_assoc
;
344 cpuid_cache_info
.c_inner_cache_size
= cpuid_cache_info
.c_dsize
;
345 cpuid_cache_info
.c_bulksize_op
= cpuid_cache_info
.c_l2size
;
347 /* capri has a 2MB L2 cache unlike every other SoC up to this
348 * point with a 1MB L2 cache, so to get the same performance
349 * gain from coloring, we have to double the number of colors.
350 * Note that in general (and in fact as it's implemented in
351 * i386/cpuid.c), the number of colors is calculated as the
352 * cache line size * the number of sets divided by the page
353 * size. Also note that for H8 devices and up, the page size
354 * will be 16k instead of 4, which will reduce the number of
355 * colors required. Thus, this is really a temporary solution
356 * for capri specifically that we may want to generalize later:
358 * TODO: Are there any special considerations for our unusual
359 * cache geometries (3MB)?
361 vm_cache_geometry_colors
= ((arm_cache_ccsidr_info
.bits
.NumSets
+ 1) * cpuid_cache_info
.c_linesz
) / PAGE_SIZE
;
362 kprintf(" vm_cache_geometry_colors: %d\n", vm_cache_geometry_colors
);
364 cpuid_cache_info
.c_l2size
= 0;
366 cpuid_cache_info
.c_inner_cache_size
= cpuid_cache_info
.c_dsize
;
367 cpuid_cache_info
.c_bulksize_op
= cpuid_cache_info
.c_dsize
;
370 if (cpuid_cache_info
.c_unified
== 0) {
371 machine_write_csselr(CSSELR_L1
, CSSELR_INSTR
);
372 arm_cache_ccsidr_info
.value
= machine_read_ccsidr();
373 uint32_t c_linesz
= 4 * (1 << (arm_cache_ccsidr_info
.bits
.LineSize
+ 2));
374 uint32_t c_assoc
= (arm_cache_ccsidr_info
.bits
.Assoc
+ 1);
376 cpuid_cache_info
.c_isize
= (arm_cache_ccsidr_info
.bits
.NumSets
+ 1) * c_linesz
* c_assoc
;
379 kprintf("%s() - %u bytes %s cache (I:%u D:%u (%s)), %u-way assoc, %u bytes/line\n",
381 cpuid_cache_info
.c_dsize
+ cpuid_cache_info
.c_isize
,
382 ((cpuid_cache_info
.c_type
== CACHE_WRITE_BACK
) ? "WB" :
383 (cpuid_cache_info
.c_type
== CACHE_WRITE_THROUGH
? "WT" : "Unknown")),
384 cpuid_cache_info
.c_isize
,
385 cpuid_cache_info
.c_dsize
,
386 (cpuid_cache_info
.c_unified
) ? "unified" : "separate",
387 cpuid_cache_info
.c_assoc
,
388 cpuid_cache_info
.c_linesz
);
394 return &cpuid_cache_info
;