/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <pexpert/pexpert.h>
#include <arm/cpuid.h>
#include <arm/cpuid_internal.h>
#include <vm/vm_page.h>

#include <libkern/section_keywords.h>
/* Temporary types to aid decoding,
 * Everything in Little Endian */

/* Decoded view of the ARM Cache Level ID Register (CLIDR). */
typedef struct {
	uint32_t
	    Ctype1:3,               /*  2:0  - Type of Level 1 cache */
	    Ctype2:3,               /*  5:3  - Type of Level 2 cache */
	    Ctype3:3,               /*  8:6  - Type of Level 3 cache */
	    Ctypes:15,              /* 23:9  - Don't Care */
	    LoC:3,                  /* 26:24 - Level of Coherency */
	    LoU:3,                  /* 29:27 - Level of Unification */
	    RAZ:2;                  /* 31:30 - Read-As-Zero */
} arm_cache_clidr_t;

/* Raw/decoded overlay for a CLIDR value read from the hardware. */
typedef union {
	arm_cache_clidr_t bits;     /* decoded bitfield view */
	uint32_t value;             /* raw 32-bit register value */
} arm_cache_clidr_info_t;
/* Decoded view of the ARM Cache Size ID Register (CCSIDR). */
typedef struct {
	uint32_t
	    LineSize:3,             /*  2:0  - Number of words in cache line */
	    Assoc:10,               /* 12:3  - Associativity of cache */
	    NumSets:15,             /* 27:13 - Number of sets in cache */
	    c_type:4;               /* 31:28 - Cache type */
} arm_cache_ccsidr_t;

/* Raw/decoded overlay for a CCSIDR value read from the hardware. */
typedef union {
	arm_cache_ccsidr_t bits;    /* decoded bitfield view */
	uint32_t value;             /* raw 32-bit register value */
} arm_cache_ccsidr_info_t;
76 static SECURITY_READ_ONLY_LATE(arm_cpu_info_t
) cpuid_cpu_info
;
77 static SECURITY_READ_ONLY_LATE(cache_info_t
) cpuid_cache_info
;
85 cpuid_cpu_info
.value
= machine_read_midr();
86 #if (__ARM_ARCH__ == 8)
88 #if defined(HAS_APPLE_PAC)
89 cpuid_cpu_info
.arm_info
.arm_arch
= CPU_ARCH_ARMv8E
;
90 #else /* defined(HAS_APPLE_PAC) */
91 cpuid_cpu_info
.arm_info
.arm_arch
= CPU_ARCH_ARMv8
;
92 #endif /* defined(HAS_APPLE_PAC) */
94 #elif (__ARM_ARCH__ == 7)
95 #ifdef __ARM_SUB_ARCH__
96 cpuid_cpu_info
.arm_info
.arm_arch
= __ARM_SUB_ARCH__
;
97 #else /* __ARM_SUB_ARCH__ */
98 cpuid_cpu_info
.arm_info
.arm_arch
= CPU_ARCH_ARMv7
;
99 #endif /* __ARM_SUB_ARCH__ */
100 #else /* (__ARM_ARCH__ != 7) && (__ARM_ARCH__ != 8) */
101 /* 1176 architecture lives in the extended feature register */
102 if (cpuid_cpu_info
.arm_info
.arm_arch
== CPU_ARCH_EXTENDED
) {
103 arm_isa_feat1_reg isa
= machine_read_isa_feat1();
106 * if isa feature register 1 [15:12] == 0x2, this chip
107 * supports sign extention instructions, which indicate ARMv6
109 if (isa
.field
.sign_zero_ext_support
== 0x2) {
110 cpuid_cpu_info
.arm_info
.arm_arch
= CPU_ARCH_ARMv6
;
113 #endif /* (__ARM_ARCH__ != 7) && (__ARM_ARCH__ != 8) */
119 return &cpuid_cpu_info
;
123 cpuid_get_cpufamily(void)
127 switch (cpuid_info()->arm_info
.arm_implementor
) {
129 switch (cpuid_info()->arm_info
.arm_part
) {
130 case CPU_PART_CORTEXA9
:
131 cpufamily
= CPUFAMILY_ARM_14
;
133 case CPU_PART_CORTEXA8
:
134 cpufamily
= CPUFAMILY_ARM_13
;
136 case CPU_PART_CORTEXA7
:
137 cpufamily
= CPUFAMILY_ARM_15
;
139 case CPU_PART_1136JFS
:
140 case CPU_PART_1176JZFS
:
141 cpufamily
= CPUFAMILY_ARM_11
;
143 case CPU_PART_926EJS
:
145 cpufamily
= CPUFAMILY_ARM_9
;
148 cpufamily
= CPUFAMILY_UNKNOWN
;
154 cpufamily
= CPUFAMILY_ARM_XSCALE
;
158 switch (cpuid_info()->arm_info
.arm_part
) {
159 case CPU_PART_TYPHOON
:
160 case CPU_PART_TYPHOON_CAPRI
:
161 cpufamily
= CPUFAMILY_ARM_TYPHOON
;
163 case CPU_PART_TWISTER
:
164 case CPU_PART_TWISTER_ELBA_MALTA
:
165 cpufamily
= CPUFAMILY_ARM_TWISTER
;
167 case CPU_PART_HURRICANE
:
168 case CPU_PART_HURRICANE_MYST
:
169 cpufamily
= CPUFAMILY_ARM_HURRICANE
;
171 case CPU_PART_MONSOON
:
172 case CPU_PART_MISTRAL
:
173 cpufamily
= CPUFAMILY_ARM_MONSOON_MISTRAL
;
175 case CPU_PART_VORTEX
:
176 case CPU_PART_TEMPEST
:
177 case CPU_PART_TEMPEST_M9
:
178 case CPU_PART_VORTEX_ARUBA
:
179 case CPU_PART_TEMPEST_ARUBA
:
180 cpufamily
= CPUFAMILY_ARM_VORTEX_TEMPEST
;
182 case CPU_PART_LIGHTNING
:
183 case CPU_PART_THUNDER
:
184 cpufamily
= CPUFAMILY_ARM_LIGHTNING_THUNDER
;
187 cpufamily
= CPUFAMILY_UNKNOWN
;
193 cpufamily
= CPUFAMILY_UNKNOWN
;
201 cpuid_get_cpusubfamily(void)
203 int cpusubfamily
= CPUSUBFAMILY_UNKNOWN
;
205 if (cpuid_info()->arm_info
.arm_implementor
!= CPU_VID_APPLE
) {
209 switch (cpuid_info()->arm_info
.arm_part
) {
210 case CPU_PART_TYPHOON
:
211 case CPU_PART_TWISTER
:
212 case CPU_PART_HURRICANE
:
213 case CPU_PART_MONSOON
:
214 case CPU_PART_MISTRAL
:
215 case CPU_PART_VORTEX
:
216 case CPU_PART_TEMPEST
:
217 case CPU_PART_LIGHTNING
:
218 case CPU_PART_THUNDER
:
219 cpusubfamily
= CPUSUBFAMILY_ARM_HP
;
221 case CPU_PART_TYPHOON_CAPRI
:
222 case CPU_PART_TWISTER_ELBA_MALTA
:
223 case CPU_PART_HURRICANE_MYST
:
224 case CPU_PART_VORTEX_ARUBA
:
225 case CPU_PART_TEMPEST_ARUBA
:
226 cpusubfamily
= CPUSUBFAMILY_ARM_HG
;
228 case CPU_PART_TEMPEST_M9
:
229 cpusubfamily
= CPUSUBFAMILY_ARM_M
;
232 cpusubfamily
= CPUFAMILY_UNKNOWN
;
/*
 * do_debugid
 *
 * Have the machine layer read and cache the debug identification
 * registers. Called once during CPU initialization.
 */
void
do_debugid(void)
{
	machine_do_debugid();
}
248 return machine_arm_debug_info();
/*
 * do_mvfpid
 *
 * Have the machine layer read and cache the media/VFP feature
 * identification registers. Called once during CPU initialization.
 */
void
do_mvfpid(void)
{
	/* Plain call: returning a void expression from a void function is a
	 * C constraint violation; the original 'return' added nothing. */
	machine_do_mvfpid();
}
261 return machine_arm_mvfp_info();
267 arm_cache_clidr_info_t arm_cache_clidr_info
;
268 arm_cache_ccsidr_info_t arm_cache_ccsidr_info
;
270 arm_cache_clidr_info
.value
= machine_read_clidr();
273 /* Select L1 data/unified cache */
275 machine_write_csselr(CSSELR_L1
, CSSELR_DATA_UNIFIED
);
276 arm_cache_ccsidr_info
.value
= machine_read_ccsidr();
278 cpuid_cache_info
.c_unified
= (arm_cache_clidr_info
.bits
.Ctype1
== 0x4) ? 1 : 0;
280 switch (arm_cache_ccsidr_info
.bits
.c_type
) {
282 cpuid_cache_info
.c_type
= CACHE_WRITE_ALLOCATION
;
285 cpuid_cache_info
.c_type
= CACHE_READ_ALLOCATION
;
288 cpuid_cache_info
.c_type
= CACHE_WRITE_BACK
;
291 cpuid_cache_info
.c_type
= CACHE_WRITE_THROUGH
;
294 cpuid_cache_info
.c_type
= CACHE_UNKNOWN
;
297 cpuid_cache_info
.c_linesz
= 4 * (1 << (arm_cache_ccsidr_info
.bits
.LineSize
+ 2));
298 cpuid_cache_info
.c_assoc
= (arm_cache_ccsidr_info
.bits
.Assoc
+ 1);
301 cpuid_cache_info
.c_isize
= (arm_cache_ccsidr_info
.bits
.NumSets
+ 1) * cpuid_cache_info
.c_linesz
* cpuid_cache_info
.c_assoc
;
304 cpuid_cache_info
.c_dsize
= (arm_cache_ccsidr_info
.bits
.NumSets
+ 1) * cpuid_cache_info
.c_linesz
* cpuid_cache_info
.c_assoc
;
307 if ((arm_cache_clidr_info
.bits
.Ctype3
== 0x4) ||
308 (arm_cache_clidr_info
.bits
.Ctype2
== 0x4) || (arm_cache_clidr_info
.bits
.Ctype2
== 0x2)) {
309 if (arm_cache_clidr_info
.bits
.Ctype3
== 0x4) {
310 /* Select L3 (LLC) if the SoC is new enough to have that.
311 * This will be the second-level cache for the highest-performing ACC. */
312 machine_write_csselr(CSSELR_L3
, CSSELR_DATA_UNIFIED
);
314 /* Select L2 data cache */
315 machine_write_csselr(CSSELR_L2
, CSSELR_DATA_UNIFIED
);
317 arm_cache_ccsidr_info
.value
= machine_read_ccsidr();
319 cpuid_cache_info
.c_linesz
= 4 * (1 << (arm_cache_ccsidr_info
.bits
.LineSize
+ 2));
320 cpuid_cache_info
.c_assoc
= (arm_cache_ccsidr_info
.bits
.Assoc
+ 1);
321 cpuid_cache_info
.c_l2size
= (arm_cache_ccsidr_info
.bits
.NumSets
+ 1) * cpuid_cache_info
.c_linesz
* cpuid_cache_info
.c_assoc
;
322 cpuid_cache_info
.c_inner_cache_size
= cpuid_cache_info
.c_dsize
;
323 cpuid_cache_info
.c_bulksize_op
= cpuid_cache_info
.c_l2size
;
325 /* capri has a 2MB L2 cache unlike every other SoC up to this
326 * point with a 1MB L2 cache, so to get the same performance
327 * gain from coloring, we have to double the number of colors.
328 * Note that in general (and in fact as it's implemented in
329 * i386/cpuid.c), the number of colors is calculated as the
330 * cache line size * the number of sets divided by the page
331 * size. Also note that for H8 devices and up, the page size
332 * will be 16k instead of 4, which will reduce the number of
333 * colors required. Thus, this is really a temporary solution
334 * for capri specifically that we may want to generalize later:
336 * TODO: Are there any special considerations for our unusual
337 * cache geometries (3MB)?
339 vm_cache_geometry_colors
= ((arm_cache_ccsidr_info
.bits
.NumSets
+ 1) * cpuid_cache_info
.c_linesz
) / PAGE_SIZE
;
340 kprintf(" vm_cache_geometry_colors: %d\n", vm_cache_geometry_colors
);
342 cpuid_cache_info
.c_l2size
= 0;
344 cpuid_cache_info
.c_inner_cache_size
= cpuid_cache_info
.c_dsize
;
345 cpuid_cache_info
.c_bulksize_op
= cpuid_cache_info
.c_dsize
;
348 if (cpuid_cache_info
.c_unified
== 0) {
349 machine_write_csselr(CSSELR_L1
, CSSELR_INSTR
);
350 arm_cache_ccsidr_info
.value
= machine_read_ccsidr();
351 uint32_t c_linesz
= 4 * (1 << (arm_cache_ccsidr_info
.bits
.LineSize
+ 2));
352 uint32_t c_assoc
= (arm_cache_ccsidr_info
.bits
.Assoc
+ 1);
354 cpuid_cache_info
.c_isize
= (arm_cache_ccsidr_info
.bits
.NumSets
+ 1) * c_linesz
* c_assoc
;
357 kprintf("%s() - %u bytes %s cache (I:%u D:%u (%s)), %u-way assoc, %u bytes/line\n",
359 cpuid_cache_info
.c_dsize
+ cpuid_cache_info
.c_isize
,
360 ((cpuid_cache_info
.c_type
== CACHE_WRITE_BACK
) ? "WB" :
361 (cpuid_cache_info
.c_type
== CACHE_WRITE_THROUGH
? "WT" : "Unknown")),
362 cpuid_cache_info
.c_isize
,
363 cpuid_cache_info
.c_dsize
,
364 (cpuid_cache_info
.c_unified
) ? "unified" : "separate",
365 cpuid_cache_info
.c_assoc
,
366 cpuid_cache_info
.c_linesz
);
372 return &cpuid_cache_info
;