/*
 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <pexpert/arm64/board_config.h>

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)

#include <libkern/section_keywords.h>
#include <libkern/kernel_mach_header.h>
#include <pexpert/pexpert.h>
#include <pexpert/device_tree.h>
#include <machine/atomic.h>
#include <arm/cpu_internal.h>
#include <arm/caches_internal.h>
#include <arm/machine_routines.h>

#include <arm64/tlb.h>
#include <arm64/amcc_rorgn.h>
#include <memmap_types.h>

#if HIBERNATION
#include <arm64/pal_hibernate.h>
#endif /* HIBERNATION */

#if HAS_IOA
#define MAX_LOCK_GROUPS 2   // 2 lock groups (AMCC, IOA)
#define IOA_LOCK_GROUP  1   // IOA lock group index
#else
#define MAX_LOCK_GROUPS 1   // 1 lock group (AMCC)
#endif
#define AMCC_LOCK_GROUP 0   // AMCC lock group index
#define MAX_APERTURES   16  // Maximum number of register apertures
#define MAX_PLANES      16  // Maximum number of planes within each aperture

#define LOCK_GROUP_HAS_CACHE_STATUS_REG (1 << 0) // Look for cache status register in the lock group
#define LOCK_GROUP_HAS_MASTER_LOCK_REG  (1 << 1) // Look for master lock register in the lock group

#define LOCK_TYPE_HAS_LOCK_REG          (1 << 0) // Look for lock register in the lock type
extern vm_offset_t segLOWESTRO;
extern vm_offset_t segHIGHESTRO;

extern vm_offset_t segLASTB;
extern vm_offset_t segTEXTEXECB;
extern unsigned long segSizeLAST;
extern unsigned long segSizeLASTDATACONST;
extern unsigned long segSizeTEXTEXEC;
typedef struct lock_reg {
    uint32_t    reg_offset;     // Register offset
    uint32_t    reg_mask;       // Register mask
    uint32_t    reg_value;      // Register value
} lock_reg_t;

typedef struct lock_type {
    uint32_t    page_size_shift;    // page shift used in lower/upper limit registers
    lock_reg_t  lower_limit_reg;    // Lower limit register description
    lock_reg_t  upper_limit_reg;    // Upper limit register description
    lock_reg_t  enable_reg;         // Enable register description
    lock_reg_t  write_disable_reg;  // Write disable register description
    lock_reg_t  lock_reg;           // Lock register description
} lock_type_t;

typedef struct lock_group {
    uint32_t    aperture_count;     // Aperture count
    uint32_t    aperture_size;      // Aperture size
    uint32_t    plane_count;        // Number of planes in the aperture
    uint32_t    plane_stride;       // Stride between planes in the aperture
    uint64_t    aperture_phys_addr[MAX_APERTURES];  // Aperture physical addresses
    lock_reg_t  cache_status_reg;   // Cache status register description
#if HAS_IOA
    lock_reg_t  master_lock_reg;    // Master lock register description
#endif
    lock_type_t ctrr_a;             // CTRR-A (KTRR) lock
} lock_group_t;

SECURITY_READ_ONLY_LATE(lock_group_t) _lock_group[MAX_LOCK_GROUPS] = { {0} };
SECURITY_READ_ONLY_LATE(bool) lock_regs_set = false;
static vm_offset_t rorgn_begin = 0;
static vm_offset_t rorgn_end = 0;
SECURITY_READ_ONLY_LATE(vm_offset_t) ctrr_begin = 0;
SECURITY_READ_ONLY_LATE(vm_offset_t) ctrr_end = 0;

static uint64_t lock_group_va[MAX_LOCK_GROUPS][MAX_APERTURES];

#if CONFIG_CSR_FROM_DT
SECURITY_READ_ONLY_LATE(bool) csr_unsafe_kernel_text = false;
#endif /* CONFIG_CSR_FROM_DT */

#if defined(KERNEL_INTEGRITY_KTRR)
#define CTRR_LOCK_MSR ARM64_REG_KTRR_LOCK_EL1
#elif defined(KERNEL_INTEGRITY_CTRR)
#define CTRR_LOCK_MSR ARM64_REG_CTRR_LOCK_EL1
#endif
/*
 * lock_group_t - describes all the parameters xnu needs to know to
 * lock down the AMCC/IOA (Lock Group) Read Only Region(s) on cold start.
 * This description assumes that each AMCC/IOA in a given system will
 * be identical, respectively. The only variables are the number of
 * apertures present and the physical base address of each aperture.
 *
 * General xnu lock group lockdown flow:
 * - for each lock group:
 *   - ml_io_map all present lock group physical base addresses
 *   - assert all lock group begin/end page numbers set by iboot are identical
 *   - convert lock group begin/end page number to physical address
 *   - assert lock group begin/end page numbers match xnu view of read only region
 *   - assert lock group is not currently locked
 *   - ensure lock group master cache is disabled
 *   - write enable/lock registers to enable/lock the lock group read only region
 */
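
/*
 * Illustrative sketch of the device tree layout consumed below. The node and
 * property names follow the strings parsed in this file; the bracketed values
 * are placeholders only, not taken from any real device tree:
 *
 *   /chosen/lock-regs/
 *     amcc/
 *       aperture-count      <1>
 *       aperture-size       <0x100000>
 *       plane-count         <4>
 *       plane-stride        <0x40000>
 *       aperture-phys-addr  <one 64-bit base address per aperture>
 *       cache-status-reg-offset / -mask / -value
 *       amcc-ctrr-a/
 *         page-size-shift   <14>
 *         lower-limit-reg-offset / -mask
 *         upper-limit-reg-offset / -mask
 *         enable- / write-disable- / lock-reg-offset / -mask / -value
 */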
static bool
_dt_get_uint32(DTEntry node, char const *name, uint32_t *dest)
{
    uint32_t const *value;
    unsigned int size;

    if (SecureDTGetProperty(node, name, (void const **)&value, &size) != kSuccess) {
        return false;
    }

    if (size != sizeof(uint32_t)) {
        panic("lock-regs: unexpected size %u", size);
    }

    *dest = *value;

    return true;
}
static uint32_t
_dt_get_uint32_required(DTEntry node, char const *name)
{
    uint32_t value;

    if (!_dt_get_uint32(node, name, &value)) {
        panic("lock-regs: cannot find required property '%s'", name);
    }

    return value;
}
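
/*
 * _dt_get_lock_reg:
 *
 * Populates a lock_reg_t from properties named "<reg_name>-reg-offset",
 * "<reg_name>-reg-mask" and, when with_value is set, "<reg_name>-reg-value".
 * For example, reg_name "lower-limit" reads "lower-limit-reg-offset" and
 * "lower-limit-reg-mask". A missing offset property is fatal only when the
 * register is required; otherwise the function simply reports it as absent.
 */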
static bool
_dt_get_lock_reg(DTEntry node, lock_reg_t *reg, const char *parent_name, const char *reg_name, bool required, bool with_value)
{
    char prop_name[32];
    bool found;

    snprintf(prop_name, sizeof(prop_name), "%s-reg-offset", reg_name);
    found = _dt_get_uint32(node, prop_name, &reg->reg_offset);
    if (!found) {
        if (required) {
            panic("%s: missing property '%s'", parent_name, prop_name);
        }

        return false;
    }

    snprintf(prop_name, sizeof(prop_name), "%s-reg-mask", reg_name);
    found = _dt_get_uint32(node, prop_name, &reg->reg_mask);
    if (!found) {
        panic("%s: missing property '%s'", parent_name, prop_name);
    }

    if (with_value) {
        snprintf(prop_name, sizeof(prop_name), "%s-reg-value", reg_name);
        found = _dt_get_uint32(node, prop_name, &reg->reg_value);
        if (!found) {
            panic("%s: missing property '%s'", parent_name, prop_name);
        }
    }

    return true;
}
static DTEntry
_dt_get_lock_group(DTEntry lock_regs_node, lock_group_t *lock_group, const char *group_name, uint32_t options)
{
    DTEntry group_node;

    // Find the lock group node.
    if (SecureDTLookupEntry(lock_regs_node, group_name, &group_node) != kSuccess) {
        panic("lock-regs: /chosen/lock-regs/%s not found", group_name);
    }

    lock_group->aperture_count = _dt_get_uint32_required(group_node, "aperture-count");

    if (lock_group->aperture_count > MAX_APERTURES) {
        panic("%s: %s %u exceeds maximum %u", group_name, "aperture-count", lock_group->aperture_count, MAX_APERTURES);
    }

    lock_group->aperture_size = _dt_get_uint32_required(group_node, "aperture-size");

    if ((lock_group->aperture_count > 0) && (lock_group->aperture_size == 0)) {
        panic("%s: have %u apertures, but 0 size", group_name, lock_group->aperture_count);
    }

    lock_group->plane_count = _dt_get_uint32_required(group_node, "plane-count");

    if (lock_group->plane_count > MAX_PLANES) {
        panic("%s: %s %u exceeds maximum %u", group_name, "plane-count", lock_group->plane_count, MAX_PLANES);
    }

    if (!_dt_get_uint32(group_node, "plane-stride", &lock_group->plane_stride)) {
        lock_group->plane_stride = 0;
    }

    if (lock_group->plane_count > 1) {
        uint32_t aperture_size;

        if (lock_group->plane_stride == 0) {
            panic("%s: plane-count (%u) > 1, but stride is 0/missing", group_name, lock_group->plane_count);
        }

        if (os_mul_overflow(lock_group->plane_count, lock_group->plane_stride, &aperture_size)
            || (aperture_size > lock_group->aperture_size)) {
            panic("%s: aperture-size (%#x) is insufficient to cover plane-count (%#x) of plane-stride (%#x) bytes", group_name, lock_group->aperture_size, lock_group->plane_count, lock_group->plane_stride);
        }
    }

    uint64_t const *phys_bases = NULL;
    unsigned int prop_size;
    if (SecureDTGetProperty(group_node, "aperture-phys-addr", (const void**)&phys_bases, &prop_size) != kSuccess) {
        panic("%s: missing required %s", group_name, "aperture-phys-addr");
    }

    if (prop_size != lock_group->aperture_count * sizeof(lock_group->aperture_phys_addr[0])) {
        panic("%s: aperture-phys-addr size (%#x) != (aperture-count (%#x) * PA size (%#zx) = %#lx)",
            group_name, prop_size, lock_group->aperture_count, sizeof(lock_group->aperture_phys_addr[0]),
            lock_group->aperture_count * sizeof(lock_group->aperture_phys_addr[0]));
    }

    memcpy(lock_group->aperture_phys_addr, phys_bases, prop_size);

    if (options & LOCK_GROUP_HAS_CACHE_STATUS_REG) {
        _dt_get_lock_reg(group_node, &lock_group->cache_status_reg, group_name, "cache-status", true, true);
    }

#if HAS_IOA
    if (options & LOCK_GROUP_HAS_MASTER_LOCK_REG) {
        _dt_get_lock_reg(group_node, &lock_group->master_lock_reg, group_name, "master-lock", true, true);
    }
#endif

    return group_node;
}
static void
_dt_get_lock_type(DTEntry group_node, lock_type_t *lock_type, const char *group_name, const char *type_name, uint32_t options)
{
    DTEntry type_node;
    bool has_lock = options & LOCK_TYPE_HAS_LOCK_REG;

    // Find the lock type type_node.
    if (SecureDTLookupEntry(group_node, type_name, &type_node) != kSuccess) {
        panic("lock-regs: /chosen/lock-regs/%s/%s not found", group_name, type_name);
    }

    lock_type->page_size_shift = _dt_get_uint32_required(type_node, "page-size-shift");

    // Find all of the registers for this lock type.
    //               Parent      Register Descriptor            Parent Name  Reg Name        Required  Value
    _dt_get_lock_reg(type_node, &lock_type->lower_limit_reg,    type_name,   "lower-limit",   true,    false);
    _dt_get_lock_reg(type_node, &lock_type->upper_limit_reg,    type_name,   "upper-limit",   true,    false);
    _dt_get_lock_reg(type_node, &lock_type->lock_reg,           type_name,   "lock",          has_lock, true);
    _dt_get_lock_reg(type_node, &lock_type->enable_reg,         type_name,   "enable",        false,   true);
    _dt_get_lock_reg(type_node, &lock_type->write_disable_reg,  type_name,   "write-disable", false,   true);
}
/*
 * find_lock_group_data:
 *
 * finds and gathers lock group (AMCC/IOA) data from device tree, returns it as lock_group_t
 *
 * called first time before IOKit start while still uniprocessor
 */
static lock_group_t const * _Nonnull
find_lock_group_data(void)
{
    DTEntry lock_regs_node = NULL;
    DTEntry amcc_node = NULL;

    // Return the lock group data pointer if we already found and populated one.
    if (lock_regs_set) {
        return _lock_group;
    }

    if (SecureDTLookupEntry(NULL, "/chosen/lock-regs", &lock_regs_node) != kSuccess) {
        panic("lock-regs: /chosen/lock-regs not found (your iBoot or EDT may be too old)");
    }

    amcc_node = _dt_get_lock_group(lock_regs_node, &_lock_group[AMCC_LOCK_GROUP], "amcc", LOCK_GROUP_HAS_CACHE_STATUS_REG);
    _dt_get_lock_type(amcc_node, &_lock_group[AMCC_LOCK_GROUP].ctrr_a, "amcc", "amcc-ctrr-a", LOCK_TYPE_HAS_LOCK_REG);

#if HAS_IOA
    DTEntry ioa_node = _dt_get_lock_group(lock_regs_node, &_lock_group[IOA_LOCK_GROUP], "ioa", LOCK_GROUP_HAS_MASTER_LOCK_REG);
    _dt_get_lock_type(ioa_node, &_lock_group[IOA_LOCK_GROUP].ctrr_a, "ioa", "ioa-ctrr-a", 0);
#endif

    lock_regs_set = true;

    return _lock_group;
}
void
rorgn_stash_range(void)
{
#if DEVELOPMENT || DEBUG || CONFIG_DTRACE || CONFIG_CSR_FROM_DT
    boolean_t rorgn_disable = FALSE;

#if DEVELOPMENT || DEBUG
    PE_parse_boot_argn("-unsafe_kernel_text", &rorgn_disable, sizeof(rorgn_disable));
#endif

#if CONFIG_CSR_FROM_DT
    if (csr_unsafe_kernel_text) {
        rorgn_disable = true;
    }
#endif

    if (rorgn_disable) {
        /* take early out if boot arg present, don't query any machine registers to avoid
         * dependency on amcc DT entry
         */
        return;
    }
#endif /* DEVELOPMENT || DEBUG || CONFIG_DTRACE || CONFIG_CSR_FROM_DT */

    lock_group_t const * const lock_group = find_lock_group_data();

    /* Get the lock group read-only region range values, and stash them into rorgn_begin, rorgn_end. */
    uint64_t rorgn_begin_page[MAX_LOCK_GROUPS][MAX_APERTURES][MAX_PLANES];
    uint64_t rorgn_end_page[MAX_LOCK_GROUPS][MAX_APERTURES][MAX_PLANES];
    for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
        for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
            const uint64_t amcc_pa = lock_group[lg].aperture_phys_addr[aperture];

            // VA space will be unmapped and freed after lockdown complete in rorgn_lockdown()
            lock_group_va[lg][aperture] = ml_io_map(amcc_pa, lock_group[lg].aperture_size);

            if (lock_group_va[lg][aperture] == 0) {
                panic("map aperture_phys_addr[%u]/%#x failed", aperture, lock_group[lg].aperture_size);
            }

            for (unsigned int plane = 0; plane < lock_group[lg].plane_count; plane++) {
                uint64_t reg_addr;

                reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.lower_limit_reg.reg_offset;
                rorgn_begin_page[lg][aperture][plane] = *(volatile uint32_t *)reg_addr;
                reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.upper_limit_reg.reg_offset;
                rorgn_end_page[lg][aperture][plane] = *(volatile uint32_t *)reg_addr;
            }
        }

        assert(rorgn_end_page[lg][0][0] > rorgn_begin_page[lg][0][0]);

        for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
            for (unsigned int plane = 0; plane < lock_group[lg].plane_count; plane++) {
                if ((rorgn_begin_page[lg][aperture][plane] != rorgn_begin_page[0][0][0])
                    || (rorgn_end_page[lg][aperture][plane] != rorgn_end_page[0][0][0])) {
                    panic("Inconsistent memory config");
                }
            }
        }

        uint64_t page_bytes = 1ULL << lock_group[lg].ctrr_a.page_size_shift;

        /* rorgn_begin and rorgn_end are first and last byte inclusive of lock group read only region as determined by iBoot. */
        rorgn_begin = (rorgn_begin_page[0][0][0] << lock_group[lg].ctrr_a.page_size_shift) + gDramBase;
        rorgn_end = (rorgn_end_page[0][0][0] << lock_group[lg].ctrr_a.page_size_shift) + gDramBase + page_bytes - 1;
    }
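
    /*
     * Worked example with illustrative values only (not taken from any specific SoC):
     * with page_size_shift = 14 (16 KiB pages), rorgn_begin_page[0][0][0] = 0x100 and
     * gDramBase = 0x800000000, the formulas above give
     *   rorgn_begin = (0x100 << 14) + 0x800000000              = 0x800400000
     *   rorgn_end   = (0x100 << 14) + 0x800000000 + 0x4000 - 1 = 0x800403FFF
     * i.e. rorgn_end names the last byte of the end page, keeping the range inclusive.
     */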
    assert(segLOWESTRO && gVirtBase && gPhysBase);

    /* ctrr_begin and end are first and last bytes inclusive of MMU KTRR/CTRR region */
    ctrr_begin = kvtophys(segLOWESTRO);

#if defined(KERNEL_INTEGRITY_KTRR)

    /* __LAST is not part of the MMU KTRR region (it is however part of the AMCC read only region)
     *
     * +------------------+-----------+-----------------------------------+
     * | Largest Address  | LAST      | <- AMCC RO Region End (rorgn_end) |
     * +------------------+-----------+-----------------------------------+
     * |                  | TEXT_EXEC | <- KTRR RO Region End (ctrr_end)  |
     * +------------------+-----------+-----------------------------------+
     * |                  | ...       |                                   |
     * +------------------+-----------+-----------------------------------+
     * | Smallest Address | LOWEST    | <- KTRR/AMCC RO Region Begin      |
     * |                  |           |    (ctrr_begin/rorgn_begin)       |
     * +------------------+-----------+-----------------------------------+
     */

    ctrr_end = kvtophys(segLASTB) - segSizeLASTDATACONST - 1;

    /* assert not booted from kernel collection */
    assert(!segHIGHESTRO);

    /* assert that __LAST segment containing privileged insns is only a single page */
    assert(segSizeLAST == PAGE_SIZE);

    /* assert that segLAST is contiguous and just after/above/numerically higher than KTRR end */
    assert((ctrr_end + 1) == kvtophys(segTEXTEXECB) + segSizeTEXTEXEC);

    /* ensure that iboot and xnu agree on the amcc rorgn range */
    assert((rorgn_begin == ctrr_begin) && (rorgn_end == (ctrr_end + segSizeLASTDATACONST + segSizeLAST)));
#elif defined(KERNEL_INTEGRITY_CTRR)

    /* __LAST is part of MMU CTRR region. Can't use the KTRR style method of making
     * __pinst no execute because PXN applies with MMU off in CTRR.
     *
     * +------------------+-----------+------------------------------+
     * | Largest Address  | LAST      | <- CTRR/AMCC RO Region End   |
     * |                  |           |    (ctrr_end/rorgn_end)      |
     * +------------------+-----------+------------------------------+
     * |                  | TEXT_EXEC |                              |
     * +------------------+-----------+------------------------------+
     * |                  | ...       |                              |
     * +------------------+-----------+------------------------------+
     * | Smallest Address | LOWEST    | <- CTRR/AMCC RO Region Begin |
     * |                  |           |    (ctrr_begin/rorgn_begin)  |
     * +------------------+-----------+------------------------------+
     */

    if (segHIGHESTRO) {
        /*
         * kernel collections may have additional kext RO data after kernel LAST
         */
        assert(segLASTB + segSizeLAST <= segHIGHESTRO);
        ctrr_end = kvtophys(segHIGHESTRO) - 1;
    } else {
        ctrr_end = kvtophys(segLASTB) + segSizeLAST - 1;
    }

    /* ensure that iboot and xnu agree on the amcc rorgn range */
    assert((rorgn_begin == ctrr_begin) && (rorgn_end == ctrr_end));
#endif
}
#if DEVELOPMENT || DEBUG
static void
assert_all_lock_groups_unlocked(lock_group_t const *lock_groups)
{
    uint64_t reg_addr;
    uint64_t ctrr_lock = 0;
    bool locked = false;
    bool write_disabled = false;

    for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
        for (unsigned int aperture = 0; aperture < lock_groups[lg].aperture_count; aperture++) {
#if HAS_IOA
            // Does the lock group define a master lock register?
            if (lock_groups[lg].master_lock_reg.reg_mask != 0) {
                reg_addr = lock_group_va[lg][aperture] + lock_groups[lg].master_lock_reg.reg_offset;
                locked |= ((*(volatile uint32_t *)reg_addr & lock_groups[lg].master_lock_reg.reg_mask) == lock_groups[lg].master_lock_reg.reg_value);
            }
#endif
            for (unsigned int plane = 0; plane < lock_groups[lg].plane_count; plane++) {
                // Does the lock group define a write disable register?
                if (lock_groups[lg].ctrr_a.write_disable_reg.reg_mask != 0) {
                    reg_addr = lock_group_va[lg][aperture] + (plane * lock_groups[lg].plane_stride) + lock_groups[lg].ctrr_a.write_disable_reg.reg_offset;
                    write_disabled |= ((*(volatile uint32_t *)reg_addr & lock_groups[lg].ctrr_a.write_disable_reg.reg_mask) == lock_groups[lg].ctrr_a.write_disable_reg.reg_value);
                }

                // Does the lock group define a lock register?
                if (lock_groups[lg].ctrr_a.lock_reg.reg_mask != 0) {
                    reg_addr = lock_group_va[lg][aperture] + (plane * lock_groups[lg].plane_stride) + lock_groups[lg].ctrr_a.lock_reg.reg_offset;
                    locked |= ((*(volatile uint32_t *)reg_addr & lock_groups[lg].ctrr_a.lock_reg.reg_mask) == lock_groups[lg].ctrr_a.lock_reg.reg_value);
                }
            }
        }
    }

    ctrr_lock = __builtin_arm_rsr64(CTRR_LOCK_MSR);

    assert(!ctrr_lock);
    assert(!write_disabled && !locked);
}
#endif /* DEVELOPMENT || DEBUG */
static void
lock_all_lock_groups(lock_group_t const *lock_group, vm_offset_t begin, vm_offset_t end)
{
    uint64_t reg_addr;

    /*
     * [x] - ensure all in flight writes are flushed to the lock group before enabling RO Region Lock
     *
     * begin and end are first and last byte inclusive of lock group read only region
     */
    CleanPoC_DcacheRegion_Force(begin, end - begin + 1);

    for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
        for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
            /* lock planes in reverse order: plane 0 should be locked last */
            unsigned int plane = lock_group[lg].plane_count - 1;
            do {
                // Enable the protection region if the lock group defines an enable register.
                if (lock_group[lg].ctrr_a.enable_reg.reg_mask != 0) {
                    reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.enable_reg.reg_offset;
                    *(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.enable_reg.reg_value;
                }

                // Disable writes if the lock group defines a write disable register.
                if (lock_group[lg].ctrr_a.write_disable_reg.reg_mask != 0) {
                    reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.write_disable_reg.reg_offset;
                    *(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.write_disable_reg.reg_value;
                }

                // Lock the region if the lock group defines a lock register.
                if (lock_group[lg].ctrr_a.lock_reg.reg_mask != 0) {
                    reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.lock_reg.reg_offset;
                    *(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.lock_reg.reg_value;
                }

                __builtin_arm_isb(ISB_SY);
            } while (plane-- > 0);

#if HAS_IOA
            // Lock the master lock if the lock group defines a master lock register.
            if (lock_group[lg].master_lock_reg.reg_mask != 0) {
                reg_addr = lock_group_va[lg][aperture] + lock_group[lg].master_lock_reg.reg_offset;
                *(volatile uint32_t *)reg_addr = lock_group[lg].master_lock_reg.reg_value;
            }

            __builtin_arm_isb(ISB_SY);
#endif
        }
    }
}
static void
lock_mmu(uint64_t begin, uint64_t end)
{
#if defined(KERNEL_INTEGRITY_KTRR)

    __builtin_arm_wsr64(ARM64_REG_KTRR_LOWER_EL1, begin);
    __builtin_arm_wsr64(ARM64_REG_KTRR_UPPER_EL1, end);
    __builtin_arm_wsr64(ARM64_REG_KTRR_LOCK_EL1, 1ULL);

    /* flush TLB */

    __builtin_arm_isb(ISB_SY);
    flush_mmu_tlb();

#elif defined (KERNEL_INTEGRITY_CTRR)
    /* this will lock the entire bootstrap cluster. non bootstrap clusters
     * will be locked by respective cluster master in start.s */

    __builtin_arm_wsr64(ARM64_REG_CTRR_A_LWR_EL1, begin);
    __builtin_arm_wsr64(ARM64_REG_CTRR_A_UPR_EL1, end);

#if !defined(APPLEVORTEX)
    /* H12+ changed sequence, must invalidate TLB immediately after setting CTRR bounds */
    __builtin_arm_isb(ISB_SY); /* ensure all prior MSRs are complete */
    flush_mmu_tlb();
#endif /* !defined(APPLEVORTEX) */

    __builtin_arm_wsr64(ARM64_REG_CTRR_CTL_EL1, CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT);
    __builtin_arm_wsr64(ARM64_REG_CTRR_LOCK_EL1, 1ULL);

    uint64_t current_el = __builtin_arm_rsr64("CurrentEL");
    if (current_el == PSR64_MODE_EL2) {
        // CTRR v2 has explicit registers for cluster config. They can only be written in EL2.

        __builtin_arm_wsr64(ACC_CTRR_A_LWR_EL2, begin);
        __builtin_arm_wsr64(ACC_CTRR_A_UPR_EL2, end);
        __builtin_arm_wsr64(ACC_CTRR_CTL_EL2, CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT);
        __builtin_arm_wsr64(ACC_CTRR_LOCK_EL2, 1ULL);
    }

    __builtin_arm_isb(ISB_SY); /* ensure all prior MSRs are complete */
#if defined(APPLEVORTEX)
    flush_mmu_tlb();
#endif /* defined(APPLEVORTEX) */

#else /* defined(KERNEL_INTEGRITY_KTRR) */
#error KERNEL_INTEGRITY config error
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
}
#if DEVELOPMENT || DEBUG
static void
assert_amcc_cache_disabled(lock_group_t const *lock_group)
{
    const lock_reg_t *cache_status_reg = &lock_group[AMCC_LOCK_GROUP].cache_status_reg;

    // If the platform does not define a cache status register, then we're done here.
    if (cache_status_reg->reg_mask != 0) {
        for (unsigned int aperture = 0; aperture < lock_group[AMCC_LOCK_GROUP].aperture_count; aperture++) {
            for (unsigned int plane = 0; plane < lock_group[AMCC_LOCK_GROUP].plane_count; plane++) {
                uint64_t reg_addr = lock_group_va[AMCC_LOCK_GROUP][aperture] + (plane * lock_group[AMCC_LOCK_GROUP].plane_stride) + cache_status_reg->reg_offset;
                uint32_t reg_value = *(volatile uint32_t *)reg_addr;
                assert((reg_value & cache_status_reg->reg_mask) == cache_status_reg->reg_value);
            }
        }
    }
}
#endif /* DEVELOPMENT || DEBUG */
/*
 * void rorgn_lockdown(void)
 *
 * Lock the MMU and AMCC RORegion within lower and upper boundaries if not already locked
 *
 * [ ] - ensure this is being called ASAP on secondary CPUs: KTRR programming and lockdown handled in
 *       start.s:start_cpu() for subsequent wake/resume of all cores
 */
void
rorgn_lockdown(void)
{
    boolean_t ctrr_disable = FALSE;

#if DEVELOPMENT || DEBUG
    PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_CSR_FROM_DT
    if (csr_unsafe_kernel_text) {
        ctrr_disable = true;
    }
#endif /* CONFIG_CSR_FROM_DT */

    if (!ctrr_disable) {
        lock_group_t const * const lock_group = find_lock_group_data();

#if DEVELOPMENT || DEBUG
        assert_all_lock_groups_unlocked(lock_group);

        printf("RO Region Begin: %p End: %p\n", (void *)rorgn_begin, (void *)rorgn_end);
        printf("CTRR (MMU) Begin: %p End: %p, setting lockdown\n", (void *)ctrr_begin, (void *)ctrr_end);

        assert_amcc_cache_disabled(lock_group);
#endif /* DEVELOPMENT || DEBUG */

        // Lock the AMCC/IOA PIO lock registers.
        lock_all_lock_groups(lock_group, phystokv(rorgn_begin), phystokv(rorgn_end));

        /*
         * KTRR/CTRR registers are inclusive of the smallest page size granule supported by processor MMU
         * rather than the actual page size in use. Load the last byte of the end page, and let the HW
         * truncate per the smallest page granule supported. Must use same treatment in start.s for warm
         * start of primary and secondary cores.
         */
        lock_mmu(ctrr_begin, ctrr_end);
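
        /*
         * Illustrative example (assumed sizes, not tied to any specific SoC): if the region's
         * pages are 16 KiB but the MMU's smallest supported granule is 4 KiB, ctrr_end still
         * names the last byte of the 16 KiB end page, so the bound stays correct after the
         * hardware truncates it to its own granule.
         */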
        // Unmap and free PIO VA space needed to lockdown the lock groups.
        for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
            for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
                ml_io_unmap(lock_group_va[lg][aperture], lock_group[lg].aperture_size);
            }
        }
    }

#if defined(KERNEL_INTEGRITY_CTRR)
    /* wake any threads blocked on cluster master lockdown */
    cpu_data_t *cdp = getCpuDatap();

    cdp->cpu_cluster_id = ml_get_cluster_number_local();
    assert(cdp->cpu_cluster_id <= (uint32_t)ml_get_max_cluster_number());
    ctrr_cluster_locked[cdp->cpu_cluster_id] = CTRR_LOCKED;
    thread_wakeup(&ctrr_cluster_locked[cdp->cpu_cluster_id]);
#endif
}

#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */