/*
 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <pexpert/arm64/board_config.h>

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)

#include <libkern/section_keywords.h>
#include <libkern/kernel_mach_header.h>
#include <pexpert/pexpert.h>
#include <pexpert/device_tree.h>
#include <machine/atomic.h>
#include <arm/cpu_internal.h>
#include <arm/caches_internal.h>
#include <arm/machine_routines.h>

#include <arm64/tlb.h>
#include <arm64/amcc_rorgn.h>

#if HIBERNATION
#include <arm64/pal_hibernate.h>
#endif /* HIBERNATION */
#if HAS_IOA
#define MAX_LOCK_GROUPS 2       // 2 lock groups (AMCC, IOA)
#define IOA_LOCK_GROUP  1       // IOA lock group index
#else
#define MAX_LOCK_GROUPS 1       // 1 lock group (AMCC)
#endif
#define AMCC_LOCK_GROUP 0       // AMCC lock group index
#define MAX_APERTURES   16      // Maximum number of register apertures
#define MAX_PLANES      16      // Maximum number of planes within each aperture

#define LOCK_GROUP_HAS_CACHE_STATUS_REG (1 << 0) // Look for a cache status register in the lock group
#define LOCK_GROUP_HAS_MASTER_LOCK_REG  (1 << 1) // Look for a master lock register in the lock group

#define LOCK_TYPE_HAS_LOCK_REG          (1 << 0) // Look for a lock register in the lock type
extern vm_offset_t segLOWESTRO;
extern vm_offset_t segHIGHESTRO;

extern vm_offset_t segLASTB;
extern vm_offset_t segTEXTEXECB;
extern unsigned long segSizeLAST;
extern unsigned long segSizeLASTDATACONST;
extern unsigned long segSizeTEXTEXEC;
typedef struct lock_reg {
	uint32_t        reg_offset;             // Register offset
	uint32_t        reg_mask;               // Register mask
	uint32_t        reg_value;              // Register value
} lock_reg_t;
typedef struct lock_type {
	uint32_t        page_size_shift;        // Page shift used in the lower/upper limit registers
	lock_reg_t      lower_limit_reg;        // Lower limit register description
	lock_reg_t      upper_limit_reg;        // Upper limit register description
	lock_reg_t      enable_reg;             // Enable register description
	lock_reg_t      write_disable_reg;      // Write disable register description
	lock_reg_t      lock_reg;               // Lock register description
} lock_type_t;
typedef struct lock_group {
	uint32_t        aperture_count;                         // Aperture count
	uint32_t        aperture_size;                          // Aperture size
	uint32_t        plane_count;                            // Number of planes in the aperture
	uint32_t        plane_stride;                           // Stride between planes in the aperture
	uint64_t        aperture_phys_addr[MAX_APERTURES];      // Aperture physical addresses
	lock_reg_t      cache_status_reg;                       // Cache status register description
	lock_reg_t      master_lock_reg;                        // Master lock register description
	lock_type_t     ctrr_a;                                 // CTRR-A (KTRR) lock
} lock_group_t;
SECURITY_READ_ONLY_LATE(lock_group_t) _lock_group[MAX_LOCK_GROUPS] = { {0} };
SECURITY_READ_ONLY_LATE(bool) lock_regs_set = false;

static vm_offset_t rorgn_begin = 0;
static vm_offset_t rorgn_end = 0;
SECURITY_READ_ONLY_LATE(vm_offset_t) ctrr_begin = 0;
SECURITY_READ_ONLY_LATE(vm_offset_t) ctrr_end = 0;

static uint64_t lock_group_va[MAX_LOCK_GROUPS][MAX_APERTURES];
#if CONFIG_CSR_FROM_DT
SECURITY_READ_ONLY_LATE(bool) csr_unsafe_kernel_text = false;
#endif

#if defined(KERNEL_INTEGRITY_KTRR)
#define CTRR_LOCK_MSR ARM64_REG_KTRR_LOCK_EL1
#elif defined(KERNEL_INTEGRITY_CTRR)
#define CTRR_LOCK_MSR ARM64_REG_CTRR_LOCK_EL1
#endif
/*
 * lock_group_t - describes all the parameters xnu needs to know to
 * lock down the AMCC/IOA (Lock Group) Read Only Region(s) on cold start.
 * This description assumes that each AMCC/IOA in a given system will
 * be identical, respectively. The only variables are the number of
 * apertures present and the physical base address of each aperture.
 *
 * General xnu lock group lockdown flow:
 * - for each lock group:
 *   - ml_io_map all present lock group physical base addresses
 *   - assert all lock group begin/end page numbers set by iboot are identical
 *   - convert lock group begin/end page number to physical address
 *   - assert lock group begin/end page numbers match xnu's view of the read only region
 *   - assert lock group is not currently locked
 *   - ensure lock group master cache is disabled
 *   - write enable/lock registers to enable/lock the lock group read only region
 */
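
/*
 * Illustrative sketch only (not taken from any shipped device tree): the node and property
 * names below are exactly the ones parsed by the helpers in this file, but the layout shape
 * and any values are assumptions and vary by platform. iBoot publishes something roughly like:
 *
 *   /chosen/lock-regs/
 *     amcc/
 *       aperture-count, aperture-size, plane-count, plane-stride (optional)
 *       aperture-phys-addr                          (uint64_t[aperture-count])
 *       cache-status-reg-offset/-mask/-value
 *       amcc-ctrr-a/
 *         page-size-shift
 *         lower-limit-reg-offset/-mask
 *         upper-limit-reg-offset/-mask
 *         enable-reg-offset/-mask/-value            (optional)
 *         write-disable-reg-offset/-mask/-value     (optional)
 *         lock-reg-offset/-mask/-value
 *     ioa/                                          (only on platforms with an IOA lock group)
 *       ... same shape, plus master-lock-reg-offset/-mask/-value and an ioa-ctrr-a child
 */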
static bool
_dt_get_uint32(DTEntry node, char const *name, uint32_t *dest)
{
	uint32_t const *value;
	unsigned int size;

	if (SecureDTGetProperty(node, name, (void const **)&value, &size) != kSuccess) {
		return false;
	}

	if (size != sizeof(uint32_t)) {
		panic("lock-regs: unexpected size %u", size);
	}

	*dest = *value;

	return true;
}
static uint32_t
_dt_get_uint32_required(DTEntry node, char const *name)
{
	uint32_t value = 0;

	if (!_dt_get_uint32(node, name, &value)) {
		panic("lock-regs: cannot find required property '%s'", name);
	}

	return value;
}
static void
_dt_get_lock_reg(DTEntry node, lock_reg_t *reg, const char *parent_name, const char *reg_name, bool required, bool with_value)
{
	char prop_name[32];
	bool found;

	snprintf(prop_name, sizeof(prop_name), "%s-reg-offset", reg_name);
	found = _dt_get_uint32(node, prop_name, &reg->reg_offset);
	if (!found) {
		if (required) {
			panic("%s: missing property '%s'", parent_name, prop_name);
		} else {
			return;
		}
	}

	snprintf(prop_name, sizeof(prop_name), "%s-reg-mask", reg_name);
	found = _dt_get_uint32(node, prop_name, &reg->reg_mask);
	if (!found) {
		panic("%s: missing property '%s'", parent_name, prop_name);
	}

	if (with_value) {
		snprintf(prop_name, sizeof(prop_name), "%s-reg-value", reg_name);
		found = _dt_get_uint32(node, prop_name, &reg->reg_value);
		if (!found) {
			panic("%s: missing property '%s'", parent_name, prop_name);
		}
	}
}
static DTEntry
_dt_get_lock_group(DTEntry lock_regs_node, lock_group_t *lock_group, const char *group_name, uint32_t options)
{
	DTEntry group_node;

	// Find the lock group node.
	if (SecureDTLookupEntry(lock_regs_node, group_name, &group_node) != kSuccess) {
		panic("lock-regs: /chosen/lock-regs/%s not found", group_name);
	}

	lock_group->aperture_count = _dt_get_uint32_required(group_node, "aperture-count");

	if (lock_group->aperture_count > MAX_APERTURES) {
		panic("%s: %s %u exceeds maximum %u", group_name, "aperture-count", lock_group->aperture_count, MAX_APERTURES);
	}

	lock_group->aperture_size = _dt_get_uint32_required(group_node, "aperture-size");

	if ((lock_group->aperture_count > 0) && (lock_group->aperture_size == 0)) {
		panic("%s: have %u apertures, but 0 size", group_name, lock_group->aperture_count);
	}

	lock_group->plane_count = _dt_get_uint32_required(group_node, "plane-count");

	if (lock_group->plane_count > MAX_PLANES) {
		panic("%s: %s %u exceeds maximum %u", group_name, "plane-count", lock_group->plane_count, MAX_PLANES);
	}

	if (!_dt_get_uint32(group_node, "plane-stride", &lock_group->plane_stride)) {
		lock_group->plane_stride = 0;
	}

	if (lock_group->plane_count > 1) {
		uint32_t aperture_size;

		if (lock_group->plane_stride == 0) {
			panic("%s: plane-count (%u) > 1, but stride is 0/missing", group_name, lock_group->plane_count);
		}

		if (os_mul_overflow(lock_group->plane_count, lock_group->plane_stride, &aperture_size)
		    || (aperture_size > lock_group->aperture_size)) {
			panic("%s: aperture-size (%#x) is insufficient to cover plane-count (%#x) of plane-stride (%#x) bytes", group_name,
			    lock_group->aperture_size, lock_group->plane_count, lock_group->plane_stride);
		}
	}

	uint64_t const *phys_bases = NULL;
	unsigned int prop_size;
	if (SecureDTGetProperty(group_node, "aperture-phys-addr", (const void**)&phys_bases, &prop_size) != kSuccess) {
		panic("%s: missing required %s", group_name, "aperture-phys-addr");
	}

	if (prop_size != lock_group->aperture_count * sizeof(lock_group->aperture_phys_addr[0])) {
		panic("%s: aperture-phys-addr size (%#x) != (aperture-count (%#x) * PA size (%#zx) = %#lx)",
		    group_name, prop_size, lock_group->aperture_count, sizeof(lock_group->aperture_phys_addr[0]),
		    lock_group->aperture_count * sizeof(lock_group->aperture_phys_addr[0]));
	}

	memcpy(lock_group->aperture_phys_addr, phys_bases, prop_size);

	if (options & LOCK_GROUP_HAS_CACHE_STATUS_REG) {
		_dt_get_lock_reg(group_node, &lock_group->cache_status_reg, group_name, "cache-status", true, true);
	}

	if (options & LOCK_GROUP_HAS_MASTER_LOCK_REG) {
		_dt_get_lock_reg(group_node, &lock_group->master_lock_reg, group_name, "master-lock", true, true);
	}

	return group_node;
}
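
/*
 * Worked example of the aperture/plane geometry check above (hypothetical numbers, for
 * illustration only): with plane-count = 4 and plane-stride = 0x40000, the os_mul_overflow()
 * check requires aperture-size >= 4 * 0x40000 = 0x100000, so that a register for plane `p`
 * at aperture_base + (p * 0x40000) + reg_offset always lands inside the ml_io_map()'d window.
 */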
static void
_dt_get_lock_type(DTEntry group_node, lock_type_t *lock_type, const char *group_name, const char *type_name, uint32_t options)
{
	DTEntry type_node;
	bool has_lock = options & LOCK_TYPE_HAS_LOCK_REG;

	// Find the lock type type_node.
	if (SecureDTLookupEntry(group_node, type_name, &type_node) != kSuccess) {
		panic("lock-regs: /chosen/lock-regs/%s/%s not found", group_name, type_name);
	}

	lock_type->page_size_shift = _dt_get_uint32_required(type_node, "page-size-shift");

	// Find all of the registers for this lock type.
	//                Parent     Register Descriptor           Parent Name  Reg Name         Required  Value
	_dt_get_lock_reg(type_node, &lock_type->lower_limit_reg,   type_name,   "lower-limit",   true,     false);
	_dt_get_lock_reg(type_node, &lock_type->upper_limit_reg,   type_name,   "upper-limit",   true,     false);
	_dt_get_lock_reg(type_node, &lock_type->lock_reg,          type_name,   "lock",          has_lock, true);
	_dt_get_lock_reg(type_node, &lock_type->enable_reg,        type_name,   "enable",        false,    true);
	_dt_get_lock_reg(type_node, &lock_type->write_disable_reg, type_name,   "write-disable", false,    true);
}
/*
 * find_lock_group_data:
 *
 * Finds and gathers lock group (AMCC/IOA) data from the device tree and returns it as a lock_group_t.
 *
 * Called the first time before IOKit starts, while still uniprocessor.
 */
static lock_group_t const * _Nonnull
find_lock_group_data(void)
{
	DTEntry lock_regs_node = NULL;
	DTEntry amcc_node = NULL;

	// Return the lock group data pointer if we already found and populated one.
	if (lock_regs_set) {
		return _lock_group;
	}

	if (SecureDTLookupEntry(NULL, "/chosen/lock-regs", &lock_regs_node) != kSuccess) {
		panic("lock-regs: /chosen/lock-regs not found (your iBoot or EDT may be too old)");
	}

	amcc_node = _dt_get_lock_group(lock_regs_node, &_lock_group[AMCC_LOCK_GROUP], "amcc", LOCK_GROUP_HAS_CACHE_STATUS_REG);
	_dt_get_lock_type(amcc_node, &_lock_group[AMCC_LOCK_GROUP].ctrr_a, "amcc", "amcc-ctrr-a", LOCK_TYPE_HAS_LOCK_REG);

#if HAS_IOA
	DTEntry ioa_node = _dt_get_lock_group(lock_regs_node, &_lock_group[IOA_LOCK_GROUP], "ioa", LOCK_GROUP_HAS_MASTER_LOCK_REG);
	_dt_get_lock_type(ioa_node, &_lock_group[IOA_LOCK_GROUP].ctrr_a, "ioa", "ioa-ctrr-a", 0);
#endif

	lock_regs_set = true;

	return _lock_group;
}
void
rorgn_stash_range(void)
{
#if DEVELOPMENT || DEBUG || CONFIG_DTRACE || CONFIG_CSR_FROM_DT
	boolean_t rorgn_disable = FALSE;

#if DEVELOPMENT || DEBUG
	PE_parse_boot_argn("-unsafe_kernel_text", &rorgn_disable, sizeof(rorgn_disable));
#endif

#if CONFIG_CSR_FROM_DT
	if (csr_unsafe_kernel_text) {
		rorgn_disable = true;
	}
#endif

	if (rorgn_disable) {
		/* take early out if boot arg present, don't query any machine registers to avoid
		 * dependency on amcc DT entry
		 */
		return;
	}
#endif

	lock_group_t const * const lock_group = find_lock_group_data();

	/* Get the lock group read-only region range values, and stash them into rorgn_begin, rorgn_end. */
	uint64_t rorgn_begin_page[MAX_LOCK_GROUPS][MAX_APERTURES][MAX_PLANES];
	uint64_t rorgn_end_page[MAX_LOCK_GROUPS][MAX_APERTURES][MAX_PLANES];

	for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
		for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
			const uint64_t amcc_pa = lock_group[lg].aperture_phys_addr[aperture];

			// VA space will be unmapped and freed after lockdown complete in rorgn_lockdown()
			lock_group_va[lg][aperture] = ml_io_map(amcc_pa, lock_group[lg].aperture_size);

			if (lock_group_va[lg][aperture] == 0) {
				panic("map aperture_phys_addr[%u]/%#x failed", aperture, lock_group[lg].aperture_size);
			}

			for (unsigned int plane = 0; plane < lock_group[lg].plane_count; plane++) {
				uint64_t reg_addr;

				reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.lower_limit_reg.reg_offset;
				rorgn_begin_page[lg][aperture][plane] = *(volatile uint32_t *)reg_addr;
				reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.upper_limit_reg.reg_offset;
				rorgn_end_page[lg][aperture][plane] = *(volatile uint32_t *)reg_addr;
			}
		}

		assert(rorgn_end_page[lg][0][0] > rorgn_begin_page[lg][0][0]);

		for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
			for (unsigned int plane = 0; plane < lock_group[lg].plane_count; plane++) {
				if ((rorgn_begin_page[lg][aperture][plane] != rorgn_begin_page[0][0][0])
				    || (rorgn_end_page[lg][aperture][plane] != rorgn_end_page[0][0][0])) {
					panic("Inconsistent memory config");
				}
			}
		}

		uint64_t page_bytes = 1ULL << lock_group[lg].ctrr_a.page_size_shift;

		/* rorgn_begin and rorgn_end are first and last byte inclusive of lock group read only region as determined by iBoot. */
		rorgn_begin = (rorgn_begin_page[0][0][0] << lock_group[lg].ctrr_a.page_size_shift) + gDramBase;
		rorgn_end = (rorgn_end_page[0][0][0] << lock_group[lg].ctrr_a.page_size_shift) + gDramBase + page_bytes - 1;
	}

	assert(segLOWESTRO && gVirtBase && gPhysBase);

	/* ctrr_begin and ctrr_end are first and last bytes inclusive of the MMU KTRR/CTRR region. */
	ctrr_begin = kvtophys(segLOWESTRO);

#if defined(KERNEL_INTEGRITY_KTRR)

	/* __LAST is not part of the MMU KTRR region (it is however part of the AMCC read only region)
	 *
	 * +------------------+-----------+-----------------------------------+
	 * | Largest Address  | LAST      | <- AMCC RO Region End (rorgn_end) |
	 * +------------------+-----------+-----------------------------------+
	 * |                  | TEXT_EXEC | <- KTRR RO Region End (ctrr_end)  |
	 * +------------------+-----------+-----------------------------------+
	 * |                  | ...       |                                   |
	 * +------------------+-----------+-----------------------------------+
	 * | Smallest Address | LOWEST    | <- KTRR/AMCC RO Region Begin      |
	 * |                  |           |    (ctrr_begin/rorgn_begin)       |
	 * +------------------+-----------+-----------------------------------+
	 */

	ctrr_end = kvtophys(segLASTB) - segSizeLASTDATACONST - 1;

	/* assert not booted from kernel collection */
	assert(!segHIGHESTRO);

	/* assert that __LAST segment containing privileged insns is only a single page */
	assert(segSizeLAST == PAGE_SIZE);

	/* assert that segLAST is contiguous and just after/above/numerically higher than KTRR end */
	assert((ctrr_end + 1) == kvtophys(segTEXTEXECB) + segSizeTEXTEXEC);

	/* ensure that iboot and xnu agree on the amcc rorgn range */
	assert((rorgn_begin == ctrr_begin) && (rorgn_end == (ctrr_end + segSizeLASTDATACONST + segSizeLAST)));
#elif defined(KERNEL_INTEGRITY_CTRR)

	/* __LAST is part of the MMU CTRR region. Can't use the KTRR-style method of making
	 * __pinst no-execute because PXN applies with the MMU off in CTRR.
	 *
	 * +------------------+-----------+------------------------------+
	 * | Largest Address  | LAST      | <- CTRR/AMCC RO Region End   |
	 * |                  |           |    (ctrr_end/rorgn_end)      |
	 * +------------------+-----------+------------------------------+
	 * |                  | TEXT_EXEC |                              |
	 * +------------------+-----------+------------------------------+
	 * |                  | ...       |                              |
	 * +------------------+-----------+------------------------------+
	 * | Smallest Address | LOWEST    | <- CTRR/AMCC RO Region Begin |
	 * |                  |           |    (ctrr_begin/rorgn_begin)  |
	 * +------------------+-----------+------------------------------+
	 */

	if (segHIGHESTRO) {
		/*
		 * Kernel collections may have additional kext RO data after the kernel's LAST segment.
		 */
		assert(segLASTB + segSizeLAST <= segHIGHESTRO);
		ctrr_end = kvtophys(segHIGHESTRO) - 1;
	} else {
		ctrr_end = kvtophys(segLASTB) + segSizeLAST - 1;
	}

	/* ensure that iboot and xnu agree on the amcc rorgn range */
	assert((rorgn_begin == ctrr_begin) && (rorgn_end == ctrr_end));
#endif
}
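
/*
 * Worked example of the stash arithmetic above (hypothetical values, for illustration only):
 * with page_size_shift = 14 (16 KiB granule), gDramBase = 0x800000000, begin page 0x100 and
 * end page 0x1FF as read back from the lower/upper limit registers:
 *
 *   rorgn_begin = (0x100 << 14) + 0x800000000              = 0x800400000
 *   rorgn_end   = (0x1FF << 14) + 0x800000000 + 0x4000 - 1 = 0x8007FFFFF
 *
 * i.e. rorgn_begin/rorgn_end are byte addresses, inclusive of the last byte of the end page.
 */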
#if DEVELOPMENT || DEBUG
static void
assert_all_lock_groups_unlocked(lock_group_t const *lock_groups)
{
	uint64_t reg_addr;
	uint64_t ctrr_lock = 0;
	bool locked = false;
	bool write_disabled = false;

	assert(lock_groups);

	for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
		for (unsigned int aperture = 0; aperture < lock_groups[lg].aperture_count; aperture++) {
			// Does the lock group define a master lock register?
			if (lock_groups[lg].master_lock_reg.reg_mask != 0) {
				reg_addr = lock_group_va[lg][aperture] + lock_groups[lg].master_lock_reg.reg_offset;
				locked |= ((*(volatile uint32_t *)reg_addr & lock_groups[lg].master_lock_reg.reg_mask) == lock_groups[lg].master_lock_reg.reg_value);
			}

			for (unsigned int plane = 0; plane < lock_groups[lg].plane_count; plane++) {
				// Does the lock group define a write disable register?
				if (lock_groups[lg].ctrr_a.write_disable_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_groups[lg].plane_stride) + lock_groups[lg].ctrr_a.write_disable_reg.reg_offset;
					write_disabled |= ((*(volatile uint32_t *)reg_addr & lock_groups[lg].ctrr_a.write_disable_reg.reg_mask) == lock_groups[lg].ctrr_a.write_disable_reg.reg_value);
				}

				// Does the lock group define a lock register?
				if (lock_groups[lg].ctrr_a.lock_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_groups[lg].plane_stride) + lock_groups[lg].ctrr_a.lock_reg.reg_offset;
					locked |= ((*(volatile uint32_t *)reg_addr & lock_groups[lg].ctrr_a.lock_reg.reg_mask) == lock_groups[lg].ctrr_a.lock_reg.reg_value);
				}
			}
		}
	}

	ctrr_lock = __builtin_arm_rsr64(CTRR_LOCK_MSR);

	assert(!ctrr_lock);
	assert(!write_disabled && !locked);
}
#endif /* DEVELOPMENT || DEBUG */
static void
lock_all_lock_groups(lock_group_t const *lock_group, vm_offset_t begin, vm_offset_t end)
{
	uint64_t reg_addr;
	assert(lock_group);

	/*
	 * [x] - ensure all in flight writes are flushed to the lock group before enabling RO Region Lock
	 *
	 * begin and end are first and last byte inclusive of lock group read only region
	 */

	CleanPoC_DcacheRegion_Force(begin, end - begin + 1);

	for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
		for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
			/* lock planes in reverse order: plane 0 should be locked last */
			unsigned int plane = lock_group[lg].plane_count - 1;
			do {
				// Enable the protection region if the lock group defines an enable register.
				if (lock_group[lg].ctrr_a.enable_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.enable_reg.reg_offset;
					*(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.enable_reg.reg_value;
				}

				// Disable writes if the lock group defines a write disable register.
				if (lock_group[lg].ctrr_a.write_disable_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.write_disable_reg.reg_offset;
					*(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.write_disable_reg.reg_value;
				}

				// Lock the region if the lock group defines a lock register.
				if (lock_group[lg].ctrr_a.lock_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.lock_reg.reg_offset;
					*(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.lock_reg.reg_value;
				}

				__builtin_arm_isb(ISB_SY);
			} while (plane-- > 0);

			// Lock the master lock if the lock group defines a master lock register.
			if (lock_group[lg].master_lock_reg.reg_mask != 0) {
				reg_addr = lock_group_va[lg][aperture] + lock_group[lg].master_lock_reg.reg_offset;
				*(volatile uint32_t *)reg_addr = lock_group[lg].master_lock_reg.reg_value;
			}

			__builtin_arm_isb(ISB_SY);
		}
	}
}
static void
lock_mmu(uint64_t begin, uint64_t end)
{
#if defined(KERNEL_INTEGRITY_KTRR)

	__builtin_arm_wsr64(ARM64_REG_KTRR_LOWER_EL1, begin);
	__builtin_arm_wsr64(ARM64_REG_KTRR_UPPER_EL1, end);
	__builtin_arm_wsr64(ARM64_REG_KTRR_LOCK_EL1, 1ULL);

	/* flush TLB */

	__builtin_arm_isb(ISB_SY);
	flush_mmu_tlb();

#elif defined (KERNEL_INTEGRITY_CTRR)
	/* this will lock the entire bootstrap cluster. non bootstrap clusters
	 * will be locked by respective cluster master in start.s */

	__builtin_arm_wsr64(ARM64_REG_CTRR_A_LWR_EL1, begin);
	__builtin_arm_wsr64(ARM64_REG_CTRR_A_UPR_EL1, end);

#if !defined(APPLEVORTEX)
	/* H12+ changed sequence, must invalidate TLB immediately after setting CTRR bounds */
	__builtin_arm_isb(ISB_SY); /* ensure all prior MSRs are complete */
	flush_mmu_tlb();
#endif /* !defined(APPLEVORTEX) */

	__builtin_arm_wsr64(ARM64_REG_CTRR_CTL_EL1, CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT);
	__builtin_arm_wsr64(ARM64_REG_CTRR_LOCK_EL1, 1ULL);

	uint64_t current_el = __builtin_arm_rsr64("CurrentEL");
	if (current_el == PSR64_MODE_EL2) {
		// CTRR v2 has explicit registers for cluster config; they can only be written in EL2.
		__builtin_arm_wsr64(ACC_CTRR_A_LWR_EL2, begin);
		__builtin_arm_wsr64(ACC_CTRR_A_UPR_EL2, end);
		__builtin_arm_wsr64(ACC_CTRR_CTL_EL2, CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT);
		__builtin_arm_wsr64(ACC_CTRR_LOCK_EL2, 1ULL);
	}

	__builtin_arm_isb(ISB_SY); /* ensure all prior MSRs are complete */
#if defined(APPLEVORTEX)
	flush_mmu_tlb();
#endif /* defined(APPLEVORTEX) */

#else /* defined(KERNEL_INTEGRITY_KTRR) */
#error KERNEL_INTEGRITY config error
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
}
#if DEVELOPMENT || DEBUG
static void
assert_amcc_cache_disabled(lock_group_t const *lock_group)
{
	assert(lock_group);

	const lock_reg_t *cache_status_reg = &lock_group[AMCC_LOCK_GROUP].cache_status_reg;

	// If the platform does not define a cache status register, then we're done here.
	if (cache_status_reg->reg_mask == 0) {
		return;
	}

	for (unsigned int aperture = 0; aperture < lock_group[AMCC_LOCK_GROUP].aperture_count; aperture++) {
		for (unsigned int plane = 0; plane < lock_group[AMCC_LOCK_GROUP].plane_count; plane++) {
			uint64_t reg_addr = lock_group_va[AMCC_LOCK_GROUP][aperture] + (plane * lock_group[AMCC_LOCK_GROUP].plane_stride) + cache_status_reg->reg_offset;
			uint32_t reg_value = *(volatile uint32_t *)reg_addr;
			assert((reg_value & cache_status_reg->reg_mask) == cache_status_reg->reg_value);
		}
	}
}
#endif /* DEVELOPMENT || DEBUG */
/*
 * void rorgn_lockdown(void)
 *
 * Lock the MMU and AMCC RORegion within lower and upper boundaries if not already locked.
 *
 * [ ] - ensure this is being called ASAP on secondary CPUs: KTRR programming and lockdown handled in
 *       start.s:start_cpu() for subsequent wake/resume of all cores
 */
void
rorgn_lockdown(void)
{
	boolean_t ctrr_disable = FALSE;

#if DEVELOPMENT || DEBUG
	PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_CSR_FROM_DT
	if (csr_unsafe_kernel_text) {
		ctrr_disable = true;
	}
#endif /* CONFIG_CSR_FROM_DT */

	if (!ctrr_disable) {
		lock_group_t const * const lock_group = find_lock_group_data();

#if DEVELOPMENT || DEBUG
		assert_all_lock_groups_unlocked(lock_group);

		printf("RO Region Begin: %p End: %p\n", (void *)rorgn_begin, (void *)rorgn_end);
		printf("CTRR (MMU) Begin: %p End: %p, setting lockdown\n", (void *)ctrr_begin, (void *)ctrr_end);

		assert_amcc_cache_disabled(lock_group);
#endif /* DEVELOPMENT || DEBUG */

		// Lock the AMCC/IOA PIO lock registers.
		lock_all_lock_groups(lock_group, phystokv(rorgn_begin), phystokv(rorgn_end));

		/*
		 * KTRR/CTRR registers are inclusive of the smallest page size granule supported by the processor MMU
		 * rather than the actual page size in use. Load the last byte of the end page, and let the HW
		 * truncate per the smallest page granule supported. Must use the same treatment in start.s for warm
		 * start of CPU cores.
		 */
		lock_mmu(ctrr_begin, ctrr_end);

		// Unmap and free the PIO VA space needed to lockdown the lock groups.
		for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
			for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
				ml_io_unmap(lock_group_va[lg][aperture], lock_group[lg].aperture_size);
			}
		}
	}

#if defined(KERNEL_INTEGRITY_CTRR)
	/* wake any threads blocked on cluster master lockdown */
	cpu_data_t *cdp;

	cdp = getCpuDatap();

	cdp->cpu_cluster_id = ml_get_cluster_number_local();
	assert(cdp->cpu_cluster_id <= (uint32_t)ml_get_max_cluster_number());
	ctrr_cluster_locked[cdp->cpu_cluster_id] = CTRR_LOCKED;
	thread_wakeup(&ctrr_cluster_locked[cdp->cpu_cluster_id]);
#endif
}

#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */