/*
 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <pexpert/arm64/board_config.h>

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)

#include <libkern/section_keywords.h>
#include <libkern/kernel_mach_header.h>
#include <pexpert/pexpert.h>
#include <pexpert/device_tree.h>
#include <machine/atomic.h>
#include <arm/cpu_internal.h>
#include <arm/caches_internal.h>
#include <arm/machine_routines.h>
#include <arm64/tlb.h>
#include <arm64/amcc_rorgn.h>

#if HIBERNATION
#include <arm64/pal_hibernate.h>
#endif /* HIBERNATION */
#if HAS_IOA
#define MAX_LOCK_GROUPS 2          // 2 lock groups (AMCC, IOA)
#define IOA_LOCK_GROUP  1          // IOA lock group index
#else
#define MAX_LOCK_GROUPS 1          // 1 lock group (AMCC)
#endif
#define AMCC_LOCK_GROUP 0          // AMCC lock group index
#define MAX_APERTURES   16         // Maximum number of register apertures
#define MAX_PLANES      16         // Maximum number of planes within each aperture

#define LOCK_GROUP_HAS_CACHE_STATUS_REG (1 << 0) // Look for cache status register in the lock group
#define LOCK_GROUP_HAS_MASTER_LOCK_REG  (1 << 1) // Look for master lock register in the lock group

#define LOCK_TYPE_HAS_LOCK_REG          (1 << 0) // Look for lock register in the lock type
extern vm_offset_t   segLOWESTRO;
extern vm_offset_t   segHIGHESTRO;

extern vm_offset_t   segLASTB;
extern vm_offset_t   segTEXTEXECB;
extern unsigned long segSizeLAST;
extern unsigned long segSizeLASTDATACONST;
extern unsigned long segSizeTEXTEXEC;
extern unsigned long segSizeKLD;
typedef struct lock_reg {
	uint32_t reg_offset;       // Register offset
	uint32_t reg_mask;         // Register mask
	uint32_t reg_value;        // Register value
} lock_reg_t;
typedef struct lock_type {
	uint32_t   page_size_shift;   // Page shift used in lower/upper limit registers
	lock_reg_t lower_limit_reg;   // Lower limit register description
	lock_reg_t upper_limit_reg;   // Upper limit register description
	lock_reg_t enable_reg;        // Enable register description
	lock_reg_t write_disable_reg; // Write disable register description
	lock_reg_t lock_reg;          // Lock register description
} lock_type_t;
typedef struct lock_group {
	uint32_t    aperture_count;                    // Aperture count
	uint32_t    aperture_size;                     // Aperture size
	uint32_t    plane_count;                       // Number of planes in the aperture
	uint32_t    plane_stride;                      // Stride between planes in the aperture
	uint64_t    aperture_phys_addr[MAX_APERTURES]; // Aperture physical addresses
	lock_reg_t  cache_status_reg;                  // Cache status register description
#if HAS_IOA
	lock_reg_t  master_lock_reg;                   // Master lock register description
#endif
	lock_type_t ctrr_a;                            // CTRR-A (KTRR) lock
} lock_group_t;
SECURITY_READ_ONLY_LATE(lock_group_t) _lock_group[MAX_LOCK_GROUPS] = { {0} };
SECURITY_READ_ONLY_LATE(bool) lock_regs_set = false;
static vm_offset_t rorgn_begin = 0;
static vm_offset_t rorgn_end   = 0;
SECURITY_READ_ONLY_LATE(vm_offset_t) ctrr_begin = 0;
SECURITY_READ_ONLY_LATE(vm_offset_t) ctrr_end   = 0;

static uint64_t lock_group_va[MAX_LOCK_GROUPS][MAX_APERTURES];
#if CONFIG_CSR_FROM_DT
SECURITY_READ_ONLY_LATE(bool) csr_unsafe_kernel_text = false;
#endif
/*
 * lock_group_t - describes all the parameters xnu needs to know to
 * lock down the AMCC/IOA (Lock Group) Read Only Region(s) on cold start.
 * This description assumes that each AMCC/IOA in a given system will
 * be identical, respectively. The only variables are the number of
 * apertures present and the physical base address of each aperture.
 *
 * General xnu lock group lockdown flow:
 * - for each lock group:
 *   - ml_io_map all present lock group physical base addresses
 *   - assert all lock group begin/end page numbers set by iBoot are identical
 *   - convert lock group begin/end page number to physical address
 *   - assert lock group begin/end page numbers match xnu's view of the read only region
 *   - assert lock group is not currently locked
 *   - ensure lock group master cache is disabled
 *   - write enable/lock registers to enable/lock the lock group read only region
 */
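
/*
 * Illustrative sketch only: the /chosen/lock-regs device tree shape that the
 * parsing code below expects, based on the lookups in _dt_get_lock_group(),
 * _dt_get_lock_type() and _dt_get_lock_reg(). The numeric values here are
 * hypothetical and not taken from any particular platform.
 *
 *   /chosen/lock-regs/
 *     amcc/
 *       aperture-count     = <1>
 *       aperture-size      = <0x100000>
 *       plane-count        = <4>
 *       plane-stride       = <0x40000>
 *       aperture-phys-addr = <0x200000000>
 *       cache-status-reg-offset / -mask / -value
 *       amcc-ctrr-a/
 *         page-size-shift = <14>
 *         lower-limit-reg-offset / -mask
 *         upper-limit-reg-offset / -mask
 *         lock-reg-offset / -mask / -value
 *         enable-reg-offset / -mask / -value
 *         write-disable-reg-offset / -mask / -value
 */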
static bool
_dt_get_uint32(DTEntry node, char const *name, uint32_t *dest)
{
	uint32_t const *value;
	unsigned int size;

	if (SecureDTGetProperty(node, name, (void const **)&value, &size) != kSuccess) {
		return false;
	}

	if (size != sizeof(uint32_t)) {
		panic("lock-regs: unexpected size %u", size);
	}

	*dest = *value;

	return true;
}
static uint32_t
_dt_get_uint32_required(DTEntry node, char const *name)
{
	uint32_t value;

	if (!_dt_get_uint32(node, name, &value)) {
		panic("lock-regs: cannot find required property '%s'", name);
	}

	return value;
}
static void
_dt_get_lock_reg(DTEntry node, lock_reg_t *reg, const char *parent_name, const char *reg_name, bool required, bool with_value)
{
	char prop_name[32];
	bool found;

	snprintf(prop_name, sizeof(prop_name), "%s-reg-offset", reg_name);
	found = _dt_get_uint32(node, prop_name, &reg->reg_offset);
	if (!found) {
		if (required) {
			panic("%s: missing property '%s'", parent_name, prop_name);
		}

		// This register is optional and absent; leave it unpopulated.
		return;
	}

	snprintf(prop_name, sizeof(prop_name), "%s-reg-mask", reg_name);
	found = _dt_get_uint32(node, prop_name, &reg->reg_mask);
	if (!found) {
		panic("%s: missing property '%s'", parent_name, prop_name);
	}

	if (with_value) {
		snprintf(prop_name, sizeof(prop_name), "%s-reg-value", reg_name);
		found = _dt_get_uint32(node, prop_name, &reg->reg_value);
		if (!found) {
			panic("%s: missing property '%s'", parent_name, prop_name);
		}
	}
}
static DTEntry
_dt_get_lock_group(DTEntry lock_regs_node, lock_group_t *lock_group, const char *group_name, uint32_t options)
{
	DTEntry group_node;

	// Find the lock group node.
	if (SecureDTLookupEntry(lock_regs_node, group_name, &group_node) != kSuccess) {
		panic("lock-regs: /chosen/lock-regs/%s not found", group_name);
	}

	lock_group->aperture_count = _dt_get_uint32_required(group_node, "aperture-count");

	if (lock_group->aperture_count > MAX_APERTURES) {
		panic("%s: %s %u exceeds maximum %u", group_name, "aperture-count", lock_group->aperture_count, MAX_APERTURES);
	}

	lock_group->aperture_size = _dt_get_uint32_required(group_node, "aperture-size");

	if ((lock_group->aperture_count > 0) && (lock_group->aperture_size == 0)) {
		panic("%s: have %u apertures, but 0 size", group_name, lock_group->aperture_count);
	}

	lock_group->plane_count = _dt_get_uint32_required(group_node, "plane-count");

	if (lock_group->plane_count > MAX_PLANES) {
		panic("%s: %s %u exceeds maximum %u", group_name, "plane-count", lock_group->plane_count, MAX_PLANES);
	}

	if (!_dt_get_uint32(group_node, "plane-stride", &lock_group->plane_stride)) {
		lock_group->plane_stride = 0;
	}

	if (lock_group->plane_count > 1) {
		uint32_t aperture_size;

		if (lock_group->plane_stride == 0) {
			panic("%s: plane-count (%u) > 1, but stride is 0/missing", group_name, lock_group->plane_count);
		}

		if (os_mul_overflow(lock_group->plane_count, lock_group->plane_stride, &aperture_size)
		    || (aperture_size > lock_group->aperture_size)) {
			panic("%s: aperture-size (%#x) is insufficient to cover plane-count (%#x) of plane-stride (%#x) bytes",
			    group_name, lock_group->aperture_size, lock_group->plane_count, lock_group->plane_stride);
		}
	}

	uint64_t const *phys_bases = NULL;
	unsigned int prop_size;
	if (SecureDTGetProperty(group_node, "aperture-phys-addr", (const void **)&phys_bases, &prop_size) != kSuccess) {
		panic("%s: missing required %s", group_name, "aperture-phys-addr");
	}
	if (prop_size != lock_group->aperture_count * sizeof(lock_group->aperture_phys_addr[0])) {
		panic("%s: aperture-phys-addr size (%#x) != (aperture-count (%#x) * PA size (%#zx) = %#lx)",
		    group_name, prop_size, lock_group->aperture_count, sizeof(lock_group->aperture_phys_addr[0]),
		    lock_group->aperture_count * sizeof(lock_group->aperture_phys_addr[0]));
	}
	memcpy(lock_group->aperture_phys_addr, phys_bases, prop_size);

	if (options & LOCK_GROUP_HAS_CACHE_STATUS_REG) {
		_dt_get_lock_reg(group_node, &lock_group->cache_status_reg, group_name, "cache-status", true, true);
	}

#if HAS_IOA
	if (options & LOCK_GROUP_HAS_MASTER_LOCK_REG) {
		_dt_get_lock_reg(group_node, &lock_group->master_lock_reg, group_name, "master-lock", true, true);
	}
#endif

	return group_node;
}
static void
_dt_get_lock_type(DTEntry group_node, lock_type_t *lock_type, const char *group_name, const char *type_name, uint32_t options)
{
	DTEntry type_node;
	bool has_lock = options & LOCK_TYPE_HAS_LOCK_REG;

	// Find the lock type type_node.
	if (SecureDTLookupEntry(group_node, type_name, &type_node) != kSuccess) {
		panic("lock-regs: /chosen/lock-regs/%s/%s not found", group_name, type_name);
	}

	lock_type->page_size_shift = _dt_get_uint32_required(type_node, "page-size-shift");

	// Find all of the registers for this lock type.
	//                Parent     Register Descriptor           Parent Name  Reg Name         Required  Value
	_dt_get_lock_reg(type_node, &lock_type->lower_limit_reg,   type_name,   "lower-limit",   true,     false);
	_dt_get_lock_reg(type_node, &lock_type->upper_limit_reg,   type_name,   "upper-limit",   true,     false);
	_dt_get_lock_reg(type_node, &lock_type->lock_reg,          type_name,   "lock",          has_lock, true);
	_dt_get_lock_reg(type_node, &lock_type->enable_reg,        type_name,   "enable",        false,    true);
	_dt_get_lock_reg(type_node, &lock_type->write_disable_reg, type_name,   "write-disable", false,    true);
}
/*
 * find_lock_group_data:
 *
 * finds and gathers lock group (AMCC/IOA) data from device tree, returns it as lock_group_t
 *
 * called first time before IOKit start while still uniprocessor
 */
static lock_group_t const * _Nonnull
find_lock_group_data(void)
{
	DTEntry lock_regs_node = NULL;
	DTEntry amcc_node = NULL;

	// Return the lock group data pointer if we already found and populated one.
	if (lock_regs_set) {
		return _lock_group;
	}

	if (SecureDTLookupEntry(NULL, "/chosen/lock-regs", &lock_regs_node) != kSuccess) {
		panic("lock-regs: /chosen/lock-regs not found (your iBoot or EDT may be too old)");
	}

	amcc_node = _dt_get_lock_group(lock_regs_node, &_lock_group[AMCC_LOCK_GROUP], "amcc", LOCK_GROUP_HAS_CACHE_STATUS_REG);
	_dt_get_lock_type(amcc_node, &_lock_group[AMCC_LOCK_GROUP].ctrr_a, "amcc", "amcc-ctrr-a", LOCK_TYPE_HAS_LOCK_REG);

#if HAS_IOA
	DTEntry ioa_node = _dt_get_lock_group(lock_regs_node, &_lock_group[IOA_LOCK_GROUP], "ioa", LOCK_GROUP_HAS_MASTER_LOCK_REG);
	_dt_get_lock_type(ioa_node, &_lock_group[IOA_LOCK_GROUP].ctrr_a, "ioa", "ioa-ctrr-a", 0);
#endif

	lock_regs_set = true;

	return _lock_group;
}
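
/*
 * Note: the parsed data is stashed in _lock_group and flagged via lock_regs_set,
 * so the device tree is only walked once. The first caller (rorgn_stash_range(),
 * below) pays for the parse; the later call from rorgn_lockdown() simply returns
 * the already-populated array.
 */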
void
rorgn_stash_range(void)
{
#if DEVELOPMENT || DEBUG || CONFIG_DTRACE || CONFIG_CSR_FROM_DT
	boolean_t rorgn_disable = FALSE;

#if DEVELOPMENT || DEBUG
	PE_parse_boot_argn("-unsafe_kernel_text", &rorgn_disable, sizeof(rorgn_disable));
#endif

#if CONFIG_CSR_FROM_DT
	if (csr_unsafe_kernel_text) {
		rorgn_disable = true;
	}
#endif

	if (rorgn_disable) {
		/* take early out if boot arg present, don't query any machine registers to avoid
		 * dependency on amcc DT entry
		 */
		return;
	}
#endif
	lock_group_t const * const lock_group = find_lock_group_data();

	/* Get the lock group read-only region range values, and stash them into rorgn_begin, rorgn_end. */
	uint64_t rorgn_begin_page[MAX_LOCK_GROUPS][MAX_APERTURES][MAX_PLANES];
	uint64_t rorgn_end_page[MAX_LOCK_GROUPS][MAX_APERTURES][MAX_PLANES];
	for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
		for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
			const uint64_t amcc_pa = lock_group[lg].aperture_phys_addr[aperture];

			// VA space will be unmapped and freed after lockdown complete in rorgn_lockdown()
			lock_group_va[lg][aperture] = ml_io_map(amcc_pa, lock_group[lg].aperture_size);

			if (lock_group_va[lg][aperture] == 0) {
				panic("map aperture_phys_addr[%u]/%#x failed", aperture, lock_group[lg].aperture_size);
			}

			for (unsigned int plane = 0; plane < lock_group[lg].plane_count; plane++) {
				uint64_t reg_addr;

				reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.lower_limit_reg.reg_offset;
				rorgn_begin_page[lg][aperture][plane] = *(volatile uint32_t *)reg_addr;
				reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.upper_limit_reg.reg_offset;
				rorgn_end_page[lg][aperture][plane] = *(volatile uint32_t *)reg_addr;
			}
		}
		assert(rorgn_end_page[lg][0][0] > rorgn_begin_page[lg][0][0]);
		for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
			for (unsigned int plane = 0; plane < lock_group[lg].plane_count; plane++) {
				if ((rorgn_begin_page[lg][aperture][plane] != rorgn_begin_page[0][0][0])
				    || (rorgn_end_page[lg][aperture][plane] != rorgn_end_page[0][0][0])) {
					panic("Inconsistent memory config");
				}
			}
		}
		uint64_t page_bytes = 1ULL << lock_group[lg].ctrr_a.page_size_shift;

		/* rorgn_begin and rorgn_end are first and last byte inclusive of lock group read only region as determined by iBoot. */
		rorgn_begin = (rorgn_begin_page[0][0][0] << lock_group[lg].ctrr_a.page_size_shift) + gDramBase;
		rorgn_end = (rorgn_end_page[0][0][0] << lock_group[lg].ctrr_a.page_size_shift) + gDramBase + page_bytes - 1;
	}
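
	/*
	 * Worked example with made-up values (not from any real platform): if
	 * page_size_shift == 14 (16 KiB lock group pages), gDramBase == 0x800000000,
	 * and iBoot programmed begin page 0x10 and end page 0x4f, then
	 *   rorgn_begin = (0x10 << 14) + 0x800000000              = 0x800040000
	 *   rorgn_end   = (0x4f << 14) + 0x800000000 + 0x4000 - 1 = 0x80013ffff
	 * i.e. the first and last byte (inclusive) of the read only region.
	 */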
	assert(segLOWESTRO && gVirtBase && gPhysBase);

	/* ctrr_begin and end are first and last bytes inclusive of MMU KTRR/CTRR region */
	ctrr_begin = kvtophys(segLOWESTRO);
#if defined(KERNEL_INTEGRITY_KTRR)

	/* __LAST is not part of the MMU KTRR region (it is however part of the AMCC read only region)
	 *
	 * +------------------+-----------+-----------------------------------+
	 * | Largest Address  | LAST      | <- AMCC RO Region End (rorgn_end) |
	 * +------------------+-----------+-----------------------------------+
	 * |                  | KLD       | <- KTRR RO Region End (ctrr_end)  |
	 * |                  | TEXT_EXEC |                                   |
	 * +------------------+-----------+-----------------------------------+
	 * |                  | ...       |                                   |
	 * +------------------+-----------+-----------------------------------+
	 * | Smallest Address | LOWEST    | <- KTRR/AMCC RO Region Begin      |
	 * |                  |           |    (ctrr_begin/rorgn_begin)       |
	 * +------------------+-----------+-----------------------------------+
	 *
	 */

	ctrr_end = kvtophys(segLASTB) - segSizeLASTDATACONST - 1;
	/* assert not booted from kernel collection */
	assert(!segHIGHESTRO);

	/* assert that __LAST segment containing privileged insns is only a single page */
	assert(segSizeLAST == PAGE_SIZE);

	/* assert that segLAST is contiguous and just after/above/numerically higher than KTRR end */
	assert((ctrr_end + 1) == kvtophys(segTEXTEXECB) + segSizeTEXTEXEC + segSizeKLD);

	/* ensure that iboot and xnu agree on the amcc rorgn range */
	assert((rorgn_begin == ctrr_begin) && (rorgn_end == (ctrr_end + segSizeLASTDATACONST + segSizeLAST)));
#elif defined(KERNEL_INTEGRITY_CTRR)

	/* __LAST is part of MMU CTRR region. Can't use the KTRR style method of making
	 * __pinst no execute because PXN applies with MMU off in CTRR.
	 *
	 * +------------------+-----------+------------------------------+
	 * | Largest Address  | LAST      | <- CTRR/AMCC RO Region End   |
	 * |                  |           |    (ctrr_end/rorgn_end)      |
	 * +------------------+-----------+------------------------------+
	 * |                  | PPLDATA_CONST                            |
	 * |                  | PPLTEXT   |                              |
	 * |                  | TEXT_EXEC |                              |
	 * +------------------+-----------+------------------------------+
	 * |                  | ...       |                              |
	 * +------------------+-----------+------------------------------+
	 * | Smallest Address | LOWEST    | <- CTRR/AMCC RO Region Begin |
	 * |                  |           |    (ctrr_begin/rorgn_begin)  |
	 * +------------------+-----------+------------------------------+
	 *
	 */

	if (segHIGHESTRO) {
		/*
		 * kernel collections may have additional kext RO data after kernel LAST
		 */
		assert(segLASTB + segSizeLAST <= segHIGHESTRO);
		ctrr_end = kvtophys(segHIGHESTRO) - 1;
	} else {
		ctrr_end = kvtophys(segLASTB) + segSizeLAST - 1;
	}

	/* ensure that iboot and xnu agree on the amcc rorgn range */
	assert((rorgn_begin == ctrr_begin) && (rorgn_end == ctrr_end));
#endif
}
static void
lock_all_lock_groups(lock_group_t const *lock_group, vm_offset_t begin, vm_offset_t end)
{
	uint64_t reg_addr;

	/*
	 * [x] - ensure all in flight writes are flushed to the lock group before enabling RO Region Lock
	 *
	 * begin and end are first and last byte inclusive of lock group read only region
	 */

	CleanPoC_DcacheRegion_Force(begin, end - begin + 1);
	for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
		for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
			/* lock planes in reverse order: plane 0 should be locked last */
			unsigned int plane = lock_group[lg].plane_count - 1;
			do {
				// Enable the protection region if the lock group defines an enable register.
				if (lock_group[lg].ctrr_a.enable_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.enable_reg.reg_offset;
					*(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.enable_reg.reg_value;
				}
				// Disable writes if the lock group defines a write disable register.
				if (lock_group[lg].ctrr_a.write_disable_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.write_disable_reg.reg_offset;
					*(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.write_disable_reg.reg_value;
				}
				// Lock the lock if the lock group defines a lock register.
				if (lock_group[lg].ctrr_a.lock_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.lock_reg.reg_offset;
					*(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.lock_reg.reg_value;
				}

				__builtin_arm_isb(ISB_SY);
			} while (plane-- > 0);
#if HAS_IOA
			// Lock the master lock if the lock group defines a master lock register.
			if (lock_group[lg].master_lock_reg.reg_mask != 0) {
				reg_addr = lock_group_va[lg][aperture] + lock_group[lg].master_lock_reg.reg_offset;
				*(volatile uint32_t *)reg_addr = lock_group[lg].master_lock_reg.reg_value;
			}

			__builtin_arm_isb(ISB_SY);
#endif
		}
	}
}
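
/*
 * For illustration of the register addressing in lock_all_lock_groups() above
 * (hypothetical numbers, only the arithmetic mirrors the code): with an
 * aperture mapped at lock_group_va == 0xfffffff012340000, plane_stride ==
 * 0x40000 and a lock-reg-offset of 0x884, the plane-1 lock write resolves to
 *   *(volatile uint32_t *)(0xfffffff012340000 + 1 * 0x40000 + 0x884) = lock_reg.reg_value;
 * i.e. a store to 0xfffffff012380884, followed by an ISB before moving on to
 * the next (lower-numbered) plane.
 */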
#if DEVELOPMENT || DEBUG
static void
assert_amcc_cache_disabled(lock_group_t const *lock_group)
{
	const lock_reg_t *cache_status_reg = &lock_group[AMCC_LOCK_GROUP].cache_status_reg;

	// If the platform does not define a cache status register, then we're done here.
	if (cache_status_reg->reg_mask != 0) {
		for (unsigned int aperture = 0; aperture < lock_group[AMCC_LOCK_GROUP].aperture_count; aperture++) {
			for (unsigned int plane = 0; plane < lock_group[AMCC_LOCK_GROUP].plane_count; plane++) {
				uint64_t reg_addr = lock_group_va[AMCC_LOCK_GROUP][aperture] + (plane * lock_group[AMCC_LOCK_GROUP].plane_stride) + cache_status_reg->reg_offset;
				uint32_t reg_value = *(volatile uint32_t *)reg_addr;
				assert((reg_value & cache_status_reg->reg_mask) == cache_status_reg->reg_value);
			}
		}
	}
}
#endif /* DEVELOPMENT || DEBUG */
/*
 * void rorgn_lockdown(void)
 *
 * Lock the MMU and AMCC RORegion within lower and upper boundaries if not already locked
 *
 * [ ] - ensure this is being called ASAP on secondary CPUs: KTRR programming and lockdown handled in
 *       start.s:start_cpu() for subsequent wake/resume of all cores
 */
void
rorgn_lockdown(void)
{
	boolean_t ctrr_disable = FALSE;

#if DEVELOPMENT || DEBUG
	PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_CSR_FROM_DT
	if (csr_unsafe_kernel_text) {
		ctrr_disable = true;
	}
#endif /* CONFIG_CSR_FROM_DT */
	if (!ctrr_disable) {
		lock_group_t const * const lock_group = find_lock_group_data();

#if DEVELOPMENT || DEBUG
		printf("RO Region Begin: %p End: %p\n", (void *)rorgn_begin, (void *)rorgn_end);
		printf("CTRR (MMU) Begin: %p End: %p, setting lockdown\n", (void *)ctrr_begin, (void *)ctrr_end);

		assert_amcc_cache_disabled(lock_group);
#endif /* DEVELOPMENT || DEBUG */

		// Lock the AMCC/IOA PIO lock registers.
		lock_all_lock_groups(lock_group, phystokv(rorgn_begin), phystokv(rorgn_end));

		// Unmap and free PIO VA space needed to lockdown the lock groups.
		for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
			for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
				ml_io_unmap(lock_group_va[lg][aperture], lock_group[lg].aperture_size);
			}
		}
	}
#if defined(KERNEL_INTEGRITY_CTRR)
	/* wake any threads blocked on cluster master lockdown */
	cpu_data_t *cdp = getCpuDatap();

	cdp->cpu_cluster_id = ml_get_cluster_number_local();
	assert(cdp->cpu_cluster_id <= (uint32_t)ml_get_max_cluster_number());
	ctrr_cluster_locked[cdp->cpu_cluster_id] = CTRR_LOCKED;
	thread_wakeup(&ctrr_cluster_locked[cdp->cpu_cluster_id]);
#endif
}

#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */