apple/xnu (xnu-7195.81.3) - osfmk/arm64/amcc_rorgn.c
/*
 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <pexpert/arm64/board_config.h>

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)

#include <vm/pmap.h>
#include <libkern/section_keywords.h>
#include <libkern/kernel_mach_header.h>
#include <pexpert/pexpert.h>
#include <pexpert/device_tree.h>
#include <machine/atomic.h>
#include <arm/cpu_internal.h>
#include <arm/caches_internal.h>
#include <arm/machine_routines.h>
#include <arm/pmap.h>
#include <arm64/tlb.h>
#include <arm64/amcc_rorgn.h>

#if HIBERNATION
#include <arm64/pal_hibernate.h>
#endif /* HIBERNATION */

#if HAS_IOA
#define MAX_LOCK_GROUPS 2                        // 2 lock groups (AMCC, IOA)
#define IOA_LOCK_GROUP  1                        // IOA lock group index
#else
#define MAX_LOCK_GROUPS 1                        // 1 lock group (AMCC)
#endif
#define AMCC_LOCK_GROUP 0                        // AMCC lock group index
#define MAX_APERTURES   16                       // Maximum number of register apertures
#define MAX_PLANES      16                       // Maximum number of planes within each aperture

#define LOCK_GROUP_HAS_CACHE_STATUS_REG (1 << 0) // Look for cache status register in the lock group
#define LOCK_GROUP_HAS_MASTER_LOCK_REG  (1 << 1) // Look for master lock register in the lock group

#define LOCK_TYPE_HAS_LOCK_REG          (1 << 0) // Look for lock register in the lock type

extern vm_offset_t segLOWESTRO;
extern vm_offset_t segHIGHESTRO;

extern vm_offset_t segLASTB;
extern vm_offset_t segTEXTEXECB;
extern unsigned long segSizeLAST;
extern unsigned long segSizeLASTDATACONST;
extern unsigned long segSizeTEXTEXEC;

typedef struct lock_reg {
	uint32_t        reg_offset;         // Register offset
	uint32_t        reg_mask;           // Register mask
	uint32_t        reg_value;          // Register value
} lock_reg_t;

typedef struct lock_type {
	uint32_t        page_size_shift;    // page shift used in lower/upper limit registers
	lock_reg_t      lower_limit_reg;    // Lower limit register description
	lock_reg_t      upper_limit_reg;    // Upper limit register description
	lock_reg_t      enable_reg;         // Enable register description
	lock_reg_t      write_disable_reg;  // Write disable register description
	lock_reg_t      lock_reg;           // Lock register description
} lock_type_t;

typedef struct lock_group {
	uint32_t        aperture_count;     // Aperture count
	uint32_t        aperture_size;      // Aperture size
	uint32_t        plane_count;        // Number of planes in the aperture
	uint32_t        plane_stride;       // Stride between planes in the aperture
	uint64_t        aperture_phys_addr[MAX_APERTURES];  // Aperture physical addresses
	lock_reg_t      cache_status_reg;   // Cache status register description
#if HAS_IOA
	lock_reg_t      master_lock_reg;    // Master lock register description
#endif
	lock_type_t     ctrr_a;             // CTRR-A (KTRR) lock
} lock_group_t;

SECURITY_READ_ONLY_LATE(lock_group_t) _lock_group[MAX_LOCK_GROUPS] = { {0} };
SECURITY_READ_ONLY_LATE(bool) lock_regs_set = false;

static vm_offset_t rorgn_begin = 0;
static vm_offset_t rorgn_end = 0;
SECURITY_READ_ONLY_LATE(vm_offset_t) ctrr_begin = 0;
SECURITY_READ_ONLY_LATE(vm_offset_t) ctrr_end = 0;

static uint64_t lock_group_va[MAX_LOCK_GROUPS][MAX_APERTURES];

#if CONFIG_CSR_FROM_DT
SECURITY_READ_ONLY_LATE(bool) csr_unsafe_kernel_text = false;
#endif

#if defined(KERNEL_INTEGRITY_KTRR)
#define CTRR_LOCK_MSR ARM64_REG_KTRR_LOCK_EL1
#elif defined(KERNEL_INTEGRITY_CTRR)
#define CTRR_LOCK_MSR ARM64_REG_CTRR_LOCK_EL1
#endif

/*
 * lock_group_t - describes all the parameters xnu needs to know to
 * lock down the AMCC/IOA (Lock Group) Read Only Region(s) on cold start.
 * This description assumes that all AMCCs (and, respectively, all IOAs)
 * in a given system are identical. The only variables are the number of
 * apertures present and the physical base address of each aperture.
 *
 * General xnu lock group lockdown flow:
 * - for each lock group:
 *   - ml_io_map all present lock group physical base addresses
 *   - assert all lock group begin/end page numbers set by iboot are identical
 *   - convert lock group begin/end page number to physical address
 *   - assert lock group begin/end page numbers match xnu view of read only region
 *   - assert lock group is not currently locked
 *   - ensure lock group master cache is disabled
 *   - write enable/lock registers to enable/lock the lock group read only region
 */
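
/*
 * Illustrative sketch (not taken from a real device tree) of the
 * /chosen/lock-regs layout this file expects, using only the node and
 * property names parsed below; all values are hypothetical and
 * platform-specific:
 *
 *   /chosen/lock-regs/
 *     amcc/
 *       aperture-count     = <1>
 *       aperture-size      = <0x100000>
 *       plane-count        = <4>
 *       plane-stride       = <0x40000>
 *       aperture-phys-addr = <0x200000000>       (64-bit, one entry per aperture)
 *       cache-status-reg-offset/-mask/-value
 *       amcc-ctrr-a/
 *         page-size-shift  = <14>
 *         lower-limit-reg-offset/-mask
 *         upper-limit-reg-offset/-mask
 *         enable-reg-offset/-mask/-value          (optional)
 *         write-disable-reg-offset/-mask/-value   (optional)
 *         lock-reg-offset/-mask/-value
 *     ioa/                                        (HAS_IOA only)
 *       ... same shape, plus master-lock-reg-* and an ioa-ctrr-a child ...
 */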

static bool
_dt_get_uint32(DTEntry node, char const *name, uint32_t *dest)
{
	uint32_t const *value;
	unsigned int size;

	if (SecureDTGetProperty(node, name, (void const **)&value, &size) != kSuccess) {
		return false;
	}

	if (size != sizeof(uint32_t)) {
		panic("lock-regs: unexpected size %u", size);
	}

	*dest = *value;

	return true;
}

static uint32_t
_dt_get_uint32_required(DTEntry node, char const *name)
{
	uint32_t value;

	if (!_dt_get_uint32(node, name, &value)) {
		panic("lock-regs: cannot find required property '%s'", name);
	}

	return value;
}

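/*
 * Read one register description from the device tree: for a given reg_name
 * (e.g. "lower-limit"), look up "<reg_name>-reg-offset", "<reg_name>-reg-mask"
 * and, when with_value is set, "<reg_name>-reg-value" under 'node'. Returns
 * false if the offset property is absent and the register is not required;
 * panics if any other expected property is missing.
 */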
static bool
_dt_get_lock_reg(DTEntry node, lock_reg_t *reg, const char *parent_name, const char *reg_name, bool required, bool with_value)
{
	char prop_name[32];
	bool found;

	snprintf(prop_name, sizeof(prop_name), "%s-reg-offset", reg_name);
	found = _dt_get_uint32(node, prop_name, &reg->reg_offset);
	if (!found) {
		if (required) {
			panic("%s: missing property '%s'", parent_name, prop_name);
		} else {
			return false;
		}
	}

	snprintf(prop_name, sizeof(prop_name), "%s-reg-mask", reg_name);
	found = _dt_get_uint32(node, prop_name, &reg->reg_mask);
	if (!found) {
		panic("%s: missing property '%s'", parent_name, prop_name);
	}

	if (with_value) {
		snprintf(prop_name, sizeof(prop_name), "%s-reg-value", reg_name);
		found = _dt_get_uint32(node, prop_name, &reg->reg_value);
		if (!found) {
			panic("%s: missing property '%s'", parent_name, prop_name);
		}
	}

	return true;
}

static DTEntry
_dt_get_lock_group(DTEntry lock_regs_node, lock_group_t* lock_group, const char *group_name, uint32_t options)
{
	DTEntry group_node;

	// Find the lock group node.
	if (SecureDTLookupEntry(lock_regs_node, group_name, &group_node) != kSuccess) {
		panic("lock-regs: /chosen/lock-regs/%s not found", group_name);
	}

	lock_group->aperture_count = _dt_get_uint32_required(group_node, "aperture-count");

	if (lock_group->aperture_count > MAX_APERTURES) {
		panic("%s: %s %u exceeds maximum %u", group_name, "aperture-count", lock_group->aperture_count, MAX_APERTURES);
	}

	lock_group->aperture_size = _dt_get_uint32_required(group_node, "aperture-size");

	if ((lock_group->aperture_count > 0) && (lock_group->aperture_size == 0)) {
		panic("%s: have %u apertures, but 0 size", group_name, lock_group->aperture_count);
	}

	lock_group->plane_count = _dt_get_uint32_required(group_node, "plane-count");

	if (lock_group->plane_count > MAX_PLANES) {
		panic("%s: %s %u exceeds maximum %u", group_name, "plane-count", lock_group->plane_count, MAX_PLANES);
	}

	if (!_dt_get_uint32(group_node, "plane-stride", &lock_group->plane_stride)) {
		lock_group->plane_stride = 0;
	}

	if (lock_group->plane_count > 1) {
		uint32_t aperture_size;

		if (lock_group->plane_stride == 0) {
			panic("%s: plane-count (%u) > 1, but stride is 0/missing", group_name, lock_group->plane_count);
		}

		if (os_mul_overflow(lock_group->plane_count, lock_group->plane_stride, &aperture_size)
		    || (aperture_size > lock_group->aperture_size)) {
			panic("%s: aperture-size (%#x) is insufficient to cover plane-count (%#x) of plane-stride (%#x) bytes", group_name, lock_group->aperture_size, lock_group->plane_count, lock_group->plane_stride);
		}
	}

	uint64_t const *phys_bases = NULL;
	unsigned int prop_size;
	if (SecureDTGetProperty(group_node, "aperture-phys-addr", (const void**)&phys_bases, &prop_size) != kSuccess) {
		panic("%s: missing required %s", group_name, "aperture-phys-addr");
	}

	if (prop_size != lock_group->aperture_count * sizeof(lock_group->aperture_phys_addr[0])) {
		panic("%s: aperture-phys-addr size (%#x) != (aperture-count (%#x) * PA size (%#zx) = %#lx)",
		    group_name, prop_size, lock_group->aperture_count, sizeof(lock_group->aperture_phys_addr[0]),
		    lock_group->aperture_count * sizeof(lock_group->aperture_phys_addr[0]));
	}

	memcpy(lock_group->aperture_phys_addr, phys_bases, prop_size);

	if (options & LOCK_GROUP_HAS_CACHE_STATUS_REG) {
		_dt_get_lock_reg(group_node, &lock_group->cache_status_reg, group_name, "cache-status", true, true);
	}

#if HAS_IOA
	if (options & LOCK_GROUP_HAS_MASTER_LOCK_REG) {
		_dt_get_lock_reg(group_node, &lock_group->master_lock_reg, group_name, "master-lock", true, true);
	}
#endif

	return group_node;
}

static void
_dt_get_lock_type(DTEntry group_node, lock_type_t *lock_type, const char *group_name, const char *type_name, uint32_t options)
{
	DTEntry type_node;
	bool has_lock = options & LOCK_TYPE_HAS_LOCK_REG;

	// Find the type_node for this lock type.
	if (SecureDTLookupEntry(group_node, type_name, &type_node) != kSuccess) {
		panic("lock-regs: /chosen/lock-regs/%s/%s not found", group_name, type_name);
	}

	lock_type->page_size_shift = _dt_get_uint32_required(type_node, "page-size-shift");

	// Find all of the registers for this lock type.
	//               Parent     Register Descriptor            Parent Name  Reg Name        Required  Value
	_dt_get_lock_reg(type_node, &lock_type->lower_limit_reg,   type_name, "lower-limit",   true,     false);
	_dt_get_lock_reg(type_node, &lock_type->upper_limit_reg,   type_name, "upper-limit",   true,     false);
	_dt_get_lock_reg(type_node, &lock_type->lock_reg,          type_name, "lock",          has_lock, true);
	_dt_get_lock_reg(type_node, &lock_type->enable_reg,        type_name, "enable",        false,    true);
	_dt_get_lock_reg(type_node, &lock_type->write_disable_reg, type_name, "write-disable", false,    true);
}

/*
 * find_lock_group_data:
 *
 * Finds and gathers lock group (AMCC/IOA) data from the device tree and
 * returns it as a lock_group_t array.
 *
 * Called for the first time before IOKit starts, while still uniprocessor.
 *
 */
static lock_group_t const * _Nonnull
find_lock_group_data(void)
{
	DTEntry lock_regs_node = NULL;
	DTEntry amcc_node = NULL;

	// Return the lock group data pointer if we already found and populated one.
	if (lock_regs_set) {
		return _lock_group;
	}

	if (SecureDTLookupEntry(NULL, "/chosen/lock-regs", &lock_regs_node) != kSuccess) {
		panic("lock-regs: /chosen/lock-regs not found (your iBoot or EDT may be too old)");
	}

	amcc_node = _dt_get_lock_group(lock_regs_node, &_lock_group[AMCC_LOCK_GROUP], "amcc", LOCK_GROUP_HAS_CACHE_STATUS_REG);
	_dt_get_lock_type(amcc_node, &_lock_group[AMCC_LOCK_GROUP].ctrr_a, "amcc", "amcc-ctrr-a", LOCK_TYPE_HAS_LOCK_REG);

#if HAS_IOA
	DTEntry ioa_node = _dt_get_lock_group(lock_regs_node, &_lock_group[IOA_LOCK_GROUP], "ioa", LOCK_GROUP_HAS_MASTER_LOCK_REG);
	_dt_get_lock_type(ioa_node, &_lock_group[IOA_LOCK_GROUP].ctrr_a, "ioa", "ioa-ctrr-a", 0);
#endif

	lock_regs_set = true;

	return _lock_group;
}

void
rorgn_stash_range(void)
{
#if DEVELOPMENT || DEBUG || CONFIG_DTRACE || CONFIG_CSR_FROM_DT
	boolean_t rorgn_disable = FALSE;

#if DEVELOPMENT || DEBUG
	PE_parse_boot_argn("-unsafe_kernel_text", &rorgn_disable, sizeof(rorgn_disable));
#endif

#if CONFIG_CSR_FROM_DT
	if (csr_unsafe_kernel_text) {
		rorgn_disable = true;
	}
#endif

	if (rorgn_disable) {
		/* take early out if boot arg present, don't query any machine registers to avoid
		 * dependency on amcc DT entry
		 */
		return;
	}
#endif
	lock_group_t const * const lock_group = find_lock_group_data();

	/* Get the lock group read-only region range values, and stash them into rorgn_begin, rorgn_end. */
	uint64_t rorgn_begin_page[MAX_LOCK_GROUPS][MAX_APERTURES][MAX_PLANES];
	uint64_t rorgn_end_page[MAX_LOCK_GROUPS][MAX_APERTURES][MAX_PLANES];

	for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
		for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
			const uint64_t amcc_pa = lock_group[lg].aperture_phys_addr[aperture];

			// VA space will be unmapped and freed after lockdown complete in rorgn_lockdown()
			lock_group_va[lg][aperture] = ml_io_map(amcc_pa, lock_group[lg].aperture_size);

			if (lock_group_va[lg][aperture] == 0) {
				panic("map aperture_phys_addr[%u]/%#x failed", aperture, lock_group[lg].aperture_size);
			}

			for (unsigned int plane = 0; plane < lock_group[lg].plane_count; plane++) {
				uint64_t reg_addr;

				reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.lower_limit_reg.reg_offset;
				rorgn_begin_page[lg][aperture][plane] = *(volatile uint32_t *)reg_addr;
				reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.upper_limit_reg.reg_offset;
				rorgn_end_page[lg][aperture][plane] = *(volatile uint32_t *)reg_addr;
			}
		}

		assert(rorgn_end_page[lg][0][0] > rorgn_begin_page[lg][0][0]);

		for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
			for (unsigned int plane = 0; plane < lock_group[lg].plane_count; plane++) {
				if ((rorgn_begin_page[lg][aperture][plane] != rorgn_begin_page[0][0][0])
				    || (rorgn_end_page[lg][aperture][plane] != rorgn_end_page[0][0][0])) {
					panic("Inconsistent memory config");
				}
			}
		}

		uint64_t page_bytes = 1ULL << lock_group[lg].ctrr_a.page_size_shift;

		/* rorgn_begin and rorgn_end are first and last byte inclusive of lock group read only region as determined by iBoot. */
		rorgn_begin = (rorgn_begin_page[0][0][0] << lock_group[lg].ctrr_a.page_size_shift) + gDramBase;
		rorgn_end = (rorgn_end_page[0][0][0] << lock_group[lg].ctrr_a.page_size_shift) + gDramBase + page_bytes - 1;
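
		/*
		 * Illustrative example with hypothetical values: if page_size_shift
		 * were 14 (a 16 KiB granule) and iBoot had programmed begin/end page
		 * numbers 0x8000 and 0x8fff, the lines above would yield
		 *   rorgn_begin = gDramBase + (0x8000 << 14)
		 *   rorgn_end   = gDramBase + (0x9000 << 14) - 1
		 * i.e. the region covers through the last byte of the end page.
		 */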
	}

	assert(segLOWESTRO && gVirtBase && gPhysBase);

	/* ctrr_begin and end are first and last bytes inclusive of MMU KTRR/CTRR region */
	ctrr_begin = kvtophys(segLOWESTRO);

#if defined(KERNEL_INTEGRITY_KTRR)

	/* __LAST is not part of the MMU KTRR region (it is however part of the AMCC read only region)
	 *
	 * +------------------+-----------+-----------------------------------+
	 * | Largest Address  | LAST      | <- AMCC RO Region End (rorgn_end) |
	 * +------------------+-----------+-----------------------------------+
	 * |                  | TEXT_EXEC | <- KTRR RO Region End (ctrr_end)  |
	 * +------------------+-----------+-----------------------------------+
	 * |                  | ...       |                                   |
	 * +------------------+-----------+-----------------------------------+
	 * | Smallest Address | LOWEST    | <- KTRR/AMCC RO Region Begin      |
	 * |                  |           |    (ctrr_begin/rorgn_begin)       |
	 * +------------------+-----------+-----------------------------------+
	 *
	 */

	ctrr_end = kvtophys(segLASTB) - segSizeLASTDATACONST - 1;

	/* assert not booted from kernel collection */
	assert(!segHIGHESTRO);

	/* assert that __LAST segment containing privileged insns is only a single page */
	assert(segSizeLAST == PAGE_SIZE);

	/* assert that segLAST is contiguous and just after/above/numerically higher than KTRR end */
	assert((ctrr_end + 1) == kvtophys(segTEXTEXECB) + segSizeTEXTEXEC);

	/* ensure that iboot and xnu agree on the amcc rorgn range */
	assert((rorgn_begin == ctrr_begin) && (rorgn_end == (ctrr_end + segSizeLASTDATACONST + segSizeLAST)));
#elif defined(KERNEL_INTEGRITY_CTRR)

	/* __LAST is part of MMU CTRR region. Can't use the KTRR style method of making
	 * __pinst no execute because PXN applies with MMU off in CTRR.
	 *
	 * +------------------+-----------+------------------------------+
	 * | Largest Address  | LAST      | <- CTRR/AMCC RO Region End   |
	 * |                  |           |    (ctrr_end/rorgn_end)      |
	 * +------------------+-----------+------------------------------+
	 * |                  | TEXT_EXEC |                              |
	 * +------------------+-----------+------------------------------+
	 * |                  | ...       |                              |
	 * +------------------+-----------+------------------------------+
	 * | Smallest Address | LOWEST    | <- CTRR/AMCC RO Region Begin |
	 * |                  |           |    (ctrr_begin/rorgn_begin)  |
	 * +------------------+-----------+------------------------------+
	 *
	 */

	if (segHIGHESTRO) {
		/*
		 * kernel collections may have additional kext RO data after kernel LAST
		 */
		assert(segLASTB + segSizeLAST <= segHIGHESTRO);
		ctrr_end = kvtophys(segHIGHESTRO) - 1;
	} else {
		ctrr_end = kvtophys(segLASTB) + segSizeLAST - 1;
	}

	/* ensure that iboot and xnu agree on the amcc rorgn range */
	assert((rorgn_begin == ctrr_begin) && (rorgn_end == ctrr_end));
#endif
}

#if DEVELOPMENT || DEBUG
static void
assert_all_lock_groups_unlocked(lock_group_t const *lock_groups)
{
	uint64_t reg_addr;
	uint64_t ctrr_lock = 0;
	bool locked = false;
	bool write_disabled = false;

	assert(lock_groups);

	for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
		for (unsigned int aperture = 0; aperture < lock_groups[lg].aperture_count; aperture++) {
#if HAS_IOA
			// Does the lock group define a master lock register?
			if (lock_groups[lg].master_lock_reg.reg_mask != 0) {
				reg_addr = lock_group_va[lg][aperture] + lock_groups[lg].master_lock_reg.reg_offset;
				locked |= ((*(volatile uint32_t *)reg_addr & lock_groups[lg].master_lock_reg.reg_mask) == lock_groups[lg].master_lock_reg.reg_value);
			}
#endif
			for (unsigned int plane = 0; plane < lock_groups[lg].plane_count; plane++) {
				// Does the lock group define a write disable register?
				if (lock_groups[lg].ctrr_a.write_disable_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_groups[lg].plane_stride) + lock_groups[lg].ctrr_a.write_disable_reg.reg_offset;
					write_disabled |= ((*(volatile uint32_t *)reg_addr & lock_groups[lg].ctrr_a.write_disable_reg.reg_mask) == lock_groups[lg].ctrr_a.write_disable_reg.reg_value);
				}

				// Does the lock group define a lock register?
				if (lock_groups[lg].ctrr_a.lock_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_groups[lg].plane_stride) + lock_groups[lg].ctrr_a.lock_reg.reg_offset;
					locked |= ((*(volatile uint32_t *)reg_addr & lock_groups[lg].ctrr_a.lock_reg.reg_mask) == lock_groups[lg].ctrr_a.lock_reg.reg_value);
				}
			}
		}
	}

	ctrr_lock = __builtin_arm_rsr64(CTRR_LOCK_MSR);

	assert(!ctrr_lock);
	assert(!write_disabled && !locked);
}
#endif

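/*
 * lock_all_lock_groups:
 *
 * Write the enable, write-disable, and lock registers for every plane of
 * every aperture in every lock group (planes in reverse order so that plane 0
 * is locked last), then set the master lock where one is defined (IOA).
 *
 * begin/end are the first and last byte (inclusive) of the lock group read
 * only region; that range is force-cleaned to the point of coherency before
 * the locks are set.
 */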
static void
lock_all_lock_groups(lock_group_t const *lock_group, vm_offset_t begin, vm_offset_t end)
{
	uint64_t reg_addr;
	assert(lock_group);

	/*
	 * [x] - ensure all in flight writes are flushed to the lock group before enabling RO Region Lock
	 *
	 * begin and end are first and last byte inclusive of lock group read only region
	 */

	CleanPoC_DcacheRegion_Force(begin, end - begin + 1);

	for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
		for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
			/* lock planes in reverse order: plane 0 should be locked last */
			unsigned int plane = lock_group[lg].plane_count - 1;
			do {
				// Enable the protection region if the lock group defines an enable register.
				if (lock_group[lg].ctrr_a.enable_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.enable_reg.reg_offset;
					*(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.enable_reg.reg_value;
				}

				// Disable writes if the lock group defines a write disable register.
				if (lock_group[lg].ctrr_a.write_disable_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.write_disable_reg.reg_offset;
					*(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.write_disable_reg.reg_value;
				}

				// Lock the lock if the lock group defines a lock register.
				if (lock_group[lg].ctrr_a.lock_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.lock_reg.reg_offset;
					*(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.lock_reg.reg_value;
				}

				__builtin_arm_isb(ISB_SY);
			} while (plane-- > 0);
#if HAS_IOA
			// Lock the master lock if the lock group defines a master lock register.
			if (lock_group[lg].master_lock_reg.reg_mask != 0) {
				reg_addr = lock_group_va[lg][aperture] + lock_group[lg].master_lock_reg.reg_offset;
				*(volatile uint32_t *)reg_addr = lock_group[lg].master_lock_reg.reg_value;
			}
			__builtin_arm_isb(ISB_SY);
#endif
		}
	}
}

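/*
 * lock_mmu:
 *
 * Program the KTRR/CTRR bound, control, and lock registers for this boot
 * CPU (KTRR) or the bootstrap cluster (CTRR) with the physical begin/end of
 * the RO region, and flush the TLB. Other cores/clusters are handled in
 * start.s on wake, as noted in the comments below.
 */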
static void
lock_mmu(uint64_t begin, uint64_t end)
{
#if defined(KERNEL_INTEGRITY_KTRR)

	__builtin_arm_wsr64(ARM64_REG_KTRR_LOWER_EL1, begin);
	__builtin_arm_wsr64(ARM64_REG_KTRR_UPPER_EL1, end);
	__builtin_arm_wsr64(ARM64_REG_KTRR_LOCK_EL1, 1ULL);

	/* flush TLB */

	__builtin_arm_isb(ISB_SY);
	flush_mmu_tlb();

#elif defined (KERNEL_INTEGRITY_CTRR)
	/* this will lock the entire bootstrap cluster. non bootstrap clusters
	 * will be locked by respective cluster master in start.s */

	__builtin_arm_wsr64(ARM64_REG_CTRR_A_LWR_EL1, begin);
	__builtin_arm_wsr64(ARM64_REG_CTRR_A_UPR_EL1, end);

#if !defined(APPLEVORTEX)
	/* H12+ changed sequence, must invalidate TLB immediately after setting CTRR bounds */
	__builtin_arm_isb(ISB_SY); /* ensure all prior MSRs are complete */
	flush_mmu_tlb();
#endif /* !defined(APPLEVORTEX) */

	__builtin_arm_wsr64(ARM64_REG_CTRR_CTL_EL1, CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT);
	__builtin_arm_wsr64(ARM64_REG_CTRR_LOCK_EL1, 1ULL);

	uint64_t current_el = __builtin_arm_rsr64("CurrentEL");
	if (current_el == PSR64_MODE_EL2) {
		// CTRR v2 has explicit registers for cluster config. they can only be written in EL2

		__builtin_arm_wsr64(ACC_CTRR_A_LWR_EL2, begin);
		__builtin_arm_wsr64(ACC_CTRR_A_UPR_EL2, end);
		__builtin_arm_wsr64(ACC_CTRR_CTL_EL2, CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT);
		__builtin_arm_wsr64(ACC_CTRR_LOCK_EL2, 1ULL);
	}

	__builtin_arm_isb(ISB_SY); /* ensure all prior MSRs are complete */
#if defined(APPLEVORTEX)
	flush_mmu_tlb();
#endif /* defined(APPLEVORTEX) */

#else /* defined(KERNEL_INTEGRITY_KTRR) */
#error KERNEL_INTEGRITY config error
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
}

#if DEVELOPMENT || DEBUG
static void
assert_amcc_cache_disabled(lock_group_t const *lock_group)
{
	assert(lock_group);

	const lock_reg_t *cache_status_reg = &lock_group[AMCC_LOCK_GROUP].cache_status_reg;

	// If the platform does not define a cache status register, then we're done here.
	if (cache_status_reg->reg_mask == 0) {
		return;
	}

	for (unsigned int aperture = 0; aperture < lock_group[AMCC_LOCK_GROUP].aperture_count; aperture++) {
		for (unsigned int plane = 0; plane < lock_group[AMCC_LOCK_GROUP].plane_count; plane++) {
			uint64_t reg_addr = lock_group_va[AMCC_LOCK_GROUP][aperture] + (plane * lock_group[AMCC_LOCK_GROUP].plane_stride) + cache_status_reg->reg_offset;
			uint32_t reg_value = *(volatile uint32_t *)reg_addr;
			assert((reg_value & cache_status_reg->reg_mask) == cache_status_reg->reg_value);
		}
	}
}
#endif /* DEVELOPMENT || DEBUG */

/*
 * void rorgn_lockdown(void)
 *
 * Lock the MMU and AMCC RORegion within lower and upper boundaries if not already locked
 *
 * [ ] - ensure this is being called ASAP on secondary CPUs: KTRR programming and lockdown handled in
 *       start.s:start_cpu() for subsequent wake/resume of all cores
 */
void
rorgn_lockdown(void)
{
	boolean_t ctrr_disable = FALSE;

#if DEVELOPMENT || DEBUG
	PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_CSR_FROM_DT
	if (csr_unsafe_kernel_text) {
		ctrr_disable = true;
	}
#endif /* CONFIG_CSR_FROM_DT */

	if (!ctrr_disable) {
		lock_group_t const * const lock_group = find_lock_group_data();

#if DEVELOPMENT || DEBUG
		assert_all_lock_groups_unlocked(lock_group);

		printf("RO Region Begin: %p End: %p\n", (void *)rorgn_begin, (void *)rorgn_end);
		printf("CTRR (MMU) Begin: %p End: %p, setting lockdown\n", (void *)ctrr_begin, (void *)ctrr_end);

		assert_amcc_cache_disabled(lock_group);
#endif /* DEVELOPMENT || DEBUG */

		// Lock the AMCC/IOA PIO lock registers.
		lock_all_lock_groups(lock_group, phystokv(rorgn_begin), phystokv(rorgn_end));

		/*
		 * KTRR/CTRR registers are inclusive of the smallest page size granule supported by processor MMU
		 * rather than the actual page size in use. Load the last byte of the end page, and let the HW
		 * truncate per the smallest page granule supported. Must use same treatment in start.s for warm
		 * start of APs.
		 */
		lock_mmu(ctrr_begin, ctrr_end);

		// Unmap and free PIO VA space needed to lockdown the lock groups.
		for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
			for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
				ml_io_unmap(lock_group_va[lg][aperture], lock_group[lg].aperture_size);
			}
		}
	}

#if defined(KERNEL_INTEGRITY_CTRR)
	/* wake any threads blocked on cluster master lockdown */
	cpu_data_t *cdp;

	cdp = getCpuDatap();

	cdp->cpu_cluster_id = ml_get_cluster_number_local();
	assert(cdp->cpu_cluster_id <= (uint32_t)ml_get_max_cluster_number());
	ctrr_cluster_locked[cdp->cpu_cluster_id] = CTRR_LOCKED;
	thread_wakeup(&ctrr_cluster_locked[cdp->cpu_cluster_id]);
#endif
}

#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */