/*
 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <pexpert/arm64/board_config.h>

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)

#include <vm/pmap.h>
#include <libkern/section_keywords.h>
#include <libkern/kernel_mach_header.h>
#include <pexpert/pexpert.h>
#include <pexpert/device_tree.h>
#include <machine/atomic.h>
#include <arm/cpu_internal.h>
#include <arm/caches_internal.h>
#include <arm/machine_routines.h>
#include <arm/pmap.h>
#include <arm64/tlb.h>
#include <arm64/amcc_rorgn.h>
#include <memmap_types.h>

#if HIBERNATION
#include <arm64/pal_hibernate.h>
#endif /* HIBERNATION */

#if HAS_IOA
#define MAX_LOCK_GROUPS 2   // 2 lock groups (AMCC, IOA)
#define IOA_LOCK_GROUP  1   // IOA lock group index
#else
#define MAX_LOCK_GROUPS 1   // 1 lock group (AMCC)
#endif
#define AMCC_LOCK_GROUP 0   // AMCC lock group index
#define MAX_APERTURES   16  // Maximum number of register apertures
#define MAX_PLANES      16  // Maximum number of planes within each aperture

#define LOCK_GROUP_HAS_CACHE_STATUS_REG (1 << 0) // Look for cache status register in the lock group
#define LOCK_GROUP_HAS_MASTER_LOCK_REG  (1 << 1) // Look for master lock register in the lock group

#define LOCK_TYPE_HAS_LOCK_REG          (1 << 0) // Look for lock register in the lock type

extern vm_offset_t segLOWESTRO;
extern vm_offset_t segHIGHESTRO;

extern vm_offset_t segLASTB;
extern vm_offset_t segTEXTEXECB;
extern unsigned long segSizeLAST;
extern unsigned long segSizeLASTDATACONST;
extern unsigned long segSizeTEXTEXEC;

typedef struct lock_reg {
	uint32_t    reg_offset;   // Register offset
	uint32_t    reg_mask;     // Register mask
	uint32_t    reg_value;    // Register value
} lock_reg_t;

typedef struct lock_type {
	uint32_t    page_size_shift;     // page shift used in lower/upper limit registers
	lock_reg_t  lower_limit_reg;     // Lower limit register description
	lock_reg_t  upper_limit_reg;     // Upper limit register description
	lock_reg_t  enable_reg;          // Enable register description
	lock_reg_t  write_disable_reg;   // Write disable register description
	lock_reg_t  lock_reg;            // Lock register description
} lock_type_t;

typedef struct lock_group {
	uint32_t    aperture_count;      // Aperture count
	uint32_t    aperture_size;       // Aperture size
	uint32_t    plane_count;         // Number of planes in the aperture
	uint32_t    plane_stride;        // Stride between planes in the aperture
	uint64_t    aperture_phys_addr[MAX_APERTURES];   // Aperture physical addresses
	lock_reg_t  cache_status_reg;    // Cache status register description
#if HAS_IOA
	lock_reg_t  master_lock_reg;     // Master lock register description
#endif
	lock_type_t ctrr_a;              // CTRR-A (KTRR) lock
} lock_group_t;

SECURITY_READ_ONLY_LATE(lock_group_t) _lock_group[MAX_LOCK_GROUPS] = { {0} };
SECURITY_READ_ONLY_LATE(bool) lock_regs_set = false;

static vm_offset_t rorgn_begin = 0;
static vm_offset_t rorgn_end = 0;
SECURITY_READ_ONLY_LATE(vm_offset_t) ctrr_begin = 0;
SECURITY_READ_ONLY_LATE(vm_offset_t) ctrr_end = 0;

static uint64_t lock_group_va[MAX_LOCK_GROUPS][MAX_APERTURES];

#if CONFIG_CSR_FROM_DT
SECURITY_READ_ONLY_LATE(bool) csr_unsafe_kernel_text = false;
#endif

#if defined(KERNEL_INTEGRITY_KTRR)
#define CTRR_LOCK_MSR ARM64_REG_KTRR_LOCK_EL1
#elif defined(KERNEL_INTEGRITY_CTRR)
#define CTRR_LOCK_MSR ARM64_REG_CTRR_LOCK_EL1
#endif

/*
 * lock_group_t - describes all the parameters xnu needs to know to
 * lock down the AMCC/IOA (Lock Group) Read Only Region(s) on cold start.
 * This description assumes that all AMCCs in a given system are identical,
 * as are all IOAs. The only variables are the number of apertures present
 * and the physical base address of each aperture.
 *
 * General xnu lock group lockdown flow:
 * - for each lock group:
 *   - ml_io_map all present lock group physical base addresses
 *   - assert all lock group begin/end page numbers set by iBoot are identical
 *   - convert lock group begin/end page numbers to physical addresses
 *   - assert lock group begin/end page numbers match xnu's view of the read only region
 *   - assert the lock group is not currently locked
 *   - ensure the lock group master cache is disabled
 *   - write enable/lock registers to enable/lock the lock group read only region
 */
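
/*
 * Illustrative sketch (an assumed layout, not an authoritative EDT schema) of the
 * device tree shape this file consumes, pieced together from the node and property
 * names parsed below; actual node contents are platform specific:
 *
 *   /chosen/lock-regs/
 *       amcc/
 *           aperture-count, aperture-size, plane-count, plane-stride,
 *           aperture-phys-addr, cache-status-reg-{offset,mask,value}
 *           amcc-ctrr-a/
 *               page-size-shift,
 *               lower-limit-reg-{offset,mask}, upper-limit-reg-{offset,mask},
 *               enable-reg-{offset,mask,value}, write-disable-reg-{offset,mask,value},
 *               lock-reg-{offset,mask,value}
 *       ioa/                                  (HAS_IOA systems only)
 *           master-lock-reg-{offset,mask,value}, ..., ioa-ctrr-a/ ...
 */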

static bool
_dt_get_uint32(DTEntry node, char const *name, uint32_t *dest)
{
	uint32_t const *value;
	unsigned int size;

	if (SecureDTGetProperty(node, name, (void const **)&value, &size) != kSuccess) {
		return false;
	}

	if (size != sizeof(uint32_t)) {
		panic("lock-regs: unexpected size %u", size);
	}

	*dest = *value;

	return true;
}

static uint32_t
_dt_get_uint32_required(DTEntry node, char const *name)
{
	uint32_t value;

	if (!_dt_get_uint32(node, name, &value)) {
		panic("lock-regs: cannot find required property '%s'", name);
	}

	return value;
}

static bool
_dt_get_lock_reg(DTEntry node, lock_reg_t *reg, const char *parent_name, const char *reg_name, bool required, bool with_value)
{
	char prop_name[32];
	bool found;

	snprintf(prop_name, sizeof(prop_name), "%s-reg-offset", reg_name);
	found = _dt_get_uint32(node, prop_name, &reg->reg_offset);
	if (!found) {
		if (required) {
			panic("%s: missing property '%s'", parent_name, prop_name);
		} else {
			return false;
		}
	}

	snprintf(prop_name, sizeof(prop_name), "%s-reg-mask", reg_name);
	found = _dt_get_uint32(node, prop_name, &reg->reg_mask);
	if (!found) {
		panic("%s: missing property '%s'", parent_name, prop_name);
	}

	if (with_value) {
		snprintf(prop_name, sizeof(prop_name), "%s-reg-value", reg_name);
		found = _dt_get_uint32(node, prop_name, &reg->reg_value);
		if (!found) {
			panic("%s: missing property '%s'", parent_name, prop_name);
		}
	}

	return true;
}
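
/*
 * For example (illustrative only): a call with reg_name "lower-limit" looks up the
 * "lower-limit-reg-offset" and "lower-limit-reg-mask" properties, plus
 * "lower-limit-reg-value" when with_value is true; whether a missing offset property
 * panics or simply returns false depends on the required flag.
 */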

static DTEntry
_dt_get_lock_group(DTEntry lock_regs_node, lock_group_t *lock_group, const char *group_name, uint32_t options)
{
	DTEntry group_node;

	// Find the lock group node.
	if (SecureDTLookupEntry(lock_regs_node, group_name, &group_node) != kSuccess) {
		panic("lock-regs: /chosen/lock-regs/%s not found", group_name);
	}

	lock_group->aperture_count = _dt_get_uint32_required(group_node, "aperture-count");

	if (lock_group->aperture_count > MAX_APERTURES) {
		panic("%s: %s %u exceeds maximum %u", group_name, "aperture-count", lock_group->aperture_count, MAX_APERTURES);
	}

	lock_group->aperture_size = _dt_get_uint32_required(group_node, "aperture-size");

	if ((lock_group->aperture_count > 0) && (lock_group->aperture_size == 0)) {
		panic("%s: have %u apertures, but 0 size", group_name, lock_group->aperture_count);
	}

	lock_group->plane_count = _dt_get_uint32_required(group_node, "plane-count");

	if (lock_group->plane_count > MAX_PLANES) {
		panic("%s: %s %u exceeds maximum %u", group_name, "plane-count", lock_group->plane_count, MAX_PLANES);
	}

	if (!_dt_get_uint32(group_node, "plane-stride", &lock_group->plane_stride)) {
		lock_group->plane_stride = 0;
	}

	if (lock_group->plane_count > 1) {
		uint32_t aperture_size;

		if (lock_group->plane_stride == 0) {
			panic("%s: plane-count (%u) > 1, but stride is 0/missing", group_name, lock_group->plane_count);
		}

		if (os_mul_overflow(lock_group->plane_count, lock_group->plane_stride, &aperture_size)
		    || (aperture_size > lock_group->aperture_size)) {
			panic("%s: aperture-size (%#x) is insufficient to cover plane-count (%#x) of plane-stride (%#x) bytes", group_name, lock_group->aperture_size, lock_group->plane_count, lock_group->plane_stride);
		}
	}

	uint64_t const *phys_bases = NULL;
	unsigned int prop_size;
	if (SecureDTGetProperty(group_node, "aperture-phys-addr", (const void **)&phys_bases, &prop_size) != kSuccess) {
		panic("%s: missing required %s", group_name, "aperture-phys-addr");
	}

	if (prop_size != lock_group->aperture_count * sizeof(lock_group->aperture_phys_addr[0])) {
		panic("%s: aperture-phys-addr size (%#x) != (aperture-count (%#x) * PA size (%#zx) = %#lx)",
		    group_name, prop_size, lock_group->aperture_count, sizeof(lock_group->aperture_phys_addr[0]),
		    lock_group->aperture_count * sizeof(lock_group->aperture_phys_addr[0]));
	}

	memcpy(lock_group->aperture_phys_addr, phys_bases, prop_size);

	if (options & LOCK_GROUP_HAS_CACHE_STATUS_REG) {
		_dt_get_lock_reg(group_node, &lock_group->cache_status_reg, group_name, "cache-status", true, true);
	}

#if HAS_IOA
	if (options & LOCK_GROUP_HAS_MASTER_LOCK_REG) {
		_dt_get_lock_reg(group_node, &lock_group->master_lock_reg, group_name, "master-lock", true, true);
	}
#endif

	return group_node;
}

static void
_dt_get_lock_type(DTEntry group_node, lock_type_t *lock_type, const char *group_name, const char *type_name, uint32_t options)
{
	DTEntry type_node;
	bool has_lock = options & LOCK_TYPE_HAS_LOCK_REG;

	// Find the lock type type_node.
	if (SecureDTLookupEntry(group_node, type_name, &type_node) != kSuccess) {
		panic("lock-regs: /chosen/lock-regs/%s/%s not found", group_name, type_name);
	}

	lock_type->page_size_shift = _dt_get_uint32_required(type_node, "page-size-shift");

	// Find all of the registers for this lock type.
	//               Parent      Register Descriptor            Parent Name  Reg Name         Required  Value
	_dt_get_lock_reg(type_node,  &lock_type->lower_limit_reg,   type_name,   "lower-limit",   true,     false);
	_dt_get_lock_reg(type_node,  &lock_type->upper_limit_reg,   type_name,   "upper-limit",   true,     false);
	_dt_get_lock_reg(type_node,  &lock_type->lock_reg,          type_name,   "lock",          has_lock, true);
	_dt_get_lock_reg(type_node,  &lock_type->enable_reg,        type_name,   "enable",        false,    true);
	_dt_get_lock_reg(type_node,  &lock_type->write_disable_reg, type_name,   "write-disable", false,    true);
}

/*
 * find_lock_group_data:
 *
 * Finds and gathers lock group (AMCC/IOA) data from the device tree and returns it
 * as an array of lock_group_t.
 *
 * Called for the first time before IOKit starts, while the system is still uniprocessor.
 *
 */
static lock_group_t const * _Nonnull
find_lock_group_data(void)
{
	DTEntry lock_regs_node = NULL;
	DTEntry amcc_node = NULL;

	// Return the lock group data pointer if we already found and populated one.
	if (lock_regs_set) {
		return _lock_group;
	}

	if (SecureDTLookupEntry(NULL, "/chosen/lock-regs", &lock_regs_node) != kSuccess) {
		panic("lock-regs: /chosen/lock-regs not found (your iBoot or EDT may be too old)");
	}

	amcc_node = _dt_get_lock_group(lock_regs_node, &_lock_group[AMCC_LOCK_GROUP], "amcc", LOCK_GROUP_HAS_CACHE_STATUS_REG);
	_dt_get_lock_type(amcc_node, &_lock_group[AMCC_LOCK_GROUP].ctrr_a, "amcc", "amcc-ctrr-a", LOCK_TYPE_HAS_LOCK_REG);

#if HAS_IOA
	DTEntry ioa_node = _dt_get_lock_group(lock_regs_node, &_lock_group[IOA_LOCK_GROUP], "ioa", LOCK_GROUP_HAS_MASTER_LOCK_REG);
	_dt_get_lock_type(ioa_node, &_lock_group[IOA_LOCK_GROUP].ctrr_a, "ioa", "ioa-ctrr-a", 0);
#endif

	lock_regs_set = true;

	return _lock_group;
}

void
rorgn_stash_range(void)
{
#if DEVELOPMENT || DEBUG || CONFIG_DTRACE || CONFIG_CSR_FROM_DT
	boolean_t rorgn_disable = FALSE;

#if DEVELOPMENT || DEBUG
	PE_parse_boot_argn("-unsafe_kernel_text", &rorgn_disable, sizeof(rorgn_disable));
#endif

#if CONFIG_CSR_FROM_DT
	if (csr_unsafe_kernel_text) {
		rorgn_disable = true;
	}
#endif

	if (rorgn_disable) {
		/* take early out if boot arg present, don't query any machine registers to avoid
		 * dependency on amcc DT entry
		 */
		return;
	}
#endif
	lock_group_t const * const lock_group = find_lock_group_data();

	/* Get the lock group read-only region range values, and stash them into rorgn_begin, rorgn_end. */
	uint64_t rorgn_begin_page[MAX_LOCK_GROUPS][MAX_APERTURES][MAX_PLANES];
	uint64_t rorgn_end_page[MAX_LOCK_GROUPS][MAX_APERTURES][MAX_PLANES];

	for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
		for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
			const uint64_t amcc_pa = lock_group[lg].aperture_phys_addr[aperture];

			// VA space will be unmapped and freed after lockdown complete in rorgn_lockdown()
			lock_group_va[lg][aperture] = ml_io_map(amcc_pa, lock_group[lg].aperture_size);

			if (lock_group_va[lg][aperture] == 0) {
				panic("map aperture_phys_addr[%u]/%#x failed", aperture, lock_group[lg].aperture_size);
			}

			for (unsigned int plane = 0; plane < lock_group[lg].plane_count; plane++) {
				uint64_t reg_addr;

				reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.lower_limit_reg.reg_offset;
				rorgn_begin_page[lg][aperture][plane] = *(volatile uint32_t *)reg_addr;
				reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.upper_limit_reg.reg_offset;
				rorgn_end_page[lg][aperture][plane] = *(volatile uint32_t *)reg_addr;
			}
		}

		assert(rorgn_end_page[lg][0][0] > rorgn_begin_page[lg][0][0]);

		for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
			for (unsigned int plane = 0; plane < lock_group[lg].plane_count; plane++) {
				if ((rorgn_begin_page[lg][aperture][plane] != rorgn_begin_page[0][0][0])
				    || (rorgn_end_page[lg][aperture][plane] != rorgn_end_page[0][0][0])) {
					panic("Inconsistent memory config");
				}
			}
		}

		uint64_t page_bytes = 1ULL << lock_group[lg].ctrr_a.page_size_shift;

		/* rorgn_begin and rorgn_end are first and last byte inclusive of lock group read only region as determined by iBoot. */
		rorgn_begin = (rorgn_begin_page[0][0][0] << lock_group[lg].ctrr_a.page_size_shift) + gDramBase;
		rorgn_end = (rorgn_end_page[0][0][0] << lock_group[lg].ctrr_a.page_size_shift) + gDramBase + page_bytes - 1;
	}
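
	/*
	 * Worked example with hypothetical values: with page_size_shift == 14 (16KB lock
	 * group pages), rorgn_begin_page == 0x100 and rorgn_end_page == 0x1ff, this yields
	 *   rorgn_begin = (0x100 << 14) + gDramBase = gDramBase + 0x400000
	 *   rorgn_end   = (0x1ff << 14) + gDramBase + 0x4000 - 1 = gDramBase + 0x7fffff
	 * i.e. a 4MB region whose last byte is included.
	 */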

	assert(segLOWESTRO && gVirtBase && gPhysBase);

	/* ctrr_begin and end are first and last bytes inclusive of MMU KTRR/CTRR region */
	ctrr_begin = kvtophys(segLOWESTRO);

#if defined(KERNEL_INTEGRITY_KTRR)

	/* __LAST is not part of the MMU KTRR region (it is however part of the AMCC read only region)
	 *
	 * +------------------+-----------+-----------------------------------+
	 * | Largest Address  | LAST      | <- AMCC RO Region End (rorgn_end) |
	 * +------------------+-----------+-----------------------------------+
	 * |                  | TEXT_EXEC | <- KTRR RO Region End (ctrr_end)  |
	 * +------------------+-----------+-----------------------------------+
	 * |                  | ...       |                                   |
	 * +------------------+-----------+-----------------------------------+
	 * | Smallest Address | LOWEST    | <- KTRR/AMCC RO Region Begin      |
	 * |                  |           |    (ctrr_begin/rorgn_begin)       |
	 * +------------------+-----------+-----------------------------------+
	 *
	 */

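	/*
	 * As implied by the computation and asserts below, __LASTDATACONST sits between
	 * __TEXT_EXEC and __LAST, so the KTRR region ends at the last byte of __TEXT_EXEC
	 * while the AMCC region additionally covers __LASTDATACONST and __LAST.
	 */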
	ctrr_end = kvtophys(segLASTB) - segSizeLASTDATACONST - 1;

	/* assert not booted from kernel collection */
	assert(!segHIGHESTRO);

	/* assert that __LAST segment containing privileged insns is only a single page */
	assert(segSizeLAST == PAGE_SIZE);

	/* assert that segLAST is contiguous and just after/above/numerically higher than KTRR end */
	assert((ctrr_end + 1) == kvtophys(segTEXTEXECB) + segSizeTEXTEXEC);

	/* ensure that iboot and xnu agree on the amcc rorgn range */
	assert((rorgn_begin == ctrr_begin) && (rorgn_end == (ctrr_end + segSizeLASTDATACONST + segSizeLAST)));
#elif defined(KERNEL_INTEGRITY_CTRR)

	/* __LAST is part of MMU CTRR region. Can't use the KTRR style method of making
	 * __pinst no execute because PXN applies with MMU off in CTRR.
	 *
	 * +------------------+-----------+------------------------------+
	 * | Largest Address  | LAST      | <- CTRR/AMCC RO Region End   |
	 * |                  |           |    (ctrr_end/rorgn_end)      |
	 * +------------------+-----------+------------------------------+
	 * |                  | TEXT_EXEC |                              |
	 * +------------------+-----------+------------------------------+
	 * |                  | ...       |                              |
	 * +------------------+-----------+------------------------------+
	 * | Smallest Address | LOWEST    | <- CTRR/AMCC RO Region Begin |
	 * |                  |           |    (ctrr_begin/rorgn_begin)  |
	 * +------------------+-----------+------------------------------+
	 *
	 */

	if (segHIGHESTRO) {
		/*
		 * kernel collections may have additional kext RO data after kernel LAST
		 */
		assert(segLASTB + segSizeLAST <= segHIGHESTRO);
		ctrr_end = kvtophys(segHIGHESTRO) - 1;
	} else {
		ctrr_end = kvtophys(segLASTB) + segSizeLAST - 1;
	}

	/* ensure that iboot and xnu agree on the amcc rorgn range */
	assert((rorgn_begin == ctrr_begin) && (rorgn_end == ctrr_end));
#endif
}

#if DEVELOPMENT || DEBUG
static void
assert_all_lock_groups_unlocked(lock_group_t const *lock_groups)
{
	uint64_t reg_addr;
	uint64_t ctrr_lock = 0;
	bool locked = false;
	bool write_disabled = false;

	assert(lock_groups);

	for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
		for (unsigned int aperture = 0; aperture < lock_groups[lg].aperture_count; aperture++) {
#if HAS_IOA
			// Does the lock group define a master lock register?
			if (lock_groups[lg].master_lock_reg.reg_mask != 0) {
				reg_addr = lock_group_va[lg][aperture] + lock_groups[lg].master_lock_reg.reg_offset;
				locked |= ((*(volatile uint32_t *)reg_addr & lock_groups[lg].master_lock_reg.reg_mask) == lock_groups[lg].master_lock_reg.reg_value);
			}
#endif
			for (unsigned int plane = 0; plane < lock_groups[lg].plane_count; plane++) {
				// Does the lock group define a write disable register?
				if (lock_groups[lg].ctrr_a.write_disable_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_groups[lg].plane_stride) + lock_groups[lg].ctrr_a.write_disable_reg.reg_offset;
					write_disabled |= ((*(volatile uint32_t *)reg_addr & lock_groups[lg].ctrr_a.write_disable_reg.reg_mask) == lock_groups[lg].ctrr_a.write_disable_reg.reg_value);
				}

				// Does the lock group define a lock register?
				if (lock_groups[lg].ctrr_a.lock_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_groups[lg].plane_stride) + lock_groups[lg].ctrr_a.lock_reg.reg_offset;
					locked |= ((*(volatile uint32_t *)reg_addr & lock_groups[lg].ctrr_a.lock_reg.reg_mask) == lock_groups[lg].ctrr_a.lock_reg.reg_value);
				}
			}
		}
	}

	ctrr_lock = __builtin_arm_rsr64(CTRR_LOCK_MSR);

	assert(!ctrr_lock);
	assert(!write_disabled && !locked);
}
#endif

static void
lock_all_lock_groups(lock_group_t const *lock_group, vm_offset_t begin, vm_offset_t end)
{
	uint64_t reg_addr;
	assert(lock_group);

	/*
	 * [x] - ensure all in flight writes are flushed to the lock group before enabling RO Region Lock
	 *
	 * begin and end are first and last byte inclusive of lock group read only region
	 */

	CleanPoC_DcacheRegion_Force(begin, end - begin + 1);

	for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
		for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
			/* lock planes in reverse order: plane 0 should be locked last */
			unsigned int plane = lock_group[lg].plane_count - 1;
			do {
				// Enable the protection region if the lock group defines an enable register.
				if (lock_group[lg].ctrr_a.enable_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.enable_reg.reg_offset;
					*(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.enable_reg.reg_value;
				}

				// Disable writes if the lock group defines a write disable register.
				if (lock_group[lg].ctrr_a.write_disable_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.write_disable_reg.reg_offset;
					*(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.write_disable_reg.reg_value;
				}

				// Lock the lock if the lock group defines a lock register.
				if (lock_group[lg].ctrr_a.lock_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.lock_reg.reg_offset;
					*(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.lock_reg.reg_value;
				}

				__builtin_arm_isb(ISB_SY);
			} while (plane-- > 0);
#if HAS_IOA
			// Lock the master lock if the lock group defines a master lock register.
			if (lock_group[lg].master_lock_reg.reg_mask != 0) {
				reg_addr = lock_group_va[lg][aperture] + lock_group[lg].master_lock_reg.reg_offset;
				*(volatile uint32_t *)reg_addr = lock_group[lg].master_lock_reg.reg_value;
			}
			__builtin_arm_isb(ISB_SY);
#endif
		}
	}
}

static void
lock_mmu(uint64_t begin, uint64_t end)
{
#if defined(KERNEL_INTEGRITY_KTRR)

	__builtin_arm_wsr64(ARM64_REG_KTRR_LOWER_EL1, begin);
	__builtin_arm_wsr64(ARM64_REG_KTRR_UPPER_EL1, end);
	__builtin_arm_wsr64(ARM64_REG_KTRR_LOCK_EL1, 1ULL);

	/* flush TLB */

	__builtin_arm_isb(ISB_SY);
	flush_mmu_tlb();

#elif defined(KERNEL_INTEGRITY_CTRR)
	/* this will lock the entire bootstrap cluster. non bootstrap clusters
	 * will be locked by respective cluster master in start.s */

	__builtin_arm_wsr64(ARM64_REG_CTRR_A_LWR_EL1, begin);
	__builtin_arm_wsr64(ARM64_REG_CTRR_A_UPR_EL1, end);

#if !defined(APPLEVORTEX)
	/* H12+ changed sequence, must invalidate TLB immediately after setting CTRR bounds */
	__builtin_arm_isb(ISB_SY); /* ensure all prior MSRs are complete */
	flush_mmu_tlb();
#endif /* !defined(APPLEVORTEX) */

	__builtin_arm_wsr64(ARM64_REG_CTRR_CTL_EL1, CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT);
	__builtin_arm_wsr64(ARM64_REG_CTRR_LOCK_EL1, 1ULL);

	uint64_t current_el = __builtin_arm_rsr64("CurrentEL");
	if (current_el == PSR64_MODE_EL2) {
		// CTRR v2 has explicit registers for cluster config. they can only be written in EL2

		__builtin_arm_wsr64(ACC_CTRR_A_LWR_EL2, begin);
		__builtin_arm_wsr64(ACC_CTRR_A_UPR_EL2, end);
		__builtin_arm_wsr64(ACC_CTRR_CTL_EL2, CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT);
		__builtin_arm_wsr64(ACC_CTRR_LOCK_EL2, 1ULL);
	}

	__builtin_arm_isb(ISB_SY); /* ensure all prior MSRs are complete */
#if defined(APPLEVORTEX)
	flush_mmu_tlb();
#endif /* defined(APPLEVORTEX) */

#else /* defined(KERNEL_INTEGRITY_KTRR) */
#error KERNEL_INTEGRITY config error
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
}

#if DEVELOPMENT || DEBUG
static void
assert_amcc_cache_disabled(lock_group_t const *lock_group)
{
	assert(lock_group);

	const lock_reg_t *cache_status_reg = &lock_group[AMCC_LOCK_GROUP].cache_status_reg;

	// If the platform does not define a cache status register, then we're done here.
	if (cache_status_reg->reg_mask == 0) {
		return;
	}

	for (unsigned int aperture = 0; aperture < lock_group[AMCC_LOCK_GROUP].aperture_count; aperture++) {
		for (unsigned int plane = 0; plane < lock_group[AMCC_LOCK_GROUP].plane_count; plane++) {
			uint64_t reg_addr = lock_group_va[AMCC_LOCK_GROUP][aperture] + (plane * lock_group[AMCC_LOCK_GROUP].plane_stride) + cache_status_reg->reg_offset;
			uint32_t reg_value = *(volatile uint32_t *)reg_addr;
			assert((reg_value & cache_status_reg->reg_mask) == cache_status_reg->reg_value);
		}
	}
}
#endif /* DEVELOPMENT || DEBUG */

/*
 * void rorgn_lockdown(void)
 *
 * Lock the MMU and AMCC RORegion within lower and upper boundaries if not already locked
 *
 * [ ] - ensure this is being called ASAP on secondary CPUs: KTRR programming and lockdown handled in
 *       start.s:start_cpu() for subsequent wake/resume of all cores
 */
void
rorgn_lockdown(void)
{
	boolean_t ctrr_disable = FALSE;

#if DEVELOPMENT || DEBUG
	PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_CSR_FROM_DT
	if (csr_unsafe_kernel_text) {
		ctrr_disable = true;
	}
#endif /* CONFIG_CSR_FROM_DT */

	if (!ctrr_disable) {
		lock_group_t const * const lock_group = find_lock_group_data();

#if DEVELOPMENT || DEBUG
		assert_all_lock_groups_unlocked(lock_group);

		printf("RO Region Begin: %p End: %p\n", (void *)rorgn_begin, (void *)rorgn_end);
		printf("CTRR (MMU) Begin: %p End: %p, setting lockdown\n", (void *)ctrr_begin, (void *)ctrr_end);

		assert_amcc_cache_disabled(lock_group);
#endif /* DEVELOPMENT || DEBUG */

		// Lock the AMCC/IOA PIO lock registers.
		lock_all_lock_groups(lock_group, phystokv(rorgn_begin), phystokv(rorgn_end));

		/*
		 * KTRR/CTRR registers are inclusive of the smallest page size granule supported by processor MMU
		 * rather than the actual page size in use. Load the last byte of the end page, and let the HW
		 * truncate per the smallest page granule supported. Must use the same treatment in start.s for warm
		 * start of APs.
		 */
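		/*
		 * Hypothetical example: with 16KB kernel pages but a 4KB smallest supported MMU
		 * granule, ctrr_end ends in ...0x3fff (last byte of the 16KB end page); the
		 * hardware drops the bits below its smallest granule, so the same value programs
		 * the bound correctly whichever granule is actually in use.
		 */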
		lock_mmu(ctrr_begin, ctrr_end);

		// Unmap and free PIO VA space needed to lockdown the lock groups.
		for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
			for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
				ml_io_unmap(lock_group_va[lg][aperture], lock_group[lg].aperture_size);
			}
		}
	}

#if defined(KERNEL_INTEGRITY_CTRR)
	/* wake any threads blocked on cluster master lockdown */
	cpu_data_t *cdp;

	cdp = getCpuDatap();

	cdp->cpu_cluster_id = ml_get_cluster_number_local();
	assert(cdp->cpu_cluster_id <= (uint32_t)ml_get_max_cluster_number());
	ctrr_cluster_locked[cdp->cpu_cluster_id] = CTRR_LOCKED;
	thread_wakeup(&ctrr_cluster_locked[cdp->cpu_cluster_id]);
#endif
}

#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */