/*
 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <pexpert/arm64/board_config.h>

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)

#include <vm/pmap.h>
#include <libkern/section_keywords.h>
#include <libkern/kernel_mach_header.h>
#include <pexpert/pexpert.h>
#include <pexpert/device_tree.h>
#include <machine/atomic.h>
#include <arm/cpu_internal.h>
#include <arm/caches_internal.h>
#include <arm/machine_routines.h>
#include <arm/pmap.h>
#include <arm64/tlb.h>
#include <arm64/amcc_rorgn.h>

#if HIBERNATION
#include <arm64/pal_hibernate.h>
#endif /* HIBERNATION */

#if HAS_IOA
#define MAX_LOCK_GROUPS 2       // 2 lock groups (AMCC, IOA)
#define IOA_LOCK_GROUP  1       // IOA lock group index
#else
#define MAX_LOCK_GROUPS 1       // 1 lock group (AMCC)
#endif
#define AMCC_LOCK_GROUP 0       // AMCC lock group index
#define MAX_APERTURES   16      // Maximum number of register apertures
#define MAX_PLANES      16      // Maximum number of planes within each aperture

#define LOCK_GROUP_HAS_CACHE_STATUS_REG (1 << 0) // Look for cache status register in the lock group
#define LOCK_GROUP_HAS_MASTER_LOCK_REG  (1 << 1) // Look for master lock register in the lock group

#define LOCK_TYPE_HAS_LOCK_REG          (1 << 0) // Look for lock register in the lock type

extern vm_offset_t segLOWESTRO;
extern vm_offset_t segHIGHESTRO;

extern vm_offset_t segLASTB;
extern vm_offset_t segTEXTEXECB;
extern unsigned long segSizeLAST;
extern unsigned long segSizeLASTDATACONST;
extern unsigned long segSizeTEXTEXEC;
extern unsigned long segSizeKLD;

typedef struct lock_reg {
	uint32_t reg_offset;          // Register offset
	uint32_t reg_mask;            // Register mask
	uint32_t reg_value;           // Register value
} lock_reg_t;

typedef struct lock_type {
	uint32_t page_size_shift;     // Page shift used in the lower/upper limit registers
	lock_reg_t lower_limit_reg;   // Lower limit register description
	lock_reg_t upper_limit_reg;   // Upper limit register description
	lock_reg_t enable_reg;        // Enable register description
	lock_reg_t write_disable_reg; // Write disable register description
	lock_reg_t lock_reg;          // Lock register description
} lock_type_t;

typedef struct lock_group {
	uint32_t aperture_count;      // Aperture count
	uint32_t aperture_size;       // Aperture size
	uint32_t plane_count;         // Number of planes in the aperture
	uint32_t plane_stride;        // Stride between planes in the aperture
	uint64_t aperture_phys_addr[MAX_APERTURES]; // Aperture physical addresses
	lock_reg_t cache_status_reg;  // Cache status register description
#if HAS_IOA
	lock_reg_t master_lock_reg;   // Master lock register description
#endif
	lock_type_t ctrr_a;           // CTRR-A (KTRR) lock
} lock_group_t;
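
/*
 * For illustration only (not part of the driver): a lock_group_t for a
 * hypothetical system with two AMCC apertures of four planes each might be
 * populated roughly as follows. Every value here is made up; the real
 * contents come from the device tree parsing below.
 *
 *   lock_group_t g = {
 *       .aperture_count     = 2,
 *       .aperture_size      = 0x100000,
 *       .plane_count        = 4,
 *       .plane_stride       = 0x40000,
 *       .aperture_phys_addr = { 0x200000000, 0x300000000 },
 *       .ctrr_a.lower_limit_reg = { .reg_offset = 0x680, .reg_mask = ~0u },
 *       // ... remaining registers filled in the same way
 *   };
 */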

SECURITY_READ_ONLY_LATE(lock_group_t) _lock_group[MAX_LOCK_GROUPS] = { {0} };
SECURITY_READ_ONLY_LATE(bool) lock_regs_set = false;

static vm_offset_t rorgn_begin = 0;
static vm_offset_t rorgn_end = 0;
SECURITY_READ_ONLY_LATE(vm_offset_t) ctrr_begin = 0;
SECURITY_READ_ONLY_LATE(vm_offset_t) ctrr_end = 0;

static uint64_t lock_group_va[MAX_LOCK_GROUPS][MAX_APERTURES];

#if CONFIG_CSR_FROM_DT
SECURITY_READ_ONLY_LATE(bool) csr_unsafe_kernel_text = false;
#endif

/*
 * lock_group_t - describes all the parameters xnu needs to know to
 * lock down the AMCC/IOA (Lock Group) Read Only Region(s) on cold start.
 * This description assumes that each AMCC/IOA in a given system will
 * be identical, respectively. The only variables are the number of
 * apertures present and the physical base address of each aperture.
 *
 * General xnu lock group lockdown flow:
 * - for each lock group:
 *   - ml_io_map all present lock group physical base addresses
 *   - assert all lock group begin/end page numbers set by iboot are identical
 *   - convert lock group begin/end page number to physical address
 *   - assert lock group begin/end page numbers match xnu view of read only region
 *   - assert lock group is not currently locked
 *   - ensure lock group master cache is disabled
 *   - write enable/lock registers to enable/lock the lock group read only region
 */
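
/*
 * A sketch of the /chosen/lock-regs subtree this file consumes, assuming a
 * single-lock-group (AMCC-only) configuration. Node and property names match
 * the lookups below, but every value shown is hypothetical:
 *
 *   /chosen/lock-regs/
 *       amcc/
 *           aperture-count     = <1>
 *           aperture-size      = <0x100000>
 *           plane-count        = <1>
 *           plane-stride       = <0>            (optional)
 *           aperture-phys-addr = <0x200000000>
 *           cache-status-reg-offset/-mask/-value
 *           amcc-ctrr-a/
 *               page-size-shift = <14>
 *               lower-limit-reg-offset/-mask
 *               upper-limit-reg-offset/-mask
 *               lock-reg-offset/-mask/-value
 *               enable-reg-offset/-mask/-value        (optional)
 *               write-disable-reg-offset/-mask/-value (optional)
 */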

static bool
_dt_get_uint32(DTEntry node, char const *name, uint32_t *dest)
{
	uint32_t const *value;
	unsigned int size;

	if (SecureDTGetProperty(node, name, (void const **)&value, &size) != kSuccess) {
		return false;
	}

	if (size != sizeof(uint32_t)) {
		panic("lock-regs: unexpected size %u", size);
	}

	*dest = *value;

	return true;
}

static uint32_t
_dt_get_uint32_required(DTEntry node, char const *name)
{
	uint32_t value;

	if (!_dt_get_uint32(node, name, &value)) {
		panic("lock-regs: cannot find required property '%s'", name);
	}

	return value;
}

static bool
_dt_get_lock_reg(DTEntry node, lock_reg_t *reg, const char *parent_name, const char *reg_name, bool required, bool with_value)
{
	char prop_name[32];
	bool found;

	snprintf(prop_name, sizeof(prop_name), "%s-reg-offset", reg_name);
	found = _dt_get_uint32(node, prop_name, &reg->reg_offset);
	if (!found) {
		if (required) {
			panic("%s: missing property '%s'", parent_name, prop_name);
		} else {
			return false;
		}
	}

	snprintf(prop_name, sizeof(prop_name), "%s-reg-mask", reg_name);
	found = _dt_get_uint32(node, prop_name, &reg->reg_mask);
	if (!found) {
		panic("%s: missing property '%s'", parent_name, prop_name);
	}

	if (with_value) {
		snprintf(prop_name, sizeof(prop_name), "%s-reg-value", reg_name);
		found = _dt_get_uint32(node, prop_name, &reg->reg_value);
		if (!found) {
			panic("%s: missing property '%s'", parent_name, prop_name);
		}
	}

	return true;
}
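
/*
 * Example (illustrative): the call made from _dt_get_lock_type() below,
 *
 *   _dt_get_lock_reg(type_node, &lock_type->lower_limit_reg, type_name, "lower-limit", true, false);
 *
 * reads the properties "lower-limit-reg-offset" and "lower-limit-reg-mask"
 * from the node (and, when with_value is true, "lower-limit-reg-value"),
 * panicking if a required property is absent.
 */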

static DTEntry
_dt_get_lock_group(DTEntry lock_regs_node, lock_group_t* lock_group, const char *group_name, uint32_t options)
{
	DTEntry group_node;

	// Find the lock group node.
	if (SecureDTLookupEntry(lock_regs_node, group_name, &group_node) != kSuccess) {
		panic("lock-regs: /chosen/lock-regs/%s not found", group_name);
	}

	lock_group->aperture_count = _dt_get_uint32_required(group_node, "aperture-count");

	if (lock_group->aperture_count > MAX_APERTURES) {
		panic("%s: %s %u exceeds maximum %u", group_name, "aperture-count", lock_group->aperture_count, MAX_APERTURES);
	}

	lock_group->aperture_size = _dt_get_uint32_required(group_node, "aperture-size");

	if ((lock_group->aperture_count > 0) && (lock_group->aperture_size == 0)) {
		panic("%s: have %u apertures, but 0 size", group_name, lock_group->aperture_count);
	}

	lock_group->plane_count = _dt_get_uint32_required(group_node, "plane-count");

	if (lock_group->plane_count > MAX_PLANES) {
		panic("%s: %s %u exceeds maximum %u", group_name, "plane-count", lock_group->plane_count, MAX_PLANES);
	}

	if (!_dt_get_uint32(group_node, "plane-stride", &lock_group->plane_stride)) {
		lock_group->plane_stride = 0;
	}

	if (lock_group->plane_count > 1) {
		uint32_t aperture_size;

		if (lock_group->plane_stride == 0) {
			panic("%s: plane-count (%u) > 1, but stride is 0/missing", group_name, lock_group->plane_count);
		}

		if (os_mul_overflow(lock_group->plane_count, lock_group->plane_stride, &aperture_size)
		    || (aperture_size > lock_group->aperture_size)) {
240 | panic("%s: aperture-size (%#x) is insufficent to cover plane-count (%#x) of plane-stride (%#x) bytes", group_name, lock_group->aperture_size, lock_group->plane_count, lock_group->plane_stride); | |
		}
	}

	uint64_t const *phys_bases = NULL;
	unsigned int prop_size;
	if (SecureDTGetProperty(group_node, "aperture-phys-addr", (const void**)&phys_bases, &prop_size) != kSuccess) {
		panic("%s: missing required %s", group_name, "aperture-phys-addr");
	}

	if (prop_size != lock_group->aperture_count * sizeof(lock_group->aperture_phys_addr[0])) {
		panic("%s: aperture-phys-addr size (%#x) != (aperture-count (%#x) * PA size (%#zx) = %#lx)",
		    group_name, prop_size, lock_group->aperture_count, sizeof(lock_group->aperture_phys_addr[0]),
		    lock_group->aperture_count * sizeof(lock_group->aperture_phys_addr[0]));
	}

	memcpy(lock_group->aperture_phys_addr, phys_bases, prop_size);

	if (options & LOCK_GROUP_HAS_CACHE_STATUS_REG) {
		_dt_get_lock_reg(group_node, &lock_group->cache_status_reg, group_name, "cache-status", true, true);
	}

#if HAS_IOA
	if (options & LOCK_GROUP_HAS_MASTER_LOCK_REG) {
		_dt_get_lock_reg(group_node, &lock_group->master_lock_reg, group_name, "master-lock", true, true);
	}
#endif

	return group_node;
}

static void
_dt_get_lock_type(DTEntry group_node, lock_type_t *lock_type, const char *group_name, const char *type_name, uint32_t options)
{
	DTEntry type_node;
	bool has_lock = options & LOCK_TYPE_HAS_LOCK_REG;

	// Find the lock type type_node.
	if (SecureDTLookupEntry(group_node, type_name, &type_node) != kSuccess) {
		panic("lock-regs: /chosen/lock-regs/%s/%s not found", group_name, type_name);
	}

	lock_type->page_size_shift = _dt_get_uint32_required(type_node, "page-size-shift");

	// Find all of the registers for this lock type.
	//               Parent     Register Descriptor            Parent Name  Reg Name       Required  Value
	_dt_get_lock_reg(type_node, &lock_type->lower_limit_reg,   type_name,   "lower-limit",   true,   false);
	_dt_get_lock_reg(type_node, &lock_type->upper_limit_reg,   type_name,   "upper-limit",   true,   false);
	_dt_get_lock_reg(type_node, &lock_type->lock_reg,          type_name,   "lock",        has_lock, true);
	_dt_get_lock_reg(type_node, &lock_type->enable_reg,        type_name,   "enable",        false,  true);
	_dt_get_lock_reg(type_node, &lock_type->write_disable_reg, type_name,   "write-disable", false,  true);
}

/*
 * find_lock_group_data:
 *
 * finds and gathers lock group (AMCC/IOA) data from device tree, returns it as lock_group_t
 *
 * called first time before IOKit start while still uniprocessor
 *
 */
static lock_group_t const * _Nonnull
find_lock_group_data(void)
{
	DTEntry lock_regs_node = NULL;
	DTEntry amcc_node = NULL;

	// Return the lock group data pointer if we already found and populated one.
	if (lock_regs_set) {
		return _lock_group;
	}

	if (SecureDTLookupEntry(NULL, "/chosen/lock-regs", &lock_regs_node) != kSuccess) {
		panic("lock-regs: /chosen/lock-regs not found (your iBoot or EDT may be too old)");
	}

	amcc_node = _dt_get_lock_group(lock_regs_node, &_lock_group[AMCC_LOCK_GROUP], "amcc", LOCK_GROUP_HAS_CACHE_STATUS_REG);
	_dt_get_lock_type(amcc_node, &_lock_group[AMCC_LOCK_GROUP].ctrr_a, "amcc", "amcc-ctrr-a", LOCK_TYPE_HAS_LOCK_REG);

#if HAS_IOA
	DTEntry ioa_node = _dt_get_lock_group(lock_regs_node, &_lock_group[IOA_LOCK_GROUP], "ioa", LOCK_GROUP_HAS_MASTER_LOCK_REG);
	_dt_get_lock_type(ioa_node, &_lock_group[IOA_LOCK_GROUP].ctrr_a, "ioa", "ioa-ctrr-a", 0);
#endif

	lock_regs_set = true;

	return _lock_group;
}

void
rorgn_stash_range(void)
{
#if DEVELOPMENT || DEBUG || CONFIG_DTRACE || CONFIG_CSR_FROM_DT
	boolean_t rorgn_disable = FALSE;

#if DEVELOPMENT || DEBUG
	PE_parse_boot_argn("-unsafe_kernel_text", &rorgn_disable, sizeof(rorgn_disable));
#endif

#if CONFIG_CSR_FROM_DT
	if (csr_unsafe_kernel_text) {
		rorgn_disable = true;
	}
#endif

	if (rorgn_disable) {
		/* take early out if boot arg present, don't query any machine registers to avoid
		 * dependency on amcc DT entry
		 */
		return;
	}
#endif
	lock_group_t const * const lock_group = find_lock_group_data();

	/* Get the lock group read-only region range values, and stash them into rorgn_begin, rorgn_end. */
	uint64_t rorgn_begin_page[MAX_LOCK_GROUPS][MAX_APERTURES][MAX_PLANES];
	uint64_t rorgn_end_page[MAX_LOCK_GROUPS][MAX_APERTURES][MAX_PLANES];

	for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
		for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
			const uint64_t amcc_pa = lock_group[lg].aperture_phys_addr[aperture];

			// VA space will be unmapped and freed after lockdown complete in rorgn_lockdown()
			lock_group_va[lg][aperture] = ml_io_map(amcc_pa, lock_group[lg].aperture_size);

			if (lock_group_va[lg][aperture] == 0) {
				panic("map aperture_phys_addr[%u]/%#x failed", aperture, lock_group[lg].aperture_size);
			}

			for (unsigned int plane = 0; plane < lock_group[lg].plane_count; plane++) {
				uint64_t reg_addr;

				reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.lower_limit_reg.reg_offset;
				rorgn_begin_page[lg][aperture][plane] = *(volatile uint32_t *)reg_addr;
				reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.upper_limit_reg.reg_offset;
				rorgn_end_page[lg][aperture][plane] = *(volatile uint32_t *)reg_addr;
			}
		}

		assert(rorgn_end_page[lg][0][0] > rorgn_begin_page[lg][0][0]);

		for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
			for (unsigned int plane = 0; plane < lock_group[lg].plane_count; plane++) {
				if ((rorgn_begin_page[lg][aperture][plane] != rorgn_begin_page[0][0][0])
				    || (rorgn_end_page[lg][aperture][plane] != rorgn_end_page[0][0][0])) {
					panic("Inconsistent memory config");
				}
			}
		}

		uint64_t page_bytes = 1ULL << lock_group[lg].ctrr_a.page_size_shift;

		/* rorgn_begin and rorgn_end are first and last byte inclusive of lock group read only region as determined by iBoot. */
		rorgn_begin = (rorgn_begin_page[0][0][0] << lock_group[lg].ctrr_a.page_size_shift) + gDramBase;
		rorgn_end = (rorgn_end_page[0][0][0] << lock_group[lg].ctrr_a.page_size_shift) + gDramBase + page_bytes - 1;
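
		/*
		 * Worked example (hypothetical numbers): with page_size_shift = 14
		 * (16 KiB pages), begin page 0x100 and end page 0x1ff, this yields
		 *   rorgn_begin = (0x100 << 14) + gDramBase = gDramBase + 0x400000
		 *   rorgn_end   = (0x1ff << 14) + gDramBase + 0x4000 - 1
		 *               = gDramBase + 0x7fffff
		 * i.e. both bounds are byte addresses and rorgn_end is inclusive.
		 */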
	}

	assert(segLOWESTRO && gVirtBase && gPhysBase);

	/* ctrr_begin and end are first and last bytes inclusive of MMU KTRR/CTRR region */
	ctrr_begin = kvtophys(segLOWESTRO);

#if defined(KERNEL_INTEGRITY_KTRR)

	/* __LAST is not part of the MMU KTRR region (it is however part of the AMCC read only region)
	 *
	 * +------------------+-----------+-----------------------------------+
	 * | Largest Address  | LAST      | <- AMCC RO Region End (rorgn_end) |
	 * +------------------+-----------+-----------------------------------+
	 * |                  | KLD       | <- KTRR RO Region End (ctrr_end)  |
	 * |                  | TEXT_EXEC |                                   |
	 * +------------------+-----------+-----------------------------------+
	 * |                  | ...       |                                   |
	 * +------------------+-----------+-----------------------------------+
	 * | Smallest Address | LOWEST    | <- KTRR/AMCC RO Region Begin      |
	 * |                  |           |    (ctrr_begin/rorgn_begin)       |
	 * +------------------+-----------+-----------------------------------+
	 *
	 */

	ctrr_end = kvtophys(segLASTB) - segSizeLASTDATACONST - 1;

	/* assert not booted from kernel collection */
	assert(!segHIGHESTRO);

	/* assert that __LAST segment containing privileged insns is only a single page */
	assert(segSizeLAST == PAGE_SIZE);

	/* assert that segLAST is contiguous and just after/above/numerically higher than KTRR end */
	assert((ctrr_end + 1) == kvtophys(segTEXTEXECB) + segSizeTEXTEXEC + segSizeKLD);

	/* ensure that iboot and xnu agree on the amcc rorgn range */
	assert((rorgn_begin == ctrr_begin) && (rorgn_end == (ctrr_end + segSizeLASTDATACONST + segSizeLAST)));
#elif defined(KERNEL_INTEGRITY_CTRR)

	/* __LAST is part of MMU CTRR region. Can't use the KTRR style method of making
	 * __pinst no execute because PXN applies with MMU off in CTRR.
	 *
	 * +------------------+---------------+------------------------------+
	 * | Largest Address  | LAST          | <- CTRR/AMCC RO Region End   |
	 * |                  |               |    (ctrr_end/rorgn_end)      |
	 * +------------------+---------------+------------------------------+
	 * |                  | PPLDATA_CONST |                              |
	 * |                  | PPLTEXT       |                              |
	 * |                  | KLD           |                              |
	 * |                  | TEXT_EXEC     |                              |
	 * +------------------+---------------+------------------------------+
	 * |                  | ...           |                              |
	 * +------------------+---------------+------------------------------+
	 * | Smallest Address | LOWEST        | <- CTRR/AMCC RO Region Begin |
	 * |                  |               |    (ctrr_begin/rorgn_begin)  |
	 * +------------------+---------------+------------------------------+
	 *
	 */

	if (segHIGHESTRO) {
		/*
		 * kernel collections may have additional kext RO data after kernel LAST
		 */
		assert(segLASTB + segSizeLAST <= segHIGHESTRO);
		ctrr_end = kvtophys(segHIGHESTRO) - 1;
	} else {
		ctrr_end = kvtophys(segLASTB) + segSizeLAST - 1;
	}

	/* ensure that iboot and xnu agree on the amcc rorgn range */
	assert((rorgn_begin == ctrr_begin) && (rorgn_end == ctrr_end));
#endif
}

static void
lock_all_lock_groups(lock_group_t const *lock_group, vm_offset_t begin, vm_offset_t end)
{
	uint64_t reg_addr;
	assert(lock_group);

	/*
	 * [x] - ensure all in flight writes are flushed to the lock group before enabling RO Region Lock
	 *
	 * begin and end are first and last byte inclusive of lock group read only region
	 */

	CleanPoC_DcacheRegion_Force(begin, end - begin + 1);

	for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
		for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
			/* lock planes in reverse order: plane 0 should be locked last */
			unsigned int plane = lock_group[lg].plane_count - 1;
			do {
				// Enable the protection region if the lock group defines an enable register.
				if (lock_group[lg].ctrr_a.enable_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.enable_reg.reg_offset;
					*(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.enable_reg.reg_value;
				}

				// Disable writes if the lock group defines a write disable register.
				if (lock_group[lg].ctrr_a.write_disable_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.write_disable_reg.reg_offset;
					*(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.write_disable_reg.reg_value;
				}

				// Lock the lock if the lock group defines a lock register.
				if (lock_group[lg].ctrr_a.lock_reg.reg_mask != 0) {
					reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.lock_reg.reg_offset;
					*(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.lock_reg.reg_value;
				}

				__builtin_arm_isb(ISB_SY);
			} while (plane-- > 0);
#if HAS_IOA
			// Lock the master lock if the lock group defines a master lock register.
			if (lock_group[lg].master_lock_reg.reg_mask != 0) {
				reg_addr = lock_group_va[lg][aperture] + lock_group[lg].master_lock_reg.reg_offset;
				*(volatile uint32_t *)reg_addr = lock_group[lg].master_lock_reg.reg_value;
			}
			__builtin_arm_isb(ISB_SY);
#endif
		}
	}
}

#if DEVELOPMENT || DEBUG
static void
assert_amcc_cache_disabled(lock_group_t const *lock_group)
{
	assert(lock_group);

	const lock_reg_t *cache_status_reg = &lock_group[AMCC_LOCK_GROUP].cache_status_reg;

	// If the platform does not define a cache status register, then we're done here.
	if (cache_status_reg->reg_mask == 0) {
		return;
	}

	for (unsigned int aperture = 0; aperture < lock_group[AMCC_LOCK_GROUP].aperture_count; aperture++) {
		for (unsigned int plane = 0; plane < lock_group[AMCC_LOCK_GROUP].plane_count; plane++) {
			uint64_t reg_addr = lock_group_va[AMCC_LOCK_GROUP][aperture] + (plane * lock_group[AMCC_LOCK_GROUP].plane_stride) + cache_status_reg->reg_offset;
			uint32_t reg_value = *(volatile uint32_t *)reg_addr;
			assert((reg_value & cache_status_reg->reg_mask) == cache_status_reg->reg_value);
		}
	}
}
#endif /* DEVELOPMENT || DEBUG */

/*
 * void rorgn_lockdown(void)
 *
 * Lock the MMU and AMCC RORegion within lower and upper boundaries if not already locked
 *
 * [ ] - ensure this is being called ASAP on secondary CPUs: KTRR programming and lockdown handled in
 *       start.s:start_cpu() for subsequent wake/resume of all cores
 */
void
rorgn_lockdown(void)
{
	boolean_t ctrr_disable = FALSE;

#if DEVELOPMENT || DEBUG
	PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_CSR_FROM_DT
	if (csr_unsafe_kernel_text) {
		ctrr_disable = true;
	}
#endif /* CONFIG_CSR_FROM_DT */

	if (!ctrr_disable) {
		lock_group_t const * const lock_group = find_lock_group_data();

#if DEVELOPMENT || DEBUG
		printf("RO Region Begin: %p End: %p\n", (void *)rorgn_begin, (void *)rorgn_end);
		printf("CTRR (MMU) Begin: %p End: %p, setting lockdown\n", (void *)ctrr_begin, (void *)ctrr_end);

		assert_amcc_cache_disabled(lock_group);
#endif /* DEVELOPMENT || DEBUG */

		// Lock the AMCC/IOA PIO lock registers.
		lock_all_lock_groups(lock_group, phystokv(rorgn_begin), phystokv(rorgn_end));

		// Unmap and free PIO VA space needed to lockdown the lock groups.
		for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
			for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
				ml_io_unmap(lock_group_va[lg][aperture], lock_group[lg].aperture_size);
			}
		}
	}

#if defined(KERNEL_INTEGRITY_CTRR)
	/* wake any threads blocked on cluster master lockdown */
	cpu_data_t *cdp;

	cdp = getCpuDatap();

	cdp->cpu_cluster_id = ml_get_cluster_number_local();
	assert(cdp->cpu_cluster_id <= (uint32_t)ml_get_max_cluster_number());
	ctrr_cluster_locked[cdp->cpu_cluster_id] = CTRR_LOCKED;
	thread_wakeup(&ctrr_cluster_locked[cdp->cpu_cluster_id]);
#endif
}

#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */