/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/cpu_data_internal.h>
#include <arm/dbgwrap.h>
#include <arm64/proc_reg.h>
#include <machine/atomic.h>
#include <pexpert/arm64/board_config.h>

#define DBGWRAP_REG_OFFSET 0
#define DBGWRAP_DBGHALT (1ULL << 31)
#define DBGWRAP_DBGACK (1ULL << 28)

#define EDDTRRX_REG_OFFSET 0x80
#define EDITR_REG_OFFSET 0x84
#define EDSCR_REG_OFFSET 0x88
#define EDSCR_TXFULL (1ULL << 29)
#define EDSCR_ITE (1ULL << 24)
#define EDSCR_MA (1ULL << 20)
#define EDSCR_ERR (1ULL << 6)
#define EDDTRTX_REG_OFFSET 0x8C
#define EDRCR_REG_OFFSET 0x90
#define EDRCR_CSE (1ULL << 2)
#define EDPRSR_REG_OFFSET 0x314
#define EDPRSR_OSLK (1ULL << 5)

#define MAX_EDITR_RETRIES 16

/* Older SoCs require 32-bit accesses for DBGWRAP;
 * newer ones require 64-bit accesses. */
#ifdef HAS_32BIT_DBGWRAP
typedef uint32_t dbgwrap_reg_t;
#else
typedef uint64_t dbgwrap_reg_t;
#endif

#if DEVELOPMENT || DEBUG
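/* Debug-only ring buffer of the instructions most recently stuffed through EDITR, kept for triage. */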
#define MAX_STUFFED_INSTRS 64
uint32_t stuffed_instrs[MAX_STUFFED_INSTRS];
volatile uint32_t stuffed_instr_count = 0;
#endif

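/* CPU number of the CPU that has claimed the halt sequence; (uint32_t)-1 while unclaimed. */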
static volatile uint32_t halt_from_cpu = (uint32_t)-1;

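/* Returns TRUE if the target CPU's DBGWRAP register has the halt-acknowledge (DBGACK) bit set. */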
boolean_t
ml_dbgwrap_cpu_is_halted(int cpu_index)
{
	cpu_data_t *cdp = cpu_datap(cpu_index);
	if ((cdp == NULL) || (cdp->coresight_base[CORESIGHT_UTT] == 0)) {
		return FALSE;
	}

	return (*(volatile dbgwrap_reg_t *)(cdp->coresight_base[CORESIGHT_UTT] + DBGWRAP_REG_OFFSET) & DBGWRAP_DBGACK) != 0;
}

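/* Polls the target CPU's DBGWRAP register until DBGACK is observed or timeout_ns elapses. */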
dbgwrap_status_t
ml_dbgwrap_wait_cpu_halted(int cpu_index, uint64_t timeout_ns)
{
	cpu_data_t *cdp = cpu_datap(cpu_index);
	if ((cdp == NULL) || (cdp->coresight_base[CORESIGHT_UTT] == 0)) {
		return DBGWRAP_ERR_UNSUPPORTED;
	}

	volatile dbgwrap_reg_t *dbgWrapReg = (volatile dbgwrap_reg_t *)(cdp->coresight_base[CORESIGHT_UTT] + DBGWRAP_REG_OFFSET);

	uint64_t interval;
	nanoseconds_to_absolutetime(timeout_ns, &interval);
	uint64_t deadline = mach_absolute_time() + interval;
	while (!(*dbgWrapReg & DBGWRAP_DBGACK)) {
		if (mach_absolute_time() > deadline) {
			return DBGWRAP_ERR_HALT_TIMEOUT;
		}
	}

	return DBGWRAP_SUCCESS;
}

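/* Initiates an external-debug halt of the target CPU by writing DBGHALT to its DBGWRAP register.
 * If timeout_ns is nonzero, also waits for the halt to be acknowledged. */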
dbgwrap_status_t
ml_dbgwrap_halt_cpu(int cpu_index, uint64_t timeout_ns)
{
	cpu_data_t *cdp = cpu_datap(cpu_index);
	if ((cdp == NULL) || (cdp->coresight_base[CORESIGHT_UTT] == 0)) {
		return DBGWRAP_ERR_UNSUPPORTED;
	}

	/* Only one cpu is allowed to initiate the halt sequence, to prevent cpus from cross-halting
	 * each other. The first cpu to request a halt may then halt any and all other cpus besides itself. */
	int curcpu = cpu_number();
	if (cpu_index == curcpu) {
		return DBGWRAP_ERR_SELF_HALT;
	}

	if (!os_atomic_cmpxchg(&halt_from_cpu, (uint32_t)-1, (unsigned int)curcpu, acq_rel) &&
	    (halt_from_cpu != (uint32_t)curcpu)) {
		return DBGWRAP_ERR_INPROGRESS;
	}

	volatile dbgwrap_reg_t *dbgWrapReg = (volatile dbgwrap_reg_t *)(cdp->coresight_base[CORESIGHT_UTT] + DBGWRAP_REG_OFFSET);

	if (ml_dbgwrap_cpu_is_halted(cpu_index)) {
		return DBGWRAP_WARN_ALREADY_HALTED;
	}

	/* Clear all other writable bits besides dbgHalt; none of the power-down or reset bits must be set. */
	*dbgWrapReg = DBGWRAP_DBGHALT;

	if (timeout_ns != 0) {
		dbgwrap_status_t stat = ml_dbgwrap_wait_cpu_halted(cpu_index, timeout_ns);
		return stat;
	} else {
		return DBGWRAP_SUCCESS;
	}
}

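/* Issues a single A64 instruction to the halted CPU through EDITR, waiting for EDSCR.ITE before each
 * attempt and retrying up to MAX_EDITR_RETRIES times if EDSCR.ERR is raised. Failures are reported
 * through *status; once *status is negative, further calls return immediately without stuffing. */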
static void
ml_dbgwrap_stuff_instr(cpu_data_t *cdp, uint32_t instr, uint64_t timeout_ns, dbgwrap_status_t *status)
{
	if (*status < 0) {
		return;
	}

	volatile uint32_t *editr = (volatile uint32_t *)(cdp->coresight_base[CORESIGHT_ED] + EDITR_REG_OFFSET);
	volatile uint32_t *edscr = (volatile uint32_t *)(cdp->coresight_base[CORESIGHT_ED] + EDSCR_REG_OFFSET);
	volatile uint32_t *edrcr = (volatile uint32_t *)(cdp->coresight_base[CORESIGHT_ED] + EDRCR_REG_OFFSET);

	int retries = 0;

	uint64_t interval;
	nanoseconds_to_absolutetime(timeout_ns, &interval);
	uint64_t deadline = mach_absolute_time() + interval;

#if DEVELOPMENT || DEBUG
	uint32_t stuffed_instr_index = os_atomic_inc(&stuffed_instr_count, relaxed);
	stuffed_instrs[(stuffed_instr_index - 1) % MAX_STUFFED_INSTRS] = instr;
#endif

	do {
		*editr = instr;
		volatile uint32_t edscr_val;
		while (!((edscr_val = *edscr) & EDSCR_ITE)) {
			if (mach_absolute_time() > deadline) {
				*status = DBGWRAP_ERR_INSTR_TIMEOUT;
				return;
			}
			if (edscr_val & EDSCR_ERR) {
				break;
			}
		}
		if (edscr_val & EDSCR_ERR) {
			/* If memory access mode was enabled by a debugger, clear it.
			 * This will cause ERR to be set on any attempt to use EDITR. */
			if (edscr_val & EDSCR_MA) {
				*edscr = edscr_val & ~EDSCR_MA;
			}
			*edrcr = EDRCR_CSE;
			++retries;
		} else {
			break;
		}
	} while (retries < MAX_EDITR_RETRIES);

	if (retries >= MAX_EDITR_RETRIES) {
		*status = DBGWRAP_ERR_INSTR_ERROR;
		return;
	}
}

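/* Waits for EDSCR.TXfull, indicating the halted CPU has written DBGDTR_EL0, then reassembles the
 * 64-bit value from EDDTRRX (upper 32 bits) and EDDTRTX (lower 32 bits). */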
static uint64_t
ml_dbgwrap_read_dtr(cpu_data_t *cdp, uint64_t timeout_ns, dbgwrap_status_t *status)
{
	if (*status < 0) {
		return 0;
	}

	uint64_t interval;
	nanoseconds_to_absolutetime(timeout_ns, &interval);
	uint64_t deadline = mach_absolute_time() + interval;

	/* Per armv8 debug spec, writes to DBGDTR_EL0 on target cpu will set EDSCR.TXFull,
	 * with bits 63:32 available in EDDTRRX and bits 31:0 available in EDDTRTX. */
	volatile uint32_t *edscr = (volatile uint32_t *)(cdp->coresight_base[CORESIGHT_ED] + EDSCR_REG_OFFSET);

	while (!(*edscr & EDSCR_TXFULL)) {
		if (*edscr & EDSCR_ERR) {
			*status = DBGWRAP_ERR_INSTR_ERROR;
			return 0;
		}
		if (mach_absolute_time() > deadline) {
			*status = DBGWRAP_ERR_INSTR_TIMEOUT;
			return 0;
		}
	}

	uint32_t dtrrx = *((volatile uint32_t*)(cdp->coresight_base[CORESIGHT_ED] + EDDTRRX_REG_OFFSET));
	uint32_t dtrtx = *((volatile uint32_t*)(cdp->coresight_base[CORESIGHT_ED] + EDDTRTX_REG_OFFSET));

	return ((uint64_t)dtrrx << 32) | dtrtx;
}

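/* Halts the target CPU and captures its general-purpose registers plus fp, lr, sp, pc, and cpsr by
 * stuffing MSR/MRS instructions through EDITR and reading each value back via the debug DTR. */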
dbgwrap_status_t
ml_dbgwrap_halt_cpu_with_state(int cpu_index, uint64_t timeout_ns, dbgwrap_thread_state_t *state)
{
	cpu_data_t *cdp = cpu_datap(cpu_index);
	if ((cdp == NULL) || (cdp->coresight_base[CORESIGHT_ED] == 0)) {
		return DBGWRAP_ERR_UNSUPPORTED;
	}

	/* Ensure memory-mapped coresight registers can be written */
	*((volatile uint32_t *)(cdp->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR)) = ARM_DBG_LOCK_ACCESS_KEY;

	dbgwrap_status_t status = ml_dbgwrap_halt_cpu(cpu_index, timeout_ns);

	/* A core that is not fully powered (e.g. idling in wfi) can still be halted; the dbgwrap
	 * register and certain coresight registers such as EDPRSR are in the always-on domain.
	 * However, EDSCR/EDITR are not in the always-on domain and will generate a parity abort
	 * on read. EDPRSR can be safely read in all cases, and the OS lock defaults to being set
	 * but we clear it first thing, so use that to detect the offline state. */
	if (*((volatile uint32_t *)(cdp->coresight_base[CORESIGHT_ED] + EDPRSR_REG_OFFSET)) & EDPRSR_OSLK) {
		bzero(state, sizeof(*state));
		return DBGWRAP_WARN_CPU_OFFLINE;
	}

	uint32_t instr;

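	/* The constants below hand-encode A64 system-register accesses: in each MSR, the op0/op1/CRn/CRm/op2
	 * fields (2, 3, 0, 4, 0) select DBGDTR_EL0 and the low five bits select the source register, so every
	 * stuffed write lands in the debug transfer register for ml_dbgwrap_read_dtr() to collect. */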
	for (unsigned int i = 0; i < (sizeof(state->x) / sizeof(state->x[0])); ++i) {
		instr = (0xD51U << 20) | (2 << 19) | (3 << 16) | (4 << 8) | i; // msr DBGDTR0, x<i>
		ml_dbgwrap_stuff_instr(cdp, instr, timeout_ns, &status);
		state->x[i] = ml_dbgwrap_read_dtr(cdp, timeout_ns, &status);
	}

	instr = (0xD51U << 20) | (2 << 19) | (3 << 16) | (4 << 8) | 29; // msr DBGDTR0, fp
	ml_dbgwrap_stuff_instr(cdp, instr, timeout_ns, &status);
	state->fp = ml_dbgwrap_read_dtr(cdp, timeout_ns, &status);

	instr = (0xD51U << 20) | (2 << 19) | (3 << 16) | (4 << 8) | 30; // msr DBGDTR0, lr
	ml_dbgwrap_stuff_instr(cdp, instr, timeout_ns, &status);
	state->lr = ml_dbgwrap_read_dtr(cdp, timeout_ns, &status);

	/* Stack pointer (x31) can't be used as a register operand for msr; register 31 is treated as xzr
	 * rather than sp when used as the transfer operand there. Instead, load sp into a GPR
	 * we've already saved off and then store that register in the DTR. I've chosen x18
	 * as the temporary GPR since it's reserved by the arm64 ABI and unused by xnu, so overwriting
	 * it poses the least risk of causing trouble for external debuggers. */

	instr = (0x91U << 24) | (31 << 5) | 18; // mov x18, sp
	ml_dbgwrap_stuff_instr(cdp, instr, timeout_ns, &status);
	instr = (0xD51U << 20) | (2 << 19) | (3 << 16) | (4 << 8) | 18; // msr DBGDTR0, x18
	ml_dbgwrap_stuff_instr(cdp, instr, timeout_ns, &status);
	state->sp = ml_dbgwrap_read_dtr(cdp, timeout_ns, &status);

	/* reading PC (e.g. through adr) is undefined in debug state. Instead use DLR_EL0,
	 * which contains PC at time of entry into debug state. */

	instr = (0xD53U << 20) | (1 << 19) | (3 << 16) | (4 << 12) | (5 << 8) | (1 << 5) | 18; // mrs x18, DLR_EL0
	ml_dbgwrap_stuff_instr(cdp, instr, timeout_ns, &status);
	instr = (0xD51U << 20) | (2 << 19) | (3 << 16) | (4 << 8) | 18; // msr DBGDTR0, x18
	ml_dbgwrap_stuff_instr(cdp, instr, timeout_ns, &status);
	state->pc = ml_dbgwrap_read_dtr(cdp, timeout_ns, &status);

	/* reading CPSR is undefined in debug state. Instead use DSPSR_EL0,
	 * which contains CPSR at time of entry into debug state. */
	instr = (0xD53U << 20) | (1 << 19) | (3 << 16) | (4 << 12) | (5 << 8) | 18; // mrs x18, DSPSR_EL0
	ml_dbgwrap_stuff_instr(cdp, instr, timeout_ns, &status);
	instr = (0xD51U << 20) | (2 << 19) | (3 << 16) | (4 << 8) | 18; // msr DBGDTR0, x18
	ml_dbgwrap_stuff_instr(cdp, instr, timeout_ns, &status);
	state->cpsr = (uint32_t)ml_dbgwrap_read_dtr(cdp, timeout_ns, &status);

	return status;
}