2 * Copyright (c) 2007-2018 Apple Inc. All rights reserved.
7 * The contents of this file are subject to the terms of the
8 * Common Development and Distribution License, Version 1.0 only
9 * (the "License"). You may not use this file except in compliance
12 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
13 * or http://www.opensolaris.org/os/licensing.
14 * See the License for the specific language governing permissions
15 * and limitations under the License.
17 * When distributing Covered Code, include this CDDL HEADER in each
18 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
19 * If applicable, add the following below this CDDL HEADER, with the
20 * fields enclosed by brackets "[]" replaced with your own identifying
21 * information: Portions Copyright [yyyy] [name of copyright owner]
26 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
27 * Use is subject to license terms.
30 #include <kern/thread.h>
31 #include <mach/thread_status.h>
32 #include <arm/proc_reg.h>
33 #include <arm/caches_internal.h>
35 #include <mach-o/loader.h>
36 #include <mach-o/nlist.h>
37 #include <libkern/kernel_mach_header.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/errno.h>
43 #include <sys/ioctl.h>
45 #include <sys/fcntl.h>
46 #include <miscfs/devfs/devfs.h>
48 #include <sys/dtrace.h>
49 #include <sys/dtrace_impl.h>
52 #include <sys/dtrace_glue.h>
54 #if __has_include(<ptrauth.h>)
#define DTRACE_INVOP_PUSH_FRAME 11

#define DTRACE_INVOP_NOP_SKIP 4
#define DTRACE_INVOP_ADD_FP_SP_SKIP 4

#define DTRACE_INVOP_POP_PC_SKIP 2

/*
 * A frame push, i.e. either of:
 * stp fp, lr, [sp, #val]
 * stp fp, lr, [sp, #val]!
 */
#define FBT_IS_ARM64_FRAME_PUSH(x) \
	(((x) & 0xffc07fff) == 0xa9007bfd || ((x) & 0xffc07fff) == 0xa9807bfd)

/*
 * Any register-pair push, i.e. either of:
 * stp Xt1, Xt2, [sp, #val]
 * stp Xt1, Xt2, [sp, #val]!
 */
#define FBT_IS_ARM64_PUSH(x) \
	(((x) & 0xffc003e0) == 0xa90003e0 || ((x) & 0xffc003e0) == 0xa98003e0)

/*
 * A frame pop, i.e. either of:
 * ldp fp, lr, [sp, #val]
 * ldp fp, lr, [sp], #val
 */
#define FBT_IS_ARM64_FRAME_POP(x) \
	(((x) & 0xffc07fff) == 0xa9407bfd || ((x) & 0xffc07fff) == 0xa8c07bfd)

#define FBT_IS_ARM64_ADD_FP_SP(x)	(((x) & 0xffc003ff) == 0x910003fd)	/* add fp, sp, #val (add fp, sp, #0 == mov fp, sp) */
#define FBT_IS_ARM64_RET(x)	(((x) == 0xd65f03c0) || ((x) == 0xd65f0fff))	/* ret, retab */

/* Unconditional immediate branch (b #imm). Only small positive displacements match this mask. */
#define FBT_B_MASK	0xff000000
#define FBT_B_IMM_MASK	0x00ffffff
#define FBT_B_INSTR	0x14000000

/* Arguments fully parenthesized to avoid precedence surprises at expansion sites. */
#define FBT_IS_ARM64_B_INSTR(x)	(((x) & FBT_B_MASK) == FBT_B_INSTR)
#define FBT_GET_ARM64_B_IMM(x)	(((x) & FBT_B_IMM_MASK) << 2)	/* imm26 is in units of 4-byte instructions */

#define FBT_PATCHVAL	0xe7eeee7e
#define FBT_AFRAMES_ENTRY	7
#define FBT_AFRAMES_RETURN	7

#define FBT_ENTRY	"entry"
#define FBT_RETURN	"return"
#define FBT_ADDR2NDX(addr)	((((uintptr_t)(addr)) >> 4) & fbt_probetab_mask)
105 extern dtrace_provider_id_t fbt_id
;
106 extern fbt_probe_t
**fbt_probetab
;
107 extern int fbt_probetab_mask
;
109 kern_return_t
fbt_perfCallback(int, struct arm_saved_state
*, __unused
int, __unused
int);
112 fbt_invop(uintptr_t addr
, uintptr_t * stack
, uintptr_t rval
)
114 fbt_probe_t
*fbt
= fbt_probetab
[FBT_ADDR2NDX(addr
)];
116 for (; fbt
!= NULL
; fbt
= fbt
->fbtp_hashnext
) {
117 if ((uintptr_t) fbt
->fbtp_patchpoint
== addr
) {
118 if (0 == CPU
->cpu_dtrace_invop_underway
) {
119 CPU
->cpu_dtrace_invop_underway
= 1; /* Race not possible on
120 * this per-cpu state */
123 * Stack looks like this:
128 * Extra args for callee
129 * ------------------------
131 * Frame from traced function: <previous sp (e.g. 0x1000), return address>
133 * Missing as the return probe has already popped the frame in the callee and
134 * traps with LR set to the return address in caller.
135 * ------------------------
137 * ------------------------
138 * Frame from trap handler: <previous sp (e.g. 0x1000) , traced PC >
139 * The traced function has either never pushed the frame
140 * or already popped it. So there is no frame in the
141 * backtrace pointing to the frame on the stack containing
142 * the LR in the caller.
143 * ------------------------
146 * | stack grows this way
152 * cpu_dtrace_caller compensates for fact that the LR is not stored on stack as explained
153 * above. When walking the stack, when we reach the frame where we extract a PC in the
154 * patched function, we put the cpu_dtrace_caller in the backtrace instead. The next
155 * frame we extract will be in the caller's caller, so we output a backtrace starting
156 * at the caller and going sequentially up the stack.
158 arm_saved_state_t
*regs
= (arm_saved_state_t
*)(&((arm_context_t
*)stack
)->ss
);
160 CPU
->cpu_dtrace_caller
= get_saved_state_lr(regs
);
162 /* When fbt_roffset is non-zero, we know we are handling a return probe point. */
163 if (fbt
->fbtp_roffset
== 0) {
164 dtrace_probe(fbt
->fbtp_id
, get_saved_state_reg(regs
, 0), get_saved_state_reg(regs
, 1),
165 get_saved_state_reg(regs
, 2), get_saved_state_reg(regs
, 3), get_saved_state_reg(regs
, 4));
167 dtrace_probe(fbt
->fbtp_id
, fbt
->fbtp_roffset
, rval
, 0, 0, 0);
170 CPU
->cpu_dtrace_caller
= 0;
171 CPU
->cpu_dtrace_invop_underway
= 0;
175 * On other architectures, we return a DTRACE constant to let the callback function
176 * know what was replaced. On the ARM, since the function prologue/epilogue machine code
177 * can vary, we need the actual bytes of the instruction, so return the savedval instead.
179 return fbt
->fbtp_savedval
;
186 #define IS_USER_TRAP(regs) (PSR64_IS_USER(get_saved_state_cpsr(regs)))
187 #define T_INVALID_OPCODE EXC_BAD_INSTRUCTION
188 #define FBT_EXCEPTION_CODE T_INVALID_OPCODE
193 struct arm_saved_state
* regs
,
194 __unused
int unused1
,
195 __unused
int unused2
)
197 kern_return_t retval
= KERN_FAILURE
;
199 if (FBT_EXCEPTION_CODE
== trapno
&& !IS_USER_TRAP(regs
)) {
200 boolean_t oldlevel
= 0;
201 machine_inst_t emul
= 0;
205 oldlevel
= ml_set_interrupts_enabled(FALSE
);
208 "Ldtrace_invop_callsite_pre_label:\n"
210 ".private_extern _dtrace_invop_callsite_pre\n"
211 "_dtrace_invop_callsite_pre:\n"
212 " .quad Ldtrace_invop_callsite_pre_label\n"
216 emul
= dtrace_invop(get_saved_state_pc(regs
), (uintptr_t*) regs
, get_saved_state_reg(regs
, 0));
219 "Ldtrace_invop_callsite_post_label:\n"
221 ".private_extern _dtrace_invop_callsite_post\n"
222 "_dtrace_invop_callsite_post:\n"
223 " .quad Ldtrace_invop_callsite_post_label\n"
227 if (emul
== DTRACE_INVOP_NOP
) {
229 * Skip over the patched NOP planted by sdt
231 add_saved_state_pc(regs
, DTRACE_INVOP_NOP_SKIP
);
232 retval
= KERN_SUCCESS
;
233 } else if (FBT_IS_ARM64_ADD_FP_SP(emul
)) {
234 /* retrieve the value to add */
235 uint64_t val
= (emul
>> 10) & 0xfff;
239 sp
= get_saved_state_sp(regs
);
242 * emulate the instruction:
245 assert(sp
< (UINT64_MAX
- val
));
246 set_saved_state_fp(regs
, sp
+ val
);
248 /* skip over the bytes of the patched instruction */
249 add_saved_state_pc(regs
, DTRACE_INVOP_ADD_FP_SP_SKIP
);
251 retval
= KERN_SUCCESS
;
252 } else if (FBT_IS_ARM64_RET(emul
)) {
253 lr
= get_saved_state_lr(regs
);
254 #if __has_feature(ptrauth_calls)
255 lr
= (user_addr_t
) ptrauth_strip((void *)lr
, ptrauth_key_return_address
);
257 set_saved_state_pc(regs
, lr
);
258 retval
= KERN_SUCCESS
;
259 } else if (FBT_IS_ARM64_B_INSTR(emul
)) {
260 imm
= FBT_GET_ARM64_B_IMM(emul
);
261 add_saved_state_pc(regs
, imm
);
262 retval
= KERN_SUCCESS
;
263 } else if (emul
== FBT_PATCHVAL
) {
264 /* Means we encountered an error but handled it, try same inst again */
265 retval
= KERN_SUCCESS
;
267 retval
= KERN_FAILURE
;
270 ml_set_interrupts_enabled(oldlevel
);
277 fbt_provide_probe(struct modctl
*ctl
, const char *modname
, const char* symbolName
, machine_inst_t
* symbolStart
, machine_inst_t
*instrHigh
)
282 fbt_probe_t
*newfbt
, *retfbt
, *entryfbt
;
283 machine_inst_t
*instr
, *pushinstr
= NULL
, *limit
, theInstr
;
284 int foundPushLR
, savedRegs
;
287 * Guard against null and invalid symbols
289 if (!symbolStart
|| !instrHigh
|| instrHigh
< symbolStart
) {
290 kprintf("dtrace: %s has an invalid address\n", symbolName
);
295 * Assume the compiler doesn't schedule instructions in the prologue.
299 limit
= (machine_inst_t
*)instrHigh
;
301 assert(sizeof(*instr
) == 4);
303 for (instr
= symbolStart
, theInstr
= 0; instr
< instrHigh
; instr
++) {
305 * Count the number of time we pushed something onto the stack
306 * before hitting a frame push. That will give us an estimation
307 * of how many stack pops we should expect when looking for the
311 if (FBT_IS_ARM64_FRAME_PUSH(theInstr
)) {
316 if (foundPushLR
&& (FBT_IS_ARM64_ADD_FP_SP(theInstr
))) {
317 /* Guard against a random setting of fp from sp, we make sure we found the push first */
320 if (FBT_IS_ARM64_RET(theInstr
)) { /* We've gone too far, bail. */
323 if (FBT_IS_ARM64_FRAME_POP(theInstr
)) { /* We've gone too far, bail. */
328 if (!(foundPushLR
&& (FBT_IS_ARM64_ADD_FP_SP(theInstr
)))) {
332 thisid
= dtrace_probe_lookup(fbt_id
, modname
, symbolName
, FBT_ENTRY
);
333 newfbt
= kmem_zalloc(sizeof(fbt_probe_t
), KM_SLEEP
);
334 newfbt
->fbtp_next
= NULL
;
335 strlcpy((char *)&(newfbt
->fbtp_name
), symbolName
, MAX_FBTP_NAME_CHARS
);
339 * The dtrace_probe previously existed, so we have to hook
340 * the newfbt entry onto the end of the existing fbt's
342 * If we find an fbt entry that was previously patched to
343 * fire, (as indicated by the current patched value), then
344 * we want to enable this newfbt on the spot.
346 entryfbt
= dtrace_probe_arg(fbt_id
, thisid
);
347 ASSERT(entryfbt
!= NULL
);
348 for (; entryfbt
!= NULL
; entryfbt
= entryfbt
->fbtp_next
) {
349 if (entryfbt
->fbtp_currentval
== entryfbt
->fbtp_patchval
) {
353 if (entryfbt
->fbtp_next
== NULL
) {
354 entryfbt
->fbtp_next
= newfbt
;
355 newfbt
->fbtp_id
= entryfbt
->fbtp_id
;
361 * The dtrace_probe did not previously exist, so we
362 * create it and hook in the newfbt. Since the probe is
363 * new, we obviously do not need to enable it on the spot.
365 newfbt
->fbtp_id
= dtrace_probe_create(fbt_id
, modname
, symbolName
, FBT_ENTRY
, FBT_AFRAMES_ENTRY
, newfbt
);
369 newfbt
->fbtp_patchpoint
= instr
;
370 newfbt
->fbtp_ctl
= ctl
;
371 newfbt
->fbtp_loadcnt
= ctl
->mod_loadcnt
;
372 newfbt
->fbtp_rval
= DTRACE_INVOP_PUSH_FRAME
;
373 newfbt
->fbtp_savedval
= theInstr
;
374 newfbt
->fbtp_patchval
= FBT_PATCHVAL
;
375 newfbt
->fbtp_currentval
= 0;
376 newfbt
->fbtp_hashnext
= fbt_probetab
[FBT_ADDR2NDX(instr
)];
377 fbt_probetab
[FBT_ADDR2NDX(instr
)] = newfbt
;
380 fbt_enable(NULL
, newfbt
->fbtp_id
, newfbt
);
384 * The fbt entry chain is in place, one entry point per symbol.
385 * The fbt return chain can have multiple return points per
387 * Here we find the end of the fbt return chain.
392 thisid
= dtrace_probe_lookup(fbt_id
, modname
, symbolName
, FBT_RETURN
);
395 /* The dtrace_probe previously existed, so we have to
396 * find the end of the existing fbt chain. If we find
397 * an fbt return that was previously patched to fire,
398 * (as indicated by the currrent patched value), then
399 * we want to enable any new fbts on the spot.
401 retfbt
= dtrace_probe_arg(fbt_id
, thisid
);
402 ASSERT(retfbt
!= NULL
);
403 for (; retfbt
!= NULL
; retfbt
= retfbt
->fbtp_next
) {
404 if (retfbt
->fbtp_currentval
== retfbt
->fbtp_patchval
) {
407 if (retfbt
->fbtp_next
== NULL
) {
417 * Go back to the start of the function, in case
418 * the compiler emitted pcrel data loads
419 * before FP was adjusted.
421 instr
= pushinstr
+ 1;
423 if (instr
>= limit
) {
427 /* XXX FIXME ... extra jump table detection? */
430 * OK, it's an instruction.
434 /* Walked onto the start of the next routine? If so, bail out from this function */
435 if (FBT_IS_ARM64_FRAME_PUSH(theInstr
)) {
437 kprintf("dtrace: fbt: No return probe for %s, walked to next routine at 0x%016llx\n", symbolName
, (uint64_t)instr
);
442 /* XXX fancy detection of end of function using PC-relative loads */
446 * ldp fp, lr, [sp], #val
447 * ldp fp, lr, [sp, #val]
449 if (!FBT_IS_ARM64_FRAME_POP(theInstr
)) {
454 /* go to the next instruction */
457 /* Scan ahead for a ret or a branch outside the function */
458 for (; instr
< limit
; instr
++) {
460 if (FBT_IS_ARM64_RET(theInstr
)) {
463 if (FBT_IS_ARM64_B_INSTR(theInstr
)) {
464 machine_inst_t
*dest
= instr
+ FBT_GET_ARM64_B_IMM(theInstr
);
466 * Check whether the destination of the branch
467 * is outside of the function
469 if (dest
>= limit
|| dest
< symbolStart
) {
475 if (!FBT_IS_ARM64_RET(theInstr
) && !FBT_IS_ARM64_B_INSTR(theInstr
)) {
479 newfbt
= kmem_zalloc(sizeof(fbt_probe_t
), KM_SLEEP
);
480 newfbt
->fbtp_next
= NULL
;
481 strlcpy((char *)&(newfbt
->fbtp_name
), symbolName
, MAX_FBTP_NAME_CHARS
);
483 if (retfbt
== NULL
) {
484 newfbt
->fbtp_id
= dtrace_probe_create(fbt_id
, modname
,
485 symbolName
, FBT_RETURN
, FBT_AFRAMES_RETURN
, newfbt
);
487 retfbt
->fbtp_next
= newfbt
;
488 newfbt
->fbtp_id
= retfbt
->fbtp_id
;
492 newfbt
->fbtp_patchpoint
= instr
;
493 newfbt
->fbtp_ctl
= ctl
;
494 newfbt
->fbtp_loadcnt
= ctl
->mod_loadcnt
;
496 ASSERT(FBT_IS_ARM64_RET(theInstr
) || FBT_IS_ARM64_B_INSTR(theInstr
));
497 newfbt
->fbtp_rval
= DTRACE_INVOP_RET
;
498 newfbt
->fbtp_roffset
= (uintptr_t) ((uint8_t*) instr
- (uint8_t *)symbolStart
);
499 newfbt
->fbtp_savedval
= theInstr
;
500 newfbt
->fbtp_patchval
= FBT_PATCHVAL
;
501 newfbt
->fbtp_currentval
= 0;
502 newfbt
->fbtp_hashnext
= fbt_probetab
[FBT_ADDR2NDX(instr
)];
503 fbt_probetab
[FBT_ADDR2NDX(instr
)] = newfbt
;
506 fbt_enable(NULL
, newfbt
->fbtp_id
, newfbt
);