/*
 * Copyright (c) 2007-2018 Apple Inc. All rights reserved.
 */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/fasttrap_isa.h>
#include <sys/fasttrap_impl.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <kern/task.h>
#include <arm/thread.h>

#include <sys/dtrace_ptss.h>

#if __has_include(<ptrauth.h>)
#include <ptrauth.h>
#endif

extern dtrace_id_t dtrace_probeid_error;

/* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
#define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */

extern uint8_t dtrace_decode_arm64(uint32_t instr);

#define IS_ARM64_NOP(x) ((x) == 0xD503201F)
/* Marker for is-enabled probes */
#define IS_ARM64_IS_ENABLED(x) ((x) == 0xD2800000)
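
/*
 * 0xD503201F is the A64 NOP encoding; 0xD2800000 decodes as "movz x0, #0",
 * the marker emitted at USDT is-enabled probe sites. When such a site is hit,
 * fasttrap simply sets x0 = 1 and steps over it (see
 * fasttrap_pid_probe_handle_patched_instr64()).
 */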

int
fasttrap_tracepoint_init(proc_t *p, fasttrap_tracepoint_t *tp,
    user_addr_t pc, fasttrap_probe_type_t type)
{
#pragma unused(type)
	uint32_t instr = 0;

	/*
	 * Read the instruction at the given address out of the process's
	 * address space. We don't have to worry about a debugger
	 * changing this instruction before we overwrite it with our trap
	 * instruction since P_PR_LOCK is set. Since instructions can span
	 * pages, we potentially read the instruction in two parts. If the
	 * second part fails, we just zero out that part of the instruction.
	 */
	/*
	 * APPLE NOTE: Of course, we do not have a P_PR_LOCK, so this is racy...
	 */

	if (uread(p, &instr, 4, pc) != 0) {
		return -1;
	}

	tp->ftt_instr = instr;

	if (tp->ftt_fntype != FASTTRAP_FN_DONE_INIT) {
		switch (tp->ftt_fntype) {
		case FASTTRAP_FN_UNKNOWN:
		case FASTTRAP_FN_ARM64:
		case FASTTRAP_FN_ARM64_32:
			/*
			 * On arm64 there is no distinction between
			 * arm vs. thumb mode instruction types.
			 */
			tp->ftt_fntype = FASTTRAP_FN_DONE_INIT;
			break;

		case FASTTRAP_FN_USDT:
			if (IS_ARM64_NOP(instr) || IS_ARM64_IS_ENABLED(instr)) {
				tp->ftt_fntype = FASTTRAP_FN_DONE_INIT;
			} else {
				/*
				 * Shouldn't reach here - this means we don't
				 * recognize the instruction at one of the
				 * USDT probe locations
				 */
				return -1;
			}

			break;

		case FASTTRAP_FN_ARM:
		case FASTTRAP_FN_THUMB:
		default:
			/*
			 * If we get an arm or thumb mode type
			 * then we are clearly in the wrong path.
			 */
			return -1;
		}
	}

	tp->ftt_type = dtrace_decode_arm64(instr);

	if (tp->ftt_type == FASTTRAP_T_ARM64_EXCLUSIVE_MEM) {
		kprintf("Detected attempt to place DTrace probe on exclusive memory instruction (pc = 0x%llx); refusing to trace (or else the exclusive operation could never succeed).\n", pc);
		tp->ftt_type = FASTTRAP_T_INV;
		return -1;
	}

	if (tp->ftt_type == FASTTRAP_T_INV) {
		/* This is an instruction we either don't recognize or can't instrument */
		printf("dtrace: fasttrap init64: Unrecognized instruction: %08x at %08llx\n", instr, pc);
		return -1;
	}

	return 0;
}

int
fasttrap_tracepoint_install(proc_t *p, fasttrap_tracepoint_t *tp)
{
	uint32_t instr;
	int size;

	if (proc_is64bit_data(p)) {
		size = 4;
		instr = FASTTRAP_ARM64_INSTR;
	} else {
		return -1;
	}

	if (uwrite(p, &instr, size, tp->ftt_pc) != 0) {
		return -1;
	}

	tp->ftt_installed = 1;

	return 0;
}

int
fasttrap_tracepoint_remove(proc_t *p, fasttrap_tracepoint_t *tp)
{
	uint32_t instr;
	int size = 4;

	if (proc_is64bit_data(p)) {
		/*
		 * Distinguish between read or write failures and a changed
		 * instruction.
		 */
		if (uread(p, &instr, size, tp->ftt_pc) != 0) {
			goto end;
		}

		if (instr != FASTTRAP_ARM64_INSTR) {
			goto end;
		}
	} else {
		return -1;
	}

	if (uwrite(p, &tp->ftt_instr, size, tp->ftt_pc) != 0) {
		return -1;
	}

end:
	tp->ftt_installed = 0;

	return 0;
}
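
/*
 * Install and remove are intentionally asymmetric on failure: install
 * overwrites the traced instruction with FASTTRAP_ARM64_INSTR, while remove
 * restores tp->ftt_instr only if the trap word is still present. If the
 * instruction at ftt_pc has changed underneath us, remove leaves it alone and
 * just marks the tracepoint as uninstalled.
 */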

static void
fasttrap_return_common(proc_t *p, arm_saved_state_t *regs, user_addr_t pc, user_addr_t new_pc)
{
	pid_t pid = p->p_pid;
	fasttrap_tracepoint_t *tp;
	fasttrap_bucket_t *bucket;
	fasttrap_id_t *id;
	lck_mtx_t *pid_mtx;
	int retire_tp = 1;
	pid_mtx = &cpu_core[CPU->cpu_id].cpuc_pid_lock;
	lck_mtx_lock(pid_mtx);
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
		    tp->ftt_proc->ftpc_acount != 0) {
			break;
		}
	}

	/*
	 * Don't sweat it if we can't find the tracepoint again; unlike
	 * when we're in fasttrap_pid_probe(), finding the tracepoint here
	 * is not essential to the correct execution of the process.
	 */
	if (tp == NULL) {
		lck_mtx_unlock(pid_mtx);
		return;
	}

	for (id = tp->ftt_retids; id != NULL; id = id->fti_next) {
		fasttrap_probe_t *probe = id->fti_probe;
		/* ARM64_TODO - check for FASTTRAP_T_RET */
		if ((tp->ftt_type != FASTTRAP_T_ARM64_RET && tp->ftt_type != FASTTRAP_T_ARM64_RETAB) &&
		    new_pc - probe->ftp_faddr < probe->ftp_fsize) {
			continue;
		}
		if (probe->ftp_prov->ftp_provider_type == DTFTP_PROVIDER_ONESHOT) {
			if (os_atomic_xchg(&probe->ftp_triggered, 1, relaxed)) {
				/* already triggered */
				continue;
			}
		}
		/*
		 * If we have at least one probe associated that
		 * is not a oneshot probe, don't remove the
		 * tracepoint
		 */
		else {
			retire_tp = 0;
		}

#if defined(XNU_TARGET_OS_OSX)
		if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) {
			dtrace_probe(dtrace_probeid_error, 0 /* state */, id->fti_probe->ftp_id,
			    1 /* ndx */, -1 /* offset */, DTRACEFLT_UPRIV);
#else
		if (FALSE) {
#endif /* defined(XNU_TARGET_OS_OSX) */
		} else {
			dtrace_probe(probe->ftp_id,
			    pc - id->fti_probe->ftp_faddr,
			    saved_state64(regs)->x[0], 0, 0, 0);
		}
	}
	if (retire_tp) {
		fasttrap_tracepoint_retire(p, tp);
	}

	lck_mtx_unlock(pid_mtx);
}

#if DEBUG
__dead2
#endif
static void
fasttrap_sigsegv(proc_t *p, uthread_t t, user_addr_t addr, arm_saved_state_t *regs)
{
	/* TODO: This function isn't implemented yet. In debug mode, panic the system to
	 * find out why we're hitting this point. In other modes, kill the process.
	 */
#if DEBUG
#pragma unused(p,t,addr,regs)
	panic("fasttrap: sigsegv not yet implemented");
#else
#pragma unused(p,t,addr)
	/* Kill the process */
	set_saved_state_pc(regs, 0);
#endif

#if 0
	proc_lock(p);

	/* Set fault address and mark signal */
	t->uu_code = addr;
	t->uu_siglist |= sigmask(SIGSEGV);

	/*
	 * XXX These two lines may be redundant; if not, then we need
	 * XXX to potentially set the data address in the machine
	 * XXX specific thread state structure to indicate the address.
	 */
	t->uu_exception = KERN_INVALID_ADDRESS; /* SIGSEGV */
	t->uu_subcode = 0; /* XXX pad */

	proc_unlock(p);

	/* raise signal */
	signal_setast(t->uu_context.vc_thread);
#endif
}

static void
fasttrap_usdt_args64(fasttrap_probe_t *probe, arm_saved_state64_t *regs64, int argc,
    uint64_t *argv)
{
	int i, x, cap = MIN(argc, probe->ftp_nargs);

	for (i = 0; i < cap; i++) {
		x = probe->ftp_argmap[i];

		/* Up to 8 args are passed in registers on arm64 */
		if (x < 8) {
			argv[i] = regs64->x[x];
		} else {
			fasttrap_fuword64_noerr(regs64->sp + (x - 8) * sizeof(uint64_t), &argv[i]);
		}
	}

	for (; i < argc; i++) {
		argv[i] = 0;
	}
}
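
/*
 * The mapping above matches the AAPCS64 calling convention assumed here:
 * integer arguments 0-7 are passed in x0-x7, and the code reads any further
 * arguments as 8-byte stack slots at sp + (n - 8) * 8. fasttrap_pid_getarg()
 * below makes the same assumption.
 */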

static int
condition_true(int cond, int cpsr)
{
	int taken = 0;
	int zf = (cpsr & PSR_ZF) ? 1 : 0,
	    nf = (cpsr & PSR_NF) ? 1 : 0,
	    cf = (cpsr & PSR_CF) ? 1 : 0,
	    vf = (cpsr & PSR_VF) ? 1 : 0;

	switch (cond) {
	case 0: taken = zf; break;
	case 1: taken = !zf; break;
	case 2: taken = cf; break;
	case 3: taken = !cf; break;
	case 4: taken = nf; break;
	case 5: taken = !nf; break;
	case 6: taken = vf; break;
	case 7: taken = !vf; break;
	case 8: taken = (cf && !zf); break;
	case 9: taken = (!cf || zf); break;
	case 10: taken = (nf == vf); break;
	case 11: taken = (nf != vf); break;
	case 12: taken = (!zf && (nf == vf)); break;
	case 13: taken = (zf || (nf != vf)); break;
	case 14: taken = 1; break;
	case 15: taken = 1; break; /* always "true" for ARM, unpredictable for THUMB. */
	}

	return taken;
}
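
/*
 * For reference, the condition values decoded above correspond to the
 * standard ARM condition mnemonics:
 *   0 EQ, 1 NE, 2 CS/HS, 3 CC/LO, 4 MI, 5 PL, 6 VS, 7 VC,
 *   8 HI, 9 LS, 10 GE, 11 LT, 12 GT, 13 LE, 14 AL, 15 NV (treated as always).
 */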

/*
 * Copy out an instruction for execution in userland.
 * Trap back to kernel to handle return to original flow of execution, because
 * direct branches don't have sufficient range (+/- 128MB) and we
 * cannot clobber a GPR. Note that we have to specially handle PC-rel loads/stores
 * as well, which have range +/- 1MB (convert to an indirect load). Instruction buffer
 * layout:
 *
 * [ Thunked instruction sequence ]
 * [ Trap for return to original code and return probe handling ]
 *
 * This *does* make it impossible for an ldxr/stxr pair to succeed if we trace on or between
 * them... may need to get fancy at some point.
 */
static void
fasttrap_pid_probe_thunk_instr64(arm_saved_state_t *state, fasttrap_tracepoint_t *tp, proc_t *p, uthread_t uthread,
    const uint32_t *instructions, uint32_t num_instrs, user_addr_t *pc_out)
{
	uint32_t local_scratch[8];
	user_addr_t pc = get_saved_state_pc(state);
	user_addr_t user_scratch_area;

	assert(num_instrs < 8);

	bcopy(instructions, local_scratch, num_instrs * sizeof(uint32_t));
	local_scratch[num_instrs] = FASTTRAP_ARM64_RET_INSTR;

	uthread->t_dtrace_astpc = uthread->t_dtrace_scrpc = uthread->t_dtrace_scratch->addr;
	user_scratch_area = uthread->t_dtrace_scratch->write_addr;

	if (user_scratch_area == (user_addr_t)0) {
		fasttrap_sigtrap(p, uthread, pc); // Should be killing target proc
		*pc_out = pc;
		return;
	}

	if (uwrite(p, local_scratch, (num_instrs + 1) * sizeof(uint32_t), user_scratch_area) != KERN_SUCCESS) {
		fasttrap_sigtrap(p, uthread, pc);
		*pc_out = pc;
		return;
	}

	/* We're stepping (come back to kernel to adjust PC for return to regular code). */
	uthread->t_dtrace_step = 1;

	/* We may or may not be about to run a return probe (but we wouldn't thunk ret lr) */
	uthread->t_dtrace_ret = (tp->ftt_retids != NULL);
	assert(tp->ftt_type != FASTTRAP_T_ARM64_RET);
	assert(tp->ftt_type != FASTTRAP_T_ARM64_RETAB);

	/* Set address of instruction we've patched */
	uthread->t_dtrace_pc = pc;

	/* Any branch would be emulated, next instruction should be one ahead */
	uthread->t_dtrace_npc = pc + 4;

	/* We are certainly handling a probe */
	uthread->t_dtrace_on = 1;

	/* Let's jump to the scratch area */
	*pc_out = uthread->t_dtrace_scratch->addr;
}
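
/*
 * After the uwrite() above succeeds, the per-thread scratch area holds
 * (with num_instrs == 1, the only value used by the FASTTRAP_T_COMMON path):
 *
 *   scratch + 0:  the original instruction copied from the probe site
 *   scratch + 4:  FASTTRAP_ARM64_RET_INSTR, a trap back into the kernel so
 *                 the PC can be steered back to the original flow and any
 *                 return probes can fire (see fasttrap_return_probe())
 *
 * The thread then resumes at the scratch address with t_dtrace_step set.
 */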

/*
 * Sign-extend bit "sign_bit_index" through bit 63.
 */
static int64_t
sign_extend(int64_t input, uint32_t sign_bit_index)
{
	assert(sign_bit_index < 63);
	if (input & (1ULL << sign_bit_index)) {
		/* All 1's & ~[1's from 0 to sign bit] */
		input |= ((~0ULL) & ~((1ULL << (sign_bit_index + 1)) - 1ULL));
	}

	return input;
}
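
/*
 * Example: a 19-bit immediate of 0x40000 has its sign bit (bit 18) set, so
 * sign_extend(0x40000, 18) fills bits 19-63 and returns 0xFFFFFFFFFFFC0000
 * (-0x40000 as a signed 64-bit value).
 */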

/*
 * Handle xzr vs. sp, fp, lr, etc. Will *not* read the SP.
 */
static uint64_t
get_saved_state64_regno(arm_saved_state64_t *regs64, uint32_t regno, int use_xzr)
{
	/* Map the encoded register number onto the saved thread state */
	switch (regno) {
	case 29:
		return regs64->fp;
	case 30:
		return regs64->lr;
	case 31:
		/* xzr */
		if (use_xzr) {
			return 0;
		} else {
			return regs64->sp;
		}
	default:
		return regs64->x[regno];
	}
}

static void
set_saved_state64_regno(arm_saved_state64_t *regs64, uint32_t regno, int use_xzr, register_t value)
{
	/* Map the encoded register number onto the saved thread state */
	switch (regno) {
	case 29:
		regs64->fp = value;
		break;
	case 30:
		regs64->lr = value;
		break;
	case 31:
		if (!use_xzr) {
			regs64->sp = value;
		}
		break;
	default:
		regs64->x[regno] = value;
		break;
	}
}
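
/*
 * In the A64 encoding, register number 31 is context dependent: it means
 * XZR/WZR for most data-processing and branch operands but SP for others,
 * which is why callers must say which they want via use_xzr. Every caller in
 * this file passes use_xzr = 1, since none of the instructions emulated here
 * use register 31 to name SP.
 */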

/*
 * Common operation: extract sign-extended PC offset from instruction
 * Left-shifts result by two bits.
 */
static uint64_t
extract_address_literal_sign_extended(uint32_t instr, uint32_t base, uint32_t numbits)
{
	uint64_t offset;

	offset = (instr >> base) & ((1 << numbits) - 1);
	offset = sign_extend(offset, numbits - 1);
	offset = offset << 2;

	return offset;
}
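
/*
 * Worked example: 0x54000040 is "b.eq .+8". With base = 5 and numbits = 19,
 * (instr >> 5) & 0x7ffff yields imm19 = 2; the sign bit (bit 18) is clear, so
 * sign extension is a no-op, and shifting left by two gives the byte offset 8.
 */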

static void
do_cbz_cnbz(arm_saved_state64_t *regs64, uint32_t regwidth, uint32_t instr, int is_cbz, user_addr_t *pc_out)
{
	uint32_t regno;
	uint64_t regval;
	uint64_t offset;

	/* Extract register */
	regno = (instr & 0x1f);
	assert(regno <= 31);
	regval = get_saved_state64_regno(regs64, regno, 1);

	/* Control for size */
	if (regwidth == 32) {
		regval &= 0xFFFFFFFFULL;
	}

	/* Extract offset */
	offset = extract_address_literal_sign_extended(instr, 5, 19);

	/* Do test */
	if ((is_cbz && regval == 0) || ((!is_cbz) && regval != 0)) {
		/* Set PC from label */
		*pc_out = regs64->pc + offset;
	} else {
		/* Advance PC */
		*pc_out = regs64->pc + 4;
	}
}

static void
do_tbz_tbnz(arm_saved_state64_t *regs64, uint32_t instr, int is_tbz, user_addr_t *pc_out)
{
	uint64_t offset, regval;
	uint32_t bit_index, b5, b40, regno, bit_set;

	/* Compute offset */
	offset = extract_address_literal_sign_extended(instr, 5, 14);

	/* Extract bit index */
	b5 = (instr >> 31);
	b40 = ((instr >> 19) & 0x1f);
	bit_index = (b5 << 5) | b40;
	assert(bit_index <= 63);

	/* Extract register */
	regno = (instr & 0x1f);
	assert(regno <= 31);
	regval = get_saved_state64_regno(regs64, regno, 1);

	/* Test bit (use a 64-bit mask so bit indices above 31 are handled correctly) */
	bit_set = ((regval & (1ULL << bit_index)) != 0);

	if ((is_tbz && (!bit_set)) || ((!is_tbz) && bit_set)) {
		/* Branch: unsigned addition so overflow defined */
		*pc_out = regs64->pc + offset;
	} else {
		/* Advance PC */
		*pc_out = regs64->pc + 4;
	}
}
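
/*
 * The tested bit number above is b5:b40 from the TBZ/TBNZ encoding: b5
 * (instruction bit 31) selects the upper half of an X register and b40
 * (instruction bits 23:19) supplies the low five bits, so b5 = 1 with
 * b40 = 3 means "test bit 35".
 */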


static void
fasttrap_pid_probe_handle_patched_instr64(arm_saved_state_t *state, fasttrap_tracepoint_t *tp __unused, uthread_t uthread,
    proc_t *p, uint_t is_enabled, int *was_simulated)
{
	int res1, res2;
	arm_saved_state64_t *regs64 = saved_state64(state);
	uint32_t instr = tp->ftt_instr;
	user_addr_t new_pc = 0;

	/* Neon state should be threaded through, but hack it until we have better arm/arm64 integration */
	arm_neon_saved_state64_t *ns64 = &(get_user_neon_regs(uthread->uu_thread)->ns_64);

	/* is-enabled probe: set x0 to 1 and step forwards */
	if (is_enabled) {
		regs64->x[0] = 1;
		set_saved_state_pc(state, regs64->pc + 4);
		return;
	}

	/* For USDT probes, bypass all the emulation logic for the nop instruction */
	if (IS_ARM64_NOP(tp->ftt_instr)) {
		set_saved_state_pc(state, regs64->pc + 4);
		return;
	}


	/* Only one of many cases in the switch doesn't simulate */
	switch (tp->ftt_type) {
	/*
	 * Function entry: emulate for speed.
	 * stp fp, lr, [sp, #-16]!
	 */
	case FASTTRAP_T_ARM64_STANDARD_FUNCTION_ENTRY:
	{
		/* Store values to stack */
		res1 = fasttrap_suword64(regs64->sp - 16, regs64->fp);
		res2 = fasttrap_suword64(regs64->sp - 8, regs64->lr);
		if (res1 != 0 || res2 != 0) {
			fasttrap_sigsegv(p, uthread, regs64->sp - (res1 ? 16 : 8), state);
#ifndef DEBUG
			new_pc = regs64->pc; /* Bit of a hack */
			break;
#endif
		}

		/* Move stack pointer */
		regs64->sp -= 16;

		/* Move PC forward */
		new_pc = regs64->pc + 4;
		*was_simulated = 1;
		break;
	}

	/*
	 * PC-relative loads/stores: emulate for correctness.
	 * All loads are 32bits or greater (no need to handle byte or halfword accesses).
	 * LDR Wt, addr
	 * LDR Xt, addr
	 * LDRSW Xt, addr
	 *
	 * LDR St, addr
	 * LDR Dt, addr
	 * LDR Qt, addr
	 * PRFM label -> becomes a NOP
	 */
	case FASTTRAP_T_ARM64_LDR_S_PC_REL:
	case FASTTRAP_T_ARM64_LDR_W_PC_REL:
	case FASTTRAP_T_ARM64_LDR_D_PC_REL:
	case FASTTRAP_T_ARM64_LDR_X_PC_REL:
	case FASTTRAP_T_ARM64_LDR_Q_PC_REL:
	case FASTTRAP_T_ARM64_LDRSW_PC_REL:
	{
		uint64_t offset;
		uint32_t valsize, regno;
		user_addr_t address;
		union {
			uint32_t val32;
			uint64_t val64;
			uint128_t val128;
		} value;

		/* Extract 19-bit offset, add to pc */
		offset = extract_address_literal_sign_extended(instr, 5, 19);
		address = regs64->pc + offset;

		/* Extract destination register */
		regno = (instr & 0x1f);
		assert(regno <= 31);

		/* Read value of desired size from memory */
		switch (tp->ftt_type) {
		case FASTTRAP_T_ARM64_LDR_S_PC_REL:
		case FASTTRAP_T_ARM64_LDR_W_PC_REL:
		case FASTTRAP_T_ARM64_LDRSW_PC_REL:
			valsize = 4;
			break;
		case FASTTRAP_T_ARM64_LDR_D_PC_REL:
		case FASTTRAP_T_ARM64_LDR_X_PC_REL:
			valsize = 8;
			break;
		case FASTTRAP_T_ARM64_LDR_Q_PC_REL:
			valsize = 16;
			break;
		default:
			panic("Should never get here!");
			valsize = -1;
			break;
		}

		if (copyin(address, &value, valsize) != 0) {
			fasttrap_sigsegv(p, uthread, address, state);
#ifndef DEBUG
			new_pc = regs64->pc; /* Bit of a hack, we know about update in fasttrap_sigsegv() */
			break;
#endif
		}

		/* Stash in correct register slot */
		switch (tp->ftt_type) {
		case FASTTRAP_T_ARM64_LDR_W_PC_REL:
			set_saved_state64_regno(regs64, regno, 1, value.val32);
			break;
		case FASTTRAP_T_ARM64_LDRSW_PC_REL:
			set_saved_state64_regno(regs64, regno, 1, sign_extend(value.val32, 31));
			break;
		case FASTTRAP_T_ARM64_LDR_X_PC_REL:
			set_saved_state64_regno(regs64, regno, 1, value.val64);
			break;
		case FASTTRAP_T_ARM64_LDR_S_PC_REL:
			ns64->v.s[regno][0] = value.val32;
			break;
		case FASTTRAP_T_ARM64_LDR_D_PC_REL:
			ns64->v.d[regno][0] = value.val64;
			break;
		case FASTTRAP_T_ARM64_LDR_Q_PC_REL:
			ns64->v.q[regno] = value.val128;
			break;
		default:
			panic("Should never get here!");
		}


		/* Move PC forward */
		new_pc = regs64->pc + 4;
		*was_simulated = 1;
		break;
	}

	case FASTTRAP_T_ARM64_PRFM:
	{
		/* Becomes a NOP (architecturally permitted). Just move PC forward */
		new_pc = regs64->pc + 4;
		*was_simulated = 1;
		break;
	}

	/*
	 * End explicit memory accesses.
	 */

	/*
	 * Branches: parse condition codes if needed, emulate for correctness and
	 * in the case of the indirect branches, convenience
	 * B.cond
	 * CBNZ Wn, label
	 * CBNZ Xn, label
	 * CBZ Wn, label
	 * CBZ Xn, label
	 * TBNZ, Xn|Wn, #uimm16, label
	 * TBZ, Xn|Wn, #uimm16, label
	 *
	 * B label
	 * BL label
	 *
	 * BLR Xm
	 * BR Xm
	 * RET Xm
	 */
	case FASTTRAP_T_ARM64_B_COND:
	{
		int cond;

		/* Extract condition code */
		cond = (instr & 0xf);

		/* Determine if it passes */
		if (condition_true(cond, regs64->cpsr)) {
			uint64_t offset;

			/* Extract 19-bit target offset, add to PC */
			offset = extract_address_literal_sign_extended(instr, 5, 19);
			new_pc = regs64->pc + offset;
		} else {
			/* Move forwards */
			new_pc = regs64->pc + 4;
		}

		*was_simulated = 1;
		break;
	}

	case FASTTRAP_T_ARM64_CBNZ_W:
	{
		do_cbz_cnbz(regs64, 32, instr, 0, &new_pc);
		*was_simulated = 1;
		break;
	}
	case FASTTRAP_T_ARM64_CBNZ_X:
	{
		do_cbz_cnbz(regs64, 64, instr, 0, &new_pc);
		*was_simulated = 1;
		break;
	}
	case FASTTRAP_T_ARM64_CBZ_W:
	{
		do_cbz_cnbz(regs64, 32, instr, 1, &new_pc);
		*was_simulated = 1;
		break;
	}
	case FASTTRAP_T_ARM64_CBZ_X:
	{
		do_cbz_cnbz(regs64, 64, instr, 1, &new_pc);
		*was_simulated = 1;
		break;
	}

	case FASTTRAP_T_ARM64_TBNZ:
	{
		do_tbz_tbnz(regs64, instr, 0, &new_pc);
		*was_simulated = 1;
		break;
	}
	case FASTTRAP_T_ARM64_TBZ:
	{
		do_tbz_tbnz(regs64, instr, 1, &new_pc);
		*was_simulated = 1;
		break;
	}
	case FASTTRAP_T_ARM64_B:
	case FASTTRAP_T_ARM64_BL:
	{
		uint64_t offset;

		/* Extract offset from instruction */
		offset = extract_address_literal_sign_extended(instr, 0, 26);

		/* Update LR if appropriate */
		if (tp->ftt_type == FASTTRAP_T_ARM64_BL) {
			regs64->lr = regs64->pc + 4;
		}

		/* Compute PC (unsigned addition for defined overflow) */
		new_pc = regs64->pc + offset;
		*was_simulated = 1;
		break;
	}

	case FASTTRAP_T_ARM64_BLR:
	case FASTTRAP_T_ARM64_BR:
	{
		uint32_t regno;

		/* Extract register from instruction */
		regno = ((instr >> 5) & 0x1f);
		assert(regno <= 31);

		/* Update LR if appropriate */
		if (tp->ftt_type == FASTTRAP_T_ARM64_BLR) {
			regs64->lr = regs64->pc + 4;
		}

		/* Update PC in saved state */
		new_pc = get_saved_state64_regno(regs64, regno, 1);
		*was_simulated = 1;
		break;
	}

	case FASTTRAP_T_ARM64_RET:
	{
		/* Extract register */
		unsigned regno = ((instr >> 5) & 0x1f);
		assert(regno <= 31);

		/* Set PC to register value (xzr, not sp) */
		new_pc = get_saved_state64_regno(regs64, regno, 1);

		*was_simulated = 1;
		break;
	}
	case FASTTRAP_T_ARM64_RETAB:
	{
		/* Set PC to register value (xzr, not sp) */
		new_pc = get_saved_state64_regno(regs64, 30, 1);
#if __has_feature(ptrauth_calls)
		new_pc = (user_addr_t) ptrauth_strip((void *)new_pc, ptrauth_key_return_address);
#endif

		*was_simulated = 1;
		break;
	}
	/*
	 * End branches.
	 */

	/*
	 * Address calculations: emulate for correctness.
	 *
	 * ADRP Xd, label
	 * ADR Xd, label
	 */
	case FASTTRAP_T_ARM64_ADRP:
	case FASTTRAP_T_ARM64_ADR:
	{
		uint64_t immhi, immlo, offset, result;
		uint32_t regno;

		/* Extract destination register */
		regno = (instr & 0x1f);
		assert(regno <= 31);

		/* Extract offset */
		immhi = ((instr & 0x00ffffe0) >> 5); /* bits [23,5]: 19 bits */
		immlo = ((instr & 0x60000000) >> 29); /* bits [30,29]: 2 bits */

		/* Add to PC. Use unsigned addition so that overflow wraps (rather than being undefined). */
		if (tp->ftt_type == FASTTRAP_T_ARM64_ADRP) {
			offset = (immhi << 14) | (immlo << 12); /* Concatenate bits into [32,12] */
			offset = sign_extend(offset, 32); /* Sign extend from bit 32 */
			result = (regs64->pc & ~0xfffULL) + offset; /* And add to page of current pc */
		} else {
			assert(tp->ftt_type == FASTTRAP_T_ARM64_ADR);
			offset = (immhi << 2) | immlo; /* Concatenate bits into [20,0] */
			offset = sign_extend(offset, 20); /* Sign-extend */
			result = regs64->pc + offset; /* And add to the current pc */
		}

		/* xzr, not sp */
		set_saved_state64_regno(regs64, regno, 1, result);

		/* Move PC forward */
		new_pc = regs64->pc + 4;
		*was_simulated = 1;
		break;
	}

	/*
	 * End address calculations.
	 */

	/*
	 * Everything else: thunk to userland
	 */
	case FASTTRAP_T_COMMON:
	{
		fasttrap_pid_probe_thunk_instr64(state, tp, p, uthread, &tp->ftt_instr, 1, &new_pc);
		*was_simulated = 0;
		break;
	}
	default:
	{
		panic("An instruction DTrace doesn't expect: %d\n", tp->ftt_type);
		break;
	}
	}

	set_saved_state_pc(state, new_pc);
	return;
}

int
fasttrap_pid_probe(arm_saved_state_t *state)
{
	proc_t *p = current_proc();
	fasttrap_bucket_t *bucket;
	lck_mtx_t *pid_mtx;
	fasttrap_tracepoint_t *tp, tp_local;
	pid_t pid;
	dtrace_icookie_t cookie;
	uint_t is_enabled = 0;
	int was_simulated, retire_tp = 1;

	uint64_t pc = get_saved_state_pc(state);

	assert(is_saved_state64(state));

	uthread_t uthread = (uthread_t) get_bsdthread_info(current_thread());

	/*
	 * It's possible that a user (in a veritable orgy of bad planning)
	 * could redirect this thread's flow of control before it reached the
	 * return probe fasttrap. In this case we need to kill the process
	 * since it's in an unrecoverable state.
	 */
	if (uthread->t_dtrace_step) {
		ASSERT(uthread->t_dtrace_on);
		fasttrap_sigtrap(p, uthread, (user_addr_t)pc);
		return 0;
	}

	/*
	 * Clear all user tracing flags.
	 */
	uthread->t_dtrace_ft = 0;
	uthread->t_dtrace_pc = 0;
	uthread->t_dtrace_npc = 0;
	uthread->t_dtrace_scrpc = 0;
	uthread->t_dtrace_astpc = 0;
	uthread->t_dtrace_reg = 0;

	/*
	 * Treat a child created by a call to vfork(2) as if it were its
	 * parent. We know that there's only one thread of control in such a
	 * process: this one.
	 */
	if (p->p_lflag & P_LINVFORK) {
		proc_list_lock();
		while (p->p_lflag & P_LINVFORK) {
			p = p->p_pptr;
		}
		proc_list_unlock();
	}

	pid = p->p_pid;
	pid_mtx = &cpu_core[CPU->cpu_id].cpuc_pid_lock;
	lck_mtx_lock(pid_mtx);
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	/*
	 * Lookup the tracepoint that the process just hit.
	 */
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
		    tp->ftt_proc->ftpc_acount != 0) {
			break;
		}
	}

	/*
	 * If we couldn't find a matching tracepoint, either a tracepoint has
	 * been inserted without using the pid<pid> ioctl interface (see
	 * fasttrap_ioctl), or somehow we have mislaid this tracepoint.
	 */
	if (tp == NULL) {
		lck_mtx_unlock(pid_mtx);
		return -1;
	}

	/* Execute the actual probe */
	if (tp->ftt_ids != NULL) {
		fasttrap_id_t *id;
		uint64_t arg4;

		if (is_saved_state64(state)) {
			arg4 = get_saved_state_reg(state, 4);
		} else {
			return -1;
		}


		/* First five parameters are passed in registers (x0-x4) */

		for (id = tp->ftt_ids; id != NULL; id = id->fti_next) {
			fasttrap_probe_t *probe = id->fti_probe;

#if defined(XNU_TARGET_OS_OSX)
			if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) {
				dtrace_probe(dtrace_probeid_error, 0 /* state */, probe->ftp_id,
				    1 /* ndx */, -1 /* offset */, DTRACEFLT_UPRIV);
#else
			if (FALSE) {
#endif /* defined(XNU_TARGET_OS_OSX) */
			} else {
				if (probe->ftp_prov->ftp_provider_type == DTFTP_PROVIDER_ONESHOT) {
					if (os_atomic_xchg(&probe->ftp_triggered, 1, relaxed)) {
						/* already triggered */
						continue;
					}
				}
				/*
				 * If we have at least one probe associated that
				 * is not a oneshot probe, don't remove the
				 * tracepoint
				 */
				else {
					retire_tp = 0;
				}
				if (id->fti_ptype == DTFTP_ENTRY) {
					/*
					 * We note that this was an entry
					 * probe to help ustack() find the
					 * first caller.
					 */
					cookie = dtrace_interrupt_disable();
					DTRACE_CPUFLAG_SET(CPU_DTRACE_ENTRY);
					dtrace_probe(probe->ftp_id,
					    get_saved_state_reg(state, 0),
					    get_saved_state_reg(state, 1),
					    get_saved_state_reg(state, 2),
					    get_saved_state_reg(state, 3),
					    arg4);
					DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_ENTRY);
					dtrace_interrupt_enable(cookie);
				} else if (id->fti_ptype == DTFTP_IS_ENABLED) {
					/*
					 * Note that in this case, we don't
					 * call dtrace_probe() since it's only
					 * an artificial probe meant to change
					 * the flow of control so that it
					 * encounters the true probe.
					 */
					is_enabled = 1;
				} else if (probe->ftp_argmap == NULL) {
					dtrace_probe(probe->ftp_id,
					    get_saved_state_reg(state, 0),
					    get_saved_state_reg(state, 1),
					    get_saved_state_reg(state, 2),
					    get_saved_state_reg(state, 3),
					    arg4);
				} else {
					uint64_t t[5];

					fasttrap_usdt_args64(probe, saved_state64(state), 5, t);
					dtrace_probe(probe->ftp_id, t[0], t[1], t[2], t[3], t[4]);
				}
			}
		}
		if (retire_tp) {
			fasttrap_tracepoint_retire(p, tp);
		}
	}
	/*
	 * We're about to do a bunch of work so we cache a local copy of
	 * the tracepoint to emulate the instruction, and then find the
	 * tracepoint again later if we need to light up any return probes.
	 */
	tp_local = *tp;
	lck_mtx_unlock(pid_mtx);
	tp = &tp_local;

	/*
	 * APPLE NOTE:
	 *
	 * Subroutines should update PC.
	 * We're setting this earlier than Solaris does, to get a "correct"
	 * ustack() output. In the Sun code, a() -> b() -> c() -> d() is
	 * reported at: d, b, a. The new way gives c, b, a, which is closer
	 * to correct, as the return instruction has already executed.
	 */
	fasttrap_pid_probe_handle_patched_instr64(state, tp, uthread, p, is_enabled, &was_simulated);

	/*
	 * If there were no return probes when we first found the tracepoint,
	 * we should feel no obligation to honor any return probes that were
	 * subsequently enabled -- they'll just have to wait until the next
	 * time around.
	 */
	if (tp->ftt_retids != NULL) {
		/*
		 * We need to wait until the results of the instruction are
		 * apparent before invoking any return probes. If this
		 * instruction was emulated we can just call
		 * fasttrap_return_common(); if it needs to be executed, we
		 * need to wait until the user thread returns to the kernel.
		 */
		/*
		 * It used to be that only common instructions were simulated.
		 * For performance reasons, we now simulate some instructions
		 * when safe and go back to userland otherwise. The was_simulated
		 * flag means we don't need to go back to userland.
		 */
		if (was_simulated) {
			fasttrap_return_common(p, state, (user_addr_t)pc, (user_addr_t)get_saved_state_pc(state));
		} else {
			ASSERT(uthread->t_dtrace_ret != 0);
			ASSERT(uthread->t_dtrace_pc == pc);
			ASSERT(uthread->t_dtrace_scrpc != 0);
			ASSERT(((user_addr_t)get_saved_state_pc(state)) == uthread->t_dtrace_astpc);
		}
	}

	return 0;
}

int
fasttrap_return_probe(arm_saved_state_t *regs)
{
	proc_t *p = current_proc();
	uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread());
	user_addr_t pc = uthread->t_dtrace_pc;
	user_addr_t npc = uthread->t_dtrace_npc;

	uthread->t_dtrace_pc = 0;
	uthread->t_dtrace_npc = 0;
	uthread->t_dtrace_scrpc = 0;
	uthread->t_dtrace_astpc = 0;

	/*
	 * Treat a child created by a call to vfork(2) as if it were its
	 * parent. We know that there's only one thread of control in such a
	 * process: this one.
	 */
	if (p->p_lflag & P_LINVFORK) {
		proc_list_lock();
		while (p->p_lflag & P_LINVFORK) {
			p = p->p_pptr;
		}
		proc_list_unlock();
	}

	/*
	 * We set rp->r_pc to the address of the traced instruction so
	 * that it appears to dtrace_probe() that we're on the original
	 * instruction, and so that the user can't easily detect our
	 * complex web of lies. dtrace_return_probe() (our caller)
	 * will correctly set %pc after we return.
	 */
	set_saved_state_pc(regs, pc);

	fasttrap_return_common(p, regs, pc, npc);

	return 0;
}

uint64_t
fasttrap_pid_getarg(void *arg, dtrace_id_t id, void *parg, int argno,
    int aframes)
{
#pragma unused(arg, id, parg, aframes)
	arm_saved_state_t* regs = find_user_regs(current_thread());

	/* First eight arguments are in registers */
	if (argno < 8) {
		return saved_state64(regs)->x[argno];
	}

	/* Look on the stack for the rest */
	uint64_t value;
	uint64_t* sp = (uint64_t*) saved_state64(regs)->sp;
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	value = dtrace_fuword64((user_addr_t) (sp + argno - 8));
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT | CPU_DTRACE_BADADDR);

	return value;
}
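
/*
 * For argno >= 8 the value comes from the traced thread's user stack at
 * sp + (argno - 8) * 8, the same stack-slot layout fasttrap_usdt_args64()
 * assumes above. CPU_DTRACE_NOFAULT keeps a bad user sp from taking a fault
 * in probe context; a failed read is reported through CPU_DTRACE_BADADDR,
 * which is cleared along with NOFAULT before returning.
 */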

uint64_t
fasttrap_usdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes)
{
#pragma unused(arg, id, parg, argno, aframes)
	return 0;
}