/* bsd/dev/i386/dtrace_isa.c (apple/xnu, xnu-7195.50.7.100.1) */
/*
 * Copyright (c) 2005-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/thread.h>
#include <mach/thread_status.h>

typedef x86_saved_state_t savearea_t;

#include <stdarg.h>
#include <string.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <libkern/OSAtomic.h>
#include <kern/thread_call.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <miscfs/devfs/devfs.h>
#include <mach/vm_param.h>
#include <machine/pal_routines.h>
#include <i386/mp.h>
#include <machine/trap.h>

/*
 * APPLE NOTE: The regmap is used to decode which 64-bit uregs[] register
 * is being accessed when passed a 32-bit uregs[] constant (based on
 * the reg.d translator file). dtrace_getreg() is smart enough to handle
 * the register mappings. The register set definitions are the same as
 * those used by the fasttrap_getreg code.
 */
#include "fasttrap_regset.h"
static const uint8_t regmap[19] = {
	REG_GS,		/* GS */
	REG_FS,		/* FS */
	REG_ES,		/* ES */
	REG_DS,		/* DS */
	REG_RDI,	/* EDI */
	REG_RSI,	/* ESI */
	REG_RBP,	/* EBP, REG_FP */
	REG_RSP,	/* ESP */
	REG_RBX,	/* EBX */
	REG_RDX,	/* EDX, REG_R1 */
	REG_RCX,	/* ECX */
	REG_RAX,	/* EAX, REG_R0 */
	REG_TRAPNO,	/* TRAPNO */
	REG_ERR,	/* ERR */
	REG_RIP,	/* EIP, REG_PC */
	REG_CS,		/* CS */
	REG_RFL,	/* EFL, REG_PS */
	REG_RSP,	/* UESP, REG_SP */
	REG_SS		/* SS */
};
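
/*
 * Illustrative sketch (not part of the original file): how dtrace_getreg()
 * below uses regmap[]. A 32-bit uregs[] constant (anything up to SS) is
 * remapped to its 64-bit counterpart, e.g. index 11 (EAX) becomes REG_RAX
 * and index 14 (EIP) becomes REG_RIP; constants above SS are already 64-bit
 * REG_* values, biased by SS + 1. The helper name is hypothetical.
 */
#if 0
static uint_t
regmap_translate_example(uint_t reg)
{
	if (reg <= SS) {
		reg = regmap[reg];	/* 32-bit constant -> 64-bit REG_* */
	} else {
		reg -= (SS + 1);	/* already a 64-bit REG_* constant */
	}
	return reg;
}
#endif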

extern dtrace_id_t dtrace_probeid_error; /* special ERROR probe */

void
dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which,
    int fltoffs, int fault, uint64_t illval)
{
	/*
	 * When the error probe fires, let's stash away "illval" here, and
	 * special-case retrieving it in DIF_VARIABLE_ARG.
	 */
	state->dts_arg_error_illval = illval;
	dtrace_probe( dtrace_probeid_error, (uint64_t)(uintptr_t)state, epid, which, fltoffs, fault );
}

/*
 * Atomicity and synchronization
 */
void
dtrace_membar_producer(void)
{
	__asm__ volatile("sfence");
}

void
dtrace_membar_consumer(void)
{
	__asm__ volatile("lfence");
}
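
/*
 * Illustrative sketch (not part of the original file): the publish/observe
 * pairing these barriers are meant for. The "shared" record and helper names
 * below are hypothetical. The producer orders its payload store before the
 * flag store; the consumer orders the flag load before the payload load.
 */
#if 0
static struct {
	uint64_t	payload;
	volatile int	ready;
} shared;

static void
example_publish(uint64_t v)
{
	shared.payload = v;
	dtrace_membar_producer();	/* payload visible before the flag */
	shared.ready = 1;
}

static uint64_t
example_observe(void)
{
	while (shared.ready == 0)
		;			/* spin until published */
	dtrace_membar_consumer();	/* flag observed before payload read */
	return shared.payload;
}
#endif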

/*
 * Interrupt manipulation
 * XXX dtrace_getipl() can be called from probe context.
 */
int
dtrace_getipl(void)
{
	/*
	 * XXX Drat, get_interrupt_level is MACH_KERNEL_PRIVATE
	 * in osfmk/kern/cpu_data.h
	 */
	/* return get_interrupt_level(); */
	return (ml_at_interrupt_context() ? 1: 0);
}

/*
 * MP coordination
 */
typedef struct xcArg {
	processorid_t cpu;
	dtrace_xcall_t f;
	void *arg;
} xcArg_t;

static void
xcRemote( void *foo )
{
	xcArg_t *pArg = (xcArg_t *)foo;

	if ( pArg->cpu == CPU->cpu_id || pArg->cpu == DTRACE_CPUALL ) {
		(pArg->f)(pArg->arg);
	}
}


/*
 * dtrace_xcall() is not called from probe context.
 */
void
dtrace_xcall(processorid_t cpu, dtrace_xcall_t f, void *arg)
{
	xcArg_t xcArg;

	xcArg.cpu = cpu;
	xcArg.f = f;
	xcArg.arg = arg;

	if (cpu == DTRACE_CPUALL) {
		mp_cpus_call (CPUMASK_ALL, ASYNC, xcRemote, (void*)&xcArg);
	}
	else {
		mp_cpus_call (cpu_to_cpumask((cpu_t)cpu), ASYNC, xcRemote, (void*)&xcArg);
	}
}
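
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * use dtrace_xcall(). The callback and callers below are hypothetical; the
 * callback runs on every CPU matching the processorid_t argument, with
 * DTRACE_CPUALL selecting all CPUs.
 */
#if 0
static void
example_percpu_work(void *arg)
{
#pragma unused(arg)
	/* e.g. flush a per-CPU buffer or wait for in-flight probes to drain */
}

static void
example_sync(void)
{
	/* Run on every CPU... */
	dtrace_xcall(DTRACE_CPUALL, example_percpu_work, NULL);

	/* ...or on a single CPU (CPU 0 here). */
	dtrace_xcall(0, example_percpu_work, NULL);
}
#endif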

/*
 * Initialization
 */
void
dtrace_isa_init(void)
{
	return;
}

/*
 * Runtime and ABI
 */
uint64_t
dtrace_getreg(struct regs *savearea, uint_t reg)
{
	boolean_t is64Bit = proc_is64bit(current_proc());
	x86_saved_state_t *regs = (x86_saved_state_t *)savearea;

	if (regs == NULL) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	if (is64Bit) {
		if (reg <= SS) {
			reg = regmap[reg];
		} else {
			reg -= (SS + 1);
		}

		switch (reg) {
		case REG_RDI:
			return (uint64_t)(regs->ss_64.rdi);
		case REG_RSI:
			return (uint64_t)(regs->ss_64.rsi);
		case REG_RDX:
			return (uint64_t)(regs->ss_64.rdx);
		case REG_RCX:
			return (uint64_t)(regs->ss_64.rcx);
		case REG_R8:
			return (uint64_t)(regs->ss_64.r8);
		case REG_R9:
			return (uint64_t)(regs->ss_64.r9);
		case REG_RAX:
			return (uint64_t)(regs->ss_64.rax);
		case REG_RBX:
			return (uint64_t)(regs->ss_64.rbx);
		case REG_RBP:
			return (uint64_t)(regs->ss_64.rbp);
		case REG_R10:
			return (uint64_t)(regs->ss_64.r10);
		case REG_R11:
			return (uint64_t)(regs->ss_64.r11);
		case REG_R12:
			return (uint64_t)(regs->ss_64.r12);
		case REG_R13:
			return (uint64_t)(regs->ss_64.r13);
		case REG_R14:
			return (uint64_t)(regs->ss_64.r14);
		case REG_R15:
			return (uint64_t)(regs->ss_64.r15);
		case REG_FS:
			return (uint64_t)(regs->ss_64.fs);
		case REG_GS:
			return (uint64_t)(regs->ss_64.gs);
		case REG_TRAPNO:
			return (uint64_t)(regs->ss_64.isf.trapno);
		case REG_ERR:
			return (uint64_t)(regs->ss_64.isf.err);
		case REG_RIP:
			return (uint64_t)(regs->ss_64.isf.rip);
		case REG_CS:
			return (uint64_t)(regs->ss_64.isf.cs);
		case REG_SS:
			return (uint64_t)(regs->ss_64.isf.ss);
		case REG_RFL:
			return (uint64_t)(regs->ss_64.isf.rflags);
		case REG_RSP:
			return (uint64_t)(regs->ss_64.isf.rsp);
		case REG_DS:
		case REG_ES:
		default:
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}

	} else {	/* is 32bit user */
		/* beyond register SS */
		if (reg > x86_SAVED_STATE32_COUNT - 1) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}
		return (uint64_t)((unsigned int *)(&(regs->ss_32.gs)))[reg];
	}
}

uint64_t
dtrace_getvmreg(uint_t ndx)
{
	uint64_t reg = 0;
	bool failed = false;

	/* Any change in the vmread final opcode must be reflected in dtrace_handle_trap below. */
	__asm__ __volatile__(
		"vmread %2, %0\n"
		"ja 1f\n"
		"mov $1, %1\n"
		"1:\n"
		: "=a" (reg), "+r" (failed) : "D" ((uint64_t)ndx));

	/*
	 * Check for a fault first. If DTrace has recovered from a fault caused
	 * by the vmread above, then the value in "failed" is unreliable.
	 */
	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ILLOP)) {
		return 0;
	}

	/* If vmread executed but signalled failure (CF or ZF set), report it. */
	if (failed) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = ndx;
		return 0;
	}

	return reg;
}

#define RETURN_OFFSET 4
#define RETURN_OFFSET64 8

static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, user_addr_t pc,
    user_addr_t sp)
{
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

#if 0
	uintptr_t oldcontext = lwp->lwp_oldcontext; /* XXX signal stack crawl */
	size_t s1, s2;
#endif
	int ret = 0;
	boolean_t is64Bit = proc_is64bit(current_proc());

	ASSERT(pcstack == NULL || pcstack_limit > 0);

#if 0 /* XXX signal stack crawl */
	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	while (pc != 0) {
		ret++;
		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (sp == 0)
			break;

#if 0 /* XXX signal stack crawl */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		}
		else
#endif
		{
			if (is64Bit) {
				pc = dtrace_fuword64((sp + RETURN_OFFSET64));
				sp = dtrace_fuword64(sp);
			} else {
				pc = dtrace_fuword32((sp + RETURN_OFFSET));
				sp = dtrace_fuword32(sp);
			}
		}

		/* Truncate ustack if the iterator causes a fault. */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	return (ret);
}
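
/*
 * Illustrative sketch (not part of the original file): the frame layout the
 * walker above assumes. Each frame starts with the saved frame pointer and
 * is immediately followed by the return address, which is why the next pc is
 * read at sp + RETURN_OFFSET(64) and the next sp at *sp. A native walk over
 * readable memory would look like the hypothetical routine below; the code
 * above does the same thing through dtrace_fuword32()/dtrace_fuword64() so
 * that faults on user addresses are recoverable.
 */
#if 0
struct example_frame {
	struct example_frame	*next;		/* saved frame pointer, at sp */
	uintptr_t		retaddr;	/* return address, at sp + 8 on 64-bit */
};

static int
example_fp_walk(struct example_frame *fp, uintptr_t *pcs, int limit)
{
	int depth = 0;

	while (fp != NULL && depth < limit) {
		pcs[depth++] = fp->retaddr;	/* record the return address */
		fp = fp->next;			/* follow the saved frame pointer */
	}
	return depth;
}
#endif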


/*
 * The return value indicates if we've modified the stack.
 */
static int
dtrace_adjust_stack(uint64_t **pcstack, int *pcstack_limit, user_addr_t *pc,
    user_addr_t sp)
{
	volatile uint16_t *flags = (volatile uint16_t *) &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	int64_t missing_tos;
	int rc = 0;
	boolean_t is64Bit = proc_is64bit(current_proc());

	ASSERT(pc != NULL);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * If we found ourselves in an entry probe, the frame pointer has
		 * not yet been pushed (that happens in the function prologue).
		 * The best approach is to add the current pc as a missing top of
		 * stack, and back the pc up to the caller, which is stored at the
		 * current stack pointer address since the call instruction puts
		 * it there right before the branch.
		 */

		missing_tos = *pc;

		if (is64Bit)
			*pc = dtrace_fuword64(sp);
		else
			*pc = dtrace_fuword32(sp);

		/* Truncate ustack if the iterator causes a fault. */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
		}
	} else {
		/*
		 * We might have a top of stack override, in which case we just
		 * add that frame without question to the top. This happens in
		 * return probes where you have a valid frame pointer, but it's
		 * for the caller's frame and you'd like to add the pc of the
		 * return site to the frame.
		 */
		missing_tos = cpu_core[CPU->cpu_id].cpuc_missing_tos;
	}

	if (missing_tos != 0) {
		if (pcstack != NULL && pcstack_limit != NULL) {
			/*
			 * If the missing top of stack has been filled out, then
			 * we add it and adjust the size.
			 */
			*(*pcstack)++ = missing_tos;
			(*pcstack_limit)--;
		}
		/*
		 * Return 1 because we would have changed the stack whether or
		 * not it was passed in. This ensures the stack count is correct.
		 */
		rc = 1;
	}
	return rc;
}
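
/*
 * Worked example (not part of the original file): why *sp holds the caller's
 * pc at an entry probe. At the first instruction of a function the prologue
 * has not yet run, so the stack pointer still points at the return address
 * that the call instruction pushed:
 *
 *	caller:
 *		call	callee		; pushes return address, so *sp == return site
 *	callee:
 *		push	%rbp		; <-- entry probe fires before the prologue
 *		mov	%rsp, %rbp
 *
 * Hence the code above records the probed pc as the missing top of stack and
 * replaces pc with dtrace_fuword64(sp) (or the 32-bit variant), the caller's
 * return site.
 */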

void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	x86_saved_state_t *regs;
	user_addr_t pc, sp, fp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	int n;
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	pal_register_cache_state(thread, VALID);
	regs = (x86_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		goto zero;

	*pcstack++ = (uint64_t)dtrace_proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	if (is64Bit) {
		pc = regs->ss_64.isf.rip;
		sp = regs->ss_64.isf.rsp;
		fp = regs->ss_64.rbp;
	} else {
		pc = regs->ss_32.eip;
		sp = regs->ss_32.uesp;
		fp = regs->ss_32.ebp;
	}

	/*
	 * The return value indicates if we've modified the stack.
	 * Since there is nothing else to fix up in either case,
	 * we can safely ignore it here.
	 */
	(void)dtrace_adjust_stack(&pcstack, &pcstack_limit, &pc, sp);

	if (pcstack_limit <= 0)
		return;

	/*
	 * Note that unlike ppc, the x86 code does not use
	 * CPU_DTRACE_USTACK_FP. This is because x86 always
	 * traces from the fp, even in syscall/profile/fbt
	 * providers.
	 */
	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
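
/*
 * Worked example (not part of the original file): the layout this produces
 * in pcstack[]. For a hypothetical 3-deep user stack and pcstack_limit == 8,
 * the buffer ends up roughly as
 *
 *	pcstack[0]    = pid (dtrace_proc_selfpid())
 *	pcstack[1]    = pc of the interrupted user frame (a missing top-of-stack
 *	                entry from dtrace_adjust_stack() would precede it)
 *	pcstack[2..3] = caller return addresses from the frame-pointer walk
 *	pcstack[4..7] = 0 (padding from the "zero:" loop)
 */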

int
dtrace_getustackdepth(void)
{
	thread_t thread = current_thread();
	x86_saved_state_t *regs;
	user_addr_t pc, sp, fp;
	int n = 0;
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (thread == NULL)
		return 0;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	pal_register_cache_state(thread, VALID);
	regs = (x86_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		return 0;

	if (is64Bit) {
		pc = regs->ss_64.isf.rip;
		sp = regs->ss_64.isf.rsp;
		fp = regs->ss_64.rbp;
	} else {
		pc = regs->ss_32.eip;
		sp = regs->ss_32.uesp;
		fp = regs->ss_32.ebp;
	}

	if (dtrace_adjust_stack(NULL, NULL, &pc, sp) == 1) {
		/*
		 * We would have adjusted the stack if we had supplied one
		 * (that is what rc == 1 means). Also, as a side effect, the
		 * pc might have been fixed up, which is good for calling
		 * into dtrace_getustack_common.
		 */
		n++;
	}

	/*
	 * Note that unlike ppc, the x86 code does not use
	 * CPU_DTRACE_USTACK_FP. This is because x86 always
	 * traces from the fp, even in syscall/profile/fbt
	 * providers.
	 */

	n += dtrace_getustack_common(NULL, 0, pc, fp);

	return (n);
}

void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	savearea_t *regs;
	user_addr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
#if 0
	uintptr_t oldcontext;
	size_t s1, s2;
#endif
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (savearea_t *)find_user_regs(thread);
	if (regs == NULL)
		goto zero;

	*pcstack++ = (uint64_t)dtrace_proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = regs->ss_32.eip;
	sp = regs->ss_32.ebp;

#if 0 /* XXX signal stack crawl */
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	if (dtrace_adjust_stack(&pcstack, &pcstack_limit, &pc, sp) == 1) {
		/*
		 * We made a change.
		 */
		*fpstack++ = 0;
		if (pcstack_limit <= 0)
			return;
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (sp == 0)
			break;

#if 0 /* XXX signal stack crawl */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		}
		else
#endif
		{
			if (is64Bit) {
				pc = dtrace_fuword64((sp + RETURN_OFFSET64));
				sp = dtrace_fuword64(sp);
			} else {
				pc = dtrace_fuword32((sp + RETURN_OFFSET));
				sp = dtrace_fuword32(sp);
			}
		}

		/* Truncate ustack if the iterator causes a fault. */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}

void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	struct frame *fp = (struct frame *)__builtin_frame_address(0);
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int last = 0;
	uintptr_t pc;
	uintptr_t caller = CPU->cpu_dtrace_caller;
	int on_intr;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)dtrace_get_cpu_int_stack_top();
	else
		stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + kernel_stack_size);

	minfp = fp;

	aframes++;

	if (intrpc != NULL && depth < pcstack_limit)
		pcstack[depth++] = (pc_t)intrpc;

	while (depth < pcstack_limit) {
		nextfp = *(struct frame **)fp;
		pc = *(uintptr_t *)(((uintptr_t)fp) + RETURN_OFFSET64);

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread());

				minfp = (struct frame *)kstack_base;
				stacktop = (struct frame *)(kstack_base + kernel_stack_size);

				on_intr = 0;
				continue;
			}
			/*
			 * This is the last frame we can process; indicate
			 * that we should return after processing this frame.
			 */
			last = 1;
		}

		if (aframes > 0) {
			if (--aframes == 0 && caller != 0) {
				/*
				 * We've just run out of artificial frames,
				 * and we have a valid caller -- fill it in
				 * now.
				 */
				ASSERT(depth < pcstack_limit);
				pcstack[depth++] = (pc_t)caller;
				caller = 0;
			}
		} else {
			if (depth < pcstack_limit)
				pcstack[depth++] = (pc_t)pc;
		}

		if (last) {
			while (depth < pcstack_limit)
				pcstack[depth++] = 0;
			return;
		}

		fp = nextfp;
		minfp = fp;
	}
}
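
/*
 * Worked example (not part of the original file): how aframes and
 * cpu_dtrace_caller interact above. The first aframes return addresses
 * (DTrace's own frames, e.g. dtrace_getpcstack() and dtrace_probe()) are
 * discarded rather than reported; when the last artificial frame is consumed
 * and CPU->cpu_dtrace_caller has been set (as fbt does), that saved caller
 * is reported in their place, and every later frame is recorded verbatim.
 */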

struct frame {
	struct frame *backchain;
	uintptr_t retaddr;
};

uint64_t
dtrace_getarg(int arg, int aframes, dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
	uint64_t val = 0;
	struct frame *fp = (struct frame *)__builtin_frame_address(0);
	uintptr_t *stack;
	uintptr_t pc;
	int i;


	/*
	 * A total of 6 arguments are passed via registers; any argument with
	 * index of 5 or lower is therefore in a register.
	 */
	int inreg = 5;

	for (i = 1; i <= aframes; i++) {
		fp = fp->backchain;
		pc = fp->retaddr;

		if (dtrace_invop_callsite_pre != NULL
		    && pc > (uintptr_t)dtrace_invop_callsite_pre
		    && pc <= (uintptr_t)dtrace_invop_callsite_post) {
			/*
			 * In the case of x86_64, we will use the pointer to the
			 * save area structure that was pushed when we took the
			 * trap. To get this structure, we must increment beyond
			 * the frame structure. If the argument that we're seeking
			 * is passed on the stack, we'll pull the true stack
			 * pointer out of the saved registers and decrement our
			 * argument by the number of arguments passed in
			 * registers; if the argument we're seeking is passed in
			 * registers, we can just load it directly.
			 */

			/* fp points to frame of dtrace_invop() activation. */
			fp = fp->backchain; /* to fbt_perfcallback() activation. */
			fp = fp->backchain; /* to kernel_trap() activation. */
			fp = fp->backchain; /* to trap_from_kernel() activation. */

			x86_saved_state_t *tagged_regs = (x86_saved_state_t *)&fp[1];
			x86_saved_state64_t *saved_state = saved_state64(tagged_regs);

			if (arg <= inreg) {
				stack = (uintptr_t *)(void*)&saved_state->rdi;
			} else {
				fp = (struct frame *)(saved_state->isf.rsp);
				stack = (uintptr_t *)&fp[1]; /* Find marshalled arguments */
				arg -= inreg + 1;
			}
			goto load;
		}
	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- we arrive here when the provider has
	 * called dtrace_probe() directly.
	 * The probe ID is the first argument to dtrace_probe().
	 * We must advance beyond that to get the argX.
	 */
	arg++; /* Advance past probeID */

	if (arg <= inreg) {
		/*
		 * This shouldn't happen. If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	arg -= (inreg + 1);
	stack = (uintptr_t *)&fp[1]; /* Find marshalled arguments */

load:
	if (dtrace_canload((uint64_t)(stack + arg), sizeof(uint64_t),
	    mstate, vstate)) {
		/* dtrace_probe arguments arg0 ... arg4 are 64bits wide */
		val = dtrace_load64((uint64_t)(stack + arg));
	}

	return (val);
}
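
/*
 * Illustrative sketch (not part of the original file): the System V AMD64
 * calling convention that dtrace_getarg() leans on. Integer arguments 0..5
 * travel in rdi, rsi, rdx, rcx, r8 and r9; argument 6 and beyond are written
 * to the stack just above the return address. The helper and its parameters
 * below are hypothetical.
 */
#if 0
static uint64_t
example_arg_location(const uint64_t regargs[6], const uint64_t *stackargs, int arg)
{
	const int inreg = 5;			/* highest register-passed index */

	if (arg <= inreg)
		return regargs[arg];		/* rdi, rsi, rdx, rcx, r8, r9 */
	return stackargs[arg - (inreg + 1)];	/* marshalled on the caller's stack */
}
#endif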

/*
 * Load/Store Safety
 */
void
dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
{
	/*
	 * "base" is the smallest toxic address in the range, "limit" is the first
	 * VALID address greater than "base".
	 */
	func(0x0, VM_MIN_KERNEL_AND_KEXT_ADDRESS);
	if (VM_MAX_KERNEL_ADDRESS < ~(uintptr_t)0)
		func(VM_MAX_KERNEL_ADDRESS + 1, ~(uintptr_t)0);
}

/*
 * Trap Safety
 */
extern boolean_t dtrace_handle_trap(int, x86_saved_state_t *);

boolean_t
dtrace_handle_trap(int trapno, x86_saved_state_t *state)
{
	x86_saved_state64_t *saved_state = saved_state64(state);

	if (!DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT)) {
		return FALSE;
	}

	/*
	 * A general-purpose solution would require pulling in a disassembler.
	 * Right now there is only one specific case to handle, so it is
	 * hardcoded here.
	 */
	if (trapno == T_INVALID_OPCODE) {
		uint8_t *inst = (uint8_t *)saved_state->isf.rip;

		/* vmread %rdi, %rax */
		if (inst[0] == 0x0f && inst[1] == 0x78 && inst[2] == 0xf8) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			saved_state->isf.rip += 3;
			return TRUE;
		}
	}

	return FALSE;
}