/*
 * Copyright (c) 2005-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/thread.h>
#include <mach/thread_status.h>

typedef x86_saved_state_t savearea_t;

#include <stdarg.h>
#include <string.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <libkern/OSAtomic.h>
#include <kern/thread_call.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <miscfs/devfs/devfs.h>
#include <mach/vm_param.h>
#include <machine/pal_routines.h>
#include <i386/mp.h>

/*
 * APPLE NOTE: The regmap is used to decode which 64-bit uregs[] register
 * is being accessed when passed a 32-bit uregs[] constant (based on
 * the reg.d translator file). dtrace_getreg() is smart enough to handle
 * the register mappings. The register set definitions are the same as
 * those used by the fasttrap_getreg code.
 */
#include "fasttrap_regset.h"
static const uint8_t regmap[19] = {
    REG_GS,      /* GS */
    REG_FS,      /* FS */
    REG_ES,      /* ES */
    REG_DS,      /* DS */
    REG_RDI,     /* EDI */
    REG_RSI,     /* ESI */
    REG_RBP,     /* EBP, REG_FP */
    REG_RSP,     /* ESP */
    REG_RBX,     /* EBX */
    REG_RDX,     /* EDX, REG_R1 */
    REG_RCX,     /* ECX */
    REG_RAX,     /* EAX, REG_R0 */
    REG_TRAPNO,  /* TRAPNO */
    REG_ERR,     /* ERR */
    REG_RIP,     /* EIP, REG_PC */
    REG_CS,      /* CS */
    REG_RFL,     /* EFL, REG_PS */
    REG_RSP,     /* UESP, REG_SP */
    REG_SS       /* SS */
};
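
/*
 * Illustrative example only: a D script that uses the 32-bit constant 11
 * (EAX in the reg.d ordering above) against a 64-bit process selects
 * regmap[11] == REG_RAX in dtrace_getreg() below, and so receives the full
 * 64-bit %rax value from the saved state.
 */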

extern dtrace_id_t dtrace_probeid_error; /* special ERROR probe */

void
dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which,
    int fltoffs, int fault, uint64_t illval)
{
    /*
     * For the case of the error probe firing, let's stash away "illval"
     * here, and special-case retrieving it in DIF_VARIABLE_ARG.
     */
    state->dts_arg_error_illval = illval;
    dtrace_probe( dtrace_probeid_error, (uint64_t)(uintptr_t)state, epid, which, fltoffs, fault );
}

/*
 * Atomicity and synchronization
 */
void
dtrace_membar_producer(void)
{
    /* Store fence: order all prior stores before any subsequent stores. */
    __asm__ volatile("sfence");
}

void
dtrace_membar_consumer(void)
{
    /* Load fence: order all prior loads before any subsequent loads. */
    __asm__ volatile("lfence");
}

/*
 * Interrupt manipulation
 * XXX dtrace_getipl() can be called from probe context.
 */
int
dtrace_getipl(void)
{
    /*
     * XXX Drat, get_interrupt_level is MACH_KERNEL_PRIVATE
     * in osfmk/kern/cpu_data.h
     */
    /* return get_interrupt_level(); */
    return (ml_at_interrupt_context() ? 1 : 0);
}

/*
 * MP coordination
 */
typedef struct xcArg {
    processorid_t cpu;
    dtrace_xcall_t f;
    void *arg;
} xcArg_t;

static void
xcRemote( void *foo )
{
    xcArg_t *pArg = (xcArg_t *)foo;

    if ( pArg->cpu == CPU->cpu_id || pArg->cpu == DTRACE_CPUALL ) {
        (pArg->f)(pArg->arg);
    }
}


/*
 * dtrace_xcall() is not called from probe context.
 */
void
dtrace_xcall(processorid_t cpu, dtrace_xcall_t f, void *arg)
{
    xcArg_t xcArg;

    xcArg.cpu = cpu;
    xcArg.f = f;
    xcArg.arg = arg;

    if (cpu == DTRACE_CPUALL) {
        mp_cpus_call(CPUMASK_ALL, ASYNC, xcRemote, (void*)&xcArg);
    } else {
        mp_cpus_call(cpu_to_cpumask((cpu_t)cpu), ASYNC, xcRemote, (void*)&xcArg);
    }
}

/*
 * Initialization
 */
void
dtrace_isa_init(void)
{
    return;
}

/*
 * Runtime and ABI
 */
uint64_t
dtrace_getreg(struct regs *savearea, uint_t reg)
{
    boolean_t is64Bit = proc_is64bit(current_proc());
    x86_saved_state_t *regs = (x86_saved_state_t *)savearea;

    if (regs == NULL) {
        DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
        return (0);
    }

    if (is64Bit) {
        if (reg <= SS) {
            reg = regmap[reg];
        } else {
            reg -= (SS + 1);
        }

        switch (reg) {
        case REG_RDI:
            return (uint64_t)(regs->ss_64.rdi);
        case REG_RSI:
            return (uint64_t)(regs->ss_64.rsi);
        case REG_RDX:
            return (uint64_t)(regs->ss_64.rdx);
        case REG_RCX:
            return (uint64_t)(regs->ss_64.rcx);
        case REG_R8:
            return (uint64_t)(regs->ss_64.r8);
        case REG_R9:
            return (uint64_t)(regs->ss_64.r9);
        case REG_RAX:
            return (uint64_t)(regs->ss_64.rax);
        case REG_RBX:
            return (uint64_t)(regs->ss_64.rbx);
        case REG_RBP:
            return (uint64_t)(regs->ss_64.rbp);
        case REG_R10:
            return (uint64_t)(regs->ss_64.r10);
        case REG_R11:
            return (uint64_t)(regs->ss_64.r11);
        case REG_R12:
            return (uint64_t)(regs->ss_64.r12);
        case REG_R13:
            return (uint64_t)(regs->ss_64.r13);
        case REG_R14:
            return (uint64_t)(regs->ss_64.r14);
        case REG_R15:
            return (uint64_t)(regs->ss_64.r15);
        case REG_FS:
            return (uint64_t)(regs->ss_64.fs);
        case REG_GS:
            return (uint64_t)(regs->ss_64.gs);
        case REG_TRAPNO:
            return (uint64_t)(regs->ss_64.isf.trapno);
        case REG_ERR:
            return (uint64_t)(regs->ss_64.isf.err);
        case REG_RIP:
            return (uint64_t)(regs->ss_64.isf.rip);
        case REG_CS:
            return (uint64_t)(regs->ss_64.isf.cs);
        case REG_SS:
            return (uint64_t)(regs->ss_64.isf.ss);
        case REG_RFL:
            return (uint64_t)(regs->ss_64.isf.rflags);
        case REG_RSP:
            return (uint64_t)(regs->ss_64.isf.rsp);
        case REG_DS:
        case REG_ES:
        default:
            DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
            return (0);
        }

    } else { /* is 32-bit user */
        /* beyond register SS */
        if (reg > x86_SAVED_STATE32_COUNT - 1) {
            DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
            return (0);
        }
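        /*
         * The direct indexing below relies on x86_saved_state32_t laying
         * out its registers contiguously, starting at gs and in the order
         * of the uregs[] indices listed with regmap[] above, so the 32-bit
         * constant can be used as an index into the save area.
         */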
        return (uint64_t)((unsigned int *)(&(regs->ss_32.gs)))[reg];
    }
}

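/*
 * RETURN_OFFSET / RETURN_OFFSET64 are the offsets of the return address
 * within a conventional frame-pointer-chained user frame: the caller's
 * saved frame pointer is at *fp and the return address pushed by the call
 * instruction sits just above it, at fp + 4 (32-bit) or fp + 8 (64-bit).
 * The unwind loops below read the pc from (sp + RETURN_OFFSET[64]) and the
 * next frame pointer from *sp.
 */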
#define RETURN_OFFSET   4
#define RETURN_OFFSET64 8

static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, user_addr_t pc,
    user_addr_t sp)
{
#if 0
    volatile uint16_t *flags =
        (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

    uintptr_t oldcontext = lwp->lwp_oldcontext; /* XXX signal stack crawl */
    size_t s1, s2;
#endif
    int ret = 0;
    boolean_t is64Bit = proc_is64bit(current_proc());

    ASSERT(pcstack == NULL || pcstack_limit > 0);

#if 0 /* XXX signal stack crawl */
    if (p->p_model == DATAMODEL_NATIVE) {
        s1 = sizeof (struct frame) + 2 * sizeof (long);
        s2 = s1 + sizeof (siginfo_t);
    } else {
        s1 = sizeof (struct frame32) + 3 * sizeof (int);
        s2 = s1 + sizeof (siginfo32_t);
    }
#endif

    while (pc != 0) {
        ret++;
        if (pcstack != NULL) {
            *pcstack++ = (uint64_t)pc;
            pcstack_limit--;
            if (pcstack_limit <= 0)
                break;
        }

        if (sp == 0)
            break;

#if 0 /* XXX signal stack crawl */
        if (oldcontext == sp + s1 || oldcontext == sp + s2) {
            if (p->p_model == DATAMODEL_NATIVE) {
                ucontext_t *ucp = (ucontext_t *)oldcontext;
                greg_t *gregs = ucp->uc_mcontext.gregs;

                sp = dtrace_fulword(&gregs[REG_FP]);
                pc = dtrace_fulword(&gregs[REG_PC]);

                oldcontext = dtrace_fulword(&ucp->uc_link);
            } else {
                ucontext32_t *ucp = (ucontext32_t *)oldcontext;
                greg32_t *gregs = ucp->uc_mcontext.gregs;

                sp = dtrace_fuword32(&gregs[EBP]);
                pc = dtrace_fuword32(&gregs[EIP]);

                oldcontext = dtrace_fuword32(&ucp->uc_link);
            }
        }
        else
#endif
        {
            if (is64Bit) {
                pc = dtrace_fuword64((sp + RETURN_OFFSET64));
                sp = dtrace_fuword64(sp);
            } else {
                pc = dtrace_fuword32((sp + RETURN_OFFSET));
                sp = dtrace_fuword32(sp);
            }
        }

#if 0 /* XXX */
        /*
         * This is totally bogus: if we faulted, we're going to clear
         * the fault and break. This is to deal with the apparently
         * broken Java stacks on x86.
         */
        if (*flags & CPU_DTRACE_FAULT) {
            *flags &= ~CPU_DTRACE_FAULT;
            break;
        }
#endif
    }

    return (ret);
}


/*
 * The return value indicates if we've modified the stack.
 */
static int
dtrace_adjust_stack(uint64_t **pcstack, int *pcstack_limit, user_addr_t *pc,
    user_addr_t sp)
{
    int64_t missing_tos;
    int rc = 0;
    boolean_t is64Bit = proc_is64bit(current_proc());

    ASSERT(pc != NULL);

    if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
        /*
         * If we found ourselves in an entry probe, the frame pointer has
         * not yet been pushed (that happens in the function prologue).
         * The best approach is to add the current pc as a missing top of
         * stack and back the pc up to the caller, which is stored at the
         * current stack pointer address since the call instruction puts
         * it there right before the branch.
         */

        missing_tos = *pc;

        if (is64Bit)
            *pc = dtrace_fuword64(sp);
        else
            *pc = dtrace_fuword32(sp);
    } else {
        /*
         * We might have a top of stack override, in which case we just
         * add that frame without question to the top. This happens in
         * return probes where you have a valid frame pointer, but it's
         * for the caller's frame and you'd like to add the pc of the
         * return site to the frame.
         */
        missing_tos = cpu_core[CPU->cpu_id].cpuc_missing_tos;
    }

    if (missing_tos != 0) {
        if (pcstack != NULL && pcstack_limit != NULL) {
            /*
             * If the missing top of stack has been filled out, then
             * we add it and adjust the size.
             */
            *(*pcstack)++ = missing_tos;
            (*pcstack_limit)--;
        }
        /*
         * Return 1 because we would have changed the stack whether or
         * not it was passed in. This ensures the stack count is correct.
         */
        rc = 1;
    }
    return rc;
}

void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
    thread_t thread = current_thread();
    x86_saved_state_t *regs;
    user_addr_t pc, sp, fp;
    volatile uint16_t *flags =
        (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
    int n;
    boolean_t is64Bit = proc_is64bit(current_proc());

    if (*flags & CPU_DTRACE_FAULT)
        return;

    if (pcstack_limit <= 0)
        return;

    /*
     * If there's no user context we still need to zero the stack.
     */
    if (thread == NULL)
        goto zero;

    pal_register_cache_state(thread, VALID);
    regs = (x86_saved_state_t *)find_user_regs(thread);
    if (regs == NULL)
        goto zero;

    *pcstack++ = (uint64_t)dtrace_proc_selfpid();
    pcstack_limit--;

    if (pcstack_limit <= 0)
        return;

    if (is64Bit) {
        pc = regs->ss_64.isf.rip;
        sp = regs->ss_64.isf.rsp;
        fp = regs->ss_64.rbp;
    } else {
        pc = regs->ss_32.eip;
        sp = regs->ss_32.uesp;
        fp = regs->ss_32.ebp;
    }

    /*
     * The return value indicates if we've modified the stack.
     * Since there is nothing else to fix up in either case,
     * we can safely ignore it here.
     */
    (void)dtrace_adjust_stack(&pcstack, &pcstack_limit, &pc, sp);

    if (pcstack_limit <= 0)
        return;

    /*
     * Note that unlike ppc, the x86 code does not use
     * CPU_DTRACE_USTACK_FP. This is because x86 always
     * traces from the fp, even in syscall/profile/fbt
     * providers.
     */
    n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);
    ASSERT(n >= 0);
    ASSERT(n <= pcstack_limit);

    pcstack += n;
    pcstack_limit -= n;

zero:
    while (pcstack_limit-- > 0)
        *pcstack++ = 0;
}

int
dtrace_getustackdepth(void)
{
    thread_t thread = current_thread();
    x86_saved_state_t *regs;
    user_addr_t pc, sp, fp;
    int n = 0;
    boolean_t is64Bit = proc_is64bit(current_proc());

    if (thread == NULL)
        return 0;

    if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
        return (-1);

    pal_register_cache_state(thread, VALID);
    regs = (x86_saved_state_t *)find_user_regs(thread);
    if (regs == NULL)
        return 0;

    if (is64Bit) {
        pc = regs->ss_64.isf.rip;
        sp = regs->ss_64.isf.rsp;
        fp = regs->ss_64.rbp;
    } else {
        pc = regs->ss_32.eip;
        sp = regs->ss_32.uesp;
        fp = regs->ss_32.ebp;
    }

    if (dtrace_adjust_stack(NULL, NULL, &pc, sp) == 1) {
        /*
         * we would have adjusted the stack if we had
         * supplied one (that is what rc == 1 means).
         * Also, as a side effect, the pc might have
         * been fixed up, which is good for calling
         * in to dtrace_getustack_common.
         */
        n++;
    }

    /*
     * Note that unlike ppc, the x86 code does not use
     * CPU_DTRACE_USTACK_FP. This is because x86 always
     * traces from the fp, even in syscall/profile/fbt
     * providers.
     */

    n += dtrace_getustack_common(NULL, 0, pc, fp);

    return (n);
}

void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
    thread_t thread = current_thread();
    savearea_t *regs;
    user_addr_t pc, sp;
    volatile uint16_t *flags =
        (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
#if 0
    uintptr_t oldcontext;
    size_t s1, s2;
#endif
    boolean_t is64Bit = proc_is64bit(current_proc());

    if (*flags & CPU_DTRACE_FAULT)
        return;

    if (pcstack_limit <= 0)
        return;

    /*
     * If there's no user context we still need to zero the stack.
     */
    if (thread == NULL)
        goto zero;

    regs = (savearea_t *)find_user_regs(thread);
    if (regs == NULL)
        goto zero;

    *pcstack++ = (uint64_t)dtrace_proc_selfpid();
    pcstack_limit--;

    if (pcstack_limit <= 0)
        return;

    pc = regs->ss_32.eip;
    sp = regs->ss_32.ebp;

#if 0 /* XXX signal stack crawl */
    oldcontext = lwp->lwp_oldcontext;

    if (p->p_model == DATAMODEL_NATIVE) {
        s1 = sizeof (struct frame) + 2 * sizeof (long);
        s2 = s1 + sizeof (siginfo_t);
    } else {
        s1 = sizeof (struct frame32) + 3 * sizeof (int);
        s2 = s1 + sizeof (siginfo32_t);
    }
#endif

    if (dtrace_adjust_stack(&pcstack, &pcstack_limit, &pc, sp) == 1) {
        /*
         * we made a change.
         */
        *fpstack++ = 0;
        if (pcstack_limit <= 0)
            return;
    }

    while (pc != 0) {
        *pcstack++ = (uint64_t)pc;
        *fpstack++ = sp;
        pcstack_limit--;
        if (pcstack_limit <= 0)
            break;

        if (sp == 0)
            break;

#if 0 /* XXX signal stack crawl */
        if (oldcontext == sp + s1 || oldcontext == sp + s2) {
            if (p->p_model == DATAMODEL_NATIVE) {
                ucontext_t *ucp = (ucontext_t *)oldcontext;
                greg_t *gregs = ucp->uc_mcontext.gregs;

                sp = dtrace_fulword(&gregs[REG_FP]);
                pc = dtrace_fulword(&gregs[REG_PC]);

                oldcontext = dtrace_fulword(&ucp->uc_link);
            } else {
                ucontext_t *ucp = (ucontext_t *)oldcontext;
                greg_t *gregs = ucp->uc_mcontext.gregs;

                sp = dtrace_fuword32(&gregs[EBP]);
                pc = dtrace_fuword32(&gregs[EIP]);

                oldcontext = dtrace_fuword32(&ucp->uc_link);
            }
        }
        else
#endif
        {
            if (is64Bit) {
                pc = dtrace_fuword64((sp + RETURN_OFFSET64));
                sp = dtrace_fuword64(sp);
            } else {
                pc = dtrace_fuword32((sp + RETURN_OFFSET));
                sp = dtrace_fuword32(sp);
            }
        }

#if 0 /* XXX */
        /*
         * This is totally bogus: if we faulted, we're going to clear
         * the fault and break. This is to deal with the apparently
         * broken Java stacks on x86.
         */
        if (*flags & CPU_DTRACE_FAULT) {
            *flags &= ~CPU_DTRACE_FAULT;
            break;
        }
#endif
    }

zero:
    while (pcstack_limit-- > 0)
        *pcstack++ = 0;
}

void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
    struct frame *fp = (struct frame *)__builtin_frame_address(0);
    struct frame *nextfp, *minfp, *stacktop;
    int depth = 0;
    int last = 0;
    uintptr_t pc;
    uintptr_t caller = CPU->cpu_dtrace_caller;
    int on_intr;

    if ((on_intr = CPU_ON_INTR(CPU)) != 0)
        stacktop = (struct frame *)dtrace_get_cpu_int_stack_top();
    else
        stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + kernel_stack_size);

    minfp = fp;

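    /*
     * aframes counts the artificial frames (dtrace_probe() and the provider
     * entry points above it) that the caller wants elided from the trace;
     * since the walk starts at this function's own frame, count one more so
     * that frame is skipped as well.
     */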
    aframes++;

    if (intrpc != NULL && depth < pcstack_limit)
        pcstack[depth++] = (pc_t)intrpc;

    while (depth < pcstack_limit) {
        nextfp = *(struct frame **)fp;
        pc = *(uintptr_t *)(((uintptr_t)fp) + RETURN_OFFSET64);

        if (nextfp <= minfp || nextfp >= stacktop) {
            if (on_intr) {
                /*
                 * Hop from interrupt stack to thread stack.
                 */
                vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread());

                minfp = (struct frame *)kstack_base;
                stacktop = (struct frame *)(kstack_base + kernel_stack_size);

                on_intr = 0;
                continue;
            }
            /*
             * This is the last frame we can process; indicate
             * that we should return after processing this frame.
             */
            last = 1;
        }

        if (aframes > 0) {
            if (--aframes == 0 && caller != 0) {
                /*
                 * We've just run out of artificial frames,
                 * and we have a valid caller -- fill it in
                 * now.
                 */
                ASSERT(depth < pcstack_limit);
                pcstack[depth++] = (pc_t)caller;
                caller = 0;
            }
        } else {
            if (depth < pcstack_limit)
                pcstack[depth++] = (pc_t)pc;
        }

        if (last) {
            while (depth < pcstack_limit)
                pcstack[depth++] = 0;
            return;
        }

        fp = nextfp;
        minfp = fp;
    }
}

struct frame {
    struct frame *backchain;
    uintptr_t retaddr;
};
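
/*
 * This mirrors the standard x86 call frame: backchain is the caller's saved
 * frame pointer and retaddr is the return address pushed by the call
 * instruction, so on 64-bit RETURN_OFFSET64 equals
 * offsetof(struct frame, retaddr).
 */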

uint64_t
dtrace_getarg(int arg, int aframes, dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
    uint64_t val = 0;
    struct frame *fp = (struct frame *)__builtin_frame_address(0);
    uintptr_t *stack;
    uintptr_t pc;
    int i;

    /*
     * A total of 6 arguments are passed via registers; any argument with
     * index of 5 or lower is therefore in a register.
     */
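    /*
     * Per the SysV AMD64 ABI those registers are %rdi, %rsi, %rdx, %rcx,
     * %r8 and %r9. The in-register path below simply treats the six 64-bit
     * slots beginning at &saved_state->rdi as those arguments and indexes
     * into them.
     */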
    int inreg = 5;

    for (i = 1; i <= aframes; i++) {
        fp = fp->backchain;
        pc = fp->retaddr;

        if (dtrace_invop_callsite_pre != NULL
            && pc > (uintptr_t)dtrace_invop_callsite_pre
            && pc <= (uintptr_t)dtrace_invop_callsite_post) {
            /*
             * In the case of x86_64, we will use the pointer to the
             * save area structure that was pushed when we took the
             * trap. To get this structure, we must increment
             * beyond the frame structure. If the
             * argument that we're seeking is passed on the stack,
             * we'll pull the true stack pointer out of the saved
             * registers and decrement our argument by the number
             * of arguments passed in registers; if the argument
             * we're seeking is passed in registers, we can just
             * load it directly.
             */

            /* fp points to frame of dtrace_invop() activation. */
            fp = fp->backchain; /* to fbt_perfcallback() activation. */
            fp = fp->backchain; /* to kernel_trap() activation. */
            fp = fp->backchain; /* to trap_from_kernel() activation. */

            x86_saved_state_t *tagged_regs = (x86_saved_state_t *)&fp[1];
            x86_saved_state64_t *saved_state = saved_state64(tagged_regs);

            if (arg <= inreg) {
                stack = (uintptr_t *)(void*)&saved_state->rdi;
            } else {
                fp = (struct frame *)(saved_state->isf.rsp);
                stack = (uintptr_t *)&fp[1]; /* Find marshalled arguments */
                arg -= inreg + 1;
            }
            goto load;
        }
    }

    /*
     * We know that we did not come through a trap to get into
     * dtrace_probe() -- we arrive here when the provider has
     * called dtrace_probe() directly.
     * The probe ID is the first argument to dtrace_probe().
     * We must advance beyond that to get the argX.
     */
    arg++; /* Advance past probeID */

    if (arg <= inreg) {
        /*
         * This shouldn't happen. If the argument is passed in a
         * register then it should have been, well, passed in a
         * register...
         */
        DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
        return (0);
    }

    arg -= (inreg + 1);
    stack = (uintptr_t *)&fp[1]; /* Find marshalled arguments */

load:
    if (dtrace_canload((uint64_t)(stack + arg), sizeof(uint64_t),
        mstate, vstate)) {
        /* dtrace_probe arguments arg0 ... arg4 are 64 bits wide */
        val = dtrace_load64((uint64_t)(stack + arg));
    }

    return (val);
}

/*
 * Load/Store Safety
 */
void
dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
{
    /*
     * "base" is the smallest toxic address in the range, "limit" is the
     * first VALID address greater than "base".
     */
    func(0x0, VM_MIN_KERNEL_AND_KEXT_ADDRESS);
    if (VM_MAX_KERNEL_ADDRESS < ~(uintptr_t)0)
        func(VM_MAX_KERNEL_ADDRESS + 1, ~(uintptr_t)0);
}