/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from mach/ppc/thread_status.h */
#include <kern/thread.h>
#include <mach/thread_status.h>

typedef x86_saved_state_t savearea_t;

#include <stdarg.h>
#include <string.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <libkern/OSAtomic.h>
#include <kern/thread_call.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <miscfs/devfs/devfs.h>
#include <mach/vm_param.h>

/*
 * APPLE NOTE: The regmap is used to decode which 64bit uregs[] register
 * is being accessed when passed a 32bit uregs[] constant (based on
 * the reg.d translator file). dtrace_getreg() is smart enough to handle
 * the register mappings. The register set definitions are the same as
 * those used by the fasttrap_getreg code.
 */
#include "fasttrap_regset.h"
static const uint8_t regmap[19] = {
	REG_GS,		/* GS */
	REG_FS,		/* FS */
	REG_ES,		/* ES */
	REG_DS,		/* DS */
	REG_RDI,	/* EDI */
	REG_RSI,	/* ESI */
	REG_RBP,	/* EBP, REG_FP */
	REG_RSP,	/* ESP */
	REG_RBX,	/* EBX */
	REG_RDX,	/* EDX, REG_R1 */
	REG_RCX,	/* ECX */
	REG_RAX,	/* EAX, REG_R0 */
	REG_TRAPNO,	/* TRAPNO */
	REG_ERR,	/* ERR */
	REG_RIP,	/* EIP, REG_PC */
	REG_CS,		/* CS */
	REG_RFL,	/* EFL, REG_PS */
	REG_RSP,	/* UESP, REG_SP */
	REG_SS		/* SS */
};

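/*
 * Illustrative sketch (not part of the original source): how a 32bit
 * regset constant is translated. Per the table above, EAX is index 11
 * in the 32bit regset, so for a 64bit thread dtrace_getreg() (below)
 * remaps it through regmap[] before switching on the 64bit register set.
 */
#if 0
	uint_t reg = EAX;		/* 32bit regset index 11 */
	if (reg <= SS)
		reg = regmap[reg];	/* yields REG_RAX */
#endif
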
extern dtrace_id_t dtrace_probeid_error; /* special ERROR probe */

void
dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which,
    int fltoffs, int fault, uint64_t illval)
{
	/*
	 * When the ERROR probe fires, stash "illval" away here and
	 * special-case retrieving it in DIF_VARIABLE_ARG.
	 */
	state->dts_arg_error_illval = illval;
	dtrace_probe( dtrace_probeid_error, (uint64_t)(uintptr_t)state, epid, which, fltoffs, fault );
}

/*
 * Atomicity and synchronization
 */
void
dtrace_membar_producer(void)
{
	__asm__ volatile("sfence");
}

void
dtrace_membar_consumer(void)
{
	__asm__ volatile("lfence");
}

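/*
 * Illustrative sketch (not part of the original source) of the intended
 * producer/consumer pairing for the barriers above. "data", "ready",
 * "compute" and "consume" are hypothetical.
 */
#if 0
	/* producer: make the data visible before the flag */
	data = compute();
	dtrace_membar_producer();	/* sfence */
	ready = 1;

	/* consumer: observe the flag before reading the data */
	while (ready == 0)
		continue;
	dtrace_membar_consumer();	/* lfence */
	consume(data);
#endif
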
/*
 * Interrupt manipulation
 * XXX dtrace_getipl() can be called from probe context.
 */
int
dtrace_getipl(void)
{
	/*
	 * XXX Drat, get_interrupt_level is MACH_KERNEL_PRIVATE
	 * in osfmk/kern/cpu_data.h
	 */
	/* return get_interrupt_level(); */
	return (ml_at_interrupt_context() ? 1 : 0);
}

/*
 * MP coordination
 */

extern void mp_broadcast(
	void (*action_func)(void *),
	void *arg);

typedef struct xcArg {
	processorid_t cpu;
	dtrace_xcall_t f;
	void *arg;
} xcArg_t;

static void
xcRemote( void *foo )
{
	xcArg_t *pArg = (xcArg_t *)foo;

	if ( pArg->cpu == CPU->cpu_id || pArg->cpu == DTRACE_CPUALL ) {
		(pArg->f)(pArg->arg);
	}
}

/*
 * dtrace_xcall() is not called from probe context.
 */
void
dtrace_xcall(processorid_t cpu, dtrace_xcall_t f, void *arg)
{
	xcArg_t xcArg;

	xcArg.cpu = cpu;
	xcArg.f = f;
	xcArg.arg = arg;

	mp_broadcast( xcRemote, (void *)&xcArg);
}

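/*
 * Illustrative sketch (not part of the original source): mp_broadcast()
 * runs xcRemote() on every CPU, and xcRemote() filters on cpu_id, so
 * DTRACE_CPUALL reaches all CPUs while a specific id reaches just one.
 * "my_percpu_func" is a hypothetical callback.
 */
#if 0
static void
my_percpu_func(void *ignored)
{
#pragma unused(ignored)
	/* runs once on each CPU that matches the requested id */
}

static void
example_xcall_usage(void)
{
	dtrace_xcall(DTRACE_CPUALL, my_percpu_func, NULL);	/* every CPU */
	dtrace_xcall(0, my_percpu_func, NULL);			/* CPU 0 only */
}
#endif
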
/*
 * Runtime and ABI
 */

uint64_t
dtrace_getreg(struct regs *savearea, uint_t reg)
{
	boolean_t is64Bit = proc_is64bit(current_proc());
	x86_saved_state_t *regs = (x86_saved_state_t *)savearea;

	if (is64Bit) {
		if (reg <= SS) {
			reg = regmap[reg];
		} else {
			reg -= (SS + 1);
		}

		switch (reg) {
		case REG_RDI:
			return (uint64_t)(regs->ss_64.rdi);
		case REG_RSI:
			return (uint64_t)(regs->ss_64.rsi);
		case REG_RDX:
			return (uint64_t)(regs->ss_64.rdx);
		case REG_RCX:
			return (uint64_t)(regs->ss_64.rcx);
		case REG_R8:
			return (uint64_t)(regs->ss_64.r8);
		case REG_R9:
			return (uint64_t)(regs->ss_64.r9);
		case REG_RAX:
			return (uint64_t)(regs->ss_64.rax);
		case REG_RBX:
			return (uint64_t)(regs->ss_64.rbx);
		case REG_RBP:
			return (uint64_t)(regs->ss_64.rbp);
		case REG_R10:
			return (uint64_t)(regs->ss_64.r10);
		case REG_R11:
			return (uint64_t)(regs->ss_64.r11);
		case REG_R12:
			return (uint64_t)(regs->ss_64.r12);
		case REG_R13:
			return (uint64_t)(regs->ss_64.r13);
		case REG_R14:
			return (uint64_t)(regs->ss_64.r14);
		case REG_R15:
			return (uint64_t)(regs->ss_64.r15);
		case REG_FS:
			return (uint64_t)(regs->ss_64.fs);
		case REG_GS:
			return (uint64_t)(regs->ss_64.gs);
		case REG_TRAPNO:
			return (uint64_t)(regs->ss_64.isf.trapno);
		case REG_ERR:
			return (uint64_t)(regs->ss_64.isf.err);
		case REG_RIP:
			return (uint64_t)(regs->ss_64.isf.rip);
		case REG_CS:
			return (uint64_t)(regs->ss_64.isf.cs);
		case REG_SS:
			return (uint64_t)(regs->ss_64.isf.ss);
		case REG_RFL:
			return (uint64_t)(regs->ss_64.isf.rflags);
		case REG_RSP:
			return (uint64_t)(regs->ss_64.isf.rsp);
		case REG_DS:
		case REG_ES:
		default:
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}

	} else {	/* is 32bit user */
		/* beyond register SS */
		if (reg > x86_SAVED_STATE32_COUNT - 1) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}
		return (uint64_t)((unsigned int *)(&(regs->ss_32.gs)))[reg];
	}
}

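/*
 * Illustrative note (not part of the original source): the 32bit case
 * above indexes from &regs->ss_32.gs, which assumes gs is the first
 * register saved in x86_saved_state32_t and that the fields follow in
 * the same order as the 32bit regset. Under that assumption, e.g.:
 */
#if 0
	ASSERT(((unsigned int *)&regs->ss_32.gs)[EAX] == regs->ss_32.eax);
#endif
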
#define RETURN_OFFSET 4
#define RETURN_OFFSET64 8

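/*
 * These offsets encode the standard C frame layout: given a frame
 * pointer "sp", the caller's frame pointer is saved at *sp and the
 * return address at *(sp + RETURN_OFFSET) on i386, or
 * *(sp + RETURN_OFFSET64) on x86_64. The unwind loops below rely on
 * exactly this layout.
 */
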
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, user_addr_t pc,
    user_addr_t sp)
{
#if 0
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

	uintptr_t oldcontext = lwp->lwp_oldcontext; /* XXX signal stack crawl */
	size_t s1, s2;
#endif
	int ret = 0;
	boolean_t is64Bit = proc_is64bit(current_proc());

	ASSERT(pcstack == NULL || pcstack_limit > 0);

#if 0 /* XXX signal stack crawl */
	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	while (pc != 0) {
		ret++;
		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (sp == 0)
			break;

#if 0 /* XXX signal stack crawl */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		}
		else
#endif
		{
			if (is64Bit) {
				pc = dtrace_fuword64((sp + RETURN_OFFSET64));
				sp = dtrace_fuword64(sp);
			} else {
				pc = dtrace_fuword32((sp + RETURN_OFFSET));
				sp = dtrace_fuword32(sp);
			}
		}

#if 0 /* XXX */
		/*
		 * This is totally bogus: if we faulted, we're going to clear
		 * the fault and break. This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
#endif
	}

	return (ret);
}

/*
 * The return value indicates if we've modified the stack.
 */
static int
dtrace_adjust_stack(uint64_t **pcstack, int *pcstack_limit, user_addr_t *pc,
    user_addr_t sp)
{
	int64_t missing_tos;
	int rc = 0;
	boolean_t is64Bit = proc_is64bit(current_proc());

	ASSERT(pc != NULL);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * If we found ourselves in an entry probe, the frame pointer
		 * has not yet been pushed (that happens in the function
		 * prologue). The best approach is to add the current pc as a
		 * missing top of stack, and back the pc up to the caller,
		 * which is stored at the current stack pointer address since
		 * the call instruction puts it there right before the branch.
		 */

		missing_tos = *pc;

		if (is64Bit)
			*pc = dtrace_fuword64(sp);
		else
			*pc = dtrace_fuword32(sp);
	} else {
		/*
		 * We might have a top-of-stack override, in which case we just
		 * add that frame without question to the top. This happens in
		 * return probes where you have a valid frame pointer, but it's
		 * for the caller's frame, and you'd like to add the pc of the
		 * return site to the frame.
		 */
		missing_tos = cpu_core[CPU->cpu_id].cpuc_missing_tos;
	}

	if (missing_tos != 0) {
		if (pcstack != NULL && pcstack_limit != NULL) {
			/*
			 * If the missing top of stack has been filled out,
			 * then we add it and adjust the size.
			 */
			*(*pcstack)++ = missing_tos;
			(*pcstack_limit)--;
		}
		/*
		 * Return 1 because we would have changed the stack whether or
		 * not it was passed in. This ensures the stack count is
		 * correct.
		 */
		rc = 1;
	}
	return rc;
}

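/*
 * Illustrative timeline (not part of the original source) of the
 * entry-probe case handled above. At the instant an entry probe fires,
 * the callee's prologue has not yet run:
 *
 *	caller:	call  callee	; pushes return address, so *sp == retaddr
 *	callee:	push  %rbp	; <-- not yet executed at probe time
 *		mov   %rsp,%rbp
 *
 * Hence the probe pc becomes the missing top of stack, and the true
 * caller pc is recovered by reading the word at sp.
 */
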
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	x86_saved_state_t *regs;
	user_addr_t pc, sp, fp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	int n;
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (x86_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		goto zero;

	*pcstack++ = (uint64_t)proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	if (is64Bit) {
		pc = regs->ss_64.isf.rip;
		sp = regs->ss_64.isf.rsp;
		fp = regs->ss_64.rbp;
	} else {
		pc = regs->ss_32.eip;
		sp = regs->ss_32.uesp;
		fp = regs->ss_32.ebp;
	}

	/*
	 * The return value indicates if we've modified the stack.
	 * Since there is nothing else to fix up in either case,
	 * we can safely ignore it here.
	 */
	(void)dtrace_adjust_stack(&pcstack, &pcstack_limit, &pc, sp);

	if (pcstack_limit <= 0)
		return;

	/*
	 * Note that unlike ppc, the x86 code does not use
	 * CPU_DTRACE_USTACK_FP. This is because x86 always
	 * traces from the fp, even in syscall/profile/fbt
	 * providers.
	 */
	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}

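/*
 * Illustrative note (not part of the original source): for a ustack()
 * of depth N with buffer limit L, the function above produces
 *
 *	pcstack[0]	pid of the current process
 *	pcstack[1]	missing top of stack, if dtrace_adjust_stack() added one
 *	pcstack[...]	user return addresses, innermost first
 *	pcstack[N..L-1]	zero-filled
 */
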
int
dtrace_getustackdepth(void)
{
	thread_t thread = current_thread();
	x86_saved_state_t *regs;
	user_addr_t pc, sp, fp;
	int n = 0;
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (thread == NULL)
		return 0;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	regs = (x86_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		return 0;

	if (is64Bit) {
		pc = regs->ss_64.isf.rip;
		sp = regs->ss_64.isf.rsp;
		fp = regs->ss_64.rbp;
	} else {
		pc = regs->ss_32.eip;
		sp = regs->ss_32.uesp;
		fp = regs->ss_32.ebp;
	}

	if (dtrace_adjust_stack(NULL, NULL, &pc, sp) == 1) {
		/*
		 * We would have adjusted the stack if we had supplied one
		 * (that is what rc == 1 means). Also, as a side effect, the
		 * pc might have been fixed up, which is good for calling in
		 * to dtrace_getustack_common.
		 */
		n++;
	}

	/*
	 * Note that unlike ppc, the x86 code does not use
	 * CPU_DTRACE_USTACK_FP. This is because x86 always
	 * traces from the fp, even in syscall/profile/fbt
	 * providers.
	 */

	n += dtrace_getustack_common(NULL, 0, pc, fp);

	return (n);
}

void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	savearea_t *regs;
	user_addr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
#if 0
	uintptr_t oldcontext;
	size_t s1, s2;
#endif
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (savearea_t *)find_user_regs(thread);
	if (regs == NULL)
		goto zero;

	*pcstack++ = (uint64_t)proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = regs->ss_32.eip;
	sp = regs->ss_32.ebp;

#if 0 /* XXX signal stack crawl */
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	if (dtrace_adjust_stack(&pcstack, &pcstack_limit, &pc, sp) == 1) {
		/*
		 * we made a change.
		 */
		*fpstack++ = 0;
		if (pcstack_limit <= 0)
			return;
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (sp == 0)
			break;

#if 0 /* XXX signal stack crawl */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		}
		else
#endif
		{
			if (is64Bit) {
				pc = dtrace_fuword64((sp + RETURN_OFFSET64));
				sp = dtrace_fuword64(sp);
			} else {
				pc = dtrace_fuword32((sp + RETURN_OFFSET));
				sp = dtrace_fuword32(sp);
			}
		}

#if 0 /* XXX */
		/*
		 * This is totally bogus: if we faulted, we're going to clear
		 * the fault and break. This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
#endif
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}

void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	struct frame *fp = (struct frame *)__builtin_frame_address(0);
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int last = 0;
	uintptr_t pc;
	uintptr_t caller = CPU->cpu_dtrace_caller;
	int on_intr;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)dtrace_get_cpu_int_stack_top();
	else
		stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + kernel_stack_size);

	minfp = fp;

	aframes++;

	if (intrpc != NULL && depth < pcstack_limit)
		pcstack[depth++] = (pc_t)intrpc;

	while (depth < pcstack_limit) {
		nextfp = *(struct frame **)fp;
#if defined(__x86_64__)
		pc = *(uintptr_t *)(((uintptr_t)fp) + RETURN_OFFSET64);
#else
		pc = *(uintptr_t *)(((uintptr_t)fp) + RETURN_OFFSET);
#endif

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread());

				minfp = (struct frame *)kstack_base;
				stacktop = (struct frame *)(kstack_base + kernel_stack_size);

				on_intr = 0;
				continue;
			}
			/*
			 * This is the last frame we can process; indicate
			 * that we should return after processing this frame.
			 */
			last = 1;
		}

		if (aframes > 0) {
			if (--aframes == 0 && caller != 0) {
				/*
				 * We've just run out of artificial frames,
				 * and we have a valid caller -- fill it in
				 * now.
				 */
				ASSERT(depth < pcstack_limit);
				pcstack[depth++] = (pc_t)caller;
				caller = 0;
			}
		} else {
			if (depth < pcstack_limit)
				pcstack[depth++] = (pc_t)pc;
		}

		if (last) {
			while (depth < pcstack_limit)
				pcstack[depth++] = 0;
			return;
		}

		fp = nextfp;
		minfp = fp;
	}
}

struct frame {
	struct frame *backchain;
	uintptr_t retaddr;
};

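/*
 * Illustrative sketch (not part of the original source): walking the
 * kernel stack through the struct frame layout above. On x86 the frame
 * pointer addresses a saved { old fp, return address } pair, so
 * following "backchain" visits each caller in turn. A real walker also
 * needs the bounds checks dtrace_getpcstack() applies.
 */
#if 0
static void
print_backtrace(void)
{
	struct frame *fp = (struct frame *)__builtin_frame_address(0);

	while (fp != NULL) {
		printf("pc=%p\n", (void *)fp->retaddr);
		fp = fp->backchain;	/* hop to the caller's frame */
	}
}
#endif
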
uint64_t
dtrace_getarg(int arg, int aframes)
{
	uint64_t val;
	struct frame *fp = (struct frame *)__builtin_frame_address(0);
	uintptr_t *stack;
	uintptr_t pc;
	int i;

#if defined(__x86_64__)
	/*
	 * A total of 6 arguments are passed via registers; any argument with
	 * index of 5 or lower is therefore in a register.
	 */
	int inreg = 5;
#endif

	for (i = 1; i <= aframes; i++) {
		fp = fp->backchain;
		pc = fp->retaddr;

		if (pc == (uintptr_t)dtrace_invop_callsite) {
#if defined(__i386__)
			/*
			 * If we pass through the invalid op handler, we will
			 * use the pointer that it passed to the stack as the
			 * second argument to dtrace_invop() as the pointer to
			 * the frame we're hunting for.
			 */

			stack = (uintptr_t *)&fp[1]; /* Find marshalled arguments */
			fp = (struct frame *)stack[1]; /* Grab *second* argument */
			stack = (uintptr_t *)&fp[1]; /* Find marshalled arguments */
#elif defined(__x86_64__)
			/*
			 * In the case of x86_64, we will use the pointer to the
			 * save area structure that was pushed when we took the
			 * trap. To get this structure, we must increment
			 * beyond the frame structure. If the
			 * argument that we're seeking is passed on the stack,
			 * we'll pull the true stack pointer out of the saved
			 * registers and decrement our argument by the number
			 * of arguments passed in registers; if the argument
			 * we're seeking is passed in registers, we can just
			 * load it directly.
			 */

			/* fp points to frame of dtrace_invop() activation. */
			fp = fp->backchain; /* to fbt_perfcallback() activation. */
			fp = fp->backchain; /* to kernel_trap() activation. */
			fp = fp->backchain; /* to trap_from_kernel() activation. */

			x86_saved_state_t *tagged_regs = (x86_saved_state_t *)&fp[1];
			x86_saved_state64_t *saved_state = saved_state64(tagged_regs);

			if (arg <= inreg) {
				stack = (uintptr_t *)&saved_state->rdi;
			} else {
				stack = (uintptr_t *)(saved_state->isf.rsp);
				arg -= inreg;
			}
#else
#error Unknown arch
#endif
			goto load;
		}
	}

	/*
	 * Arrive here when provider has called dtrace_probe directly.
	 */
	arg++; /* Advance past probeID */

#if defined(__x86_64__)
	if (arg <= inreg) {
		/*
		 * This shouldn't happen. If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	arg -= (inreg + 1);
#endif
	stack = (uintptr_t *)&fp[1]; /* Find marshalled arguments */

load:
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	/* dtrace_probe arguments arg0 .. arg4 are 64bits wide */
	val = *(((uint64_t *)stack) + arg);
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (val);
}

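/*
 * Illustrative note (not part of the original source): the x86_64 path
 * above relies on the SysV AMD64 calling convention, in which the first
 * six integer arguments travel in rdi, rsi, rdx, rcx, r8 and r9 (hence
 * inreg = 5) and later arguments go on the stack. Indexing from
 * &saved_state->rdi assumes the save area keeps those six argument
 * slots contiguous, starting at rdi. For stack arguments, "arg -= inreg"
 * lands one slot past the return address at the saved rsp.
 */
#if 0
	uint64_t a0 = ((uint64_t *)&saved_state->rdi)[0];	/* arg0, from rdi */
	uint64_t a6 = ((uint64_t *)saved_state->isf.rsp)[1];	/* arg6, on the stack */
#endif
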
/*
 * Load/Store Safety
 */
void
dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
{
	/*
	 * "base" is the smallest toxic address in the range, "limit" is the first
	 * VALID address greater than "base".
	 */
	func(0x0, VM_MIN_KERNEL_AND_KEXT_ADDRESS);
	if (VM_MAX_KERNEL_ADDRESS < ~(uintptr_t)0)
		func(VM_MAX_KERNEL_ADDRESS + 1, ~(uintptr_t)0);
}

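/*
 * Illustrative sketch (not part of the original source): a caller hands
 * dtrace_toxic_ranges() a callback that receives each [base, limit)
 * pair; with the calls above that is everything below the kernel/kext
 * range and everything above the highest kernel address.
 * "record_toxic_range" is a hypothetical callback.
 */
#if 0
static void
record_toxic_range(uintptr_t base, uintptr_t limit)
{
	printf("toxic range: [0x%lx, 0x%lx)\n",
	    (unsigned long)base, (unsigned long)limit);
}

static void
example_toxic_usage(void)
{
	dtrace_toxic_ranges(record_toxic_range);
}
#endif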