/*
 * Copyright (c) 2005-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/thread.h>
#include <mach/thread_status.h>

typedef x86_saved_state_t savearea_t;

#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <libkern/OSAtomic.h>
#include <kern/thread_call.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <miscfs/devfs/devfs.h>
#include <mach/vm_param.h>
#include <machine/pal_routines.h>
#include <machine/trap.h>

/*
 * APPLE NOTE:  The regmap is used to decode which 64bit uregs[] register
 * is being accessed when passed the 32bit uregs[] constant (based on
 * the reg.d translator file). The dtrace_getreg() is smart enough to handle
 * the register mappings.  The register set definitions are the same as
 * those used by the fasttrap_getreg code.
 */
#include "fasttrap_regset.h"
static const uint8_t regmap[19] = {
    REG_GS,             /* GS */
    REG_FS,             /* FS */
    REG_ES,             /* ES */
    REG_DS,             /* DS */
    REG_RDI,            /* EDI */
    REG_RSI,            /* ESI */
    REG_RBP,            /* EBP, REG_FP  */
    REG_RSP,            /* ESP */
    REG_RBX,            /* EBX */
    REG_RDX,            /* EDX, REG_R1  */
    REG_RCX,            /* ECX */
    REG_RAX,            /* EAX, REG_R0  */
    REG_TRAPNO,         /* TRAPNO */
    REG_ERR,            /* ERR */
    REG_RIP,            /* EIP, REG_PC  */
    REG_CS,             /* CS */
    REG_RFL,            /* EFL, REG_PS  */
    REG_RSP,            /* UESP, REG_SP */
    REG_SS              /* SS */
};

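/*
 * Illustrative sketch (not built): how a 32-bit uregs[] constant such as
 * EBP from fasttrap_regset.h is decoded through regmap before the 64-bit
 * saved state is consulted, per the APPLE NOTE above.
 */
#if 0
        uint_t reg = EBP;       /* 32-bit constant from the reg.d translator */
        reg = regmap[reg];      /* decodes to REG_RBP in the 64-bit register set */
#endif
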
extern dtrace_id_t      dtrace_probeid_error;   /* special ERROR probe */

void
dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which,
    int fltoffs, int fault, uint64_t illval)
{
        /*
         * For the case of the error probe firing, let's
         * stash away "illval" here, and special-case retrieving it in DIF_VARIABLE_ARG.
         */
        state->dts_arg_error_illval = illval;
        dtrace_probe( dtrace_probeid_error, (uint64_t)(uintptr_t)state, epid, which, fltoffs, fault );
}

/*
 * Atomicity and synchronization
 */
void
dtrace_membar_producer(void)
{
        __asm__ volatile("sfence");
}

void
dtrace_membar_consumer(void)
{
        __asm__ volatile("lfence");
}

/*
 * Interrupt manipulation
 * XXX dtrace_getipl() can be called from probe context.
 */
int
dtrace_getipl(void)
{
        /*
         * XXX Drat, get_interrupt_level is MACH_KERNEL_PRIVATE
         * in osfmk/kern/cpu_data.h
         */
        /* return get_interrupt_level(); */
        return (ml_at_interrupt_context() ? 1 : 0);
}

/*
 * MP coordination
 */
typedef struct xcArg {
        processorid_t   cpu;
        dtrace_xcall_t  f;
        void            *arg;
} xcArg_t;

static void
xcRemote( void *foo )
{
        xcArg_t *pArg = (xcArg_t *)foo;

        if ( pArg->cpu == CPU->cpu_id || pArg->cpu == DTRACE_CPUALL ) {
                (pArg->f)(pArg->arg);
        }
}

/*
 * dtrace_xcall() is not called from probe context.
 */
void
dtrace_xcall(processorid_t cpu, dtrace_xcall_t f, void *arg)
{
        xcArg_t xcArg;

        xcArg.cpu = cpu;
        xcArg.f = f;
        xcArg.arg = arg;

        if (cpu == DTRACE_CPUALL) {
                mp_cpus_call (CPUMASK_ALL, ASYNC, xcRemote, (void*)&xcArg);
        }
        else {
                mp_cpus_call (cpu_to_cpumask((cpu_t)cpu), ASYNC, xcRemote, (void*)&xcArg);
        }
}

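/*
 * Minimal usage sketch (not built): broadcasting a handler to every CPU,
 * or targeting a single one.  The handler and its argument are hypothetical.
 */
#if 0
        static void
        example_handler(void *arg)
        {
                (void)arg;      /* hypothetical per-CPU work */
        }

        dtrace_xcall(DTRACE_CPUALL, example_handler, NULL);     /* all CPUs */
        dtrace_xcall(0, example_handler, NULL);                 /* CPU 0 only */
#endif
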
/*
 * Initialization
 */
void
dtrace_isa_init(void)
{
        return;
}

/*
 * Runtime and ABI
 */
uint64_t
dtrace_getreg(struct regs *savearea, uint_t reg)
{
        boolean_t is64Bit = proc_is64bit(current_proc());
        x86_saved_state_t *regs = (x86_saved_state_t *)savearea;

        if (regs == NULL) {
                DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
                return (0);
        }

        if (is64Bit) {
                if (reg <= SS) {
                        reg = regmap[reg];
                } else {
                        reg -= (SS + 1);
                }

                switch (reg) {
                case REG_RDI:
                        return (uint64_t)(regs->ss_64.rdi);
                case REG_RSI:
                        return (uint64_t)(regs->ss_64.rsi);
                case REG_RDX:
                        return (uint64_t)(regs->ss_64.rdx);
                case REG_RCX:
                        return (uint64_t)(regs->ss_64.rcx);
                case REG_R8:
                        return (uint64_t)(regs->ss_64.r8);
                case REG_R9:
                        return (uint64_t)(regs->ss_64.r9);
                case REG_RAX:
                        return (uint64_t)(regs->ss_64.rax);
                case REG_RBX:
                        return (uint64_t)(regs->ss_64.rbx);
                case REG_RBP:
                        return (uint64_t)(regs->ss_64.rbp);
                case REG_R10:
                        return (uint64_t)(regs->ss_64.r10);
                case REG_R11:
                        return (uint64_t)(regs->ss_64.r11);
                case REG_R12:
                        return (uint64_t)(regs->ss_64.r12);
                case REG_R13:
                        return (uint64_t)(regs->ss_64.r13);
                case REG_R14:
                        return (uint64_t)(regs->ss_64.r14);
                case REG_R15:
                        return (uint64_t)(regs->ss_64.r15);
                case REG_FS:
                        return (uint64_t)(regs->ss_64.fs);
                case REG_GS:
                        return (uint64_t)(regs->ss_64.gs);
                case REG_TRAPNO:
                        return (uint64_t)(regs->ss_64.isf.trapno);
                case REG_ERR:
                        return (uint64_t)(regs->ss_64.isf.err);
                case REG_RIP:
                        return (uint64_t)(regs->ss_64.isf.rip);
                case REG_CS:
                        return (uint64_t)(regs->ss_64.isf.cs);
                case REG_SS:
                        return (uint64_t)(regs->ss_64.isf.ss);
                case REG_RFL:
                        return (uint64_t)(regs->ss_64.isf.rflags);
                case REG_RSP:
                        return (uint64_t)(regs->ss_64.isf.rsp);
                default:
                        DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
                        return (0);
                }
        } else {   /* is 32bit user */
                /* beyond register SS */
                if (reg > x86_SAVED_STATE32_COUNT - 1) {
                        DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
                        return (0);
                }
                return (uint64_t)((unsigned int *)(&(regs->ss_32.gs)))[reg];
        }
}

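/*
 * Illustrative sketch (not built) of why the 32-bit path above can index
 * the saved state directly: x86_saved_state32_t stores the registers
 * contiguously starting at gs, in the same order as the 32-bit uregs[]
 * constants, so the constant itself is the array index.  EBP here is the
 * constant from fasttrap_regset.h; regs is as in dtrace_getreg().
 */
#if 0
        unsigned int *uregs32 = (unsigned int *)&regs->ss_32.gs;
        uint64_t ebp = (uint64_t)uregs32[EBP];
#endif
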
uint64_t
dtrace_getvmreg(uint_t ndx)
{
        uint64_t reg = 0;
        boolean_t failed = FALSE;

        /* Any change in the vmread final opcode must be reflected in dtrace_handle_trap below. */
        __asm__ __volatile__(
                "vmread %2, %0\n"
                "ja 1f\n"
                "mov $1, %1\n"
                "1:\n"
        : "=a" (reg), "+r" (failed) : "D" ((uint64_t)ndx));

        /*
         * Check for fault in vmreg first. If DTrace has recovered the fault caused by
         * vmread above then the value in failed will be unreliable.
         */
        if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ILLOP)) {
                return 0;
        }

        /* If vmread executed but CF or ZF was set to 1, the read failed; report it. */
        if (failed) {
                DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
                cpu_core[CPU->cpu_id].cpuc_dtrace_illval = ndx;
                return 0;
        }

        return reg;
}

#define RETURN_OFFSET 4
#define RETURN_OFFSET64 8

static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, user_addr_t pc,
    user_addr_t sp)
{
        volatile uint16_t *flags =
            (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
#if 0
        uintptr_t oldcontext = lwp->lwp_oldcontext; /* XXX signal stack crawl */
        size_t s1, s2;
#endif
        int ret = 0;
        boolean_t is64Bit = proc_is64bit(current_proc());

        ASSERT(pcstack == NULL || pcstack_limit > 0);

#if 0 /* XXX signal stack crawl */
        if (p->p_model == DATAMODEL_NATIVE) {
                s1 = sizeof (struct frame) + 2 * sizeof (long);
                s2 = s1 + sizeof (siginfo_t);
        } else {
                s1 = sizeof (struct frame32) + 3 * sizeof (int);
                s2 = s1 + sizeof (siginfo32_t);
        }
#endif

        while (pc != 0) {
                ret++;
                if (pcstack != NULL) {
                        *pcstack++ = (uint64_t)pc;
                        pcstack_limit--;
                        if (pcstack_limit <= 0)
                                break;
                }

                if (sp == 0)
                        break;

#if 0 /* XXX signal stack crawl */
                if (oldcontext == sp + s1 || oldcontext == sp + s2) {
                        if (p->p_model == DATAMODEL_NATIVE) {
                                ucontext_t *ucp = (ucontext_t *)oldcontext;
                                greg_t *gregs = ucp->uc_mcontext.gregs;

                                sp = dtrace_fulword(&gregs[REG_FP]);
                                pc = dtrace_fulword(&gregs[REG_PC]);

                                oldcontext = dtrace_fulword(&ucp->uc_link);
                        } else {
                                ucontext32_t *ucp = (ucontext32_t *)oldcontext;
                                greg32_t *gregs = ucp->uc_mcontext.gregs;

                                sp = dtrace_fuword32(&gregs[EBP]);
                                pc = dtrace_fuword32(&gregs[EIP]);

                                oldcontext = dtrace_fuword32(&ucp->uc_link);
                        }
                } else
#endif
                {
                        if (is64Bit) {
                                pc = dtrace_fuword64((sp + RETURN_OFFSET64));
                                sp = dtrace_fuword64(sp);
                        } else {
                                pc = dtrace_fuword32((sp + RETURN_OFFSET));
                                sp = dtrace_fuword32(sp);
                        }
                }

                /* Truncate ustack if the iterator causes fault. */
                if (*flags & CPU_DTRACE_FAULT) {
                        *flags &= ~CPU_DTRACE_FAULT;
                        break;
                }
        }

        return (ret);
}

/*
 * The return value indicates if we've modified the stack.
 */
static int
dtrace_adjust_stack(uint64_t **pcstack, int *pcstack_limit, user_addr_t *pc,
    user_addr_t sp)
{
        volatile uint16_t *flags = (volatile uint16_t *) &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
        int64_t missing_tos;
        int rc = 0;
        boolean_t is64Bit = proc_is64bit(current_proc());

        ASSERT(pc != NULL);

        if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
                /*
                 * If we found ourselves in an entry probe, the frame pointer has not
                 * yet been pushed (that happens in the
                 * function prologue).  The best approach is to
                 * add the current pc as a missing top of stack,
                 * and back the pc up to the caller, which is stored at the
                 * current stack pointer address since the call
                 * instruction puts it there right before
                 * the branch.
                 */
                missing_tos = *pc;

                if (is64Bit)
                        *pc = dtrace_fuword64(sp);
                else
                        *pc = dtrace_fuword32(sp);

                /* Truncate ustack if the iterator causes fault. */
                if (*flags & CPU_DTRACE_FAULT) {
                        *flags &= ~CPU_DTRACE_FAULT;
                }
        } else {
                /*
                 * We might have a top of stack override, in which case we just
                 * add that frame without question to the top.  This
                 * happens in return probes where you have a valid
                 * frame pointer, but it's for the caller's frame
                 * and you'd like to add the pc of the return site
                 * to the frame.
                 */
                missing_tos = cpu_core[CPU->cpu_id].cpuc_missing_tos;
        }

        if (missing_tos != 0) {
                if (pcstack != NULL && pcstack_limit != NULL) {
                        /*
                         * If the missing top of stack has been filled out, then
                         * we add it and adjust the size.
                         */
                        *(*pcstack)++ = missing_tos;
                        (*pcstack_limit)--;
                }
                /*
                 * return 1 because we would have changed the
                 * stack whether or not it was passed in.  This
                 * ensures the stack count is correct
                 */
                rc = 1;
        }
        return rc;
}

void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
        thread_t thread = current_thread();
        x86_saved_state_t *regs;
        user_addr_t pc, sp, fp;
        volatile uint16_t *flags =
            (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
        int n;
        boolean_t is64Bit = proc_is64bit(current_proc());

        if (*flags & CPU_DTRACE_FAULT)
                return;

        if (pcstack_limit <= 0)
                return;

        /*
         * If there's no user context we still need to zero the stack.
         */
        if (thread == NULL)
                goto zero;

        pal_register_cache_state(thread, VALID);
        regs = (x86_saved_state_t *)find_user_regs(thread);
        if (regs == NULL)
                goto zero;

        *pcstack++ = (uint64_t)dtrace_proc_selfpid();
        pcstack_limit--;

        if (pcstack_limit <= 0)
                return;

        if (is64Bit) {
                pc = regs->ss_64.isf.rip;
                sp = regs->ss_64.isf.rsp;
                fp = regs->ss_64.rbp;
        } else {
                pc = regs->ss_32.eip;
                sp = regs->ss_32.uesp;
                fp = regs->ss_32.ebp;
        }

        /*
         * The return value indicates if we've modified the stack.
         * Since there is nothing else to fix up in either case,
         * we can safely ignore it here.
         */
        (void)dtrace_adjust_stack(&pcstack, &pcstack_limit, &pc, sp);

        if (pcstack_limit <= 0)
                return;

        /*
         * Note that unlike ppc, the x86 code does not use
         * CPU_DTRACE_USTACK_FP. This is because x86 always
         * traces from the fp, even in syscall/profile/fbt
         * providers.
         */
        n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);
        ASSERT(n >= 0);
        ASSERT(n <= pcstack_limit);

        pcstack += n;
        pcstack_limit -= n;

zero:
        while (pcstack_limit-- > 0)
                *pcstack++ = 0;
}

int
dtrace_getustackdepth(void)
{
        thread_t thread = current_thread();
        x86_saved_state_t *regs;
        user_addr_t pc, sp, fp;
        int n = 0;
        boolean_t is64Bit = proc_is64bit(current_proc());

        if (thread == NULL)
                return 0;

        if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
                return (-1);

        pal_register_cache_state(thread, VALID);
        regs = (x86_saved_state_t *)find_user_regs(thread);
        if (regs == NULL)
                return 0;

        if (is64Bit) {
                pc = regs->ss_64.isf.rip;
                sp = regs->ss_64.isf.rsp;
                fp = regs->ss_64.rbp;
        } else {
                pc = regs->ss_32.eip;
                sp = regs->ss_32.uesp;
                fp = regs->ss_32.ebp;
        }

        if (dtrace_adjust_stack(NULL, NULL, &pc, sp) == 1) {
                /*
                 * we would have adjusted the stack if we had
                 * supplied one (that is what rc == 1 means).
                 * Also, as a side effect, the pc might have
                 * been fixed up, which is good for calling
                 * in to dtrace_getustack_common.
                 */
                n++;
        }

        /*
         * Note that unlike ppc, the x86 code does not use
         * CPU_DTRACE_USTACK_FP. This is because x86 always
         * traces from the fp, even in syscall/profile/fbt
         * providers.
         */
        n += dtrace_getustack_common(NULL, 0, pc, fp);

        return (n);
}

void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
        thread_t thread = current_thread();
        savearea_t *regs;
        user_addr_t pc, sp;
        volatile uint16_t *flags =
            (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
#if 0
        uintptr_t oldcontext;
        size_t s1, s2;
#endif
        boolean_t is64Bit = proc_is64bit(current_proc());

        if (*flags & CPU_DTRACE_FAULT)
                return;

        if (pcstack_limit <= 0)
                return;

        /*
         * If there's no user context we still need to zero the stack.
         */
        if (thread == NULL)
                goto zero;

        regs = (savearea_t *)find_user_regs(thread);
        if (regs == NULL)
                goto zero;

        *pcstack++ = (uint64_t)dtrace_proc_selfpid();
        pcstack_limit--;

        if (pcstack_limit <= 0)
                return;

        pc = regs->ss_32.eip;
        sp = regs->ss_32.ebp;

#if 0 /* XXX signal stack crawl */
        oldcontext = lwp->lwp_oldcontext;

        if (p->p_model == DATAMODEL_NATIVE) {
                s1 = sizeof (struct frame) + 2 * sizeof (long);
                s2 = s1 + sizeof (siginfo_t);
        } else {
                s1 = sizeof (struct frame32) + 3 * sizeof (int);
                s2 = s1 + sizeof (siginfo32_t);
        }
#endif

        if (dtrace_adjust_stack(&pcstack, &pcstack_limit, &pc, sp) == 1) {
                /*
                 * we made a change.
                 */
                *fpstack++ = 0;
                if (pcstack_limit <= 0)
                        return;
        }

        while (pc != 0) {
                *pcstack++ = (uint64_t)pc;
                *fpstack++ = sp;
                pcstack_limit--;
                if (pcstack_limit <= 0)
                        break;

                if (sp == 0)
                        break;

#if 0 /* XXX signal stack crawl */
                if (oldcontext == sp + s1 || oldcontext == sp + s2) {
                        if (p->p_model == DATAMODEL_NATIVE) {
                                ucontext_t *ucp = (ucontext_t *)oldcontext;
                                greg_t *gregs = ucp->uc_mcontext.gregs;

                                sp = dtrace_fulword(&gregs[REG_FP]);
                                pc = dtrace_fulword(&gregs[REG_PC]);

                                oldcontext = dtrace_fulword(&ucp->uc_link);
                        } else {
                                ucontext_t *ucp = (ucontext_t *)oldcontext;
                                greg_t *gregs = ucp->uc_mcontext.gregs;

                                sp = dtrace_fuword32(&gregs[EBP]);
                                pc = dtrace_fuword32(&gregs[EIP]);

                                oldcontext = dtrace_fuword32(&ucp->uc_link);
                        }
                } else
#endif
                {
                        if (is64Bit) {
                                pc = dtrace_fuword64((sp + RETURN_OFFSET64));
                                sp = dtrace_fuword64(sp);
                        } else {
                                pc = dtrace_fuword32((sp + RETURN_OFFSET));
                                sp = dtrace_fuword32(sp);
                        }
                }

                /* Truncate ustack if the iterator causes fault. */
                if (*flags & CPU_DTRACE_FAULT) {
                        *flags &= ~CPU_DTRACE_FAULT;
                        break;
                }
        }

zero:
        while (pcstack_limit-- > 0)
                *pcstack++ = 0;
}

void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
        struct frame *fp = (struct frame *)__builtin_frame_address(0);
        struct frame *nextfp, *minfp, *stacktop;
        int depth = 0;
        int last = 0;
        uintptr_t pc;
        uintptr_t caller = CPU->cpu_dtrace_caller;
        int on_intr;

        if ((on_intr = CPU_ON_INTR(CPU)) != 0)
                stacktop = (struct frame *)dtrace_get_cpu_int_stack_top();
        else
                stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + kernel_stack_size);

        minfp = fp;

        aframes++;              /* Account for dtrace_getpcstack() */

        if (intrpc != NULL && depth < pcstack_limit)
                pcstack[depth++] = (pc_t)intrpc;

        while (depth < pcstack_limit) {
                nextfp = *(struct frame **)fp;
                pc = *(uintptr_t *)(((uintptr_t)fp) + RETURN_OFFSET64);

                if (nextfp <= minfp || nextfp >= stacktop) {
                        if (on_intr) {
                                /*
                                 * Hop from interrupt stack to thread stack.
                                 */
                                vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread());

                                minfp = (struct frame *)kstack_base;
                                stacktop = (struct frame *)(kstack_base + kernel_stack_size);

                                on_intr = 0;
                                continue;
                        }
                        /*
                         * This is the last frame we can process; indicate
                         * that we should return after processing this frame.
                         */
                        last = 1;
                }

                if (aframes > 0) {
                        if (--aframes == 0 && caller != 0) {
                                /*
                                 * We've just run out of artificial frames,
                                 * and we have a valid caller -- fill it in
                                 * now.
                                 */
                                ASSERT(depth < pcstack_limit);
                                pcstack[depth++] = (pc_t)caller;
                                caller = 0;
                        }
                } else {
                        if (depth < pcstack_limit)
                                pcstack[depth++] = (pc_t)pc;
                }

                if (last) {
                        while (depth < pcstack_limit)
                                pcstack[depth++] = 0;
                        return;
                }

                fp = nextfp;
                minfp = fp;
        }
}

struct frame {
        struct frame *backchain;
        uintptr_t retaddr;
};

uint64_t
dtrace_getarg(int arg, int aframes, dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
        uint64_t val = 0;
        struct frame *fp = (struct frame *)__builtin_frame_address(0);
        uintptr_t *stack;
        uintptr_t pc;
        int i;

        /*
         * A total of 6 arguments are passed via registers; any argument with
         * index of 5 or lower is therefore in a register.
         */
        int inreg = 5;

        for (i = 1; i <= aframes; i++) {
                fp = fp->backchain;
                pc = fp->retaddr;

                if (dtrace_invop_callsite_pre != NULL
                        && pc  >  (uintptr_t)dtrace_invop_callsite_pre
                        && pc  <= (uintptr_t)dtrace_invop_callsite_post) {
                        /*
                         * In the case of x86_64, we will use the pointer to the
                         * save area structure that was pushed when we took the
                         * trap.  To get this structure, we must increment
                         * beyond the frame structure. If the
                         * argument that we're seeking is passed on the stack,
                         * we'll pull the true stack pointer out of the saved
                         * registers and decrement our argument by the number
                         * of arguments passed in registers; if the argument
                         * we're seeking is passed in registers, we can just
                         * load it directly.
                         */

                        /* fp points to frame of dtrace_invop() activation. */
                        fp = fp->backchain; /* to fbt_perfCallback() activation. */
                        fp = fp->backchain; /* to kernel_trap() activation. */
                        fp = fp->backchain; /* to trap_from_kernel() activation. */

                        x86_saved_state_t   *tagged_regs = (x86_saved_state_t *)&fp[1];
                        x86_saved_state64_t *saved_state = saved_state64(tagged_regs);

                        if (arg <= inreg) {
                                stack = (uintptr_t *)(void*)&saved_state->rdi;
                        } else {
                                fp = (struct frame *)(saved_state->isf.rsp);
                                stack = (uintptr_t *)&fp[1]; /* Find marshalled
                                                                arguments */
                                arg -= inreg + 1;
                        }
                        goto load;
                }
        }

        /*
         * We know that we did not come through a trap to get into
         * dtrace_probe() --  We arrive here when the provider has
         * called dtrace_probe() directly.
         * The probe ID is the first argument to dtrace_probe().
         * We must advance beyond that to get the argX.
         */
        arg++; /* Advance past probeID */

        if (arg <= inreg) {
                /*
                 * This shouldn't happen.  If the argument is passed in a
                 * register then it should have been, well, passed in a
                 * register...
                 */
                DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
                return (0);
        }

        arg -= (inreg + 1);
        stack = (uintptr_t *)&fp[1]; /* Find marshalled arguments */

load:
        if (dtrace_canload((uint64_t)(stack + arg), sizeof(uint64_t),
                mstate, vstate)) {
                /* dtrace_probe arguments arg0 ... arg4 are 64bits wide */
                val = dtrace_load64((uint64_t)(stack + arg));
        }

        return (val);
}

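/*
 * Illustrative sketch (not built) of the register/stack split assumed
 * above under the x86_64 SysV calling convention: the first six integer
 * arguments live in registers, laid out from saved_state->rdi in the
 * save area, and the rest are marshalled on the stack past the frame
 * record.  saved_state and fp are as in the trap path of dtrace_getarg().
 */
#if 0
        if (arg <= 5) {
                /* register-passed: index into the saved GP registers */
                val = ((uintptr_t *)&saved_state->rdi)[arg];
        } else {
                /* stack-passed: re-base the index past the frame record */
                val = ((uintptr_t *)&fp[1])[arg - 6];
        }
#endif
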
/*
 * Load/Store Safety
 */
void
dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
{
        /*
         * "base" is the smallest toxic address in the range, "limit" is the first
         * VALID address greater than "base".
         */
        func(0x0, VM_MIN_KERNEL_AND_KEXT_ADDRESS);
        if (VM_MAX_KERNEL_ADDRESS < ~(uintptr_t)0)
                func(VM_MAX_KERNEL_ADDRESS + 1, ~(uintptr_t)0);
}

 873 extern boolean_t 
dtrace_handle_trap(int, x86_saved_state_t 
*); 
 876 dtrace_handle_trap(int trapno
, x86_saved_state_t 
*state
) 
 878         x86_saved_state64_t 
*saved_state 
= saved_state64(state
); 
 880         if (!DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT
)) { 
 885          * General purpose solution would require pulling in disassembler. Right now there 
 886          * is only one specific case to be handled so it is hardcoded here. 
 888         if (trapno 
== T_INVALID_OPCODE
) { 
 889                 uint8_t *inst 
= (uint8_t *)saved_state
->isf
.rip
; 
 891                 /* vmread %rdi, %rax */ 
 892                 if (inst
[0] == 0x0f && inst
[1] == 0x78 && inst
[2] == 0xf8) { 
 893                         DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP
); 
 894                         saved_state
->isf
.rip 
+= 3;