/*
 * Copyright (c) 2005-2008 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#define MACH__POSIX_C_SOURCE_PRIVATE 1	/* pulls in suitable savearea from
					 * mach/ppc/thread_status.h */
#include <arm/caches_internal.h>
#include <arm/proc_reg.h>

#include <kern/thread.h>
#include <mach/thread_status.h>

#if __has_include(<ptrauth.h>)
#include <ptrauth.h>
#endif

#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <libkern/OSAtomic.h>
#include <kern/simple_lock.h>
#include <kern/sched_prim.h>	/* for thread_wakeup() */
#include <kern/thread_call.h>
#include <kern/task.h>
#include <miscfs/devfs/devfs.h>
#include <mach/vm_param.h>
extern struct arm_saved_state *find_kern_regs(thread_t);

extern dtrace_id_t dtrace_probeid_error;	/* special ERROR probe */

typedef arm_saved_state_t savearea_t;
extern lck_attr_t *dtrace_lck_attr;
extern lck_grp_t *dtrace_lck_grp;
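
/*
 * Frame layout note (assumes the standard AAPCS64 frame record): each
 * function prologue saves the pair {previous fp, return address}, so
 * following backchain and reading retaddr walks the call stack. The
 * stack walkers below rely on this shape.
 */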
struct frame {
	struct frame *backchain;
	uintptr_t retaddr;
};
/*
 * Atomicity and synchronization
 */
void
dtrace_membar_producer(void)
{
#if __ARM_SMP__
	__asm__ volatile("dmb ish" : : : "memory");
#else
	__asm__ volatile("nop" : : : "memory");
#endif
}

void
dtrace_membar_consumer(void)
{
#if __ARM_SMP__
	__asm__ volatile("dmb ish" : : : "memory");
#else
	__asm__ volatile("nop" : : : "memory");
#endif
}
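
/*
 * "dmb ish" is a full data memory barrier across the inner-shareable
 * domain. A store-store barrier (dmb ishst) would be sufficient for the
 * producer side; both helpers conservatively use the full barrier.
 */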
/*
 * Interrupt manipulation
 * XXX dtrace_getipl() can be called from probe context.
 */
int
dtrace_getipl(void)
{
	/*
	 * XXX Drat, get_interrupt_level is MACH_KERNEL_PRIVATE
	 * in osfmk/kern/cpu_data.h
	 */
	/* return get_interrupt_level(); */
	return (ml_at_interrupt_context() ? 1 : 0);
}
/*
 * Cross-call state: dt_xc_lock serializes broadcasts, dt_xc_sync counts
 * CPUs that have not yet run the handler.
 */
decl_lck_mtx_data(static, dt_xc_lock);
static uint32_t dt_xc_sync;

typedef struct xcArg {
	processorid_t cpu;
	dtrace_xcall_t f;
	void *arg;
} xcArg_t;
static void
xcRemote(void *foo)
{
	xcArg_t *pArg = (xcArg_t *) foo;

	/* Run the handler if this CPU is the target, or the call is for all CPUs */
	if (pArg->cpu == CPU->cpu_id || pArg->cpu == DTRACE_CPUALL)
		(pArg->f) (pArg->arg);

	/* The last CPU to finish wakes the waiting broadcaster */
	if (hw_atomic_sub(&dt_xc_sync, 1) == 0)
		thread_wakeup((event_t) &dt_xc_sync);
}
/*
 * dtrace_xcall() is not called from probe context.
 */
void
dtrace_xcall(processorid_t cpu, dtrace_xcall_t f, void *arg)
{
#if __ARM_SMP__
	/* Only one dtrace_xcall in flight allowed */
	lck_mtx_lock(&dt_xc_lock);

	xcArg_t xcArg;

	xcArg.cpu = cpu;
	xcArg.f = f;
	xcArg.arg = arg;

	cpu_broadcast_xcall(&dt_xc_sync, TRUE, xcRemote, (void*) &xcArg);

	lck_mtx_unlock(&dt_xc_lock);
#else
	/* On uniprocessor systems, the cpu should always be either ourselves or all */
	ASSERT(cpu == CPU->cpu_id || cpu == DTRACE_CPUALL);

	/* If we're not threading on the other CPUs, we can just call the function */
	f(arg);
#endif
}
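
/*
 * Usage sketch (the handler name is hypothetical, not defined in this
 * file): a caller that must run a routine on every CPU could do
 *
 *	static void example_sync(void *arg) { ... }
 *	...
 *	dtrace_xcall(DTRACE_CPUALL, example_sync, NULL);
 *
 * Because xcRemote() decrements dt_xc_sync and wakes the broadcaster
 * only when the count hits zero, dtrace_xcall() returns only after
 * every targeted CPU has run the handler.
 */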
/*
 * Initialization
 */
void
dtrace_isa_init(void)
{
	lck_mtx_init(&dt_xc_lock, dtrace_lck_grp, dtrace_lck_attr);
	return;
}
/*
 * Register definitions
 */
#define ARM_FP 7
#define ARM_SP 13
#define ARM_LR 14
#define ARM_PC 15
#define ARM_CPSR 16

#define ARM64_FP 29
#define ARM64_LR 30
#define ARM64_SP 31
#define ARM64_PC 32
#define ARM64_CPSR 33
uint64_t
dtrace_getreg(struct regs * savearea, uint_t reg)
{
	struct arm_saved_state *regs = (struct arm_saved_state *) savearea;

	if (regs == NULL) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	if (is_saved_state32(regs)) {
		// Fix special registers if user is 32 bits
		switch (reg) {
		case ARM64_FP:
			reg = ARM_FP;
			break;

		case ARM64_SP:
			reg = ARM_SP;
			break;

		case ARM64_LR:
			reg = ARM_LR;
			break;

		case ARM64_PC:
			reg = ARM_PC;
			break;

		case ARM64_CPSR:
			reg = ARM_CPSR;
			break;
		}
	}

	if (!check_saved_state_reglimit(regs, reg)) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	return ((uint64_t)get_saved_state_reg(regs, reg));
}
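
/*
 * Example of the remapping above: a request for ARM64_PC made against a
 * 32-bit task's saved state is redirected to ARM_PC, the program-counter
 * slot of the AArch32 register layout, before the bounds check runs.
 */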
#define RETURN_OFFSET 4
#define RETURN_OFFSET64 8
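
/*
 * RETURN_OFFSET and RETURN_OFFSET64 locate the saved return address
 * within a frame record: frames store {fp, lr} as two pointer-sized
 * words, so lr sits at fp + 4 for 32-bit processes and at fp + 8 for
 * 64-bit processes.
 */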
static int
dtrace_getustack_common(uint64_t * pcstack, int pcstack_limit, user_addr_t pc,
			user_addr_t sp)
{
	int ret = 0;
	boolean_t is64bit = proc_is64bit_data(current_proc());

	ASSERT(pcstack == NULL || pcstack_limit > 0);

	while (pc != 0) {
		ret++;
		if (pcstack != NULL) {
			*pcstack++ = (uint64_t) pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (sp == 0)
			break;

		/* Follow the frame chain: saved lr, then saved fp */
		if (is64bit) {
			pc = dtrace_fuword64((sp + RETURN_OFFSET64));
			sp = dtrace_fuword64(sp);
		} else {
			pc = dtrace_fuword32((sp + RETURN_OFFSET));
			sp = dtrace_fuword32(sp);
		}
	}

	return (ret);
}
void
dtrace_getupcstack(uint64_t * pcstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	savearea_t *regs;
	user_addr_t pc, sp, fp;
	volatile uint16_t *flags = (volatile uint16_t *) &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	int n;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (savearea_t *) find_user_regs(thread);
	if (regs == NULL)
		goto zero;

	*pcstack++ = (uint64_t)dtrace_proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = get_saved_state_pc(regs);
	sp = get_saved_state_sp(regs);
	fp = get_saved_state_fp(regs);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t) pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = get_saved_state_lr(regs);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);

	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0ULL;
}
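
/*
 * Layout note: the first entry dtrace_getupcstack() writes is the pid
 * (dtrace_proc_selfpid()), followed by the pc chain; any unused slots
 * at the tail are zero-filled via the zero: path.
 */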
int
dtrace_getustackdepth(void)
{
	thread_t thread = current_thread();
	savearea_t *regs;
	user_addr_t pc, sp, fp;
	int n = 0;

	if (thread == NULL)
		return 0;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	regs = (savearea_t *) find_user_regs(thread);
	if (regs == NULL)
		return 0;

	pc = get_saved_state_pc(regs);
	sp = get_saved_state_sp(regs);
	fp = get_saved_state_fp(regs);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		n++;
		pc = get_saved_state_lr(regs);
	}

	/*
	 * Note that unlike ppc, the arm code does not use
	 * CPU_DTRACE_USTACK_FP. This is because arm always
	 * traces from the sp, even in syscall/profile/fbt
	 * providers.
	 */

	n += dtrace_getustack_common(NULL, 0, pc, fp);

	return (n);
}
void
dtrace_getufpstack(uint64_t * pcstack, uint64_t * fpstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	boolean_t is64bit = proc_is64bit_data(current_proc());
	savearea_t *regs;
	user_addr_t pc, sp;
	volatile uint16_t *flags = (volatile uint16_t *) &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

#if 0
	uintptr_t oldcontext;
	size_t s1, s2;
#endif

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (savearea_t *) find_user_regs(thread);
	if (regs == NULL)
		goto zero;

	*pcstack++ = (uint64_t)dtrace_proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = get_saved_state_pc(regs);
	sp = get_saved_state_lr(regs);

#if 0				/* XXX signal stack crawl */
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof(struct frame) + 2 * sizeof(long);
		s2 = s1 + sizeof(siginfo_t);
	} else {
		s1 = sizeof(struct frame32) + 3 * sizeof(int);
		s2 = s1 + sizeof(siginfo32_t);
	}
#endif

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t) pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (is64bit)
			pc = dtrace_fuword64(sp);
		else
			pc = dtrace_fuword32(sp);
	}

	while (pc != 0 && sp != 0) {
		*pcstack++ = (uint64_t) pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

#if 0				/* XXX signal stack crawl */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *) oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext_t *ucp = (ucontext_t *) oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else
#endif
		{
			if (is64bit) {
				pc = dtrace_fuword64((sp + RETURN_OFFSET64));
				sp = dtrace_fuword64(sp);
			} else {
				pc = dtrace_fuword32((sp + RETURN_OFFSET));
				sp = dtrace_fuword32(sp);
			}
		}

		/*
		 * This is totally bogus: if we faulted, we're going to clear
		 * the fault and break. This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0ULL;
}
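
/*
 * Unlike dtrace_getupcstack(), this variant records frame pointers as
 * well: after the leading pid entry in pcstack[], each recorded pc is
 * paired with the frame pointer it was found through, and the synthetic
 * entry pc is paired with 0.
 */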
void
dtrace_getpcstack(pc_t * pcstack, int pcstack_limit, int aframes,
		  uint32_t * intrpc)
{
	struct frame *fp = (struct frame *) __builtin_frame_address(0);
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr;
	int last = 0;
	uintptr_t pc;
	uintptr_t caller = CPU->cpu_dtrace_caller;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *) dtrace_get_cpu_int_stack_top();
	else
		stacktop = (struct frame *) (dtrace_get_kernel_stack(current_thread()) + kernel_stack_size);

	minfp = fp;

	aframes++;		/* Count the getpcstack() frame */

	if (intrpc != NULL && depth < pcstack_limit)
		pcstack[depth++] = (pc_t) intrpc;

	while (depth < pcstack_limit) {
		nextfp = *(struct frame **) fp;
		pc = *(uintptr_t *) (((uintptr_t) fp) + RETURN_OFFSET64);

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				arm_saved_state_t *arm_kern_regs = (arm_saved_state_t *) find_kern_regs(current_thread());
				if (arm_kern_regs) {
					nextfp = (struct frame *)(saved_state64(arm_kern_regs)->fp);

					vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread());

					minfp = (struct frame *)kstack_base;
					stacktop = (struct frame *)(kstack_base + kernel_stack_size);

					on_intr = 0;

					if (nextfp <= minfp || nextfp >= stacktop) {
						last = 1;
					}
				} else {
					/*
					 * If this thread was on the interrupt stack, but did not
					 * take an interrupt (i.e., the idle thread), there is no
					 * explicit saved state for us to use.
					 */
					last = 1;
				}
			} else {
				/*
				 * This is the last frame we can process; indicate
				 * that we should return after processing this frame.
				 */
				last = 1;
			}
		}

		if (--aframes == 0 && caller != (uintptr_t)NULL) {
			/*
			 * We've just run out of artificial frames,
			 * and we have a valid caller -- fill it in
			 * now.
			 */
			ASSERT(depth < pcstack_limit);
			pcstack[depth++] = (pc_t) caller;
			caller = (uintptr_t)NULL;
		} else {
			if (depth < pcstack_limit)
				pcstack[depth++] = (pc_t) pc;
		}

		if (last) {
			while (depth < pcstack_limit)
				pcstack[depth++] = (pc_t) NULL;
			return;
		}
		fp = nextfp;
		minfp = fp;
	}
}
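
/*
 * A worked example of the aframes accounting above: for a kernel probe,
 * the stack at probe time might look like
 *
 *	provider entry point -> ... -> dtrace_probe -> dtrace_getpcstack
 *
 * and aframes tells the walker how many of those DTrace-internal frames
 * to discard before recording the first pc the consumer should see.
 * (The exact chain is provider-dependent; this one is illustrative.)
 */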
/*
 * On arm64, we support both 32bit and 64bit user processes.
 * This routine is only called when handling 32bit processes
 * where thumb_mode is pertinent.
 * If this routine is called when handling 64bit processes
 * thumb_mode should always be zero.
 */
int
dtrace_instr_size(uint32_t instr, int thumb_mode)
{
	if (thumb_mode) {
		uint16_t instr16 = *(uint16_t*) &instr;
		if (((instr16 >> 11) & 0x1F) > 0x1C)
			return 4;
		else
			return 2;
	} else {
		return 4;
	}
}
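
/*
 * The width test reads the top five bits of the first Thumb halfword:
 * values above 0x1C (0b11101, 0b11110, 0b11111) begin a 32-bit Thumb-2
 * encoding; everything else is a 16-bit instruction. For example,
 * 0xF8D0 >> 11 == 0x1F, so an instruction starting with that halfword
 * decodes as 4 bytes wide.
 */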
uint64_t
dtrace_getarg(int arg, int aframes, dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
#pragma unused(arg, aframes)
	uint64_t val = 0;
	struct frame *fp = (struct frame *)__builtin_frame_address(0);
	uintptr_t *stack;
	uintptr_t pc;
	int i;

	/*
	 * A total of 8 arguments are passed via registers; any argument with
	 * index of 7 or lower is therefore in a register.
	 */
	int inreg = 7;

	for (i = 1; i <= aframes; ++i) {
		fp = fp->backchain;
#if __has_feature(ptrauth_returns)
		pc = (uintptr_t)ptrauth_strip((void*)fp->retaddr, ptrauth_key_return_address);
#else
		pc = fp->retaddr;
#endif

		if (dtrace_invop_callsite_pre != NULL
		    && pc > (uintptr_t) dtrace_invop_callsite_pre
		    && pc <= (uintptr_t) dtrace_invop_callsite_post)
		{
			/* fp points to frame of dtrace_invop() activation */
			fp = fp->backchain;	/* to fbt_perfCallback activation */
			fp = fp->backchain;	/* to sleh_synchronous activation */
			fp = fp->backchain;	/* to fleh_synchronous activation */

			arm_saved_state_t *tagged_regs = (arm_saved_state_t *) ((void*) &fp[1]);
			arm_saved_state64_t *saved_state = saved_state64(tagged_regs);

			if (arg <= inreg) {
				/* the argument will be found in a register */
				stack = (uintptr_t*) &saved_state->x[0];
			} else {
				/* the argument will be found in the stack */
				fp = (struct frame *) saved_state->sp;
				stack = (uintptr_t*) &fp[1];
				arg -= (inreg + 1);
			}

			goto load;
		}
	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- We arrive here when the provider has
	 * called dtrace_probe() directly.
	 * The probe ID is the first argument to dtrace_probe().
	 * We must advance beyond that to get the argX.
	 */
	arg++;		/* Advance past probeID */

	if (arg <= inreg) {
		/*
		 * This shouldn't happen. If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	arg -= (inreg + 1);
	stack = (uintptr_t*) &fp[1];	/* Find marshalled arguments */

load:
	if (dtrace_canload((uint64_t)(stack + arg), sizeof(uint64_t),
	    mstate, vstate)) {
		/* dtrace_probe arguments arg0 ... arg4 are 64bits wide */
		val = dtrace_load64((uint64_t)(stack + arg));
	}

	return (val);
}
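
/*
 * Worked example of the indexing above (illustrative): with 8 register
 * arguments (inreg == 7), a request for arg 9 taken through the trap
 * path is beyond x0..x7, so it is fetched from the caller's outgoing
 * stack area: arg - (inreg + 1) == 1, i.e. the second 8-byte slot above
 * the frame record at saved_state->sp.
 */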
void
dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which,
    int fltoffs, int fault, uint64_t illval)
{
	/*
	 * For the case of the error probe firing, let's
	 * stash away "illval" here, and special-case retrieving it in DIF_VARIABLE_ARG.
	 */
	state->dts_arg_error_illval = illval;
	dtrace_probe(dtrace_probeid_error, (uint64_t)(uintptr_t)state, epid, which, fltoffs, fault);
}
void
dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
{
	/* XXX ARMTODO check copied from ppc/x86 */
	/*
	 * "base" is the smallest toxic address in the range, "limit" is the first
	 * VALID address greater than "base".
	 */
	func(0x0, VM_MIN_KERNEL_ADDRESS);
	if (VM_MAX_KERNEL_ADDRESS < ~(uintptr_t)0)
		func(VM_MAX_KERNEL_ADDRESS + 1, ~(uintptr_t)0);
}
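
/*
 * Code patching on arm64 requires two cache operations: clean the data
 * cache so the patched bytes reach the point of coherency/unification,
 * then invalidate the instruction cache so no CPU refetches the stale,
 * pre-patch instructions.
 */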
void
dtrace_flush_caches(void)
{
	/* TODO There were some problems with flushing just the cache line that had been modified.
	 * For now, we'll flush the entire cache, until we figure out how to flush just the patched block.
	 */
	FlushPoC_Dcache();
	InvalidatePoU_Icache();
}