/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from mach/ppc/thread_status.h */
#include <kern/thread.h>
#include <mach/thread_status.h>

#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <libkern/OSAtomic.h>
#include <kern/thread_call.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <miscfs/devfs/devfs.h>
#include <mach/vm_param.h>
#include <machine/cpu_capabilities.h>
extern dtrace_id_t dtrace_probeid_error;	/* special ERROR probe */

void
dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which,
    int fltoffs, int fault, uint64_t illval)
{
	/*
	 * dtrace_getarg() is a lost cause on PPC. For the case of the error probe firing, let's
	 * stash away "illval" here, and special-case retrieving it in DIF_VARIABLE_ARG.
	 */
	state->dts_arg_error_illval = illval;
	dtrace_probe(dtrace_probeid_error, (uint64_t)(uintptr_t)state, epid, which, fltoffs, fault);
}
/*
 * Atomicity and synchronization
 */
void
dtrace_membar_producer(void)
{
	__asm__ volatile("sync");
}

void
dtrace_membar_consumer(void)
{
	__asm__ volatile("isync");
}
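/*
 * On PowerPC, "sync" is a heavyweight barrier that orders all earlier loads and
 * stores ahead of later ones, which covers the store ordering a producer needs.
 * "isync" is context-synchronizing: later instructions do not begin executing
 * until earlier ones have completed, which is how the consumer side is enforced
 * here.
 */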
/*
 * Interrupt manipulation
 * XXX dtrace_getipl() can be called from probe context.
 */
int
dtrace_getipl(void)
{
	return (ml_at_interrupt_context() ? 1 : 0);
}
typedef void (*broadcastFunc)(uint32_t);

int32_t cpu_broadcast(uint32_t *, broadcastFunc, uint32_t); /* osfmk/ppc/machine_cpu.h */
typedef struct xcArg {
	processorid_t cpu;
	dtrace_xcall_t f;
	void *arg;
	uint32_t waitVar;
} xcArg_t;

static void
xcRemote( uint32_t foo )
{
	xcArg_t *pArg = (xcArg_t *)foo;

	if ( pArg->cpu == CPU->cpu_id || pArg->cpu == DTRACE_CPUALL ) {
		(pArg->f)(pArg->arg);
	}

	if (!hw_atomic_sub(&(pArg->waitVar), 1)) {	/* Drop the wait count */
		thread_wakeup((event_t)&(pArg->waitVar));	/* If we were the last, wake up the signaller */
	}
}
/*
 * dtrace_xcall() is not called from probe context.
 */
void
dtrace_xcall(processorid_t cpu, dtrace_xcall_t f, void *arg)
{
	xcArg_t xcArg;

	/* Talking to ourselves, are we? */
	if ( cpu == CPU->cpu_id ) {
		(*f)(arg);
		return;
	}

	/* For a broadcast, run the function locally as well. */
	if ( cpu == DTRACE_CPUALL ) {
		(*f)(arg);
	}

	xcArg.cpu = cpu;
	xcArg.f = f;
	xcArg.arg = arg;
	xcArg.waitVar = 0;

	(void)cpu_broadcast(&(xcArg.waitVar), xcRemote, (uint32_t)&xcArg);
}
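/*
 * Callers typically pass DTRACE_CPUALL to run a function on every CPU; for
 * example, dtrace_sync() cross-calls all CPUs so that any probe processing
 * still in flight has drained before state is torn down.
 */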
uint64_t
dtrace_getreg(struct regs *savearea, uint_t reg)
{
	ppc_saved_state_t *regs = (ppc_saved_state_t *)savearea;
	uint64_t mask = (_cpu_capabilities & k64Bit) ? 0xffffffffffffffffULL : 0x00000000ffffffffULL;

	/* See osfmk/ppc/savearea.h */
	if (reg > 68) {	/* beyond mmcr2 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	switch (reg) {
	/* First 38 registers are saved to 64 bits r0-r31, srr0, srr1, xer, lr, ctr, dar. */
	default:
		return (((uint64_t *)(&(regs->save_r0)))[reg]) & mask;

	/* Handle the 32-bit registers */
	case 38: case 39: case 40: case 41:	/* cr, dsisr, exception, vrsave */
	case 42: case 43: case 44: case 45:	/* vscr[4] */
	case 46: case 47: case 48: case 49:	/* fpscrpad, fpscr, save_1d8[2] */
	case 50: case 51: case 52: case 53:	/* save_1E0[8] */
	case 54: case 55: case 56: case 57:
	case 58: case 59: case 60: case 61:	/* save_pmc[8] */
	case 62: case 63: case 64: case 65:
		return (uint64_t)(((unsigned int *)(&(regs->save_cr)))[reg - 38]);

	case 66:
		return regs->save_mmcr0 & mask;
	case 67:
		return regs->save_mmcr1 & mask;
	case 68:
		return regs->save_mmcr2 & mask;
	}
}
#define RETURN_OFFSET 8
#define RETURN_OFFSET64 16
#define REGPC save_srr0
#define REGSP save_r1
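/*
 * These offsets reflect the PowerPC stack frame layout: the back chain (the
 * caller's SP) is stored at offset 0 of each frame, and the saved LR (return
 * address) sits at offset 8 under the 32-bit ABI and offset 16 under the
 * 64-bit ABI. REGPC and REGSP name the saved SRR0 (pc) and r1 (sp) slots of
 * the PPC savearea.
 */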
/*
 * XXX dtrace_getustack_common() can be called from probe context.
 */
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, user_addr_t pc,
    user_addr_t sp)
{
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
#if 0 /* XXX signal stack crawl*/
	uintptr_t oldcontext = lwp->lwp_oldcontext;
	size_t s1, s2;
#endif
	int ret = 0;
	boolean_t is64Bit = proc_is64bit(current_proc());

	ASSERT(pcstack == NULL || pcstack_limit > 0);

#if 0 /* XXX signal stack crawl*/
	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	while (pc != 0 && sp != 0) {
		ret++;
		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

#if 0 /* XXX signal stack crawl*/
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else
#endif
		{
			if (is64Bit) {
				pc = dtrace_fuword64((sp + RETURN_OFFSET64));
				sp = dtrace_fuword64(sp);
			} else {
				pc = dtrace_fuword32((sp + RETURN_OFFSET));
				sp = dtrace_fuword32(sp);
			}
		}
	}

	return (ret);
}
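/*
 * The walk above follows that frame layout directly: each iteration records
 * the current pc, reads the saved LR at (sp + RETURN_OFFSET or RETURN_OFFSET64)
 * as the next pc, and follows the back chain word at *sp to the next frame,
 * until either value reads as zero or the caller's buffer fills.
 */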
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	ppc_saved_state_t *regs;
	user_addr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	int n;
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (ppc_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		goto zero;

	*pcstack++ = (uint64_t)proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = regs->REGPC;
	sp = regs->REGSP;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = regs->save_lr;
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_USTACK_FP)) {
		/*
		 * If the ustack fp flag is set, the stack frame from sp to
		 * fp contains no valid call information. Start with the fp.
		 */
		if (is64Bit)
			sp = dtrace_fuword64(sp);
		else
			sp = (user_addr_t)dtrace_fuword32(sp);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
int
dtrace_getustackdepth(void)
{
	thread_t thread = current_thread();
	ppc_saved_state_t *regs;
	user_addr_t pc, sp;
	int n = 0;
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (thread == NULL)
		return 0;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	regs = (ppc_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		return 0;

	pc = regs->REGPC;
	sp = regs->REGSP;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		n++;
		pc = regs->save_lr;
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_USTACK_FP)) {
		/*
		 * If the ustack fp flag is set, the stack frame from sp to
		 * fp contains no valid call information. Start with the fp.
		 */
		if (is64Bit)
			sp = dtrace_fuword64(sp);
		else
			sp = (user_addr_t)dtrace_fuword32(sp);
	}

	n += dtrace_getustack_common(NULL, 0, pc, sp);

	return (n);
}
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	ppc_saved_state_t *regs;
	user_addr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
#if 0 /* XXX signal stack crawl*/
	uintptr_t oldcontext;
	size_t s1, s2;
#endif
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (ppc_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		goto zero;

	*pcstack++ = (uint64_t)proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = regs->REGPC;
	sp = regs->REGSP;

#if 0 /* XXX signal stack crawl*/
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		/*
		 * XXX This is wrong, but we do not yet support stack helpers.
		 */
		if (is64Bit)
			pc = dtrace_fuword64(sp);
		else
			pc = dtrace_fuword32(sp);
	}

	while (pc != 0 && sp != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

#if 0 /* XXX signal stack crawl*/
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else
#endif
		{
			if (is64Bit) {
				pc = dtrace_fuword64((sp + RETURN_OFFSET64));
				sp = dtrace_fuword64(sp);
			} else {
				pc = dtrace_fuword32((sp + RETURN_OFFSET));
				sp = dtrace_fuword32(sp);
			}
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	struct frame *fp = (struct frame *)__builtin_frame_address(0);
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr;
	int last = 0;
	uintptr_t pc;
	uintptr_t caller = CPU->cpu_dtrace_caller;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)dtrace_get_cpu_int_stack_top();
	else
		stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + kernel_stack_size);

	minfp = fp;

	aframes++;	/* XXX add one for this frame */

	if (intrpc != NULL && depth < pcstack_limit)
		pcstack[depth++] = (pc_t)intrpc;

	while (depth < pcstack_limit) {
		nextfp = *(struct frame **)fp;
		pc = *(uintptr_t *)(((uintptr_t)fp) + RETURN_OFFSET);

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread());

				minfp = (struct frame *)kstack_base;
				stacktop = (struct frame *)(kstack_base + kernel_stack_size);

				on_intr = 0;
				continue;
			}

			/*
			 * This is the last frame we can process; indicate
			 * that we should return after processing this frame.
			 */
			last = 1;
		}

		if (aframes > 0) {
			if (--aframes == 0 && caller != 0) {
				/*
				 * We've just run out of artificial frames,
				 * and we have a valid caller -- fill it in
				 * now.
				 */
				ASSERT(depth < pcstack_limit);
				pcstack[depth++] = (pc_t)caller;
			}
		} else {
			if (depth < pcstack_limit)
				pcstack[depth++] = (pc_t)pc;
		}

		if (last) {
			while (depth < pcstack_limit)
				pcstack[depth++] = 0;
			return;
		}

		fp = nextfp;
		minfp = fp;
	}
}
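/*
 * Arguments past the first few are not recovered from the stack on PPC; a
 * recognizable sentinel is returned instead. The error probe's illegal value
 * is handled separately: dtrace_probe_error() above stashes it so it can be
 * special-cased in DIF_VARIABLE_ARG.
 */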
uint64_t
dtrace_getarg(int arg, int aframes)
{
#pragma unused(arg,aframes)
	return 0xfeedfacedeafbeadLL;	/* XXX Only called for arg >= 5 */
}
void
dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
{
	/*
	 * "base" is the smallest toxic address in the range, "limit" is the first
	 * VALID address greater than "base".
	 */
	func(0x0, VM_MIN_KERNEL_ADDRESS);
	if (VM_MAX_KERNEL_ADDRESS < ~(uintptr_t)0)
		func(VM_MAX_KERNEL_ADDRESS + 1, ~(uintptr_t)0);
}
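/*
 * Everything outside the kernel's virtual window is reported as toxic: all
 * addresses below VM_MIN_KERNEL_ADDRESS and, when the window does not reach
 * the top of the address space, everything above VM_MAX_KERNEL_ADDRESS.
 * DTrace avoids loading from these ranges in probe context.
 */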
extern void *mapping_phys_lookup(ppnum_t, unsigned int *);