/*
 * Copyright (c) 2005-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

// NOTE: This file is only c++ so I can get static initialisers going
#include <libkern/OSDebug.h>
#include <IOKit/IOLib.h>

#include <sys/cdefs.h>
#include <stdarg.h>

#include <mach/mach_types.h>
#include <mach/kmod.h>
#include <kern/locks.h>

#include <libkern/libkern.h>    // From bsd's libkern directory
#include <mach/vm_param.h>

#include <sys/kdebug.h>
#include <kern/thread.h>

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif /* defined(HAS_APPLE_PAC) */

extern int etext;

__BEGIN_DECLS
// From osfmk/kern/thread.h but considered to be private
extern vm_offset_t min_valid_stack_address(void);
extern vm_offset_t max_valid_stack_address(void);

// From osfmk/kern/printf.c
extern boolean_t doprnt_hide_pointers;

extern void kmod_dump_log(vm_offset_t *addr, unsigned int cnt, boolean_t doUnslide);

extern addr64_t kvtophys(vm_offset_t va);
#if __arm__
extern int copyinframe(vm_address_t fp, char *frame);
#elif defined(__arm64__)
extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);
#endif
__END_DECLS

extern lck_grp_t *IOLockGroup;

static lck_mtx_t *sOSReportLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
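// sOSReportLock is created by a C++ static initializer (the reason this file
// is C++ in the first place; see the NOTE above), so the mutex exists before
// any of the reporting routines below can run.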

/* Use kernel_debug() to log a backtrace */
void
trace_backtrace(uint32_t debugid, uint32_t debugid2, uintptr_t size, uintptr_t data)
{
    void *bt[16];
    const unsigned cnt = sizeof(bt) / sizeof(bt[0]);
    unsigned i;
    int found = 0;

    OSBacktrace(bt, cnt);

    /* find first non-kernel frame */
    for (i = 3; i < cnt && bt[i]; i++) {
        if (bt[i] > (void*)&etext) {
            found++;
            break;
        }
    }
    /*
     * if there are non-kernel frames, only log these
     * otherwise, log everything but the first two
     */
    if (!found) {
        i = 2;
    }

#define safe_bt(a) (uintptr_t)(a<cnt ? bt[a] : NULL)
    kernel_debug(debugid, data, size, safe_bt(i), safe_bt(i + 1), 0);
    kernel_debug(debugid2, safe_bt(i + 2), safe_bt(i + 3), safe_bt(i + 4), safe_bt(i + 5), 0);
}
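
/*
 * Illustrative call site (an assumption, not part of the original source):
 * a subsystem that has reserved two kdebug codes of its own could record an
 * event of `size` bytes at `ptr` together with the caller's backtrace:
 *
 *     trace_backtrace(MY_DBG_CODE_A, MY_DBG_CODE_B, size, (uintptr_t)ptr);
 *
 * MY_DBG_CODE_A, MY_DBG_CODE_B and ptr are hypothetical names used only for
 * illustration; the arguments map to (debugid, debugid2, size, data) above.
 */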

/* Report a message with a 7-entry backtrace - very slow */
void
OSReportWithBacktrace(const char *str, ...)
{
    char buf[128];
    void *bt[9];
    const unsigned cnt = sizeof(bt) / sizeof(bt[0]);
    va_list listp;

    // Ignore our own and our caller's stack frames, skipping frames 0 & 1
    (void) OSBacktrace(bt, cnt);

    va_start(listp, str);
    vsnprintf(buf, sizeof(buf), str, listp);
    va_end(listp);

    lck_mtx_lock(sOSReportLock);
    {
        boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;
        doprnt_hide_pointers = FALSE;

        printf("%s\nBacktrace 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n", buf,
            (unsigned long) VM_KERNEL_UNSLIDE(bt[2]), (unsigned long) VM_KERNEL_UNSLIDE(bt[3]),
            (unsigned long) VM_KERNEL_UNSLIDE(bt[4]), (unsigned long) VM_KERNEL_UNSLIDE(bt[5]),
            (unsigned long) VM_KERNEL_UNSLIDE(bt[6]), (unsigned long) VM_KERNEL_UNSLIDE(bt[7]),
            (unsigned long) VM_KERNEL_UNSLIDE(bt[8]));
        kmod_dump_log((vm_offset_t *) &bt[2], cnt - 2, TRUE);

        doprnt_hide_pointers = old_doprnt_hide_pointers;
    }
    lck_mtx_unlock(sOSReportLock);
}
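
/*
 * Illustrative usage (an assumption, not part of the original source):
 * a driver that hits an unexpected condition might log the message and the
 * offending call chain in one step, e.g.:
 *
 *     OSReportWithBacktrace("MyDriver: refcount underflow (%d)", count);
 *
 * "MyDriver" and count are hypothetical; the routine prints the formatted
 * string followed by unslid frames 2..8 of the current thread's backtrace.
 */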

static vm_offset_t minstackaddr = min_valid_stack_address();
static vm_offset_t maxstackaddr = max_valid_stack_address();

#if __x86_64__
#define x86_64_RETURN_OFFSET 8

static unsigned int
x86_64_validate_raddr(vm_offset_t raddr)
{
    return (raddr > VM_MIN_KERNEL_AND_KEXT_ADDRESS) &&
           (raddr < VM_MAX_KERNEL_ADDRESS);
}

static unsigned int
x86_64_validate_stackptr(vm_offset_t stackptr)
{
    /* Existence and alignment check */
    if (!stackptr || (stackptr & 0x7) || !x86_64_validate_raddr(stackptr)) {
        return 0;
    }

    /* Is a virtual->physical translation present? */
    if (!kvtophys(stackptr)) {
        return 0;
    }

    /* Check if the return address lies on the same page;
     * If not, verify that a translation exists.
     */
    if (((PAGE_SIZE - (stackptr & PAGE_MASK)) < x86_64_RETURN_OFFSET) &&
        !kvtophys(stackptr + x86_64_RETURN_OFFSET)) {
        return 0;
    }
    return 1;
}
#endif /* __x86_64__ */

void
OSPrintBacktrace(void)
{
    void *btbuf[20];
    int tmp = OSBacktrace(btbuf, 20);
    int i;

    for (i = 0; i < tmp; i++) {
        kprintf("bt[%.2d] = %p\n", i, btbuf[i]);
    }
}
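
/*
 * Illustrative sketch of how OSBacktrace() (defined below) is consumed,
 * assuming it returns the number of frames captured; OSPrintBacktrace()
 * above is essentially this pattern with a 20-entry buffer and kprintf().
 * The names below are hypothetical:
 *
 *     void *frames[16];
 *     unsigned n = OSBacktrace(frames, 16);
 *     for (unsigned j = 0; j < n; j++) {
 *         printf("frame[%u] = %p\n", j, frames[j]);
 *     }
 */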

unsigned
OSBacktrace(void **bt, unsigned maxAddrs)
{
    unsigned frame;

    if (!current_thread()) {
        return 0;
    }

#if __x86_64__
#define SANE_x86_64_FRAME_SIZE (kernel_stack_size >> 1)
    vm_offset_t stackptr, stackptr_prev, raddr;
    unsigned frame_index = 0;

    /* Obtain current frame pointer */
    __asm__ volatile ("movq %%rbp, %0" : "=m" (stackptr));

    if (!x86_64_validate_stackptr(stackptr)) {
        goto pad;
    }

    /* The return address sits x86_64_RETURN_OFFSET bytes above the saved frame pointer */
    raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET));

    if (!x86_64_validate_raddr(raddr)) {
        goto pad;
    }

    bt[frame_index++] = (void *) raddr;

    for (; frame_index < maxAddrs; frame_index++) {
        stackptr_prev = stackptr;
        stackptr = *((vm_offset_t *) stackptr_prev);

        if (!x86_64_validate_stackptr(stackptr)) {
            break;
        }
        /* Stack grows downwards */
        if (stackptr < stackptr_prev) {
            break;
        }

        if ((stackptr - stackptr_prev) > SANE_x86_64_FRAME_SIZE) {
            break;
        }

        raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET));

        if (!x86_64_validate_raddr(raddr)) {
            break;
        }

        bt[frame_index] = (void *) raddr;
    }

pad:
    frame = frame_index;

    /* Zero-fill any remaining entries */
    for (; frame_index < maxAddrs; frame_index++) {
        bt[frame_index] = (void *) NULL;
    }
#elif __arm__ || __arm64__
    uint32_t i = 0;
    uintptr_t frameb[2];
    uintptr_t fp = 0;

    // get the current frame pointer for this thread
#if defined(__arm__)
#define OSBacktraceFrameAlignOK(x) (((x) & 0x3) == 0)
    __asm__ volatile ("mov %0,r7" : "=r" (fp));
#elif defined(__arm64__)
#define OSBacktraceFrameAlignOK(x) (((x) & 0xf) == 0)
    __asm__ volatile ("mov %0, fp" : "=r" (fp));
#else
#error Unknown architecture.
#endif

    // now crawl up the stack recording the link value of each frame
    do {
        // check bounds
        if ((fp == 0) || (!OSBacktraceFrameAlignOK(fp)) || (fp > VM_MAX_KERNEL_ADDRESS) || (fp < VM_MIN_KERNEL_AND_KEXT_ADDRESS)) {
            break;
        }
        // safely read frame
#ifdef __arm64__
        if (copyinframe(fp, (char*)frameb, TRUE) != 0) {
            break;
        }
#else
        if (copyinframe(fp, (char*)frameb) != 0) {
            break;
        }
#endif

        // No need to use copyin as this is always a kernel address, see check above
#if defined(HAS_APPLE_PAC)
        /* return addresses on stack signed by arm64e ABI */
        bt[i] = ptrauth_strip((void*)frameb[1], ptrauth_key_return_address);    // link register
#else
        bt[i] = (void*)frameb[1];       // link register
#endif
        fp = frameb[0];                 // saved frame pointer of the caller
    } while (++i < maxAddrs);