/*
 * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
// NOTE: This file is only C++ so I can get static initialisers going
#include <libkern/OSDebug.h>

#include <sys/cdefs.h>

#include <stdarg.h>
#include <mach/mach_types.h>
#include <mach/kmod.h>
#include <kern/locks.h>

#include <libkern/libkern.h>    // From bsd's libkern directory
#include <mach/vm_param.h>

#include <sys/kdebug.h>
#include <kern/thread.h>
// From osfmk/kern/thread.h but considered to be private
extern vm_offset_t min_valid_stack_address(void);
extern vm_offset_t max_valid_stack_address(void);

// End of the kernel's text segment, provided by the linker
extern int etext;

extern void kmod_dump_log(vm_offset_t *addr, unsigned int cnt);

extern addr64_t kvtophys(vm_offset_t va);

extern lck_grp_t *IOLockGroup;

static lck_mtx_t *sOSReportLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
/* Use kernel_debug() to log a backtrace */
void
trace_backtrace(uint32_t debugid, uint32_t debugid2, uintptr_t size, uintptr_t data)
{
    void *bt[16];
    const unsigned cnt = sizeof(bt) / sizeof(bt[0]);
    unsigned i;
    int found = 0;

    OSBacktrace(bt, cnt);

    /* find first non-kernel frame */
    for (i = 3; i < cnt && bt[i]; i++) {
        if (bt[i] > (void*)&etext) {
            found = 1;
            break;
        }
    }

    /*
     * if there are non-kernel frames, only log these;
     * otherwise, log everything but the first two
     */
    if (!found)
        i = 2;

#define safe_bt(a) (uintptr_t)(a < cnt ? bt[a] : 0)
    kernel_debug(debugid, data, size, safe_bt(i), safe_bt(i + 1), 0);
    kernel_debug(debugid2, safe_bt(i + 2), safe_bt(i + 3), safe_bt(i + 4), safe_bt(i + 5), 0);
}
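/*
 * Illustrative note (not in the original file): safe_bt() clamps any index
 * past the captured array to 0, so both events stay well-formed even when
 * the first interesting frame sits near the end of bt[]. With cnt == 16
 * and i == 12, the two calls above expand to:
 *
 *   kernel_debug(debugid,  data, size, (uintptr_t)bt[12], (uintptr_t)bt[13], 0);
 *   kernel_debug(debugid2, (uintptr_t)bt[14], (uintptr_t)bt[15], 0, 0, 0);
 */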
/* Report a message with a 7-entry backtrace - very slow */
void
OSReportWithBacktrace(const char *str, ...)
{
    char buf[128];
    void *bt[9];
    const unsigned cnt = sizeof(bt) / sizeof(bt[0]);
    va_list listp;

    // Ignore our own and our caller's stack frames, skipping frames 0 & 1
    (void) OSBacktrace(bt, cnt);

    va_start(listp, str);
    vsnprintf(buf, sizeof(buf), str, listp);
    va_end(listp);

    lck_mtx_lock(sOSReportLock);
    printf("%s\nBacktrace %p %p %p %p %p %p %p\n",
        buf, bt[2], bt[3], bt[4], bt[5], bt[6], bt[7], bt[8]);
    kmod_dump_log((vm_offset_t *) &bt[2], cnt - 2);
    lck_mtx_unlock(sOSReportLock);
}
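/*
 * Usage sketch (illustrative; "IOMyDriver" and "obj" are made-up names):
 * the function takes printf-style arguments and appends frames 2-8 of the
 * caller's backtrace, serialized under sOSReportLock:
 *
 *   OSReportWithBacktrace("IOMyDriver: unexpected release of %p", obj);
 */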
static vm_offset_t minstackaddr = min_valid_stack_address();
static vm_offset_t maxstackaddr = max_valid_stack_address();
#if __i386__
#define i386_RETURN_OFFSET 4

static unsigned int
i386_validate_stackptr(vm_offset_t stackptr)
{
    /* Existence and alignment check
     */
    if (!stackptr || (stackptr & 0x3))
        return 0;

    /* Is a virtual->physical translation present?
     */
    if (!kvtophys(stackptr))
        return 0;

    /* Check if the return address lies on the same page;
     * If not, verify that a translation exists.
     */
    if (((PAGE_SIZE - (stackptr & PAGE_MASK)) < i386_RETURN_OFFSET) &&
        !kvtophys(stackptr + i386_RETURN_OFFSET))
        return 0;
    return 1;
}
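/*
 * What a validated stackptr is then used for (sketch, matching the reads
 * in OSBacktrace() below): the saved return address is fetched from 4
 * bytes above the frame pointer,
 *
 *   vm_offset_t raddr = *((vm_offset_t *) (stackptr + i386_RETURN_OFFSET));
 *
 * kvtophys() returning 0 means there is no physical translation for the
 * virtual address, i.e. the dereference would fault inside the backtracer.
 */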
static unsigned int
i386_validate_raddr(vm_offset_t raddr)
{
    return ((raddr > VM_MIN_KERNEL_AND_KEXT_ADDRESS) &&
        (raddr < VM_MAX_KERNEL_ADDRESS));
}
#endif  /* __i386__ */
#if __x86_64__
#define x86_64_RETURN_OFFSET 8

static unsigned int
x86_64_validate_raddr(vm_offset_t raddr)
{
    return ((raddr > VM_MIN_KERNEL_AND_KEXT_ADDRESS) &&
        (raddr < VM_MAX_KERNEL_ADDRESS));
}

static unsigned int
x86_64_validate_stackptr(vm_offset_t stackptr)
{
    /* Existence and alignment check
     */
    if (!stackptr || (stackptr & 0x7) || !x86_64_validate_raddr(stackptr))
        return 0;

    /* Is a virtual->physical translation present?
     */
    if (!kvtophys(stackptr))
        return 0;

    /* Check if the return address lies on the same page;
     * If not, verify that a translation exists.
     */
    if (((PAGE_SIZE - (stackptr & PAGE_MASK)) < x86_64_RETURN_OFFSET) &&
        !kvtophys(stackptr + x86_64_RETURN_OFFSET))
        return 0;
    return 1;
}
#endif  /* __x86_64__ */
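/*
 * Frame layout assumed by the walkers below (standard frame-pointer chain;
 * illustrative):
 *
 *   [rbp]     -> caller's saved rbp   (the next stackptr in the chain)
 *   [rbp + 8] -> return address       (hence x86_64_RETURN_OFFSET == 8)
 *
 * Code built with -fomit-frame-pointer breaks this chain, which is why
 * every hop is validated before it is dereferenced.
 */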
unsigned OSBacktrace(void **bt, unsigned maxAddrs)
{
    unsigned frame;

#if __ppc__
    vm_offset_t stackptr, stackptr_prev;
    const vm_offset_t * const mem = (vm_offset_t *) 0;
    unsigned i = 0;

    __asm__ volatile("mflr %0" : "=r" (stackptr));
    bt[i++] = (void *) stackptr;

    __asm__ volatile("mr %0,r1" : "=r" (stackptr));
    for ( ; i < maxAddrs; i++) {
        // Validate we have a reasonable stackptr
        if (!(minstackaddr <= stackptr && stackptr < maxstackaddr)
            || (stackptr & 3))
            break;

        stackptr_prev = stackptr;
        stackptr = mem[stackptr_prev >> 2];
        if ((stackptr - stackptr_prev) > 8 * 1024)      // Sanity check
            break;

        vm_offset_t addr = mem[(stackptr >> 2) + 2];
        if ((addr & 3) || (addr < 0x8000))      // More sanity checks
            break;
        bt[i] = (void *) addr;
    }
    frame = i;

    for ( ; i < maxAddrs; i++)
        bt[i] = (void *) 0;
#elif __i386__
#define SANE_i386_FRAME_SIZE (kernel_stack_size >> 1)
    vm_offset_t stackptr, stackptr_prev, raddr;
    unsigned frame_index = 0;

    /* Obtain current frame pointer */
    __asm__ volatile("movl %%ebp, %0" : "=m" (stackptr));

    if (!i386_validate_stackptr(stackptr))
        goto pad;

    raddr = *((vm_offset_t *) (stackptr + i386_RETURN_OFFSET));

    if (!i386_validate_raddr(raddr))
        goto pad;

    bt[frame_index++] = (void *) raddr;

    for ( ; frame_index < maxAddrs; frame_index++) {
        stackptr_prev = stackptr;
        stackptr = *((vm_offset_t *) stackptr_prev);

        if (!i386_validate_stackptr(stackptr))
            break;

        /* Stack grows downwards */
        if (stackptr < stackptr_prev)
            break;

        if ((stackptr - stackptr_prev) > SANE_i386_FRAME_SIZE)
            break;

        raddr = *((vm_offset_t *) (stackptr + i386_RETURN_OFFSET));

        if (!i386_validate_raddr(raddr))
            break;

        bt[frame_index] = (void *) raddr;
    }
pad:
    frame = frame_index;

    for ( ; frame_index < maxAddrs; frame_index++)
        bt[frame_index] = (void *) 0;
#elif __x86_64__
#define SANE_x86_64_FRAME_SIZE (kernel_stack_size >> 1)
    vm_offset_t stackptr, stackptr_prev, raddr;
    unsigned frame_index = 0;

    /* Obtain current frame pointer */
    __asm__ volatile("movq %%rbp, %0" : "=m" (stackptr));

    if (!x86_64_validate_stackptr(stackptr))
        goto pad;

    raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET));

    if (!x86_64_validate_raddr(raddr))
        goto pad;

    bt[frame_index++] = (void *) raddr;

    for ( ; frame_index < maxAddrs; frame_index++) {
        stackptr_prev = stackptr;
        stackptr = *((vm_offset_t *) stackptr_prev);

        if (!x86_64_validate_stackptr(stackptr))
            break;

        /* Stack grows downwards */
        if (stackptr < stackptr_prev)
            break;

        if ((stackptr - stackptr_prev) > SANE_x86_64_FRAME_SIZE)
            break;

        raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET));

        if (!x86_64_validate_raddr(raddr))
            break;

        bt[frame_index] = (void *) raddr;
    }
pad:
    frame = frame_index;

    for ( ; frame_index < maxAddrs; frame_index++)
        bt[frame_index] = (void *) 0;
#else
#error arch
#endif
    return frame;
}
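/*
 * Usage sketch (illustrative, not part of the original file):
 *
 *   void *frames[8];
 *   unsigned valid = OSBacktrace(frames, 8);
 *   for (unsigned n = 0; n < valid; n++)
 *       printf("frame %u: %p\n", n, frames[n]);
 *
 * Slots past the last walkable frame are zero-filled; the return value is
 * the number of real frames captured.
 */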