// libkern/gen/OSDebug.cpp — from apple/xnu (xnu-7195.101.1)
1 /*
2 * Copyright (c) 2005-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 // NOTE: This file is only c++ so I can get static initialisers going
30 #include <libkern/OSDebug.h>
31 #include <IOKit/IOLib.h>
32
33 #include <sys/cdefs.h>
34
35 #include <stdarg.h>
36 #include <mach/mach_types.h>
37 #include <mach/kmod.h>
38 #include <kern/locks.h>
39
40 #include <libkern/libkern.h> // From bsd's libkern directory
41 #include <mach/vm_param.h>
42
43 #include <sys/kdebug.h>
44 #include <kern/thread.h>
45
46 #if defined(HAS_APPLE_PAC)
47 #include <ptrauth.h>
48 #endif
49
50 extern int etext;
51 __BEGIN_DECLS
// From osfmk/kern/thread.h but considered to be private
53 extern vm_offset_t min_valid_stack_address(void);
54 extern vm_offset_t max_valid_stack_address(void);
55
56 // From osfmk/kern/printf.c
57 extern boolean_t doprnt_hide_pointers;
58
59 // From osfmk/kmod.c
60 extern void kmod_dump_log(vm_offset_t *addr, unsigned int cnt, boolean_t doUnslide);
61
62 extern addr64_t kvtophys(vm_offset_t va);
63 #if __arm__
64 extern int copyinframe(vm_address_t fp, char *frame);
65 #elif defined(__arm64__)
66 extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);
67 #endif
68
69 __END_DECLS
70
extern lck_grp_t *IOLockGroup;

// Serializes OSReportWithBacktrace() output; allocated through C++ static
// initialization (the reason this file is compiled as C++ — see note at top).
static lck_mtx_t *sOSReportLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
74
75 /* Use kernel_debug() to log a backtrace */
76 void
77 trace_backtrace(uint32_t debugid, uint32_t debugid2, uintptr_t size, uintptr_t data)
78 {
79 void *bt[16];
80 const unsigned cnt = sizeof(bt) / sizeof(bt[0]);
81 unsigned i;
82 int found = 0;
83
84 OSBacktrace(bt, cnt);
85
86 /* find first non-kernel frame */
87 for (i = 3; i < cnt && bt[i]; i++) {
88 if (bt[i] > (void*)&etext) {
89 found = 1;
90 break;
91 }
92 }
93 /*
94 * if there are non-kernel frames, only log these
95 * otherwise, log everything but the first two
96 */
97 if (!found) {
98 i = 2;
99 }
100
101 #define safe_bt(a) (uintptr_t)(a<cnt ? bt[a] : NULL)
102 kernel_debug(debugid, data, size, safe_bt(i), safe_bt(i + 1), 0);
103 kernel_debug(debugid2, safe_bt(i + 2), safe_bt(i + 3), safe_bt(i + 4), safe_bt(i + 5), 0);
104 }
105
106 /* Report a message with a 4 entry backtrace - very slow */
/*
 * Report a printf-style message followed by a 7-entry backtrace of the
 * caller (frames bt[2]..bt[8]; frames 0 and 1 — this function and its
 * immediate caller's frame — are skipped). Very slow: formats and prints
 * under a mutex.
 */
void
OSReportWithBacktrace(const char *str, ...)
{
	char buf[128];
	void *bt[9] = {};
	const unsigned cnt = sizeof(bt) / sizeof(bt[0]);
	va_list listp;

	// Ignore our own and our caller's stack frames, skipping frames 0 & 1
	(void) OSBacktrace(bt, cnt);

	/* Format the message first, outside the lock; truncated at 128 bytes. */
	va_start(listp, str);
	vsnprintf(buf, sizeof(buf), str, listp);
	va_end(listp);

	lck_mtx_lock(sOSReportLock);
	{
		/*
		 * Temporarily disable kernel pointer hiding so the unslid
		 * addresses actually appear in the log, restoring the previous
		 * setting afterwards.
		 */
		boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;
		doprnt_hide_pointers = FALSE;
		printf("%s\nBacktrace 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n", buf,
		    (unsigned long) VM_KERNEL_UNSLIDE(bt[2]), (unsigned long) VM_KERNEL_UNSLIDE(bt[3]),
		    (unsigned long) VM_KERNEL_UNSLIDE(bt[4]), (unsigned long) VM_KERNEL_UNSLIDE(bt[5]),
		    (unsigned long) VM_KERNEL_UNSLIDE(bt[6]), (unsigned long) VM_KERNEL_UNSLIDE(bt[7]),
		    (unsigned long) VM_KERNEL_UNSLIDE(bt[8]));
		/* Log which kmods/kexts the addresses fall in (unslid). */
		kmod_dump_log((vm_offset_t *) &bt[2], cnt - 2, TRUE);
		doprnt_hide_pointers = old_doprnt_hide_pointers;
	}
	lck_mtx_unlock(sOSReportLock);
}
136
/*
 * Kernel stack bounds captured once via C++ static initialization.
 * NOTE(review): neither variable is referenced elsewhere in this file —
 * presumably kept for debugger visibility or historical reasons; confirm
 * before removing.
 */
static vm_offset_t minstackaddr = min_valid_stack_address();
static vm_offset_t maxstackaddr = max_valid_stack_address();
139
140
141 #if __x86_64__
142 #define x86_64_RETURN_OFFSET 8
143 static unsigned int
144 x86_64_validate_raddr(vm_offset_t raddr)
145 {
146 return (raddr > VM_MIN_KERNEL_AND_KEXT_ADDRESS) &&
147 (raddr < VM_MAX_KERNEL_ADDRESS);
148 }
149 static unsigned int
150 x86_64_validate_stackptr(vm_offset_t stackptr)
151 {
152 /* Existence and alignment check
153 */
154 if (!stackptr || (stackptr & 0x7) || !x86_64_validate_raddr(stackptr)) {
155 return 0;
156 }
157
158 /* Is a virtual->physical translation present?
159 */
160 if (!kvtophys(stackptr)) {
161 return 0;
162 }
163
164 /* Check if the return address lies on the same page;
165 * If not, verify that a translation exists.
166 */
167 if (((PAGE_SIZE - (stackptr & PAGE_MASK)) < x86_64_RETURN_OFFSET) &&
168 !kvtophys(stackptr + x86_64_RETURN_OFFSET)) {
169 return 0;
170 }
171 return 1;
172 }
173 #endif
174
175 void
176 OSPrintBacktrace(void)
177 {
178 void * btbuf[20];
179 int tmp = OSBacktrace(btbuf, 20);
180 int i;
181 for (i = 0; i < tmp; i++) {
182 kprintf("bt[%.2d] = %p\n", i, btbuf[i]);
183 }
184 }
185
/*
 * OSBacktrace: walk the current thread's kernel stack via the saved
 * frame-pointer chain, storing up to maxAddrs return addresses in bt[].
 * Returns the number of valid entries captured. On x86_64 the unused
 * tail of bt[] is NULL-padded; on ARM it is left untouched.
 */
unsigned
OSBacktrace(void **bt, unsigned maxAddrs)
{
	unsigned frame;
	// No thread context (e.g. very early boot) — nothing to walk.
	if (!current_thread()) {
		return 0;
	}

#if __x86_64__
/* A single frame larger than half the kernel stack is treated as corrupt. */
#define SANE_x86_64_FRAME_SIZE (kernel_stack_size >> 1)
	vm_offset_t stackptr, stackptr_prev, raddr;
	unsigned frame_index = 0;
	/* Obtain current frame pointer */

	__asm__ volatile ("movq %%rbp, %0" : "=m" (stackptr));

	if (!x86_64_validate_stackptr(stackptr)) {
		goto pad;
	}

	/* Saved return address sits x86_64_RETURN_OFFSET bytes above %rbp. */
	raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET));

	if (!x86_64_validate_raddr(raddr)) {
		goto pad;
	}

	bt[frame_index++] = (void *) raddr;

	/* Follow the frame-pointer chain, validating every hop. */
	for (; frame_index < maxAddrs; frame_index++) {
		stackptr_prev = stackptr;
		stackptr = *((vm_offset_t *) stackptr_prev);

		if (!x86_64_validate_stackptr(stackptr)) {
			break;
		}
		/* Stack grows downwards — older frames have higher addresses. */
		if (stackptr < stackptr_prev) {
			break;
		}

		/* Bail on implausibly large frames (likely a corrupt chain). */
		if ((stackptr - stackptr_prev) > SANE_x86_64_FRAME_SIZE) {
			break;
		}

		raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET));

		if (!x86_64_validate_raddr(raddr)) {
			break;
		}

		bt[frame_index] = (void *) raddr;
	}
pad:
	frame = frame_index;

	/* NULL-pad the remainder of the caller's buffer. */
	for (; frame_index < maxAddrs; frame_index++) {
		bt[frame_index] = (void *) NULL;
	}
#elif __arm__ || __arm64__
	uint32_t i = 0;
	uintptr_t frameb[2];        // frameb[0] = saved fp, frameb[1] = saved lr
	uintptr_t fp = 0;

	// get the current frame pointer for this thread
#if defined(__arm__)
#define OSBacktraceFrameAlignOK(x) (((x) & 0x3) == 0)
	__asm__ volatile ("mov %0,r7" : "=r" (fp));
#elif defined(__arm64__)
#define OSBacktraceFrameAlignOK(x) (((x) & 0xf) == 0)
	__asm__ volatile ("mov %0, fp" : "=r" (fp));
#else
#error Unknown architecture.
#endif

	// now crawl up the stack recording the link value of each frame
	do {
		// check bounds: non-NULL, aligned, inside the kernel range
		if ((fp == 0) || (!OSBacktraceFrameAlignOK(fp)) || (fp > VM_MAX_KERNEL_ADDRESS) || (fp < VM_MIN_KERNEL_AND_KEXT_ADDRESS)) {
			break;
		}
		// safely read frame (copyinframe tolerates unmapped addresses)
#ifdef __arm64__
		if (copyinframe(fp, (char*)frameb, TRUE) != 0) {
#else
		if (copyinframe(fp, (char*)frameb) != 0) {
#endif
			break;
		}

		// No need to use copyin as this is always a kernel address, see check above
#if defined(HAS_APPLE_PAC)
		/* return addresses on stack signed by arm64e ABI */
		bt[i] = ptrauth_strip((void*)frameb[1], ptrauth_key_return_address); // link register
#else
		bt[i] = (void*)frameb[1]; // link register
#endif
		fp = frameb[0];     // follow the saved frame pointer up one frame
	} while (++i < maxAddrs);
	frame = i;
#else
#error arch
#endif
	return frame;
}
289 }