/*
 * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

// NOTE: This file is only c++ so I can get static initialisers going
#include <libkern/OSDebug.h>

#include <sys/cdefs.h>

#include <stdarg.h>
#include <mach/mach_types.h>
#include <mach/kmod.h>
#include <kern/lock.h>

#include <libkern/libkern.h>    // From bsd's libkern directory
#include <mach/vm_param.h>

__BEGIN_DECLS
// From osfmk/kern/thread.h but considered to be private
extern vm_offset_t min_valid_stack_address(void);
extern vm_offset_t max_valid_stack_address(void);

// From osfmk/kmod.c
extern void kmod_dump_log(vm_offset_t *addr, unsigned int cnt);

extern addr64_t kvtophys(vm_offset_t va);
__END_DECLS

static mutex_t *sOSReportLock = mutex_alloc(0);
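// The lock above serializes the printf() and kmod_dump_log() output in
// OSReportWithBacktrace() so that concurrent reports do not interleave.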

/* Report a message with a seven-entry backtrace (frames 2 through 8) - very slow */
void
OSReportWithBacktrace(const char *str, ...)
{
    char buf[128];
    void *bt[9];
    const unsigned cnt = sizeof(bt) / sizeof(bt[0]);
    va_list listp;

    // Ignore our own and our caller's stack frames, skipping frames 0 & 1
    (void) OSBacktrace(bt, cnt);

    va_start(listp, str);
    vsnprintf(buf, sizeof(buf), str, listp);
    va_end(listp);

    mutex_lock(sOSReportLock);
    {
        printf("%s\nBacktrace %p %p %p %p %p %p %p\n",
            buf, bt[2], bt[3], bt[4], bt[5], bt[6], bt[7], bt[8]);
        kmod_dump_log((vm_offset_t *) &bt[2], cnt - 2);
    }
    mutex_unlock(sOSReportLock);
}
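
/*
 * Illustrative sketch (not from the original source): OSReportWithBacktrace()
 * takes a printf()-style format string, so a hypothetical caller that wants to
 * flag a bad reference count might look like the following.  The function name
 * sampleRetainCheck and its parameters are made up for illustration only.
 *
 *     static void sampleRetainCheck(const char *name, int count)
 *     {
 *         if (count < 0)
 *             OSReportWithBacktrace("%s: bad retain count %d", name, count);
 *     }
 */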

static vm_offset_t minstackaddr = min_valid_stack_address();
static vm_offset_t maxstackaddr = max_valid_stack_address();
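// These two statics (and sOSReportLock above) are filled in by C++ static
// initialisers at load time - the reason this file is C++, per the NOTE at
// the top of the file.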

#if __i386__
#define i386_RETURN_OFFSET 4

static unsigned int
i386_validate_stackptr(vm_offset_t stackptr)
{
    /* Existence and alignment check
     */
    if (!stackptr || (stackptr & 0x3))
        return 0;

    /* Is a virtual->physical translation present?
     */
    if (!kvtophys(stackptr))
        return 0;

    /* Check if the return address lies on the same page;
     * If not, verify that a translation exists.
     */
    if (((PAGE_SIZE - (stackptr & PAGE_MASK)) < i386_RETURN_OFFSET) &&
        !kvtophys(stackptr + i386_RETURN_OFFSET))
        return 0;
    return 1;
}

static unsigned int
i386_validate_raddr(vm_offset_t raddr)
{
    return ((raddr > VM_MIN_KERNEL_ADDRESS) &&
        (raddr < VM_MAX_KERNEL_ADDRESS));
}
#endif

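/*
 * OSBacktrace: capture up to maxAddrs return addresses from the current call
 * chain into bt[].  Each architecture walks its saved frame-pointer chain,
 * validating every frame before dereferencing it.  Unused slots are zeroed
 * and the number of frames actually captured is returned.
 */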
unsigned OSBacktrace(void **bt, unsigned maxAddrs)
{
    unsigned frame;

#if __ppc__
    vm_offset_t stackptr, stackptr_prev;
    const vm_offset_t * const mem = (vm_offset_t *) 0;
    unsigned i = 0;

    __asm__ volatile("mflr %0" : "=r" (stackptr));
    bt[i++] = (void *) stackptr;

    __asm__ volatile("mr %0,r1" : "=r" (stackptr));
    for ( ; i < maxAddrs; i++) {
        // Validate we have a reasonable stackptr
        if ( !(minstackaddr <= stackptr && stackptr < maxstackaddr)
            || (stackptr & 3))
            break;

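        // Follow the back chain saved at offset 0 of the current frame to
        // reach the caller's frame; the saved LR (the return address) sits
        // two words into that frame.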
        stackptr_prev = stackptr;
        stackptr = mem[stackptr_prev >> 2];
        if ((stackptr_prev ^ stackptr) > 8 * 1024)    // Sanity check
            break;

        vm_offset_t addr = mem[(stackptr >> 2) + 2];
        if ((addr & 3) || (addr < 0x8000))    // More sanity checks
            break;
        bt[i] = (void *) addr;
    }
    frame = i;

    for ( ; i < maxAddrs; i++)
        bt[i] = (void *) 0;
#elif __i386__
#define SANE_i386_FRAME_SIZE 8*1024
    vm_offset_t stackptr, stackptr_prev, raddr;
    unsigned frame_index = 0;
    /* Obtain current frame pointer */
    __asm__ volatile("movl %%ebp, %0" : "=m" (stackptr));

    if (!i386_validate_stackptr(stackptr))
        goto pad;

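    /* On i386 the return address sits one word above the saved frame pointer,
     * i.e. at %ebp + i386_RETURN_OFFSET.
     */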
    raddr = *((vm_offset_t *) (stackptr + i386_RETURN_OFFSET));

    if (!i386_validate_raddr(raddr))
        goto pad;

    bt[frame_index++] = (void *) raddr;

    for ( ; frame_index < maxAddrs; frame_index++) {
        stackptr_prev = stackptr;
        stackptr = *((vm_offset_t *) stackptr_prev);

        if (!i386_validate_stackptr(stackptr))
            break;
        /* Stack grows downwards */
        if (stackptr < stackptr_prev)
            break;

        if ((stackptr_prev ^ stackptr) > SANE_i386_FRAME_SIZE)
            break;

        raddr = *((vm_offset_t *) (stackptr + i386_RETURN_OFFSET));

        if (!i386_validate_raddr(raddr))
            break;

        bt[frame_index] = (void *) raddr;
    }
pad:
    frame = frame_index;

    for ( ; frame_index < maxAddrs; frame_index++)
        bt[frame_index] = (void *) 0;
#else
#error arch
#endif
    return frame;
}
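
/*
 * Illustrative sketch (not from the original source): a minimal caller that
 * captures and prints its own backtrace.  OSBacktrace() returns the number of
 * frames actually captured and zero-fills the remaining slots.  The function
 * name sampleDumpCallers is made up for illustration only.
 *
 *     static void sampleDumpCallers(void)
 *     {
 *         void *frames[8];
 *         unsigned valid = OSBacktrace(frames, 8);
 *         for (unsigned i = 0; i < valid; i++)
 *             printf("frame %u: %p\n", i, frames[i]);
 *     }
 */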