// Copyright (c) 2016-2020 Apple Computer, Inc. All rights reserved.
#include <CoreSymbolication/CoreSymbolication.h>
#include <darwintest.h>
#include <dispatch/dispatch.h>
#include <errno.h>
#include <mach/mach.h>
#include <pthread.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
#include <unistd.h>
12 T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true));
14 #define USER_FRAMES (12)
15 #define MAX_SYSCALL_SETUP_FRAMES (2)
16 #define NON_RECURSE_FRAMES (2)
18 static const char *user_bt
[USER_FRAMES
] = {
20 "recurse_a", "recurse_b", "recurse_a", "recurse_b",
21 "recurse_a", "recurse_b", "recurse_a", "recurse_b",
22 "recurse_a", "recurse_b", "expect_callstack",
25 struct callstack_exp
{
26 bool in_syscall_setup
;
27 unsigned int syscall_frames
;
28 const char **callstack
;
30 unsigned int nchecked
;
34 expect_frame(struct callstack_exp
*cs
, CSSymbolRef symbol
,
35 unsigned long addr
, unsigned int bt_idx
)
37 if (CSIsNull(symbol
)) {
38 if (!cs
->in_syscall_setup
) {
39 T_FAIL("invalid symbol for address %#lx at frame %d", addr
,
45 const char *name
= CSSymbolGetName(symbol
);
47 if (cs
->in_syscall_setup
) {
48 if (strcmp(name
, cs
->callstack
[cs
->callstack_len
- 1]) == 0) {
49 cs
->in_syscall_setup
= false;
50 cs
->syscall_frames
= bt_idx
;
51 T_LOG("found start of controlled stack at frame %u, expected "
52 "index %zu", cs
->syscall_frames
, cs
->callstack_len
- 1);
54 T_LOG("found syscall setup symbol %s at frame %u", name
,
58 if (!cs
->in_syscall_setup
) {
59 if (cs
->nchecked
>= cs
->callstack_len
) {
60 T_LOG("frame %2u: skipping system frame %s", bt_idx
, name
);
62 size_t frame_idx
= cs
->callstack_len
- cs
->nchecked
- 1;
63 T_EXPECT_EQ_STR(name
, cs
->callstack
[frame_idx
],
64 "frame %2zu: saw '%s', expected '%s'",
65 frame_idx
, name
, cs
->callstack
[frame_idx
]);
70 if (!cs
->in_syscall_setup
) {
71 T_ASSERT_NOTNULL(name
, NULL
, "symbol should not be NULL");
// Whether the running kernel is 64-bit, determined once by querying the
// kernel process (PID 0) and testing its P_LP64 flag.  Cached with
// dispatch_once so the sysctl runs at most one time per process.
static bool
is_kernel_64_bit(void)
{
	static dispatch_once_t k64_once;
	static bool k64 = false;
	dispatch_once(&k64_once, ^{
		int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, 0 /* kernproc */ };
		struct kinfo_proc kp;
		size_t len = sizeof(kp);

		int errb = sysctl(mib, sizeof(mib) / sizeof(mib[0]), &kp, &len,
		    NULL, 0);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(errb,
		    "sysctl({ CTL_KERN, KERN_PROC, KERN_PROC_PID, 0})");

		k64 = kp.kp_proc.p_flag & P_LP64;
		T_LOG("executing with a %s-bit kernel", k64 ? "64" : "32");
	});
	return k64;
}
// Use an extra, non-inlineable function so that any frames after expect_stack
// can be safely ignored. This insulates the test from changes in how syscalls
// are called by Libc and the kernel.
static int __attribute__((noinline, not_tail_called))
backtrace_current_thread_wrapper(uint64_t *bt, size_t *bt_filled)
{
	// kern.backtrace.user fills bt with the calling thread's user stack as
	// seen by the kernel; *bt_filled is in/out (capacity in, frames out).
	int ret = sysctlbyname("kern.backtrace.user", bt, bt_filled, NULL, 0);
	getpid(); // Really prevent tail calls.
	return ret;
}
// Ask the kernel for a backtrace of the current thread and verify that the
// symbolicated frames match the expected user_bt stack, allowing up to
// MAX_SYSCALL_SETUP_FRAMES of syscall-entry frames on top.
static void __attribute__((noinline, not_tail_called))
expect_callstack(void)
{
	uint64_t bt[USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES] = { 0 };

	// Symbolicate against this task; created once and reused.
	static CSSymbolicatorRef user_symb;
	static dispatch_once_t expect_stack_once;
	dispatch_once(&expect_stack_once, ^{
		user_symb = CSSymbolicatorCreateWithTask(mach_task_self());
		T_QUIET; T_ASSERT_FALSE(CSIsNull(user_symb), NULL);
		T_QUIET; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb), NULL);
	});

	size_t bt_filled = USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES;
	int ret = backtrace_current_thread_wrapper(bt, &bt_filled);
	if (ret == -1 && errno == ENOENT) {
		T_SKIP("release kernel: kern.backtrace.user sysctl returned ENOENT");
	}
	T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(\"kern.backtrace.user\")");
	T_LOG("kernel returned %zu frame backtrace", bt_filled);

	unsigned int bt_len = (unsigned int)bt_filled;
	T_EXPECT_GE(bt_len, (unsigned int)USER_FRAMES,
	    "at least %u frames should be present in backtrace", USER_FRAMES);
	T_EXPECT_LE(bt_len, (unsigned int)USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES,
	    "at most %u frames should be present in backtrace",
	    USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES);

	struct callstack_exp callstack = {
		.in_syscall_setup = true,
		.syscall_frames = 0,
		.callstack = user_bt,
		.callstack_len = USER_FRAMES,
		.nchecked = 0,
	};
	for (unsigned int i = 0; i < bt_len; i++) {
		uintptr_t addr;
#if !defined(__LP64__)
		// Backtrace frames come out as kernel words; convert them back to user
		// uintptr_t for 32-bit processes.
		if (is_kernel_64_bit()) {
			addr = (uintptr_t)(bt[i]);
		} else {
			addr = (uintptr_t)(((uint32_t *)bt)[i]);
		}
#else // defined(__LP32__)
		addr = (uintptr_t)bt[i];
#endif // defined(__LP32__)

		CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime(
			user_symb, addr, kCSNow);
		expect_frame(&callstack, symbol, addr, i);
	}

	T_EXPECT_GE(callstack.nchecked, USER_FRAMES,
	    "checked enough frames for correct symbols");
}
// Mutually-recursive frame builders; declared ahead so each may call the
// other.  noinline/not_tail_called force every call to occupy a distinct,
// backtraceable stack frame.
static int __attribute__((noinline, not_tail_called))
recurse_a(unsigned int frames);
static int __attribute__((noinline, not_tail_called))
recurse_b(unsigned int frames);
// Burn one stack frame, alternating with recurse_b.  At the innermost
// frame (frames == 1) the base case runs the backtrace check instead of
// recursing further; the base-case lines were lost in the garbled source
// and are restored (required to terminate the recursion — without them
// the unsigned counter would wrap and recurse unboundedly).
static int __attribute__((noinline, not_tail_called))
recurse_a(unsigned int frames)
{
	if (frames == 1) {
		expect_callstack();
		getpid(); // Really prevent tail calls.
		return 0;
	}

	return recurse_b(frames - 1) + 1;
}
// Mirror of recurse_a: burn one stack frame, alternating callees.  The
// base case (frames == 1) runs the backtrace check; restored as in
// recurse_a, since the source fragment only retained the recursive call.
static int __attribute__((noinline, not_tail_called))
recurse_b(unsigned int frames)
{
	if (frames == 1) {
		expect_callstack();
		getpid(); // Really prevent tail calls.
		return 0;
	}

	return recurse_a(frames - 1) + 1;
}
197 backtrace_thread(void *arg
)
202 // backtrace_thread, recurse_a, recurse_b, ..., __sysctlbyname
204 // Always make one less call for this frame (backtrace_thread).
205 calls
= USER_FRAMES
- NON_RECURSE_FRAMES
;
207 T_LOG("backtrace thread calling into %d frames (already at %d frames)",
208 calls
, NON_RECURSE_FRAMES
);
209 (void)recurse_a(calls
);
213 T_DECL(backtrace_user
, "test that the kernel can backtrace user stacks",
214 T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(true))
218 // Run the test from a different thread to insulate it from libdarwintest
220 T_QUIET
; T_ASSERT_POSIX_ZERO(pthread_create(&thread
, NULL
, backtrace_thread
,
221 NULL
), "create additional thread to backtrace");
223 T_QUIET
; T_ASSERT_POSIX_ZERO(pthread_join(thread
, NULL
), NULL
);
226 T_DECL(backtrace_user_bounds
,
227 "test that the kernel doesn't write frames out of expected bounds")
229 uint64_t bt_init
[USER_FRAMES
] = {};
230 size_t bt_filled
= USER_FRAMES
, bt_filled_after
= 0;
232 kern_return_t kr
= KERN_FAILURE
;
233 void *bt_page
= NULL
;
234 void *guard_page
= NULL
;
235 void *bt_start
= NULL
;
237 // The backtrace addresses come back as kernel words.
238 size_t kword_size
= is_kernel_64_bit() ? 8 : 4;
240 // Get an idea of how many frames to expect.
241 int ret
= sysctlbyname("kern.backtrace.user", bt_init
, &bt_filled
, NULL
, 0);
242 if (ret
== -1 && errno
== ENOENT
) {
243 T_SKIP("release kernel: kern.backtrace.user missing");
245 T_ASSERT_POSIX_SUCCESS(error
, "sysctlbyname(\"kern.backtrace.user\")");
247 // Allocate two pages -- a first one that's valid and a second that
248 // will be non-writeable to catch a copyout that's too large.
249 bt_page
= mmap(NULL
, vm_page_size
* 2, PROT_READ
| PROT_WRITE
,
250 MAP_ANON
| MAP_PRIVATE
, -1, 0);
252 T_ASSERT_NE(bt_page
, MAP_FAILED
, "allocated backtrace pages");
253 guard_page
= (char *)bt_page
+ vm_page_size
;
255 error
= mprotect(guard_page
, vm_page_size
, PROT_READ
);
256 T_ASSERT_POSIX_SUCCESS(error
, "mprotect(..., PROT_READ) guard page");
258 // Ensure the pages are set up as expected.
259 kr
= vm_write(mach_task_self(), (vm_address_t
)bt_page
,
260 (vm_offset_t
)&(int){ 12345 }, sizeof(int));
261 T_ASSERT_MACH_SUCCESS(kr
,
262 "should succeed in writing to backtrace page");
263 kr
= vm_write(mach_task_self(), (vm_address_t
)guard_page
,
264 (vm_offset_t
)&(int){ 12345 }, sizeof(int));
265 T_ASSERT_NE(kr
, KERN_SUCCESS
, "should fail to write to guard page");
267 // Ask the kernel to write the backtrace just before the guard page.
268 bt_start
= (char *)guard_page
- (kword_size
* bt_filled
);
269 bt_filled_after
= bt_filled
;
271 error
= sysctlbyname("kern.backtrace.user", bt_start
, &bt_filled_after
,
273 T_EXPECT_POSIX_SUCCESS(error
,
274 "sysctlbyname(\"kern.backtrace.user\") just before guard page");
275 T_EXPECT_EQ(bt_filled
, bt_filled_after
,
276 "both calls to backtrace should have filled in the same number of "
279 // Expect the kernel to fault when writing too far.
280 bt_start
= (char *)bt_start
+ 1;
281 bt_filled_after
= bt_filled
;
282 error
= sysctlbyname("kern.backtrace.user", bt_start
, &bt_filled_after
,
284 T_EXPECT_POSIX_FAILURE(error
, EFAULT
,
285 "sysctlbyname(\"kern.backtrace.user\") should fault one byte into "