/* Copyright (c) 2016, 2019 Apple Computer, Inc. All rights reserved. */

#include <CoreSymbolication/CoreSymbolication.h>
#include <darwintest.h>
#include <dispatch/dispatch.h>
#include <errno.h>
#include <mach/mach.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
#include <unistd.h>

#define USER_FRAMES (12)

#define NON_RECURSE_FRAMES (5)
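
/*
 * The recursing thread is already NON_RECURSE_FRAMES deep when it starts
 * recursing, so backtrace_thread() only makes USER_FRAMES - NON_RECURSE_FRAMES
 * calls to reach the expected depth.
 */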

static const char *user_bt[USER_FRAMES] = {
	NULL, NULL,
	"backtrace_thread",
	"recurse_a", "recurse_b", "recurse_a", "recurse_b",
	"recurse_a", "recurse_b", "recurse_a",
	"expect_stack", NULL,
};

static void
expect_frame(const char **bt, unsigned int bt_len, CSSymbolRef symbol,
    unsigned long addr, unsigned int bt_idx, unsigned int max_frames)
{
	const char *name;
	unsigned int frame_idx = max_frames - bt_idx - 1;

	if (bt[frame_idx] == NULL) {
		T_LOG("frame %2u: skipping system frame", frame_idx);
		return;
	}

	if (CSIsNull(symbol)) {
		T_FAIL("invalid symbol for address %#lx at frame %d", addr, frame_idx);
		return;
	}

	if (frame_idx >= bt_len) {
		T_FAIL("unexpected frame '%s' (%#lx) at index %u",
		    CSSymbolGetName(symbol), addr, frame_idx);
		return;
	}

	name = CSSymbolGetName(symbol);
	T_QUIET; T_ASSERT_NOTNULL(name, NULL);
	T_EXPECT_EQ_STR(name, bt[frame_idx],
	    "frame %2u: saw '%s', expected '%s'",
	    frame_idx, name, bt[frame_idx]);
}

static bool
is_kernel_64_bit(void)
{
	static dispatch_once_t k64_once;
	static bool k64 = false;
	dispatch_once(&k64_once, ^{
		struct kinfo_proc kp;
		int errb;
		int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, 0 /* kernproc */ };
		size_t len = sizeof(kp);

		errb = sysctl(mib, sizeof(mib) / sizeof(mib[0]), &kp, &len, NULL, 0);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(errb,
		    "sysctl({ CTL_KERN, KERN_PROC, KERN_PROC_PID, 0})");

		k64 = kp.kp_proc.p_flag & P_LP64;
		T_LOG("executing with a %s-bit kernel", k64 ? "64" : "32");
	});
	return k64;
}

static void __attribute__((noinline, not_tail_called))
expect_stack(void)
{
	uint64_t bt[USER_FRAMES] = { 0 };
	unsigned int bt_len = USER_FRAMES;
	size_t bt_filled;
	bool k64;
	int err;
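
	/*
	 * The task symbolicator is created once, via dispatch_once, and shared
	 * by every subsequent call to expect_stack().
	 */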
	static CSSymbolicatorRef user_symb;
	static dispatch_once_t expect_stack_once;
	dispatch_once(&expect_stack_once, ^{
		user_symb = CSSymbolicatorCreateWithTask(mach_task_self());
		T_QUIET; T_ASSERT_FALSE(CSIsNull(user_symb), NULL);
		T_QUIET; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb), NULL);
	});

	k64 = is_kernel_64_bit();
	bt_filled = USER_FRAMES;
	err = sysctlbyname("kern.backtrace.user", bt, &bt_filled, NULL, 0);
	if (err == ENOENT) {
		T_SKIP("release kernel: kern.backtrace.user sysctl returned ENOENT");
	}
	T_ASSERT_POSIX_SUCCESS(err, "sysctlbyname(\"kern.backtrace.user\")");

	bt_len = (unsigned int)bt_filled;
	T_EXPECT_EQ(bt_len, (unsigned int)USER_FRAMES,
	    "%u frames should be present in backtrace", (unsigned int)USER_FRAMES);

	for (unsigned int i = 0; i < bt_len; i++) {
		uintptr_t addr;
#if !defined(__LP64__)
		/*
		 * Backtrace frames come out as kernel words; convert them back to
		 * user uintptr_t for 32-bit processes.
		 */
		if (k64) {
			addr = (uintptr_t)(bt[i]);
		} else {
			addr = (uintptr_t)(((uint32_t *)bt)[i]);
		}
#else /* defined(__LP64__) */
		addr = (uintptr_t)bt[i];
#endif /* defined(__LP64__) */

		CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime(
			user_symb, addr, kCSNow);
		expect_frame(user_bt, USER_FRAMES, symbol, addr, i, bt_len);
	}
}

static int __attribute__((noinline, not_tail_called))
recurse_a(unsigned int frames);
static int __attribute__((noinline, not_tail_called))
recurse_b(unsigned int frames);
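
/*
 * noinline and not_tail_called keep the compiler from folding these frames
 * away, and alternating between two functions gives each expected frame an
 * unambiguous symbol name; the getpid() call after expect_stack() keeps the
 * backtrace call from sitting in tail position.
 */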

static int __attribute__((noinline, not_tail_called))
recurse_a(unsigned int frames)
{
	if (frames == 1) {
		expect_stack();
		getpid();
		return 0;
	}

	return recurse_b(frames - 1) + 1;
}

static int __attribute__((noinline, not_tail_called))
recurse_b(unsigned int frames)
{
	if (frames == 1) {
		expect_stack();
		getpid();
		return 0;
	}

	return recurse_a(frames - 1) + 1;
}

static void *
backtrace_thread(void *arg)
{
#pragma unused(arg)
	unsigned int calls;

	/*
	 * backtrace_thread, recurse_a, recurse_b, ..., __sysctlbyname
	 *
	 * Always make one less call for this frame (backtrace_thread).
	 */
	calls = USER_FRAMES - NON_RECURSE_FRAMES;

	T_LOG("backtrace thread calling into %d frames (already at %d frames)",
	    calls, NON_RECURSE_FRAMES);
	(void)recurse_a(calls);
	return NULL;
}

T_DECL(backtrace_user, "test that the kernel can backtrace user stacks",
    T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(true))
{
	pthread_t thread;

	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, backtrace_thread,
	    NULL), "create additional thread to backtrace");

	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_join(thread, NULL), NULL);
}
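
/*
 * The bounds test lays the backtrace buffer flush against a read-only guard
 * page so that an over-long copyout from the kernel faults instead of
 * silently scribbling past the buffer.
 */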
T_DECL(backtrace_user_bounds,
    "test that the kernel doesn't write frames out of expected bounds")
{
	uint64_t bt_init[USER_FRAMES] = {};
	size_t bt_filled = USER_FRAMES, bt_filled_after = 0;
	int error = 0;
	kern_return_t kr = KERN_FAILURE;
	void *bt_page = NULL;
	void *guard_page = NULL;
	void *bt_start = NULL;

	/*
	 * The backtrace addresses come back as kernel words.
	 */
	size_t kword_size = is_kernel_64_bit() ? 8 : 4;

	/*
	 * Get an idea of how many frames to expect.
	 */
	error = sysctlbyname("kern.backtrace.user", bt_init, &bt_filled, NULL,
	    0);
	if (error == ENOENT) {
		T_SKIP("release kernel: kern.backtrace.user missing");
	}
	T_ASSERT_POSIX_SUCCESS(error, "sysctlbyname(\"kern.backtrace.user\")");

	/*
	 * Allocate two pages -- a first one that's valid and a second that
	 * will be non-writeable to catch a copyout that's too large.
	 */
	bt_page = mmap(NULL, vm_page_size * 2, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	T_QUIET;
	T_ASSERT_NE(bt_page, MAP_FAILED, "allocated backtrace pages");
	guard_page = (char *)bt_page + vm_page_size;

	error = mprotect(guard_page, vm_page_size, PROT_READ);
	T_ASSERT_POSIX_SUCCESS(error, "mprotect(..., PROT_READ) guard page");

	/*
	 * Ensure the pages are set up as expected.
	 */
	kr = vm_write(mach_task_self(), (vm_address_t)bt_page,
	    (vm_offset_t)&(int){ 12345 }, sizeof(int));
	T_ASSERT_MACH_SUCCESS(kr,
	    "should succeed in writing to backtrace page");

	kr = vm_write(mach_task_self(), (vm_address_t)guard_page,
	    (vm_offset_t)&(int){ 12345 }, sizeof(int));
	T_ASSERT_NE(kr, KERN_SUCCESS, "should fail to write to guard page");

	/*
	 * Ask the kernel to write the backtrace just before the guard page.
	 */
	bt_start = (char *)guard_page - (kword_size * bt_filled);
	bt_filled_after = bt_filled;

	error = sysctlbyname("kern.backtrace.user", bt_start, &bt_filled_after,
	    NULL, 0);
	T_EXPECT_POSIX_SUCCESS(error,
	    "sysctlbyname(\"kern.backtrace.user\") just before guard page");
	T_EXPECT_EQ(bt_filled, bt_filled_after,
	    "both calls to backtrace should have filled in the same number of "
	    "frames");

	/*
	 * Expect the kernel to fault when writing too far.
	 */
	bt_start = (char *)bt_start + 1;
	bt_filled_after = bt_filled;

	error = sysctlbyname("kern.backtrace.user", bt_start, &bt_filled_after,
	    NULL, 0);
	T_EXPECT_POSIX_FAILURE(error, EFAULT,
	    "sysctlbyname(\"kern.backtrace.user\") should fault one byte into "
	    "the guard page");
}