1 /* Copyright (c) 2016, 2019 Apple Computer, Inc. All rights reserved. */
3 #include <CoreSymbolication/CoreSymbolication.h>
4 #include <darwintest.h>
5 #include <dispatch/dispatch.h>
10 #include <sys/sysctl.h>
/* Allow darwintest to run this test concurrently with other tests. */
12 T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true));
/* Total number of frames expected in the sampled user-space backtrace. */
14 #define USER_FRAMES (12)
/*
 * Frames in the stack that are not part of the recurse_a/recurse_b chain.
 * NOTE(review): inferred from backtrace_thread, which recurses
 * USER_FRAMES - NON_RECURSE_FRAMES times -- confirm against full source.
 */
16 #define NON_RECURSE_FRAMES (4)
/*
 * Expected symbol names for the sampled user backtrace, indexed from the
 * bottom of the stack (see expect_frame()).  A NULL entry marks a system
 * frame that expect_frame() skips.  NOTE(review): this extract is
 * truncated -- only 8 of the USER_FRAMES (12) initializers and no closing
 * "};" are visible here.
 */
18 static const char *user_bt
[USER_FRAMES
] = {
21 "recurse_a", "recurse_b", "recurse_a", "recurse_b",
22 "recurse_a", "recurse_b", "recurse_a", "recurse_b",
/*
 * expect_frame -- verify that `symbol` (resolved for address `addr`, the
 * frame at index `bt_idx` of a `max_frames`-deep backtrace) matches the
 * expected symbol name in `bt` (an array of `bt_len` names).
 *
 * NOTE(review): garbled extract -- the return-type line, the `name`
 * declaration, closing braces, and several statement continuations are
 * missing; the code lines below are kept byte-for-byte as found.
 */
27 expect_frame(const char **bt
, unsigned int bt_len
, CSSymbolRef symbol
,
28 unsigned long addr
, unsigned int bt_idx
, unsigned int max_frames
)
/* Convert the innermost-first backtrace index into an index into bt[]. */
31 unsigned int frame_idx
= max_frames
- bt_idx
- 1;
/* Fail if symbolication could not resolve the address at all. */
33 if (CSIsNull(symbol
)) {
/* NOTE(review): the remainder of this T_FAIL call is missing here. */
34 T_FAIL("invalid symbol for address %#lx at frame %d", addr
,
/* A NULL expected name marks a system frame that is deliberately skipped. */
39 if (bt
[frame_idx
] == NULL
) {
40 T_LOG("frame %2u: skipping system frame %s", frame_idx
,
41 CSSymbolGetName(symbol
));
/* More frames than expected -- report the unexpected frame. */
45 if (frame_idx
>= bt_len
) {
46 T_FAIL("unexpected frame '%s' (%#lx) at index %u",
47 CSSymbolGetName(symbol
), addr
, frame_idx
);
/* Compare the resolved symbol name against the expected one. */
51 name
= CSSymbolGetName(symbol
);
52 T_QUIET
; T_ASSERT_NOTNULL(name
, NULL
);
53 T_EXPECT_EQ_STR(name
, bt
[frame_idx
],
54 "frame %2u: saw '%s', expected '%s'",
55 frame_idx
, name
, bt
[frame_idx
]);
/*
 * is_kernel_64_bit -- return whether the running kernel is 64-bit, by
 * querying the kernel proc (pid 0) via sysctl and testing P_LP64 in its
 * proc flags.  The answer is computed once and cached.
 *
 * NOTE(review): garbled extract -- the return-type line, the
 * `struct kinfo_proc kp` declaration, the `errb` declaration, and the
 * closing lines are missing; code lines are kept byte-for-byte.
 */
59 is_kernel_64_bit(void)
/* One-shot cache of the answer. */
61 static dispatch_once_t k64_once
;
62 static bool k64
= false;
63 dispatch_once(&k64_once
, ^{
/* { CTL_KERN, KERN_PROC, KERN_PROC_PID, 0 } selects the kernel's proc. */
65 int mib
[] = { CTL_KERN
, KERN_PROC
, KERN_PROC_PID
, 0 /* kernproc */ };
68 size_t len
= sizeof(kp
);
70 errb
= sysctl(mib
, sizeof(mib
) / sizeof(mib
[0]), &kp
, &len
, NULL
, 0);
71 T_QUIET
; T_ASSERT_POSIX_SUCCESS(errb
,
72 "sysctl({ CTL_KERN, KERN_PROC, KERN_PROC_PID, 0})");
/* P_LP64 set in the kernel proc's flags means a 64-bit kernel. */
74 k64
= kp
.kp_proc
.p_flag
& P_LP64
;
75 T_LOG("executing with a %s-bit kernel", k64
? "64" : "32");
/*
 * Sample the current thread's user stack via the kern.backtrace.user
 * sysctl and verify that every returned address symbolicates to the
 * expected function name in user_bt[] (via expect_frame()).
 *
 * NOTE(review): garbled extract -- the function-name line is missing
 * (presumably expect_stack(); confirm against full source), as are the
 * `bt_filled`, `err`, and `addr` declarations, the ENOENT check guarding
 * the T_SKIP, and several braces; code lines are kept byte-for-byte.
 */
80 static void __attribute__((noinline
, not_tail_called
))
/* Output buffer: USER_FRAMES return addresses, as 64-bit kernel words. */
83 uint64_t bt
[USER_FRAMES
] = { 0 };
84 unsigned int bt_len
= USER_FRAMES
;
/* Create a symbolicator for our own task exactly once. */
89 static CSSymbolicatorRef user_symb
;
90 static dispatch_once_t expect_stack_once
;
91 dispatch_once(&expect_stack_once
, ^{
92 user_symb
= CSSymbolicatorCreateWithTask(mach_task_self());
93 T_QUIET
; T_ASSERT_FALSE(CSIsNull(user_symb
), NULL
);
94 T_QUIET
; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb
), NULL
);
97 k64
= is_kernel_64_bit();
98 bt_filled
= USER_FRAMES
;
/* Ask the kernel to backtrace the calling user stack into bt[]. */
99 err
= sysctlbyname("kern.backtrace.user", bt
, &bt_filled
, NULL
, 0);
/* The debug sysctl does not exist on release kernels. */
101 T_SKIP("release kernel: kern.backtrace.user sysctl returned ENOENT");
103 T_ASSERT_POSIX_SUCCESS(err
, "sysctlbyname(\"kern.backtrace.user\")");
105 bt_len
= (unsigned int)bt_filled
;
106 T_EXPECT_EQ(bt_len
, (unsigned int)USER_FRAMES
,
107 "%u frames should be present in backtrace", (unsigned int)USER_FRAMES
);
/* Symbolicate and check each returned frame address. */
109 for (unsigned int i
= 0; i
< bt_len
; i
++) {
111 #if !defined(__LP64__)
113 * Backtrace frames come out as kernel words; convert them back to user
114 * uintptr_t for 32-bit processes.
117 addr
= (uintptr_t)(bt
[i
]);
119 addr
= (uintptr_t)(((uint32_t *)bt
)[i
]);
121 #else /* defined(__LP32__) */
122 addr
= (uintptr_t)bt
[i
];
123 #endif /* defined(__LP32__) */
125 CSSymbolRef symbol
= CSSymbolicatorGetSymbolWithAddressAtTime(
126 user_symb
, addr
, kCSNow
);
127 expect_frame(user_bt
, USER_FRAMES
, symbol
, addr
, i
, bt_len
);
/*
 * Forward declarations: recurse_a and recurse_b are mutually recursive.
 * noinline/not_tail_called keep every call as a distinct stack frame so
 * the kernel backtracer sees the full expected chain.
 */
131 static int __attribute__((noinline
, not_tail_called
))
132 recurse_a(unsigned int frames
);
133 static int __attribute__((noinline
, not_tail_called
))
134 recurse_b(unsigned int frames
);
/*
 * recurse_a -- consume one stack frame, then recurse into recurse_b with
 * one fewer frame to go.  NOTE(review): the base case (original lines
 * ~138-144, which terminates the recursion and triggers the backtrace
 * check) is missing from this extract.
 */
136 static int __attribute__((noinline
, not_tail_called
))
137 recurse_a(unsigned int frames
)
145 return recurse_b(frames
- 1) + 1;
/*
 * recurse_b -- consume one stack frame, then recurse back into recurse_a
 * with one fewer frame to go.  NOTE(review): the base case (original
 * lines ~150-156) is missing from this extract.
 */
148 static int __attribute__((noinline
, not_tail_called
))
149 recurse_b(unsigned int frames
)
157 return recurse_a(frames
- 1) + 1;
/*
 * Thread entry point: build a stack with a known, fixed shape by
 * recursing through recurse_a/recurse_b, so the kernel backtracer's
 * output can be checked frame-by-frame against user_bt[].
 *
 * NOTE(review): garbled extract -- the return-type line, the `calls`
 * declaration, and the function's trailing lines are missing; code lines
 * are kept byte-for-byte.
 */
161 backtrace_thread(void *arg
)
167 * backtrace_thread, recurse_a, recurse_b, ..., __sysctlbyname
169 * Always make one less call for this frame (backtrace_thread).
/* Recurse just enough to reach exactly USER_FRAMES total frames. */
171 calls
= USER_FRAMES
- NON_RECURSE_FRAMES
;
173 T_LOG("backtrace thread calling into %d frames (already at %d frames)",
174 calls
, NON_RECURSE_FRAMES
);
175 (void)recurse_a(calls
);
/*
 * backtrace_user -- spawn a dedicated thread with a known stack shape
 * (backtrace_thread) and let it verify the kernel's user-stack
 * backtracer.  NOTE(review): the `pthread_t thread` declaration line is
 * missing from this extract.
 */
179 T_DECL(backtrace_user
, "test that the kernel can backtrace user stacks",
180 T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(true))
184 T_QUIET
; T_ASSERT_POSIX_ZERO(pthread_create(&thread
, NULL
, backtrace_thread
,
185 NULL
), "create additional thread to backtrace");
/* Wait for the worker thread to finish its checks before the test ends. */
187 T_QUIET
; T_ASSERT_POSIX_ZERO(pthread_join(thread
, NULL
), NULL
);
/*
 * backtrace_user_bounds -- ensure the kernel's backtrace copyout never
 * writes past the caller-provided buffer, by placing the buffer directly
 * against a PROT_READ guard page and checking both the in-bounds and the
 * one-byte-overlapping cases.
 *
 * NOTE(review): garbled extract -- the `error` declaration, the trailing
 * arguments of the sysctlbyname calls, closing braces, and string
 * continuations are missing; code lines are kept byte-for-byte.
 */
190 T_DECL(backtrace_user_bounds
,
191 "test that the kernel doesn't write frames out of expected bounds")
193 uint64_t bt_init
[USER_FRAMES
] = {};
194 size_t bt_filled
= USER_FRAMES
, bt_filled_after
= 0;
196 kern_return_t kr
= KERN_FAILURE
;
197 void *bt_page
= NULL
;
198 void *guard_page
= NULL
;
199 void *bt_start
= NULL
;
202 * The backtrace addresses come back as kernel words.
204 size_t kword_size
= is_kernel_64_bit() ? 8 : 4;
207 * Get an idea of how many frames to expect.
209 error
= sysctlbyname("kern.backtrace.user", bt_init
, &bt_filled
, NULL
,
/* Skip on release kernels, where the debug sysctl is absent. */
211 if (error
== ENOENT
) {
212 T_SKIP("release kernel: kern.backtrace.user missing");
214 T_ASSERT_POSIX_SUCCESS(error
, "sysctlbyname(\"kern.backtrace.user\")");
217 * Allocate two pages -- a first one that's valid and a second that
218 * will be non-writeable to catch a copyout that's too large.
221 bt_page
= mmap(NULL
, vm_page_size
* 2, PROT_READ
| PROT_WRITE
,
222 MAP_ANON
| MAP_PRIVATE
, -1, 0);
224 T_ASSERT_NE(bt_page
, MAP_FAILED
, "allocated backtrace pages");
/* The guard page is the second of the two mapped pages. */
225 guard_page
= (char *)bt_page
+ vm_page_size
;
/* Make the guard page read-only so any overflowing write faults. */
227 error
= mprotect(guard_page
, vm_page_size
, PROT_READ
);
228 T_ASSERT_POSIX_SUCCESS(error
, "mprotect(..., PROT_READ) guard page");
231 * Ensure the pages are set up as expected.
/* Sanity check: the first page must be writable... */
234 kr
= vm_write(mach_task_self(), (vm_address_t
)bt_page
,
235 (vm_offset_t
)&(int){ 12345 }, sizeof(int));
236 T_ASSERT_MACH_SUCCESS(kr
,
237 "should succeed in writing to backtrace page");
/* ...and the guard page must not be. */
239 kr
= vm_write(mach_task_self(), (vm_address_t
)guard_page
,
240 (vm_offset_t
)&(int){ 12345 }, sizeof(int));
241 T_ASSERT_NE(kr
, KERN_SUCCESS
, "should fail to write to guard page");
244 * Ask the kernel to write the backtrace just before the guard page.
/* Place the buffer so a correctly-bounded copyout ends exactly at the
 * guard page boundary. */
247 bt_start
= (char *)guard_page
- (kword_size
* bt_filled
);
248 bt_filled_after
= bt_filled
;
250 error
= sysctlbyname("kern.backtrace.user", bt_start
, &bt_filled_after
,
252 T_EXPECT_POSIX_SUCCESS(error
,
253 "sysctlbyname(\"kern.backtrace.user\") just before guard page");
254 T_EXPECT_EQ(bt_filled
, bt_filled_after
,
255 "both calls to backtrace should have filled in the same number of "
259 * Expect the kernel to fault when writing too far.
/* Shift the buffer forward one byte so its end crosses into the guard
 * page: the copyout must now fail with EFAULT. */
262 bt_start
= (char *)bt_start
+ 1;
263 bt_filled_after
= bt_filled
;
264 error
= sysctlbyname("kern.backtrace.user", bt_start
, &bt_filled_after
,
266 T_EXPECT_POSIX_FAILURE(error
, EFAULT
,
267 "sysctlbyname(\"kern.backtrace.user\") should fault one byte into "