#include <darwintest.h>
#include <darwintest_utils.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <setjmp.h>

#define DEVELOPMENT 1
#define DEBUG 0
#define XNU_KERNEL_PRIVATE 1

#define OS_REFCNT_DEBUG 1
#define STRESS_TESTS 0

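/*
 * Route the kernel's panic() to a local handler (handle_panic, defined
 * below) so the refcount assertion panics exercised by these tests can be
 * intercepted with setjmp/longjmp instead of aborting the process.
 */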
void handle_panic(const char *func, char *str, ...);
#define panic(...) handle_panic(__func__, __VA_ARGS__)

#include "../libkern/os/refcnt.h"
#include "../libkern/os/refcnt.c"

T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true));

/* import some of the refcnt internal state for testing */
extern bool ref_debug_enable;
os_refgrp_decl_extern(global_ref_group);

T_GLOBAL_META(
	T_META_NAMESPACE("os_refcnt"),
	T_META_CHECK_LEAKS(false)
	);

T_DECL(os_refcnt, "Basic atomic refcount")
{
	struct os_refcnt rc;
	os_ref_init(&rc, NULL);
	T_ASSERT_EQ_UINT(os_ref_get_count(&rc), 1, "refcount correctly initialized");

	os_ref_retain(&rc);
	os_ref_retain(&rc);
	T_ASSERT_EQ_UINT(os_ref_get_count(&rc), 3, "retain increased count");

	os_ref_count_t x = os_ref_release(&rc);
	T_ASSERT_EQ_UINT(os_ref_get_count(&rc), 2, "release decreased count");
	T_ASSERT_EQ_UINT(x, 2, "release returned correct count");

	os_ref_release_live(&rc);
	T_ASSERT_EQ_UINT(os_ref_get_count(&rc), 1, "release_live decreased count");

	x = os_ref_release(&rc);
	T_ASSERT_EQ_UINT(os_ref_get_count(&rc), 0, "released");
	T_ASSERT_EQ_UINT(x, 0, "returned released");

	os_ref_init(&rc, NULL);
	x = os_ref_retain_try(&rc);
	T_ASSERT_GT_INT(x, 0, "try retained");

	(void)os_ref_release(&rc);
	(void)os_ref_release(&rc);
	T_QUIET; T_ASSERT_EQ_UINT(os_ref_get_count(&rc), 0, "release");

	x = os_ref_retain_try(&rc);
	T_ASSERT_EQ_INT(x, 0, "try failed");
}

T_DECL(refcnt_raw, "Raw refcount")
{
	os_ref_atomic_t rc;
	os_ref_init_raw(&rc, NULL);
	T_ASSERT_EQ_UINT(os_ref_get_count_raw(&rc), 1, "refcount correctly initialized");

	os_ref_retain_raw(&rc, NULL);
	os_ref_retain_raw(&rc, NULL);
	T_ASSERT_EQ_UINT(os_ref_get_count_raw(&rc), 3, "retain increased count");

	os_ref_count_t x = os_ref_release_raw(&rc, NULL);
	T_ASSERT_EQ_UINT(os_ref_get_count_raw(&rc), 2, "release decreased count");
	T_ASSERT_EQ_UINT(x, 2, "release returned correct count");

	os_ref_release_live_raw(&rc, NULL);
	T_ASSERT_EQ_UINT(os_ref_get_count_raw(&rc), 1, "release_live decreased count");

	x = os_ref_release_raw(&rc, NULL);
	T_ASSERT_EQ_UINT(os_ref_get_count_raw(&rc), 0, "released");
	T_ASSERT_EQ_UINT(x, 0, "returned released");

	os_ref_init_raw(&rc, NULL);
	x = os_ref_retain_try_raw(&rc, NULL);
	T_ASSERT_GT_INT(x, 0, "try retained");

	(void)os_ref_release_raw(&rc, NULL);
	(void)os_ref_release_raw(&rc, NULL);
	T_QUIET; T_ASSERT_EQ_UINT(os_ref_get_count_raw(&rc), 0, "release");

	x = os_ref_retain_try_raw(&rc, NULL);
	T_ASSERT_EQ_INT(x, 0, "try failed");
}

T_DECL(refcnt_locked, "Locked refcount")
{
	struct os_refcnt rc;
	os_ref_init(&rc, NULL);

	os_ref_retain_locked(&rc);
	os_ref_retain_locked(&rc);
	T_ASSERT_EQ_UINT(os_ref_get_count(&rc), 3, "retain increased count");

	os_ref_count_t x = os_ref_release_locked(&rc);
	T_ASSERT_EQ_UINT(os_ref_get_count(&rc), 2, "release decreased count");
	T_ASSERT_EQ_UINT(x, 2, "release returned correct count");

	(void)os_ref_release_locked(&rc);
	x = os_ref_release_locked(&rc);
	T_ASSERT_EQ_UINT(os_ref_get_count(&rc), 0, "released");
	T_ASSERT_EQ_UINT(x, 0, "returned released");
}

T_DECL(refcnt_raw_locked, "Locked raw refcount")
{
	os_ref_atomic_t rc;
	os_ref_init_raw(&rc, NULL);

	os_ref_retain_locked_raw(&rc, NULL);
	os_ref_retain_locked_raw(&rc, NULL);
	T_ASSERT_EQ_UINT(os_ref_get_count_raw(&rc), 3, "retain increased count");

	os_ref_count_t x = os_ref_release_locked_raw(&rc, NULL);
	T_ASSERT_EQ_UINT(os_ref_get_count_raw(&rc), 2, "release decreased count");
	T_ASSERT_EQ_UINT(x, 2, "release returned correct count");

	(void)os_ref_release_locked_raw(&rc, NULL);
	x = os_ref_release_locked_raw(&rc, NULL);
	T_ASSERT_EQ_UINT(os_ref_get_count_raw(&rc), 0, "released");
	T_ASSERT_EQ_UINT(x, 0, "returned released");
}

T_DECL(refcnt_mask_locked, "Locked bitwise refcount")
{
	const os_ref_count_t b = 12;
	os_ref_atomic_t rc;
	os_ref_count_t reserved = 0xaaa;
	os_ref_init_count_mask(&rc, NULL, 1, reserved, b);

	os_ref_retain_locked_mask(&rc, NULL, b);
	os_ref_retain_locked_mask(&rc, NULL, b);
	T_ASSERT_EQ_UINT(os_ref_get_count_mask(&rc, b), 3, "retain increased count");

	os_ref_count_t x = os_ref_release_locked_mask(&rc, NULL, b);
	T_ASSERT_EQ_UINT(os_ref_get_count_mask(&rc, b), 2, "release decreased count");
	T_ASSERT_EQ_UINT(x, 2, "release returned correct count");
	T_ASSERT_EQ_UINT(rc & ((1U << b) - 1), reserved, "Reserved bits not modified");

	(void)os_ref_release_locked_mask(&rc, NULL, b);
	x = os_ref_release_locked_mask(&rc, NULL, b);
	T_ASSERT_EQ_UINT(os_ref_get_count_mask(&rc, b), 0, "released");
	T_ASSERT_EQ_UINT(x, 0, "returned released");
	T_ASSERT_EQ_UINT(rc & ((1U << b) - 1), reserved, "Reserved bits not modified");
}

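/*
 * Exercise the bitwise refcount variants: the low `bits` bits of the atomic
 * word are reserved for other state and must survive retain/release, while
 * the count lives in the remaining upper bits.
 */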
static void
do_bitwise_test(const os_ref_count_t bits)
{
	os_ref_atomic_t rc;
	os_ref_count_t reserved = 0xaaaaaaaaU & ((1U << bits) - 1);
	os_ref_init_count_mask(&rc, NULL, 1, reserved, bits);

	T_ASSERT_EQ_UINT(os_ref_get_count_mask(&rc, bits), 1, "[%u bits] refcount initialized", bits);

	os_ref_retain_mask(&rc, NULL, bits);
	os_ref_retain_mask(&rc, NULL, bits);
	T_ASSERT_EQ_UINT(os_ref_get_count_mask(&rc, bits), 3, "retain increased count");

	os_ref_count_t x = os_ref_release_mask(&rc, NULL, bits);
	T_ASSERT_EQ_UINT(x, 2, "release returned correct count");

	os_ref_release_live_mask(&rc, NULL, bits);
	T_ASSERT_EQ_UINT(os_ref_get_count_mask(&rc, bits), 1, "release_live decreased count");

	x = os_ref_release_mask(&rc, NULL, bits);
	T_ASSERT_EQ_UINT(os_ref_get_count_mask(&rc, bits), 0, "released");
	T_ASSERT_EQ_UINT(x, 0, "returned released");

	T_ASSERT_EQ_UINT(rc & ((1U << bits) - 1), reserved, "Reserved bits not modified");

	os_ref_init_count_mask(&rc, NULL, 1, reserved, bits);
	x = os_ref_retain_try_mask(&rc, NULL, bits);
	T_ASSERT_GT_INT(x, 0, "try retained");

	(void)os_ref_release_mask(&rc, NULL, bits);
	(void)os_ref_release_mask(&rc, NULL, bits);
	T_QUIET; T_ASSERT_EQ_UINT(os_ref_get_count_mask(&rc, bits), 0, "release");

	x = os_ref_retain_try_mask(&rc, NULL, bits);
	T_ASSERT_EQ_INT(x, 0, "try failed");

	T_ASSERT_EQ_UINT(rc & ((1U << bits) - 1), reserved, "Reserved bits not modified");
}

T_DECL(refcnt_bitwise, "Bitwise refcount")
{
	do_bitwise_test(0);
	do_bitwise_test(1);
	do_bitwise_test(8);
	do_bitwise_test(26);

	os_ref_atomic_t rc = 0xaaaaaaaa;

	const os_ref_count_t nbits = 3;
	const os_ref_count_t count = 5;
	const os_ref_count_t bits = 7;
	os_ref_init_count_mask(&rc, NULL, count, bits, nbits);

	os_ref_count_t mask = (1U << nbits) - 1;
	T_ASSERT_EQ_UINT(rc & mask, bits, "bits correctly initialized");
	T_ASSERT_EQ_UINT(rc >> nbits, count, "count correctly initialized");
}

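/* Reference group used to verify per-group accounting in refcnt_groups. */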
os_refgrp_decl(static, g1, "test group", NULL);
os_refgrp_decl_extern(g1);

T_DECL(refcnt_groups, "Group accounting")
{
#if OS_REFCNT_DEBUG
	ref_debug_enable = true;

	struct os_refcnt rc;
	os_ref_init(&rc, &g1);

	T_ASSERT_EQ_UINT(g1.grp_children, 1, "group attached");
	T_ASSERT_EQ_UINT(global_ref_group.grp_children, 1, "global group attached");
	T_ASSERT_EQ_UINT(g1.grp_count, 1, "group count");
	T_ASSERT_EQ_ULLONG(g1.grp_retain_total, 1ULL, "group retains");
	T_ASSERT_EQ_ULLONG(g1.grp_release_total, 0ULL, "group releases");

	os_ref_retain(&rc);
	os_ref_retain(&rc);
	os_ref_release_live(&rc);
	os_ref_release_live(&rc);

	T_EXPECT_EQ_ULLONG(g1.grp_retain_total, 3ULL, "group retains");
	T_EXPECT_EQ_ULLONG(g1.grp_release_total, 2ULL, "group releases");

	os_ref_count_t x = os_ref_release(&rc);
	T_QUIET; T_ASSERT_EQ_UINT(x, 0, "released");

	T_ASSERT_EQ_UINT(g1.grp_children, 0, "group detached");
	T_ASSERT_EQ_UINT(g1.grp_count, 0, "group count");
#else
	T_SKIP("Refcount debugging disabled");
#endif
}

enum {
	OSREF_UNDERFLOW = 1,
	OSREF_OVERFLOW = 2,
	OSREF_RESURRECTION = 3,
	OSREF_DEALLOC_LIVE = 4,
};

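/*
 * Jump buffer and flag used to hand an expected panic back to the test that
 * triggered it.
 */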
static jmp_buf jb;
static bool expect_panic = false;

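/*
 * Panic hook installed via the panic() macro above: map the panicking
 * function to one of the OSREF_* codes and longjmp() back into the test.
 * An unexpected panic fails the test and aborts.
 */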
void
handle_panic(const char *func, char *__unused str, ...)
{
	int ret = -1;
	if (!expect_panic) {
		T_FAIL("unexpected panic from %s", func);
		T_LOG("corrupt program state, aborting");
		abort();
	}
	expect_panic = false;

	if (strcmp(func, "os_ref_panic_underflow") == 0) {
		ret = OSREF_UNDERFLOW;
	} else if (strcmp(func, "os_ref_panic_overflow") == 0) {
		ret = OSREF_OVERFLOW;
	} else if (strcmp(func, "os_ref_panic_resurrection") == 0) {
		ret = OSREF_RESURRECTION;
	} else if (strcmp(func, "os_ref_panic_live") == 0) {
		ret = OSREF_DEALLOC_LIVE;
	} else {
		T_LOG("unexpected panic from %s", func);
	}

	longjmp(jb, ret);
}

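/*
 * Each of the following tests deliberately misuses the refcount API and
 * expects the corresponding panic to be raised and caught by handle_panic().
 */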
T_DECL(refcnt_underflow, "Underflow")
{
	os_ref_atomic_t rc;
	os_ref_init_raw(&rc, NULL);
	(void)os_ref_release_raw(&rc, NULL);

	int x = setjmp(jb);
	if (x == 0) {
		expect_panic = true;
		(void)os_ref_release_raw(&rc, NULL);
		T_FAIL("underflow not caught");
	} else {
		T_ASSERT_EQ_INT(x, OSREF_UNDERFLOW, "underflow caught");
	}
}

T_DECL(refcnt_overflow, "Overflow")
{
	os_ref_atomic_t rc;
	os_ref_init_count_raw(&rc, NULL, 0x0fffffffU);

	int x = setjmp(jb);
	if (x == 0) {
		expect_panic = true;
		(void)os_ref_retain_raw(&rc, NULL);
		T_FAIL("overflow not caught");
	} else {
		T_ASSERT_EQ_INT(x, OSREF_OVERFLOW, "overflow caught");
	}
}

T_DECL(refcnt_resurrection, "Resurrection")
{
	os_ref_atomic_t rc;
	os_ref_init_raw(&rc, NULL);
	os_ref_count_t n = os_ref_release_raw(&rc, NULL);

	T_QUIET; T_EXPECT_EQ_UINT(n, 0, "reference not released");

	int x = setjmp(jb);
	if (x == 0) {
		expect_panic = true;
		(void)os_ref_retain_raw(&rc, NULL);
		T_FAIL("resurrection not caught");
	} else {
		T_ASSERT_EQ_INT(x, OSREF_RESURRECTION, "resurrection caught");
	}
}

T_DECL(refcnt_dealloc_live, "Dealloc expected live object")
{
	os_ref_atomic_t rc;
	os_ref_init_raw(&rc, NULL);

	expect_panic = true;
	int x = setjmp(jb);
	if (x == 0) {
		expect_panic = true;
		os_ref_release_live_raw(&rc, NULL);
		T_FAIL("dealloc live not caught");
	} else {
		T_ASSERT_EQ_INT(x, OSREF_DEALLOC_LIVE, "dealloc live caught");
	}
}

T_DECL(refcnt_initializer, "Static initializers")
{
	struct os_refcnt rc = OS_REF_INITIALIZER;
	os_ref_atomic_t rca = OS_REF_ATOMIC_INITIALIZER;

	T_ASSERT_EQ_INT(0, os_ref_retain_try(&rc), NULL);
	T_ASSERT_EQ_INT(0, os_ref_get_count_raw(&rca), NULL);
}

#if STRESS_TESTS

static const unsigned long iters = 1024 * 1024 * 32;

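/* Worker thread body: hammer the shared refcount with paired retain/release. */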
static void *
func(void *_rc)
{
	struct os_refcnt *rc = _rc;
	for (unsigned long i = 0; i < iters; i++) {
		os_ref_retain(rc);
		os_ref_release_live(rc);
	}
	return NULL;
}

T_DECL(refcnt_stress, "Stress test")
{
	pthread_t th1, th2;

	struct os_refcnt rc;
	os_ref_init(&rc, NULL);

	T_ASSERT_POSIX_ZERO(pthread_create(&th1, NULL, func, &rc), "pthread_create");
	T_ASSERT_POSIX_ZERO(pthread_create(&th2, NULL, func, &rc), "pthread_create");

	void *r1, *r2;
	T_ASSERT_POSIX_ZERO(pthread_join(th1, &r1), "pthread_join");
	T_ASSERT_POSIX_ZERO(pthread_join(th2, &r2), "pthread_join");

	os_ref_count_t x = os_ref_release(&rc);
	T_ASSERT_EQ_INT(x, 0, "Consistent refcount");
}

#endif