/* apple/xnu, tag xnu-6153.61.1: libkern/os/refcnt.c */
#if KERNEL
#include <kern/assert.h>
#include <kern/debug.h>
#include <pexpert/pexpert.h>
#include <kern/btlog.h>
#include <kern/backtrace.h>
#include <libkern/libkern.h>
#endif

#include "refcnt.h"

#define OS_REFCNT_MAX_COUNT     ((os_ref_count_t)0x0FFFFFFFUL)

#if OS_REFCNT_DEBUG
extern struct os_refgrp global_ref_group;
os_refgrp_decl(, global_ref_group, "all", NULL);

extern bool ref_debug_enable;
bool ref_debug_enable = false;
static const size_t ref_log_nrecords = 1000000;

#define REFLOG_BTDEPTH  10
#define REFLOG_RETAIN   1
#define REFLOG_RELEASE  2

#define __debug_only
#else
# define __debug_only __unused
#endif /* OS_REFCNT_DEBUG */

void
os_ref_panic_live(void *rc)
{
	panic("os_refcnt: unexpected release of final reference (rc=%p)\n", rc);
	__builtin_unreachable();
}

__abortlike
static void
os_ref_panic_underflow(void *rc)
{
	panic("os_refcnt: underflow (rc=%p)\n", rc);
	__builtin_unreachable();
}

__abortlike
static void
os_ref_panic_resurrection(void *rc)
{
	panic("os_refcnt: attempted resurrection (rc=%p)\n", rc);
	__builtin_unreachable();
}

__abortlike
static void
os_ref_panic_overflow(void *rc)
{
	panic("os_refcnt: overflow (rc=%p)\n", rc);
	__builtin_unreachable();
}

static inline void
os_ref_check_underflow(void *rc, os_ref_count_t count)
{
	if (__improbable(count == 0)) {
		os_ref_panic_underflow(rc);
	}
}

static inline void
os_ref_check_overflow(os_ref_atomic_t *rc, os_ref_count_t count)
{
	if (__improbable(count >= OS_REFCNT_MAX_COUNT)) {
		os_ref_panic_overflow(rc);
	}
}

static inline void
os_ref_assert_referenced(void *rc, os_ref_count_t count)
{
	if (__improbable(count == 0)) {
		os_ref_panic_resurrection(rc);
	}
}

static inline void
os_ref_check_retain(os_ref_atomic_t *rc, os_ref_count_t count)
{
	os_ref_assert_referenced(rc, count);
	os_ref_check_overflow(rc, count);
}
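
/*
 * A retain is validated against the count observed before the increment:
 * a prior count of zero means the last reference was already dropped
 * (attempted resurrection), and a prior count at OS_REFCNT_MAX_COUNT
 * almost certainly indicates a leaked reference. Both conditions panic.
 */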

#if OS_REFCNT_DEBUG
#if KERNEL
__attribute__((cold, noinline))
static void
ref_log_op(struct os_refgrp *grp, void *elem, int op)
{
	if (grp == NULL) {
		return;
	}

	if (grp->grp_log == NULL) {
		ref_log_op(grp->grp_parent, elem, op);
		return;
	}

	uintptr_t bt[REFLOG_BTDEPTH];
	uint32_t nframes = backtrace(bt, REFLOG_BTDEPTH, NULL);
	btlog_add_entry((btlog_t *)grp->grp_log, elem, op, (void **)bt, nframes);
}

__attribute__((cold, noinline))
static void
ref_log_drop(struct os_refgrp *grp, void *elem)
{
	if (!ref_debug_enable || grp == NULL) {
		return;
	}

	if (grp->grp_log == NULL) {
		ref_log_drop(grp->grp_parent, elem);
		return;
	}

	btlog_remove_entries_for_element(grp->grp_log, elem);
}

__attribute__((cold, noinline))
static void
ref_log_init(struct os_refgrp *grp)
{
	if (grp->grp_log != NULL) {
		return;
	}

	char grpbuf[128];
	char *refgrp = grpbuf;
	if (!PE_parse_boot_argn("rlog", refgrp, sizeof(grpbuf))) {
		return;
	}

	/*
	 * Enable refcount statistics if the rlog boot-arg is present,
	 * even when no specific group is logged.
	 */
	ref_debug_enable = true;

	const char *g;
	while ((g = strsep(&refgrp, ",")) != NULL) {
		if (strcmp(g, grp->grp_name) == 0) {
			/* enable logging on this refgrp */
			grp->grp_log = btlog_create(ref_log_nrecords, REFLOG_BTDEPTH, true);
			return;
		}
	}
}
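
/*
 * Example: the boot-arg "rlog=all" enables backtrace logging for the
 * global "all" refgrp declared above; a comma-separated list such as
 * "rlog=all,<grp_name>" selects several groups. Any rlog value, even one
 * matching no group name, still sets ref_debug_enable and so turns on
 * per-group statistics.
 */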
#else

#ifndef ref_log_init
# define ref_log_init(...) do {} while (0)
#endif
#ifndef ref_log_op
# define ref_log_op(...) do {} while (0)
#endif
#ifndef ref_log_drop
# define ref_log_drop(...) do {} while (0)
#endif

#endif /* KERNEL */

/*
 * attach a new refcnt to a group
 */
__attribute__((cold, noinline))
static void
ref_attach_to_group(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t init_count)
{
	if (grp == NULL) {
		return;
	}

	if (atomic_fetch_add_explicit(&grp->grp_children, 1, memory_order_relaxed) == 0) {
		/* First reference count object in this group. Check if we should enable
		 * refcount logging. */
		ref_log_init(grp);
	}

	atomic_fetch_add_explicit(&grp->grp_count, init_count, memory_order_relaxed);
	atomic_fetch_add_explicit(&grp->grp_retain_total, init_count, memory_order_relaxed);

	if (grp == &global_ref_group) {
		return;
	}

	if (grp->grp_parent == NULL) {
		grp->grp_parent = &global_ref_group;
	}

	ref_attach_to_group(rc, grp->grp_parent, init_count);
}
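
/*
 * Group statistics roll up the hierarchy: a group with no explicit parent
 * is lazily re-parented to global_ref_group, so every retain and release
 * below is also accounted against the "all" group.
 */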

static void
ref_retain_group(struct os_refgrp *grp)
{
	if (grp) {
		atomic_fetch_add_explicit(&grp->grp_count, 1, memory_order_relaxed);
		atomic_fetch_add_explicit(&grp->grp_retain_total, 1, memory_order_relaxed);
		ref_retain_group(grp->grp_parent);
	}
}

__attribute__((cold, noinline))
static void
ref_release_group(struct os_refgrp *grp, bool final)
{
	if (grp) {
		atomic_fetch_sub_explicit(&grp->grp_count, 1, memory_order_relaxed);
		atomic_fetch_add_explicit(&grp->grp_release_total, 1, memory_order_relaxed);
		if (final) {
			atomic_fetch_sub_explicit(&grp->grp_children, 1, memory_order_relaxed);
		}

		ref_release_group(grp->grp_parent, final);
	}
}

__attribute__((cold, noinline))
static void
ref_init_debug(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t count)
{
	ref_attach_to_group(rc, grp, count);

	for (os_ref_count_t i = 0; i < count; i++) {
		ref_log_op(grp, (void *)rc, REFLOG_RETAIN);
	}
}

__attribute__((cold, noinline))
static void
ref_retain_debug(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	ref_retain_group(grp);
	ref_log_op(grp, (void *)rc, REFLOG_RETAIN);
}
#endif

void
os_ref_init_count_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t count)
{
	os_ref_check_underflow(rc, count);
	atomic_init(rc, count);

#if OS_REFCNT_DEBUG
	if (__improbable(ref_debug_enable && grp)) {
		ref_init_debug(rc, grp, count);
	}
#endif
}
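
/*
 * Illustrative sketch: a minimal object lifecycle built directly on the
 * internal entry points defined in this file. Real callers go through the
 * os_ref_init/os_ref_retain/os_ref_release wrappers in refcnt.h; "struct
 * widget" and widget_free() are hypothetical names used for illustration.
 */
#if 0 /* example only */
struct widget {
	os_ref_atomic_t w_refs;
};

extern void widget_free(struct widget *);       /* hypothetical deallocator */

static void
widget_init(struct widget *w)
{
	/* the object starts life with one reference held by its creator */
	os_ref_init_count_internal(&w->w_refs, NULL, 1);
}

static void
widget_release(struct widget *w)
{
	/* free on the transition to zero; the barrier variant orders all
	 * prior accesses to the object before widget_free() runs */
	if (os_ref_release_barrier_internal(&w->w_refs, NULL) == 0) {
		widget_free(w);
	}
}
#endif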

void
os_ref_retain_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	os_ref_count_t old = atomic_fetch_add_explicit(rc, 1, memory_order_relaxed);
	os_ref_check_retain(rc, old);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif
}

bool
os_ref_retain_try_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	os_ref_count_t cur = os_ref_get_count_internal(rc);

	while (1) {
		if (__improbable(cur == 0)) {
			return false;
		}

		os_ref_check_retain(rc, cur);

		if (atomic_compare_exchange_weak_explicit(rc, &cur, cur + 1,
		    memory_order_relaxed, memory_order_relaxed)) {
			break;
		}
	}

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif

	return true;
}
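
/*
 * Illustrative sketch: the try-retain above is the building block for
 * lookups that can race with teardown, e.g. finding an object in a table
 * while another thread drops the final reference. A failed try means the
 * count already hit zero and the object must be treated as dead. This
 * reuses the hypothetical struct widget from the earlier sketch;
 * table_lookup() and its lock are likewise hypothetical.
 */
#if 0 /* example only */
extern struct widget *table_lookup(uint64_t id); /* hypothetical, caller holds table lock */

static struct widget *
widget_lookup(uint64_t id)
{
	struct widget *w = table_lookup(id);
	if (w != NULL && !os_ref_retain_try_internal(&w->w_refs, NULL)) {
		/* lost the race with the final release: do not use w */
		w = NULL;
	}
	return w;
}
#endif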

__attribute__((always_inline))
static inline os_ref_count_t
_os_ref_release_inline(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
	os_ref_count_t val;

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		/*
		 * Care not to use 'rc' after the decrement because it might be deallocated
		 * under us.
		 */
		ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
	}
#endif

	val = atomic_fetch_sub_explicit(rc, 1, release_order);
	os_ref_check_underflow(rc, val);
	if (__improbable(--val == 0)) {
		atomic_load_explicit(rc, dealloc_order);
	}

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		if (val == 0) {
			ref_log_drop(grp, (void *)rc); /* rc is only used as an identifier */
		}
		ref_release_group(grp, !val);
	}
#endif

	return val;
}
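
/*
 * Memory-ordering note: the decrement uses 'release_order' so that all of
 * this thread's prior accesses to the object are ordered before the count
 * drops. On the final release, the extra load with 'dealloc_order'
 * (acquire, in the barrier variant) pairs with the other threads' release
 * decrements, so the thread that deallocates observes every side effect
 * made while any reference was still live.
 */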

__attribute__((noinline))
os_ref_count_t
os_ref_release_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
	// Legacy exported interface with bad codegen due to the barriers
	// not being immediate
	//
	// Also serves as the debug function
	return _os_ref_release_inline(rc, grp, release_order, dealloc_order);
}

os_ref_count_t
os_ref_release_barrier_internal(os_ref_atomic_t *rc,
    struct os_refgrp * __debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		return os_ref_release_internal(rc, grp,
		           memory_order_release, memory_order_acquire);
	}
#endif
	return _os_ref_release_inline(rc, NULL,
	           memory_order_release, memory_order_acquire);
}

os_ref_count_t
os_ref_release_relaxed_internal(os_ref_atomic_t *rc,
    struct os_refgrp * __debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		return os_ref_release_internal(rc, grp,
		           memory_order_relaxed, memory_order_relaxed);
	}
#endif
	return _os_ref_release_inline(rc, NULL,
	           memory_order_relaxed, memory_order_relaxed);
}

void
os_ref_retain_locked_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	os_ref_count_t val = os_ref_get_count_internal(rc);
	os_ref_check_retain(rc, val);
	atomic_store_explicit(rc, ++val, memory_order_relaxed);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif
}

os_ref_count_t
os_ref_release_locked_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	os_ref_count_t val = os_ref_get_count_internal(rc);
	os_ref_check_underflow(rc, val);
	atomic_store_explicit(rc, --val, memory_order_relaxed);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_release_group(grp, !val);
		ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
		if (val == 0) {
			ref_log_drop(grp, (void *)rc);
		}
	}
#endif

	return val;
}
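
/*
 * Illustrative sketch: the _locked variants use a plain load/store pair
 * instead of atomic read-modify-write, so they are only safe when the
 * counter is protected by an external lock. Assuming the hypothetical
 * struct widget from above additionally carries a lck_mtx_t w_lock:
 */
#if 0 /* example only */
static void
widget_release_locked(struct widget *w)
{
	lck_mtx_assert(&w->w_lock, LCK_MTX_ASSERT_OWNED);
	if (os_ref_release_locked_internal(&w->w_refs, NULL) == 0) {
		/* safe: no new reference can be taken while the lock is held */
		widget_free_locked(w);  /* hypothetical */
	}
}
#endif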

/*
 * Bitwise API
 */

os_ref_count_t
os_ref_get_count_mask(os_ref_atomic_t *rc, os_ref_count_t bits)
{
	os_ref_count_t ret;
	ret = os_ref_get_count_raw(rc);
	return ret >> bits;
}

#undef os_ref_init_count_mask
void
os_ref_init_count_mask(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp,
    os_ref_count_t init_count, os_ref_count_t init_bits, os_ref_count_t b)
{
	assert(init_bits < (1U << b));
	os_ref_check_underflow(rc, init_count);
	atomic_init(rc, (init_count << b) | init_bits);

#if OS_REFCNT_DEBUG
	if (__improbable(ref_debug_enable && grp)) {
		ref_init_debug(rc, grp, init_count);
	}
#endif
}
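
/*
 * Illustrative sketch: the mask API stores the reference count in the high
 * bits of the word and reserves the low 'b' bits for caller-owned flags.
 * With b = 2, a count of 1 and flag bits 0x1 are packed as (1 << 2) | 0x1.
 * WIDGET_BITS and WIDGET_DIRTY are hypothetical.
 */
#if 0 /* example only */
#define WIDGET_BITS     2       /* two low bits reserved for flags */
#define WIDGET_DIRTY    0x1

static void
widget_ref_init(os_ref_atomic_t *rc)
{
	/* count = 1, flags = WIDGET_DIRTY; asserts WIDGET_DIRTY < (1U << 2) */
	os_ref_init_count_mask(rc, NULL, 1, WIDGET_DIRTY, WIDGET_BITS);
}
#endif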

#undef os_ref_retain_mask
void
os_ref_retain_mask(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t bits)
{
	os_ref_count_t old = atomic_fetch_add_explicit(rc, 1U << bits, memory_order_relaxed);
	os_ref_check_overflow(rc, old);
	os_ref_assert_referenced(rc, old >> bits);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif
}

#undef os_ref_release_mask_internal
os_ref_count_t
os_ref_release_mask_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t bits,
    memory_order release_order, memory_order dealloc_order)
{
#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		/*
		 * Care not to use 'rc' after the decrement because it might be deallocated
		 * under us.
		 */
		ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
	}
#endif

	os_ref_count_t val = atomic_fetch_sub_explicit(rc, 1U << bits, release_order);
	val >>= bits;
	os_ref_check_underflow(rc, val);
	if (__improbable(--val == 0)) {
		atomic_load_explicit(rc, dealloc_order);
	}

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		if (val == 0) {
			ref_log_drop(grp, (void *)rc); /* rc is only used as an identifier */
		}
		ref_release_group(grp, !val);
	}
#endif

	return val;
}

#undef os_ref_retain_try_mask
bool
os_ref_retain_try_mask(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t bits)
{
	os_ref_count_t cur = os_ref_get_count_internal(rc);

	while (1) {
		if (__improbable((cur >> bits) == 0)) {
			return false;
		}

		os_ref_check_overflow(rc, cur);

		os_ref_count_t next = cur + (1U << bits);
		if (atomic_compare_exchange_weak_explicit(rc, &cur, next,
		    memory_order_relaxed, memory_order_relaxed)) {
			break;
		}
	}

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif

	return true;
}

#undef os_ref_retain_locked_mask
void
os_ref_retain_locked_mask(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t bits)
{
	os_ref_count_t val = os_ref_get_count_internal(rc);

	os_ref_check_overflow(rc, val);
	os_ref_assert_referenced(rc, val >> bits);

	val += (1U << bits);
	atomic_store_explicit(rc, val, memory_order_relaxed);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif
}

#undef os_ref_release_locked_mask
os_ref_count_t
os_ref_release_locked_mask(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t bits)
{
	os_ref_count_t val = os_ref_get_count_internal(rc);
	os_ref_check_underflow(rc, val >> bits);
	val -= (1U << bits);
	atomic_store_explicit(rc, val, memory_order_relaxed);

	val >>= bits;

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_release_group(grp, !val);
		ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
		if (val == 0) {
			ref_log_drop(grp, (void *)rc);
		}
	}
#endif

	return val;
}