#include <kern/assert.h>
#include <kern/debug.h>
#include <pexpert/pexpert.h>
#include <kern/btlog.h>
#include <kern/backtrace.h>
#include <libkern/libkern.h>
#include <os/refcnt_internal.h>    /* os_ref_* types, struct os_refgrp, OS_REFCNT_DEBUG */

#define OS_REFCNT_MAX_COUNT ((os_ref_count_t)0x0FFFFFFFUL)

#if OS_REFCNT_DEBUG
extern struct os_refgrp global_ref_group;
os_refgrp_decl(, global_ref_group, "all", NULL);

extern bool ref_debug_enable;
bool ref_debug_enable = false;
static const size_t ref_log_nrecords = 1000000;

#define REFLOG_BTDEPTH 10
#define REFLOG_RETAIN  1
#define REFLOG_RELEASE 2

#define __debug_only
#else
# define __debug_only __unused
#endif /* OS_REFCNT_DEBUG */

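/*
 * Illustrative usage sketch (not part of this file): a client embeds an
 * os_refcnt_t in its object and drives it through the wrappers in
 * <os/refcnt.h>. The 'widget' names below are hypothetical.
 *
 *     struct widget { os_refcnt_t w_refs; ... };
 *
 *     os_ref_init(&w->w_refs, NULL);          // count starts at 1
 *     os_ref_retain(&w->w_refs);              // +1, panics on resurrection/overflow
 *     if (os_ref_release(&w->w_refs) == 0) {  // -1, returns the new count
 *         widget_free(w);                     // last reference dropped
 *     }
 */
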
void
os_ref_panic_live(void *rc)
{
    panic("os_refcnt: unexpected release of final reference (rc=%p)\n", rc);
    __builtin_unreachable();
}

static void
os_ref_panic_underflow(void *rc)
{
    panic("os_refcnt: underflow (rc=%p)\n", rc);
    __builtin_unreachable();
}

static void
os_ref_panic_resurrection(void *rc)
{
    panic("os_refcnt: attempted resurrection (rc=%p)\n", rc);
    __builtin_unreachable();
}

static void
os_ref_panic_overflow(void *rc)
{
    panic("os_refcnt: overflow (rc=%p)\n", rc);
    __builtin_unreachable();
}

static inline void
os_ref_check_underflow(void *rc, os_ref_count_t count)
{
    if (__improbable(count == 0)) {
        os_ref_panic_underflow(rc);
    }
}

static inline void
os_ref_check_overflow(os_ref_atomic_t *rc, os_ref_count_t count)
{
    if (__improbable(count >= OS_REFCNT_MAX_COUNT)) {
        os_ref_panic_overflow(rc);
    }
}

static inline void
os_ref_assert_referenced(void *rc, os_ref_count_t count)
{
    if (__improbable(count == 0)) {
        os_ref_panic_resurrection(rc);
    }
}

static inline void
os_ref_check_retain(os_ref_atomic_t *rc, os_ref_count_t count)
{
    os_ref_assert_referenced(rc, count);
    os_ref_check_overflow(rc, count);
}

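/*
 * Debug-only reference logging: when enabled via the "rlog" boot-arg, each
 * retain/release on a logged group records a backtrace in the group's btlog,
 * and an element's entries are dropped once its last reference goes away.
 */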
#if OS_REFCNT_DEBUG
__attribute__((cold, noinline))
static void
ref_log_op(struct os_refgrp *grp, void *elem, int op)
{
    if (!ref_debug_enable || grp == NULL) {
        return;
    }

    if (grp->grp_log == NULL) {
        /* no log on this group: forward the event to the parent group */
        ref_log_op(grp->grp_parent, elem, op);
        return;
    }

    uintptr_t bt[REFLOG_BTDEPTH];
    uint32_t nframes = backtrace(bt, REFLOG_BTDEPTH, NULL);
    btlog_add_entry((btlog_t *)grp->grp_log, elem, op, (void **)bt, nframes);
}

__attribute__((cold, noinline))
static void
ref_log_drop(struct os_refgrp *grp, void *elem)
{
    if (!ref_debug_enable || grp == NULL) {
        return;
    }

    if (grp->grp_log == NULL) {
        ref_log_drop(grp->grp_parent, elem);
        return;
    }

    btlog_remove_entries_for_element(grp->grp_log, elem);
}

__attribute__((cold, noinline))
static void
ref_log_init(struct os_refgrp *grp)
{
    if (grp->grp_log != NULL) {
        return;
    }

    char grpbuf[128];   /* buffer size assumed; holds the "rlog" boot-arg value */
    char *refgrp = grpbuf;
    if (!PE_parse_boot_argn("rlog", refgrp, sizeof(grpbuf))) {
        return;
    }

    /*
     * Enable refcount statistics if the rlog boot-arg is present,
     * even when no specific group is logged.
     */
    ref_debug_enable = true;

    const char *g;
    while ((g = strsep(&refgrp, ",")) != NULL) {
        if (strcmp(g, grp->grp_name) == 0) {
            /* enable logging on this refgrp */
            grp->grp_log = btlog_create(ref_log_nrecords, REFLOG_BTDEPTH, true);
            return;
        }
    }
}

#else

# define ref_log_init(...)  do {} while (0)
# define ref_log_op(...)    do {} while (0)
# define ref_log_drop(...)  do {} while (0)

#endif /* OS_REFCNT_DEBUG */

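/*
 * Reference-count groups form a hierarchy: a group's statistics are also
 * rolled up into its parent, and any group without an explicit parent is
 * attached to global_ref_group ("all"), so that group always reflects
 * system-wide totals.
 */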
/*
 * attach a new refcnt to a group
 */
__attribute__((cold, noinline))
static void
ref_attach_to_group(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t init_count)
{
    if (grp == NULL) {
        return;
    }

    if (atomic_fetch_add_explicit(&grp->grp_children, 1, memory_order_relaxed) == 0) {
        /* First reference count object in this group. Check if we should enable
         * refcount logging. */
        ref_log_init(grp);
    }

    atomic_fetch_add_explicit(&grp->grp_count, init_count, memory_order_relaxed);
    atomic_fetch_add_explicit(&grp->grp_retain_total, init_count, memory_order_relaxed);

    if (grp == &global_ref_group) {
        return;
    }

    if (grp->grp_parent == NULL) {
        grp->grp_parent = &global_ref_group;
    }

    ref_attach_to_group(rc, grp->grp_parent, init_count);
}

static void
ref_retain_group(struct os_refgrp *grp)
{
    if (grp) {
        atomic_fetch_add_explicit(&grp->grp_count, 1, memory_order_relaxed);
        atomic_fetch_add_explicit(&grp->grp_retain_total, 1, memory_order_relaxed);
        ref_retain_group(grp->grp_parent);
    }
}

__attribute__((cold, noinline))
static void
ref_release_group(struct os_refgrp *grp, bool final)
{
    if (grp) {
        atomic_fetch_sub_explicit(&grp->grp_count, 1, memory_order_relaxed);
        atomic_fetch_add_explicit(&grp->grp_release_total, 1, memory_order_relaxed);
        if (final) {
            atomic_fetch_sub_explicit(&grp->grp_children, 1, memory_order_relaxed);
        }

        ref_release_group(grp->grp_parent, final);
    }
}

__attribute__((cold, noinline))
static void
ref_init_debug(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t count)
{
    ref_attach_to_group(rc, grp, count);

    for (os_ref_count_t i = 0; i < count; i++) {
        ref_log_op(grp, (void *)rc, REFLOG_RETAIN);
    }
}

__attribute__((cold, noinline))
static void
ref_retain_debug(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
    ref_retain_group(grp);
    ref_log_op(grp, (void *)rc, REFLOG_RETAIN);
}

void
os_ref_init_count_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t count)
{
    os_ref_check_underflow(rc, count);
    atomic_init(rc, count);

    if (__improbable(ref_debug_enable && grp)) {
        ref_init_debug(rc, grp, count);
    }
}

void
os_ref_retain_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
    os_ref_count_t old = atomic_fetch_add_explicit(rc, 1, memory_order_relaxed);
    os_ref_check_retain(rc, old);

    if (__improbable(grp && ref_debug_enable)) {
        ref_retain_debug(rc, grp);
    }
}

bool
os_ref_retain_try_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
    os_ref_count_t cur = os_ref_get_count_internal(rc);

    while (1) {
        if (__improbable(cur == 0)) {
            /* count already hit zero: refuse to resurrect the object */
            return false;
        }

        os_ref_check_retain(rc, cur);

        if (atomic_compare_exchange_weak_explicit(rc, &cur, cur + 1,
            memory_order_relaxed, memory_order_relaxed)) {
            break;
        }
    }

    if (__improbable(grp && ref_debug_enable)) {
        ref_retain_debug(rc, grp);
    }

    return true;
}

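/*
 * A release decrements with 'release_order' so that earlier writes to the
 * object become visible before the count can reach zero, and re-reads the
 * counter with 'dealloc_order' (typically acquire) on the final release so
 * the thread that will free the object observes every other thread's writes.
 */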
__attribute__((always_inline))
static inline os_ref_count_t
_os_ref_release_inline(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
    os_ref_count_t val;

    if (__improbable(grp && ref_debug_enable)) {
        /*
         * Care not to use 'rc' after the decrement because it might be deallocated
         * under us.
         */
        ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
    }

    val = atomic_fetch_sub_explicit(rc, 1, release_order);
    os_ref_check_underflow(rc, val);
    if (__improbable(--val == 0)) {
        atomic_load_explicit(rc, dealloc_order);
    }

    if (__improbable(grp && ref_debug_enable)) {
        if (val == 0) {
            ref_log_drop(grp, (void *)rc); /* rc is only used as an identifier */
        }
        ref_release_group(grp, !val);
    }

    return val;
}

__attribute__((noinline))
static os_ref_count_t
os_ref_release_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
    // Legacy exported interface with bad codegen due to the barriers
    // not being immediate
    //
    // Also serves as the debug function
    return _os_ref_release_inline(rc, grp, release_order, dealloc_order);
}

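/*
 * The fast paths below call the inline body with grp == NULL so the debug
 * bookkeeping compiles away; only when group debugging is enabled do they
 * fall back to the out-of-line os_ref_release_internal().
 */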
os_ref_count_t
os_ref_release_barrier_internal(os_ref_atomic_t *rc,
    struct os_refgrp * __debug_only grp)
{
    if (__improbable(grp && ref_debug_enable)) {
        return os_ref_release_internal(rc, grp,
                   memory_order_release, memory_order_acquire);
    }
    return _os_ref_release_inline(rc, NULL,
               memory_order_release, memory_order_acquire);
}

os_ref_count_t
os_ref_release_relaxed_internal(os_ref_atomic_t *rc,
    struct os_refgrp * __debug_only grp)
{
    if (__improbable(grp && ref_debug_enable)) {
        return os_ref_release_internal(rc, grp,
                   memory_order_relaxed, memory_order_relaxed);
    }
    return _os_ref_release_inline(rc, NULL,
               memory_order_relaxed, memory_order_relaxed);
}

void
os_ref_retain_locked_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
    os_ref_count_t val = os_ref_get_count_internal(rc);
    os_ref_check_retain(rc, val);
    atomic_store_explicit(rc, ++val, memory_order_relaxed);

    if (__improbable(grp && ref_debug_enable)) {
        ref_retain_debug(rc, grp);
    }
}

os_ref_count_t
os_ref_release_locked_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
    os_ref_count_t val = os_ref_get_count_internal(rc);
    os_ref_check_underflow(rc, val);
    atomic_store_explicit(rc, --val, memory_order_relaxed);

    if (__improbable(grp && ref_debug_enable)) {
        ref_release_group(grp, !val);
        ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
        if (val == 0) {
            ref_log_drop(grp, (void *)rc);
        }
    }

    return val;
}

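/*
 * The "mask" variants below share the atomic word between a reference count
 * and a small set of flag bits: the low 'bits' bits hold the flags and the
 * count lives in the remaining high bits, so every retain/release moves the
 * raw value by (1U << bits).
 */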
os_ref_count_t
os_ref_get_count_mask(os_ref_atomic_t *rc, os_ref_count_t bits)
{
    os_ref_count_t ret;
    ret = os_ref_get_count_raw(rc);
    return ret >> bits;
}

#undef os_ref_init_count_mask
void
os_ref_init_count_mask(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp,
    os_ref_count_t init_count, os_ref_count_t init_bits, os_ref_count_t b)
{
    assert(init_bits < (1U << b));
    os_ref_check_underflow(rc, init_count);
    atomic_init(rc, (init_count << b) | init_bits);

    if (__improbable(ref_debug_enable && grp)) {
        ref_init_debug(rc, grp, init_count);
    }
}

#undef os_ref_retain_mask
void
os_ref_retain_mask(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t bits)
{
    os_ref_count_t old = atomic_fetch_add_explicit(rc, 1U << bits, memory_order_relaxed);
    os_ref_check_overflow(rc, old);
    os_ref_assert_referenced(rc, old >> bits);

    if (__improbable(grp && ref_debug_enable)) {
        ref_retain_debug(rc, grp);
    }
}

#undef os_ref_release_mask_internal
os_ref_count_t
os_ref_release_mask_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t bits,
    memory_order release_order, memory_order dealloc_order)
{
    if (__improbable(grp && ref_debug_enable)) {
        /*
         * Care not to use 'rc' after the decrement because it might be deallocated
         * under us.
         */
        ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
    }

    os_ref_count_t val = atomic_fetch_sub_explicit(rc, 1U << bits, release_order);
    val >>= bits;
    os_ref_check_underflow(rc, val);
    if (__improbable(--val == 0)) {
        atomic_load_explicit(rc, dealloc_order);
    }

    if (__improbable(grp && ref_debug_enable)) {
        if (val == 0) {
            ref_log_drop(grp, (void *)rc); /* rc is only used as an identifier */
        }
        ref_release_group(grp, !val);
    }

    return val;
}

#undef os_ref_retain_try_mask
bool
os_ref_retain_try_mask(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t bits)
{
    os_ref_count_t cur = os_ref_get_count_internal(rc);

    while (1) {
        if (__improbable((cur >> bits) == 0)) {
            return false;
        }

        os_ref_check_overflow(rc, cur);

        os_ref_count_t next = cur + (1U << bits);
        if (atomic_compare_exchange_weak_explicit(rc, &cur, next,
            memory_order_relaxed, memory_order_relaxed)) {
            break;
        }
    }

    if (__improbable(grp && ref_debug_enable)) {
        ref_retain_debug(rc, grp);
    }

    return true;
}

#undef os_ref_retain_locked_mask
void
os_ref_retain_locked_mask(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t bits)
{
    os_ref_count_t val = os_ref_get_count_internal(rc);

    os_ref_check_overflow(rc, val);
    os_ref_assert_referenced(rc, val >> bits);

    val += (1U << bits);
    atomic_store_explicit(rc, val, memory_order_relaxed);

    if (__improbable(grp && ref_debug_enable)) {
        ref_retain_debug(rc, grp);
    }
}

#undef os_ref_release_locked_mask
os_ref_count_t
os_ref_release_locked_mask(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t bits)
{
    os_ref_count_t val = os_ref_get_count_internal(rc);
    os_ref_check_underflow(rc, val >> bits);
    val -= 1U << bits;
    atomic_store_explicit(rc, val, memory_order_relaxed);

    val >>= bits;

    if (__improbable(grp && ref_debug_enable)) {
        ref_release_group(grp, !val);
        ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
        if (val == 0) {
            ref_log_drop(grp, (void *)rc);
        }
    }

    return val;
}