1 #include <kern/assert.h>
2 #include <kern/debug.h>
3 #include <pexpert/pexpert.h>
4 #include <kern/btlog.h>
5 #include <kern/backtrace.h>
6 #include <libkern/libkern.h>
/* Refcounts saturate-check against this ceiling; beyond it we panic on overflow. */
#define OS_REFCNT_MAX_COUNT  ((os_ref_count_t)0x0FFFFFFFUL)

/*
 * Refcount debugging configuration. With OS_REFCNT_DEBUG enabled, every
 * refcount belongs to a group (defaulting to the global "all" group) and
 * retain/release events can be logged to a per-group backtrace log.
 */
#if OS_REFCNT_DEBUG
os_refgrp_decl(static, global_ref_group, "all", NULL);

/* Flipped on at boot when the "rlog" boot-arg is present (see ref_log_init). */
static bool ref_debug_enable = false;

/* Capacity (number of records) of each per-group backtrace log. */
static const size_t ref_log_nrecords = 1000000;

#define REFLOG_BTDEPTH   10     /* stack frames captured per logged event */
#define REFLOG_RETAIN    1
#define REFLOG_RELEASE   2

#define __debug_only
#else
# define __debug_only __unused
#endif /* OS_REFCNT_DEBUG */
26 ref_grp_name(struct os_refcnt __debug_only
*rc
)
29 if (rc
&& rc
->ref_group
&& rc
->ref_group
->grp_name
) {
30 return rc
->ref_group
->grp_name
;
/*
 * Panic path for a release that drove the count below zero.
 * Out-of-line and cold so the fast path stays small; noreturn since
 * panic() never comes back.
 */
__attribute__((cold, noinline, not_tail_called, noreturn))
static void
os_ref_panic_underflow(struct os_refcnt *rc)
{
	panic("os_refcnt: underflow (rc=%p, grp=%s)\n", rc, ref_grp_name(rc));
	__builtin_unreachable();
}
45 os_ref_check_underflow(struct os_refcnt
*rc
, os_ref_count_t count
)
47 if (__improbable(count
== 0)) {
48 os_ref_panic_underflow(rc
);
/*
 * Panic path for retaining an object whose count already reached zero
 * (a use-after-free style "resurrection"). Cold/out-of-line/noreturn.
 */
__attribute__((cold, noinline, not_tail_called, noreturn))
static void
os_ref_panic_resurrection(struct os_refcnt *rc)
{
	panic("os_refcnt: used unsafely when zero (rc=%p, grp=%s)\n", rc, ref_grp_name(rc));
	__builtin_unreachable();
}
61 os_ref_assert_referenced(struct os_refcnt
*rc
, os_ref_count_t count
)
63 if (__improbable(count
== 0)) {
64 os_ref_panic_resurrection(rc
);
/*
 * Panic path for a retain that pushed the count past OS_REFCNT_MAX_COUNT.
 * Cold/out-of-line/noreturn, mirroring the underflow/resurrection paths.
 */
__attribute__((cold, noinline, not_tail_called, noreturn))
static void
os_ref_panic_overflow(struct os_refcnt *rc)
{
	panic("os_refcnt: overflow (rc=%p, grp=%s)\n", rc, ref_grp_name(rc));
	__builtin_unreachable();
}
77 os_ref_check_overflow(struct os_refcnt
*rc
, os_ref_count_t count
)
79 if (__improbable(count
>= OS_REFCNT_MAX_COUNT
)) {
80 os_ref_panic_overflow(rc
);
85 os_ref_check_retain(struct os_refcnt
*rc
, os_ref_count_t count
)
87 os_ref_assert_referenced(rc
, count
);
88 os_ref_check_overflow(rc
, count
);
#if OS_REFCNT_DEBUG
/*
 * Record a retain/release event (op is REFLOG_RETAIN or REFLOG_RELEASE)
 * for 'elem' in the nearest ancestor group that has a btlog attached.
 * No-op when logging is disabled or the group chain is exhausted.
 */
static void
ref_log_op(struct os_refgrp *grp, void *elem, int op)
{
	if (!ref_debug_enable || grp == NULL) {
		return;
	}

	/* This group has no log of its own: delegate up the parent chain. */
	if (grp->grp_log == NULL) {
		ref_log_op(grp->grp_parent, elem, op);
		return;
	}

	uintptr_t bt[REFLOG_BTDEPTH];
	uint32_t nframes = backtrace(bt, REFLOG_BTDEPTH);
	btlog_add_entry((btlog_t *)grp->grp_log, elem, op, (void **)bt, nframes);
}
#endif /* OS_REFCNT_DEBUG */
#if OS_REFCNT_DEBUG
/*
 * Remove all logged events for 'elem' once its refcount hits zero, so a
 * recycled address does not mix old and new object histories. Walks up
 * the parent chain to the nearest group with a log, like ref_log_op.
 */
static void
ref_log_drop(struct os_refgrp *grp, void *elem)
{
	if (!ref_debug_enable || grp == NULL) {
		return;
	}

	if (grp->grp_log == NULL) {
		ref_log_drop(grp->grp_parent, elem);
		return;
	}

	btlog_remove_entries_for_element(grp->grp_log, elem);
}
#endif /* OS_REFCNT_DEBUG */
#if OS_REFCNT_DEBUG
/*
 * Lazily set up event logging for a group, driven by the "rlog" boot-arg:
 * a comma-separated list of group names to log. Presence of the boot-arg
 * (even without a match) also enables refcount statistics globally.
 */
static void
ref_log_init(struct os_refgrp *grp)
{
	if (grp->grp_log != NULL) {
		/* Already initialized. */
		return;
	}

	char grpbuf[128];
	char *refgrp = grpbuf;
	if (!PE_parse_boot_argn("rlog", refgrp, sizeof(grpbuf))) {
		return;
	}

	/*
	 * Enable refcount statistics if the rlog boot-arg is present,
	 * even when no specific group is logged.
	 */
	ref_debug_enable = true;

	char *g;
	while ((g = strsep(&refgrp, ",")) != NULL) {
		if (strcmp(g, grp->grp_name) == 0) {
			/* enable logging on this refgrp */
			grp->grp_log = btlog_create(ref_log_nrecords, REFLOG_BTDEPTH, true);
			assert(grp->grp_log);
			return;
		}
	}
}
#endif /* OS_REFCNT_DEBUG */
#if OS_REFCNT_DEBUG
/*
 * Attach a new refcnt to a group: bump the group's statistics by the
 * refcnt's initial count and recurse into the parent chain, rooting
 * orphan groups at the global group. Relaxed ordering is sufficient
 * because these counters are statistics only.
 */
static void
ref_attach_to_group(struct os_refcnt *rc, struct os_refgrp *grp, os_ref_count_t init_count)
{
	if (grp == NULL) {
		return;
	}

	if (atomic_fetch_add_explicit(&grp->grp_children, 1, memory_order_relaxed) == 0) {
		/* First reference count object in this group. Check if we should enable
		 * refcount logging. */
		ref_log_init(grp);
	}

	atomic_fetch_add_explicit(&grp->grp_count, init_count, memory_order_relaxed);
	atomic_fetch_add_explicit(&grp->grp_retain_total, init_count, memory_order_relaxed);

	if (grp == &global_ref_group) {
		/* Top of the hierarchy: nothing further to attach to. */
		return;
	}

	if (grp->grp_parent == NULL) {
		/* Orphan group: hang it off the global group. */
		grp->grp_parent = &global_ref_group;
	}

	ref_attach_to_group(rc, grp->grp_parent, init_count);
}
#endif /* OS_REFCNT_DEBUG */
#if OS_REFCNT_DEBUG
/*
 * Account one retain in this group and every ancestor group.
 * Relaxed ordering: these are statistics counters, not synchronization.
 */
static void
ref_retain_group(struct os_refgrp *grp)
{
	if (grp) {
		atomic_fetch_add_explicit(&grp->grp_count, 1, memory_order_relaxed);
		atomic_fetch_add_explicit(&grp->grp_retain_total, 1, memory_order_relaxed);
		ref_retain_group(grp->grp_parent);
	}
}
#endif /* OS_REFCNT_DEBUG */
#if OS_REFCNT_DEBUG
/*
 * Account one release in this group and every ancestor group. When
 * 'final' is set (the object's count dropped to zero) the group also
 * loses a child. Relaxed ordering: statistics only.
 */
static void
ref_release_group(struct os_refgrp *grp, bool final)
{
	if (grp) {
		atomic_fetch_sub_explicit(&grp->grp_count, 1, memory_order_relaxed);
		atomic_fetch_add_explicit(&grp->grp_release_total, 1, memory_order_relaxed);
		if (final) {
			atomic_fetch_sub_explicit(&grp->grp_children, 1, memory_order_relaxed);
		}

		ref_release_group(grp->grp_parent, final);
	}
}
#endif /* OS_REFCNT_DEBUG */
209 #undef os_ref_init_count
211 os_ref_init_count(struct os_refcnt
*rc
, struct os_refgrp __debug_only
*grp
, os_ref_count_t count
)
213 atomic_init(&rc
->ref_count
, count
);
220 rc
->ref_group
= &global_ref_group
;
223 ref_attach_to_group(rc
, rc
->ref_group
, count
);
225 for (os_ref_count_t i
= 0; i
< count
; i
++) {
226 ref_log_op(rc
->ref_group
, (void *)rc
, REFLOG_RETAIN
);
232 os_ref_retain(struct os_refcnt
*rc
)
234 os_ref_count_t old
= atomic_fetch_add_explicit(&rc
->ref_count
, 1, memory_order_relaxed
);
235 os_ref_check_retain(rc
, old
);
238 if (__improbable(ref_debug_enable
)) {
239 ref_retain_group(rc
->ref_group
);
240 ref_log_op(rc
->ref_group
, (void *)rc
, REFLOG_RETAIN
);
246 os_ref_retain_try(struct os_refcnt
*rc
)
248 os_ref_count_t cur
= os_ref_get_count(rc
);
251 if (__improbable(cur
== 0)) {
255 os_ref_check_retain(rc
, cur
);
257 if (atomic_compare_exchange_weak_explicit(&rc
->ref_count
, &cur
, cur
+ 1,
258 memory_order_relaxed
, memory_order_relaxed
)) {
260 if (__improbable(ref_debug_enable
)) {
261 ref_retain_group(rc
->ref_group
);
262 ref_log_op(rc
->ref_group
, (void *)rc
, REFLOG_RETAIN
);
271 os_ref_release_explicit(struct os_refcnt
*rc
, memory_order release_order
, memory_order dealloc_order
)
275 * Care not to use 'rc' after the decrement because it might be deallocated
278 struct os_refgrp
*grp
= rc
->ref_group
;
279 if (__improbable(ref_debug_enable
)) {
280 ref_log_op(grp
, (void *)rc
, REFLOG_RELEASE
);
284 os_ref_count_t val
= atomic_fetch_sub_explicit(&rc
->ref_count
, 1, release_order
);
285 os_ref_check_underflow(rc
, val
);
286 if (__improbable(--val
== 0)) {
287 atomic_load_explicit(&rc
->ref_count
, dealloc_order
);
289 if (__improbable(ref_debug_enable
)) {
290 ref_log_drop(grp
, (void *)rc
); /* rc is only used as an identifier */
296 if (__improbable(ref_debug_enable
)) {
297 ref_release_group(grp
, !val
);
305 os_ref_retain_locked(struct os_refcnt
*rc
)
307 os_ref_count_t val
= atomic_load_explicit(&rc
->ref_count
, memory_order_relaxed
);
308 os_ref_check_retain(rc
, val
);
309 atomic_store_explicit(&rc
->ref_count
, ++val
, memory_order_relaxed
);
312 if (__improbable(ref_debug_enable
)) {
313 ref_retain_group(rc
->ref_group
);
314 ref_log_op(rc
->ref_group
, (void *)rc
, REFLOG_RETAIN
);
320 os_ref_release_locked(struct os_refcnt
*rc
)
322 os_ref_count_t val
= atomic_load_explicit(&rc
->ref_count
, memory_order_relaxed
);
323 os_ref_check_underflow(rc
, val
);
324 atomic_store_explicit(&rc
->ref_count
, --val
, memory_order_relaxed
);
327 if (__improbable(ref_debug_enable
)) {
328 ref_release_group(rc
->ref_group
, !val
);
329 ref_log_op(rc
->ref_group
, (void *)rc
, REFLOG_RELEASE
);
331 ref_log_drop(rc
->ref_group
, (void *)rc
);