1 #include <kern/assert.h>
2 #include <kern/debug.h>
3 #include <pexpert/pexpert.h>
4 #include <kern/btlog.h>
5 #include <kern/backtrace.h>
6 #include <libkern/libkern.h>
/*
 * Counts are checked against this ceiling on every retain so that a leaked
 * or corrupted refcount is caught long before the counter can wrap.
 */
#define OS_REFCNT_MAX_COUNT     ((os_ref_count_t)0x0FFFFFFFUL)

/*
 * NOTE(review): the opening `#if OS_REFCNT_DEBUG` was lost in extraction;
 * the guard placement below is reconstructed — confirm against upstream.
 */
#if OS_REFCNT_DEBUG
/* Catch-all group: every refcount rolls up into it, directly or via a parent. */
os_refgrp_decl(static, global_ref_group, "all", NULL);

/* Set to true once any group enables btlog logging via the "rlog" boot-arg. */
static bool ref_debug_enable = false;
/* Capacity (number of records) of each per-group btlog. */
static const size_t ref_log_nrecords = 1000000;

#define REFLOG_BTDEPTH   10     /* backtrace frames captured per event */
#define REFLOG_RETAIN    1
#define REFLOG_RELEASE   2

#define __debug_only
#else
# define __debug_only __unused
#endif /* OS_REFCNT_DEBUG */
26 ref_grp_name(struct os_refcnt __debug_only
*rc
)
29 if (rc
&& rc
->ref_group
&& rc
->ref_group
->grp_name
) {
30 return rc
->ref_group
->grp_name
;
37 os_ref_check_underflow(struct os_refcnt
*rc
, os_ref_count_t count
)
39 if (__improbable(count
== 0)) {
40 panic("os_refcnt: underflow (rc=%p, grp=%s)\n", rc
, ref_grp_name(rc
));
41 __builtin_unreachable();
46 os_ref_assert_referenced(struct os_refcnt
*rc
, os_ref_count_t count
)
48 if (__improbable(count
== 0)) {
49 panic("os_refcnt: used unsafely when zero (rc=%p, grp=%s)\n", rc
, ref_grp_name(rc
));
50 __builtin_unreachable();
55 os_ref_check_overflow(struct os_refcnt
*rc
, os_ref_count_t count
)
57 if (__improbable(count
>= OS_REFCNT_MAX_COUNT
)) {
58 panic("os_refcnt: overflow (rc=%p, grp=%s)\n", rc
, ref_grp_name(rc
));
59 __builtin_unreachable();
64 os_ref_check_retain(struct os_refcnt
*rc
, os_ref_count_t count
)
66 os_ref_assert_referenced(rc
, count
);
67 os_ref_check_overflow(rc
, count
);
72 ref_log_op(struct os_refgrp
*grp
, void *elem
, int op
)
74 if (!ref_debug_enable
|| grp
== NULL
) {
78 if (grp
->grp_log
== NULL
) {
79 ref_log_op(grp
->grp_parent
, elem
, op
);
83 uintptr_t bt
[REFLOG_BTDEPTH
];
84 uint32_t nframes
= backtrace(bt
, REFLOG_BTDEPTH
);
85 btlog_add_entry((btlog_t
*)grp
->grp_log
, elem
, op
, (void **)bt
, nframes
);
89 ref_log_drop(struct os_refgrp
*grp
, void *elem
)
91 if (!ref_debug_enable
|| grp
== NULL
) {
95 if (grp
->grp_log
== NULL
) {
96 ref_log_drop(grp
->grp_parent
, elem
);
100 btlog_remove_entries_for_element(grp
->grp_log
, elem
);
104 ref_log_init(struct os_refgrp
*grp
)
106 if (grp
->grp_log
!= NULL
) {
111 char *refgrp
= grpbuf
;
112 if (!PE_parse_boot_argn("rlog", refgrp
, sizeof(grpbuf
))) {
117 while ((g
= strsep(&refgrp
, ",")) != NULL
) {
118 if (strcmp(g
, grp
->grp_name
) == 0) {
119 /* enable logging on this refgrp */
120 grp
->grp_log
= btlog_create(ref_log_nrecords
, REFLOG_BTDEPTH
, true);
121 assert(grp
->grp_log
);
122 ref_debug_enable
= true;
130 * attach a new refcnt to a group
133 ref_attach_to_group(struct os_refcnt
*rc
, struct os_refgrp
*grp
, os_ref_count_t init_count
)
139 if (atomic_fetch_add_explicit(&grp
->grp_children
, 1, memory_order_relaxed
) == 0) {
140 /* First reference count object in this group. Check if we should enable
141 * refcount logging. */
145 atomic_fetch_add_explicit(&grp
->grp_count
, init_count
, memory_order_relaxed
);
146 atomic_fetch_add_explicit(&grp
->grp_retain_total
, init_count
, memory_order_relaxed
);
148 if (grp
== &global_ref_group
) {
152 if (grp
->grp_parent
== NULL
) {
153 grp
->grp_parent
= &global_ref_group
;
156 ref_attach_to_group(rc
, grp
->grp_parent
, init_count
);
160 ref_retain_group(struct os_refgrp
*grp
)
163 atomic_fetch_add_explicit(&grp
->grp_count
, 1, memory_order_relaxed
);
164 atomic_fetch_add_explicit(&grp
->grp_retain_total
, 1, memory_order_relaxed
);
165 ref_retain_group(grp
->grp_parent
);
170 ref_release_group(struct os_refgrp
*grp
, bool final
)
173 atomic_fetch_sub_explicit(&grp
->grp_count
, 1, memory_order_relaxed
);
174 atomic_fetch_add_explicit(&grp
->grp_release_total
, 1, memory_order_relaxed
);
176 atomic_fetch_sub_explicit(&grp
->grp_children
, 1, memory_order_relaxed
);
179 ref_release_group(grp
->grp_parent
, final
);
184 #undef os_ref_init_count
186 os_ref_init_count(struct os_refcnt
*rc
, struct os_refgrp __debug_only
*grp
, os_ref_count_t count
)
188 atomic_init(&rc
->ref_count
, count
);
195 rc
->ref_group
= &global_ref_group
;
198 ref_attach_to_group(rc
, rc
->ref_group
, count
);
200 for (os_ref_count_t i
= 0; i
< count
; i
++) {
201 ref_log_op(rc
->ref_group
, (void *)rc
, REFLOG_RETAIN
);
207 os_ref_retain(struct os_refcnt
*rc
)
209 os_ref_count_t old
= atomic_fetch_add_explicit(&rc
->ref_count
, 1, memory_order_relaxed
);
210 os_ref_check_retain(rc
, old
);
213 ref_retain_group(rc
->ref_group
);
214 ref_log_op(rc
->ref_group
, (void *)rc
, REFLOG_RETAIN
);
219 os_ref_retain_try(struct os_refcnt
*rc
)
221 os_ref_count_t cur
= os_ref_get_count(rc
);
224 if (__improbable(cur
== 0)) {
228 os_ref_check_retain(rc
, cur
);
230 if (atomic_compare_exchange_weak_explicit(&rc
->ref_count
, &cur
, cur
+1,
231 memory_order_relaxed
, memory_order_relaxed
)) {
233 ref_retain_group(rc
->ref_group
);
234 ref_log_op(rc
->ref_group
, (void *)rc
, REFLOG_RETAIN
);
242 os_ref_release_explicit(struct os_refcnt
*rc
, memory_order release_order
, memory_order dealloc_order
)
246 * Care not to use 'rc' after the decrement because it might be deallocated
249 struct os_refgrp
*grp
= rc
->ref_group
;
250 ref_log_op(grp
, (void *)rc
, REFLOG_RELEASE
);
253 os_ref_count_t val
= atomic_fetch_sub_explicit(&rc
->ref_count
, 1, release_order
);
254 os_ref_check_underflow(rc
, val
);
255 if (__improbable(--val
== 0)) {
256 atomic_load_explicit(&rc
->ref_count
, dealloc_order
);
258 ref_log_drop(grp
, (void *)rc
); /* rc is only used as an identifier */
263 ref_release_group(grp
, !val
);
270 os_ref_retain_locked(struct os_refcnt
*rc
)
272 os_ref_count_t val
= rc
->ref_count
;
273 os_ref_check_retain(rc
, val
);
274 rc
->ref_count
= ++val
;
277 ref_retain_group(rc
->ref_group
);
278 ref_log_op(rc
->ref_group
, (void *)rc
, REFLOG_RETAIN
);
/*
 * os_ref_release_locked: drop one reference on a refcount protected by an
 * external lock, using a plain non-atomic decrement (mirrors
 * os_ref_retain_locked); panics on underflow via os_ref_check_underflow.
 * NOTE(review): this block is truncated by extraction — the trailing return
 * of the new count and the closing brace are not visible in this chunk.
 */
283 os_ref_release_locked(struct os_refcnt
*rc
)
285 os_ref_count_t val
= rc
->ref_count
;
286 os_ref_check_underflow(rc
, val
);
287 rc
->ref_count
= --val
;
/* group/log bookkeeping for the release; drops log records at count zero */
290 ref_release_group(rc
->ref_group
, !val
);
291 ref_log_op(rc
->ref_group
, (void *)rc
, REFLOG_RELEASE
);
293 ref_log_drop(rc
->ref_group
, (void *)rc
);