#include <kern/assert.h>
#include <kern/debug.h>
#include <pexpert/pexpert.h>
#include <kern/btlog.h>
#include <kern/backtrace.h>
#include <libkern/libkern.h>
#include <os/atomic_private.h>

#define OS_REFCNT_MAX_COUNT     ((os_ref_count_t)0x0FFFFFFFUL)
#if OS_REFCNT_DEBUG
extern struct os_refgrp global_ref_group;
os_refgrp_decl(, global_ref_group, "all", NULL);

extern bool ref_debug_enable;
bool ref_debug_enable = false;
static const size_t ref_log_nrecords = 1000000;

#define REFLOG_BTDEPTH   10

__enum_closed_decl(reflog_op_t, uint8_t, {
	REFLOG_RETAIN  = 1,
	REFLOG_RELEASE = 2,
});

#define __debug_only
#else
# define __debug_only __unused
#endif /* OS_REFCNT_DEBUG */
void
os_ref_panic_live(void *rc)
{
	panic("os_refcnt: unexpected release of final reference (rc=%p)\n", rc);
	__builtin_unreachable();
}

__abortlike
static void
os_ref_panic_underflow(void *rc)
{
	panic("os_refcnt: underflow (rc=%p)\n", rc);
	__builtin_unreachable();
}

__abortlike
static void
os_ref_panic_resurrection(void *rc)
{
	panic("os_refcnt: attempted resurrection (rc=%p)\n", rc);
	__builtin_unreachable();
}

__abortlike
static void
os_ref_panic_overflow(void *rc)
{
	panic("os_refcnt: overflow (rc=%p)\n", rc);
	__builtin_unreachable();
}
static inline void
os_ref_check_underflow(void *rc, os_ref_count_t count, os_ref_count_t n)
{
	if (__improbable(count < n)) {
		os_ref_panic_underflow(rc);
	}
}

static inline void
os_ref_check_overflow(os_ref_atomic_t *rc, os_ref_count_t count)
{
	if (__improbable(count >= OS_REFCNT_MAX_COUNT)) {
		os_ref_panic_overflow(rc);
	}
}

static inline void
os_ref_check_retain(os_ref_atomic_t *rc, os_ref_count_t count, os_ref_count_t n)
{
	if (__improbable(count < n)) {
		os_ref_panic_resurrection(rc);
	}
	os_ref_check_overflow(rc, count);
}
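
/*
 * Editorial note: each checker above is handed the counter value observed
 * before the update (the fetch-and-modify return value), so a retain that
 * observes zero is diagnosed as a resurrection while a release that observes
 * less than n is diagnosed as an underflow.
 */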

#if OS_REFCNT_DEBUG
__attribute__((cold, noinline))
static void
ref_log_op(struct os_refgrp *grp, void *elem, reflog_op_t op)
{
	if (grp == NULL) {
		return;
	}

	if (grp->grp_log == NULL) {
		ref_log_op(grp->grp_parent, elem, op);
		return;
	}

	uintptr_t bt[REFLOG_BTDEPTH];
	uint32_t nframes = backtrace(bt, REFLOG_BTDEPTH, NULL);
	btlog_add_entry((btlog_t *)grp->grp_log, elem, op, (void **)bt, nframes);
}

__attribute__((cold, noinline))
static void
ref_log_drop(struct os_refgrp *grp, void *elem)
{
	if (!ref_debug_enable || grp == NULL) {
		return;
	}

	if (grp->grp_log == NULL) {
		ref_log_drop(grp->grp_parent, elem);
		return;
	}

	btlog_remove_entries_for_element(grp->grp_log, elem);
}

__attribute__((cold, noinline))
static void
ref_log_init(struct os_refgrp *grp)
{
	if (grp->grp_log != NULL) {
		return;
	}

	char grpbuf[128];
	char *refgrp = grpbuf;
	if (!PE_parse_boot_argn("rlog", refgrp, sizeof(grpbuf))) {
		return;
	}

	/*
	 * Enable refcount statistics if the rlog boot-arg is present,
	 * even when no specific group is logged.
	 */
	ref_debug_enable = true;

	const char *g;
	while ((g = strsep(&refgrp, ",")) != NULL) {
		if (strcmp(g, grp->grp_name) == 0) {
			/* enable logging on this refgrp */
			grp->grp_log = btlog_create(ref_log_nrecords, REFLOG_BTDEPTH, true);
			return;
		}
	}
}
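
/*
 * Usage sketch (editorial, not from the original source): booting with
 * rlog=<name>[,<name>...] turns on btlog-backed event logging for any refgrp
 * whose grp_name matches, e.g. rlog=all for the global group declared above;
 * the mere presence of the rlog boot-arg also sets ref_debug_enable so that
 * per-group statistics are gathered.
 */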

#else

static inline void
ref_log_init(struct os_refgrp *grp __unused)
{
	return;
}

static inline void
ref_log_op(struct os_refgrp *grp __unused, void *rc __unused, reflog_op_t op __unused)
{
	return;
}

static inline void
ref_log_drop(struct os_refgrp *grp __unused, void *rc __unused)
{
	return;
}

#endif /* OS_REFCNT_DEBUG */

#if OS_REFCNT_DEBUG
/*
 * attach a new refcnt to a group
 */
__attribute__((cold, noinline))
static void
ref_attach_to_group(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t init_count)
{
	if (grp == NULL) {
		return;
	}

	if (atomic_fetch_add_explicit(&grp->grp_children, 1, memory_order_relaxed) == 0) {
		/* First reference count object in this group. Check if we should enable
		 * refcount logging. */
		ref_log_init(grp);
	}

	atomic_fetch_add_explicit(&grp->grp_count, init_count, memory_order_relaxed);
	atomic_fetch_add_explicit(&grp->grp_retain_total, init_count, memory_order_relaxed);

	if (grp == &global_ref_group) {
		return;
	}

	if (grp->grp_parent == NULL) {
		grp->grp_parent = &global_ref_group;
	}

	ref_attach_to_group(rc, grp->grp_parent, init_count);
}
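
/*
 * Editorial note: group accounting rolls up the hierarchy. A refgrp with no
 * explicit parent is attached to global_ref_group, so every retain and
 * release recorded below is also reflected in the "all" group.
 */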

static void
ref_retain_group(struct os_refgrp *grp)
{
	if (grp) {
		atomic_fetch_add_explicit(&grp->grp_count, 1, memory_order_relaxed);
		atomic_fetch_add_explicit(&grp->grp_retain_total, 1, memory_order_relaxed);
		ref_retain_group(grp->grp_parent);
	}
}

__attribute__((cold, noinline))
static void
ref_release_group(struct os_refgrp *grp, bool final)
{
	if (grp) {
		atomic_fetch_sub_explicit(&grp->grp_count, 1, memory_order_relaxed);
		atomic_fetch_add_explicit(&grp->grp_release_total, 1, memory_order_relaxed);
		if (final) {
			atomic_fetch_sub_explicit(&grp->grp_children, 1, memory_order_relaxed);
		}

		ref_release_group(grp->grp_parent, final);
	}
}

__attribute__((cold, noinline))
static void
ref_init_debug(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t count)
{
	ref_attach_to_group(rc, grp, count);

	for (os_ref_count_t i = 0; i < count; i++) {
		ref_log_op(grp, (void *)rc, REFLOG_RETAIN);
	}
}

__attribute__((cold, noinline))
static void
ref_retain_debug(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	ref_retain_group(grp);
	ref_log_op(grp, (void *)rc, REFLOG_RETAIN);
}
#endif /* OS_REFCNT_DEBUG */

void
os_ref_init_count_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t count)
{
	os_ref_check_underflow(rc, count, 1);
	atomic_init(rc, count);

#if OS_REFCNT_DEBUG
	if (__improbable(ref_debug_enable && grp)) {
		ref_init_debug(rc, grp, count);
	}
#endif
}

void
os_ref_retain_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	os_ref_count_t old = atomic_fetch_add_explicit(rc, 1, memory_order_relaxed);
	os_ref_check_retain(rc, old, 1);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif
}

bool
os_ref_retain_try_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	os_ref_count_t cur, next;

	os_atomic_rmw_loop(rc, cur, next, relaxed, {
		if (__improbable(cur == 0)) {
			os_atomic_rmw_loop_give_up(return false);
		}

		next = cur + 1;
	});

	os_ref_check_overflow(rc, cur);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif

	return true;
}

__attribute__((always_inline))
static inline os_ref_count_t
_os_ref_release_inline(os_ref_atomic_t *rc, os_ref_count_t n,
    struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
	os_ref_count_t val;

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		/*
		 * Care not to use 'rc' after the decrement because it might be deallocated
		 * under us.
		 */
		ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
	}
#endif

	val = atomic_fetch_sub_explicit(rc, n, release_order);
	os_ref_check_underflow(rc, val, n);
	val -= n;
	if (__improbable(val < n)) {
		atomic_load_explicit(rc, dealloc_order);
	}

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		if (val == 0) {
			ref_log_drop(grp, (void *)rc); /* rc is only used as an identifier */
		}
		ref_release_group(grp, !val);
	}
#endif

	return val;
}
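
/*
 * Editorial note on ordering: the decrement uses release_order so that all
 * prior writes to the object are visible before the count can hit zero, and
 * a possibly-final release re-reads the counter with dealloc_order (acquire
 * in the barrier variants) so the thread that goes on to free the object
 * synchronizes with every other releaser.
 */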

__attribute__((noinline))
static os_ref_count_t
os_ref_release_n_internal(os_ref_atomic_t *rc, os_ref_count_t n,
    struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
	// Legacy exported interface with bad codegen due to the barriers
	// not being immediate
	//
	// Also serves as the debug function
	return _os_ref_release_inline(rc, n, grp, release_order, dealloc_order);
}

__attribute__((noinline))
os_ref_count_t
os_ref_release_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
	// Legacy exported interface with bad codegen due to the barriers
	// not being immediate
	//
	// Also serves as the debug function
	return _os_ref_release_inline(rc, 1, grp, release_order, dealloc_order);
}

os_ref_count_t
os_ref_release_barrier_internal(os_ref_atomic_t *rc,
    struct os_refgrp * __debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		return os_ref_release_internal(rc, grp,
		           memory_order_release, memory_order_acquire);
	}
#endif
	return _os_ref_release_inline(rc, 1, NULL,
	           memory_order_release, memory_order_acquire);
}

os_ref_count_t
os_ref_release_relaxed_internal(os_ref_atomic_t *rc,
    struct os_refgrp * __debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		return os_ref_release_internal(rc, grp,
		           memory_order_relaxed, memory_order_relaxed);
	}
#endif
	return _os_ref_release_inline(rc, 1, NULL,
	           memory_order_relaxed, memory_order_relaxed);
}

void
os_ref_retain_locked_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	os_ref_count_t val = os_ref_get_count_internal(rc);
	os_ref_check_retain(rc, val, 1);
	atomic_store_explicit(rc, ++val, memory_order_relaxed);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif
}

os_ref_count_t
os_ref_release_locked_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	os_ref_count_t val = os_ref_get_count_internal(rc);
	os_ref_check_underflow(rc, val, 1);
	atomic_store_explicit(rc, --val, memory_order_relaxed);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_release_group(grp, !val);
		ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
		if (val == 0) {
			ref_log_drop(grp, (void *)rc);
		}
	}
#endif

	return val;
}

#undef os_ref_init_count_mask
void
os_ref_init_count_mask(os_ref_atomic_t *rc, uint32_t b,
    struct os_refgrp *__debug_only grp,
    os_ref_count_t init_count, uint32_t init_bits)
{
	assert(init_bits < (1U << b));
	atomic_init(rc, (init_count << b) | init_bits);
	os_ref_check_underflow(rc, (init_count << b), 1u << b);

#if OS_REFCNT_DEBUG
	if (__improbable(ref_debug_enable && grp)) {
		ref_init_debug(rc, grp, init_count);
	}
#endif
}
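
/*
 * Illustrative example (editorial): with b = 2 reserved low bits,
 * init_count = 1 and init_bits = 0x1, the stored value is
 * (1 << 2) | 0x1 == 0x5; the reference count lives in the upper bits while
 * the low b bits carry caller-owned flag bits, which is why the mask
 * variants below retain and release in units of n == 1 << b.
 */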

__attribute__((always_inline))
static inline void
os_ref_retain_mask_inline(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp, memory_order mo)
{
	os_ref_count_t old = atomic_fetch_add_explicit(rc, n, mo);
	os_ref_check_retain(rc, old, n);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif
}

void
os_ref_retain_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
	os_ref_retain_mask_inline(rc, n, grp, memory_order_relaxed);
}

void
os_ref_retain_acquire_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
	os_ref_retain_mask_inline(rc, n, grp, memory_order_acquire);
}

uint32_t
os_ref_release_barrier_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		return os_ref_release_n_internal(rc, n, grp,
		           memory_order_release, memory_order_acquire);
	}
#endif

	return _os_ref_release_inline(rc, n, NULL,
	           memory_order_release, memory_order_acquire);
}

uint32_t
os_ref_release_relaxed_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		return os_ref_release_n_internal(rc, n, grp,
		           memory_order_relaxed, memory_order_relaxed);
	}
#endif

	return _os_ref_release_inline(rc, n, NULL,
	           memory_order_relaxed, memory_order_relaxed);
}

bool
os_ref_retain_try_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    uint32_t reject_mask, struct os_refgrp *__debug_only grp)
{
	os_ref_count_t cur, next;

	os_atomic_rmw_loop(rc, cur, next, relaxed, {
		if (__improbable(cur < n || (cur & reject_mask))) {
			os_atomic_rmw_loop_give_up(return false);
		}
		next = cur + n;
	});

	os_ref_check_overflow(rc, cur);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif

	return true;
}
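
/*
 * Editorial note: this try-retain and the acquire variant that follows give
 * up and return false without touching the counter when the observed value
 * is below n or has any reject_mask bit set; the two differ only in the
 * memory ordering of the successful exchange.
 */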

bool
os_ref_retain_try_acquire_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    uint32_t reject_mask, struct os_refgrp *__debug_only grp)
{
	os_ref_count_t cur, next;

	os_atomic_rmw_loop(rc, cur, next, acquire, {
		if (__improbable(cur < n || (cur & reject_mask))) {
			os_atomic_rmw_loop_give_up(return false);
		}
		next = cur + n;
	});

	os_ref_check_overflow(rc, cur);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
);