* operations and requires no external synchronization, whereas the locked flavor
* assumes the refcnt object is locked by the caller. It is NOT safe to
* mix-and-match locked and atomic calls.
+ *
+ * 'refgrp's are a way to (hierarchically) group like refcount objects for
+ * debugging purposes. The group keeps track of the total number and aggregate
+ * reference count of member refcounts, and the "rlog=" boot-arg is used to enable
+ * refcount logging by group name. Named groups can be created explicitly with
+ * os_refgrp_decl(), or implicitly by passing NULL for the refgrp when
+ * initializing a refcnt object. In the latter case, the group name is the same as
+ * the function enclosing the init call. Groups are only available on DEV or DEBUG
+ * builds, and are otherwise compiled out.
*/
#include <stdatomic.h>
/* type of the internal counter */
typedef uint32_t os_ref_count_t;
-
-#if DEVELOPMENT || DEBUG
-# define OS_REFCNT_DEBUG 1
-#else
-# define OS_REFCNT_DEBUG 0
-#endif
+typedef _Atomic(os_ref_count_t) os_ref_atomic_t;
/*
- * Debugging is keyed off ref_group, so leave that field for kexts so that the
- * combination of dev/debug kernel and release kext works.
+ * OS_REF_INITIALIZER
+ * OS_REF_ATOMIC_INITIALIZER
+ *
+ * Static initializers that create refcnt objects with safe initial values for use
+ * between declaration and initialization (os_ref*_init()). Equivalent to zeroing.
*/
-#if XNU_KERNEL_PRIVATE
-# define OS_REFCNT_HAS_GROUP OS_REFCNT_DEBUG
-#else
-# define OS_REFCNT_HAS_GROUP 1
-#endif
-struct os_refcnt {
- _Atomic os_ref_count_t ref_count;
-#if OS_REFCNT_HAS_GROUP
- struct os_refgrp *ref_group;
+#ifndef KERNEL
+# include <stdlib.h>
+# include <stdio.h>
+# ifndef __improbable
+# define __improbable(x) x
+# endif
+# ifndef panic
+# define panic(x, ...) do { fprintf(stderr, x, __VA_ARGS__); abort(); } while (0)
+# endif
#endif
-};
-
-#if OS_REFCNT_DEBUG
-struct os_refgrp {
- const char *const grp_name;
- _Atomic os_ref_count_t grp_children; /* number of refcount objects in group */
- _Atomic os_ref_count_t grp_count; /* current reference count of group */
- _Atomic uint64_t grp_retain_total;
- _Atomic uint64_t grp_release_total;
- struct os_refgrp *grp_parent;
- void *grp_log; /* refcount logging context */
-};
+
+#ifndef OS_REFCNT_DEBUG
+# if DEVELOPMENT || DEBUG
+# define OS_REFCNT_DEBUG 1
+# else
+# define OS_REFCNT_DEBUG 0
+# endif
#endif
#if __has_attribute(diagnose_if)
* os_ref_init_count: initialize an os_refcnt with a specific count >= 1
*/
#define os_ref_init(rc, grp) os_ref_init_count((rc), (grp), 1)
-void os_ref_init_count(struct os_refcnt *, struct os_refgrp *, os_ref_count_t count)
+static void os_ref_init_count(struct os_refcnt *, struct os_refgrp *, os_ref_count_t count)
os_error_if(count == 0, "Reference count must be non-zero initialized");
-#if OS_REFCNT_DEBUG
-# define os_refgrp_decl(qual, var, name, parent) \
- qual struct os_refgrp __attribute__((section("__DATA,__refgrps"))) var = { \
- .grp_name = (name), \
- .grp_children = ATOMIC_VAR_INIT(0), \
- .grp_count = ATOMIC_VAR_INIT(0), \
- .grp_retain_total = ATOMIC_VAR_INIT(0), \
- .grp_release_total = ATOMIC_VAR_INIT(0), \
- .grp_parent = (parent), \
- .grp_log = NULL, \
- }
-
-/* Create a default group based on the init() callsite if no explicit group
- * is provided. */
-# define os_ref_init_count(rc, grp, count) ({ \
- os_refgrp_decl(static, __grp, __func__, NULL); \
- (os_ref_init_count)((rc), (grp) ? (grp) : &__grp, (count)); \
- })
-#else
-# define os_refgrp_decl(...)
-# define os_ref_init_count(rc, grp, count) (os_ref_init_count)((rc), NULL, (count))
-#endif /* OS_REFCNT_DEBUG */
+/*
+ * os_refgrp_decl(qual, var, name, parent): declare a refgroup object 'var' with
+ * given name string and parent group.
+ */
/*
 * os_ref_retain: acquire a reference (increment reference count by 1) atomically.
 *
 * os_ref_release: release a reference (decrement reference count) atomically and
 * return the new count. If the new reference count is zero, the caller must
 * deallocate the object.
 *
 * os_ref_release_live: release a reference that is guaranteed not to be the last one.
 */
-void os_ref_retain(struct os_refcnt *);
-
-os_ref_count_t os_ref_release_explicit(struct os_refcnt *rc,
- memory_order release_order, memory_order dealloc_order) OS_WARN_RESULT;
-
-static inline os_ref_count_t OS_WARN_RESULT
-os_ref_release(struct os_refcnt *rc)
-{
- return os_ref_release_explicit(rc, memory_order_release, memory_order_acquire);
-}
-
-static inline os_ref_count_t OS_WARN_RESULT
-os_ref_release_relaxed(struct os_refcnt *rc)
-{
- return os_ref_release_explicit(rc, memory_order_relaxed, memory_order_relaxed);
-}
-
-static inline void
-os_ref_release_live(struct os_refcnt *rc)
-{
- if (__improbable(os_ref_release_explicit(rc,
- memory_order_release, memory_order_relaxed) == 0)) {
- panic("os_refcnt: unexpected release of final reference (rc=%p)\n", rc);
- __builtin_unreachable();
- }
-}
-
+static void os_ref_retain(struct os_refcnt *);
+static os_ref_count_t os_ref_release(struct os_refcnt *) OS_WARN_RESULT;
+static os_ref_count_t os_ref_release_relaxed(struct os_refcnt *) OS_WARN_RESULT;
+static void os_ref_release_live(struct os_refcnt *);
/*
 * os_ref_retain_try: a variant of atomic retain that fails for objects with a
 * zero reference count. The caller must therefore ensure the object stays alive
 * across the retain_try() call, typically by holding a lock that covers both the
 * retain and dealloc paths. This variant is useful for objects stored in a
 * collection, because no lock is required on the release() side until the object
 * is deallocated.
 */
-bool os_ref_retain_try(struct os_refcnt *) OS_WARN_RESULT;
-
+static bool os_ref_retain_try(struct os_refcnt *) OS_WARN_RESULT;
/*
 * os_ref_retain_locked: acquire a reference on an object protected by a held
 * lock.
 *
 * os_ref_release_locked: release a reference on an object protected by a held
 * lock.
 */
-void os_ref_retain_locked(struct os_refcnt *);
-os_ref_count_t os_ref_release_locked(struct os_refcnt *) OS_WARN_RESULT;
-
+static void os_ref_retain_locked(struct os_refcnt *);
+static os_ref_count_t os_ref_release_locked(struct os_refcnt *) OS_WARN_RESULT;
/*
* os_ref_get_count: return the current reference count. This is unsafe for
* synchronization.
*/
-static inline os_ref_count_t
-os_ref_get_count(struct os_refcnt *rc)
-{
- return atomic_load_explicit(&rc->ref_count, memory_order_relaxed);
-}
+static os_ref_count_t os_ref_get_count(struct os_refcnt *rc);
+
+
+#if XNU_KERNEL_PRIVATE
+/*
+ * Raw API that uses a plain atomic counter (os_ref_atomic_t) and a separate
+ * refgroup. This can be used in situations where the refcount object must be
+ * fixed size, for example for embedding in structures with ABI stability
+ * requirements.
+ */
+
+#define os_ref_init_raw(rc, grp) os_ref_init_count_raw((rc), (grp), 1)
+static void os_ref_init_count_raw(os_ref_atomic_t *, struct os_refgrp *, os_ref_count_t count)
+os_error_if(count == 0, "Reference count must be non-zero initialized");
+static void os_ref_retain_raw(os_ref_atomic_t *, struct os_refgrp *);
+static os_ref_count_t os_ref_release_raw(os_ref_atomic_t *, struct os_refgrp *) OS_WARN_RESULT;
+static os_ref_count_t os_ref_release_relaxed_raw(os_ref_atomic_t *, struct os_refgrp *) OS_WARN_RESULT;
+static void os_ref_release_live_raw(os_ref_atomic_t *, struct os_refgrp *);
+static bool os_ref_retain_try_raw(os_ref_atomic_t *, struct os_refgrp *) OS_WARN_RESULT;
+static void os_ref_retain_locked_raw(os_ref_atomic_t *, struct os_refgrp *);
+static os_ref_count_t os_ref_release_locked_raw(os_ref_atomic_t *, struct os_refgrp *) OS_WARN_RESULT;
+static os_ref_count_t os_ref_get_count_raw(os_ref_atomic_t *rc);
+
+
+/*
+ * Bitwise API: like the raw API, but allows some bits in the refcount value to be
+ * reserved for other purposes. 'b' defines the number of trailing (LSB) reserved
+ * bits, which the refcnt_raw API will never modify (except at init()).
+ *
+ * It is assumed that users of this API always use atomic ops on the
+ * os_ref_atomic_t (or hold a lock for the locked variants), and never modify the
+ * top (32 - 'b') bits.
+ *
+ * Due to guard bits, the maximum reference count is 2^(28 - 'b') - 1, and the
+ * maximum 'b' is 26 bits. This API can also be used just to limit the max
+ * refcount.
+ */
+
+/* Initialize the reference count and reserved bits */
+#define os_ref_init_mask(rc, grp, b) os_ref_init_count_mask((rc), (grp), 1, 0, (b))
+void os_ref_init_count_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t init_count,
+ os_ref_count_t init_bits, os_ref_count_t b)
+os_error_if(init_count == 0, "Reference count must be non-zero initialized")
+os_error_if(b > 26, "Bitwise reference count limited to 26 bits")
+os_error_if(init_bits >= (1U << b), "Bits out of range");
+
+void os_ref_retain_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b);
+static os_ref_count_t os_ref_release_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b) OS_WARN_RESULT;
+static os_ref_count_t os_ref_release_relaxed_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b) OS_WARN_RESULT;
+static void os_ref_release_live_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b);
+bool os_ref_retain_try_mask(os_ref_atomic_t *, struct os_refgrp *grp, os_ref_count_t b) OS_WARN_RESULT;
+void os_ref_retain_locked_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b);
+os_ref_count_t os_ref_release_locked_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b) OS_WARN_RESULT;
+os_ref_count_t os_ref_get_count_mask(os_ref_atomic_t *rc, os_ref_count_t b);
+
+#endif /* XNU_KERNEL_PRIVATE */
__END_DECLS
+#include <os/refcnt_internal.h>
#endif