/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * os_refcnt reference counting API
 *
 * Two flavors are provided: atomic and locked. Atomic internally uses C11 atomic
 * operations and requires no external synchronization, whereas the locked flavor
 * assumes the refcnt object is locked by the caller. It is NOT safe to
 * mix-and-match locked and atomic calls.
 */
41 #include <stdatomic.h>
47 typedef struct os_refcnt os_refcnt_t
;
49 /* type of the internal counter */
50 typedef uint32_t os_ref_count_t
;
52 #if DEVELOPMENT || DEBUG
53 # define OS_REFCNT_DEBUG 1
55 # define OS_REFCNT_DEBUG 0
59 * Debugging is keyed off ref_group, so leave that field for kexts so that the
60 * combination of dev/debug kernel and release kext works.
62 #if XNU_KERNEL_PRIVATE
63 # define OS_REFCNT_HAS_GROUP OS_REFCNT_DEBUG
65 # define OS_REFCNT_HAS_GROUP 1
69 _Atomic os_ref_count_t ref_count
;
70 #if OS_REFCNT_HAS_GROUP
71 struct os_refgrp
*ref_group
;
77 const char *const grp_name
;
78 _Atomic os_ref_count_t grp_children
; /* number of refcount objects in group */
79 _Atomic os_ref_count_t grp_count
; /* current reference count of group */
80 _Atomic
uint64_t grp_retain_total
;
81 _Atomic
uint64_t grp_release_total
;
82 struct os_refgrp
*grp_parent
;
83 void *grp_log
; /* refcount logging context */
87 #if __has_attribute(diagnose_if)
88 # define os_error_if(cond, msg) __attribute__((diagnose_if((cond), (msg), "error")))
90 # define os_error_if(...)
96 * os_ref_init: initialize an os_refcnt with a count of 1
97 * os_ref_init_count: initialize an os_refcnt with a specific count >= 1
99 #define os_ref_init(rc, grp) os_ref_init_count((rc), (grp), 1)
100 void os_ref_init_count(struct os_refcnt
*, struct os_refgrp
*, os_ref_count_t count
)
101 os_error_if(count
== 0, "Reference count must be non-zero initialized");
104 # define os_refgrp_decl(qual, var, name, parent) \
105 qual struct os_refgrp __attribute__((section("__DATA,__refgrps"))) var = { \
106 .grp_name = (name), \
107 .grp_children = ATOMIC_VAR_INIT(0), \
108 .grp_count = ATOMIC_VAR_INIT(0), \
109 .grp_retain_total = ATOMIC_VAR_INIT(0), \
110 .grp_release_total = ATOMIC_VAR_INIT(0), \
111 .grp_parent = (parent), \
115 /* Create a default group based on the init() callsite if no explicit group
117 # define os_ref_init_count(rc, grp, count) ({ \
118 os_refgrp_decl(static, __grp, __func__, NULL); \
119 (os_ref_init_count)((rc), (grp) ? (grp) : &__grp, (count)); \
122 # define os_refgrp_decl(...)
123 # define os_ref_init_count(rc, grp, count) (os_ref_init_count)((rc), NULL, (count))
124 #endif /* OS_REFCNT_DEBUG */
127 * os_ref_retain: acquire a reference (increment reference count by 1) atomically.
129 * os_ref_release: release a reference (decrement reference count) atomically and
130 * return the new count. Memory is synchronized such that the dealloc block
131 * (i.e. code handling the final release() == 0 call) sees up-to-date memory
132 * with respect to all prior release()s on the same refcnt object. This
133 * memory ordering is sufficient for most use cases.
135 * os_ref_release_relaxed: same as release() but with weaker relaxed memory ordering.
136 * This can be used when the dealloc block is already synchronized with other
137 * accesses to the object (for example, with a lock).
139 * os_ref_release_live: release a reference that is guaranteed not to be the last one.
141 void os_ref_retain(struct os_refcnt
*);
143 os_ref_count_t
os_ref_release_explicit(struct os_refcnt
*rc
,
144 memory_order release_order
, memory_order dealloc_order
) OS_WARN_RESULT
;
146 static inline os_ref_count_t OS_WARN_RESULT
147 os_ref_release(struct os_refcnt
*rc
)
149 return os_ref_release_explicit(rc
, memory_order_release
, memory_order_acquire
);
152 static inline os_ref_count_t OS_WARN_RESULT
153 os_ref_release_relaxed(struct os_refcnt
*rc
)
155 return os_ref_release_explicit(rc
, memory_order_relaxed
, memory_order_relaxed
);
159 os_ref_release_live(struct os_refcnt
*rc
)
161 if (__improbable(os_ref_release_explicit(rc
,
162 memory_order_release
, memory_order_relaxed
) == 0)) {
163 panic("os_refcnt: unexpected release of final reference (rc=%p)\n", rc
);
164 __builtin_unreachable();
170 * os_ref_retain_try: a variant of atomic retain that fails for objects with a
171 * zero reference count. The caller must therefore ensure that the object
172 * remains alive for any possible retain_try() caller, usually by using a
173 * lock protecting both the retain and dealloc paths. This variant is useful
174 * for objects stored in a collection, because no lock is required on the
175 * release() side until the object is deallocated.
177 bool os_ref_retain_try(struct os_refcnt
*) OS_WARN_RESULT
;
181 * os_ref_retain_locked: acquire a reference on an object protected by a held
182 * lock. The caller must ensure mutual exclusivity of retain_locked() and
183 * release_locked() calls on the same object.
185 * os_ref_release_locked: release a reference on an object protected by a held
188 void os_ref_retain_locked(struct os_refcnt
*);
189 os_ref_count_t
os_ref_release_locked(struct os_refcnt
*) OS_WARN_RESULT
;
193 * os_ref_get_count: return the current reference count. This is unsafe for
196 static inline os_ref_count_t
197 os_ref_get_count(struct os_refcnt
*rc
)
199 return atomic_load_explicit(&rc
->ref_count
, memory_order_relaxed
);