/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * os_refcnt reference counting API
 *
 * Two flavors are provided: atomic and locked. Atomic internally uses C11 atomic
 * operations and requires no external synchronization, whereas the locked flavor
 * assumes the refcnt object is locked by the caller. It is NOT safe to
 * mix-and-match locked and atomic calls.
 *
 * 'refgrp's are a way to (hierarchically) group like refcount objects for
 * debugging purposes. The group keeps track of the total number and aggregate
 * reference count of member refcounts, and the "rlog=" boot-arg is used to enable
 * refcount logging by group name. Named groups can be created explicitly with
 * os_refgrp_decl(), or implicitly by passing NULL for the refgrp when
 * initializing a refcnt object. In the latter case, the group name is the same as
 * the function enclosing the init call. Groups are only available on DEV or DEBUG
 * builds, and are otherwise compiled out.
 */

#include <stdatomic.h>
#include <stdbool.h>
#include <os/base.h>

struct os_refcnt;
struct os_refgrp;
typedef struct os_refcnt os_refcnt_t;

/* type of the internal counter */
typedef uint32_t os_ref_count_t;
typedef _Atomic(os_ref_count_t) os_ref_atomic_t;

/*
 * OS_REF_ATOMIC_INITIALIZER
 *
 * Static initializers that create refcnt objects with safe initial values for use
 * between declaration and initialization (os_ref*_init()). Equivalent to zeroing.
 */
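
/*
 * Illustrative sketch only (not part of the API surface): a counter that must be
 * in a safe state before its owning subsystem calls one of the os_ref*_init()
 * routines can use the static initializer. 'g_cache_refs' is a hypothetical name.
 *
 *	static os_ref_atomic_t g_cache_refs = OS_REF_ATOMIC_INITIALIZER;
 */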

#ifndef KERNEL
# include <stdio.h>
# include <stdlib.h>
# define __improbable(x) x
# define panic(x, ...) do { fprintf(stderr, x, __VA_ARGS__); abort(); } while (0)
#endif

#ifndef OS_REFCNT_DEBUG
# if DEVELOPMENT || DEBUG
#  define OS_REFCNT_DEBUG 1
# else
#  define OS_REFCNT_DEBUG 0
# endif
#endif

#if __has_attribute(diagnose_if)
# define os_error_if(cond, msg) __attribute__((diagnose_if((cond), (msg), "error")))
#else
# define os_error_if(...)
#endif

/*
 * os_ref_init: initialize an os_refcnt with a count of 1
 * os_ref_init_count: initialize an os_refcnt with a specific count >= 1
 */
#define os_ref_init(rc, grp) os_ref_init_count((rc), (grp), 1)
static void os_ref_init_count(struct os_refcnt *, struct os_refgrp *, os_ref_count_t count)
	os_error_if(count == 0, "Reference count must be non-zero initialized");

/*
 * os_refgrp_decl(qual, var, name, parent): declare a refgroup object 'var' with
 * given name string and parent group.
 */
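
/*
 * Usage sketch (illustrative only; 'my_subsys_refgrp', 'struct my_obj', and its
 * fields are hypothetical):
 *
 *	os_refgrp_decl(static, my_subsys_refgrp, "my_subsys", NULL);
 *
 *	struct my_obj {
 *		os_refcnt_t mo_refs;
 *	};
 *
 *	// Give a newly allocated object an initial count of 1 in the named group.
 *	// Passing NULL instead of &my_subsys_refgrp would create an implicit
 *	// group named after the enclosing function.
 *	os_ref_init(&obj->mo_refs, &my_subsys_refgrp);
 */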

/*
 * os_ref_retain: acquire a reference (increment reference count by 1) atomically.
 *
 * os_ref_release: release a reference (decrement reference count) atomically and
 * return the new count. Memory is synchronized such that the dealloc block
 * (i.e. code handling the final release() == 0 call) sees up-to-date memory
 * with respect to all prior release()s on the same refcnt object. This
 * memory ordering is sufficient for most use cases.
 *
 * os_ref_release_relaxed: same as release() but with weaker relaxed memory ordering.
 * This can be used when the dealloc block is already synchronized with other
 * accesses to the object (for example, with a lock).
 *
 * os_ref_release_live: release a reference that is guaranteed not to be the last one.
 */
static void os_ref_retain(struct os_refcnt *);
static os_ref_count_t os_ref_release(struct os_refcnt *) OS_WARN_RESULT;
static os_ref_count_t os_ref_release_relaxed(struct os_refcnt *) OS_WARN_RESULT;
static void os_ref_release_live(struct os_refcnt *);
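
/*
 * Usage sketch (illustrative only; 'my_obj_free()' and the 'mo_refs' field are
 * hypothetical):
 *
 *	void
 *	my_obj_hold(struct my_obj *o)
 *	{
 *		os_ref_retain(&o->mo_refs);
 *	}
 *
 *	void
 *	my_obj_rele(struct my_obj *o)
 *	{
 *		// Only the final release returns 0; the memory ordering described
 *		// above guarantees the dealloc path sees all writes made before
 *		// any earlier release on this refcnt.
 *		if (os_ref_release(&o->mo_refs) == 0) {
 *			my_obj_free(o);
 *		}
 *	}
 */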

/*
 * os_ref_retain_try: a variant of atomic retain that fails for objects with a
 * zero reference count. The caller must therefore ensure that the object
 * remains alive for any possible retain_try() caller, usually by using a
 * lock protecting both the retain and dealloc paths. This variant is useful
 * for objects stored in a collection, because no lock is required on the
 * release() side until the object is deallocated.
 */
static bool os_ref_retain_try(struct os_refcnt *) OS_WARN_RESULT;
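
/*
 * Usage sketch (illustrative only; 'g_table_lock', 'my_table_lookup()', and the
 * object layout are hypothetical):
 *
 *	struct my_obj *
 *	my_obj_find(uint64_t id)
 *	{
 *		struct my_obj *o;
 *
 *		lck_mtx_lock(&g_table_lock);
 *		o = my_table_lookup(id);
 *		// The object may already be on its way to deallocation; only hand
 *		// it out if a reference can still be taken.
 *		if (o != NULL && !os_ref_retain_try(&o->mo_refs)) {
 *			o = NULL;
 *		}
 *		lck_mtx_unlock(&g_table_lock);
 *		return o;
 *	}
 *
 * The dealloc path is expected to take 'g_table_lock' and unlink the object
 * before freeing it, so the lookup above never races with the free.
 */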

/*
 * os_ref_retain_locked: acquire a reference on an object protected by a held
 * lock. The caller must ensure mutual exclusivity of retain_locked() and
 * release_locked() calls on the same object.
 *
 * os_ref_release_locked: release a reference on an object protected by a held
 * lock.
 */
static void os_ref_retain_locked(struct os_refcnt *);
static os_ref_count_t os_ref_release_locked(struct os_refcnt *) OS_WARN_RESULT;
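
/*
 * Usage sketch (illustrative only; 'mo_lock' is a hypothetical mutex that also
 * serializes the object's dealloc path):
 *
 *	lck_mtx_lock(&o->mo_lock);
 *	os_ref_retain_locked(&o->mo_refs);
 *	lck_mtx_unlock(&o->mo_lock);
 *
 *	lck_mtx_lock(&o->mo_lock);
 *	if (os_ref_release_locked(&o->mo_refs) == 0) {
 *		lck_mtx_unlock(&o->mo_lock);
 *		my_obj_free(o);
 *	} else {
 *		lck_mtx_unlock(&o->mo_lock);
 *	}
 */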

/*
 * os_ref_get_count: return the current reference count. This is unsafe for
 * synchronization.
 */
static os_ref_count_t os_ref_get_count(struct os_refcnt *rc);

#if XNU_KERNEL_PRIVATE
/*
 * Raw API that uses a plain atomic counter (os_ref_atomic_t) and a separate
 * refgroup. This can be used in situations where the refcount object must be
 * fixed size, for example for embedding in structures with ABI stability
 * requirements.
 */

#define os_ref_init_raw(rc, grp) os_ref_init_count_raw((rc), (grp), 1)
static void os_ref_init_count_raw(os_ref_atomic_t *, struct os_refgrp *, os_ref_count_t count)
	os_error_if(count == 0, "Reference count must be non-zero initialized");
static void os_ref_retain_raw(os_ref_atomic_t *, struct os_refgrp *);
static os_ref_count_t os_ref_release_raw(os_ref_atomic_t *, struct os_refgrp *) OS_WARN_RESULT;
static os_ref_count_t os_ref_release_relaxed_raw(os_ref_atomic_t *, struct os_refgrp *) OS_WARN_RESULT;
static void os_ref_release_live_raw(os_ref_atomic_t *, struct os_refgrp *);
static bool os_ref_retain_try_raw(os_ref_atomic_t *, struct os_refgrp *) OS_WARN_RESULT;
static void os_ref_retain_locked_raw(os_ref_atomic_t *, struct os_refgrp *);
static os_ref_count_t os_ref_release_locked_raw(os_ref_atomic_t *, struct os_refgrp *) OS_WARN_RESULT;
static os_ref_count_t os_ref_get_count_raw(os_ref_atomic_t *rc);
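
/*
 * Usage sketch (illustrative only; 'foo_refgrp', 'struct foo_hdr', and
 * 'foo_hdr_free()' are hypothetical):
 *
 *	// The counter is a bare 32-bit atomic, so the layout of the embedding
 *	// structure stays fixed regardless of build configuration.
 *	struct foo_hdr {
 *		os_ref_atomic_t fh_refs;
 *		uint32_t        fh_flags;
 *	};
 *
 *	os_refgrp_decl(static, foo_refgrp, "foo_hdr", NULL);
 *
 *	os_ref_init_raw(&hdr->fh_refs, &foo_refgrp);
 *	os_ref_retain_raw(&hdr->fh_refs, &foo_refgrp);
 *	if (os_ref_release_raw(&hdr->fh_refs, &foo_refgrp) == 0) {
 *		foo_hdr_free(hdr);
 *	}
 */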

/*
 * Bitwise API: like the raw API, but allows some bits in the refcount value to be
 * reserved for other purposes. 'b' defines the number of trailing (LSB) reserved
 * bits, which the refcnt_raw API will never modify (except at init()).
 *
 * It is assumed that users of this API always use atomic ops on the
 * os_ref_atomic_t (or hold a lock for the locked variants), and never modify the
 * top (32 - 'b') bits.
 *
 * Due to guard bits, the maximum reference count is 2^(28 - 'b') - 1, and the
 * maximum 'b' is 26 bits. This API can also be used just to limit the max
 * refcount.
 */

/* Initialize the reference count and reserved bits */
#define os_ref_init_mask(rc, grp, b) os_ref_init_count_mask((rc), (grp), 1, 0, (b))
void os_ref_init_count_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t init_count,
	os_ref_count_t init_bits, os_ref_count_t b)
	os_error_if(init_count == 0, "Reference count must be non-zero initialized")
	os_error_if(b > 26, "Bitwise reference count limited to 26 bits")
	os_error_if(init_bits >= (1U << b), "Bits out of range");

void os_ref_retain_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b);
static os_ref_count_t os_ref_release_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b) OS_WARN_RESULT;
static os_ref_count_t os_ref_release_relaxed_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b) OS_WARN_RESULT;
static void os_ref_release_live_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b);
bool os_ref_retain_try_mask(os_ref_atomic_t *, struct os_refgrp *grp, os_ref_count_t b) OS_WARN_RESULT;
void os_ref_retain_locked_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b);
os_ref_count_t os_ref_release_locked_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b) OS_WARN_RESULT;
os_ref_count_t os_ref_get_count_mask(os_ref_atomic_t *rc, os_ref_count_t b);
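
/*
 * Usage sketch (illustrative only; the 2-bit field width, 'f_refs', 'foo_refgrp',
 * and 'foo_free()' are hypothetical):
 *
 *	// Reserve the 2 low bits of the counter for caller-managed state; the
 *	// reference count lives in the remaining upper bits.
 *	#define FOO_STATE_BITS 2
 *
 *	// Start with a count of 1 and the reserved bits cleared.
 *	os_ref_init_count_mask(&foo->f_refs, &foo_refgrp, 1, 0, FOO_STATE_BITS);
 *
 *	os_ref_retain_mask(&foo->f_refs, &foo_refgrp, FOO_STATE_BITS);
 *	if (os_ref_release_mask(&foo->f_refs, &foo_refgrp, FOO_STATE_BITS) == 0) {
 *		foo_free(foo);
 *	}
 */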

#endif /* XNU_KERNEL_PRIVATE */

#include <os/refcnt_internal.h>