/* libkern/os/refcnt_internal.h */

#ifndef _OS_REFCNT_INTERNAL_H
#define _OS_REFCNT_INTERNAL_H

struct os_refcnt {
	os_ref_atomic_t ref_count;
#if OS_REFCNT_DEBUG
	struct os_refgrp *ref_group;
#endif
};

#if OS_REFCNT_DEBUG
struct os_refgrp {
	const char *const grp_name;
	os_ref_atomic_t grp_children;       /* number of refcount objects in group */
	os_ref_atomic_t grp_count;          /* current reference count of group */
	_Atomic uint64_t grp_retain_total;
	_Atomic uint64_t grp_release_total;
	struct os_refgrp *grp_parent;
	void *grp_log;                      /* refcount logging context */
};
#endif

# define OS_REF_ATOMIC_INITIALIZER ATOMIC_VAR_INIT(0)
#if OS_REFCNT_DEBUG
# define OS_REF_INITIALIZER { .ref_count = OS_REF_ATOMIC_INITIALIZER, .ref_group = NULL }
#else
# define OS_REF_INITIALIZER { .ref_count = OS_REF_ATOMIC_INITIALIZER }
#endif

__BEGIN_DECLS

#if OS_REFCNT_DEBUG
# define os_ref_if_debug(x, y) x
#else
# define os_ref_if_debug(x, y) y
#endif
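
/*
 * os_ref_if_debug(x, y) selects its first argument when refcount debugging
 * is compiled in (OS_REFCNT_DEBUG) and its second otherwise; the wrappers
 * below use it to pass the per-object ref_group only in debug builds.
 */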

void os_ref_init_count_external(os_ref_atomic_t *, struct os_refgrp *, os_ref_count_t);
void os_ref_retain_external(os_ref_atomic_t *, struct os_refgrp *);
void os_ref_retain_locked_external(os_ref_atomic_t *, struct os_refgrp *);
os_ref_count_t os_ref_release_external(os_ref_atomic_t *, struct os_refgrp *,
    memory_order release_order, memory_order dealloc_order);
os_ref_count_t os_ref_release_relaxed_external(os_ref_atomic_t *, struct os_refgrp *);
os_ref_count_t os_ref_release_barrier_external(os_ref_atomic_t *, struct os_refgrp *);
os_ref_count_t os_ref_release_locked_external(os_ref_atomic_t *, struct os_refgrp *);
bool os_ref_retain_try_external(os_ref_atomic_t *, struct os_refgrp *);

#if XNU_KERNEL_PRIVATE
void os_ref_init_count_internal(os_ref_atomic_t *, struct os_refgrp *, os_ref_count_t);
void os_ref_retain_internal(os_ref_atomic_t *, struct os_refgrp *);
os_ref_count_t os_ref_release_relaxed_internal(os_ref_atomic_t *, struct os_refgrp *);
os_ref_count_t os_ref_release_barrier_internal(os_ref_atomic_t *, struct os_refgrp *);
os_ref_count_t os_ref_release_internal(os_ref_atomic_t *, struct os_refgrp *,
    memory_order release_order, memory_order dealloc_order);
bool os_ref_retain_try_internal(os_ref_atomic_t *, struct os_refgrp *);
void os_ref_retain_locked_internal(os_ref_atomic_t *, struct os_refgrp *);
os_ref_count_t os_ref_release_locked_internal(os_ref_atomic_t *, struct os_refgrp *);
#else
/* For now, the internal and external variants are identical. */
#define os_ref_init_count_internal      os_ref_init_count_external
#define os_ref_retain_internal          os_ref_retain_external
#define os_ref_retain_locked_internal   os_ref_retain_locked_external
#define os_ref_release_internal         os_ref_release_external
#define os_ref_release_barrier_internal os_ref_release_barrier_external
#define os_ref_release_relaxed_internal os_ref_release_relaxed_external
#define os_ref_release_locked_internal  os_ref_release_locked_external
#define os_ref_retain_try_internal      os_ref_retain_try_external
#endif

static inline void
os_ref_init_count(struct os_refcnt *rc, struct os_refgrp * __unused grp, os_ref_count_t count)
{
#if OS_REFCNT_DEBUG
	rc->ref_group = grp;
#endif
	os_ref_init_count_internal(&rc->ref_count, os_ref_if_debug(rc->ref_group, NULL), count);
}

static inline void
os_ref_retain(struct os_refcnt *rc)
{
	os_ref_retain_internal(&rc->ref_count, os_ref_if_debug(rc->ref_group, NULL));
}

static inline os_ref_count_t
os_ref_release_locked(struct os_refcnt *rc)
{
	return os_ref_release_locked_internal(&rc->ref_count, os_ref_if_debug(rc->ref_group, NULL));
}

static inline void
os_ref_retain_locked(struct os_refcnt *rc)
{
	os_ref_retain_locked_internal(&rc->ref_count, os_ref_if_debug(rc->ref_group, NULL));
}

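/*
 * os_ref_retain_try() reports via its boolean result whether the retain was
 * taken; presumably it fails once the count has already dropped to zero
 * (the declarations in this header do not spell out the failure condition).
 */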
static inline bool
os_ref_retain_try(struct os_refcnt *rc)
{
	return os_ref_retain_try_internal(&rc->ref_count, os_ref_if_debug(rc->ref_group, NULL));
}

__deprecated_msg("inefficient codegen, prefer os_ref_release / os_ref_release_relaxed")
static inline os_ref_count_t OS_WARN_RESULT
os_ref_release_explicit(struct os_refcnt *rc, memory_order release_order, memory_order dealloc_order)
{
	return os_ref_release_internal(&rc->ref_count, os_ref_if_debug(rc->ref_group, NULL),
	    release_order, dealloc_order);
}

#if OS_REFCNT_DEBUG
# define os_refgrp_decl(qual, var, name, parent) \
	qual struct os_refgrp __attribute__((section("__DATA,__refgrps"))) var = { \
		.grp_name          = (name), \
		.grp_children      = ATOMIC_VAR_INIT(0u), \
		.grp_count         = ATOMIC_VAR_INIT(0u), \
		.grp_retain_total  = ATOMIC_VAR_INIT(0u), \
		.grp_release_total = ATOMIC_VAR_INIT(0u), \
		.grp_parent        = (parent), \
		.grp_log           = NULL, \
	}
# define os_refgrp_decl_extern(var) \
	extern struct os_refgrp var

/* Create a default group based on the init() callsite if no explicit group
 * is provided. */
# define os_ref_init_count(rc, grp, count) ({ \
	os_refgrp_decl(static, __grp, __func__, NULL); \
	(os_ref_init_count)((rc), (grp) ? (grp) : &__grp, (count)); \
})
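
/*
 * Example (illustrative only; `my_subsys_refgrp` and `obj` are hypothetical
 * names, not part of this header): a subsystem can declare a named group and
 * attach its refcounts to it so that debug builds aggregate per-group
 * retain/release statistics.
 *
 *	os_refgrp_decl(static, my_subsys_refgrp, "my_subsys", NULL);
 *
 *	os_ref_init_count(&obj->refs, &my_subsys_refgrp, 1);
 */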

#else /* OS_REFCNT_DEBUG */

# define os_refgrp_decl(...) extern struct os_refgrp var __attribute__((unused))
# define os_refgrp_decl_extern(var) os_refgrp_decl(var)
# define os_ref_init_count(rc, grp, count) (os_ref_init_count)((rc), NULL, (count))

#endif /* OS_REFCNT_DEBUG */

#if XNU_KERNEL_PRIVATE
void os_ref_panic_live(void *rc) __abortlike;
#else
__abortlike
static inline void
os_ref_panic_live(void *rc)
{
	panic("os_refcnt: unexpected release of final reference (rc=%p)\n", rc);
	__builtin_unreachable();
}
#endif

static inline os_ref_count_t OS_WARN_RESULT
os_ref_release(struct os_refcnt *rc)
{
	return os_ref_release_barrier_internal(&rc->ref_count,
	           os_ref_if_debug(rc->ref_group, NULL));
}

static inline os_ref_count_t OS_WARN_RESULT
os_ref_release_relaxed(struct os_refcnt *rc)
{
	return os_ref_release_relaxed_internal(&rc->ref_count,
	           os_ref_if_debug(rc->ref_group, NULL));
}

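/*
 * Drop a reference that the caller knows is not the last one; if the count
 * nevertheless reaches zero, the release is treated as a fatal error via
 * os_ref_panic_live().
 */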
static inline void
os_ref_release_live(struct os_refcnt *rc)
{
	if (__improbable(os_ref_release(rc) == 0)) {
		os_ref_panic_live(rc);
	}
}

static inline os_ref_count_t
os_ref_get_count_internal(os_ref_atomic_t *rc)
{
	return atomic_load_explicit(rc, memory_order_relaxed);
}

static inline os_ref_count_t
os_ref_get_count(struct os_refcnt *rc)
{
	return os_ref_get_count_internal(&rc->ref_count);
}
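
/*
 * Typical lifecycle with the struct os_refcnt wrappers above (a minimal
 * sketch; `struct my_object`, `obj` and `my_object_free` are hypothetical
 * and not part of this header):
 *
 *	struct my_object {
 *		struct os_refcnt mo_refs;
 *		// ... payload ...
 *	};
 *
 *	os_ref_init_count(&obj->mo_refs, NULL, 1);   // object starts with one reference
 *	os_ref_retain(&obj->mo_refs);                // take an additional reference
 *	if (os_ref_release(&obj->mo_refs) == 0) {    // 0 means the last reference was dropped
 *		my_object_free(obj);
 *	}
 */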

/*
 * Raw API
 */
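
/*
 * The _raw variants below operate on a bare os_ref_atomic_t supplied by the
 * caller (no struct os_refcnt wrapper); the refcount group, where used, is
 * passed explicitly at each call site.
 */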

static inline void
os_ref_init_count_raw(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t count)
{
	os_ref_init_count_internal(rc, grp, count);
}

static inline void
os_ref_retain_raw(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	os_ref_retain_internal(rc, grp);
}

static inline os_ref_count_t
os_ref_release_raw(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	return os_ref_release_barrier_internal(rc, grp);
}

static inline os_ref_count_t
os_ref_release_raw_relaxed(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	return os_ref_release_relaxed_internal(rc, grp);
}

static inline void
os_ref_release_live_raw(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	if (__improbable(os_ref_release_barrier_internal(rc, grp) == 0)) {
		os_ref_panic_live(rc);
	}
}

static inline bool
os_ref_retain_try_raw(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	return os_ref_retain_try_internal(rc, grp);
}

static inline void
os_ref_retain_locked_raw(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	os_ref_retain_locked_internal(rc, grp);
}

static inline os_ref_count_t
os_ref_release_locked_raw(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	return os_ref_release_locked_internal(rc, grp);
}

static inline os_ref_count_t
os_ref_get_count_raw(os_ref_atomic_t *rc)
{
	return os_ref_get_count_internal(rc);
}

#if !OS_REFCNT_DEBUG
/* remove the group argument for non-debug */
#define os_ref_init_count_raw(rc, grp, count)   (os_ref_init_count_raw)((rc), NULL, (count))
#define os_ref_retain_raw(rc, grp)              (os_ref_retain_raw)((rc), NULL)
#define os_ref_release_raw(rc, grp)             (os_ref_release_raw)((rc), NULL)
#define os_ref_release_raw_relaxed(rc, grp)     (os_ref_release_raw_relaxed)((rc), NULL)
#define os_ref_release_live_raw(rc, grp)        (os_ref_release_live_raw)((rc), NULL)
#define os_ref_retain_try_raw(rc, grp)          (os_ref_retain_try_raw)((rc), NULL)
#define os_ref_retain_locked_raw(rc, grp)       (os_ref_retain_locked_raw)((rc), NULL)
#define os_ref_release_locked_raw(rc, grp)      (os_ref_release_locked_raw)((rc), NULL)
#endif

#if XNU_KERNEL_PRIVATE
#pragma GCC visibility push(hidden)

extern void
os_ref_retain_mask_internal(os_ref_atomic_t *rc, uint32_t n, struct os_refgrp *grp);
extern void
os_ref_retain_acquire_mask_internal(os_ref_atomic_t *rc, uint32_t n, struct os_refgrp *grp);
extern bool
os_ref_retain_try_mask_internal(os_ref_atomic_t *, uint32_t n,
    uint32_t reject_mask, struct os_refgrp *grp) OS_WARN_RESULT;
extern bool
os_ref_retain_try_acquire_mask_internal(os_ref_atomic_t *, uint32_t n,
    uint32_t reject_mask, struct os_refgrp *grp) OS_WARN_RESULT;

extern uint32_t
os_ref_release_barrier_mask_internal(os_ref_atomic_t *rc, uint32_t n, struct os_refgrp *grp);
extern uint32_t
os_ref_release_relaxed_mask_internal(os_ref_atomic_t *rc, uint32_t n, struct os_refgrp *grp);

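/*
 * The _mask variants pack the reference count into the upper bits of the
 * atomic word and reserve the low `b` bits for caller-managed state: retains
 * and releases add or subtract (1u << b), os_ref_get_count_mask() shifts the
 * raw value right by `b`, and os_ref_get_bits_mask() keeps only the low `b`
 * bits. Illustrative values, assuming b == 2:
 *
 *	raw value 0x0D  ->  count 3 (0x0D >> 2), low bits 0x1 (0x0D & 0x3)
 */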
static inline uint32_t
os_ref_get_raw_mask(os_ref_atomic_t *rc)
{
	return os_ref_get_count_internal(rc);
}

static inline uint32_t
os_ref_get_bits_mask(os_ref_atomic_t *rc, uint32_t b)
{
	return os_ref_get_raw_mask(rc) & ((1u << b) - 1);
}

static inline os_ref_count_t
os_ref_get_count_mask(os_ref_atomic_t *rc, uint32_t b)
{
	return os_ref_get_raw_mask(rc) >> b;
}

static inline void
os_ref_retain_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
{
	os_ref_retain_mask_internal(rc, 1u << b, grp);
}

static inline void
os_ref_retain_acquire_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
{
	os_ref_retain_acquire_mask_internal(rc, 1u << b, grp);
}

static inline bool
os_ref_retain_try_mask(os_ref_atomic_t *rc, uint32_t b,
    uint32_t reject_mask, struct os_refgrp *grp)
{
	return os_ref_retain_try_mask_internal(rc, 1u << b, reject_mask, grp);
}

static inline bool
os_ref_retain_try_acquire_mask(os_ref_atomic_t *rc, uint32_t b,
    uint32_t reject_mask, struct os_refgrp *grp)
{
	return os_ref_retain_try_acquire_mask_internal(rc, 1u << b, reject_mask, grp);
}

static inline uint32_t
os_ref_release_raw_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
{
	return os_ref_release_barrier_mask_internal(rc, 1u << b, grp);
}

static inline uint32_t
os_ref_release_raw_relaxed_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
{
	return os_ref_release_relaxed_mask_internal(rc, 1u << b, grp);
}

static inline os_ref_count_t
os_ref_release_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
{
	return os_ref_release_barrier_mask_internal(rc, 1u << b, grp) >> b;
}

static inline os_ref_count_t
os_ref_release_relaxed_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
{
	return os_ref_release_relaxed_mask_internal(rc, 1u << b, grp) >> b;
}

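/*
 * os_ref_release_live_mask(): the release below removes one count
 * (1u << b); a result smaller than (1u << b) means the count field in the
 * upper bits has reached zero, which this variant treats as fatal.
 */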
static inline void
os_ref_release_live_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
{
	uint32_t val = os_ref_release_barrier_mask_internal(rc, 1u << b, grp);
	if (__improbable(val < 1u << b)) {
		os_ref_panic_live(rc);
	}
}

#if !OS_REFCNT_DEBUG
/* remove the group argument for non-debug */
#define os_ref_init_count_mask(rc, b, grp, init_c, init_b)  (os_ref_init_count_mask)((rc), (b), NULL, (init_c), (init_b))
#define os_ref_retain_mask(rc, b, grp)                      (os_ref_retain_mask)((rc), (b), NULL)
#define os_ref_retain_acquire_mask(rc, b, grp)              (os_ref_retain_acquire_mask)((rc), (b), NULL)
#define os_ref_retain_try_mask(rc, b, reject, grp)          (os_ref_retain_try_mask)((rc), (b), (reject), NULL)
#define os_ref_retain_try_acquire_mask(rc, b, reject, grp)  (os_ref_retain_try_acquire_mask)((rc), (b), (reject), NULL)
#define os_ref_release_mask(rc, b, grp)                     (os_ref_release_mask)((rc), (b), NULL)
#define os_ref_release_relaxed_mask(rc, b, grp)             (os_ref_release_relaxed_mask)((rc), (b), NULL)
#define os_ref_release_raw_mask(rc, b, grp)                 (os_ref_release_raw_mask)((rc), (b), NULL)
#define os_ref_release_raw_relaxed_mask(rc, b, grp)         (os_ref_release_raw_relaxed_mask)((rc), (b), NULL)
#define os_ref_release_live_mask(rc, b, grp)                (os_ref_release_live_mask)((rc), (b), NULL)
#endif

#pragma GCC visibility pop
#endif

__END_DECLS

#endif /* _OS_REFCNT_INTERNAL_H */