/* apple/xnu: libkern/os/refcnt.c (commit 67deb068f441cabc21abc2e6ed1fbbcb14f631d4) */
#include <kern/assert.h>
#include <kern/debug.h>
#include <pexpert/pexpert.h>
#include <kern/btlog.h>
#include <kern/backtrace.h>
#include <libkern/libkern.h>
#include "refcnt.h"

#define OS_REFCNT_MAX_COUNT ((os_ref_count_t)0x0FFFFFFFUL)
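
/*
 * Note: the cap sits well below the 32-bit wraparound point, presumably so
 * that leaked or racing retains trip the overflow panic long before the
 * counter could wrap back through zero.
 */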

#if OS_REFCNT_DEBUG
os_refgrp_decl(static, global_ref_group, "all", NULL);
static bool ref_debug_enable = false;
static const size_t ref_log_nrecords = 1000000;

#define REFLOG_BTDEPTH 10
#define REFLOG_RETAIN  1
#define REFLOG_RELEASE 2

#define __debug_only
#else
# define __debug_only __unused
#endif /* OS_REFCNT_DEBUG */
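
/*
 * Hypothetical usage sketch (not part of this file): a subsystem declares its
 * own refgrp so its counts roll up separately under the "all" group. The
 * group and field names below are illustrative only.
 *
 *	os_refgrp_decl(static, my_subsystem_ref_group, "my-subsystem", NULL);
 *	os_ref_init(&obj->ref, &my_subsystem_ref_group);
 */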

/* Best-effort group name for panic messages. */
static const char *
ref_grp_name(struct os_refcnt __debug_only *rc)
{
#if OS_REFCNT_DEBUG
	if (rc && rc->ref_group && rc->ref_group->grp_name) {
		return rc->ref_group->grp_name;
	}
#endif
	return "<null>";
}

__attribute__((cold, noinline, not_tail_called, noreturn))
static void
os_ref_panic_underflow(struct os_refcnt *rc)
{
	panic("os_refcnt: underflow (rc=%p, grp=%s)\n", rc, ref_grp_name(rc));
	__builtin_unreachable();
}

static inline void
os_ref_check_underflow(struct os_refcnt *rc, os_ref_count_t count)
{
	if (__improbable(count == 0)) {
		os_ref_panic_underflow(rc);
	}
}

__attribute__((cold, noinline, not_tail_called, noreturn))
static void
os_ref_panic_resurrection(struct os_refcnt *rc)
{
	panic("os_refcnt: used unsafely when zero (rc=%p, grp=%s)\n", rc, ref_grp_name(rc));
	__builtin_unreachable();
}

static inline void
os_ref_assert_referenced(struct os_refcnt *rc, os_ref_count_t count)
{
	if (__improbable(count == 0)) {
		os_ref_panic_resurrection(rc);
	}
}

__attribute__((cold, noinline, not_tail_called, noreturn))
static void
os_ref_panic_overflow(struct os_refcnt *rc)
{
	panic("os_refcnt: overflow (rc=%p, grp=%s)\n", rc, ref_grp_name(rc));
	__builtin_unreachable();
}

static inline void
os_ref_check_overflow(struct os_refcnt *rc, os_ref_count_t count)
{
	if (__improbable(count >= OS_REFCNT_MAX_COUNT)) {
		os_ref_panic_overflow(rc);
	}
}

/*
 * A retain is valid only on a live object (count != 0) whose count is still
 * below OS_REFCNT_MAX_COUNT.
 */
static void
os_ref_check_retain(struct os_refcnt *rc, os_ref_count_t count)
{
	os_ref_assert_referenced(rc, count);
	os_ref_check_overflow(rc, count);
}
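
/*
 * Illustrative sketch of the misuse these checks catch (hypothetical caller,
 * not part of this file):
 *
 *	os_ref_release(&obj->ref);	// drops the last reference
 *	os_ref_retain(&obj->ref);	// count was 0: panics "used unsafely when zero"
 */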

#if OS_REFCNT_DEBUG
/* Record a retain/release in the nearest ancestor group that has a log. */
static void
ref_log_op(struct os_refgrp *grp, void *elem, int op)
{
	if (!ref_debug_enable || grp == NULL) {
		return;
	}

	if (grp->grp_log == NULL) {
		ref_log_op(grp->grp_parent, elem, op);
		return;
	}

	uintptr_t bt[REFLOG_BTDEPTH];
	uint32_t nframes = backtrace(bt, REFLOG_BTDEPTH);
	btlog_add_entry((btlog_t *)grp->grp_log, elem, op, (void **)bt, nframes);
}

/* Forget all log entries for an element on its final release. */
static void
ref_log_drop(struct os_refgrp *grp, void *elem)
{
	if (!ref_debug_enable || grp == NULL) {
		return;
	}

	if (grp->grp_log == NULL) {
		ref_log_drop(grp->grp_parent, elem);
		return;
	}

	btlog_remove_entries_for_element(grp->grp_log, elem);
}

/* Create this group's btlog if the group is named in the rlog boot-arg. */
static void
ref_log_init(struct os_refgrp *grp)
{
	if (grp->grp_log != NULL) {
		return;
	}

	char grpbuf[128];
	char *refgrp = grpbuf;
	if (!PE_parse_boot_argn("rlog", refgrp, sizeof(grpbuf))) {
		return;
	}

	/*
	 * Enable refcount statistics if the rlog boot-arg is present,
	 * even when no specific group is logged.
	 */
	ref_debug_enable = true;

	const char *g;
	while ((g = strsep(&refgrp, ",")) != NULL) {
		if (strcmp(g, grp->grp_name) == 0) {
			/* enable logging on this refgrp */
			grp->grp_log = btlog_create(ref_log_nrecords, REFLOG_BTDEPTH, true);
			assert(grp->grp_log);
			return;
		}
	}
}
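
/*
 * Example: booting with rlog=mygrp1,mygrp2 turns on refcount statistics
 * globally and attaches a backtrace log to each named group (the group names
 * here are illustrative).
 */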

/*
 * Attach a new refcnt to a group.
 */
static void
ref_attach_to_group(struct os_refcnt *rc, struct os_refgrp *grp, os_ref_count_t init_count)
{
	if (grp == NULL) {
		return;
	}

	if (atomic_fetch_add_explicit(&grp->grp_children, 1, memory_order_relaxed) == 0) {
		/* First reference count object in this group. Check if we should enable
		 * refcount logging. */
		ref_log_init(grp);
	}

	atomic_fetch_add_explicit(&grp->grp_count, init_count, memory_order_relaxed);
	atomic_fetch_add_explicit(&grp->grp_retain_total, init_count, memory_order_relaxed);

	if (grp == &global_ref_group) {
		return;
	}

	if (grp->grp_parent == NULL) {
		grp->grp_parent = &global_ref_group;
	}

	ref_attach_to_group(rc, grp->grp_parent, init_count);
}
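
/*
 * Note: groups form a tree rooted at global_ref_group ("all"). Attach,
 * retain, and release all recurse upward, so every ancestor's statistics
 * include its descendants' counts.
 */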

static inline void
ref_retain_group(struct os_refgrp *grp)
{
	if (grp) {
		atomic_fetch_add_explicit(&grp->grp_count, 1, memory_order_relaxed);
		atomic_fetch_add_explicit(&grp->grp_retain_total, 1, memory_order_relaxed);
		ref_retain_group(grp->grp_parent);
	}
}

static inline void
ref_release_group(struct os_refgrp *grp, bool final)
{
	if (grp) {
		atomic_fetch_sub_explicit(&grp->grp_count, 1, memory_order_relaxed);
		atomic_fetch_add_explicit(&grp->grp_release_total, 1, memory_order_relaxed);
		if (final) {
			atomic_fetch_sub_explicit(&grp->grp_children, 1, memory_order_relaxed);
		}

		ref_release_group(grp->grp_parent, final);
	}
}
#endif

#undef os_ref_init_count
void
os_ref_init_count(struct os_refcnt *rc, struct os_refgrp __debug_only *grp, os_ref_count_t count)
{
	atomic_init(&rc->ref_count, count);

#if OS_REFCNT_DEBUG
	assert(count > 0);
	if (grp) {
		rc->ref_group = grp;
	} else {
		rc->ref_group = &global_ref_group;
	}

	ref_attach_to_group(rc, rc->ref_group, count);

	for (os_ref_count_t i = 0; i < count; i++) {
		ref_log_op(rc->ref_group, (void *)rc, REFLOG_RETAIN);
	}
#endif
}
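
/*
 * Hypothetical usage sketch (the object type and field names are
 * illustrative, not part of this file):
 *
 *	struct widget {
 *		struct os_refcnt w_refs;
 *	};
 *
 *	// start life with one reference, held by the creator
 *	os_ref_init_count(&w->w_refs, &my_subsystem_ref_group, 1);
 */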

void
os_ref_retain(struct os_refcnt *rc)
{
	os_ref_count_t old = atomic_fetch_add_explicit(&rc->ref_count, 1, memory_order_relaxed);
	os_ref_check_retain(rc, old);

#if OS_REFCNT_DEBUG
	if (__improbable(ref_debug_enable)) {
		ref_retain_group(rc->ref_group);
		ref_log_op(rc->ref_group, (void *)rc, REFLOG_RETAIN);
	}
#endif
}
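
/*
 * Note: os_ref_retain() assumes the caller already holds a reference; if the
 * count has reached zero it panics rather than resurrect the object. Use
 * os_ref_retain_try() below when the object may concurrently be dying.
 */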

bool
os_ref_retain_try(struct os_refcnt *rc)
{
	os_ref_count_t cur = os_ref_get_count(rc);

	while (1) {
		if (__improbable(cur == 0)) {
			return false;
		}

		os_ref_check_retain(rc, cur);

		if (atomic_compare_exchange_weak_explicit(&rc->ref_count, &cur, cur + 1,
		    memory_order_relaxed, memory_order_relaxed)) {
#if OS_REFCNT_DEBUG
			if (__improbable(ref_debug_enable)) {
				ref_retain_group(rc->ref_group);
				ref_log_op(rc->ref_group, (void *)rc, REFLOG_RETAIN);
			}
#endif
			return true;
		}
	}
}
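
/*
 * Hypothetical usage sketch: taking a reference on an object found in a
 * lookup table whose entries may be concurrently dying (the lock, table, and
 * field names are illustrative):
 *
 *	lck_mtx_lock(&table_lock);
 *	w = table_find(key);
 *	if (w != NULL && !os_ref_retain_try(&w->w_refs)) {
 *		w = NULL;	// already at zero; treat as not found
 *	}
 *	lck_mtx_unlock(&table_lock);
 */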

os_ref_count_t
os_ref_release_explicit(struct os_refcnt *rc, memory_order release_order, memory_order dealloc_order)
{
#if OS_REFCNT_DEBUG
	/*
	 * Take care not to use 'rc' after the decrement, because the object
	 * may be deallocated under us.
	 */
	struct os_refgrp *grp = rc->ref_group;
	if (__improbable(ref_debug_enable)) {
		ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
	}
#endif

	os_ref_count_t val = atomic_fetch_sub_explicit(&rc->ref_count, 1, release_order);
	os_ref_check_underflow(rc, val);
	if (__improbable(--val == 0)) {
		/* Final reference: order earlier releases before teardown via dealloc_order. */
		atomic_load_explicit(&rc->ref_count, dealloc_order);
#if OS_REFCNT_DEBUG
		if (__improbable(ref_debug_enable)) {
			ref_log_drop(grp, (void *)rc); /* rc is only used as an identifier */
		}
#endif
	}

#if OS_REFCNT_DEBUG
	if (__improbable(ref_debug_enable)) {
		ref_release_group(grp, !val);
	}
#endif

	return val;
}
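
/*
 * Hypothetical usage sketch: os_ref_release() (a wrapper in refcnt.h that
 * supplies release/acquire ordering) returns the new count; zero means the
 * caller dropped the last reference and owns teardown. widget_free() is an
 * illustrative destructor.
 *
 *	if (os_ref_release(&w->w_refs) == 0) {
 *		widget_free(w);
 *	}
 */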

void
os_ref_retain_locked(struct os_refcnt *rc)
{
	os_ref_count_t val = atomic_load_explicit(&rc->ref_count, memory_order_relaxed);
	os_ref_check_retain(rc, val);
	atomic_store_explicit(&rc->ref_count, ++val, memory_order_relaxed);

#if OS_REFCNT_DEBUG
	if (__improbable(ref_debug_enable)) {
		ref_retain_group(rc->ref_group);
		ref_log_op(rc->ref_group, (void *)rc, REFLOG_RETAIN);
	}
#endif
}

os_ref_count_t
os_ref_release_locked(struct os_refcnt *rc)
{
	os_ref_count_t val = atomic_load_explicit(&rc->ref_count, memory_order_relaxed);
	os_ref_check_underflow(rc, val);
	atomic_store_explicit(&rc->ref_count, --val, memory_order_relaxed);

#if OS_REFCNT_DEBUG
	if (__improbable(ref_debug_enable)) {
		ref_release_group(rc->ref_group, !val);
		ref_log_op(rc->ref_group, (void *)rc, REFLOG_RELEASE);
		if (val == 0) {
			ref_log_drop(rc->ref_group, (void *)rc);
		}
	}
#endif
	return val;
}
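
/*
 * Note: the _locked variants assume the caller serializes every operation on
 * this refcount with an external lock, which is why a plain relaxed
 * load/store pair (no atomic RMW) is sufficient. Hypothetical sketch (the
 * lock and names are illustrative):
 *
 *	lck_mtx_lock(&w->w_lock);
 *	if (os_ref_release_locked(&w->w_refs) == 0) {
 *		table_remove(w);
 *	}
 *	lck_mtx_unlock(&w->w_lock);
 */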