/* apple/xnu: libkern/os/refcnt.c (xnu-7195.101.1) */
#if KERNEL
#include <kern/assert.h>
#include <kern/debug.h>
#include <pexpert/pexpert.h>
#include <kern/btlog.h>
#include <kern/backtrace.h>
#include <libkern/libkern.h>
#endif
#include <os/atomic_private.h>

#include "refcnt.h"

#define OS_REFCNT_MAX_COUNT ((os_ref_count_t)0x0FFFFFFFUL)

#if OS_REFCNT_DEBUG
extern struct os_refgrp global_ref_group;
os_refgrp_decl(, global_ref_group, "all", NULL);

extern bool ref_debug_enable;
bool ref_debug_enable = false;
static const size_t ref_log_nrecords = 1000000;

#define REFLOG_BTDEPTH 10

__enum_closed_decl(reflog_op_t, uint8_t, {
	REFLOG_RETAIN = 1,
	REFLOG_RELEASE = 2
});

#define __debug_only
#else
# define __debug_only __unused
#endif /* OS_REFCNT_DEBUG */

void
os_ref_panic_live(void *rc)
{
	panic("os_refcnt: unexpected release of final reference (rc=%p)\n", rc);
	__builtin_unreachable();
}

__abortlike
static void
os_ref_panic_underflow(void *rc)
{
	panic("os_refcnt: underflow (rc=%p)\n", rc);
	__builtin_unreachable();
}

__abortlike
static void
os_ref_panic_resurrection(void *rc)
{
	panic("os_refcnt: attempted resurrection (rc=%p)\n", rc);
	__builtin_unreachable();
}

__abortlike
static void
os_ref_panic_overflow(void *rc)
{
	panic("os_refcnt: overflow (rc=%p)\n", rc);
	__builtin_unreachable();
}

static inline void
os_ref_check_underflow(void *rc, os_ref_count_t count, os_ref_count_t n)
{
	if (__improbable(count < n)) {
		os_ref_panic_underflow(rc);
	}
}

static inline void
os_ref_check_overflow(os_ref_atomic_t *rc, os_ref_count_t count)
{
	if (__improbable(count >= OS_REFCNT_MAX_COUNT)) {
		os_ref_panic_overflow(rc);
	}
}

static inline void
os_ref_check_retain(os_ref_atomic_t *rc, os_ref_count_t count, os_ref_count_t n)
{
	if (__improbable(count < n)) {
		os_ref_panic_resurrection(rc);
	}
	os_ref_check_overflow(rc, count);
}

#if OS_REFCNT_DEBUG
#if KERNEL
__attribute__((cold, noinline))
static void
ref_log_op(struct os_refgrp *grp, void *elem, reflog_op_t op)
{
	if (grp == NULL) {
		return;
	}

	if (grp->grp_log == NULL) {
		ref_log_op(grp->grp_parent, elem, op);
		return;
	}

	uintptr_t bt[REFLOG_BTDEPTH];
	uint32_t nframes = backtrace(bt, REFLOG_BTDEPTH, NULL);
	btlog_add_entry((btlog_t *)grp->grp_log, elem, op, (void **)bt, nframes);
}

__attribute__((cold, noinline))
static void
ref_log_drop(struct os_refgrp *grp, void *elem)
{
	if (!ref_debug_enable || grp == NULL) {
		return;
	}

	if (grp->grp_log == NULL) {
		ref_log_drop(grp->grp_parent, elem);
		return;
	}

	btlog_remove_entries_for_element(grp->grp_log, elem);
}

__attribute__((cold, noinline))
static void
ref_log_init(struct os_refgrp *grp)
{
	if (grp->grp_log != NULL) {
		return;
	}

	char grpbuf[128];
	char *refgrp = grpbuf;
	if (!PE_parse_boot_argn("rlog", refgrp, sizeof(grpbuf))) {
		return;
	}

	/*
	 * Enable refcount statistics if the rlog boot-arg is present,
	 * even when no specific group is logged.
	 */
	ref_debug_enable = true;

	const char *g;
	while ((g = strsep(&refgrp, ",")) != NULL) {
		if (strcmp(g, grp->grp_name) == 0) {
			/* enable logging on this refgrp */
			grp->grp_log = btlog_create(ref_log_nrecords, REFLOG_BTDEPTH, true);
			return;
		}
	}
}
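
/*
 * Illustrative note (not part of the original source): ref_log_init() is
 * driven by the "rlog" boot-arg, whose value is a comma-separated list of
 * refgroup names.  For example, booting with
 *
 *	rlog=my_refgrp,other_refgrp
 *
 * enables backtrace logging for groups declared with those names (the
 * names here are hypothetical).  Note that any "rlog" value, even one
 * matching no group, turns ref_debug_enable on.
 */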
#else

#ifndef ref_log_init
static inline void
ref_log_init(struct os_refgrp *grp __unused)
{
}
#endif
#ifndef ref_log_op
static inline void
ref_log_op(struct os_refgrp *grp __unused, void *rc __unused, reflog_op_t op __unused)
{
}
#endif
#ifndef ref_log_drop
static inline void
ref_log_drop(struct os_refgrp *grp __unused, void *rc __unused)
{
}
#endif

#endif /* KERNEL */

/*
 * attach a new refcnt to a group
 */
__attribute__((cold, noinline))
static void
ref_attach_to_group(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t init_count)
{
	if (grp == NULL) {
		return;
	}

	if (atomic_fetch_add_explicit(&grp->grp_children, 1, memory_order_relaxed) == 0) {
		/* First reference count object in this group. Check if we should enable
		 * refcount logging. */
		ref_log_init(grp);
	}

	atomic_fetch_add_explicit(&grp->grp_count, init_count, memory_order_relaxed);
	atomic_fetch_add_explicit(&grp->grp_retain_total, init_count, memory_order_relaxed);

	if (grp == &global_ref_group) {
		return;
	}

	if (grp->grp_parent == NULL) {
		grp->grp_parent = &global_ref_group;
	}

	ref_attach_to_group(rc, grp->grp_parent, init_count);
}
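
/*
 * Illustrative sketch (assumed usage, mirroring the global_ref_group
 * declaration above): a subsystem can declare its own refgroup and pass it
 * to the refcount entry points.  Groups with no explicit parent are lazily
 * reparented to global_ref_group on first attach, so per-group statistics
 * roll up into the "all" totals.  The name "my_refgrp" is hypothetical.
 *
 *	os_refgrp_decl(static, my_refgrp, "my_refgrp", NULL);
 */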

static void
ref_retain_group(struct os_refgrp *grp)
{
	if (grp) {
		atomic_fetch_add_explicit(&grp->grp_count, 1, memory_order_relaxed);
		atomic_fetch_add_explicit(&grp->grp_retain_total, 1, memory_order_relaxed);
		ref_retain_group(grp->grp_parent);
	}
}

__attribute__((cold, noinline))
static void
ref_release_group(struct os_refgrp *grp, bool final)
{
	if (grp) {
		atomic_fetch_sub_explicit(&grp->grp_count, 1, memory_order_relaxed);
		atomic_fetch_add_explicit(&grp->grp_release_total, 1, memory_order_relaxed);
		if (final) {
			atomic_fetch_sub_explicit(&grp->grp_children, 1, memory_order_relaxed);
		}

		ref_release_group(grp->grp_parent, final);
	}
}

__attribute__((cold, noinline))
static void
ref_init_debug(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t count)
{
	ref_attach_to_group(rc, grp, count);

	for (os_ref_count_t i = 0; i < count; i++) {
		ref_log_op(grp, (void *)rc, REFLOG_RETAIN);
	}
}

__attribute__((cold, noinline))
static void
ref_retain_debug(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	ref_retain_group(grp);
	ref_log_op(grp, (void *)rc, REFLOG_RETAIN);
}
#endif

void
os_ref_init_count_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t count)
{
	os_ref_check_underflow(rc, count, 1);
	atomic_init(rc, count);

#if OS_REFCNT_DEBUG
	if (__improbable(ref_debug_enable && grp)) {
		ref_init_debug(rc, grp, count);
	}
#endif
}
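
/*
 * Usage sketch (illustrative only; "struct obj", "o", "my_refgrp" and
 * obj_free() are hypothetical).  A typical lifecycle built directly on the
 * internal entry points defined in this file:
 *
 *	struct obj {
 *		os_ref_atomic_t ref;
 *	};
 *
 *	os_ref_init_count_internal(&o->ref, &my_refgrp, 1);	// starts at 1
 *	os_ref_retain_internal(&o->ref, &my_refgrp);		// 1 -> 2
 *	...
 *	if (os_ref_release_barrier_internal(&o->ref, &my_refgrp) == 0) {
 *		obj_free(o);	// this caller dropped the last reference
 *	}
 *
 * Callers normally reach these through the wrappers in refcnt.h rather
 * than calling the _internal symbols directly.
 */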

void
os_ref_retain_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	os_ref_count_t old = atomic_fetch_add_explicit(rc, 1, memory_order_relaxed);
	os_ref_check_retain(rc, old, 1);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif
}

bool
os_ref_retain_try_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	os_ref_count_t cur, next;

	os_atomic_rmw_loop(rc, cur, next, relaxed, {
		if (__improbable(cur == 0)) {
			os_atomic_rmw_loop_give_up(return false);
		}

		next = cur + 1;
	});

	os_ref_check_overflow(rc, cur);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif

	return true;
}
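
/*
 * Illustrative note: unlike os_ref_retain_internal(), the try variant
 * fails (returns false) instead of panicking when the count has already
 * reached zero, so it suits lookup paths that can race with the final
 * release (hypothetical names):
 *
 *	if (!os_ref_retain_try_internal(&o->ref, &my_refgrp)) {
 *		return NULL;	// object is being torn down; leave it alone
 *	}
 */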

__attribute__((always_inline))
static inline os_ref_count_t
_os_ref_release_inline(os_ref_atomic_t *rc, os_ref_count_t n,
    struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
	os_ref_count_t val;

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		/*
		 * Care not to use 'rc' after the decrement because it might be deallocated
		 * under us.
		 */
		ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
	}
#endif

	val = atomic_fetch_sub_explicit(rc, n, release_order);
	os_ref_check_underflow(rc, val, n);
	val -= n;
	if (__improbable(val < n)) {
		atomic_load_explicit(rc, dealloc_order);
	}

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		if (val < n) {
			ref_log_drop(grp, (void *)rc); /* rc is only used as an identifier */
		}
		ref_release_group(grp, !val);
	}
#endif

	return val;
}
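
/*
 * Note on the ordering above (descriptive comment, not in the original
 * source): the decrement uses 'release_order' so that all of this thread's
 * prior writes to the object become visible before the count drops, and
 * the extra atomic_load_explicit() with 'dealloc_order' on what may be the
 * final release provides the matching acquire, so the thread that goes on
 * to free the object observes writes made under references dropped on
 * other CPUs.
 */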

#if OS_REFCNT_DEBUG
__attribute__((noinline))
static os_ref_count_t
os_ref_release_n_internal(os_ref_atomic_t *rc, os_ref_count_t n,
    struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
	// Legacy exported interface with bad codegen due to the barriers
	// not being immediate
	//
	// Also serves as the debug function
	return _os_ref_release_inline(rc, n, grp, release_order, dealloc_order);
}
#endif

__attribute__((noinline))
os_ref_count_t
os_ref_release_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
	// Legacy exported interface with bad codegen due to the barriers
	// not being immediate
	//
	// Also serves as the debug function
	return _os_ref_release_inline(rc, 1, grp, release_order, dealloc_order);
}

os_ref_count_t
os_ref_release_barrier_internal(os_ref_atomic_t *rc,
    struct os_refgrp * __debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		return os_ref_release_internal(rc, grp,
		           memory_order_release, memory_order_acquire);
	}
#endif
	return _os_ref_release_inline(rc, 1, NULL,
	           memory_order_release, memory_order_acquire);
}

os_ref_count_t
os_ref_release_relaxed_internal(os_ref_atomic_t *rc,
    struct os_refgrp * __debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		return os_ref_release_internal(rc, grp,
		           memory_order_relaxed, memory_order_relaxed);
	}
#endif
	return _os_ref_release_inline(rc, 1, NULL,
	           memory_order_relaxed, memory_order_relaxed);
}

void
os_ref_retain_locked_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	os_ref_count_t val = os_ref_get_count_internal(rc);
	os_ref_check_retain(rc, val, 1);
	atomic_store_explicit(rc, ++val, memory_order_relaxed);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif
}

os_ref_count_t
os_ref_release_locked_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	os_ref_count_t val = os_ref_get_count_internal(rc);
	os_ref_check_underflow(rc, val, 1);
	atomic_store_explicit(rc, --val, memory_order_relaxed);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_release_group(grp, !val);
		ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
		if (val == 0) {
			ref_log_drop(grp, (void *)rc);
		}
	}
#endif

	return val;
}
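
/*
 * Illustrative note: the _locked variants above use plain relaxed loads
 * and stores with no atomic read-modify-write, so the caller must hold a
 * lock that serializes every retain and release on this refcount, e.g.
 * (hypothetical mutex):
 *
 *	lck_mtx_lock(&obj_lock);
 *	os_ref_retain_locked_internal(&o->ref, &my_refgrp);
 *	lck_mtx_unlock(&obj_lock);
 */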

/*
 * Bitwise API
 */

#undef os_ref_init_count_mask
void
os_ref_init_count_mask(os_ref_atomic_t *rc, uint32_t b,
    struct os_refgrp *__debug_only grp,
    os_ref_count_t init_count, uint32_t init_bits)
{
	assert(init_bits < (1U << b));
	atomic_init(rc, (init_count << b) | init_bits);
	os_ref_check_underflow(rc, (init_count << b), 1u << b);

#if OS_REFCNT_DEBUG
	if (__improbable(ref_debug_enable && grp)) {
		ref_init_debug(rc, grp, init_count);
	}
#endif
}
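
/*
 * Worked example (illustrative): with b = 2, the low two bits of the word
 * are caller-owned flag bits and the reference count lives in the
 * remaining high bits.  os_ref_init_count_mask(rc, 2, grp, 1, 0x1) stores
 * (1 << 2) | 0x1 = 5: a count of one with flag bit 0 set.  One reference
 * is then worth n = 1 << 2 = 4, which is the unit the retain/release mask
 * routines below add and subtract, leaving the flag bits untouched.
 */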

__attribute__((always_inline))
static inline void
os_ref_retain_mask_inline(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp, memory_order mo)
{
	os_ref_count_t old = atomic_fetch_add_explicit(rc, n, mo);
	os_ref_check_retain(rc, old, n);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif
}

void
os_ref_retain_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
	os_ref_retain_mask_inline(rc, n, grp, memory_order_relaxed);
}

void
os_ref_retain_acquire_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
	os_ref_retain_mask_inline(rc, n, grp, memory_order_acquire);
}

uint32_t
os_ref_release_barrier_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		return os_ref_release_n_internal(rc, n, grp,
		           memory_order_release, memory_order_acquire);
	}
#endif

	return _os_ref_release_inline(rc, n, NULL,
	           memory_order_release, memory_order_acquire);
}

uint32_t
os_ref_release_relaxed_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		return os_ref_release_n_internal(rc, n, grp,
		           memory_order_relaxed, memory_order_relaxed);
	}
#endif

	return _os_ref_release_inline(rc, n, NULL,
	           memory_order_relaxed, memory_order_relaxed);
}

bool
os_ref_retain_try_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    uint32_t reject_mask, struct os_refgrp *__debug_only grp)
{
	os_ref_count_t cur, next;

	os_atomic_rmw_loop(rc, cur, next, relaxed, {
		if (__improbable(cur < n || (cur & reject_mask))) {
			os_atomic_rmw_loop_give_up(return false);
		}
		next = cur + n;
	});

	os_ref_check_overflow(rc, cur);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif

	return true;
}

bool
os_ref_retain_try_acquire_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    uint32_t reject_mask, struct os_refgrp *__debug_only grp)
{
	os_ref_count_t cur, next;

	os_atomic_rmw_loop(rc, cur, next, acquire, {
		if (__improbable(cur < n || (cur & reject_mask))) {
			os_atomic_rmw_loop_give_up(return false);
		}
		next = cur + n;
	});

	os_ref_check_overflow(rc, cur);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif

	return true;
}