/* libkern/os/refcnt.c (xnu-7195.101.1) */
#if KERNEL
#include <kern/assert.h>
#include <kern/debug.h>
#include <pexpert/pexpert.h>
#include <kern/btlog.h>
#include <kern/backtrace.h>
#include <libkern/libkern.h>
#endif
#include <os/atomic_private.h>

#include "refcnt.h"

#define OS_REFCNT_MAX_COUNT     ((os_ref_count_t)0x0FFFFFFFUL)

#if OS_REFCNT_DEBUG
extern struct os_refgrp global_ref_group;
os_refgrp_decl(, global_ref_group, "all", NULL);

extern bool ref_debug_enable;
bool ref_debug_enable = false;
static const size_t ref_log_nrecords = 1000000;

#define REFLOG_BTDEPTH   10

__enum_closed_decl(reflog_op_t, uint8_t, {
	REFLOG_RETAIN  = 1,
	REFLOG_RELEASE = 2
});

#define __debug_only
#else
# define __debug_only __unused
#endif /* OS_REFCNT_DEBUG */

void
os_ref_panic_live(void *rc)
{
	panic("os_refcnt: unexpected release of final reference (rc=%p)\n", rc);
	__builtin_unreachable();
}

__abortlike
static void
os_ref_panic_underflow(void *rc)
{
	panic("os_refcnt: underflow (rc=%p)\n", rc);
	__builtin_unreachable();
}

__abortlike
static void
os_ref_panic_resurrection(void *rc)
{
	panic("os_refcnt: attempted resurrection (rc=%p)\n", rc);
	__builtin_unreachable();
}

__abortlike
static void
os_ref_panic_overflow(void *rc)
{
	panic("os_refcnt: overflow (rc=%p)\n", rc);
	__builtin_unreachable();
}

static inline void
os_ref_check_underflow(void *rc, os_ref_count_t count, os_ref_count_t n)
{
	if (__improbable(count < n)) {
		os_ref_panic_underflow(rc);
	}
}

static inline void
os_ref_check_overflow(os_ref_atomic_t *rc, os_ref_count_t count)
{
	if (__improbable(count >= OS_REFCNT_MAX_COUNT)) {
		os_ref_panic_overflow(rc);
	}
}

static inline void
os_ref_check_retain(os_ref_atomic_t *rc, os_ref_count_t count, os_ref_count_t n)
{
	if (__improbable(count < n)) {
		os_ref_panic_resurrection(rc);
	}
	os_ref_check_overflow(rc, count);
}
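
/*
 * Note on the checks above (explanatory, not from the original source):
 * callers pass the counter value observed *before* the atomic update.
 * For a retain of n, a prior value below n means the last reference was
 * already dropped (attempted resurrection); for a release of n, a prior
 * value below n means more references were released than were ever
 * taken (underflow).  Illustrative traces, pre-operation count on the
 * left:
 *
 *	1 -> retain  -> ok	0 -> retain  -> panic (resurrection)
 *	1 -> release -> ok	0 -> release -> panic (underflow)
 */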

#if OS_REFCNT_DEBUG
#if KERNEL
__attribute__((cold, noinline))
static void
ref_log_op(struct os_refgrp *grp, void *elem, reflog_op_t op)
{
	if (grp == NULL) {
		return;
	}

	if (grp->grp_log == NULL) {
		ref_log_op(grp->grp_parent, elem, op);
		return;
	}

	uintptr_t bt[REFLOG_BTDEPTH];
	uint32_t nframes = backtrace(bt, REFLOG_BTDEPTH, NULL);
	btlog_add_entry((btlog_t *)grp->grp_log, elem, op, (void **)bt, nframes);
}

__attribute__((cold, noinline))
static void
ref_log_drop(struct os_refgrp *grp, void *elem)
{
	if (!ref_debug_enable || grp == NULL) {
		return;
	}

	if (grp->grp_log == NULL) {
		ref_log_drop(grp->grp_parent, elem);
		return;
	}

	btlog_remove_entries_for_element(grp->grp_log, elem);
}

__attribute__((cold, noinline))
static void
ref_log_init(struct os_refgrp *grp)
{
	if (grp->grp_log != NULL) {
		return;
	}

	char grpbuf[128];
	char *refgrp = grpbuf;
	if (!PE_parse_boot_argn("rlog", refgrp, sizeof(grpbuf))) {
		return;
	}

	/*
	 * Enable refcount statistics if the rlog boot-arg is present,
	 * even when no specific group is logged.
	 */
	ref_debug_enable = true;

	const char *g;
	while ((g = strsep(&refgrp, ",")) != NULL) {
		if (strcmp(g, grp->grp_name) == 0) {
			/* enable logging on this refgrp */
			grp->grp_log = btlog_create(ref_log_nrecords, REFLOG_BTDEPTH, true);
			return;
		}
	}
}
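
/*
 * Boot-arg sketch (illustrative): `rlog` is a comma-separated list of
 * refgroup names matched against grp_name above.  For example,
 *
 *	rlog=task,thread
 *
 * enables refcount statistics globally and attaches a backtrace log to
 * any groups named "task" and "thread" (hypothetical example names;
 * any declared os_refgrp name can appear in the list).
 */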
#else

#ifndef ref_log_init
static inline void
ref_log_init(struct os_refgrp *grp __unused)
{
}
#endif
#ifndef ref_log_op
static inline void
ref_log_op(struct os_refgrp *grp __unused, void *rc __unused, reflog_op_t op __unused)
{
}
#endif
#ifndef ref_log_drop
static inline void
ref_log_drop(struct os_refgrp *grp __unused, void *rc __unused)
{
}
#endif

#endif /* KERNEL */

/*
 * attach a new refcnt to a group
 */
__attribute__((cold, noinline))
static void
ref_attach_to_group(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t init_count)
{
	if (grp == NULL) {
		return;
	}

	if (atomic_fetch_add_explicit(&grp->grp_children, 1, memory_order_relaxed) == 0) {
		/* First reference count object in this group. Check if we should enable
		 * refcount logging. */
		ref_log_init(grp);
	}

	atomic_fetch_add_explicit(&grp->grp_count, init_count, memory_order_relaxed);
	atomic_fetch_add_explicit(&grp->grp_retain_total, init_count, memory_order_relaxed);

	if (grp == &global_ref_group) {
		return;
	}

	if (grp->grp_parent == NULL) {
		grp->grp_parent = &global_ref_group;
	}

	ref_attach_to_group(rc, grp->grp_parent, init_count);
}

static void
ref_retain_group(struct os_refgrp *grp)
{
	if (grp) {
		atomic_fetch_add_explicit(&grp->grp_count, 1, memory_order_relaxed);
		atomic_fetch_add_explicit(&grp->grp_retain_total, 1, memory_order_relaxed);
		ref_retain_group(grp->grp_parent);
	}
}

__attribute__((cold, noinline))
static void
ref_release_group(struct os_refgrp *grp, bool final)
{
	if (grp) {
		atomic_fetch_sub_explicit(&grp->grp_count, 1, memory_order_relaxed);
		atomic_fetch_add_explicit(&grp->grp_release_total, 1, memory_order_relaxed);
		if (final) {
			atomic_fetch_sub_explicit(&grp->grp_children, 1, memory_order_relaxed);
		}

		ref_release_group(grp->grp_parent, final);
	}
}

__attribute__((cold, noinline))
static void
ref_init_debug(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t count)
{
	ref_attach_to_group(rc, grp, count);

	for (os_ref_count_t i = 0; i < count; i++) {
		ref_log_op(grp, (void *)rc, REFLOG_RETAIN);
	}
}

__attribute__((cold, noinline))
static void
ref_retain_debug(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	ref_retain_group(grp);
	ref_log_op(grp, (void *)rc, REFLOG_RETAIN);
}
#endif

void
os_ref_init_count_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t count)
{
	os_ref_check_underflow(rc, count, 1);
	atomic_init(rc, count);

#if OS_REFCNT_DEBUG
	if (__improbable(ref_debug_enable && grp)) {
		ref_init_debug(rc, grp, count);
	}
#endif
}
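
/*
 * Usage sketch for the plain counter API (illustrative only, assuming
 * the os_ref_*_raw wrappers declared in refcnt.h; `my_obj`, `my_refgrp`
 * and `my_obj_free` are hypothetical):
 *
 *	struct my_obj {
 *		os_ref_atomic_t refs;
 *	};
 *
 *	os_ref_init_raw(&obj->refs, &my_refgrp);        // count = 1
 *	os_ref_retain_raw(&obj->refs, &my_refgrp);      // count = 2
 *	if (os_ref_release_raw(&obj->refs, &my_refgrp) == 0) {
 *		my_obj_free(obj);   // 0 means the last reference dropped
 *	}
 */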

void
os_ref_retain_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	os_ref_count_t old = atomic_fetch_add_explicit(rc, 1, memory_order_relaxed);
	os_ref_check_retain(rc, old, 1);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif
}

bool
os_ref_retain_try_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	os_ref_count_t cur, next;

	os_atomic_rmw_loop(rc, cur, next, relaxed, {
		if (__improbable(cur == 0)) {
			os_atomic_rmw_loop_give_up(return false);
		}

		next = cur + 1;
	});

	os_ref_check_overflow(rc, cur);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif

	return true;
}
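
/*
 * Usage sketch (illustrative; the table, lock, and my_refgrp are
 * hypothetical): try-retain is the building block for lookups that can
 * race with the final release.  A false return means the count was
 * already zero, so the object is being torn down and must not be used.
 *
 *	lck_mtx_lock(&table_lock);
 *	obj = table_find(key);
 *	if (obj && !os_ref_retain_try_raw(&obj->refs, &my_refgrp)) {
 *		obj = NULL;     // lost the race with the final release
 *	}
 *	lck_mtx_unlock(&table_lock);
 */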

__attribute__((always_inline))
static inline os_ref_count_t
_os_ref_release_inline(os_ref_atomic_t *rc, os_ref_count_t n,
    struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
	os_ref_count_t val;

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		/*
		 * Care not to use 'rc' after the decrement because it might be deallocated
		 * under us.
		 */
		ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
	}
#endif

	val = atomic_fetch_sub_explicit(rc, n, release_order);
	os_ref_check_underflow(rc, val, n);
	val -= n;
	if (__improbable(val < n)) {
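		/*
		 * The count dropped below one whole reference: this is
		 * the final release.  The load with `dealloc_order`
		 * (acquire in the barrier variants) synchronizes with
		 * every earlier release so the caller may safely
		 * deallocate; non-final releases skip it.
		 */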
		atomic_load_explicit(rc, dealloc_order);
	}

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		if (val < n) {
			ref_log_drop(grp, (void *)rc); /* rc is only used as an identifier */
		}
		ref_release_group(grp, !val);
	}
#endif

	return val;
}

#if OS_REFCNT_DEBUG
__attribute__((noinline))
static os_ref_count_t
os_ref_release_n_internal(os_ref_atomic_t *rc, os_ref_count_t n,
    struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
	// Legacy exported interface with bad codegen due to the barriers
	// not being immediate
	//
	// Also serves as the debug function
	return _os_ref_release_inline(rc, n, grp, release_order, dealloc_order);
}
#endif

__attribute__((noinline))
os_ref_count_t
os_ref_release_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
	// Legacy exported interface with bad codegen due to the barriers
	// not being immediate
	//
	// Also serves as the debug function
	return _os_ref_release_inline(rc, 1, grp, release_order, dealloc_order);
}

os_ref_count_t
os_ref_release_barrier_internal(os_ref_atomic_t *rc,
    struct os_refgrp * __debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		return os_ref_release_internal(rc, grp,
		           memory_order_release, memory_order_acquire);
	}
#endif
	return _os_ref_release_inline(rc, 1, NULL,
	           memory_order_release, memory_order_acquire);
}

os_ref_count_t
os_ref_release_relaxed_internal(os_ref_atomic_t *rc,
    struct os_refgrp * __debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		return os_ref_release_internal(rc, grp,
		           memory_order_relaxed, memory_order_relaxed);
	}
#endif
	return _os_ref_release_inline(rc, 1, NULL,
	           memory_order_relaxed, memory_order_relaxed);
}
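
/*
 * Choosing between the two flavors above (explanatory note, not from
 * the original source): the barrier variant releases on the decrement
 * and acquires once the count reaches zero, so all accesses to the
 * object are ordered before its deallocation; it is the safe default.
 * The relaxed variant skips that ordering and is only appropriate when
 * the caller synchronizes by other means, e.g. when the final release
 * provably happens under an external lock.
 */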

void
os_ref_retain_locked_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	os_ref_count_t val = os_ref_get_count_internal(rc);
	os_ref_check_retain(rc, val, 1);
	atomic_store_explicit(rc, ++val, memory_order_relaxed);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif
}

os_ref_count_t
os_ref_release_locked_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	os_ref_count_t val = os_ref_get_count_internal(rc);
	os_ref_check_underflow(rc, val, 1);
	atomic_store_explicit(rc, --val, memory_order_relaxed);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_release_group(grp, !val);
		ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
		if (val == 0) {
			ref_log_drop(grp, (void *)rc);
		}
	}
#endif

	return val;
}
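
/*
 * The _locked variants above update the counter with a plain load/store
 * pair rather than an atomic RMW, so every operation on the counter
 * must happen under the same external lock.  Usage sketch (illustrative;
 * the lock, object, and my_refgrp are hypothetical):
 *
 *	lck_mtx_lock(&obj_lock);
 *	last = (os_ref_release_locked_raw(&obj->refs, &my_refgrp) == 0);
 *	lck_mtx_unlock(&obj_lock);
 *	if (last) {
 *		my_obj_free(obj);
 *	}
 */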

/*
 * Bitwise API
 */

#undef os_ref_init_count_mask
void
os_ref_init_count_mask(os_ref_atomic_t *rc, uint32_t b,
    struct os_refgrp *__debug_only grp,
    os_ref_count_t init_count, uint32_t init_bits)
{
	assert(init_bits < (1U << b));
	atomic_init(rc, (init_count << b) | init_bits);
	os_ref_check_underflow(rc, (init_count << b), 1u << b);

#if OS_REFCNT_DEBUG
	if (__improbable(ref_debug_enable && grp)) {
		ref_init_debug(rc, grp, init_count);
	}
#endif
}
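
/*
 * Layout note (explanatory, not from the original source): the bitwise
 * API packs a reference count and a few flag bits into one 32-bit word.
 * The low `b` bits hold the flags and the count occupies the remaining
 * high bits, so one reference corresponds to an increment of (1 << b).
 * Worked example with b = 2:
 *
 *	os_ref_init_count_mask(rc, 2, grp, 1, 0x1);
 *	// raw value = (1 << 2) | 0x1 = 0x5: count 1, flag bit 0 set
 *
 * Accordingly, the `n` passed to the retain/release paths below is a
 * pre-shifted delta, i.e. (references << b).
 */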

__attribute__((always_inline))
static inline void
os_ref_retain_mask_inline(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp, memory_order mo)
{
	os_ref_count_t old = atomic_fetch_add_explicit(rc, n, mo);
	os_ref_check_retain(rc, old, n);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif
}

void
os_ref_retain_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
	os_ref_retain_mask_inline(rc, n, grp, memory_order_relaxed);
}

void
os_ref_retain_acquire_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
	os_ref_retain_mask_inline(rc, n, grp, memory_order_acquire);
}

uint32_t
os_ref_release_barrier_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		return os_ref_release_n_internal(rc, n, grp,
		           memory_order_release, memory_order_acquire);
	}
#endif

	return _os_ref_release_inline(rc, n, NULL,
	           memory_order_release, memory_order_acquire);
}

uint32_t
os_ref_release_relaxed_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		return os_ref_release_n_internal(rc, n, grp,
		           memory_order_relaxed, memory_order_relaxed);
	}
#endif

	return _os_ref_release_inline(rc, n, NULL,
	           memory_order_relaxed, memory_order_relaxed);
}

bool
os_ref_retain_try_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    uint32_t reject_mask, struct os_refgrp *__debug_only grp)
{
	os_ref_count_t cur, next;

	os_atomic_rmw_loop(rc, cur, next, relaxed, {
		if (__improbable(cur < n || (cur & reject_mask))) {
			os_atomic_rmw_loop_give_up(return false);
		}
		next = cur + n;
	});

	os_ref_check_overflow(rc, cur);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif

	return true;
}
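
/*
 * Note on reject_mask (explanatory, not from the original source): the
 * try-retain fails not only when no whole reference remains (cur < n)
 * but also when any bit in reject_mask is set.  That lets callers
 * reserve a low flag bit as a "no new references" marker: with b = 1
 * and reject_mask = 0x1, setting bit 0 makes every later try-retain
 * fail even while references are still outstanding.
 */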

bool
os_ref_retain_try_acquire_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    uint32_t reject_mask, struct os_refgrp *__debug_only grp)
{
	os_ref_count_t cur, next;

	os_atomic_rmw_loop(rc, cur, next, acquire, {
		if (__improbable(cur < n || (cur & reject_mask))) {
			os_atomic_rmw_loop_give_up(return false);
		}
		next = cur + n;
	});

	os_ref_check_overflow(rc, cur);

#if OS_REFCNT_DEBUG
	if (__improbable(grp && ref_debug_enable)) {
		ref_retain_debug(rc, grp);
	}
#endif

	return true;
}