/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _OS_REFCNT_H_
#define _OS_REFCNT_H_

/*
 * os_refcnt reference counting API
 *
 * Two flavors are provided: atomic and locked. Atomic internally uses C11 atomic
 * operations and requires no external synchronization, whereas the locked flavor
 * assumes the refcnt object is locked by the caller. It is NOT safe to
 * mix-and-match locked and atomic calls.
 *
 * 'refgrp's are a way to (hierarchically) group like refcount objects for
 * debugging purposes. The group keeps track of the total number and aggregate
 * reference count of member refcounts, and the "rlog=" boot-arg is used to enable
 * refcount logging by group name. Named groups can be created explicitly with
 * os_refgrp_decl(), or implicitly by passing NULL for the refgrp when
 * initializing a refcnt object. In the latter case, the group name is the same as
 * the function enclosing the init call. Groups are only available on DEV or DEBUG
 * builds, and are otherwise compiled out.
 */

#include <stdatomic.h>
#include <stdbool.h>
#include <os/base.h>

struct os_refcnt;
struct os_refgrp;
typedef struct os_refcnt os_refcnt_t;

/* type of the internal counter */
typedef uint32_t os_ref_count_t;
typedef _Atomic(os_ref_count_t) os_ref_atomic_t;

/*
 * OS_REF_INITIALIZER
 * OS_REF_ATOMIC_INITIALIZER
 *
 * Static initializers that create refcnt objects with safe initial values for use
 * between declaration and initialization (os_ref*_init()). Equivalent to zeroing.
 */

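/*
 * Illustrative sketch only (the 'gadget' object and gadget_bootstrap() are
 * hypothetical, and the use of OS_REF_INITIALIZER as a designated initializer
 * is an assumption): a statically declared refcount stays in a safe,
 * zero-like state until the real init call runs during bootstrap.
 *
 *	static struct gadget {
 *		os_refcnt_t ref;
 *	} gadget = {
 *		.ref = OS_REF_INITIALIZER,
 *	};
 *
 *	static void
 *	gadget_bootstrap(void)
 *	{
 *		os_ref_init(&gadget.ref, NULL);	// count becomes 1
 *	}
 */
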
#ifndef KERNEL
# include <stdlib.h>
# include <stdio.h>
# ifndef __improbable
#  define __improbable(x) x
# endif
# ifndef panic
#  define panic(x, ...) do { fprintf(stderr, x, __VA_ARGS__); abort(); } while (0)
# endif
#endif

#ifndef OS_REFCNT_DEBUG
# if DEVELOPMENT || DEBUG
#  define OS_REFCNT_DEBUG 1
# else
#  define OS_REFCNT_DEBUG 0
# endif
#endif

#if __has_attribute(diagnose_if)
# define os_error_if(cond, msg) __attribute__((diagnose_if((cond), (msg), "error")))
#else
# define os_error_if(...)
#endif

__BEGIN_DECLS

/*
 * os_ref_init: initialize an os_refcnt with a count of 1
 * os_ref_init_count: initialize an os_refcnt with a specific count >= 1
 */
#define os_ref_init(rc, grp) os_ref_init_count((rc), (grp), 1)
static void os_ref_init_count(struct os_refcnt *, struct os_refgrp *, os_ref_count_t count)
os_error_if(count == 0, "Reference count must be non-zero initialized");

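/*
 * Example sketch ('struct widget' and widget_setup() are hypothetical): a
 * newly created object typically starts with a single reference owned by its
 * creator. Passing NULL as the group uses an implicit refgrp named after the
 * enclosing function (on DEV/DEBUG builds).
 *
 *	struct widget {
 *		os_refcnt_t w_refs;
 *	};
 *
 *	static void
 *	widget_setup(struct widget *w)
 *	{
 *		os_ref_init(&w->w_refs, NULL);	// count = 1
 *	}
 */
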
/*
 * os_refgrp_decl(qual, var, name, parent): declare a refgroup object 'var' with
 * the given name string and parent group.
 */
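
/*
 * Example sketch (the group and object names are hypothetical):
 *
 *	os_refgrp_decl(static, widget_refgrp, "widgets", NULL);
 *
 *	// at object initialization:
 *	os_ref_init(&w->w_refs, &widget_refgrp);
 */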

/*
 *
 * os_ref_retain: acquire a reference (increment reference count by 1) atomically.
 *
 * os_ref_release: release a reference (decrement reference count) atomically and
 *	return the new count. Memory is synchronized such that the dealloc block
 *	(i.e. code handling the final release() == 0 call) sees up-to-date memory
 *	with respect to all prior release()s on the same refcnt object. This
 *	memory ordering is sufficient for most use cases.
 *
 * os_ref_release_relaxed: same as release() but with weaker relaxed memory ordering.
 *	This can be used when the dealloc block is already synchronized with other
 *	accesses to the object (for example, with a lock).
 *
 * os_ref_release_live: release a reference that is guaranteed not to be the last one.
 */
static void os_ref_retain(struct os_refcnt *);
static os_ref_count_t os_ref_release(struct os_refcnt *) OS_WARN_RESULT;
static os_ref_count_t os_ref_release_relaxed(struct os_refcnt *) OS_WARN_RESULT;
static void os_ref_release_live(struct os_refcnt *);
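
/*
 * Typical usage sketch (widget_free() is a hypothetical teardown routine):
 * every retain is balanced by exactly one release, and the caller that
 * observes the count reaching zero deallocates the object.
 *
 *	void
 *	widget_reference(struct widget *w)
 *	{
 *		os_ref_retain(&w->w_refs);
 *	}
 *
 *	void
 *	widget_deallocate(struct widget *w)
 *	{
 *		if (os_ref_release(&w->w_refs) == 0) {
 *			// last reference dropped; prior releases are visible here
 *			widget_free(w);
 *		}
 *	}
 */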

/*
 * os_ref_retain_try: a variant of atomic retain that fails for objects with a
 * zero reference count. The caller must therefore ensure that the object
 * remains alive for any possible retain_try() caller, usually by using a
 * lock protecting both the retain and dealloc paths. This variant is useful
 * for objects stored in a collection, because no lock is required on the
 * release() side until the object is deallocated.
 */
static bool os_ref_retain_try(struct os_refcnt *) OS_WARN_RESULT;
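
/*
 * Sketch of the collection pattern described above (the table, its lock and
 * the helper routines are hypothetical): lookups hold the table lock and use
 * retain_try(); a failed retain_try() means the object is already headed for
 * deallocation and must be skipped. The release path only takes the lock
 * once the count has hit zero.
 *
 *	struct widget *
 *	widget_lookup(int key)
 *	{
 *		lck_mtx_lock(&table_lock);
 *		struct widget *w = table_find(key);
 *		if (w != NULL && !os_ref_retain_try(&w->w_refs)) {
 *			w = NULL;	// count was zero: object is being torn down
 *		}
 *		lck_mtx_unlock(&table_lock);
 *		return w;
 *	}
 *
 *	void
 *	widget_deallocate(struct widget *w)
 *	{
 *		if (os_ref_release(&w->w_refs) == 0) {
 *			lck_mtx_lock(&table_lock);
 *			table_remove(w);
 *			lck_mtx_unlock(&table_lock);
 *			widget_free(w);
 *		}
 *	}
 */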

/*
 * os_ref_retain_locked: acquire a reference on an object protected by a held
 * lock. The caller must ensure mutual exclusivity of retain_locked() and
 * release_locked() calls on the same object.
 *
 * os_ref_release_locked: release a reference on an object protected by a held
 * lock.
 */
static void os_ref_retain_locked(struct os_refcnt *);
static os_ref_count_t os_ref_release_locked(struct os_refcnt *) OS_WARN_RESULT;
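
/*
 * Sketch (the 'w_lock' mutex and widget_free() are hypothetical): the same
 * lock must be held around every locked-flavor call on a given object, which
 * serializes retains against the final release.
 *
 *	void
 *	widget_release(struct widget *w)
 *	{
 *		lck_mtx_lock(&w->w_lock);
 *		os_ref_count_t count = os_ref_release_locked(&w->w_refs);
 *		lck_mtx_unlock(&w->w_lock);
 *		if (count == 0) {
 *			widget_free(w);	// no other references remain
 *		}
 *	}
 */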

/*
 * os_ref_get_count: return the current reference count. This is unsafe for
 * synchronization.
 */
static os_ref_count_t os_ref_get_count(struct os_refcnt *rc);


#if XNU_KERNEL_PRIVATE
/*
 * Raw API that uses a plain atomic counter (os_ref_atomic_t) and a separate
 * refgroup. This can be used in situations where the refcount object must be
 * fixed size, for example for embedding in structures with ABI stability
 * requirements.
 */

#define os_ref_init_raw(rc, grp) os_ref_init_count_raw((rc), (grp), 1)
static void os_ref_init_count_raw(os_ref_atomic_t *, struct os_refgrp *, os_ref_count_t count)
os_error_if(count == 0, "Reference count must be non-zero initialized");
static void os_ref_retain_raw(os_ref_atomic_t *, struct os_refgrp *);
static os_ref_count_t os_ref_release_raw(os_ref_atomic_t *, struct os_refgrp *) OS_WARN_RESULT;
static os_ref_count_t os_ref_release_relaxed_raw(os_ref_atomic_t *, struct os_refgrp *) OS_WARN_RESULT;
static void os_ref_release_live_raw(os_ref_atomic_t *, struct os_refgrp *);
static bool os_ref_retain_try_raw(os_ref_atomic_t *, struct os_refgrp *) OS_WARN_RESULT;
static void os_ref_retain_locked_raw(os_ref_atomic_t *, struct os_refgrp *);
static os_ref_count_t os_ref_release_locked_raw(os_ref_atomic_t *, struct os_refgrp *) OS_WARN_RESULT;
static os_ref_count_t os_ref_get_count_raw(os_ref_atomic_t *rc);
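
/*
 * Sketch (the embedding structure and group are hypothetical): the raw flavor
 * keeps the counter to a fixed four bytes; the refgrp is supplied on every
 * call instead of being stored in the object.
 *
 *	os_refgrp_decl(static, gizmo_refgrp, "gizmos", NULL);
 *
 *	struct gizmo {
 *		os_ref_atomic_t g_refs;	// fixed-size, ABI-stable counter
 *	};
 *
 *	static void
 *	gizmo_setup(struct gizmo *g)
 *	{
 *		os_ref_init_raw(&g->g_refs, &gizmo_refgrp);
 *	}
 *
 *	static void
 *	gizmo_deallocate(struct gizmo *g)
 *	{
 *		if (os_ref_release_raw(&g->g_refs, &gizmo_refgrp) == 0) {
 *			gizmo_free(g);	// hypothetical teardown
 *		}
 *	}
 */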


/*
 * Bitwise API: like the raw API, but allows some bits in the refcount value to be
 * reserved for other purposes. 'b' defines the number of trailing (LSB) reserved
 * bits, which the refcnt_raw API will never modify (except at init()).
 *
 * It is assumed that users of this API always use atomic ops on the
 * os_ref_atomic_t (or hold a lock for the locked variants), and never modify the
 * top (32 - 'b') bits.
 *
 * Due to guard bits, the maximum reference count is 2^(28 - 'b') - 1, and the
 * maximum 'b' is 26 bits. This API can also be used just to limit the max
 * refcount.
 */

/* Initialize the reference count and reserved bits */
#define os_ref_init_mask(rc, grp, b) os_ref_init_count_mask((rc), (grp), 1, 0, (b))
void os_ref_init_count_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t init_count,
    os_ref_count_t init_bits, os_ref_count_t b)
os_error_if(init_count == 0, "Reference count must be non-zero initialized")
os_error_if(b > 26, "Bitwise reference count limited to 26 bits")
os_error_if(init_bits >= (1U << b), "Bits out of range");

void os_ref_retain_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b);
static os_ref_count_t os_ref_release_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b) OS_WARN_RESULT;
static os_ref_count_t os_ref_release_relaxed_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b) OS_WARN_RESULT;
static void os_ref_release_live_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b);
bool os_ref_retain_try_mask(os_ref_atomic_t *, struct os_refgrp *grp, os_ref_count_t b) OS_WARN_RESULT;
void os_ref_retain_locked_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b);
os_ref_count_t os_ref_release_locked_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b) OS_WARN_RESULT;
os_ref_count_t os_ref_get_count_mask(os_ref_atomic_t *rc, os_ref_count_t b);
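
/*
 * Sketch (names are hypothetical): with 'b' = 2, the two low bits are left
 * for the owner to manage with its own atomic ops, and the remaining bits
 * carry the reference count, up to 2^26 - 1.
 *
 *	#define THING_BITS	2
 *
 *	os_refgrp_decl(static, thing_refgrp, "things", NULL);
 *	static os_ref_atomic_t thing_refs;	// count above 2 reserved flag bits
 *
 *	static void
 *	thing_setup(void)
 *	{
 *		// count = 1, reserved bits initialized to 0
 *		os_ref_init_count_mask(&thing_refs, &thing_refgrp, 1, 0, THING_BITS);
 *	}
 *
 *	static void
 *	thing_reference(void)
 *	{
 *		os_ref_retain_mask(&thing_refs, &thing_refgrp, THING_BITS);
 *	}
 */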

#endif /* XNU_KERNEL_PRIVATE */

__END_DECLS

#include <os/refcnt_internal.h>
#endif