/*
 * Source: apple/libdispatch.git (libdispatch-339.90.1) — src/object.c
 */
1 /*
2 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
21 #include "internal.h"
22
23 #pragma mark -
24 #pragma mark _os_object_t
25
26 unsigned long
27 _os_object_retain_count(_os_object_t obj)
28 {
29 int xref_cnt = obj->os_obj_xref_cnt;
30 if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
31 return ULONG_MAX; // global object
32 }
33 return (unsigned long)(xref_cnt + 1);
34 }
35
DISPATCH_NOINLINE
_os_object_t
_os_object_retain_internal(_os_object_t obj)
{
	// Take an internal (runtime-owned, non-client) reference on obj.
	// Racy peek first: global objects never change their refcount, so
	// they can be detected without an atomic op.
	int ref_cnt = obj->os_obj_ref_cnt;
	if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
		return obj; // global object
	}
	// Relaxed ordering: a retain needs no synchronization with other
	// memory accesses, only atomicity of the increment itself.
	ref_cnt = dispatch_atomic_inc2o(obj, os_obj_ref_cnt, relaxed);
	if (slowpath(ref_cnt <= 0)) {
		// New count <= 0 means the pre-increment count was at or below
		// the disposal threshold: someone retained a dead object.
		DISPATCH_CRASH("Resurrection of an object");
	}
	return obj;
}
50
DISPATCH_NOINLINE
void
_os_object_release_internal(_os_object_t obj)
{
	// Drop an internal reference; disposes the object when the last
	// internal reference goes away.
	int ref_cnt = obj->os_obj_ref_cnt;
	if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
		return; // global object
	}
	ref_cnt = dispatch_atomic_dec2o(obj, os_obj_ref_cnt, relaxed);
	if (fastpath(ref_cnt >= 0)) {
		return; // other internal references remain (count is biased by -1)
	}
	if (slowpath(ref_cnt < -1)) {
		// Decremented past the disposal value of -1.
		DISPATCH_CRASH("Over-release of an object");
	}
#if DISPATCH_DEBUG
	// The last internal reference must outlive all external ones;
	// xref_cnt >= 0 here means a client still holds the object.
	if (slowpath(obj->os_obj_xref_cnt >= 0)) {
		DISPATCH_CRASH("Release while external references exist");
	}
#endif
	return _os_object_dispose(obj);
}
73
DISPATCH_NOINLINE
_os_object_t
_os_object_retain(_os_object_t obj)
{
	// Take an external (client) reference on obj. Mirrors
	// _os_object_retain_internal but operates on the xref count and
	// blames the client (not the library) on misuse.
	int xref_cnt = obj->os_obj_xref_cnt;
	if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
		return obj; // global object
	}
	xref_cnt = dispatch_atomic_inc2o(obj, os_obj_xref_cnt, relaxed);
	if (slowpath(xref_cnt <= 0)) {
		// Retain after the last external release: client bug.
		_OS_OBJECT_CLIENT_CRASH("Resurrection of an object");
	}
	return obj;
}
88
DISPATCH_NOINLINE
void
_os_object_release(_os_object_t obj)
{
	// Drop an external (client) reference; triggers xref disposal when
	// the last external reference goes away.
	int xref_cnt = obj->os_obj_xref_cnt;
	if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
		return; // global object
	}
	xref_cnt = dispatch_atomic_dec2o(obj, os_obj_xref_cnt, relaxed);
	if (fastpath(xref_cnt >= 0)) {
		return; // other external references remain (count is biased by -1)
	}
	if (slowpath(xref_cnt < -1)) {
		// Decremented past the disposal value of -1: client bug.
		_OS_OBJECT_CLIENT_CRASH("Over-release of an object");
	}
	return _os_object_xref_dispose(obj);
}
106
107 bool
108 _os_object_retain_weak(_os_object_t obj)
109 {
110 int xref_cnt = obj->os_obj_xref_cnt;
111 if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
112 return true; // global object
113 }
114 retry:
115 if (slowpath(xref_cnt == -1)) {
116 return false;
117 }
118 if (slowpath(xref_cnt < -1)) {
119 goto overrelease;
120 }
121 if (slowpath(!dispatch_atomic_cmpxchgvw2o(obj, os_obj_xref_cnt, xref_cnt,
122 xref_cnt + 1, &xref_cnt, relaxed))) {
123 goto retry;
124 }
125 return true;
126 overrelease:
127 _OS_OBJECT_CLIENT_CRASH("Over-release of an object");
128 }
129
130 bool
131 _os_object_allows_weak_reference(_os_object_t obj)
132 {
133 int xref_cnt = obj->os_obj_xref_cnt;
134 if (slowpath(xref_cnt == -1)) {
135 return false;
136 }
137 if (slowpath(xref_cnt < -1)) {
138 _OS_OBJECT_CLIENT_CRASH("Over-release of an object");
139 }
140 return true;
141 }
142
143 #pragma mark -
144 #pragma mark dispatch_object_t
145
void *
_dispatch_alloc(const void *vtable, size_t size)
{
	// Allocate a dispatch object of `size` bytes whose vtable/isa is
	// installed immediately (no lazy class realization).
	return _os_object_alloc_realized(vtable, size);
}
151
void
dispatch_retain(dispatch_object_t dou)
{
	// Public retain. DISPATCH_OBJECT_TFB diverts to the ObjC
	// implementation when the object is toll-free bridged; otherwise
	// fall through to the C refcounting path.
	DISPATCH_OBJECT_TFB(_dispatch_objc_retain, dou);
	(void)_os_object_retain(dou._os_obj);
}
158
void
_dispatch_retain(dispatch_object_t dou)
{
	// Internal retain: bumps the runtime-owned refcount, not the
	// client-visible one.
	(void)_os_object_retain_internal(dou._os_obj);
}
164
void
dispatch_release(dispatch_object_t dou)
{
	// Public release. DISPATCH_OBJECT_TFB diverts to the ObjC
	// implementation for toll-free bridged objects; otherwise use the
	// C external-refcount path.
	DISPATCH_OBJECT_TFB(_dispatch_objc_release, dou);
	_os_object_release(dou._os_obj);
}
171
void
_dispatch_release(dispatch_object_t dou)
{
	// Internal release: drops the runtime-owned refcount, not the
	// client-visible one.
	_os_object_release_internal(dou._os_obj);
}
177
static void
_dispatch_dealloc(dispatch_object_t dou)
{
	// Final teardown. Capture everything needed after the object's
	// memory is freed before calling _os_object_dealloc.
	dispatch_queue_t tq = dou._do->do_targetq;
	dispatch_function_t func = dou._do->do_finalizer;
	void *ctxt = dou._do->do_ctxt;

	_os_object_dealloc(dou._os_obj);

	// NB: the finalizer is only invoked when a context was also set;
	// a finalizer with a NULL context is silently skipped.
	if (func && ctxt) {
		dispatch_async_f(tq, ctxt, func);
	}
	// Balance the reference on the target queue now that the object
	// no longer needs it.
	_dispatch_release(tq);
}
192
void
_dispatch_xref_dispose(dispatch_object_t dou)
{
	// Invoked when the last external (client) reference is dropped.
	if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) {
		// Arguments for and against this assert are within 6705399
		DISPATCH_CLIENT_CRASH("Release of a suspended object");
	}
#if !USE_OBJC
	// Let sources and runloop queues tear down state tied to the
	// external lifetime before the internal reference is dropped.
	if (dx_type(dou._do) == DISPATCH_SOURCE_KEVENT_TYPE) {
		_dispatch_source_xref_dispose(dou._ds);
	} else if (dou._dq->do_vtable == DISPATCH_VTABLE(queue_runloop)) {
		_dispatch_runloop_queue_xref_dispose(dou._dq);
	}
	// Drop the internal reference that backed the external count.
	// (Under USE_OBJC this release is handled elsewhere — note the
	// whole tail is compiled out.)
	return _dispatch_release(dou._os_obj);
#endif
}
209
void
_dispatch_dispose(dispatch_object_t dou)
{
	// Invoked when the last internal reference is dropped. An object
	// still linked into a queue (do_next set) must never be destroyed.
	if (slowpath(dou._do->do_next != DISPATCH_OBJECT_LISTLESS)) {
		DISPATCH_CRASH("Release while enqueued");
	}
	// Class-specific teardown via the vtable, then generic dealloc.
	dx_dispose(dou._do);
	return _dispatch_dealloc(dou);
}
219
void *
dispatch_get_context(dispatch_object_t dou)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_get_context, dou);
	// Global objects and root queues carry no user context.
	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
		return NULL;
	}
	return dou._do->do_ctxt;
}
230
void
dispatch_set_context(dispatch_object_t dou, void *context)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_set_context, dou, context);
	// Silently ignore attempts to set a context on global objects and
	// root queues (same guard as dispatch_get_context).
	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
		return;
	}
	dou._do->do_ctxt = context;
}
241
void
dispatch_set_finalizer_f(dispatch_object_t dou, dispatch_function_t finalizer)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_set_finalizer_f, dou, finalizer);
	// Global objects and root queues are never finalized; ignore.
	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
		return;
	}
	// The finalizer runs at dealloc time via _dispatch_dealloc (and only
	// if a context is also set — see _dispatch_dealloc).
	dou._do->do_finalizer = finalizer;
}
252
void
dispatch_suspend(dispatch_object_t dou)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_suspend, dou);
	// Global objects and root queues cannot be suspended.
	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
		return;
	}
	// rdar://8181908 explains why we need to do an internal retain at every
	// suspension.
	(void)dispatch_atomic_add2o(dou._do, do_suspend_cnt,
			DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed);
	// Paired with the release in dispatch_resume/_dispatch_resume_slow.
	_dispatch_retain(dou._do);
}
267
DISPATCH_NOINLINE
static void
_dispatch_resume_slow(dispatch_object_t dou)
{
	// Taken when the suspend count just returned to zero: wake the
	// object so pending work can run again.
	_dispatch_wakeup(dou._do);
	// Balancing the retain() done in suspend() for rdar://8181908
	_dispatch_release(dou._do);
}
276
void
dispatch_resume(dispatch_object_t dou)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_resume, dou);
	// Global objects cannot be suspended or resumed. This also has the
	// side effect of saturating the suspend count of an object and
	// guarding against resuming due to overflow.
	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
		return;
	}
	// Check the previous value of the suspend count. If the previous
	// value was a single suspend interval, the object should be resumed.
	// If the previous value was less than the suspend interval, the object
	// has been over-resumed.
	unsigned int suspend_cnt = dispatch_atomic_sub_orig2o(dou._do,
			do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed);
	if (fastpath(suspend_cnt > DISPATCH_OBJECT_SUSPEND_INTERVAL)) {
		// Still suspended after this resume.
		// Balancing the retain() done in suspend() for rdar://8181908
		return _dispatch_release(dou._do);
	}
	if (fastpath(suspend_cnt == DISPATCH_OBJECT_SUSPEND_INTERVAL)) {
		// Suspend count dropped to zero: wake the object (slow path also
		// performs the balancing release).
		return _dispatch_resume_slow(dou);
	}
	DISPATCH_CLIENT_CRASH("Over-resume of an object");
}
303
size_t
_dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz)
{
	// Append refcount/suspend state to a debug string; returns the number
	// of characters written (dsnprintf semantics).
	// Stored refcounts are biased by -1, hence the "+ 1" for display;
	// the low bit of do_suspend_cnt is the "locked" flag.
	return dsnprintf(buf, bufsiz, "xrefcnt = 0x%x, refcnt = 0x%x, "
			"suspend_cnt = 0x%x, locked = %d, ", dou._do->do_xref_cnt + 1,
			dou._do->do_ref_cnt + 1,
			dou._do->do_suspend_cnt / DISPATCH_OBJECT_SUSPEND_INTERVAL,
			dou._do->do_suspend_cnt & 1);
}