/*
 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "internal.h"

#pragma mark _os_object_t

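/*
 * Note on the reference-counting convention used below (a reading aid, not
 * part of the original sources): both os_obj_ref_cnt and os_obj_xref_cnt
 * appear to be stored biased by -1, so a freshly allocated object holds 0 in
 * each field while logically owning one reference; this is why
 * _os_object_retain_count() reports xref_cnt + 1 and why a count of -1 marks
 * an object whose last reference is gone. _OS_OBJECT_GLOBAL_REFCNT is a
 * sentinel for immortal global objects, which short-circuit every retain and
 * release.
 */
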
unsigned long
_os_object_retain_count(_os_object_t obj)
{
	int xref_cnt = obj->os_obj_xref_cnt;
	if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
		return ULONG_MAX; // global object
	}
	return (unsigned long)(xref_cnt + 1);
}

_os_object_t
_os_object_retain_internal(_os_object_t obj)
{
	int ref_cnt = obj->os_obj_ref_cnt;
	if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
		return obj; // global object
	}
	ref_cnt = dispatch_atomic_inc2o(obj, os_obj_ref_cnt, relaxed);
	if (slowpath(ref_cnt <= 0)) {
		DISPATCH_CRASH("Resurrection of an object");
	}
	return obj;
}

void
_os_object_release_internal(_os_object_t obj)
{
	int ref_cnt = obj->os_obj_ref_cnt;
	if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
		return; // global object
	}
	ref_cnt = dispatch_atomic_dec2o(obj, os_obj_ref_cnt, relaxed);
	if (fastpath(ref_cnt >= 0)) {
		return;
	}
	if (slowpath(ref_cnt < -1)) {
		DISPATCH_CRASH("Over-release of an object");
	}
	if (slowpath(obj->os_obj_xref_cnt >= 0)) {
		DISPATCH_CRASH("Release while external references exist");
	}
	return _os_object_dispose(obj);
}

_os_object_t
_os_object_retain(_os_object_t obj)
{
	int xref_cnt = obj->os_obj_xref_cnt;
	if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
		return obj; // global object
	}
	xref_cnt = dispatch_atomic_inc2o(obj, os_obj_xref_cnt, relaxed);
	if (slowpath(xref_cnt <= 0)) {
		_OS_OBJECT_CLIENT_CRASH("Resurrection of an object");
	}
	return obj;
}

void
_os_object_release(_os_object_t obj)
{
	int xref_cnt = obj->os_obj_xref_cnt;
	if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
		return; // global object
	}
	xref_cnt = dispatch_atomic_dec2o(obj, os_obj_xref_cnt, relaxed);
	if (fastpath(xref_cnt >= 0)) {
		return;
	}
	if (slowpath(xref_cnt < -1)) {
		_OS_OBJECT_CLIENT_CRASH("Over-release of an object");
	}
	return _os_object_xref_dispose(obj);
}

bool
_os_object_retain_weak(_os_object_t obj)
{
	int xref_cnt = obj->os_obj_xref_cnt;
	if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
		return true; // global object
	}
retry:
	if (slowpath(xref_cnt == -1)) {
		return false;
	}
	if (slowpath(xref_cnt < -1)) {
		goto overrelease;
	}
	if (slowpath(!dispatch_atomic_cmpxchgvw2o(obj, os_obj_xref_cnt, xref_cnt,
			xref_cnt + 1, &xref_cnt, relaxed))) {
		goto retry;
	}
	return true;
overrelease:
	_OS_OBJECT_CLIENT_CRASH("Over-release of an object");
}

bool
_os_object_allows_weak_reference(_os_object_t obj)
{
	int xref_cnt = obj->os_obj_xref_cnt;
	if (slowpath(xref_cnt == -1)) {
		return false;
	}
	if (slowpath(xref_cnt < -1)) {
		_OS_OBJECT_CLIENT_CRASH("Over-release of an object");
	}
	return true;
}

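/*
 * Reading note (added commentary): the two weak-reference helpers above work
 * on the same biased xref count; _os_object_retain_weak() only bumps the
 * count if the object has not already reached -1 (fully released), retrying
 * its compare-and-swap when it races with another thread. A minimal sketch of
 * that loop, using a C11 atomic as a stand-in for dispatch_atomic_cmpxchgvw2o:
 *
 *	_Atomic int xref_cnt;                      // stands in for os_obj_xref_cnt
 *	int cur = atomic_load_explicit(&xref_cnt, memory_order_relaxed);
 *	for (;;) {
 *		if (cur == -1) return false;           // already dead; no resurrection
 *		if (cur < -1) abort();                 // over-released
 *		if (atomic_compare_exchange_weak_explicit(&xref_cnt, &cur, cur + 1,
 *				memory_order_relaxed, memory_order_relaxed)) {
 *			return true;                       // weak reference taken
 *		}                                      // lost the race; cur reloaded
 *	}
 */
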
#pragma mark dispatch_object_t

void *
_dispatch_alloc(const void *vtable, size_t size)
{
	return _os_object_alloc_realized(vtable, size);
}

void
dispatch_retain(dispatch_object_t dou)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_retain, dou);
	(void)_os_object_retain(dou._os_obj);
}

void
_dispatch_retain(dispatch_object_t dou)
{
	(void)_os_object_retain_internal(dou._os_obj);
}

void
dispatch_release(dispatch_object_t dou)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_release, dou);
	_os_object_release(dou._os_obj);
}

void
_dispatch_release(dispatch_object_t dou)
{
	_os_object_release_internal(dou._os_obj);
}

static void
_dispatch_dealloc(dispatch_object_t dou)
{
	dispatch_queue_t tq = dou._do->do_targetq;
	dispatch_function_t func = dou._do->do_finalizer;
	void *ctxt = dou._do->do_ctxt;

	_os_object_dealloc(dou._os_obj);

	if (func && ctxt) {
		dispatch_async_f(tq, ctxt, func);
	}
	_dispatch_release(tq);
}

void
_dispatch_xref_dispose(dispatch_object_t dou)
{
	if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) {
		// Arguments for and against this assert are within 6705399
		DISPATCH_CLIENT_CRASH("Release of a suspended object");
	}
	if (dx_type(dou._do) == DISPATCH_SOURCE_KEVENT_TYPE) {
		_dispatch_source_xref_dispose(dou._ds);
	} else if (dou._dq->do_vtable == DISPATCH_VTABLE(queue_runloop)) {
		_dispatch_runloop_queue_xref_dispose(dou._dq);
	}
	return _dispatch_release(dou._os_obj);
}

void
_dispatch_dispose(dispatch_object_t dou)
{
	if (slowpath(dou._do->do_next != DISPATCH_OBJECT_LISTLESS)) {
		DISPATCH_CRASH("Release while enqueued");
	}
	dx_dispose(dou._do);
	return _dispatch_dealloc(dou);
}

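/*
 * Reading note (added commentary): a rough sketch of the teardown order for a
 * dispatch object, assuming _os_object_xref_dispose() routes to
 * _dispatch_xref_dispose() and _os_object_dispose() routes to
 * _dispatch_dispose() for dispatch types:
 *
 *	last dispatch_release()        -> _os_object_release()
 *	  xref count drops below zero  -> _dispatch_xref_dispose()
 *	    type-specific xref cleanup, then drops the internal reference
 *	last internal release          -> _dispatch_dispose()
 *	    asserts the object is not enqueued, then _dispatch_dealloc()
 *	      frees storage and submits any finalizer to the target queue
 */
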
void *
dispatch_get_context(dispatch_object_t dou)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_get_context, dou);
	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
		return NULL;
	}
	return dou._do->do_ctxt;
}

void
dispatch_set_context(dispatch_object_t dou, void *context)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_set_context, dou, context);
	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
		return;
	}
	dou._do->do_ctxt = context;
}

void
dispatch_set_finalizer_f(dispatch_object_t dou, dispatch_function_t finalizer)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_set_finalizer_f, dou, finalizer);
	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
		return;
	}
	dou._do->do_finalizer = finalizer;
}

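/*
 * Usage sketch (illustrative client code, not part of this file; the queue
 * label and cleanup function are made up):
 *
 *	static void cleanup_ctxt(void *ctxt) { free(ctxt); }
 *
 *	dispatch_queue_t q = dispatch_queue_create("com.example.worker", NULL);
 *	dispatch_set_context(q, malloc(64));
 *	dispatch_set_finalizer_f(q, cleanup_ctxt);
 *	// ... blocks running on q may call dispatch_get_context(q) ...
 *	dispatch_release(q); // cleanup_ctxt runs on q's target queue at dealloc
 *
 * As the guard above shows, both setters are silently ignored for global
 * objects and root queues.
 */
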
void
dispatch_suspend(dispatch_object_t dou)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_suspend, dou);
	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
		return;
	}
	// rdar://8181908 explains why we need to do an internal retain at every
	// suspension.
	(void)dispatch_atomic_add2o(dou._do, do_suspend_cnt,
			DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed);
	_dispatch_retain(dou._do);
}

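/*
 * Usage sketch (illustrative client code, not part of this file; the queue
 * label, ctxt and work_func are made up): suspend and resume must be
 * balanced, and each suspension holds an internal retain on the object until
 * the matching resume, per the rdar comment above.
 *
 *	dispatch_queue_t q = dispatch_queue_create("com.example.worker", NULL);
 *	dispatch_suspend(q);                  // pending blocks stop being invoked
 *	dispatch_async_f(q, ctxt, work_func); // queued, does not run yet
 *	dispatch_resume(q);                   // suspend count back to zero; work runs
 *	dispatch_release(q);
 */
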
static void
_dispatch_resume_slow(dispatch_object_t dou)
{
	_dispatch_wakeup(dou._do);
	// Balancing the retain() done in suspend() for rdar://8181908
	_dispatch_release(dou._do);
}

void
dispatch_resume(dispatch_object_t dou)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_resume, dou);
	// Global objects cannot be suspended or resumed. This also has the
	// side effect of saturating the suspend count of an object and
	// guarding against resuming due to overflow.
	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
		return;
	}
	// Check the previous value of the suspend count. If the previous
	// value was a single suspend interval, the object should be resumed.
	// If the previous value was less than the suspend interval, the object
	// has been over-resumed.
	unsigned int suspend_cnt = dispatch_atomic_sub_orig2o(dou._do,
			do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed);
	if (fastpath(suspend_cnt > DISPATCH_OBJECT_SUSPEND_INTERVAL)) {
		// Balancing the retain() done in suspend() for rdar://8181908
		return _dispatch_release(dou._do);
	}
	if (fastpath(suspend_cnt == DISPATCH_OBJECT_SUSPEND_INTERVAL)) {
		return _dispatch_resume_slow(dou);
	}
	DISPATCH_CLIENT_CRASH("Over-resume of an object");
}

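/*
 * Worked example of the suspend-count arithmetic above (added commentary):
 * suppose DISPATCH_OBJECT_SUSPEND_INTERVAL is I and the object has been
 * suspended twice, so do_suspend_cnt == 2*I.
 *
 *	first dispatch_resume():  sub_orig returns 2*I  (> I)  -> just drop the
 *	                          internal retain taken by the matching suspend
 *	second dispatch_resume(): sub_orig returns I    (== I) -> last suspension
 *	                          gone; wake the object via _dispatch_resume_slow()
 *	third dispatch_resume():  sub_orig returns 0    (< I)  -> over-resume, crash
 */
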
size_t
_dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz)
{
	return dsnprintf(buf, bufsiz, "xrefcnt = 0x%x, refcnt = 0x%x, "
			"suspend_cnt = 0x%x, locked = %d, ", dou._do->do_xref_cnt + 1,
			dou._do->do_ref_cnt + 1,
			dou._do->do_suspend_cnt / DISPATCH_OBJECT_SUSPEND_INTERVAL,
			dou._do->do_suspend_cnt & 1);
}
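
/*
 * Example of the attribute string produced above (added commentary, values
 * illustrative): for an object with one external and one internal reference
 * that is not suspended, the buffer would read
 * "xrefcnt = 0x1, refcnt = 0x1, suspend_cnt = 0x0, locked = 0, ".
 */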