2 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
4 * @APPLE_APACHE_LICENSE_HEADER_START@
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
18 * @APPLE_APACHE_LICENSE_HEADER_END@
24 #pragma mark _os_object_t
27 _os_object_retain_count(_os_object_t obj
)
29 int xref_cnt
= obj
->os_obj_xref_cnt
;
30 if (slowpath(xref_cnt
== _OS_OBJECT_GLOBAL_REFCNT
)) {
31 return ULONG_MAX
; // global object
33 return (unsigned long)(xref_cnt
+ 1);
38 _os_object_retain_internal(_os_object_t obj
)
40 return _os_object_retain_internal_inline(obj
);
45 _os_object_release_internal(_os_object_t obj
)
47 return _os_object_release_internal_inline(obj
);
52 _os_object_retain(_os_object_t obj
)
54 int xref_cnt
= obj
->os_obj_xref_cnt
;
55 if (slowpath(xref_cnt
== _OS_OBJECT_GLOBAL_REFCNT
)) {
56 return obj
; // global object
58 xref_cnt
= dispatch_atomic_inc2o(obj
, os_obj_xref_cnt
, relaxed
);
59 if (slowpath(xref_cnt
<= 0)) {
60 _OS_OBJECT_CLIENT_CRASH("Resurrection of an object");
67 _os_object_release(_os_object_t obj
)
69 int xref_cnt
= obj
->os_obj_xref_cnt
;
70 if (slowpath(xref_cnt
== _OS_OBJECT_GLOBAL_REFCNT
)) {
71 return; // global object
73 xref_cnt
= dispatch_atomic_dec2o(obj
, os_obj_xref_cnt
, relaxed
);
74 if (fastpath(xref_cnt
>= 0)) {
77 if (slowpath(xref_cnt
< -1)) {
78 _OS_OBJECT_CLIENT_CRASH("Over-release of an object");
80 return _os_object_xref_dispose(obj
);
84 _os_object_retain_weak(_os_object_t obj
)
86 int xref_cnt
= obj
->os_obj_xref_cnt
;
87 if (slowpath(xref_cnt
== _OS_OBJECT_GLOBAL_REFCNT
)) {
88 return true; // global object
91 if (slowpath(xref_cnt
== -1)) {
94 if (slowpath(xref_cnt
< -1)) {
97 if (slowpath(!dispatch_atomic_cmpxchgvw2o(obj
, os_obj_xref_cnt
, xref_cnt
,
98 xref_cnt
+ 1, &xref_cnt
, relaxed
))) {
103 _OS_OBJECT_CLIENT_CRASH("Over-release of an object");
107 _os_object_allows_weak_reference(_os_object_t obj
)
109 int xref_cnt
= obj
->os_obj_xref_cnt
;
110 if (slowpath(xref_cnt
== -1)) {
113 if (slowpath(xref_cnt
< -1)) {
114 _OS_OBJECT_CLIENT_CRASH("Over-release of an object");
120 #pragma mark dispatch_object_t
/*
 * Allocate a dispatch object of the given size with the given vtable,
 * delegating to the os_object allocator for realized objects.
 */
void *
_dispatch_alloc(const void *vtable, size_t size)
{
	return _os_object_alloc_realized(vtable, size);
}
129 dispatch_retain(dispatch_object_t dou
)
131 DISPATCH_OBJECT_TFB(_dispatch_objc_retain
, dou
);
132 (void)_os_object_retain(dou
._os_obj
);
136 dispatch_release(dispatch_object_t dou
)
138 DISPATCH_OBJECT_TFB(_dispatch_objc_release
, dou
);
139 _os_object_release(dou
._os_obj
);
143 _dispatch_dealloc(dispatch_object_t dou
)
145 dispatch_queue_t tq
= dou
._do
->do_targetq
;
146 dispatch_function_t func
= dou
._do
->do_finalizer
;
147 void *ctxt
= dou
._do
->do_ctxt
;
149 _os_object_dealloc(dou
._os_obj
);
152 dispatch_async_f(tq
, ctxt
, func
);
154 _dispatch_release(tq
);
158 _dispatch_xref_dispose(dispatch_object_t dou
)
160 if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou
._do
))) {
161 // Arguments for and against this assert are within 6705399
162 DISPATCH_CLIENT_CRASH("Release of a suspended object");
165 if (dx_type(dou
._do
) == DISPATCH_SOURCE_KEVENT_TYPE
) {
166 _dispatch_source_xref_dispose(dou
._ds
);
167 } else if (dou
._dq
->do_vtable
== DISPATCH_VTABLE(queue_runloop
)) {
168 _dispatch_runloop_queue_xref_dispose(dou
._dq
);
170 return _dispatch_release(dou
._os_obj
);
175 _dispatch_dispose(dispatch_object_t dou
)
177 if (slowpath(dou
._do
->do_next
!= DISPATCH_OBJECT_LISTLESS
)) {
178 DISPATCH_CRASH("Release while enqueued");
181 return _dispatch_dealloc(dou
);
185 dispatch_get_context(dispatch_object_t dou
)
187 DISPATCH_OBJECT_TFB(_dispatch_objc_get_context
, dou
);
188 if (slowpath(dou
._do
->do_ref_cnt
== DISPATCH_OBJECT_GLOBAL_REFCNT
) ||
189 slowpath(dx_type(dou
._do
) == DISPATCH_QUEUE_ROOT_TYPE
)) {
192 return dou
._do
->do_ctxt
;
196 dispatch_set_context(dispatch_object_t dou
, void *context
)
198 DISPATCH_OBJECT_TFB(_dispatch_objc_set_context
, dou
, context
);
199 if (slowpath(dou
._do
->do_ref_cnt
== DISPATCH_OBJECT_GLOBAL_REFCNT
) ||
200 slowpath(dx_type(dou
._do
) == DISPATCH_QUEUE_ROOT_TYPE
)) {
203 dou
._do
->do_ctxt
= context
;
207 dispatch_set_finalizer_f(dispatch_object_t dou
, dispatch_function_t finalizer
)
209 DISPATCH_OBJECT_TFB(_dispatch_objc_set_finalizer_f
, dou
, finalizer
);
210 if (slowpath(dou
._do
->do_ref_cnt
== DISPATCH_OBJECT_GLOBAL_REFCNT
) ||
211 slowpath(dx_type(dou
._do
) == DISPATCH_QUEUE_ROOT_TYPE
)) {
214 dou
._do
->do_finalizer
= finalizer
;
218 dispatch_suspend(dispatch_object_t dou
)
220 DISPATCH_OBJECT_TFB(_dispatch_objc_suspend
, dou
);
221 if (slowpath(dou
._do
->do_ref_cnt
== DISPATCH_OBJECT_GLOBAL_REFCNT
) ||
222 slowpath(dx_type(dou
._do
) == DISPATCH_QUEUE_ROOT_TYPE
)) {
225 // rdar://8181908 explains why we need to do an internal retain at every
227 (void)dispatch_atomic_add2o(dou
._do
, do_suspend_cnt
,
228 DISPATCH_OBJECT_SUSPEND_INTERVAL
, acquire
);
229 _dispatch_retain(dou
._do
);
234 _dispatch_resume_slow(dispatch_object_t dou
)
236 _dispatch_wakeup(dou
._do
);
237 // Balancing the retain() done in suspend() for rdar://8181908
238 _dispatch_release(dou
._do
);
242 dispatch_resume(dispatch_object_t dou
)
244 DISPATCH_OBJECT_TFB(_dispatch_objc_resume
, dou
);
245 // Global objects cannot be suspended or resumed. This also has the
246 // side effect of saturating the suspend count of an object and
247 // guarding against resuming due to overflow.
248 if (slowpath(dou
._do
->do_ref_cnt
== DISPATCH_OBJECT_GLOBAL_REFCNT
) ||
249 slowpath(dx_type(dou
._do
) == DISPATCH_QUEUE_ROOT_TYPE
)) {
252 // Check the previous value of the suspend count. If the previous
253 // value was a single suspend interval, the object should be resumed.
254 // If the previous value was less than the suspend interval, the object
255 // has been over-resumed.
256 unsigned int suspend_cnt
= dispatch_atomic_sub_orig2o(dou
._do
,
257 do_suspend_cnt
, DISPATCH_OBJECT_SUSPEND_INTERVAL
, release
);
258 if (fastpath(suspend_cnt
> DISPATCH_OBJECT_SUSPEND_INTERVAL
)) {
259 // Balancing the retain() done in suspend() for rdar://8181908
260 return _dispatch_release(dou
._do
);
262 if (fastpath(suspend_cnt
== DISPATCH_OBJECT_SUSPEND_INTERVAL
)) {
263 return _dispatch_resume_slow(dou
);
265 DISPATCH_CLIENT_CRASH("Over-resume of an object");
269 _dispatch_object_debug_attr(dispatch_object_t dou
, char* buf
, size_t bufsiz
)
271 return dsnprintf(buf
, bufsiz
, "xrefcnt = 0x%x, refcnt = 0x%x, "
272 "suspend_cnt = 0x%x, locked = %d, ", dou
._do
->do_xref_cnt
+ 1,
273 dou
._do
->do_ref_cnt
+ 1,
274 dou
._do
->do_suspend_cnt
/ DISPATCH_OBJECT_SUSPEND_INTERVAL
,
275 dou
._do
->do_suspend_cnt
& 1);