/*
 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
24 #pragma mark _os_object_t
27 _os_object_retain_count(_os_object_t obj
)
29 int xref_cnt
= obj
->os_obj_xref_cnt
;
30 if (slowpath(xref_cnt
== _OS_OBJECT_GLOBAL_REFCNT
)) {
31 return ULONG_MAX
; // global object
33 return (unsigned long)(xref_cnt
+ 1);
38 _os_object_retain_internal(_os_object_t obj
)
40 return _os_object_retain_internal_n_inline(obj
, 1);
45 _os_object_retain_internal_n(_os_object_t obj
, uint16_t n
)
47 return _os_object_retain_internal_n_inline(obj
, n
);
52 _os_object_release_internal(_os_object_t obj
)
54 return _os_object_release_internal_n_inline(obj
, 1);
59 _os_object_release_internal_n(_os_object_t obj
, uint16_t n
)
61 return _os_object_release_internal_n_inline(obj
, n
);
66 _os_object_retain(_os_object_t obj
)
68 int xref_cnt
= _os_object_xrefcnt_inc(obj
);
69 if (slowpath(xref_cnt
<= 0)) {
70 _OS_OBJECT_CLIENT_CRASH("Resurrection of an object");
77 _os_object_retain_with_resurrect(_os_object_t obj
)
79 int xref_cnt
= _os_object_xrefcnt_inc(obj
);
80 if (slowpath(xref_cnt
< 0)) {
81 _OS_OBJECT_CLIENT_CRASH("Resurrection of an over-released object");
83 if (slowpath(xref_cnt
== 0)) {
84 _os_object_retain_internal(obj
);
91 _os_object_release(_os_object_t obj
)
93 int xref_cnt
= _os_object_xrefcnt_dec(obj
);
94 if (fastpath(xref_cnt
>= 0)) {
97 if (slowpath(xref_cnt
< -1)) {
98 _OS_OBJECT_CLIENT_CRASH("Over-release of an object");
100 return _os_object_xref_dispose(obj
);
104 _os_object_retain_weak(_os_object_t obj
)
106 int xref_cnt
, nxref_cnt
;
107 os_atomic_rmw_loop2o(obj
, os_obj_xref_cnt
, xref_cnt
, nxref_cnt
, relaxed
, {
108 if (slowpath(xref_cnt
== _OS_OBJECT_GLOBAL_REFCNT
)) {
109 os_atomic_rmw_loop_give_up(return true); // global object
111 if (slowpath(xref_cnt
== -1)) {
112 os_atomic_rmw_loop_give_up(return false);
114 if (slowpath(xref_cnt
< -1)) {
115 os_atomic_rmw_loop_give_up(goto overrelease
);
117 nxref_cnt
= xref_cnt
+ 1;
121 _OS_OBJECT_CLIENT_CRASH("Over-release of an object");
125 _os_object_allows_weak_reference(_os_object_t obj
)
127 int xref_cnt
= obj
->os_obj_xref_cnt
;
128 if (slowpath(xref_cnt
== -1)) {
131 if (slowpath(xref_cnt
< -1)) {
132 _OS_OBJECT_CLIENT_CRASH("Over-release of an object");
138 #pragma mark dispatch_object_t
141 _dispatch_object_alloc(const void *vtable
, size_t size
)
143 #if OS_OBJECT_HAVE_OBJC1
144 const struct dispatch_object_vtable_s
*_vtable
= vtable
;
145 dispatch_object_t dou
;
146 dou
._os_obj
= _os_object_alloc_realized(_vtable
->_os_obj_objc_isa
, size
);
147 dou
._do
->do_vtable
= vtable
;
150 return _os_object_alloc_realized(vtable
, size
);
155 _dispatch_object_finalize(dispatch_object_t dou
)
158 objc_destructInstance((id
)dou
._do
);
165 _dispatch_object_dealloc(dispatch_object_t dou
)
167 // so that ddt doesn't pick up bad objects when malloc reuses this memory
168 dou
._os_obj
->os_obj_isa
= NULL
;
169 #if OS_OBJECT_HAVE_OBJC1
170 dou
._do
->do_vtable
= NULL
;
176 dispatch_retain(dispatch_object_t dou
)
178 DISPATCH_OBJECT_TFB(_dispatch_objc_retain
, dou
);
179 (void)_os_object_retain(dou
._os_obj
);
183 dispatch_release(dispatch_object_t dou
)
185 DISPATCH_OBJECT_TFB(_dispatch_objc_release
, dou
);
186 _os_object_release(dou
._os_obj
);
191 _dispatch_xref_dispose(dispatch_object_t dou
)
193 unsigned long metatype
= dx_metatype(dou
._do
);
194 if (metatype
== _DISPATCH_QUEUE_TYPE
|| metatype
== _DISPATCH_SOURCE_TYPE
) {
195 _dispatch_queue_xref_dispose(dou
._dq
);
197 if (dx_type(dou
._do
) == DISPATCH_SOURCE_KEVENT_TYPE
) {
198 _dispatch_source_xref_dispose(dou
._ds
);
200 } else if (dx_type(dou
._do
) == DISPATCH_MACH_CHANNEL_TYPE
) {
201 _dispatch_mach_xref_dispose(dou
._dm
);
203 } else if (dx_type(dou
._do
) == DISPATCH_QUEUE_RUNLOOP_TYPE
) {
204 _dispatch_runloop_queue_xref_dispose(dou
._dq
);
206 return _dispatch_release_tailcall(dou
._os_obj
);
211 _dispatch_dispose(dispatch_object_t dou
)
213 dispatch_queue_t tq
= dou
._do
->do_targetq
;
214 dispatch_function_t func
= dou
._do
->do_finalizer
;
215 void *ctxt
= dou
._do
->do_ctxt
;
216 bool allow_free
= true;
218 if (slowpath(dou
._do
->do_next
!= DISPATCH_OBJECT_LISTLESS
)) {
219 DISPATCH_INTERNAL_CRASH(dou
._do
->do_next
, "Release while enqueued");
222 dx_dispose(dou
._do
, &allow_free
);
224 // Past this point, the only thing left of the object is its memory
225 if (likely(allow_free
)) {
226 _dispatch_object_finalize(dou
);
227 _dispatch_object_dealloc(dou
);
230 dispatch_async_f(tq
, ctxt
, func
);
232 if (tq
) _dispatch_release_tailcall(tq
);
236 dispatch_get_context(dispatch_object_t dou
)
238 DISPATCH_OBJECT_TFB(_dispatch_objc_get_context
, dou
);
239 if (unlikely(dou
._do
->do_ref_cnt
== DISPATCH_OBJECT_GLOBAL_REFCNT
||
240 dx_hastypeflag(dou
._do
, QUEUE_ROOT
) ||
241 dx_hastypeflag(dou
._do
, QUEUE_BASE
))) {
244 return dou
._do
->do_ctxt
;
248 dispatch_set_context(dispatch_object_t dou
, void *context
)
250 DISPATCH_OBJECT_TFB(_dispatch_objc_set_context
, dou
, context
);
251 if (unlikely(dou
._do
->do_ref_cnt
== DISPATCH_OBJECT_GLOBAL_REFCNT
||
252 dx_hastypeflag(dou
._do
, QUEUE_ROOT
) ||
253 dx_hastypeflag(dou
._do
, QUEUE_BASE
))) {
256 dou
._do
->do_ctxt
= context
;
260 dispatch_set_finalizer_f(dispatch_object_t dou
, dispatch_function_t finalizer
)
262 DISPATCH_OBJECT_TFB(_dispatch_objc_set_finalizer_f
, dou
, finalizer
);
263 if (unlikely(dou
._do
->do_ref_cnt
== DISPATCH_OBJECT_GLOBAL_REFCNT
||
264 dx_hastypeflag(dou
._do
, QUEUE_ROOT
) ||
265 dx_hastypeflag(dou
._do
, QUEUE_BASE
))) {
268 dou
._do
->do_finalizer
= finalizer
;
272 dispatch_set_target_queue(dispatch_object_t dou
, dispatch_queue_t tq
)
274 DISPATCH_OBJECT_TFB(_dispatch_objc_set_target_queue
, dou
, tq
);
275 if (dx_vtable(dou
._do
)->do_set_targetq
) {
276 dx_vtable(dou
._do
)->do_set_targetq(dou
._do
, tq
);
277 } else if (likely(dou
._do
->do_ref_cnt
!= DISPATCH_OBJECT_GLOBAL_REFCNT
&&
278 !dx_hastypeflag(dou
._do
, QUEUE_ROOT
) &&
279 !dx_hastypeflag(dou
._do
, QUEUE_BASE
))) {
281 tq
= _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT
, false);
283 _dispatch_object_set_target_queue_inline(dou
._do
, tq
);
288 dispatch_activate(dispatch_object_t dou
)
290 DISPATCH_OBJECT_TFB(_dispatch_objc_activate
, dou
);
291 if (dx_vtable(dou
._do
)->do_resume
) {
292 dx_vtable(dou
._do
)->do_resume(dou
._do
, true);
297 dispatch_suspend(dispatch_object_t dou
)
299 DISPATCH_OBJECT_TFB(_dispatch_objc_suspend
, dou
);
300 if (dx_vtable(dou
._do
)->do_suspend
) {
301 dx_vtable(dou
._do
)->do_suspend(dou
._do
);
306 dispatch_resume(dispatch_object_t dou
)
308 DISPATCH_OBJECT_TFB(_dispatch_objc_resume
, dou
);
309 // the do_suspend below is not a typo. Having a do_resume but no do_suspend
310 // allows for objects to support activate, but have no-ops suspend/resume
311 if (dx_vtable(dou
._do
)->do_suspend
) {
312 dx_vtable(dou
._do
)->do_resume(dou
._do
, false);
317 _dispatch_object_debug_attr(dispatch_object_t dou
, char* buf
, size_t bufsiz
)
319 return dsnprintf(buf
, bufsiz
, "xref = %d, ref = %d, ",
320 dou
._do
->do_xref_cnt
+ 1, dou
._do
->do_ref_cnt
+ 1);