const pthread_attr_t *_Nullable attrs,
dispatch_block_t _Nullable configure);
#endif
+
+// Returns whether the source's kevent will be re-armed once the in-flight
+// event-handler invocation returns. Must be called from within the
+// source's event handler; the implementation traps otherwise.
+// NOTE(review): the `_4NW` suffix suggests this is SPI for a specific
+// client (presumably Network.framework) — confirm before wider use.
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW
+bool
+_dispatch_source_will_reenable_kevent_4NW(dispatch_source_t source);
#endif
API_AVAILABLE(macos(10.9), ios(7.0))
DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 0x10,
DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 0x20,
-
+
DISPATCH_MEMORYPRESSURE_MSL_STATUS DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 0xf0000000,
};
#endif
retry:
- if (wlh == DISPATCH_WLH_ANON) {
+ if (unlikely(wlh == NULL)) {
+ DISPATCH_INTERNAL_CRASH(wlh, "Invalid wlh");
+ } else if (wlh == DISPATCH_WLH_ANON) {
int kqfd = _dispatch_kq;
#if DISPATCH_USE_KEVENT_QOS
if (_dispatch_kevent_workqueue_enabled) {
#ifndef __FIREHOSE_INLINE_INTERNAL__
#define __FIREHOSE_INLINE_INTERNAL__
+// Fallback definition for build configurations where the os/atomic layer
+// does not already provide _os_atomic_basetypeof: yields the non-_Atomic
+// base type of the object `p` points to, by taking the type of a relaxed
+// C11 atomic load of it.
+#ifndef _os_atomic_basetypeof
+#define _os_atomic_basetypeof(p) \
+		typeof(atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_relaxed))
+#endif
+
#define firehose_atomic_maxv2o(p, f, v, o, m) \
os_atomic_rmw_loop2o(p, f, *(o), (v), m, { \
if (*(o) >= (v)) os_atomic_rmw_loop_give_up(break); \
})
#define firehose_atomic_max2o(p, f, v, m) ({ \
- typeof((p)->f) _old; \
+ _os_atomic_basetypeof(&(p)->f) _old; \
firehose_atomic_maxv2o(p, f, v, &_old, m); \
})
static inline _os_object_t
_os_object_retain_internal_n_inline(_os_object_t obj, int n)
{
- int ref_cnt = _os_object_refcnt_add(obj, n);
- if (unlikely(ref_cnt <= 0)) {
+ int ref_cnt = _os_object_refcnt_add_orig(obj, n);
+ if (unlikely(ref_cnt < 0)) {
_OS_OBJECT_CLIENT_CRASH("Resurrection of an object");
}
return obj;
_os_object_t
_os_object_retain(_os_object_t obj)
{
- int xref_cnt = _os_object_xrefcnt_inc(obj);
- if (slowpath(xref_cnt <= 0)) {
+ int xref_cnt = _os_object_xrefcnt_inc_orig(obj);
+ if (slowpath(xref_cnt < 0)) {
_OS_OBJECT_CLIENT_CRASH("Resurrection of an object");
}
return obj;
_os_object_t
_os_object_retain_with_resurrect(_os_object_t obj)
{
- int xref_cnt = _os_object_xrefcnt_inc(obj);
+ int xref_cnt = _os_object_xrefcnt_inc_orig(obj) + 1;
if (slowpath(xref_cnt < 0)) {
_OS_OBJECT_CLIENT_CRASH("Resurrection of an over-released object");
}
_ref_cnt; \
})
-#define _os_atomic_refcnt_add2o(o, m, n) \
- _os_atomic_refcnt_perform2o(o, m, add, n, relaxed)
+#define _os_atomic_refcnt_add_orig2o(o, m, n) \
+ _os_atomic_refcnt_perform2o(o, m, add_orig, n, relaxed)
#define _os_atomic_refcnt_sub2o(o, m, n) \
_os_atomic_refcnt_perform2o(o, m, sub, n, release)
/*
* Higher level _os_object_{x,}refcnt_* actions
*
- * _os_atomic_{x,}refcnt_inc(o):
+ * _os_atomic_{x,}refcnt_inc_orig(o):
* increment the external (resp. internal) refcount and
- * returns the new refcount value
+ * returns the old refcount value
*
* _os_atomic_{x,}refcnt_dec(o):
* decrement the external (resp. internal) refcount and
* (resp. internal) refcount
*
*/
-#define _os_object_xrefcnt_inc(o) \
- _os_atomic_refcnt_add2o(o, os_obj_xref_cnt, 1)
+#define _os_object_xrefcnt_inc_orig(o) \
+ _os_atomic_refcnt_add_orig2o(o, os_obj_xref_cnt, 1)
#define _os_object_xrefcnt_dec(o) \
_os_atomic_refcnt_sub2o(o, os_obj_xref_cnt, 1)
#define _os_object_xrefcnt_dispose_barrier(o) \
_os_atomic_refcnt_dispose_barrier2o(o, os_obj_xref_cnt)
-#define _os_object_refcnt_add(o, n) \
- _os_atomic_refcnt_add2o(o, os_obj_ref_cnt, n)
+#define _os_object_refcnt_add_orig(o, n) \
+ _os_atomic_refcnt_add_orig2o(o, os_obj_ref_cnt, n)
#define _os_object_refcnt_sub(o, n) \
_os_atomic_refcnt_sub2o(o, os_obj_ref_cnt, n)
{
dispatch_sync_context_t dsc = (dispatch_sync_context_t)dou._dc;
uint64_t next_owner = 0, old_state, new_state;
- dispatch_wlh_t wlh = NULL;
+ dispatch_wlh_t wlh = DISPATCH_WLH_ANON;
_dispatch_trace_continuation_pop(dq, dsc->_as_dc);
#define _os_atomic_c11_op(p, v, m, o, op) \
({ _os_atomic_basetypeof(p) _v = (v), _r = \
atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), _v, \
- memory_order_##m); (typeof(*(p)))(_r op _v); })
+ memory_order_##m); (typeof(_r))(_r op _v); })
#define _os_atomic_c11_op_orig(p, v, m, o, op) \
atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), v, \
memory_order_##m)
#pragma mark -
#pragma mark dispatch_source_invoke
+// Returns true when the source's kevent is due to be re-armed after the
+// current event-handler invocation completes: its unote still needs
+// rearming (_dispatch_unote_needs_rearm) and DSF_ARMED is currently clear.
+//
+// Contract: may only be called while the calling thread holds the source's
+// drain lock, i.e. from within the source's event handler; otherwise the
+// process aborts with a client-fault crash report.
+bool
+_dispatch_source_will_reenable_kevent_4NW(dispatch_source_t ds)
+{
+	// Relaxed load: only this thread's own drain-lock ownership is
+	// inspected below — presumably stable under us when held; confirm.
+	uint64_t dq_state = os_atomic_load2o(ds, dq_state, relaxed);
+	dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
+
+	// Enforce the calling-context contract documented above.
+	if (unlikely(!_dq_state_drain_locked_by_self(dq_state))) {
+		DISPATCH_CLIENT_CRASH(0, "_dispatch_source_will_reenable_kevent_4NW "
+				"not called from within the event handler");
+	}
+
+	return _dispatch_unote_needs_rearm(ds->ds_refs) && !(dqf & DSF_ARMED);
+}
+
static void
_dispatch_source_registration_callout(dispatch_source_t ds, dispatch_queue_t cq,
dispatch_invoke_flags_t flags)