// Contains exported global data and initialization & other routines that must
// only exist once in the shared library even when resolvers are used.
+// NOTE: this file must not contain any atomic operations
+
#include "internal.h"
#if HAVE_MACH
void
dispatch_atfork_prepare(void)
{
+ _os_object_atfork_prepare();
}
DISPATCH_EXPORT DISPATCH_NOTHROW
void
dispatch_atfork_parent(void)
{
+ _os_object_atfork_parent();
+}
+
+DISPATCH_EXPORT DISPATCH_NOTHROW
+void
+dispatch_atfork_child(void)
+{
+ _os_object_atfork_child();
+ _voucher_atfork_child();
+ _dispatch_event_loop_atfork_child();
+ if (_dispatch_is_multithreaded_inline()) {
+ _dispatch_child_of_unsafe_fork = true;
+ }
+ _dispatch_queue_atfork_child();
+ // clear the _PROHIBIT and _MULTITHREADED bits if set
+ _dispatch_unsafe_fork = 0;
+}
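+
+// Illustrative sketch only (not part of this patch): on platforms where the
+// system C library does not install these handlers itself, they would
+// typically be registered once at initialization time, e.g.
+//
+//     (void)pthread_atfork(dispatch_atfork_prepare, dispatch_atfork_parent,
+//             dispatch_atfork_child);
+//
+// The exact registration point is platform-specific and assumed here.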
+
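+// Block all maskable signals on the calling thread, leaving only the
+// synchronous fault signals (plus SIGPIPE and SIGPROF) deliverable; see the
+// workaround note below.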
+int
+_dispatch_sigmask(void)
+{
+ sigset_t mask;
+ int r = 0;
+
+ /* Workaround: 6269619 Not all signals can be delivered on any thread */
+ r |= sigfillset(&mask);
+ r |= sigdelset(&mask, SIGILL);
+ r |= sigdelset(&mask, SIGTRAP);
+#if HAVE_DECL_SIGEMT
+ r |= sigdelset(&mask, SIGEMT);
+#endif
+ r |= sigdelset(&mask, SIGFPE);
+ r |= sigdelset(&mask, SIGBUS);
+ r |= sigdelset(&mask, SIGSEGV);
+ r |= sigdelset(&mask, SIGSYS);
+ r |= sigdelset(&mask, SIGPIPE);
+ r |= sigdelset(&mask, SIGPROF);
+ r |= pthread_sigmask(SIG_BLOCK, &mask, NULL);
+ return dispatch_assume_zero(r);
}
#pragma mark -
pthread_key_t dispatch_cache_key;
pthread_key_t dispatch_context_key;
pthread_key_t dispatch_pthread_root_queue_observer_hooks_key;
-pthread_key_t dispatch_defaultpriority_key;
+pthread_key_t dispatch_basepri_key;
#if DISPATCH_INTROSPECTION
pthread_key_t dispatch_introspection_key;
#elif DISPATCH_PERF_MON
pthread_key_t dispatch_bcounter_key;
#endif
-pthread_key_t dispatch_sema4_key;
+pthread_key_t dispatch_wlh_key;
pthread_key_t dispatch_voucher_key;
pthread_key_t dispatch_deferred_items_key;
#endif // !DISPATCH_USE_DIRECT_TSD && !DISPATCH_USE_THREAD_LOCAL_STORAGE
#if DISPATCH_USE_KEVENT_WORKQUEUE && DISPATCH_USE_MGR_THREAD
int _dispatch_kevent_workqueue_enabled;
#endif
-#if DISPATCH_USE_EVFILT_MACHPORT_DIRECT && \
- DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-int _dispatch_evfilt_machport_direct_enabled;
-#endif
DISPATCH_HW_CONFIG();
uint8_t _dispatch_unsafe_fork;
return _dispatch_child_of_unsafe_fork;
}
-DISPATCH_NOINLINE
-void
-_dispatch_fork_becomes_unsafe_slow(void)
-{
- uint8_t value = os_atomic_or(&_dispatch_unsafe_fork,
- _DISPATCH_UNSAFE_FORK_MULTITHREADED, relaxed);
- if (value & _DISPATCH_UNSAFE_FORK_PROHIBIT) {
- DISPATCH_CLIENT_CRASH(0, "Transition to multithreaded is prohibited");
- }
-}
-
-DISPATCH_NOINLINE
-void
-_dispatch_prohibit_transition_to_multithreaded(bool prohibit)
-{
- if (prohibit) {
- uint8_t value = os_atomic_or(&_dispatch_unsafe_fork,
- _DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed);
- if (value & _DISPATCH_UNSAFE_FORK_MULTITHREADED) {
- DISPATCH_CLIENT_CRASH(0, "The executable is already multithreaded");
- }
- } else {
- os_atomic_and(&_dispatch_unsafe_fork,
- (uint8_t)~_DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed);
- }
-}
-
const struct dispatch_queue_offsets_s dispatch_queue_offsets = {
.dqo_version = 6,
.dqo_label = offsetof(struct dispatch_queue_s, dq_label),
.dqo_suspend_cnt_size = 0,
.dqo_target_queue = offsetof(struct dispatch_queue_s, do_targetq),
.dqo_target_queue_size = sizeof(((dispatch_queue_t)NULL)->do_targetq),
- .dqo_priority = offsetof(struct dispatch_queue_s, dq_priority),
- .dqo_priority_size = sizeof(((dispatch_queue_t)NULL)->dq_priority),
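+ // the queue priority is no longer published at a stable offset; report
+ // offset and size as 0 so out-of-process readers treat it as unavailable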
+ .dqo_priority = 0,
+ .dqo_priority_size = 0,
};
#if !DISPATCH_USE_RESOLVERS
.do_targetq = &_dispatch_root_queues[
DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT],
#endif
- .dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1),
+ .dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1) |
+ DISPATCH_QUEUE_ROLE_BASE_ANON,
.dq_label = "com.apple.main-thread",
- .dq_width = 1,
- .dq_atomic_bits = DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC,
- .dq_override_voucher = DISPATCH_NO_VOUCHER,
+ .dq_atomic_flags = DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC | DQF_WIDTH(1),
.dq_serialnum = 1,
};
#pragma mark -
#pragma mark dispatch_queue_attr_t
-#define DISPATCH_QUEUE_ATTR_INIT(qos, prio, overcommit, freq, concurrent, inactive) \
- { \
- DISPATCH_GLOBAL_OBJECT_HEADER(queue_attr), \
- .dqa_qos_class = (qos), \
- .dqa_relative_priority = (qos) ? (prio) : 0, \
- .dqa_overcommit = _dispatch_queue_attr_overcommit_##overcommit, \
- .dqa_autorelease_frequency = DISPATCH_AUTORELEASE_FREQUENCY_##freq, \
- .dqa_concurrent = (concurrent), \
- .dqa_inactive = (inactive), \
- }
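+// The attribute now stores a single packed dispatch_priority_t
+// (dqa_qos_and_relpri) in place of the separate dqa_qos_class and
+// dqa_relative_priority fields removed above.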
+#define DISPATCH_QUEUE_ATTR_INIT(qos, prio, overcommit, freq, concurrent, \
+ inactive) \
+ { \
+ DISPATCH_GLOBAL_OBJECT_HEADER(queue_attr), \
+ .dqa_qos_and_relpri = (_dispatch_priority_make(qos, prio) & \
+ DISPATCH_PRIORITY_REQUESTED_MASK), \
+ .dqa_overcommit = _dispatch_queue_attr_overcommit_##overcommit, \
+ .dqa_autorelease_frequency = DISPATCH_AUTORELEASE_FREQUENCY_##freq, \
+ .dqa_concurrent = (concurrent), \
+ .dqa_inactive = (inactive), \
+ }
-#define DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, freq, concurrent) \
- { \
- [DQA_INDEX_ACTIVE] = DISPATCH_QUEUE_ATTR_INIT(\
- qos, prio, overcommit, freq, concurrent, false), \
- [DQA_INDEX_INACTIVE] = DISPATCH_QUEUE_ATTR_INIT(\
- qos, prio, overcommit, freq, concurrent, true), \
- }
+#define DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, freq, \
+ concurrent) \
+ { \
+ [DQA_INDEX_ACTIVE] = DISPATCH_QUEUE_ATTR_INIT( \
+ qos, prio, overcommit, freq, concurrent, false), \
+ [DQA_INDEX_INACTIVE] = DISPATCH_QUEUE_ATTR_INIT( \
+ qos, prio, overcommit, freq, concurrent, true), \
+ }
#define DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, prio, overcommit) \
- { \
- [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_CONCURRENT] = \
- DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, INHERIT, 1), \
- [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_SERIAL] = \
- DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, INHERIT, 0), \
- [DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_CONCURRENT] = \
- DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, WORK_ITEM, 1), \
- [DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_SERIAL] = \
- DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, WORK_ITEM, 0), \
- [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_CONCURRENT] = \
- DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, NEVER, 1), \
- [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_SERIAL] = \
- DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, NEVER, 0), \
- }
+ { \
+ [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_CONCURRENT] = \
+ DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \
+ qos, prio, overcommit, INHERIT, 1), \
+ [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_SERIAL] = \
+ DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \
+ qos, prio, overcommit, INHERIT, 0), \
+ [DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_CONCURRENT] = \
+ DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \
+ qos, prio, overcommit, WORK_ITEM, 1), \
+ [DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_SERIAL] = \
+ DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \
+ qos, prio, overcommit, WORK_ITEM, 0), \
+ [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_CONCURRENT] = \
+ DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \
+ qos, prio, overcommit, NEVER, 1), \
+ [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_SERIAL] = \
+ DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \
+ qos, prio, overcommit, NEVER, 0), \
+ }
#define DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, prio) \
- [prio] = { \
- [DQA_INDEX_UNSPECIFIED_OVERCOMMIT] = \
- DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), unspecified), \
- [DQA_INDEX_NON_OVERCOMMIT] = \
- DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), disabled), \
- [DQA_INDEX_OVERCOMMIT] = \
- DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), enabled), \
- }
+ [prio] = { \
+ [DQA_INDEX_UNSPECIFIED_OVERCOMMIT] = \
+ DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), unspecified),\
+ [DQA_INDEX_NON_OVERCOMMIT] = \
+ DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), disabled), \
+ [DQA_INDEX_OVERCOMMIT] = \
+ DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), enabled), \
+ }
#define DISPATCH_QUEUE_ATTR_PRIO_INIT(qos) \
- { \
- DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 0), \
- DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 1), \
- DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 2), \
- DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 3), \
- DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 4), \
- DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 5), \
- DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 6), \
- DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 7), \
- DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 8), \
- DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 9), \
- DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 10), \
- DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 11), \
- DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 12), \
- DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 13), \
- DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 14), \
- DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 15), \
- }
+ { \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 0), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 1), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 2), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 3), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 4), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 5), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 6), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 7), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 8), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 9), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 10), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 11), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 12), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 13), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 14), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 15), \
+ }
#define DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(qos) \
- [DQA_INDEX_QOS_CLASS_##qos] = \
- DISPATCH_QUEUE_ATTR_PRIO_INIT(_DISPATCH_QOS_CLASS_##qos)
+ [DQA_INDEX_QOS_CLASS_##qos] = \
+ DISPATCH_QUEUE_ATTR_PRIO_INIT(DISPATCH_QOS_##qos)
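+// The static attribute table is indexed along six axes:
+// [qos class][relative priority][overcommit][autorelease frequency]
+// [concurrent/serial][active/inactive], as built by the initializers above.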
// DISPATCH_QUEUE_CONCURRENT resp. _dispatch_queue_attr_concurrent is aliased
// to array member [0][0][0][0][0][0] and their properties must match!
#if DISPATCH_VARIANT_STATIC
// <rdar://problem/16778703>
struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent =
- DISPATCH_QUEUE_ATTR_INIT(_DISPATCH_QOS_CLASS_UNSPECIFIED, 0,
+ DISPATCH_QUEUE_ATTR_INIT(DISPATCH_QOS_UNSPECIFIED, 0,
unspecified, INHERIT, 1, false);
#endif // DISPATCH_VARIANT_STATIC
.do_dispose = _dispatch_queue_dispose,
.do_suspend = _dispatch_queue_suspend,
.do_resume = _dispatch_queue_resume,
+ .do_push = _dispatch_queue_push,
.do_invoke = _dispatch_queue_invoke,
.do_wakeup = _dispatch_queue_wakeup,
.do_debug = dispatch_queue_debug,
.do_suspend = _dispatch_queue_suspend,
.do_resume = _dispatch_queue_resume,
.do_finalize_activation = _dispatch_queue_finalize_activation,
+ .do_push = _dispatch_queue_push,
.do_invoke = _dispatch_queue_invoke,
.do_wakeup = _dispatch_queue_wakeup,
.do_debug = dispatch_queue_debug,
.do_suspend = _dispatch_queue_suspend,
.do_resume = _dispatch_queue_resume,
.do_finalize_activation = _dispatch_queue_finalize_activation,
+ .do_push = _dispatch_queue_push,
.do_invoke = _dispatch_queue_invoke,
.do_wakeup = _dispatch_queue_wakeup,
.do_debug = dispatch_queue_debug,
.do_type = DISPATCH_QUEUE_GLOBAL_ROOT_TYPE,
.do_kind = "global-queue",
.do_dispose = _dispatch_pthread_root_queue_dispose,
+ .do_push = _dispatch_root_queue_push,
+ .do_invoke = NULL,
.do_wakeup = _dispatch_root_queue_wakeup,
.do_debug = dispatch_queue_debug,
);
+
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_main, queue,
.do_type = DISPATCH_QUEUE_SERIAL_TYPE,
.do_kind = "main-queue",
.do_dispose = _dispatch_queue_dispose,
+ .do_push = _dispatch_queue_push,
.do_invoke = _dispatch_queue_invoke,
.do_wakeup = _dispatch_main_queue_wakeup,
.do_debug = dispatch_queue_debug,
.do_type = DISPATCH_QUEUE_RUNLOOP_TYPE,
.do_kind = "runloop-queue",
.do_dispose = _dispatch_runloop_queue_dispose,
+ .do_push = _dispatch_queue_push,
.do_invoke = _dispatch_queue_invoke,
.do_wakeup = _dispatch_runloop_queue_wakeup,
.do_debug = dispatch_queue_debug,
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_mgr, queue,
.do_type = DISPATCH_QUEUE_MGR_TYPE,
.do_kind = "mgr-queue",
+ .do_push = _dispatch_mgr_queue_push,
.do_invoke = _dispatch_mgr_thread,
.do_wakeup = _dispatch_mgr_queue_wakeup,
.do_debug = dispatch_queue_debug,
.do_type = DISPATCH_QUEUE_SPECIFIC_TYPE,
.do_kind = "queue-context",
.do_dispose = _dispatch_queue_specific_queue_dispose,
+ .do_push = (void *)_dispatch_queue_push,
.do_invoke = (void *)_dispatch_queue_invoke,
.do_wakeup = (void *)_dispatch_queue_wakeup,
.do_debug = (void *)dispatch_queue_debug,
.do_suspend = (void *)_dispatch_queue_suspend,
.do_resume = (void *)_dispatch_queue_resume,
.do_finalize_activation = _dispatch_source_finalize_activation,
+ .do_push = (void *)_dispatch_queue_push,
.do_invoke = _dispatch_source_invoke,
.do_wakeup = _dispatch_source_wakeup,
.do_debug = _dispatch_source_debug,
.do_suspend = (void *)_dispatch_queue_suspend,
.do_resume = (void *)_dispatch_queue_resume,
.do_finalize_activation = _dispatch_mach_finalize_activation,
+ .do_push = (void *)_dispatch_queue_push,
.do_invoke = _dispatch_mach_invoke,
.do_wakeup = _dispatch_mach_wakeup,
.do_debug = _dispatch_mach_debug,
.do_kind = "data",
.do_dispose = _dispatch_data_dispose,
.do_debug = _dispatch_data_debug,
+ .do_set_targetq = (void*)_dispatch_data_set_target_queue,
);
#endif
);
-const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = {
- DC_VTABLE_ENTRY(ASYNC_REDIRECT,
- .do_kind = "dc-redirect",
- .do_invoke = _dispatch_async_redirect_invoke),
-#if HAVE_MACH
- DC_VTABLE_ENTRY(MACH_SEND_BARRRIER_DRAIN,
- .do_kind = "dc-mach-send-drain",
- .do_invoke = _dispatch_mach_send_barrier_drain_invoke),
- DC_VTABLE_ENTRY(MACH_SEND_BARRIER,
- .do_kind = "dc-mach-send-barrier",
- .do_invoke = _dispatch_mach_barrier_invoke),
- DC_VTABLE_ENTRY(MACH_RECV_BARRIER,
- .do_kind = "dc-mach-recv-barrier",
- .do_invoke = _dispatch_mach_barrier_invoke),
-#endif
-#if HAVE_PTHREAD_WORKQUEUE_QOS
- DC_VTABLE_ENTRY(OVERRIDE_STEALING,
- .do_kind = "dc-override-stealing",
- .do_invoke = _dispatch_queue_override_invoke),
- DC_VTABLE_ENTRY(OVERRIDE_OWNING,
- .do_kind = "dc-override-owning",
- .do_invoke = _dispatch_queue_override_invoke),
-#endif
-};
-
void
_dispatch_vtable_init(void)
{
#endif // USE_OBJC
}
+#pragma mark -
+#pragma mark dispatch_data globals
+
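+// These destructor blocks are sentinels that are only ever compared against
+// by pointer; they must never actually be invoked, hence the crash bodies.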
+const dispatch_block_t _dispatch_data_destructor_free = ^{
+ DISPATCH_INTERNAL_CRASH(0, "free destructor called");
+};
+
+const dispatch_block_t _dispatch_data_destructor_none = ^{
+ DISPATCH_INTERNAL_CRASH(0, "none destructor called");
+};
+
+#if !HAVE_MACH
+const dispatch_block_t _dispatch_data_destructor_munmap = ^{
+ DISPATCH_INTERNAL_CRASH(0, "munmap destructor called");
+};
+#else
+// _dispatch_data_destructor_munmap is a linker alias to the following
+const dispatch_block_t _dispatch_data_destructor_vm_deallocate = ^{
+ DISPATCH_INTERNAL_CRASH(0, "vmdeallocate destructor called");
+};
+#endif
+
+const dispatch_block_t _dispatch_data_destructor_inline = ^{
+ DISPATCH_INTERNAL_CRASH(0, "inline destructor called");
+};
+
+struct dispatch_data_s _dispatch_data_empty = {
+#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA
+ .do_vtable = DISPATCH_DATA_EMPTY_CLASS,
+#else
+ DISPATCH_GLOBAL_OBJECT_HEADER(data),
+ .do_next = DISPATCH_OBJECT_LISTLESS,
+#endif
+};
+
#pragma mark -
#pragma mark dispatch_bug
_dispatch_temporary_resource_shortage(void)
{
sleep(1);
+ asm(""); // prevent tailcall
}
void *
}
#if HAVE_MACH
+
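+// Same unwind-TSD pattern as the other _dispatch_client_callout* variants:
+// stash and clear the slot across the client callout, then restore it.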
+#undef _dispatch_client_callout3
+DISPATCH_NOINLINE
+void
+_dispatch_client_callout3(void *ctxt, dispatch_mach_reason_t reason,
+ dispatch_mach_msg_t dmsg, dispatch_mach_async_reply_callback_t f)
+{
+ _dispatch_get_tsd_base();
+ void *u = _dispatch_get_unwind_tsd();
+ if (fastpath(!u)) return f(ctxt, reason, dmsg);
+ _dispatch_set_unwind_tsd(NULL);
+ f(ctxt, reason, dmsg);
+ _dispatch_free_unwind_tsd();
+ _dispatch_set_unwind_tsd(u);
+}
+
#undef _dispatch_client_callout4
void
_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
}
}
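+// No-op fallbacks for the !USE_OBJC build; the Objective-C builds are
+// expected to provide real implementations of these hooks elsewhere.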
+void
+_os_object_atfork_prepare(void)
+{
+ return;
+}
+
+void
+_os_object_atfork_parent(void)
+{
+ return;
+}
+
+void
+_os_object_atfork_child(void)
+{
+ return;
+}
+
#pragma mark -
#pragma mark dispatch_autorelease_pool no_objc
}
}
-void*
-_dispatch_last_resort_autorelease_pool_push(void)
+void
+_dispatch_last_resort_autorelease_pool_push(dispatch_invoke_context_t dic)
{
- return _dispatch_autorelease_pool_push();
+ dic->dic_autorelease_pool = _dispatch_autorelease_pool_push();
}
void
-_dispatch_last_resort_autorelease_pool_pop(void *pool)
+_dispatch_last_resort_autorelease_pool_pop(dispatch_invoke_context_t dic)
{
- _dispatch_autorelease_pool_pop(pool);
+ _dispatch_autorelease_pool_pop(dic->dic_autorelease_pool);
+ dic->dic_autorelease_pool = NULL;
}
#endif // DISPATCH_COCOA_COMPAT
#endif // !USE_OBJC
-#pragma mark -
-#pragma mark dispatch_source_types
-
-static void
-dispatch_source_type_timer_init(dispatch_source_t ds,
- dispatch_source_type_t type DISPATCH_UNUSED,
- uintptr_t handle DISPATCH_UNUSED,
- unsigned long mask,
- dispatch_queue_t q)
-{
- if (fastpath(!ds->ds_refs)) {
- ds->ds_refs = _dispatch_calloc(1ul,
- sizeof(struct dispatch_timer_source_refs_s));
- }
- ds->ds_needs_rearm = true;
- ds->ds_is_timer = true;
- if (q == dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_BACKGROUND, 0)
- || q == dispatch_get_global_queue(
- DISPATCH_QUEUE_PRIORITY_BACKGROUND, DISPATCH_QUEUE_OVERCOMMIT)){
- mask |= DISPATCH_TIMER_BACKGROUND; // <rdar://problem/12200216>
- }
- ds_timer(ds->ds_refs).flags = mask;
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_timer = {
- .ke = {
- .filter = DISPATCH_EVFILT_TIMER,
- },
- .mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND|
- DISPATCH_TIMER_WALL_CLOCK,
- .init = dispatch_source_type_timer_init,
-};
-
-static void
-dispatch_source_type_after_init(dispatch_source_t ds,
- dispatch_source_type_t type, uintptr_t handle, unsigned long mask,
- dispatch_queue_t q)
-{
- dispatch_source_type_timer_init(ds, type, handle, mask, q);
- ds->ds_needs_rearm = false;
- ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_AFTER;
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_after = {
- .ke = {
- .filter = DISPATCH_EVFILT_TIMER,
- },
- .init = dispatch_source_type_after_init,
-};
-
-static void
-dispatch_source_type_timer_with_aggregate_init(dispatch_source_t ds,
- dispatch_source_type_t type, uintptr_t handle, unsigned long mask,
- dispatch_queue_t q)
-{
- ds->ds_refs = _dispatch_calloc(1ul,
- sizeof(struct dispatch_timer_source_aggregate_refs_s));
- dispatch_source_type_timer_init(ds, type, handle, mask, q);
- ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_WITH_AGGREGATE;
- ds->dq_specific_q = (void*)handle;
- _dispatch_retain(ds->dq_specific_q);
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_timer_with_aggregate={
- .ke = {
- .filter = DISPATCH_EVFILT_TIMER,
- .ident = ~0ull,
- },
- .mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND,
- .init = dispatch_source_type_timer_with_aggregate_init,
-};
-
-static void
-dispatch_source_type_interval_init(dispatch_source_t ds,
- dispatch_source_type_t type, uintptr_t handle, unsigned long mask,
- dispatch_queue_t q)
-{
- dispatch_source_type_timer_init(ds, type, handle, mask, q);
- ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_INTERVAL;
- unsigned long ident = _dispatch_source_timer_idx(ds->ds_refs);
- ds->ds_dkev->dk_kevent.ident = ds->ds_ident_hack = ident;
- _dispatch_source_set_interval(ds, handle);
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_interval = {
- .ke = {
- .filter = DISPATCH_EVFILT_TIMER,
- .ident = ~0ull,
- },
- .mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND|
- DISPATCH_INTERVAL_UI_ANIMATION,
- .init = dispatch_source_type_interval_init,
-};
-
-static void
-dispatch_source_type_readwrite_init(dispatch_source_t ds,
- dispatch_source_type_t type DISPATCH_UNUSED,
- uintptr_t handle DISPATCH_UNUSED,
- unsigned long mask DISPATCH_UNUSED,
- dispatch_queue_t q DISPATCH_UNUSED)
-{
- ds->ds_is_level = true;
-#ifdef HAVE_DECL_NOTE_LOWAT
- // bypass kernel check for device kqueue support rdar://19004921
- ds->ds_dkev->dk_kevent.fflags = NOTE_LOWAT;
-#endif
- ds->ds_dkev->dk_kevent.data = 1;
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_read = {
- .ke = {
- .filter = EVFILT_READ,
- .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC,
- },
- .init = dispatch_source_type_readwrite_init,
-};
-
-const struct dispatch_source_type_s _dispatch_source_type_write = {
- .ke = {
- .filter = EVFILT_WRITE,
- .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC,
- },
- .init = dispatch_source_type_readwrite_init,
-};
-
-#if DISPATCH_USE_MEMORYSTATUS
-
-#if TARGET_IPHONE_SIMULATOR // rdar://problem/9219483
-static int _dispatch_ios_simulator_memory_warnings_fd = -1;
-static void
-_dispatch_ios_simulator_memorypressure_init(void *context DISPATCH_UNUSED)
-{
- char *e = getenv("SIMULATOR_MEMORY_WARNINGS");
- if (!e) return;
- _dispatch_ios_simulator_memory_warnings_fd = open(e, O_EVTONLY);
- if (_dispatch_ios_simulator_memory_warnings_fd == -1) {
- (void)dispatch_assume_zero(errno);
- }
-}
-#endif
-
-#if TARGET_IPHONE_SIMULATOR
-static void
-dispatch_source_type_memorypressure_init(dispatch_source_t ds,
- dispatch_source_type_t type DISPATCH_UNUSED,
- uintptr_t handle DISPATCH_UNUSED,
- unsigned long mask DISPATCH_UNUSED,
- dispatch_queue_t q DISPATCH_UNUSED)
-{
- static dispatch_once_t pred;
- dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_memorypressure_init);
- handle = (uintptr_t)_dispatch_ios_simulator_memory_warnings_fd;
- mask = NOTE_ATTRIB;
- ds->ds_dkev->dk_kevent.filter = EVFILT_VNODE;
- ds->ds_dkev->dk_kevent.ident = handle;
- ds->ds_dkev->dk_kevent.flags |= EV_CLEAR;
- ds->ds_dkev->dk_kevent.fflags = (uint32_t)mask;
- ds->ds_ident_hack = handle;
- ds->ds_pending_data_mask = mask;
- ds->ds_memorypressure_override = 1;
-}
-#else
-#define dispatch_source_type_memorypressure_init NULL
-#endif
-
-#ifndef NOTE_MEMORYSTATUS_LOW_SWAP
-#define NOTE_MEMORYSTATUS_LOW_SWAP 0x8
-#endif
-
-const struct dispatch_source_type_s _dispatch_source_type_memorypressure = {
- .ke = {
- .filter = EVFILT_MEMORYSTATUS,
- .flags = EV_DISPATCH|EV_UDATA_SPECIFIC,
- },
- .mask = NOTE_MEMORYSTATUS_PRESSURE_NORMAL|NOTE_MEMORYSTATUS_PRESSURE_WARN
- |NOTE_MEMORYSTATUS_PRESSURE_CRITICAL|NOTE_MEMORYSTATUS_LOW_SWAP
- |NOTE_MEMORYSTATUS_PROC_LIMIT_WARN|NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL,
- .init = dispatch_source_type_memorypressure_init,
-};
-
-static void
-dispatch_source_type_vm_init(dispatch_source_t ds,
- dispatch_source_type_t type DISPATCH_UNUSED,
- uintptr_t handle DISPATCH_UNUSED,
- unsigned long mask DISPATCH_UNUSED,
- dispatch_queue_t q DISPATCH_UNUSED)
-{
- // Map legacy vm pressure to memorypressure warning rdar://problem/15907505
- mask = NOTE_MEMORYSTATUS_PRESSURE_WARN;
- ds->ds_dkev->dk_kevent.fflags = (uint32_t)mask;
- ds->ds_pending_data_mask = mask;
- ds->ds_vmpressure_override = 1;
-#if TARGET_IPHONE_SIMULATOR
- dispatch_source_type_memorypressure_init(ds, type, handle, mask, q);
-#endif
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_vm = {
- .ke = {
- .filter = EVFILT_MEMORYSTATUS,
- .flags = EV_DISPATCH|EV_UDATA_SPECIFIC,
- },
- .mask = NOTE_VM_PRESSURE,
- .init = dispatch_source_type_vm_init,
-};
-
-#elif DISPATCH_USE_VM_PRESSURE
-
-const struct dispatch_source_type_s _dispatch_source_type_vm = {
- .ke = {
- .filter = EVFILT_VM,
- .flags = EV_DISPATCH|EV_UDATA_SPECIFIC,
- },
- .mask = NOTE_VM_PRESSURE,
-};
-
-#endif // DISPATCH_USE_VM_PRESSURE
-
-const struct dispatch_source_type_s _dispatch_source_type_signal = {
- .ke = {
- .filter = EVFILT_SIGNAL,
- .flags = EV_UDATA_SPECIFIC,
- },
-};
-
-#if !defined(__linux__)
-static void
-dispatch_source_type_proc_init(dispatch_source_t ds,
- dispatch_source_type_t type DISPATCH_UNUSED,
- uintptr_t handle DISPATCH_UNUSED,
- unsigned long mask DISPATCH_UNUSED,
- dispatch_queue_t q DISPATCH_UNUSED)
-{
- ds->ds_dkev->dk_kevent.fflags |= NOTE_EXIT; // rdar://16655831
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_proc = {
- .ke = {
- .filter = EVFILT_PROC,
- .flags = EV_CLEAR|EV_UDATA_SPECIFIC,
- },
- .mask = NOTE_EXIT|NOTE_FORK|NOTE_EXEC
-#if HAVE_DECL_NOTE_SIGNAL
- |NOTE_SIGNAL
-#endif
-#if HAVE_DECL_NOTE_REAP
- |NOTE_REAP
-#endif
- ,
- .init = dispatch_source_type_proc_init,
-};
-
-const struct dispatch_source_type_s _dispatch_source_type_vnode = {
- .ke = {
- .filter = EVFILT_VNODE,
- .flags = EV_CLEAR|EV_VANISHED|EV_UDATA_SPECIFIC,
- },
- .mask = NOTE_DELETE|NOTE_WRITE|NOTE_EXTEND|NOTE_ATTRIB|NOTE_LINK|
- NOTE_RENAME|NOTE_FUNLOCK
-#if HAVE_DECL_NOTE_REVOKE
- |NOTE_REVOKE
-#endif
-#if HAVE_DECL_NOTE_NONE
- |NOTE_NONE
-#endif
- ,
-};
-
-const struct dispatch_source_type_s _dispatch_source_type_vfs = {
- .ke = {
- .filter = EVFILT_FS,
- .flags = EV_CLEAR|EV_UDATA_SPECIFIC,
- },
- .mask = VQ_NOTRESP|VQ_NEEDAUTH|VQ_LOWDISK|VQ_MOUNT|VQ_UNMOUNT|VQ_DEAD|
- VQ_ASSIST|VQ_NOTRESPLOCK
-#if HAVE_DECL_VQ_UPDATE
- |VQ_UPDATE
-#endif
-#if HAVE_DECL_VQ_VERYLOWDISK
- |VQ_VERYLOWDISK
-#endif
-#if HAVE_DECL_VQ_QUOTA
- |VQ_QUOTA
-#endif
- ,
-};
-
-const struct dispatch_source_type_s _dispatch_source_type_sock = {
-#ifdef EVFILT_SOCK
- .ke = {
- .filter = EVFILT_SOCK,
- .flags = EV_CLEAR|EV_VANISHED|EV_UDATA_SPECIFIC,
- },
- .mask = NOTE_CONNRESET | NOTE_READCLOSED | NOTE_WRITECLOSED |
- NOTE_TIMEOUT | NOTE_NOSRCADDR | NOTE_IFDENIED | NOTE_SUSPEND |
- NOTE_RESUME | NOTE_KEEPALIVE
-#ifdef NOTE_ADAPTIVE_WTIMO
- | NOTE_ADAPTIVE_WTIMO | NOTE_ADAPTIVE_RTIMO
-#endif
-#ifdef NOTE_CONNECTED
- | NOTE_CONNECTED | NOTE_DISCONNECTED | NOTE_CONNINFO_UPDATED
-#endif
-#ifdef NOTE_NOTIFY_ACK
- | NOTE_NOTIFY_ACK
-#endif
- ,
-#endif // EVFILT_SOCK
-};
-#endif // !defined(__linux__)
-
-static void
-dispatch_source_type_data_init(dispatch_source_t ds,
- dispatch_source_type_t type DISPATCH_UNUSED,
- uintptr_t handle DISPATCH_UNUSED,
- unsigned long mask DISPATCH_UNUSED,
- dispatch_queue_t q DISPATCH_UNUSED)
-{
- ds->ds_is_installed = true;
- ds->ds_is_custom_source = true;
- ds->ds_is_direct_kevent = true;
- ds->ds_pending_data_mask = ~0ul;
- ds->ds_needs_rearm = false; // not registered with kevent
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_data_add = {
- .ke = {
- .filter = DISPATCH_EVFILT_CUSTOM_ADD,
- .flags = EV_UDATA_SPECIFIC,
- },
- .init = dispatch_source_type_data_init,
-};
-
-const struct dispatch_source_type_s _dispatch_source_type_data_or = {
- .ke = {
- .filter = DISPATCH_EVFILT_CUSTOM_OR,
- .flags = EV_CLEAR|EV_UDATA_SPECIFIC,
- .fflags = ~0u,
- },
- .init = dispatch_source_type_data_init,
-};
-
-#if HAVE_MACH
-
-static void
-dispatch_source_type_mach_send_init(dispatch_source_t ds,
- dispatch_source_type_t type DISPATCH_UNUSED,
- uintptr_t handle DISPATCH_UNUSED, unsigned long mask,
- dispatch_queue_t q DISPATCH_UNUSED)
-{
- if (!mask) {
- // Preserve legacy behavior that (mask == 0) => DISPATCH_MACH_SEND_DEAD
- ds->ds_dkev->dk_kevent.fflags = DISPATCH_MACH_SEND_DEAD;
- ds->ds_pending_data_mask = DISPATCH_MACH_SEND_DEAD;
- }
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_mach_send = {
- .ke = {
- .filter = DISPATCH_EVFILT_MACH_NOTIFICATION,
- .flags = EV_CLEAR,
- },
- .mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE,
- .init = dispatch_source_type_mach_send_init,
-};
-
-static void
-dispatch_source_type_mach_recv_init(dispatch_source_t ds,
- dispatch_source_type_t type DISPATCH_UNUSED,
- uintptr_t handle DISPATCH_UNUSED,
- unsigned long mask DISPATCH_UNUSED,
- dispatch_queue_t q DISPATCH_UNUSED)
-{
- ds->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE;
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
- if (_dispatch_evfilt_machport_direct_enabled) return;
- ds->ds_dkev->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE;
- ds->ds_dkev->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED);
- ds->ds_is_direct_kevent = false;
-#endif
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_mach_recv = {
- .ke = {
- .filter = EVFILT_MACHPORT,
- .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC,
- },
- .init = dispatch_source_type_mach_recv_init,
-};
-
#pragma mark -
#pragma mark dispatch_mig
+#if HAVE_MACH
void *
dispatch_mach_msg_get_context(mach_msg_header_t *msg)
_dispatch_mach_notify_port_destroyed(mach_port_t notify DISPATCH_UNUSED,
mach_port_t name)
{
- kern_return_t kr;
- // this function should never be called
- (void)dispatch_assume_zero(name);
- kr = mach_port_mod_refs(mach_task_self(), name, MACH_PORT_RIGHT_RECEIVE,-1);
- DISPATCH_VERIFY_MIG(kr);
- (void)dispatch_assume_zero(kr);
- return KERN_SUCCESS;
+ DISPATCH_INTERNAL_CRASH(name, "unexpected receipt of port-destroyed");
+ return KERN_FAILURE;
}
kern_return_t
-_dispatch_mach_notify_no_senders(mach_port_t notify,
- mach_port_mscount_t mscnt DISPATCH_UNUSED)
+_dispatch_mach_notify_no_senders(mach_port_t notify DISPATCH_UNUSED,
+ mach_port_mscount_t mscnt)
{
- // this function should never be called
- (void)dispatch_assume_zero(notify);
- return KERN_SUCCESS;
+ DISPATCH_INTERNAL_CRASH(mscnt, "unexpected receipt of no-more-senders");
+ return KERN_FAILURE;
}
kern_return_t