git.saurik.com Git - apple/libdispatch.git/commitdiff
snapshot: libdispatch-913.1.6.tar.gz   tags: macos-1013, v913.1.6
author    Apple <opensource@apple.com>
Tue, 26 Sep 2017 16:46:25 +0000 (16:46 +0000)
committer Apple <opensource@apple.com>
Tue, 26 Sep 2017 16:46:25 +0000 (16:46 +0000)
139 files changed:
.gitmodules
CMakeLists.txt [new file with mode: 0644]
INSTALL.md
Makefile.am
PATCHES
cmake/config.h.in [new file with mode: 0644]
cmake/modules/DTrace.cmake [new file with mode: 0644]
cmake/modules/DispatchAppleOptions.cmake [new file with mode: 0644]
cmake/modules/FindLibRT.cmake [new file with mode: 0644]
cmake/modules/SwiftSupport.cmake [new file with mode: 0644]
config/config.h
configure.ac
dispatch/CMakeLists.txt [new file with mode: 0644]
dispatch/base.h
dispatch/block.h
dispatch/darwin/module.modulemap
dispatch/data.h
dispatch/dispatch.h
dispatch/generic/module.modulemap
dispatch/group.h
dispatch/introspection.h
dispatch/io.h
dispatch/object.h
dispatch/once.h
dispatch/queue.h
dispatch/semaphore.h
dispatch/source.h
dispatch/time.h
libdispatch.xcodeproj/project.pbxproj
man/CMakeLists.txt [new file with mode: 0644]
man/dispatch_apply.3
man/dispatch_queue_create.3
man/dispatch_source_create.3
man/dispatch_time.3
os/CMakeLists.txt [new file with mode: 0644]
os/firehose_buffer_private.h
os/firehose_server_private.h
os/linux_base.h
os/object.h
os/object_private.h
os/voucher_activity_private.h
os/voucher_private.h
private/CMakeLists.txt [new file with mode: 0644]
private/benchmark.h
private/darwin/module.modulemap
private/data_private.h
private/generic/module.modulemap
private/introspection_private.h
private/io_private.h
private/layout_private.h
private/mach_private.h
private/private.h
private/queue_private.h
private/source_private.h
src/BlocksRuntime/data.c
src/BlocksRuntime/runtime.c
src/CMakeLists.txt [new file with mode: 0644]
src/Makefile.am
src/allocator.c
src/apply.c
src/block.cpp
src/data.c
src/data.m
src/data_internal.h
src/event/event.c [new file with mode: 0644]
src/event/event_config.h [new file with mode: 0644]
src/event/event_epoll.c [new file with mode: 0644]
src/event/event_internal.h [new file with mode: 0644]
src/event/event_kevent.c [new file with mode: 0644]
src/event/workqueue.c [new file with mode: 0644]
src/event/workqueue_internal.h [new file with mode: 0644]
src/firehose/firehose.defs
src/firehose/firehose_buffer.c
src/firehose/firehose_buffer_internal.h
src/firehose/firehose_inline_internal.h
src/firehose/firehose_internal.h
src/firehose/firehose_reply.defs
src/firehose/firehose_server.c
src/firehose/firehose_server_internal.h
src/init.c
src/inline_internal.h
src/internal.h
src/introspection.c
src/introspection_internal.h
src/io.c
src/io_internal.h
src/libdispatch.codes
src/mach.c [new file with mode: 0644]
src/mach_internal.h [new file with mode: 0644]
src/object.c
src/object.m
src/object_internal.h
src/once.c
src/provider.d
src/queue.c
src/queue_internal.h
src/semaphore.c
src/semaphore_internal.h
src/shims.h
src/shims/android_stubs.h [new file with mode: 0644]
src/shims/atomic.h
src/shims/atomic_sfb.h
src/shims/getprogname.h
src/shims/hw_config.h
src/shims/linux_stubs.c
src/shims/linux_stubs.h
src/shims/lock.c
src/shims/lock.h
src/shims/perfmon.h
src/shims/priority.h [new file with mode: 0644]
src/shims/time.h
src/shims/tsd.h
src/shims/yield.h
src/source.c
src/source_internal.h
src/swift/Data.swift
src/swift/Dispatch.apinotes
src/swift/DispatchStubs.cc
src/swift/IO.swift
src/swift/Private.swift
src/swift/Queue.swift
src/swift/Source.swift
src/swift/Time.swift
src/swift/Wrapper.swift
src/time.c
src/trace.h
src/voucher.c
src/voucher_internal.h
tools/voucher_trace.d [new file with mode: 0755]
xcodeconfig/libdispatch-dyld-stub.xcconfig
xcodeconfig/libdispatch-mp-static.xcconfig
xcodeconfig/libdispatch-resolved.xcconfig
xcodeconfig/libdispatch-resolver_iphoneos.order [deleted file]
xcodeconfig/libdispatch-up-static.xcconfig
xcodeconfig/libdispatch.aliases
xcodeconfig/libdispatch.xcconfig
xcodeconfig/libdispatch_iphoneos.order [deleted file]
xcodeconfig/libfirehose.xcconfig
xcodeconfig/libfirehose_kernel.xcconfig

diff --git a/.gitmodules b/.gitmodules
index 009b5fbf1c4678e2d17c1338af1adcd20927ccd1..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644 (file)
@@ -1,6 +0,0 @@
-[submodule "libpwq"]
-       path = libpwq
-       url = https://github.com/mheily/libpwq.git
-[submodule "libkqueue"]
-       path = libkqueue
-       url = https://github.com/mheily/libkqueue.git
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644 (file)
index 0000000..f6b078e
--- /dev/null
@@ -0,0 +1,229 @@
+
+cmake_minimum_required(VERSION 3.4.3)
+
+list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules")
+
+project(dispatch
+        VERSION 1.3
+        LANGUAGES C CXX)
+enable_testing()
+
+set(CMAKE_C_VISIBILITY_PRESET hidden)
+set(CMAKE_CXX_STANDARD 11)
+
+set(CMAKE_THREAD_PREFER_PTHREAD TRUE)
+set(THREADS_PREFER_PTHREAD_FLAG TRUE)
+find_package(Threads REQUIRED)
+
+include(CheckCSourceCompiles)
+include(CheckFunctionExists)
+include(CheckIncludeFiles)
+include(CheckLibraryExists)
+include(CheckSymbolExists)
+include(GNUInstallDirs)
+
+set(WITH_BLOCKS_RUNTIME "" CACHE PATH "Path to blocks runtime")
+
+include(DispatchAppleOptions)
+
+option(ENABLE_DISPATCH_INIT_CONSTRUCTOR "enable libdispatch_init as a constructor" ON)
+set(USE_LIBDISPATCH_INIT_CONSTRUCTOR ${ENABLE_DISPATCH_INIT_CONSTRUCTOR})
+
+# TODO(compnerd) swift options
+
+option(BUILD_SHARED_LIBS "build shared libraries" ON)
+
+option(ENABLE_TESTING "build libdispatch tests" ON)
+
+if(CMAKE_SYSTEM_NAME STREQUAL Linux OR
+   CMAKE_SYSTEM_NAME STREQUAL Android)
+  set(USE_GOLD_LINKER_DEFAULT ON)
+else()
+  set(USE_GOLD_LINKER_DEFAULT OFF)
+endif()
+option(USE_GOLD_LINKER "use the gold linker" ${USE_GOLD_LINKER_DEFAULT})
+
+option(ENABLE_THREAD_LOCAL_STORAGE "enable usage of thread local storage via __thread" ON)
+set(DISPATCH_USE_THREAD_LOCAL_STORAGE ${ENABLE_THREAD_LOCAL_STORAGE})
+
+if(CMAKE_SYSTEM_NAME STREQUAL Linux OR
+   CMAKE_SYSTEM_NAME STREQUAL Android OR
+   CMAKE_SYSTEM_NAME STREQUAL Windows)
+  set(ENABLE_INTERNAL_PTHREAD_WORKQUEUES_DEFAULT ON)
+else()
+  set(ENABLE_INTERNAL_PTHREAD_WORKQUEUES_DEFAULT OFF)
+endif()
+option(ENABLE_INTERNAL_PTHREAD_WORKQUEUES "use libdispatch's own implementation of pthread workqueues" ${ENABLE_INTERNAL_PTHREAD_WORKQUEUES_DEFAULT})
+if(ENABLE_INTERNAL_PTHREAD_WORKQUEUES)
+  set(DISPATCH_USE_INTERNAL_WORKQUEUE 1)
+  set(HAVE_PTHREAD_WORKQUEUES 0)
+else()
+  check_include_files(pthread/workqueue_private.h HAVE_PTHREAD_WORKQUEUE_PRIVATE_H)
+  check_include_files(pthread_workqueue.h HAVE_PTHREAD_WORKQUEUE_H)
+  if(HAVE_PTHREAD_WORKQUEUE_PRIVATE_H AND HAVE_PTHREAD_WORKQUEUE_H)
+    set(HAVE_PTHREAD_WORKQUEUES 1)
+    set(DISPATCH_USE_INTERNAL_WORKQUEUE 0)
+  else()
+    set(HAVE_PTHREAD_WORKQUEUES 0)
+    set(DISPATCH_USE_INTERNAL_WORKQUEUE 1)
+  endif()
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL Linux OR
+   CMAKE_SYSTEM_NAME STREQUAL Android OR
+   CMAKE_SYSTEM_NAME STREQUAL Windows)
+  add_library(BlocksRuntime
+              STATIC
+                ${CMAKE_SOURCE_DIR}/src/BlocksRuntime/data.c
+                ${CMAKE_SOURCE_DIR}/src/BlocksRuntime/runtime.c)
+  set_target_properties(BlocksRuntime
+                        PROPERTIES
+                          POSITION_INDEPENDENT_CODE TRUE)
+  if(HAVE_OBJC AND CMAKE_DL_LIBS)
+    set_target_properties(BlocksRuntime
+                          PROPERTIES
+                            INTERFACE_LINK_LIBRARIES ${CMAKE_DL_LIBS})
+  endif()
+  set(WITH_BLOCKS_RUNTIME "${CMAKE_SOURCE_DIR}/src/BlocksRuntime" CACHE PATH "Path to blocks runtime" FORCE)
+else()
+  # TODO(compnerd) support system installed BlocksRuntime
+  # find_package(BlocksRuntime REQUIRED)
+endif()
+
+check_symbol_exists(__GNU_LIBRARY__ "features.h" _GNU_SOURCE)
+if(_GNU_SOURCE)
+  set(CMAKE_REQUIRED_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS} -D_GNU_SOURCE)
+endif()
+
+check_c_source_compiles("void __attribute__((__noreturn__)) main() { __builtin_trap(); }"
+                        __BUILTIN_TRAP)
+if(__BUILTIN_TRAP)
+  set(HAVE_NORETURN_BUILTIN_TRAP 1)
+endif()
+
+find_package(LibRT)
+
+check_function_exists(_pthread_workqueue_init HAVE__PTHREAD_WORKQUEUE_INIT)
+check_function_exists(getprogname HAVE_GETPROGNAME)
+check_function_exists(mach_absolute_time HAVE_MACH_ABSOLUTE_TIME)
+check_function_exists(mach_approximate_time HAVE_MACH_APPROXIMATE_TIME)
+check_function_exists(mach_port_construct HAVE_MACH_PORT_CONSTRUCT)
+check_function_exists(malloc_create_zone HAVE_MALLOC_CREATE_ZONE)
+check_function_exists(pthread_key_init_np HAVE_PTHREAD_KEY_INIT_NP)
+check_function_exists(pthread_main_np HAVE_PTHREAD_MAIN_NP)
+check_function_exists(pthread_workqueue_setdispatch_np HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP)
+check_function_exists(strlcpy HAVE_STRLCPY)
+check_function_exists(sysconf HAVE_SYSCONF)
+
+if(NOT HAVE_STRLCPY AND NOT HAVE_GETPROGNAME)
+  include(FindPkgConfig)
+  pkg_check_modules(BSD_OVERLAY libbsd-overlay)
+  if(BSD_OVERLAY_FOUND)
+    set(HAVE_STRLCPY 1 CACHE INTERNAL "Have function strlcpy" FORCE)
+    set(HAVE_GETPROGNAME 1 CACHE INTERNAL "Have function getprogname" FORCE)
+  endif()
+endif()
+
+find_package(Threads REQUIRED)
+
+check_include_files("TargetConditionals.h" HAVE_TARGETCONDITIONALS_H)
+check_include_files("dlfcn.h" HAVE_DLFCN_H)
+check_include_files("fcntl.h" HAVE_FCNTL_H)
+check_include_files("inttypes.h" HAVE_INTTYPES_H)
+check_include_files("libkern/OSAtomic.h" HAVE_LIBKERN_OSATOMIC_H)
+check_include_files("libkern/OSCrossEndian.h" HAVE_LIBKERN_OSCROSSENDIAN_H)
+check_include_files("libproc_internal.h" HAVE_LIBPROC_INTERNAL_H)
+check_include_files("mach/mach.h" HAVE_MACH)
+if(HAVE_MACH)
+  set(__DARWIN_NON_CANCELABLE 1)
+  set(USE_MACH_SEM 1)
+else()
+  set(__DARWIN_NON_CANCELABLE 0)
+  set(USE_MACH_SEM 0)
+endif()
+check_include_files("malloc/malloc.h" HAVE_MALLOC_MALLOC_H)
+check_include_files("memory.h" HAVE_MEMORY_H)
+check_include_files("pthread/qos.h" HAVE_PTHREAD_QOS_H)
+check_include_files("pthread/workqueue_private.h" HAVE_PTHREAD_WORKQUEUE_PRIVATE_H)
+check_include_files("pthread_machdep.h" HAVE_PTHREAD_MACHDEP_H)
+check_include_files("pthread_np.h" HAVE_PTHREAD_NP_H)
+check_include_files("pthread_workqueue.h" HAVE_PTHREAD_WORKQUEUE_H)
+check_include_files("stdint.h" HAVE_STDINT_H)
+check_include_files("stdlib.h" HAVE_STDLIB_H)
+check_include_files("string.h" HAVE_STRING_H)
+check_include_files("strings.h" HAVE_STRINGS_H)
+check_include_files("sys/cdefs.h" HAVE_SYS_CDEFS_H)
+check_include_files("sys/guarded.h" HAVE_SYS_GUARDED_H)
+check_include_files("sys/stat.h" HAVE_SYS_STAT_H)
+check_include_files("sys/types.h" HAVE_SYS_TYPES_H)
+check_include_files("unistd.h" HAVE_UNISTD_H)
+check_include_files("objc/objc-internal.h" HAVE_OBJC)
+
+check_library_exists(pthread sem_init "" USE_POSIX_SEM)
+if(CMAKE_SYSTEM_NAME STREQUAL Windows)
+  add_definitions(-DTARGET_OS_WIN32)
+  add_definitions(-DUSE_WIN32_SEM)
+endif()
+
+check_symbol_exists(CLOCK_UPTIME "time.h" HAVE_DECL_CLOCK_UPTIME)
+check_symbol_exists(CLOCK_UPTIME_FAST "time.h" HAVE_DECL_CLOCK_UPTIME_FAST)
+check_symbol_exists(CLOCK_MONOTONIC "time.h" HAVE_DECL_CLOCK_MONOTONIC)
+check_symbol_exists(CLOCK_REALTIME "time.h" HAVE_DECL_CLOCK_REALTIME)
+check_symbol_exists(FD_COPY "sys/select.h" HAVE_DECL_FD_COPY)
+check_symbol_exists(NOTE_LOWAT "sys/event.h" HAVE_DECL_NOTE_LOWAT)
+check_symbol_exists(NOTE_NONE "sys/event.h" HAVE_DECL_NOTE_NONE)
+check_symbol_exists(NOTE_REAP "sys/event.h" HAVE_DECL_NOTE_REAP)
+check_symbol_exists(NOTE_REVOKE "sys/event.h" HAVE_DECL_NOTE_REVOKE)
+check_symbol_exists(NOTE_SIGNAL "sys/event.h" HAVE_DECL_NOTE_SIGNAL)
+check_symbol_exists(POSIX_SPAWN_START_SUSPENDED "sys/spawn.h" HAVE_DECL_POSIX_SPAWN_START_SUSPENDED)
+check_symbol_exists(SIGEMT "signal.h" HAVE_DECL_SIGEMT)
+check_symbol_exists(VQ_DESIRED_DISK "sys/mount.h" HAVE_DECL_VQ_DESIRED_DISK)
+check_symbol_exists(VQ_NEARLOWDISK "sys/mount.h" HAVE_DECL_VQ_NEARLOWDISK)
+check_symbol_exists(VQ_QUOTA "sys/mount.h" HAVE_DECL_VQ_QUOTA)
+check_symbol_exists(VQ_UPDATE "sys/mount.h" HAVE_DECL_VQ_UPDATE)
+check_symbol_exists(VQ_VERYLOWDISK "sys/mount.h" HAVE_DECL_VQ_VERYLOWDISK)
+
+check_symbol_exists(program_invocation_name "errno.h" HAVE_DECL_PROGRAM_INVOCATION_SHORT_NAME)
+
+find_program(dtrace_EXECUTABLE dtrace)
+if(dtrace_EXECUTABLE)
+  add_definitions(-DDISPATCH_USE_DTRACE=1)
+else()
+  add_definitions(-DDISPATCH_USE_DTRACE=0)
+endif()
+
+find_program(leaks_EXECUTABLE leaks)
+if(leaks_EXECUTABLE)
+  set(HAVE_LEAKS TRUE)
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL Darwin)
+  add_custom_command(OUTPUT
+                       "${CMAKE_SOURCE_DIR}/dispatch/module.modulemap"
+                       "${CMAKE_SOURCE_DIR}/private/module.modulemap"
+                     COMMAND
+                       ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/darwin/module.modulemap" "${CMAKE_SOURCE_DIR}/dispatch/module.modulemap"
+                     COMMAND
+                       ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/darwin/module.modulemap" "${CMAKE_SOURCE_DIR}/private/module.modulemap")
+else()
+  add_custom_command(OUTPUT
+                       "${CMAKE_SOURCE_DIR}/dispatch/module.modulemap"
+                       "${CMAKE_SOURCE_DIR}/private/module.modulemap"
+                     COMMAND
+                       ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/generic/module.modulemap" "${CMAKE_SOURCE_DIR}/dispatch/module.modulemap"
+                     COMMAND
+                       ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/generic/module.modulemap" "${CMAKE_SOURCE_DIR}/private/module.modulemap")
+endif()
+configure_file("${CMAKE_SOURCE_DIR}/cmake/config.h.in"
+               "${CMAKE_BINARY_DIR}/config/config_ac.h")
+add_definitions(-DHAVE_CONFIG_H)
+
+add_subdirectory(dispatch)
+add_subdirectory(man)
+add_subdirectory(os)
+add_subdirectory(private)
+add_subdirectory(src)
+if(ENABLE_TESTING)
+  add_subdirectory(tests)
+endif()
+
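The options above are plain CMake cache variables, so a build can pin them ahead of time with a cache preload script instead of a string of -D flags. A minimal sketch, assuming a hypothetical caches/dispatch.cmake passed as `cmake -C caches/dispatch.cmake <source dir>`; the file name and chosen values are illustrative only:

```
# Hypothetical cache preload for the options defined in the new
# CMakeLists.txt above; every option name comes from that file.
set(ENABLE_INTERNAL_PTHREAD_WORKQUEUES ON CACHE BOOL
    "use libdispatch's own implementation of pthread workqueues")
set(ENABLE_DISPATCH_INIT_CONSTRUCTOR ON CACHE BOOL
    "enable libdispatch_init as a constructor")
set(USE_GOLD_LINKER ON CACHE BOOL "use the gold linker")
set(ENABLE_TESTING OFF CACHE BOOL "build libdispatch tests")
```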
diff --git a/INSTALL.md b/INSTALL.md
index fd999e7046d00e24115ccfe006a9751f2f40f3a5..9940c2cf7b58ec21e2d20e8fabbe3027ddeebedc 100644 (file)
@@ -87,11 +87,6 @@ Specify the path to Apple's libpthread package, so that appropriate headers
 Specify the path to Apple's libplatform package, so that appropriate headers
        can be found and used.
 
-`--with-apple-libclosure-source`
-
-Specify the path to Apple's Libclosure package, so that appropriate headers
-       can be found and used.
-
 `--with-apple-xnu-source`
 
 Specify the path to Apple's XNU package, so that appropriate headers can be
@@ -104,11 +99,6 @@ On systems where -fblocks is supported, specify an additional library path in wh
 The following options are likely to only be useful when building libdispatch on
 OS X as a replacement for /usr/lib/system/libdispatch.dylib:
 
-`--with-apple-objc4-source`
-
-Specify the path to Apple's objc4 package, so that appropriate headers can
-       be found and used.
-
 `--disable-libdispatch-init-constructor`
 
 Do not tag libdispatch's init routine as __constructor, in which case it must be run manually before libdispatch routines can be called. This is the default when building on OS X. For /usr/lib/system/libdispatch.dylib the init routine is called automatically during process start.
@@ -131,9 +121,7 @@ libdispatch for /usr/lib/system on OS X El Capitan:
                --enable-apple-tsd-optimizations \
                --with-apple-libpthread-source=/path/to/10.11.0/libpthread-137.1.1 \
                --with-apple-libplatform-source=/path/to/10.11.0/libplatform-73.1.1 \
-               --with-apple-libclosure-source=/path/to/10.11.0/libclosure-65 \
                --with-apple-xnu-source=/path/to/10.11.0/xnu-3247.1.106 \
-               --with-apple-objc4-source=/path/to/10.11.0/objc4-680
        make check
 
 ### Building and installing for FreeBSD
@@ -152,14 +140,7 @@ on Ubuntu; currently supported versions are 14.04, 15.10 and 16.04.
 
 1. The first thing to do is install required packages:
 
- 1a. Install build tools and clang compiler.
-    `sudo apt-get install autoconf libtool pkg-config clang`
-
- 1b. Install dtrace (to generate provider.h)
-    `sudo apt-get install systemtap-sdt-dev`
-
- 1c. Install additional libdispatch dependencies
-    `sudo apt-get install libblocksruntime-dev libkqueue-dev libbsd-dev`
+    `sudo apt-get install autoconf libtool pkg-config clang systemtap-sdt-dev libbsd-dev linux-libc-dev`
 
     Note: compiling libdispatch requires clang 3.8 or better and
 the gold linker. If the default clang on your Ubuntu version is
@@ -167,16 +148,7 @@ too old, see http://apt.llvm.org/ to install a newer version.
 On older Ubuntu releases, you may need to install binutils-gold
 to get the gold linker.
 
-2. Initialize git submodules.
-  We are using git submodules to incorporate specific revisions of the
-  upstream pthread_workqueue and libkqueue projects into the build.
-
-    ```
-    git submodule init
-    git submodule update
-    ```
-
-3. Build (as in the general instructions above)
+2. Build (as in the general instructions above)
 
     ```
     sh autogen.sh
diff --git a/Makefile.am b/Makefile.am
index cdc642f4d2d17c83e0b47f1093d68b9859b47d9f..f1be0295163728902360a432f0ed7fd1461dbb1e 100644 (file)
@@ -4,23 +4,17 @@
 
 ACLOCAL_AMFLAGS = -I m4
 
-if BUILD_OWN_PTHREAD_WORKQUEUES
-  MAYBE_PTHREAD_WORKQUEUES = libpwq
-endif
-
-if BUILD_OWN_KQUEUES
-  MAYBE_KQUEUES = libkqueue
+if BUILD_TESTS
+  MAYBE_TESTS = tests
 endif
 
 SUBDIRS=                                               \
        dispatch                                        \
-       $(MAYBE_PTHREAD_WORKQUEUES)     \
-       $(MAYBE_KQUEUES)                        \
        man                                                     \
        os                                                      \
        private                                         \
        src                                                     \
-       tests
+       $(MAYBE_TESTS)
 
 EXTRA_DIST=                                    \
        README.md                               \
diff --git a/PATCHES b/PATCHES
index 0783ec905a7b7c70b66ecd59b6c03ed245b1e22c..c3d28b330c454a48865635d8e84ca6abc4894031 100644 (file)
--- a/PATCHES
+++ b/PATCHES
@@ -293,3 +293,63 @@ github commits starting with 29bdc2f from
 [367bd95] APPLIED rdar://28486911
 [152985f] APPLIED rdar://28486911
 [ba7802e] APPLIED rdar://28486911
+[92773e0] APPLIED rdar://30568673
+[548a1b9] APPLIED rdar://30568673
+[b628e5c] APPLIED rdar://30568673
+[a055ddb] APPLIED rdar://30568673
+[012f48b] APPLIED rdar://30568673
+[353adba] APPLIED rdar://30568673
+[eb730eb] APPLIED rdar://30568673
+[ac16fbb] APPLIED rdar://30568673
+[967876e] APPLIED rdar://30568673
+[44c2291] APPLIED rdar://30568673
+[ceb1fac] APPLIED rdar://30568673
+[c95febb] APPLIED rdar://30568673
+[b6e9cf4] APPLIED rdar://30568673
+[e199473] APPLIED rdar://30568673
+[3767ac7] APPLIED rdar://30568673
+[10eb0e4] APPLIED rdar://30568673
+[787dd92] APPLIED rdar://30568673
+[ba4cac5] APPLIED rdar://30568673
+[7974138] APPLIED rdar://30568673
+[cd12dcb] APPLIED rdar://32283666
+[ff05109] APPLIED rdar://32283666
+[73315ee] APPLIED rdar://32283666
+[fcc1924] APPLIED rdar://32283666
+[272e818] APPLIED rdar://32283666
+[b6f8908] APPLIED rdar://32283666
+[a6c16d0] APPLIED rdar://32283666
+[1cc64e1] APPLIED rdar://32283666
+[d137aa4] APPLIED rdar://32283666
+[a69853f] APPLIED rdar://32283666
+[eea0667] APPLIED rdar://32283666
+[f84d21d] APPLIED rdar://32283666
+[3da8398] APPLIED rdar://32283666
+[2df80a3] APPLIED rdar://32283666
+[97a2f06] APPLIED rdar://32283666
+[f76b8f5] APPLIED rdar://32283666
+[3828fbb] APPLIED rdar://32283666
+[5e8789e] APPLIED rdar://32283666
+[3fba60a] APPLIED rdar://32283666
+[d6eb245] APPLIED rdar://32283666
+[0b6c22e] APPLIED rdar://33531111
+[5a3c02a] APPLIED rdar://33531111
+[22df1e7] APPLIED rdar://33531111
+[21273de] APPLIED rdar://33531111
+[dc1857c] APPLIED rdar://33531111
+[56f36b6] APPLIED rdar://33531111
+[c87c6bb] APPLIED rdar://33531111
+[b791d23] APPLIED rdar://33531111
+[c2d0c49] APPLIED rdar://33531111
+[1d25040] APPLIED rdar://33531111
+[ab89c6c] APPLIED rdar://33531111
+[e591e7e] APPLIED rdar://33531111
+[ded5bab] APPLIED rdar://33531111
+[ce90d0c] APPLIED rdar://33531111
+[69c8f3e] APPLIED rdar://33531111
+[23a3a84] APPLIED rdar://33531111
+[79b7529] APPLIED rdar://33531111
+[f8e71eb] APPLIED rdar://33531111
+[8947dcf] APPLIED rdar://33531111
+[5ad9208] APPLIED rdar://33531111
+[698d085] APPLIED rdar://33531111
diff --git a/cmake/config.h.in b/cmake/config.h.in
new file mode 100644 (file)
index 0000000..6696e98
--- /dev/null
@@ -0,0 +1,248 @@
+
+/* Define if building pthread work queues from source */
+#cmakedefine01 DISPATCH_USE_INTERNAL_WORKQUEUE
+
+/* Enable usage of thread local storage via __thread */
+#cmakedefine01 DISPATCH_USE_THREAD_LOCAL_STORAGE
+
+/* Define to 1 if you have the declaration of `CLOCK_MONOTONIC', and to 0 if
+   you don't. */
+#cmakedefine01 HAVE_DECL_CLOCK_MONOTONIC
+
+/* Define to 1 if you have the declaration of `CLOCK_REALTIME', and to 0 if
+   you don't. */
+#cmakedefine01 HAVE_DECL_CLOCK_REALTIME
+
+/* Define to 1 if you have the declaration of `CLOCK_UPTIME', and to 0 if you
+   don't. */
+#cmakedefine01 HAVE_DECL_CLOCK_UPTIME
+
+/* Define to 1 if you have the declaration of `CLOCK_UPTIME_FAST', and to 0 if
+   you don't. */
+#cmakedefine01 HAVE_DECL_CLOCK_UPTIME_FAST
+
+/* Define to 1 if you have the declaration of `FD_COPY', and to 0 if you
+   don't. */
+#cmakedefine01 HAVE_DECL_FD_COPY
+
+/* Define to 1 if you have the declaration of `NOTE_LOWAT', and to 0 if you
+   don't. */
+#cmakedefine01 HAVE_DECL_NOTE_LOWAT
+
+/* Define to 1 if you have the declaration of `NOTE_NONE', and to 0 if you
+   don't. */
+#cmakedefine01 HAVE_DECL_NOTE_NONE
+
+/* Define to 1 if you have the declaration of `NOTE_REAP', and to 0 if you
+   don't. */
+#cmakedefine01 HAVE_DECL_NOTE_REAP
+
+/* Define to 1 if you have the declaration of `NOTE_REVOKE', and to 0 if you
+   don't. */
+#cmakedefine01 HAVE_DECL_NOTE_REVOKE
+
+/* Define to 1 if you have the declaration of `NOTE_SIGNAL', and to 0 if you
+   don't. */
+#cmakedefine01 HAVE_DECL_NOTE_SIGNAL
+
+/* Define to 1 if you have the declaration of `POSIX_SPAWN_START_SUSPENDED',
+   and to 0 if you don't. */
+#cmakedefine01 HAVE_DECL_POSIX_SPAWN_START_SUSPENDED
+
+/* Define to 1 if you have the declaration of `program_invocation_short_name',
+   and to 0 if you don't. */
+#cmakedefine01 HAVE_DECL_PROGRAM_INVOCATION_SHORT_NAME
+
+/* Define to 1 if you have the declaration of `SIGEMT', and to 0 if you don't.
+   */
+#cmakedefine01 HAVE_DECL_SIGEMT
+
+/* Define to 1 if you have the declaration of `VQ_DESIRED_DISK', and to 0 if
+   you don't. */
+#cmakedefine01 HAVE_DECL_VQ_DESIRED_DISK
+
+/* Define to 1 if you have the declaration of `VQ_NEARLOWDISK', and to 0 if
+   you don't. */
+#cmakedefine01 HAVE_DECL_VQ_NEARLOWDISK
+
+/* Define to 1 if you have the declaration of `VQ_QUOTA', and to 0 if you
+   don't. */
+#cmakedefine01 HAVE_DECL_VQ_QUOTA
+
+/* Define to 1 if you have the declaration of `VQ_UPDATE', and to 0 if you
+   don't. */
+#cmakedefine01 HAVE_DECL_VQ_UPDATE
+
+/* Define to 1 if you have the declaration of `VQ_VERYLOWDISK', and to 0 if
+   you don't. */
+#cmakedefine01 HAVE_DECL_VQ_VERYLOWDISK
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#cmakedefine01 HAVE_DLFCN_H
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#cmakedefine01 HAVE_FCNTL_H
+
+/* Define to 1 if you have the `getprogname' function. */
+#cmakedefine01 HAVE_GETPROGNAME
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#cmakedefine01 HAVE_INTTYPES_H
+
+/* Define if Apple leaks program is present */
+#cmakedefine HAVE_LEAKS
+
+/* Define to 1 if you have the <libkern/OSAtomic.h> header file. */
+#cmakedefine HAVE_LIBKERN_OSATOMIC_H
+
+/* Define to 1 if you have the <libkern/OSCrossEndian.h> header file. */
+#cmakedefine HAVE_LIBKERN_OSCROSSENDIAN_H
+
+/* Define to 1 if you have the <libproc_internal.h> header file. */
+#cmakedefine HAVE_LIBPROC_INTERNAL_H
+
+/* Define if mach is present */
+#cmakedefine HAVE_MACH
+
+/* Define to 1 if you have the `mach_absolute_time' function. */
+#cmakedefine HAVE_MACH_ABSOLUTE_TIME
+
+/* Define to 1 if you have the `mach_approximate_time' function. */
+#cmakedefine HAVE_MACH_APPROXIMATE_TIME
+
+/* Define to 1 if you have the `mach_port_construct' function. */
+#cmakedefine HAVE_MACH_PORT_CONSTRUCT
+
+/* Define to 1 if you have the `malloc_create_zone' function. */
+#cmakedefine HAVE_MALLOC_CREATE_ZONE
+
+/* Define to 1 if you have the <malloc/malloc.h> header file. */
+#cmakedefine HAVE_MALLOC_MALLOC_H
+
+/* Define to 1 if you have the <memory.h> header file. */
+#cmakedefine01 HAVE_MEMORY_H
+
+/* Define if __builtin_trap marked noreturn */
+#cmakedefine01 HAVE_NORETURN_BUILTIN_TRAP
+
+/* Define if you have the Objective-C runtime */
+#cmakedefine HAVE_OBJC
+
+/* Define to 1 if you have the `pthread_key_init_np' function. */
+#cmakedefine HAVE_PTHREAD_KEY_INIT_NP
+
+/* Define to 1 if you have the <pthread_machdep.h> header file. */
+#cmakedefine HAVE_PTHREAD_MACHDEP_H
+
+/* Define to 1 if you have the `pthread_main_np' function. */
+#cmakedefine HAVE_PTHREAD_MAIN_NP
+
+/* Define to 1 if you have the <pthread_np.h> header file. */
+#cmakedefine HAVE_PTHREAD_NP_H
+
+/* Define to 1 if you have the <pthread/qos.h> header file. */
+#cmakedefine HAVE_PTHREAD_QOS_H
+
+/* Define if pthread work queues are present */
+#cmakedefine01 HAVE_PTHREAD_WORKQUEUES
+
+/* Define to 1 if you have the <pthread_workqueue.h> header file. */
+#cmakedefine HAVE_PTHREAD_WORKQUEUE_H
+
+/* Define to 1 if you have the <pthread/workqueue_private.h> header file. */
+#cmakedefine HAVE_PTHREAD_WORKQUEUE_PRIVATE_H
+
+/* Define to 1 if you have the `pthread_workqueue_setdispatch_np' function. */
+#cmakedefine HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#cmakedefine01 HAVE_STDINT_H
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#cmakedefine01 HAVE_STDLIB_H
+
+/* Define to 1 if you have the <strings.h> header file. */
+#cmakedefine01 HAVE_STRINGS_H
+
+/* Define to 1 if you have the <string.h> header file. */
+#cmakedefine01 HAVE_STRING_H
+
+/* Define to 1 if you have the `strlcpy' function. */
+#cmakedefine01 HAVE_STRLCPY
+
+/* Define if building for Swift */
+#undef HAVE_SWIFT
+
+/* Define to 1 if you have the `sysconf' function. */
+#cmakedefine01 HAVE_SYSCONF
+
+/* Define to 1 if you have the <sys/cdefs.h> header file. */
+#cmakedefine01 HAVE_SYS_CDEFS_H
+
+/* Define to 1 if you have the <sys/guarded.h> header file. */
+#cmakedefine HAVE_SYS_GUARDED_H
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#cmakedefine01 HAVE_SYS_STAT_H
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#cmakedefine01 HAVE_SYS_TYPES_H
+
+/* Define to 1 if you have the <TargetConditionals.h> header file. */
+#cmakedefine HAVE_TARGETCONDITIONALS_H
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#cmakedefine01 HAVE_UNISTD_H
+
+/* Define to 1 if you have the `_pthread_workqueue_init' function. */
+#cmakedefine HAVE__PTHREAD_WORKQUEUE_INIT
+
+/* Define to use non-portable pthread TSD optimizations for Mac OS X */
+#cmakedefine USE_APPLE_TSD_OPTIMIZATIONS
+
+/* Define to tag libdispatch_init as a constructor */
+#cmakedefine01 USE_LIBDISPATCH_INIT_CONSTRUCTOR
+
+/* Define to use Mach semaphores */
+#cmakedefine USE_MACH_SEM
+
+/* Define to use POSIX semaphores */
+#cmakedefine01 USE_POSIX_SEM
+
+/* Enable extensions on AIX 3, Interix.  */
+#ifndef _ALL_SOURCE
+#cmakedefine01 _ALL_SOURCE
+#endif
+/* Enable GNU extensions on systems that have them.  */
+#ifndef _GNU_SOURCE
+#cmakedefine01 _GNU_SOURCE
+#endif
+/* Enable threading extensions on Solaris.  */
+#ifndef _POSIX_PTHREAD_SEMANTICS
+#cmakedefine01 _POSIX_PTHREAD_SEMANTICS
+#endif
+/* Enable extensions on HP NonStop.  */
+#ifndef _TANDEM_SOURCE
+#cmakedefine01 _TANDEM_SOURCE
+#endif
+/* Enable general extensions on Solaris.  */
+#ifndef __EXTENSIONS__
+#cmakedefine01 __EXTENSIONS__
+#endif
+
+
+/* Version number of package */
+#define VERSION "${PROJECT_VERSION}"
+
+/* Define to 1 if on MINIX. */
+#cmakedefine _MINIX
+
+/* Define to 2 if the system does not provide POSIX.1 features except with
+   this defined. */
+#cmakedefine _POSIX_1_SOURCE
+
+/* Define to 1 if you need to in order for `stat' and other things to work. */
+#cmakedefine _POSIX_SOURCE
+
+/* Define if using Darwin $NOCANCEL */
+#cmakedefine __DARWIN_NON_CANCELABLE
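Two template idioms are mixed above: `#cmakedefine01 FOO` always materializes as `#define FOO 0` or `#define FOO 1` (suitable for `#if FOO` tests), while plain `#cmakedefine FOO` yields a definition only when the variable is set (suitable for `#ifdef`). A tiny sketch of the mechanism, with an invented variable and file names:

```
# Hypothetical demonstration of how configure_file() expands the template:
# with HAVE_FEATURE truthy, "#cmakedefine01 HAVE_FEATURE" in template.h.in
# becomes "#define HAVE_FEATURE 1" in the generated header; when unset it
# becomes "#define HAVE_FEATURE 0" rather than disappearing entirely.
set(HAVE_FEATURE 1)
configure_file(template.h.in ${CMAKE_BINARY_DIR}/generated.h)
```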
diff --git a/cmake/modules/DTrace.cmake b/cmake/modules/DTrace.cmake
new file mode 100644 (file)
index 0000000..20a28cc
--- /dev/null
@@ -0,0 +1,26 @@
+
+function(dtrace_usdt_probe script)
+  set(options)
+  set(single_parameter_options TARGET_NAME OUTPUT_SOURCES)
+  set(multiple_parameter_options)
+
+  cmake_parse_arguments("" "${options}" "${single_parameter_options}" "${multiple_parameter_options}" ${ARGN})
+
+  get_filename_component(script_we ${script} NAME_WE)
+
+  add_custom_command(OUTPUT
+                       ${CMAKE_CURRENT_BINARY_DIR}/${script_we}.h
+                     COMMAND
+                       ${dtrace_EXECUTABLE} -h -s ${script} -o ${CMAKE_CURRENT_BINARY_DIR}/${script_we}.h
+                     DEPENDS
+                       ${script})
+  add_custom_target(dtrace-usdt-header-${script_we}
+                    DEPENDS
+                      ${CMAKE_CURRENT_BINARY_DIR}/${script_we}.h)
+  if(_TARGET_NAME)
+    set(${_TARGET_NAME} dtrace-usdt-header-${script_we} PARENT_SCOPE)
+  endif()
+  if(_OUTPUT_SOURCES)
+    set(${_OUTPUT_SOURCES} ${CMAKE_CURRENT_BINARY_DIR}/${script_we}.h PARENT_SCOPE)
+  endif()
+endfunction()
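A call site for this helper might look like the sketch below; the target and variable names are invented, while src/provider.d is the USDT script this tree already carries (INSTALL.md notes that dtrace is used to generate provider.h):

```
# Hypothetical use of dtrace_usdt_probe(): emit provider.h from the probe
# script and capture the generated target/header names in variables.
dtrace_usdt_probe(${CMAKE_SOURCE_DIR}/src/provider.d
                  TARGET_NAME dispatch_dtrace_target
                  OUTPUT_SOURCES dispatch_dtrace_headers)
# dispatch_dtrace_target / dispatch_dtrace_headers can then be wired into
# whichever target compiles the tracing code.
```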
diff --git a/cmake/modules/DispatchAppleOptions.cmake b/cmake/modules/DispatchAppleOptions.cmake
new file mode 100644 (file)
index 0000000..1f95f88
--- /dev/null
@@ -0,0 +1,43 @@
+
+set(WITH_APPLE_PTHREAD_SOURCE "" CACHE PATH "Path to Apple's libpthread")
+set(WITH_APPLE_LIBPLATFORM_SOURCE "" CACHE PATH "Path to Apple's libplatform")
+set(WITH_APPLE_LIBCLOSURE_SOURCE "" CACHE PATH "Path to Apple's libclosure")
+set(WITH_APPLE_XNU_SOURCE "" CACHE PATH "Path to Apple's XNU")
+set(WITH_APPLE_OBJC4_SOURCE "" CACHE PATH "Path to Apple's ObjC4")
+
+if(WITH_APPLE_PTHREAD_SOURCE)
+  include_directories(SYSTEM "${WITH_APPLE_PTHREAD_SOURCE}")
+endif()
+if(WITH_APPLE_LIBPLATFORM_SOURCE)
+  include_directories(SYSTEM "${WITH_APPLE_LIBPLATFORM_SOURCE}/include")
+endif()
+if(WITH_APPLE_LIBCLOSURE_SOURCE)
+  include_directories(SYSTEM "${WITH_APPLE_LIBCLOSURE_SOURCE}")
+endif()
+if(WITH_APPLE_XNU_SOURCE)
+  # FIXME(compnerd) this should use -idirafter
+  include_directories("${WITH_APPLE_XNU_SOURCE}/libkern")
+  include_directories(SYSTEM
+                        "${WITH_APPLE_XNU_SOURCE}/bsd"
+                        "${WITH_APPLE_XNU_SOURCE}/libsyscall"
+                        "${WITH_APPLE_XNU_SOURCE}/libsyscall/wrappers/libproc")
+
+  # hack for xnu/bsd/sys/event.h EVFILT_SOCK declaration
+  add_definitions(-DPRIVATE=1)
+endif()
+
+if(IS_DIRECTORY "/System/Library/Frameworks/System.framework/PrivateHeaders")
+  include_directories(SYSTEM
+                        "/System/Library/Frameworks/System.framework/PrivateHeaders")
+endif()
+
+option(ENABLE_APPLE_TSD_OPTIMIZATIONS "use non-portable pthread TSD optimizations" OFF)
+if(ENABLE_APPLE_TSD_OPTIMIZATIONS)
+  set(USE_APPLE_TSD_OPTIMIZATIONS 1)
+else()
+  set(USE_APPLE_TSD_OPTIMIZATIONS 0)
+endif()
+
+# TODO(compnerd) link in libpthread headers
+
+
diff --git a/cmake/modules/FindLibRT.cmake b/cmake/modules/FindLibRT.cmake
new file mode 100644 (file)
index 0000000..0a9f0d8
--- /dev/null
@@ -0,0 +1,39 @@
+#.rst:
+# FindLibRT
+# ---------
+#
+# Find librt library and headers.
+#
+# The module defines the following variables:
+#
+# ::
+#
+# LibRT_FOUND       - true if librt was found
+# LibRT_INCLUDE_DIR - include search path
+# LibRT_LIBRARIES   - libraries to link
+
+if(UNIX)
+  find_path(LibRT_INCLUDE_DIR
+            NAMES
+              time.h)
+  find_library(LibRT_LIBRARIES rt)
+
+  include(FindPackageHandleStandardArgs)
+  find_package_handle_standard_args(LibRT
+                                    REQUIRED_VARS
+                                      LibRT_LIBRARIES
+                                      LibRT_INCLUDE_DIR)
+
+  if(LibRT_FOUND)
+    if(NOT TARGET RT::rt)
+      add_library(RT::rt UNKNOWN IMPORTED)
+      set_target_properties(RT::rt
+                            PROPERTIES
+                              IMPORTED_LOCATION ${LibRT_LIBRARIES}
+                              INTERFACE_INCLUDE_DIRECTORIES ${LibRT_INCLUDE_DIR})
+    endif()
+  endif()
+
+  mark_as_advanced(LibRT_LIBRARIES LibRT_INCLUDE_DIR)
+endif()
+
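The module follows the conventional find-module pattern, so a consumer stays target-based rather than hard-coding -lrt. A minimal sketch (the `sample` executable is an assumption, not part of this commit):

```
# Hypothetical consumption of FindLibRT.cmake via the imported target.
find_package(LibRT)
if(LibRT_FOUND)
  target_link_libraries(sample PRIVATE RT::rt)
endif()
```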
diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake
new file mode 100644 (file)
index 0000000..1965939
--- /dev/null
@@ -0,0 +1,69 @@
+
+include(CMakeParseArguments)
+
+function(add_swift_library library)
+  set(options)
+  set(single_value_options MODULE_NAME;MODULE_LINK_NAME;MODULE_PATH;MODULE_CACHE_PATH;OUTPUT)
+  set(multiple_value_options SOURCES;SWIFT_FLAGS;CFLAGS)
+
+  cmake_parse_arguments(ASL "${options}" "${single_value_options}" "${multiple_value_options}" ${ARGN})
+
+  set(flags ${CMAKE_SWIFT_FLAGS})
+
+  list(APPEND flags -emit-library)
+
+  if(ASL_MODULE_NAME)
+    list(APPEND flags -module-name;${ASL_MODULE_NAME})
+  endif()
+  if(ASL_MODULE_LINK_NAME)
+    list(APPEND flags -module-link-name;${ASL_MODULE_LINK_NAME})
+  endif()
+  if(ASL_MODULE_PATH)
+    list(APPEND flags -emit-module-path;${ASL_MODULE_PATH})
+  endif()
+  if(ASL_MODULE_CACHE_PATH)
+    list(APPEND flags -module-cache-path;${ASL_MODULE_CACHE_PATH})
+  endif()
+  if(ASL_SWIFT_FLAGS)
+    foreach(flag ${ASL_SWIFT_FLAGS})
+      list(APPEND flags ${flag})
+    endforeach()
+  endif()
+  if(ASL_CFLAGS)
+    foreach(flag ${ASL_CFLAGS})
+      list(APPEND flags -Xcc;${flag})
+    endforeach()
+  endif()
+
+  # FIXME: We shouldn't /have/ to build things in a single process.
+  # <rdar://problem/15972329>
+  list(APPEND flags -force-single-frontend-invocation)
+
+  set(sources)
+  foreach(source ${ASL_SOURCES})
+    get_filename_component(location ${source} PATH)
+    if(IS_ABSOLUTE ${location})
+      list(APPEND sources ${source})
+    else()
+      list(APPEND sources ${CMAKE_CURRENT_SOURCE_DIR}/${source})
+    endif()
+  endforeach()
+
+  get_filename_component(module_directory ${ASL_MODULE_PATH} DIRECTORY)
+
+  add_custom_command(OUTPUT
+                       ${ASL_OUTPUT}
+                       ${ASL_MODULE_PATH}
+                       ${module_directory}/${ASL_MODULE_NAME}.swiftdoc
+                     DEPENDS
+                       ${ASL_SOURCES}
+                     COMMAND
+                       ${CMAKE_COMMAND} -E make_directory ${module_directory}
+                     COMMAND
+                       ${CMAKE_SWIFT_COMPILER} ${flags} -c ${sources} -o ${ASL_OUTPUT})
+  add_custom_target(${library}
+                    DEPENDS
+                       ${ASL_OUTPUT}
+                       ${ASL_MODULE_PATH}
+                       ${module_directory}/${ASL_MODULE_NAME}.swiftdoc)
+endfunction()
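An invocation might look like the following sketch; every name and path is illustrative, chosen only to show how MODULE_PATH, OUTPUT, and SOURCES feed the custom command above:

```
# Hypothetical call to add_swift_library(): one frontend job compiles the
# sources and emits the object file, swiftmodule, and swiftdoc together.
add_swift_library(swiftDispatch
                  MODULE_NAME Dispatch
                  MODULE_LINK_NAME dispatch
                  MODULE_PATH ${CMAKE_BINARY_DIR}/swift/Dispatch.swiftmodule
                  OUTPUT ${CMAKE_BINARY_DIR}/swift/Dispatch.o
                  SOURCES Queue.swift Source.swift Time.swift)
```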
diff --git a/config/config.h b/config/config.h
index e39a9a9fd997012e6cacc0c88cbb097ac6d7af1a..91d7cfe8effc44e3926ac84d3887a7b399d69077 100644 (file)
@@ -5,10 +5,18 @@
    you don't. */
 #define HAVE_DECL_CLOCK_MONOTONIC 0
 
+/* Define to 1 if you have the declaration of `CLOCK_REALTIME', and to 0 if
+   you don't. */
+#define HAVE_DECL_CLOCK_REALTIME 0
+
 /* Define to 1 if you have the declaration of `CLOCK_UPTIME', and to 0 if you
    don't. */
 #define HAVE_DECL_CLOCK_UPTIME 0
 
+/* Define to 1 if you have the declaration of `CLOCK_UPTIME_FAST', and to 0
+   if you don't. */
+#define HAVE_DECL_CLOCK_UPTIME_FAST 0
+
 /* Define to 1 if you have the declaration of `FD_COPY', and to 0 if you
    don't. */
 #define HAVE_DECL_FD_COPY 1
 /* Define to 1 if you have the `mach_absolute_time' function. */
 #define HAVE_MACH_ABSOLUTE_TIME 1
 
+/* Define to 1 if you have the `mach_approximate_time' function. */
+#define HAVE_MACH_APPROXIMATE_TIME 1
+
 /* Define to 1 if you have the `mach_port_construct' function. */
 #define HAVE_MACH_PORT_CONSTRUCT 1
 
diff --git a/configure.ac b/configure.ac
index 6f66e523d2773897584f1034d0ff837df66090e8..8f38f0829762f94c5e555163b225cd46b7c4fd7f 100644 (file)
@@ -11,6 +11,10 @@ AC_CONFIG_MACRO_DIR([m4])
 ac_clean_files=a.out.dSYM
 AM_MAINTAINER_MODE
 
+AC_CANONICAL_BUILD
+AC_CANONICAL_HOST
+AC_CANONICAL_TARGET
+
 #
 # Command line argument to specify build variant (default to release).
 # Impacts default value of CFLAGS et al. so must come before AC_PROG_CC
@@ -56,6 +60,53 @@ AC_PROG_CXX([clang++ g++ c++])
 AC_PROG_OBJC([clang gcc cc])
 AC_PROG_OBJCXX([clang++ g++ c++])
 
+#
+# Android cross-compilation support
+#
+AC_ARG_WITH([android-ndk],
+  [AS_HELP_STRING([--with-android-ndk],
+    [Android NDK location])], [
+  android_ndk=${withval}
+])
+AC_ARG_WITH([android-ndk-gcc-version],
+  [AS_HELP_STRING([--with-android-ndk-gcc-version],
+    [Android NDK GCC version [defaults=4.9]])],
+  [android_ndk_gcc_version=${withval}], [android_ndk_gcc_version=4.9])
+AC_ARG_WITH([android-api-level],
+  [AS_HELP_STRING([--with-android-api-level],
+    [Android API level to link with])], [
+  android_api_level=${withval}
+])
+AC_ARG_ENABLE([android],
+  [AS_HELP_STRING([--enable-android],
+    [Compile for Android])], [
+  android=true
+
+  # Override values until there's real support for multiple Android platforms
+  host=armv7-none-linux-androideabi
+  host_alias=arm-linux-androideabi
+  host_cpu=armv7
+  host_os=linux-androideabi
+  host_vendor=unknown
+  arch=arm
+
+  sysroot=${android_ndk}/platforms/android-${android_api_level}/arch-${arch}
+  toolchain=${android_ndk}/toolchains/${host_alias}-${android_ndk_gcc_version}/prebuilt/linux-${build_cpu}
+
+  CFLAGS="$CFLAGS -target ${host_alias} --sysroot=${sysroot} -B${toolchain}/${host_alias}/bin"
+  CXXFLAGS="$CXXFLAGS -target ${host_alias} --sysroot=${sysroot} -B${toolchain}/${host_alias}/bin"
+  SWIFTC_FLAGS="-target ${host} -sdk ${sysroot} -L${toolchain}/lib/gcc/${host_alias}/${android_ndk_gcc_version}.x"
+  LIBS="$LIBS -L${toolchain}/lib/gcc/${host_alias}/${android_ndk_gcc_version}.x"
+  LDFLAGS="$LDFLAGS -Wc,'-target','${host_alias}','-B${toolchain}/${host_alias}/bin'"
+
+  # FIXME: empty CFLAGS and CXXFLAGS are assumed for this to work.
+  # FIXME: there should be a more elegant way to do this
+  ac_configure_args=`echo $ac_configure_args | sed -e "s/ 'CFLAGS='//" -e "s/ 'CXXFLAGS='//"`
+  # CFLAGS, CXXFLAGS and LIBS need to be passed to libkqueue and libpwq
+  ac_configure_args="$ac_configure_args --enable-bionic-libc 'CFLAGS=$CFLAGS' 'CXXFLAGS=$CXXFLAGS' 'LIBS=$LIBS'"
+], [android=false])
+AM_CONDITIONAL(ANDROID, $android)
+
 #
 # On Mac OS X, some required header files come from other source packages;
 # allow specifying where those are.
@@ -74,13 +125,6 @@ AC_ARG_WITH([apple-libplatform-source],
   CPPFLAGS="$CPPFLAGS -isystem $apple_libplatform_source_include_path"
 ])
 
-AC_ARG_WITH([apple-libclosure-source],
-  [AS_HELP_STRING([--with-apple-libclosure-source],
-    [Specify path to Apple libclosure source])], [
-  apple_libclosure_source_path=${withval}
-  CPPFLAGS="$CPPFLAGS -isystem $apple_libclosure_source_path"
-])
-
 AC_ARG_WITH([apple-xnu-source],
   [AS_HELP_STRING([--with-apple-xnu-source],
     [Specify path to Apple XNU source])], [
@@ -92,12 +136,6 @@ AC_ARG_WITH([apple-xnu-source],
   CPPFLAGS="$CPPFLAGS -idirafter $apple_xnu_source_libkern_path -isystem $apple_xnu_source_bsd_path -isystem $apple_xnu_source_libsyscall_path -isystem $apple_xnu_source_libproc_path "
 ])
 
-AC_ARG_WITH([apple-objc4-source],
-  [AS_HELP_STRING([--with-apple-objc4-source],
-    [Specify path to Apple objc4 source])], [
-  apple_objc4_source_runtime_path=${withval}/runtime
-])
-
 AC_CACHE_CHECK([for System.framework/PrivateHeaders], dispatch_cv_system_privateheaders,
   [AS_IF([test -d /System/Library/Frameworks/System.framework/PrivateHeaders],
     [dispatch_cv_system_privateheaders=yes], [dispatch_cv_system_privateheaders=no])]
@@ -134,8 +172,6 @@ AS_IF([test "x$enable_apple_tsd_optimizations" = "xyes"],
     [Define to use non-portable pthread TSD optimizations for Mac OS X)])]
 )
 
-AC_CANONICAL_TARGET
-
 #
 # Enable building Swift overlay support into libdispatch
 #
@@ -145,6 +181,9 @@ AC_ARG_WITH([swift-toolchain],
    AC_DEFINE(HAVE_SWIFT, 1, [Define if building for Swift])
    SWIFTC="$swift_toolchain_path/bin/swiftc"
    case $target_os in
+      *android*)
+           os_string="android"
+           ;;
       linux*)
            os_string="linux"
            case $target_cpu in
@@ -164,7 +203,9 @@ AC_ARG_WITH([swift-toolchain],
 )
 AM_CONDITIONAL(HAVE_SWIFT, $have_swift)
 AC_SUBST([SWIFTC])
+AC_SUBST([SWIFTC_FLAGS])
 AC_SUBST([SWIFT_LIBDIR])
+AC_SUBST([OS_STRING], ["$os_string"])
 
 #
 # Enable use of gold linker when building the Swift overlay
@@ -174,6 +215,18 @@ AC_SUBST([SWIFT_LIBDIR])
 AC_CHECK_PROG(use_gold_linker, ld.gold, true, false)
 AM_CONDITIONAL(USE_GOLD_LINKER, $use_gold_linker)
 
+#
+# Enable an extended test suite that includes
+# tests that are too unreliable to be enabled by
+# default in the Swift CI environment, but are still
+# useful for libdispatch developers to be able to run.
+#
+AC_ARG_ENABLE([extended-test-suite],
+  [AS_HELP_STRING([--enable-extended-test-suite],
+    [Include additional test cases that may fail intermittently])]
+)
+AM_CONDITIONAL(EXTENDED_TEST_SUITE, test "x$enable_extended_test_suite" = "xyes")
+
 #
 # Enable __thread based TSD on platforms where it is efficient
 # Allow override based on command line argument to configure
@@ -229,19 +282,6 @@ esac
 AC_SEARCH_LIBS(clock_gettime, rt)
 AC_SEARCH_LIBS(pthread_create, pthread)
 
-AS_IF([test -f $srcdir/libkqueue/configure.ac],
-  [AC_DEFINE(BUILD_OWN_KQUEUES, 1, [Define if building libkqueue from source])
-   ac_configure_args="--disable-libkqueue-install $ac_configure_args"
-   AC_CONFIG_SUBDIRS([libkqueue])
-   build_own_kqueues=true],
-  [build_own_kqueues=false
-   AC_CHECK_HEADER(sys/event.h, [],
-     [PKG_CHECK_MODULES(KQUEUE, libkqueue)]
-   )
-  ]
-)
-AM_CONDITIONAL(BUILD_OWN_KQUEUES, $build_own_kqueues)
-
 AC_CHECK_FUNCS([strlcpy getprogname], [],
   [PKG_CHECK_MODULES(BSD_OVERLAY, libbsd-overlay,[
     AC_DEFINE(HAVE_STRLCPY, 1, [])
@@ -267,22 +307,35 @@ AS_IF([test -n "$apple_libpthread_source_path" -a -n "$apple_xnu_source_osfmk_pa
 AC_CHECK_HEADERS([pthread_machdep.h pthread/qos.h])
 
 # pthread_workqueues.
-# Look for own version first, then system version.
-AS_IF([test -f $srcdir/libpwq/configure.ac],
-  [AC_DEFINE(BUILD_OWN_PTHREAD_WORKQUEUES, 1, [Define if building pthread work queues from source])
-   ac_configure_args="--disable-libpwq-install $ac_configure_args"
-   AC_CONFIG_SUBDIRS([libpwq])
-   build_own_pthread_workqueues=true
-   AC_DEFINE(HAVE_PTHREAD_WORKQUEUES, 1, [Define if pthread work queues are present])
-   have_pthread_workqueues=true],
-  [build_own_pthread_workqueues=false
-   AC_CHECK_HEADERS([pthread/workqueue_private.h pthread_workqueue.h],
+# We can either use libdispatch's internal_workqueue or pthread_workqueue.
+# If not specifically configured, default to internal_workqueue on
+# Linux and pthread_workqueue on all other platforms.
+# On any platform, if pthread_workqueue is not available, fall back
+# to using internal_workqueue.
+AC_ARG_ENABLE([internal-libpwq],
+  [AS_HELP_STRING([--enable-internal-libpwq],
+    [Use libdispatch's own implementation of pthread workqueues.])],,
+  [case $target_os in
+      linux*)
+        enable_internal_libpwq=yes
+           ;;
+      *)
+        enable_internal_libpwq=no
+   esac]
+)
+AS_IF([test "x$enable_internal_libpwq" = "xyes"],
+  [AC_DEFINE(DISPATCH_USE_INTERNAL_WORKQUEUE, 1, [Use libdispatch's own implementation of pthread workqueues])
+   have_pthread_workqueues=false
+   dispatch_use_internal_workqueue=true],
+  [AC_CHECK_HEADERS([pthread/workqueue_private.h pthread_workqueue.h],
      [AC_DEFINE(HAVE_PTHREAD_WORKQUEUES, 1, [Define if pthread work queues are present])
-      have_pthread_workqueues=true],
-     [have_pthread_workqueues=false]
-   )]
+      have_pthread_workqueues=true
+      dispatch_use_internal_workqueue=false],
+     [have_pthread_workqueues=false
+      dispatch_use_internal_workqueue=true]
+  )]
 )
-AM_CONDITIONAL(BUILD_OWN_PTHREAD_WORKQUEUES, $build_own_pthread_workqueues)
+AM_CONDITIONAL(DISPATCH_USE_INTERNAL_WORKQUEUE, $dispatch_use_internal_workqueue)
 AM_CONDITIONAL(HAVE_PTHREAD_WORKQUEUES, $have_pthread_workqueues)
 
 AC_CHECK_HEADERS([libproc_internal.h], [], [], [#include <mach/mach.h>])
@@ -321,24 +374,10 @@ AC_CHECK_HEADER([Foundation/Foundation.h],
   [have_foundation=true], [have_foundation=false]
 )
 AM_CONDITIONAL(HAVE_FOUNDATION, $have_foundation)
-# hack for objc4/runtime/objc-internal.h
-AS_IF([test -n "$apple_objc4_source_runtime_path"], [
-  saveCPPFLAGS="$CPPFLAGS"
-  CPPFLAGS="$CPPFLAGS -I."
-  ln -fsh "$apple_objc4_source_runtime_path" objc
-])
-AC_CHECK_HEADER([objc/objc-internal.h], [
+AC_CHECK_HEADER([objc/NSObject.h], [
   AC_DEFINE(HAVE_OBJC, 1, [Define if you have the Objective-C runtime])
-  have_objc=true], [have_objc=false],
-  [#include <objc/runtime.h>]
+  have_objc=true], [have_objc=false]
 )
-AS_IF([test -n "$apple_objc4_source_runtime_path"], [
-  rm -f objc
-  CPPFLAGS="$saveCPPFLAGS"
-  AC_CONFIG_COMMANDS([src/objc],
-    [ln -fsh "$apple_objc4_source_runtime_path" src/objc],
-    [apple_objc4_source_runtime_path="$apple_objc4_source_runtime_path"])
-])
 AM_CONDITIONAL(USE_OBJC, $have_objc)
 AC_LANG_POP([Objective C])
 
@@ -357,7 +396,7 @@ AC_CHECK_FUNCS([mach_port_construct])
 #
 # Find functions and declarations we care about.
 #
-AC_CHECK_DECLS([CLOCK_UPTIME, CLOCK_MONOTONIC, CLOCK_REALTIME], [], [],
+AC_CHECK_DECLS([CLOCK_UPTIME, CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_UPTIME_FAST], [], [],
   [[#include <time.h>]])
 AC_CHECK_DECLS([NOTE_NONE, NOTE_REAP, NOTE_REVOKE, NOTE_SIGNAL, NOTE_LOWAT], [], [],
   [[#include <sys/event.h>]])
@@ -365,7 +404,7 @@ AC_CHECK_DECLS([FD_COPY], [], [], [[#include <sys/select.h>]])
 AC_CHECK_DECLS([SIGEMT], [], [], [[#include <signal.h>]])
 AC_CHECK_DECLS([VQ_UPDATE, VQ_VERYLOWDISK, VQ_QUOTA, VQ_NEARLOWDISK, VQ_DESIRED_DISK], [], [], [[#include <sys/mount.h>]])
 AC_CHECK_DECLS([program_invocation_short_name], [], [], [[#include <errno.h>]])
-AC_CHECK_FUNCS([pthread_key_init_np pthread_main_np mach_absolute_time malloc_create_zone sysconf])
+AC_CHECK_FUNCS([pthread_key_init_np pthread_main_np mach_absolute_time mach_approximate_time malloc_create_zone sysconf])
 
 AC_CHECK_DECLS([POSIX_SPAWN_START_SUSPENDED],
   [have_posix_spawn_start_suspended=true], [have_posix_spawn_start_suspended=false],
@@ -377,6 +416,11 @@ AC_CHECK_FUNC([sem_init],
   [have_sem_init=true], [have_sem_init=false]
 )
 
+AC_CHECK_HEADER([linux/futex.h], [
+  AC_DEFINE(HAVE_FUTEX, 1, [Define if linux/futex.h is present])
+  have_futex=true], [have_futex=false]
+)
+
 #
 # We support both Mach semaphores and POSIX semaphores; if the former are
 # available, prefer them.
@@ -459,6 +503,13 @@ AC_COMPILE_IFELSE(
   [AC_DEFINE(HAVE_NORETURN_BUILTIN_TRAP, 1, [Define if __builtin_trap marked noreturn])]
 )
 
+#
+# Add option to avoid building tests
+#
+AC_ARG_ENABLE([build-tests],
+  [AS_HELP_STRING([--disable-build-tests], [Disable tests compilation])])
+AM_CONDITIONAL(BUILD_TESTS, [test "x$enable_build_tests" != "xno"])
+
 #
 # Generate Makefiles.
 #
diff --git a/dispatch/CMakeLists.txt b/dispatch/CMakeLists.txt
new file mode 100644 (file)
index 0000000..dbfb866
--- /dev/null
@@ -0,0 +1,24 @@
+
+install(FILES
+          base.h
+          block.h
+          data.h
+          dispatch.h
+          group.h
+          introspection.h
+          io.h
+          object.h
+          once.h
+          queue.h
+          semaphore.h
+          source.h
+          time.h
+        DESTINATION
+          ${CMAKE_INSTALL_FULL_INCLUDEDIR}/dispatch/)
+if(ENABLE_SWIFT)
+  install(FILES
+            module.modulemap
+          DESTINATION
+            ${CMAKE_INSTALL_FULL_INCLUDEDIR}/dispatch/)
+endif()
+
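With the headers installed under ${CMAKE_INSTALL_FULL_INCLUDEDIR}/dispatch, a downstream project would consume them roughly as follows; the `app` target, and the assumption that the library sits on the default linker search path, are illustrative only:

```
# Hypothetical downstream consumer of the installed headers and library.
add_executable(app main.c)
target_compile_options(app PRIVATE -fblocks)  # blocks support (clang)
target_link_libraries(app PRIVATE dispatch)
```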
diff --git a/dispatch/base.h b/dispatch/base.h
index 8adfb0bdb503763fb36924da5aec26c068b78a39..4c82b010c368c397571b8a40053eb01e5073f805 100644 (file)
 #endif
 
 #if __has_feature(enumerator_attributes)
-#define DISPATCH_ENUM_AVAILABLE_STARTING __OSX_AVAILABLE_STARTING
-#define DISPATCH_ENUM_AVAILABLE(os, version) __##os##_AVAILABLE(version)
+#define DISPATCH_ENUM_API_AVAILABLE(...) API_AVAILABLE(__VA_ARGS__)
+#define DISPATCH_ENUM_API_DEPRECATED(...) API_DEPRECATED(__VA_ARGS__)
+#define DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT(...) \
+               API_DEPRECATED_WITH_REPLACEMENT(__VA_ARGS__)
 #else
-#define DISPATCH_ENUM_AVAILABLE_STARTING(...)
-#define DISPATCH_ENUM_AVAILABLE(...)
+#define DISPATCH_ENUM_API_AVAILABLE(...)
+#define DISPATCH_ENUM_API_DEPRECATED(...)
+#define DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT(...)
 #endif
 
 #if defined(SWIFT_SDK_OVERLAY_DISPATCH_EPOCH) && \
 #define DISPATCH_SWIFT_NAME(_name)
 #endif
 
+#ifndef __cplusplus
+#define DISPATCH_TRANSPARENT_UNION __attribute__((__transparent_union__))
+#else
+#define DISPATCH_TRANSPARENT_UNION
+#endif
+
 typedef void (*dispatch_function_t)(void *_Nullable);
 
 #endif
diff --git a/dispatch/block.h b/dispatch/block.h
index cd56b230d7a10ec0210c86d3312f6e51f15ef991..cbdcb5eff848b766f82875522894cfa2f2b180be 100644 (file)
@@ -101,17 +101,17 @@ __BEGIN_DECLS
  */
 DISPATCH_ENUM(dispatch_block_flags, unsigned long,
        DISPATCH_BLOCK_BARRIER
-                       DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x1,
+                       DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x1,
        DISPATCH_BLOCK_DETACHED
-                       DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x2,
+                       DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x2,
        DISPATCH_BLOCK_ASSIGN_CURRENT
-                       DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x4,
+                       DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x4,
        DISPATCH_BLOCK_NO_QOS_CLASS
-                       DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x8,
+                       DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x8,
        DISPATCH_BLOCK_INHERIT_QOS_CLASS
-                       DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x10,
+                       DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x10,
        DISPATCH_BLOCK_ENFORCE_QOS_CLASS
-                       DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x20,
+                       DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x20,
 );
 
 /*!
@@ -164,7 +164,7 @@ DISPATCH_ENUM(dispatch_block_flags, unsigned long,
  * When not building with Objective-C ARC, must be released with a -[release]
  * message or the Block_release() function.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_RETURNS_RETAINED_BLOCK
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_block_t
@@ -236,7 +236,7 @@ dispatch_block_create(dispatch_block_flags_t flags, dispatch_block_t block);
  * When not building with Objective-C ARC, must be released with a -[release]
  * message or the Block_release() function.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_NONNULL4 DISPATCH_RETURNS_RETAINED_BLOCK
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_block_t
@@ -269,7 +269,7 @@ dispatch_block_create_with_qos_class(dispatch_block_flags_t flags,
  * @param block
  * The block to create the temporary block object from.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW
 void
 dispatch_block_perform(dispatch_block_flags_t flags,
@@ -320,7 +320,7 @@ dispatch_block_perform(dispatch_block_flags_t flags,
  * Returns zero on success (the dispatch block object completed within the
  * specified timeout) or non-zero on error (i.e. timed out).
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 long
 dispatch_block_wait(dispatch_block_t block, dispatch_time_t timeout);
@@ -361,7 +361,7 @@ dispatch_block_wait(dispatch_block_t block, dispatch_time_t timeout);
  * @param notification_block
  * The notification block to submit when the observed block object completes.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_block_notify(dispatch_block_t block, dispatch_queue_t queue,
@@ -393,7 +393,7 @@ dispatch_block_notify(dispatch_block_t block, dispatch_queue_t queue,
  * The result of passing NULL or a block object not returned by one of the
  * dispatch_block_create* functions is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_block_cancel(dispatch_block_t block);
@@ -412,7 +412,7 @@ dispatch_block_cancel(dispatch_block_t block);
  * @result
  * Non-zero if canceled and zero if not canceled.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE
 DISPATCH_NOTHROW
 long
diff --git a/dispatch/darwin/module.modulemap b/dispatch/darwin/module.modulemap
index addaae4365625261564952f12a1feadd7438996d..e30807f914da40bd72c1c43f6525cf6a09d2984b 100644 (file)
@@ -1,6 +1,5 @@
 module Dispatch [system] [extern_c] {
        umbrella header "dispatch.h"
-       module * { export * }
        export *
 }
 
diff --git a/dispatch/data.h b/dispatch/data.h
index 7ceee06476c7d6f224a7942b8b9cd673f78797a3..33a0c9d516c8720c11b4da6826926e398a531dac 100644 (file)
@@ -50,7 +50,7 @@ DISPATCH_DATA_DECL(dispatch_data);
  */
 #define dispatch_data_empty \
                DISPATCH_GLOBAL_OBJECT(dispatch_data_t, _dispatch_data_empty)
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT struct dispatch_data_s _dispatch_data_empty;
 
 /*!
@@ -83,7 +83,7 @@ DISPATCH_EXPORT struct dispatch_data_s _dispatch_data_empty;
  * was allocated by the malloc() family and should be destroyed with free(3).
  */
 #define DISPATCH_DATA_DESTRUCTOR_FREE (_dispatch_data_destructor_free)
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(free);
 
 /*!
@@ -92,7 +92,7 @@ DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(free);
  * from buffers that require deallocation with munmap(2).
  */
 #define DISPATCH_DATA_DESTRUCTOR_MUNMAP (_dispatch_data_destructor_munmap)
-__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(munmap);
 
 #ifdef __BLOCKS__
@@ -117,7 +117,7 @@ DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(munmap);
  *                     is no longer needed.
  * @result             A newly created dispatch data object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_data_t
 dispatch_data_create(const void *buffer,
@@ -134,7 +134,7 @@ dispatch_data_create(const void *buffer,
  * @param data The dispatch data object to query.
  * @result     The number of bytes represented by the data object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_PURE DISPATCH_NONNULL1 DISPATCH_NOTHROW
 size_t
 dispatch_data_get_size(dispatch_data_t data);
@@ -158,7 +158,7 @@ dispatch_data_get_size(dispatch_data_t data);
  *                     size of the mapped contiguous memory region, or NULL.
  * @result             A newly created dispatch data object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_RETURNS_RETAINED
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_data_t
@@ -181,7 +181,7 @@ dispatch_data_create_map(dispatch_data_t data,
  * @result     A newly created object representing the concatenation of the
  *             data1 and data2 objects.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_RETURNS_RETAINED
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_data_t
@@ -202,7 +202,7 @@ dispatch_data_create_concat(dispatch_data_t data1, dispatch_data_t data2);
  * @result             A newly created object representing the specified
  *                     subrange of the data object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_RETURNS_RETAINED
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_data_t
@@ -247,10 +247,11 @@ typedef bool (^dispatch_data_applier_t)(dispatch_data_t region,
  * @result             A Boolean indicating whether traversal completed
  *                     successfully.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 bool
-dispatch_data_apply(dispatch_data_t data, dispatch_data_applier_t applier);
+dispatch_data_apply(dispatch_data_t data,
+       DISPATCH_NOESCAPE dispatch_data_applier_t applier);
 #endif /* __BLOCKS__ */
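
A minimal sketch (not from this diff) of the traversal API as annotated above; because the applier is now DISPATCH_NOESCAPE, it only runs synchronously inside the dispatch_data_apply() call. The total_bytes helper is illustrative:

#include <dispatch/dispatch.h>
#include <stdio.h>

static size_t total_bytes(dispatch_data_t data)
{
	__block size_t total = 0;
	dispatch_data_apply(data, ^bool(dispatch_data_t region, size_t offset,
			const void *buffer, size_t size) {
		(void)region; (void)buffer;
		printf("region at offset %zu: %zu bytes\n", offset, size);
		total += size;
		return true; // keep traversing
	});
	return total;
}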
 
 /*!
@@ -267,7 +268,7 @@ dispatch_data_apply(dispatch_data_t data, dispatch_data_applier_t applier);
  *                     start of the queried data object.
  * @result             A newly created dispatch data object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_RETURNS_RETAINED
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_data_t
index e8d69f893a9a7c2255887312e409f96d00fcce28..2d45b8356ad197186a445546ecbf3003b4ef4446 100644 (file)
 
 #ifdef __APPLE__
 #include <Availability.h>
+#include <os/availability.h>
 #include <TargetConditionals.h>
-#else
-#define __OSX_AVAILABLE_STARTING(x, y)
-#define __OSX_AVAILABLE_BUT_DEPRECATED(...)
-#define __OSX_AVAILABLE_BUT_DEPRECATED_MSG(...)
-#define __OSX_AVAILABLE(...)
-#define __IOS_AVAILABLE(...)
-#define __TVOS_AVAILABLE(...)
-#define __WATCHOS_AVAILABLE(...)
-#define __OSX_DEPRECATED(...)
-#define __IOS_DEPRECATED(...)
-#define __TVOS_DEPRECATED(...)
-#define __WATCHOS_DEPRECATED(...)
-#endif // __APPLE__
+#include <os/base.h>
+#elif defined(__linux__)
+#include <os/linux_base.h>
+#endif
 
-#include <sys/cdefs.h>
 #include <sys/types.h>
 #include <stddef.h>
 #include <stdint.h>
 #include <stdbool.h>
 #include <stdarg.h>
+#if !defined(HAVE_UNISTD_H) || HAVE_UNISTD_H
 #include <unistd.h>
+#endif
 #include <fcntl.h>
 
 #if defined(__linux__) && defined(__has_feature)
@@ -55,7 +48,7 @@
 #endif
 #endif
 
-#define DISPATCH_API_VERSION 20160712
+#define DISPATCH_API_VERSION 20170124
 
 #ifndef __DISPATCH_BUILDING_DISPATCH__
 
index 5c248e5c840254491b2de99d0cc449566b1a91bf..8c3e7d016560d3dd13abbd2bd5f0bae75e303ce4 100644 (file)
@@ -11,7 +11,6 @@ module DispatchIntrospection [system] [extern_c] {
 
 module CDispatch [system] [extern_c] {
        umbrella header "dispatch.h"
-       module * { export * }
        export *
        requires blocks
        link "dispatch"
index c50ad89d17206bea7aae7f77ec4aa67e89ceaf6a..8d74ada2e413727eee6e94de97ff0bc7d35e0adb 100644 (file)
@@ -51,7 +51,7 @@ __BEGIN_DECLS
  * @result
  * The newly created group, or NULL on failure.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_group_t
@@ -81,7 +81,7 @@ dispatch_group_create(void);
  * The block to perform asynchronously.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_group_async(dispatch_group_t group,
@@ -115,7 +115,7 @@ dispatch_group_async(dispatch_group_t group,
  * parameter passed to this function is the context provided to
  * dispatch_group_async_f().
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL4
 DISPATCH_NOTHROW
 void
@@ -158,7 +158,7 @@ dispatch_group_async_f(dispatch_group_t group,
  * Returns zero on success (all blocks associated with the group completed
  * within the specified timeout) or non-zero on error (i.e. timed out).
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 long
 dispatch_group_wait(dispatch_group_t group, dispatch_time_t timeout);
@@ -194,7 +194,7 @@ dispatch_group_wait(dispatch_group_t group, dispatch_time_t timeout);
  * The block to submit when the group completes.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_group_notify(dispatch_group_t group,
@@ -224,7 +224,7 @@ dispatch_group_notify(dispatch_group_t group,
  * parameter passed to this function is the context provided to
  * dispatch_group_notify_f().
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL4
 DISPATCH_NOTHROW
 void
@@ -248,7 +248,7 @@ dispatch_group_notify_f(dispatch_group_t group,
  * The dispatch group to update.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_group_enter(dispatch_group_t group);
@@ -267,7 +267,7 @@ dispatch_group_enter(dispatch_group_t group);
  * The dispatch group to update.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_group_leave(dispatch_group_t group);
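
A usage sketch of the group API declared above (not part of the patch; queue choice and task count are illustrative):

#include <dispatch/dispatch.h>
#include <stdio.h>

static void run_group_example(void)
{
	dispatch_queue_t q = dispatch_get_global_queue(
			DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
	dispatch_group_t group = dispatch_group_create();

	for (int i = 0; i < 4; i++) {
		dispatch_group_async(group, q, ^{
			printf("task %d done\n", i);
		});
	}
	// Blocks until every task submitted above has completed.
	dispatch_group_wait(group, DISPATCH_TIME_FOREVER);
	dispatch_release(group); // only needed when not using ARC/os_object
}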
index 9cfb4d1c0e68e3538b23cb61fbd8019ff4b6c2bc..ea7dcd8f5f1bd7c7916fc131a0f07a79081d90bf 100644 (file)
@@ -49,7 +49,7 @@ __BEGIN_DECLS
  * The newly created dispatch queue.
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT
 void
 dispatch_introspection_hook_queue_create(dispatch_queue_t queue);
@@ -65,7 +65,7 @@ dispatch_introspection_hook_queue_create(dispatch_queue_t queue);
  * The dispatch queue about to be destroyed.
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT
 void
 dispatch_introspection_hook_queue_destroy(dispatch_queue_t queue);
@@ -84,7 +84,7 @@ dispatch_introspection_hook_queue_destroy(dispatch_queue_t queue);
  * The object about to be enqueued.
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT
 void
 dispatch_introspection_hook_queue_item_enqueue(dispatch_queue_t queue,
@@ -104,7 +104,7 @@ dispatch_introspection_hook_queue_item_enqueue(dispatch_queue_t queue,
  * The dequeued object.
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT
 void
 dispatch_introspection_hook_queue_item_dequeue(dispatch_queue_t queue,
@@ -126,7 +126,7 @@ dispatch_introspection_hook_queue_item_dequeue(dispatch_queue_t queue,
  * Opaque identifier for the completed item. Must NOT be dereferenced.
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_7_1)
+API_AVAILABLE(macos(10.10), ios(7.1))
 DISPATCH_EXPORT
 void
 dispatch_introspection_hook_queue_item_complete(dispatch_object_t item);
@@ -150,7 +150,7 @@ dispatch_introspection_hook_queue_item_complete(dispatch_object_t item);
  * this is the block object's invoke function.
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT
 void
 dispatch_introspection_hook_queue_callout_begin(dispatch_queue_t queue,
@@ -175,7 +175,7 @@ dispatch_introspection_hook_queue_callout_begin(dispatch_queue_t queue,
  * this is the block object's invoke function.
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT
 void
 dispatch_introspection_hook_queue_callout_end(dispatch_queue_t queue,
index 5814bc0f77fd690b6e4199fe3f487717444d9cb4..a9e6892e5203e2d7dfd13d300f81fff1a6d0b9ac 100644 (file)
@@ -102,7 +102,7 @@ typedef int dispatch_fd_t;
  *             param error     An errno condition for the read operation or
  *                             zero if the read was successful.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NONNULL4 DISPATCH_NOTHROW
 void
 dispatch_read(dispatch_fd_t fd,
@@ -140,7 +140,7 @@ dispatch_read(dispatch_fd_t fd,
  *             param error     An errno condition for the write operation or
  *                             zero if the write was successful.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NONNULL4
 DISPATCH_NOTHROW
 void
@@ -211,7 +211,7 @@ typedef unsigned long dispatch_io_type_t;
  * @result     The newly created dispatch I/O channel or NULL if an error
  *             occurred (invalid type specified).
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_io_t
@@ -247,7 +247,7 @@ dispatch_io_create(dispatch_io_type_t type,
  * @result     The newly created dispatch I/O channel or NULL if an error
  *             occurred (invalid type or non-absolute path specified).
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_io_t
@@ -287,7 +287,7 @@ dispatch_io_create_with_path(dispatch_io_type_t type,
  * @result     The newly created dispatch I/O channel or NULL if an error
  *             occurred (invalid type specified).
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_io_t
@@ -349,7 +349,7 @@ typedef void (^dispatch_io_handler_t)(bool done, dispatch_data_t _Nullable data,
  *     param error     An errno condition for the read operation or zero if
  *                     the read was successful.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL4 DISPATCH_NONNULL5
 DISPATCH_NOTHROW
 void
@@ -402,7 +402,7 @@ dispatch_io_read(dispatch_io_t channel,
  *     param error     An errno condition for the write operation or zero
  *                     if the write was successful.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NONNULL4
 DISPATCH_NONNULL5 DISPATCH_NOTHROW
 void
@@ -441,7 +441,7 @@ typedef unsigned long dispatch_io_close_flags_t;
  * @param channel      The dispatch I/O channel to close.
  * @param flags                The flags for the close operation.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_io_close(dispatch_io_t channel, dispatch_io_close_flags_t flags);
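
A hedged sketch (not from this diff) combining the channel calls above; dump_file is illustrative and object releases are omitted for brevity:

#include <dispatch/dispatch.h>
#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>

static void dump_file(const char *path)
{
	dispatch_queue_t q = dispatch_get_global_queue(
			DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
	dispatch_io_t ch = dispatch_io_create_with_path(DISPATCH_IO_STREAM,
			path, O_RDONLY, 0, q, ^(int error) {
		// Cleanup handler: runs once the channel is fully closed.
		if (error) fprintf(stderr, "channel closed with error %d\n", error);
	});
	if (!ch) return;
	dispatch_io_read(ch, 0, SIZE_MAX, q,
			^(bool done, dispatch_data_t data, int error) {
		if (data) printf("delivered %zu bytes\n", dispatch_data_get_size(data));
		if (error) fprintf(stderr, "read error %d\n", error);
		if (done) dispatch_io_close(ch, 0);
	});
}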
@@ -468,7 +468,7 @@ dispatch_io_close(dispatch_io_t channel, dispatch_io_close_flags_t flags);
  * @param channel      The dispatch I/O channel to schedule the barrier on.
  * @param barrier      The barrier block.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_io_barrier(dispatch_io_t channel, dispatch_block_t barrier);
@@ -488,7 +488,7 @@ dispatch_io_barrier(dispatch_io_t channel, dispatch_block_t barrier);
  * @param channel      The dispatch I/O channel to query.
  * @result             The file descriptor underlying the channel, or -1.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_fd_t
 dispatch_io_get_descriptor(dispatch_io_t channel);
@@ -509,7 +509,7 @@ dispatch_io_get_descriptor(dispatch_io_t channel);
  * @param channel      The dispatch I/O channel on which to set the policy.
  * @param high_water   The number of bytes to use as a high water mark.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_io_set_high_water(dispatch_io_t channel, size_t high_water);
@@ -540,7 +540,7 @@ dispatch_io_set_high_water(dispatch_io_t channel, size_t high_water);
  * @param channel      The dispatch I/O channel on which to set the policy.
  * @param low_water    The number of bytes to use as a low water mark.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_io_set_low_water(dispatch_io_t channel, size_t low_water);
@@ -579,7 +579,7 @@ typedef unsigned long dispatch_io_interval_flags_t;
  * @param flags                Flags indicating desired data delivery behavior at
  *                                     interval time.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_io_set_interval(dispatch_io_t channel,
index 8b2030138455f529b257174e99e688f4d5f15fb0..3ff36c2d36ab6e23ac518f49004339d74a84a12e 100644 (file)
@@ -92,14 +92,13 @@ typedef union {
        struct dispatch_source_s *_ds;
        struct dispatch_mach_s *_dm;
        struct dispatch_mach_msg_s *_dmsg;
-       struct dispatch_timer_aggregate_s *_dta;
        struct dispatch_source_attr_s *_dsa;
        struct dispatch_semaphore_s *_dsema;
        struct dispatch_data_s *_ddata;
        struct dispatch_io_s *_dchannel;
        struct dispatch_operation_s *_doperation;
        struct dispatch_disk_s *_ddisk;
-} dispatch_object_t __attribute__((__transparent_union__));
+} dispatch_object_t DISPATCH_TRANSPARENT_UNION;
 /*! @parseOnly */
 #define DISPATCH_DECL(name) typedef struct name##_s *name##_t
 /*! @parseOnly */
@@ -201,7 +200,7 @@ __BEGIN_DECLS
  * The object to retain.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 DISPATCH_SWIFT_UNAVAILABLE("Can't be used with ARC")
 void
@@ -229,7 +228,7 @@ dispatch_retain(dispatch_object_t object);
  * The object to release.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 DISPATCH_SWIFT_UNAVAILABLE("Can't be used with ARC")
 void
@@ -253,7 +252,7 @@ dispatch_release(dispatch_object_t object);
  * @result
  * The context of the object; may be NULL.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_PURE DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 void *_Nullable
@@ -272,7 +271,7 @@ dispatch_get_context(dispatch_object_t object);
  * The new client defined context for the object. This may be NULL.
  *
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW
 void
 dispatch_set_context(dispatch_object_t object, void *_Nullable context);
@@ -298,7 +297,7 @@ dispatch_set_context(dispatch_object_t object, void *_Nullable context);
  * The context parameter passed to the finalizer function is the current
  * context of the dispatch object at the time the finalizer call is made.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW
 void
 dispatch_set_finalizer_f(dispatch_object_t object,
@@ -326,8 +325,7 @@ dispatch_set_finalizer_f(dispatch_object_t object,
  * The object to be activated.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_activate(dispatch_object_t object);
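
Illustrative only (not part of the patch): dispatch_activate() paired with dispatch_queue_attr_make_initially_inactive() from queue.h below; the label and target QoS are placeholders:

#include <dispatch/dispatch.h>

static dispatch_queue_t make_configured_queue(void)
{
	dispatch_queue_attr_t attr = dispatch_queue_attr_make_initially_inactive(
			DISPATCH_QUEUE_SERIAL);
	dispatch_queue_t q = dispatch_queue_create("com.example.worker", attr);
	// Configuration such as retargeting is done while the queue is inactive.
	dispatch_set_target_queue(q,
			dispatch_get_global_queue(QOS_CLASS_UTILITY, 0));
	dispatch_activate(q); // submitted blocks start running only after this
	return q;
}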
@@ -350,7 +348,7 @@ dispatch_activate(dispatch_object_t object);
  * The object to be suspended.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_suspend(dispatch_object_t object);
@@ -379,7 +377,7 @@ dispatch_suspend(dispatch_object_t object);
  * The object to be resumed.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_resume(dispatch_object_t object);
@@ -541,13 +539,13 @@ dispatch_testcancel(void *object);
  * @param message
  * The message to log above and beyond the introspection.
  */
-__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0)
+API_DEPRECATED("unsupported interface", macos(10.6,10.9), ios(4.0,6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW
 __attribute__((__format__(printf,2,3)))
 void
 dispatch_debug(dispatch_object_t object, const char *message, ...);
 
-__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0)
+API_DEPRECATED("unsupported interface", macos(10.6,10.9), ios(4.0,6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW
 __attribute__((__format__(printf,2,0)))
 void
index a8f56441ceba6a03098bbd8e851054704aa565f5..37a49506dcc06d644e1dc587c945ce9da245a262 100644 (file)
@@ -40,6 +40,14 @@ __BEGIN_DECLS
 DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead")
 typedef long dispatch_once_t;
 
+#if defined(__x86_64__) || defined(__i386__) || defined(__s390x__)
+#define DISPATCH_ONCE_INLINE_FASTPATH 1
+#elif defined(__APPLE__)
+#define DISPATCH_ONCE_INLINE_FASTPATH 1
+#else
+#define DISPATCH_ONCE_INLINE_FASTPATH 0
+#endif
+
 /*!
  * @function dispatch_once
  *
@@ -58,13 +66,14 @@ typedef long dispatch_once_t;
  * initialized by the block.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead")
 void
 dispatch_once(dispatch_once_t *predicate,
                DISPATCH_NOESCAPE dispatch_block_t block);
 
+#if DISPATCH_ONCE_INLINE_FASTPATH
 DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead")
 void
@@ -81,14 +90,16 @@ _dispatch_once(dispatch_once_t *predicate,
 #undef dispatch_once
 #define dispatch_once _dispatch_once
 #endif
+#endif // DISPATCH_ONCE_INLINE_FASTPATH
 
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead")
 void
 dispatch_once_f(dispatch_once_t *predicate, void *_Nullable context,
                dispatch_function_t function);
 
+#if DISPATCH_ONCE_INLINE_FASTPATH
 DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL1 DISPATCH_NONNULL3
 DISPATCH_NOTHROW
 DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead")
@@ -105,6 +116,7 @@ _dispatch_once_f(dispatch_once_t *predicate, void *_Nullable context,
 }
 #undef dispatch_once_f
 #define dispatch_once_f _dispatch_once_f
+#endif // DISPATCH_ONCE_INLINE_FASTPATH
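
Call sites look the same whether or not the DISPATCH_ONCE_INLINE_FASTPATH path above is compiled in; a minimal sketch, not from this diff (names illustrative):

#include <dispatch/dispatch.h>

static dispatch_queue_t shared_queue(void)
{
	static dispatch_queue_t queue;
	static dispatch_once_t pred; // must be zero-initialized static/global storage
	dispatch_once(&pred, ^{
		queue = dispatch_queue_create("com.example.shared",
				DISPATCH_QUEUE_SERIAL);
	});
	return queue;
}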
 
 __END_DECLS
 
index 264c34418838a3427a51ad1cd8d6985b2f55b3b0..8dab75f9d687cbfdb8bd25cd8b91f10c5cba8c45 100644 (file)
@@ -103,7 +103,7 @@ __BEGIN_DECLS
  * The result of passing NULL in this parameter is undefined.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_async(dispatch_queue_t queue, dispatch_block_t block);
@@ -133,7 +133,7 @@ dispatch_async(dispatch_queue_t queue, dispatch_block_t block);
  * dispatch_async_f().
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_async_f(dispatch_queue_t queue,
@@ -171,7 +171,7 @@ dispatch_async_f(dispatch_queue_t queue,
  * The result of passing NULL in this parameter is undefined.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_sync(dispatch_queue_t queue, DISPATCH_NOESCAPE dispatch_block_t block);
@@ -199,22 +199,56 @@ dispatch_sync(dispatch_queue_t queue, DISPATCH_NOESCAPE dispatch_block_t block);
  * dispatch_sync_f().
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_sync_f(dispatch_queue_t queue,
        void *_Nullable context,
        dispatch_function_t work);
 
+
+#if !defined(__APPLE__) || TARGET_OS_WATCH || TARGET_OS_TV || \
+               (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && \
+               __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_7_0) || \
+               (defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && \
+               __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_9)
+#define DISPATCH_APPLY_AUTO_AVAILABLE 1
+#else
+#define DISPATCH_APPLY_AUTO_AVAILABLE 0
+#endif
+
+/*!
+ * @constant DISPATCH_APPLY_AUTO
+ *
+ * @abstract
+ * Constant to pass to dispatch_apply() or dispatch_apply_f() to request that
+ * the system automatically use worker threads that match the configuration of
+ * the current thread as closely as possible.
+ *
+ * @discussion
+ * When submitting a block for parallel invocation, passing this constant as the
+ * queue argument will automatically use the global concurrent queue that
+ * matches the Quality of Service of the caller most closely.
+ *
+ * No assumptions should be made about which global concurrent queue will
+ * actually be used.
+ *
+ * Using this constant deploys backward to macOS 10.9, iOS 7.0 and any tvOS or
+ * watchOS version.
+ */
+#if DISPATCH_APPLY_AUTO_AVAILABLE
+#define DISPATCH_APPLY_AUTO ((dispatch_queue_t _Nonnull)0)
+#endif
+
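
A short sketch (not from this diff) of the new constant with dispatch_apply(), documented just below; the square_all helper is hypothetical:

#include <dispatch/dispatch.h>

static void square_all(float *values, size_t count)
{
	// DISPATCH_APPLY_AUTO selects the global queue that best matches the
	// caller's QoS; iterations may run concurrently, so each index must
	// touch disjoint state.
	dispatch_apply(count, DISPATCH_APPLY_AUTO, ^(size_t i) {
		values[i] *= values[i];
	});
}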
 /*!
  * @function dispatch_apply
  *
  * @abstract
- * Submits a block to a dispatch queue for multiple invocations.
+ * Submits a block to a dispatch queue for parallel invocation.
  *
  * @discussion
- * Submits a block to a dispatch queue for multiple invocations. This function
- * waits for the task block to complete before returning. If the target queue
+ * Submits a block to a dispatch queue for parallel invocation. This function
+ * waits for the task block to complete before returning. If the specified queue
  * is concurrent, the block may be invoked concurrently, and it must therefore
  * be reentrant safe.
  *
@@ -224,15 +258,16 @@ dispatch_sync_f(dispatch_queue_t queue,
  * The number of iterations to perform.
  *
  * @param queue
- * The target dispatch queue to which the block is submitted.
- * The result of passing NULL in this parameter is undefined.
+ * The dispatch queue to which the block is submitted.
+ * The preferred value to pass is DISPATCH_APPLY_AUTO to automatically use
+ * a queue appropriate for the calling thread.
  *
  * @param block
  * The block to be invoked for the specified number of iterations.
  * The result of passing NULL in this parameter is undefined.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_apply(size_t iterations, dispatch_queue_t queue,
@@ -243,7 +278,7 @@ dispatch_apply(size_t iterations, dispatch_queue_t queue,
  * @function dispatch_apply_f
  *
  * @abstract
- * Submits a function to a dispatch queue for multiple invocations.
+ * Submits a function to a dispatch queue for parallel invocation.
  *
  * @discussion
  * See dispatch_apply() for details.
@@ -252,20 +287,21 @@ dispatch_apply(size_t iterations, dispatch_queue_t queue,
  * The number of iterations to perform.
  *
  * @param queue
- * The target dispatch queue to which the function is submitted.
- * The result of passing NULL in this parameter is undefined.
+ * The dispatch queue to which the function is submitted.
+ * The preferred value to pass is DISPATCH_APPLY_AUTO to automatically use
+ * a queue appropriate for the calling thread.
  *
  * @param context
  * The application-defined context parameter to pass to the function.
  *
  * @param work
- * The application-defined function to invoke on the target queue. The first
+ * The application-defined function to invoke on the specified queue. The first
  * parameter passed to this function is the context provided to
  * dispatch_apply_f(). The second parameter passed to this function is the
  * current index of iteration.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL4 DISPATCH_NOTHROW
 void
 dispatch_apply_f(size_t iterations, dispatch_queue_t queue,
@@ -301,12 +337,12 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t queue,
  * @result
  * Returns the current queue.
  */
-__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0)
+API_DEPRECATED("unsupported interface", macos(10.6,10.9), ios(4.0,6.0))
 DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_queue_t
 dispatch_get_current_queue(void);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT struct dispatch_queue_s _dispatch_main_q;
 
 /*!
@@ -415,7 +451,7 @@ typedef unsigned int dispatch_qos_class_t;
  * Returns the requested global queue or NULL if the requested global queue
  * does not exist.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_queue_t
 dispatch_get_global_queue(long identifier, unsigned long flags);
@@ -454,7 +490,7 @@ DISPATCH_DECL(dispatch_queue_attr);
 #define DISPATCH_QUEUE_CONCURRENT \
                DISPATCH_GLOBAL_OBJECT(dispatch_queue_attr_t, \
                _dispatch_queue_attr_concurrent)
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3)
+API_AVAILABLE(macos(10.7), ios(4.3))
 DISPATCH_EXPORT
 struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent;
 
@@ -498,8 +534,7 @@ struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent;
  * The new value combines the attributes specified by the 'attr' parameter with
  * the initially inactive attribute.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW
 dispatch_queue_attr_t
 dispatch_queue_attr_make_initially_inactive(
@@ -556,21 +591,9 @@ dispatch_queue_attr_make_initially_inactive(
  * asynchronously. This is the behavior of the global concurrent queues.
  */
 DISPATCH_ENUM(dispatch_autorelease_frequency, unsigned long,
-       DISPATCH_AUTORELEASE_FREQUENCY_INHERIT
-                       DISPATCH_ENUM_AVAILABLE(OSX, 10.12)
-                       DISPATCH_ENUM_AVAILABLE(IOS, 10.0)
-                       DISPATCH_ENUM_AVAILABLE(TVOS, 10.0)
-                       DISPATCH_ENUM_AVAILABLE(WATCHOS, 3.0) = 0,
-       DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM
-                       DISPATCH_ENUM_AVAILABLE(OSX, 10.12)
-                       DISPATCH_ENUM_AVAILABLE(IOS, 10.0)
-                       DISPATCH_ENUM_AVAILABLE(TVOS, 10.0)
-                       DISPATCH_ENUM_AVAILABLE(WATCHOS, 3.0) = 1,
-       DISPATCH_AUTORELEASE_FREQUENCY_NEVER
-                       DISPATCH_ENUM_AVAILABLE(OSX, 10.12)
-                       DISPATCH_ENUM_AVAILABLE(IOS, 10.0)
-                       DISPATCH_ENUM_AVAILABLE(TVOS, 10.0)
-                       DISPATCH_ENUM_AVAILABLE(WATCHOS, 3.0) = 2,
+       DISPATCH_AUTORELEASE_FREQUENCY_INHERIT DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 0,
+       DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 1,
+       DISPATCH_AUTORELEASE_FREQUENCY_NEVER DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 2,
 );
 
 /*!
@@ -610,8 +633,7 @@ DISPATCH_ENUM(dispatch_autorelease_frequency, unsigned long,
  * This new value combines the attributes specified by the 'attr' parameter and
  * the chosen autorelease frequency.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW
 dispatch_queue_attr_t
 dispatch_queue_attr_make_with_autorelease_frequency(
@@ -671,7 +693,7 @@ dispatch_queue_attr_make_with_autorelease_frequency(
  * The new value combines the attributes specified by the 'attr' parameter and
  * the new QOS class and relative priority.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW
 dispatch_queue_attr_t
 dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t _Nullable attr,
@@ -736,8 +758,7 @@ dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t _Nullable attr,
  * @result
  * The newly created dispatch queue.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_queue_t
@@ -788,7 +809,7 @@ dispatch_queue_create_with_target(const char *_Nullable label,
  * @result
  * The newly created dispatch queue.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_queue_t
@@ -818,7 +839,7 @@ dispatch_queue_create(const char *_Nullable label,
  * @result
  * The label of the queue.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 const char *
 dispatch_queue_get_label(dispatch_queue_t _Nullable queue);
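
A small sketch of label queries, not part of the patch; DISPATCH_CURRENT_QUEUE_LABEL (NULL) is defined elsewhere in this header to query the queue the caller is running on:

#include <dispatch/dispatch.h>
#include <stdio.h>

static void label_example(void)
{
	dispatch_queue_t q = dispatch_queue_create("com.example.net",
			DISPATCH_QUEUE_CONCURRENT);
	printf("created: %s\n", dispatch_queue_get_label(q));
	dispatch_sync(q, ^{
		printf("current: %s\n",
				dispatch_queue_get_label(DISPATCH_CURRENT_QUEUE_LABEL));
	});
	dispatch_release(q);
}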
@@ -857,7 +878,7 @@ dispatch_queue_get_label(dispatch_queue_t _Nullable queue);
  *     - QOS_CLASS_BACKGROUND
  *     - QOS_CLASS_UNSPECIFIED
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 dispatch_qos_class_t
 dispatch_queue_get_qos_class(dispatch_queue_t queue,
@@ -922,7 +943,7 @@ dispatch_queue_get_qos_class(dispatch_queue_t queue,
  * If queue is DISPATCH_TARGET_QUEUE_DEFAULT, set the object's target queue
  * to the default target queue for the given object type.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW
 void
 dispatch_set_target_queue(dispatch_object_t object,
@@ -941,7 +962,7 @@ dispatch_set_target_queue(dispatch_object_t object,
  * Applications that call NSApplicationMain() or CFRunLoopRun() on the
  * main thread do not need to call dispatch_main().
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW DISPATCH_NORETURN
 void
 dispatch_main(void);
@@ -969,7 +990,7 @@ dispatch_main(void);
  * The result of passing NULL in this parameter is undefined.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_after(dispatch_time_t when,
@@ -1002,7 +1023,7 @@ dispatch_after(dispatch_time_t when,
  * dispatch_after_f().
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW
 void
 dispatch_after_f(dispatch_time_t when,
@@ -1049,7 +1070,7 @@ dispatch_after_f(dispatch_time_t when,
  * The result of passing NULL in this parameter is undefined.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3)
+API_AVAILABLE(macos(10.7), ios(4.3))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_barrier_async(dispatch_queue_t queue, dispatch_block_t block);
@@ -1083,7 +1104,7 @@ dispatch_barrier_async(dispatch_queue_t queue, dispatch_block_t block);
  * dispatch_barrier_async_f().
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3)
+API_AVAILABLE(macos(10.7), ios(4.3))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_barrier_async_f(dispatch_queue_t queue,
@@ -1111,7 +1132,7 @@ dispatch_barrier_async_f(dispatch_queue_t queue,
  * The result of passing NULL in this parameter is undefined.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3)
+API_AVAILABLE(macos(10.7), ios(4.3))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_barrier_sync(dispatch_queue_t queue,
@@ -1143,7 +1164,7 @@ dispatch_barrier_sync(dispatch_queue_t queue,
  * dispatch_barrier_sync_f().
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3)
+API_AVAILABLE(macos(10.7), ios(4.3))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_barrier_sync_f(dispatch_queue_t queue,
@@ -1186,7 +1207,7 @@ dispatch_barrier_sync_f(dispatch_queue_t queue,
  * The destructor function pointer. This may be NULL and is ignored if context
  * is NULL.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_queue_set_specific(dispatch_queue_t queue, const void *key,
@@ -1215,7 +1236,7 @@ dispatch_queue_set_specific(dispatch_queue_t queue, const void *key,
  * @result
  * The context for the specified key or NULL if no context was found.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_PURE DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 void *_Nullable
@@ -1242,7 +1263,7 @@ dispatch_queue_get_specific(dispatch_queue_t queue, const void *key);
  * @result
  * The context for the specified key or NULL if no context was found.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 void *_Nullable
 dispatch_get_specific(const void *key);
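
A sketch (not from this diff) of per-queue storage using the calls above; the key's address serves as its identity, and free() runs as the destructor:

#include <dispatch/dispatch.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const void *kCtxKey = &kCtxKey; // the key's address is its identity

static void specific_example(void)
{
	dispatch_queue_t q = dispatch_queue_create("com.example.ctx", NULL);
	char *ctx = strdup("per-queue state");
	// free() is invoked as the destructor when the value is replaced or the
	// queue is destroyed.
	dispatch_queue_set_specific(q, kCtxKey, ctx, free);
	dispatch_sync(q, ^{
		printf("ctx: %s\n", (char *)dispatch_get_specific(kCtxKey));
	});
	dispatch_release(q);
}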
@@ -1296,8 +1317,7 @@ dispatch_get_specific(const void *key);
  * The dispatch queue that the current block is expected to run on.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1
 void
 dispatch_assert_queue(dispatch_queue_t queue)
@@ -1323,8 +1343,7 @@ dispatch_assert_queue(dispatch_queue_t queue)
  * The dispatch queue that the current block is expected to run as a barrier on.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1
 void
 dispatch_assert_queue_barrier(dispatch_queue_t queue);
@@ -1347,8 +1366,7 @@ dispatch_assert_queue_barrier(dispatch_queue_t queue);
  * The dispatch queue that the current block is expected not to run on.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1
 void
 dispatch_assert_queue_not(dispatch_queue_t queue)
index b6139d70d7359113c9c3e4135492569677563f7d..f5394b45dd09e1e2b54d2001a5bb43135751f0a8 100644 (file)
@@ -57,7 +57,7 @@ __BEGIN_DECLS
  * @result
  * The newly created semaphore, or NULL on failure.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_semaphore_t
@@ -83,7 +83,7 @@ dispatch_semaphore_create(long value);
  * @result
  * Returns zero on success, or non-zero if the timeout occurred.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 long
 dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout);
@@ -105,7 +105,7 @@ dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout);
  * This function returns non-zero if a thread is woken. Otherwise, zero is
  * returned.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 long
 dispatch_semaphore_signal(dispatch_semaphore_t dsema);
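
A minimal sketch, not part of the patch, of the timeout idiom for the wait/signal pair above (the two-second deadline is illustrative):

#include <dispatch/dispatch.h>

// Returns non-zero if the worker signaled within two seconds.
static int wait_for_worker(dispatch_semaphore_t sema)
{
	dispatch_time_t deadline = dispatch_time(DISPATCH_TIME_NOW,
			2 * NSEC_PER_SEC);
	return dispatch_semaphore_wait(sema, deadline) == 0;
}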
index 63b3ff365c74b7306f5d7e43b842f686301114be..6992d422691c11b5dcf2b52e0fa376054c53e350 100644 (file)
@@ -79,7 +79,7 @@ typedef const struct dispatch_source_type_s *dispatch_source_type_t;
  * The mask is unused (pass zero for now).
  */
 #define DISPATCH_SOURCE_TYPE_DATA_ADD (&_dispatch_source_type_data_add)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_SOURCE_TYPE_DECL(data_add);
 
 /*!
@@ -90,9 +90,24 @@ DISPATCH_SOURCE_TYPE_DECL(data_add);
  * The mask is unused (pass zero for now).
  */
 #define DISPATCH_SOURCE_TYPE_DATA_OR (&_dispatch_source_type_data_or)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_SOURCE_TYPE_DECL(data_or);
 
+/*!
+ * @const DISPATCH_SOURCE_TYPE_DATA_REPLACE
+ * @discussion A dispatch source that tracks data obtained via calls to
+ * dispatch_source_merge_data(). Newly obtained data values replace existing
+ * data values not yet delivered to the source handler.
+ *
+ * A data value of zero will cause the source handler to not be invoked.
+ *
+ * The handle is unused (pass zero for now).
+ * The mask is unused (pass zero for now).
+ */
+#define DISPATCH_SOURCE_TYPE_DATA_REPLACE (&_dispatch_source_type_data_replace)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+DISPATCH_SOURCE_TYPE_DECL(data_replace);
+
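
A hedged sketch (not from this diff) of the new replace-type source coalescing progress updates; the percentage scheme is illustrative:

#include <dispatch/dispatch.h>
#include <stdio.h>

static dispatch_source_t make_progress_source(dispatch_queue_t q)
{
	dispatch_source_t s = dispatch_source_create(
			DISPATCH_SOURCE_TYPE_DATA_REPLACE, 0 /* handle */, 0 /* mask */, q);
	dispatch_source_set_event_handler(s, ^{
		// Values merged since the last callout were replaced, not summed;
		// only the most recent one is observed here.
		printf("progress: %lu%%\n", dispatch_source_get_data(s));
	});
	dispatch_activate(s);
	return s;
}
// Producer side: dispatch_source_merge_data(s, percent); /* zero is ignored */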
 /*!
  * @const DISPATCH_SOURCE_TYPE_MACH_SEND
  * @discussion A dispatch source that monitors a Mach port for dead name
@@ -101,7 +116,7 @@ DISPATCH_SOURCE_TYPE_DECL(data_or);
  * The mask is a mask of desired events from dispatch_source_mach_send_flags_t.
  */
 #define DISPATCH_SOURCE_TYPE_MACH_SEND (&_dispatch_source_type_mach_send)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE()
+API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE()
 DISPATCH_SOURCE_TYPE_DECL(mach_send);
 
 /*!
@@ -111,7 +126,7 @@ DISPATCH_SOURCE_TYPE_DECL(mach_send);
  * The mask is unused (pass zero for now).
  */
 #define DISPATCH_SOURCE_TYPE_MACH_RECV (&_dispatch_source_type_mach_recv)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE()
+API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE()
 DISPATCH_SOURCE_TYPE_DECL(mach_recv);
 
 /*!
@@ -124,7 +139,7 @@ DISPATCH_SOURCE_TYPE_DECL(mach_recv);
  */
 #define DISPATCH_SOURCE_TYPE_MEMORYPRESSURE \
                (&_dispatch_source_type_memorypressure)
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_8_0) DISPATCH_LINUX_UNAVAILABLE()
+API_AVAILABLE(macos(10.9), ios(8.0)) DISPATCH_LINUX_UNAVAILABLE()
 DISPATCH_SOURCE_TYPE_DECL(memorypressure);
 
 /*!
@@ -135,7 +150,7 @@ DISPATCH_SOURCE_TYPE_DECL(memorypressure);
  * The mask is a mask of desired events from dispatch_source_proc_flags_t.
  */
 #define DISPATCH_SOURCE_TYPE_PROC (&_dispatch_source_type_proc)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE()
+API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE()
 DISPATCH_SOURCE_TYPE_DECL(proc);
 
 /*!
@@ -146,7 +161,7 @@ DISPATCH_SOURCE_TYPE_DECL(proc);
  * The mask is unused (pass zero for now).
  */
 #define DISPATCH_SOURCE_TYPE_READ (&_dispatch_source_type_read)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_SOURCE_TYPE_DECL(read);
 
 /*!
@@ -156,7 +171,7 @@ DISPATCH_SOURCE_TYPE_DECL(read);
  * The mask is unused (pass zero for now).
  */
 #define DISPATCH_SOURCE_TYPE_SIGNAL (&_dispatch_source_type_signal)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_SOURCE_TYPE_DECL(signal);
 
 /*!
@@ -167,7 +182,7 @@ DISPATCH_SOURCE_TYPE_DECL(signal);
  * The mask specifies which flags from dispatch_source_timer_flags_t to apply.
  */
 #define DISPATCH_SOURCE_TYPE_TIMER (&_dispatch_source_type_timer)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_SOURCE_TYPE_DECL(timer);
 
 /*!
@@ -178,7 +193,7 @@ DISPATCH_SOURCE_TYPE_DECL(timer);
  * The mask is a mask of desired events from dispatch_source_vnode_flags_t.
  */
 #define DISPATCH_SOURCE_TYPE_VNODE (&_dispatch_source_type_vnode)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE()
+API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE()
 DISPATCH_SOURCE_TYPE_DECL(vnode);
 
 /*!
@@ -189,7 +204,7 @@ DISPATCH_SOURCE_TYPE_DECL(vnode);
  * The mask is unused (pass zero for now).
  */
 #define DISPATCH_SOURCE_TYPE_WRITE (&_dispatch_source_type_write)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_SOURCE_TYPE_DECL(write);
 
 /*!
@@ -361,7 +376,7 @@ typedef unsigned long dispatch_source_timer_flags_t;
  * @result
  * The newly created dispatch source, or NULL if invalid arguments are passed.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_source_t
@@ -384,7 +399,7 @@ dispatch_source_create(dispatch_source_type_t type,
  * The event handler block to submit to the source's target queue.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_source_set_event_handler(dispatch_source_t source,
@@ -406,7 +421,7 @@ dispatch_source_set_event_handler(dispatch_source_t source,
  * The context parameter passed to the event handler function is the context of
  * the dispatch source current at the time the event handler was set.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_source_set_event_handler_f(dispatch_source_t source,
@@ -425,12 +440,13 @@ dispatch_source_set_event_handler_f(dispatch_source_t source,
  * the source's event handler block has returned.
  *
  * IMPORTANT:
- * A cancellation handler is required for file descriptor and mach port based
- * sources in order to safely close the descriptor or destroy the port. Closing
- * the descriptor or port before the cancellation handler may result in a race
- * condition. If a new descriptor is allocated with the same value as the
- * recently closed descriptor while the source's event handler is still running,
- * the event handler may read/write data to the wrong descriptor.
+ * Source cancellation and a cancellation handler are required for file
+ * descriptor and mach port based sources in order to safely close the
+ * descriptor or destroy the port.
+ * Closing the descriptor or port before the cancellation handler is invoked may
+ * result in a race condition. If a new descriptor is allocated with the same
+ * value as the recently closed descriptor while the source's event handler is
+ * still running, the event handler may read/write data to the wrong descriptor.
  *
  * @param source
  * The dispatch source to modify.
@@ -440,7 +456,7 @@ dispatch_source_set_event_handler_f(dispatch_source_t source,
  * The cancellation handler block to submit to the source's target queue.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_source_set_cancel_handler(dispatch_source_t source,
@@ -465,7 +481,7 @@ dispatch_source_set_cancel_handler(dispatch_source_t source,
  * The context parameter passed to the event handler function is the current
  * context of the dispatch source at the time the handler call is made.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_source_set_cancel_handler_f(dispatch_source_t source,
@@ -493,7 +509,7 @@ dispatch_source_set_cancel_handler_f(dispatch_source_t source,
  * The dispatch source to be canceled.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_source_cancel(dispatch_source_t source);
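
A sketch, not part of the patch, of the pattern the IMPORTANT note above prescribes: the descriptor is closed only from the cancellation handler (watch_fd is illustrative):

#include <dispatch/dispatch.h>
#include <stdint.h>
#include <unistd.h>

static dispatch_source_t watch_fd(int fd, dispatch_queue_t q)
{
	dispatch_source_t s = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ,
			(uintptr_t)fd, 0, q);
	dispatch_source_set_event_handler(s, ^{
		char buf[4096];
		ssize_t n = read(fd, buf, sizeof(buf));
		if (n <= 0) dispatch_source_cancel(s); // EOF or error
	});
	// Close the descriptor only once cancellation has completed, never earlier.
	dispatch_source_set_cancel_handler(s, ^{
		close(fd);
	});
	dispatch_resume(s); // sources are created suspended
	return s;
}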
@@ -511,7 +527,7 @@ dispatch_source_cancel(dispatch_source_t source);
  * @result
  * Non-zero if canceled and zero if not canceled.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE
 DISPATCH_NOTHROW
 long
@@ -542,7 +558,7 @@ dispatch_source_testcancel(dispatch_source_t source);
  *  DISPATCH_SOURCE_TYPE_VNODE:           file descriptor (int)
  *  DISPATCH_SOURCE_TYPE_WRITE:           file descriptor (int)
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE
 DISPATCH_NOTHROW
 uintptr_t
@@ -573,7 +589,7 @@ dispatch_source_get_handle(dispatch_source_t source);
  *  DISPATCH_SOURCE_TYPE_VNODE:           dispatch_source_vnode_flags_t
  *  DISPATCH_SOURCE_TYPE_WRITE:           n/a
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE
 DISPATCH_NOTHROW
 unsigned long
@@ -611,7 +627,7 @@ dispatch_source_get_mask(dispatch_source_t source);
  *  DISPATCH_SOURCE_TYPE_VNODE:           dispatch_source_vnode_flags_t
  *  DISPATCH_SOURCE_TYPE_WRITE:           estimated buffer space available
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE
 DISPATCH_NOTHROW
 unsigned long
@@ -633,7 +649,7 @@ dispatch_source_get_data(dispatch_source_t source);
  * as specified by the dispatch source type. A value of zero has no effect
  * and will not result in the submission of the event handler block.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_source_merge_data(dispatch_source_t source, unsigned long value);
@@ -685,7 +701,7 @@ dispatch_source_merge_data(dispatch_source_t source, unsigned long value);
  * @param leeway
  * The nanosecond leeway for the timer.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_source_set_timer(dispatch_source_t source,
@@ -715,7 +731,7 @@ dispatch_source_set_timer(dispatch_source_t source,
  * The registration handler block to submit to the source's target queue.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3)
+API_AVAILABLE(macos(10.7), ios(4.3))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_source_set_registration_handler(dispatch_source_t source,
@@ -740,7 +756,7 @@ dispatch_source_set_registration_handler(dispatch_source_t source,
  * The context parameter passed to the registration handler function is the
  * current context of the dispatch source at the time the handler call is made.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3)
+API_AVAILABLE(macos(10.7), ios(4.3))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_source_set_registration_handler_f(dispatch_source_t source,
index c2152ea1411e6c81df932a5679cf6107884824a5..ce99f27000b5711d65ef2436087ee8293a490286 100644 (file)
@@ -89,7 +89,7 @@ typedef uint64_t dispatch_time_t;
  * @result
  * A new dispatch_time_t.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_time_t
 dispatch_time(dispatch_time_t when, int64_t delta);
@@ -113,7 +113,7 @@ dispatch_time(dispatch_time_t when, int64_t delta);
  * @result
  * A new dispatch_time_t.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_time_t
 dispatch_walltime(const struct timespec *_Nullable when, int64_t delta);
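
A short sketch (not from this diff) contrasting the two clocks above with dispatch_after(); the five-second delays are illustrative:

#include <dispatch/dispatch.h>
#include <stdio.h>

static void schedule_examples(dispatch_queue_t q)
{
	// Relative to the uptime clock; does not advance while the machine sleeps.
	dispatch_time_t in_5s = dispatch_time(DISPATCH_TIME_NOW, 5 * NSEC_PER_SEC);
	// Wall-clock based; NULL means "current time", so this also fires in
	// about five seconds but tracks calendar time across machine sleep.
	dispatch_time_t wall_5s = dispatch_walltime(NULL, 5 * NSEC_PER_SEC);

	dispatch_after(in_5s, q, ^{ printf("uptime timer fired\n"); });
	dispatch_after(wall_5s, q, ^{ printf("wall-clock timer fired\n"); });
}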
index df9d7964c70e92f60a5d81577913a25e324f225d..e7134e709211678c3f5dac82e29cef6283c5e3ca 100644 (file)
                6E21F2E81BBB23FA0000C6A5 /* firehose_server_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E21F2E41BBB23F00000C6A5 /* firehose_server_internal.h */; };
                6E21F2E91BBB240E0000C6A5 /* firehose_server.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E21F2E51BBB23F00000C6A5 /* firehose_server.c */; };
                6E393F981BD60F8D005A551E /* firehose_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */; };
+               6E4BACBD1D48A41500B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; };
+               6E4BACC21D48A42000B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; };
+               6E4BACC31D48A42100B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; };
+               6E4BACC41D48A42200B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; };
+               6E4BACC51D48A42200B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; };
+               6E4BACC61D48A42300B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; };
+               6E4BACC71D48A42300B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; };
+               6E4BACC81D48A42400B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; };
+               6E4BACCA1D48A89500B562AE /* mach_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E4BACC91D48A89500B562AE /* mach_internal.h */; };
+               6E4BACF51D49A04600B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; };
+               6E4BACF61D49A04700B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; };
+               6E4BACF71D49A04700B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; };
+               6E4BACF81D49A04800B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; };
+               6E4BACF91D49A04800B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; };
+               6E4BACFA1D49A04900B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; };
+               6E4BACFB1D49A04A00B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; };
+               6E4BACFC1D49A04A00B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; };
+               6E5ACCBA1D3C4D0B007DA2B4 /* event_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */; };
+               6E5ACCBB1D3C4D0E007DA2B4 /* event_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */; };
+               6E5ACCBC1D3C4D0F007DA2B4 /* event_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */; };
                6E90269C1BB9BD50004DC3AD /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; settings = {ATTRIBUTES = (Server, ); }; };
                6E9955581C3AF7710071D40C /* venture_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9955571C3AF7710071D40C /* venture_private.h */; };
                6E99558A1C3AF7900071D40C /* venture_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9955571C3AF7710071D40C /* venture_private.h */; };
                6E9956091C3B21B40071D40C /* venture_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9956061C3B21AA0071D40C /* venture_internal.h */; };
                6E9B6B5F1BB4F3C8009E324D /* firehose_buffer_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */; };
                6EA283D71CAB93920041B2E0 /* libdispatch.codes in Copy Trace Definitions */ = {isa = PBXBuildFile; fileRef = 6EA283D01CAB93270041B2E0 /* libdispatch.codes */; };
+               6EA793891D458A5800929B1B /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; };
+               6EA7938E1D458A5C00929B1B /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; };
+               6EA7938F1D458A5E00929B1B /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; };
+               6EA962971D48622600759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; };
+               6EA962981D48622700759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; };
+               6EA962991D48622800759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; };
+               6EA9629A1D48622900759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; };
+               6EA9629B1D48622900759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; };
+               6EA9629C1D48622A00759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; };
+               6EA9629D1D48622B00759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; };
+               6EA9629E1D48622C00759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; };
+               6EA9629F1D48625000759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; };
+               6EA962A01D48625100759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; };
+               6EA962A11D48625100759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; };
+               6EA962A21D48625200759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; };
+               6EA962A31D48625300759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; };
+               6EA962A41D48625300759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; };
+               6EA962A51D48625400759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; };
+               6EA962A61D48625500759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; };
                6EB60D2C1BBB197B0092FA94 /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; };
                6EBEC7E51BBDD30C009B1596 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; };
                6EBEC7E61BBDD30D009B1596 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; };
                6EF2CAB31C8899ED001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; };
                6EF2CAB41C889D65001ABE83 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; };
                6EF2CAB51C889D67001ABE83 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; };
+               6EFBDA4B1D61A0D600282887 /* priority.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EFBDA4A1D61A0D600282887 /* priority.h */; };
                721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */ = {isa = PBXBuildFile; fileRef = 721F5C5C0F15520500FF03A6 /* semaphore.h */; settings = {ATTRIBUTES = (Public, ); }; };
                721F5CCF0F15553500FF03A6 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; };
                72CC94300ECCD8750031B751 /* base.h in Headers */ = {isa = PBXBuildFile; fileRef = 72CC942F0ECCD8750031B751 /* base.h */; settings = {ATTRIBUTES = (Public, ); }; };
                E48EC97C1835BADD00EAC4F1 /* yield.h in Headers */ = {isa = PBXBuildFile; fileRef = E48EC97B1835BADD00EAC4F1 /* yield.h */; };
                E48EC97D1835BADD00EAC4F1 /* yield.h in Headers */ = {isa = PBXBuildFile; fileRef = E48EC97B1835BADD00EAC4F1 /* yield.h */; };
                E48EC97E1835BADD00EAC4F1 /* yield.h in Headers */ = {isa = PBXBuildFile; fileRef = E48EC97B1835BADD00EAC4F1 /* yield.h */; };
+               E49BB6D11E70748100868613 /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; };
+               E49BB6D21E70748100868613 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; };
+               E49BB6D31E70748100868613 /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; };
+               E49BB6D41E70748100868613 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; };
+               E49BB6D51E70748100868613 /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; };
+               E49BB6D61E70748100868613 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; };
+               E49BB6D71E70748100868613 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; };
+               E49BB6D81E70748100868613 /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; };
+               E49BB6D91E70748100868613 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; };
+               E49BB6DA1E70748100868613 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; };
+               E49BB6DB1E70748100868613 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; };
+               E49BB6DC1E70748100868613 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; };
+               E49BB6DD1E70748100868613 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; };
+               E49BB6DE1E70748100868613 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; };
+               E49BB6DF1E70748100868613 /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; };
+               E49BB6E01E70748100868613 /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; };
+               E49BB6E11E70748100868613 /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; };
+               E49BB6E21E70748100868613 /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; };
+               E49BB6E31E70748100868613 /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; };
+               E49BB6E41E70748100868613 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; };
+               E49BB6E51E70748100868613 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; };
+               E49BB6E61E70748100868613 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; };
+               E49BB6E71E70748100868613 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; };
+               E49BB6E81E70748100868613 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; };
+               E49BB6E91E70748100868613 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; };
+               E49BB6EA1E70748100868613 /* object.m in Sources */ = {isa = PBXBuildFile; fileRef = E4FC3263145F46C9002FBDDB /* object.m */; };
+               E49BB6EB1E70748100868613 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; };
+               E49BB6EC1E70748100868613 /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; };
+               E49BB6ED1E70748100868613 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; };
+               E49BB7091E70A39700868613 /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; };
+               E49BB70A1E70A3B000868613 /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; };
                E49F2423125D3C960057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; };
                E49F2424125D3C970057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; };
                E49F2499125D48D80057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; };
                        remoteGlobalIDString = E4EC121612514715000DDBD1;
                        remoteInfo = "libdispatch mp resolved";
                };
+               E49BB6F71E7074C100868613 /* PBXContainerItemProxy */ = {
+                       isa = PBXContainerItemProxy;
+                       containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */;
+                       proxyType = 1;
+                       remoteGlobalIDString = E49BB6CE1E70748100868613;
+                       remoteInfo = "libdispatch alt resolved";
+               };
                E4B515DA164B317700E003AF /* PBXContainerItemProxy */ = {
                        isa = PBXContainerItemProxy;
                        containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */;
                6E326B161C239431002A6505 /* dispatch_timer_short.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_timer_short.c; sourceTree = "<group>"; };
                6E326B171C239431002A6505 /* dispatch_timer_timeout.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_timer_timeout.c; sourceTree = "<group>"; };
                6E326B441C239B61002A6505 /* dispatch_priority.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_priority.c; sourceTree = "<group>"; };
-               6E4130C91B431697001A152D /* backward-compat.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "backward-compat.xcconfig"; sourceTree = "<group>"; };
+               6E4BACBC1D48A41500B562AE /* mach.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mach.c; sourceTree = "<group>"; };
+               6E4BACC91D48A89500B562AE /* mach_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mach_internal.h; sourceTree = "<group>"; };
                6E4FC9D11C84123600520351 /* os_venture_basic.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = os_venture_basic.c; sourceTree = "<group>"; };
+               6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = event_kevent.c; sourceTree = "<group>"; };
+               6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = event_internal.h; sourceTree = "<group>"; };
+               6E5ACCBD1D3C6719007DA2B4 /* event.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = event.c; sourceTree = "<group>"; };
                6E62B0531C55806200D2C7C0 /* dispatch_trysync.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_trysync.c; sourceTree = "<group>"; };
                6E67D8D31C16C20B00FC98AC /* dispatch_apply.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_apply.c; sourceTree = "<group>"; };
                6E67D8D91C16C94B00FC98AC /* dispatch_cf_main.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_cf_main.c; sourceTree = "<group>"; };
                6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_buffer_internal.h; sourceTree = "<group>"; };
                6EA283D01CAB93270041B2E0 /* libdispatch.codes */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.codes; sourceTree = "<group>"; };
                6EA2CB841C005DEF0076794A /* dispatch_source.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_source.c; sourceTree = "<group>"; };
+               6EA7937D1D456D1300929B1B /* event_epoll.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = event_epoll.c; sourceTree = "<group>"; };
+               6EA793881D458A5800929B1B /* event_config.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = event_config.h; sourceTree = "<group>"; };
                6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libfirehose_server.a; sourceTree = BUILT_PRODUCTS_DIR; };
                6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libfirehose.xcconfig; sourceTree = "<group>"; };
                6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_inline_internal.h; sourceTree = "<group>"; };
-               6EC5ABF71D4446CA004F8674 /* dispatch_deadname.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_deadname.c; sourceTree = "<group>"; };
+               6EC5ABE31D4436E4004F8674 /* dispatch_deadname.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_deadname.c; sourceTree = "<group>"; };
+               6EC670C61E37E201004F10D6 /* dispatch_network_event_thread.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_network_event_thread.c; sourceTree = "<group>"; };
+               6EC670C71E37E201004F10D6 /* perf_mach_async.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = perf_mach_async.c; sourceTree = "<group>"; };
+               6EC670C81E37E201004F10D6 /* perf_pipepingpong.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = perf_pipepingpong.c; sourceTree = "<group>"; };
                6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_kevent_cancel_races.c; sourceTree = "<group>"; };
                6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_buffer_private.h; sourceTree = "<group>"; };
                6EE89F3D1BFAF5B000EB140D /* dispatch_state_machine.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_state_machine.c; sourceTree = "<group>"; };
                6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_internal.h; sourceTree = "<group>"; };
                6EF2CAA41C88998A001ABE83 /* lock.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lock.h; sourceTree = "<group>"; };
                6EF2CAAB1C8899D5001ABE83 /* lock.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = lock.c; path = shims/lock.c; sourceTree = "<group>"; };
+               6EFBDA4A1D61A0D600282887 /* priority.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = priority.h; sourceTree = "<group>"; };
                721F5C5C0F15520500FF03A6 /* semaphore.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = semaphore.h; sourceTree = "<group>"; };
                721F5CCE0F15553500FF03A6 /* semaphore.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = semaphore.c; sourceTree = "<group>"; xcLanguageSpecificationIdentifier = xcode.lang.c; };
                72406A031AF95DF800DF4E2B /* firehose_reply.defs */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.mig; path = firehose_reply.defs; sourceTree = "<group>"; };
                96BC39BC0F3EBAB100C59689 /* queue_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = queue_private.h; sourceTree = "<group>"; };
                96C9553A0F3EAEDD000D2CA4 /* once.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = once.h; sourceTree = "<group>"; };
                96DF70BD0F38FE3C0074BD99 /* once.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = once.c; sourceTree = "<group>"; xcLanguageSpecificationIdentifier = xcode.lang.c; };
+               B63B793F1E8F004F0060C1E1 /* dispatch_no_blocks.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_no_blocks.c; sourceTree = "<group>"; };
+               B68330BC1EBCF6080003E71C /* dispatch_wl.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_wl.c; sourceTree = "<group>"; };
+               B69878521F06F8790088F94F /* dispatch_signals.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_signals.c; sourceTree = "<group>"; };
+               B6AC73FD1EB10973009FB2F2 /* perf_thread_request.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = perf_thread_request.c; sourceTree = "<group>"; };
+               B6AE9A4A1D7F53B300AC007F /* dispatch_queue_create.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_queue_create.c; sourceTree = "<group>"; };
+               B6AE9A561D7F53C100AC007F /* perf_async_bench.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = perf_async_bench.m; sourceTree = "<group>"; };
+               B6AE9A581D7F53CB00AC007F /* perf_bench.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = perf_bench.m; sourceTree = "<group>"; };
+               B6FA01801F0AD522004479BF /* dispatch_pthread_root_queue.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_pthread_root_queue.c; sourceTree = "<group>"; };
                C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_dyld_stub.a; sourceTree = BUILT_PRODUCTS_DIR; };
                C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-dyld-stub.xcconfig"; sourceTree = "<group>"; };
                C01866BD1C5973210040FC07 /* libdispatch.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch.a; sourceTree = BUILT_PRODUCTS_DIR; };
                E44F9DA816543F79001DCD38 /* introspection_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection_internal.h; sourceTree = "<group>"; };
                E454569214746F1B00106147 /* object_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object_private.h; sourceTree = "<group>"; };
                E463024F1761603C00E11F4C /* atomic_sfb.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = atomic_sfb.h; sourceTree = "<group>"; };
-               E46DBC5714EE10C80001F9F6 /* libdispatch.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch.a; sourceTree = BUILT_PRODUCTS_DIR; };
+               E46DBC5714EE10C80001F9F6 /* libdispatch_up.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_up.a; sourceTree = BUILT_PRODUCTS_DIR; };
                E46DBC5814EE11BC0001F9F6 /* libdispatch-up-static.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-up-static.xcconfig"; sourceTree = "<group>"; };
                E47D6BB5125F0F800070D91C /* resolved.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolved.h; sourceTree = "<group>"; };
                E482F1CD12DBAB590030614D /* postprocess-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "postprocess-headers.sh"; sourceTree = "<group>"; };
                E48AF55916E70FD9004105FF /* io_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = io_private.h; path = private/io_private.h; sourceTree = SOURCE_ROOT; tabWidth = 8; };
                E48EC97B1835BADD00EAC4F1 /* yield.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = yield.h; sourceTree = "<group>"; };
+               E49BB6F21E70748100868613 /* libdispatch_alt.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_alt.a; sourceTree = BUILT_PRODUCTS_DIR; };
                E49F24DF125D57FA0057C971 /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; };
                E49F251D125D630A0057C971 /* install-manpages.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-manpages.sh"; sourceTree = "<group>"; };
                E49F251E125D631D0057C971 /* mig-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "mig-headers.sh"; sourceTree = "<group>"; };
-               E4B2D42D17A7F0F90034A18F /* libdispatch-resolver_iphoneos.order */ = {isa = PBXFileReference; lastKnownFileType = text; path = "libdispatch-resolver_iphoneos.order"; sourceTree = "<group>"; };
-               E4B2D42E17A7F0F90034A18F /* libdispatch_iphoneos.order */ = {isa = PBXFileReference; lastKnownFileType = text; path = libdispatch_iphoneos.order; sourceTree = "<group>"; };
                E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = voucher_activity_private.h; sourceTree = "<group>"; };
                E4B515D6164B2DA300E003AF /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; };
                E4B515D7164B2DFB00E003AF /* introspection_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection_private.h; sourceTree = "<group>"; };
                                FC7BEDAF0E83626100161930 /* Dispatch Private Headers */,
                                FC7BEDB60E8363DC00161930 /* Dispatch Project Headers */,
                                08FB7795FE84155DC02AAC07 /* Dispatch Source */,
-                               6EF0B2661BA8C43D007FA4F6 /* Firehose Project Headers */,
-                               6EF0B2641BA8C3A0007FA4F6 /* Firehose Source */,
                                92F3FEC91BEC687200025962 /* Darwin Tests */,
                                C6A0FF2B0290797F04C91782 /* Documentation */,
                                1AB674ADFE9D54B511CA2CBB /* Products */,
                                E4B515DC164B32E000E003AF /* introspection.c */,
                                5A27262510F26F1900751FBC /* io.c */,
                                6EF2CAAB1C8899D5001ABE83 /* lock.c */,
+                               6E4BACBC1D48A41500B562AE /* mach.c */,
                                9661E56A0F3E7DDF00749F3E /* object.c */,
                                E4FC3263145F46C9002FBDDB /* object.m */,
                                96DF70BD0F38FE3C0074BD99 /* once.c */,
                                6EA283D01CAB93270041B2E0 /* libdispatch.codes */,
                                FC7BED950E8361E600161930 /* protocol.defs */,
                                E43570B8126E93380097AB9F /* provider.d */,
+                               6E5ACCAF1D3BF2A0007DA2B4 /* event */,
+                               6EF0B2641BA8C3A0007FA4F6 /* firehose */,
                        );
                        name = "Dispatch Source";
                        path = src;
                        isa = PBXGroup;
                        children = (
                                D2AAC046055464E500DB518D /* libdispatch.dylib */,
-                               E4EC11C312514302000DDBD1 /* libdispatch_up.a */,
-                               E4EC122D12514715000DDBD1 /* libdispatch_mp.a */,
-                               E49F24DF125D57FA0057C971 /* libdispatch.dylib */,
-                               E46DBC5714EE10C80001F9F6 /* libdispatch.a */,
                                E4B515D6164B2DA300E003AF /* libdispatch.dylib */,
-                               6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */,
-                               6E040C631C499B1B00411A2E /* libfirehose_kernel.a */,
+                               E49F24DF125D57FA0057C971 /* libdispatch.dylib */,
+                               E4EC122D12514715000DDBD1 /* libdispatch_mp.a */,
+                               E4EC11C312514302000DDBD1 /* libdispatch_up.a */,
+                               E49BB6F21E70748100868613 /* libdispatch_alt.a */,
+                               E46DBC5714EE10C80001F9F6 /* libdispatch_up.a */,
                                C01866BD1C5973210040FC07 /* libdispatch.a */,
                                C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */,
+                               6E040C631C499B1B00411A2E /* libfirehose_kernel.a */,
+                               6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */,
                        );
                        name = Products;
                        sourceTree = "<group>";
                        name = Products;
                        sourceTree = "<group>";
                };
+               6E5ACCAE1D3BF27F007DA2B4 /* event */ = {
+                       isa = PBXGroup;
+                       children = (
+                               6EA793881D458A5800929B1B /* event_config.h */,
+                               6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */,
+                       );
+                       path = event;
+                       sourceTree = "<group>";
+               };
+               6E5ACCAF1D3BF2A0007DA2B4 /* event */ = {
+                       isa = PBXGroup;
+                       children = (
+                               6E5ACCBD1D3C6719007DA2B4 /* event.c */,
+                               6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */,
+                               6EA7937D1D456D1300929B1B /* event_epoll.c */,
+                       );
+                       path = event;
+                       sourceTree = "<group>";
+               };
                6E9B6AE21BB39793009E324D /* OS Public Headers */ = {
                        isa = PBXGroup;
                        children = (
                        path = os;
                        sourceTree = "<group>";
                };
-               6EF0B2641BA8C3A0007FA4F6 /* Firehose Source */ = {
+               6EF0B2641BA8C3A0007FA4F6 /* firehose */ = {
                        isa = PBXGroup;
                        children = (
                                72406A391AF9926000DF4E2B /* firehose_types.defs */,
                                6E21F2E51BBB23F00000C6A5 /* firehose_server.c */,
                                72DEAA9D1AE1BB7300289540 /* firehose_server_object.m */,
                        );
-                       name = "Firehose Source";
-                       path = src/firehose;
+                       path = firehose;
                        sourceTree = "<group>";
                };
-               6EF0B2661BA8C43D007FA4F6 /* Firehose Project Headers */ = {
+               6EF0B2661BA8C43D007FA4F6 /* firehose */ = {
                        isa = PBXGroup;
                        children = (
                                6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */,
                                6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */,
                                6E21F2E41BBB23F00000C6A5 /* firehose_server_internal.h */,
                        );
-                       name = "Firehose Project Headers";
-                       path = src/firehose;
+                       path = firehose;
                        sourceTree = "<group>";
                };
                92F3FEC91BEC687200025962 /* Darwin Tests */ = {
                                6E326ADE1C23451A002A6505 /* dispatch_concur.c */,
                                6E326AEF1C239303002A6505 /* dispatch_context_for_key.c */,
                                6E8E4EC71C1A61680004F5CC /* dispatch_data.m */,
-                               6EC5ABF71D4446CA004F8674 /* dispatch_deadname.c */,
+                               6EC5ABE31D4436E4004F8674 /* dispatch_deadname.c */,
                                6E67D90D1C16CCEB00FC98AC /* dispatch_debug.c */,
                                6E8E4ECB1C1A72650004F5CC /* dispatch_drift.c */,
                                6E67D90F1C16CF0B00FC98AC /* dispatch_group.c */,
-                               6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */,
                                6E326ABD1C22A577002A6505 /* dispatch_io_net.c */,
                                6E326ABE1C22A577002A6505 /* dispatch_io.c */,
+                               6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */,
+                               6EC670C61E37E201004F10D6 /* dispatch_network_event_thread.c */,
+                               B63B793F1E8F004F0060C1E1 /* dispatch_no_blocks.c */,
                                C96CE17A1CEB851600F4B8E6 /* dispatch_objc.m */,
                                6E67D9131C17676D00FC98AC /* dispatch_overcommit.c */,
                                6E67D9151C1768B300FC98AC /* dispatch_pingpong.c */,
                                6E326B441C239B61002A6505 /* dispatch_priority.c */,
                                6E326AB51C225477002A6505 /* dispatch_proc.c */,
+                               B6FA01801F0AD522004479BF /* dispatch_pthread_root_queue.c */,
                                6E326AB31C224870002A6505 /* dispatch_qos.c */,
+                               B6AE9A4A1D7F53B300AC007F /* dispatch_queue_create.c */,
                                6E67D9111C17669C00FC98AC /* dispatch_queue_finalizer.c */,
                                6E1612691C79606E006FC9A9 /* dispatch_queue_label.c */,
                                6E326AB91C229866002A6505 /* dispatch_read.c */,
                                6E326ADC1C234396002A6505 /* dispatch_readsync.c */,
                                6E8E4E6D1C1A35EE0004F5CC /* dispatch_select.c */,
                                6E8E4E9B1C1A4EF10004F5CC /* dispatch_sema.c */,
+                               B69878521F06F8790088F94F /* dispatch_signals.c */,
                                6EA2CB841C005DEF0076794A /* dispatch_source.c */,
                                6E326AE01C234780002A6505 /* dispatch_starfish.c */,
                                6EE89F3D1BFAF5B000EB140D /* dispatch_state_machine.c */,
                                6E62B0531C55806200D2C7C0 /* dispatch_trysync.c */,
                                6E8E4EC91C1A670B0004F5CC /* dispatch_vm.c */,
                                6E326AB71C225FCA002A6505 /* dispatch_vnode.c */,
+                               B68330BC1EBCF6080003E71C /* dispatch_wl.c */,
                                6E67D9171C17BA7200FC98AC /* nsoperation.m */,
                                6E4FC9D11C84123600520351 /* os_venture_basic.c */,
+                               B6AE9A561D7F53C100AC007F /* perf_async_bench.m */,
+                               B6AE9A581D7F53CB00AC007F /* perf_bench.m */,
+                               6EC670C71E37E201004F10D6 /* perf_mach_async.c */,
+                               6EC670C81E37E201004F10D6 /* perf_pipepingpong.c */,
+                               B6AC73FD1EB10973009FB2F2 /* perf_thread_request.c */,
                                92F3FE921BEC686300025962 /* Makefile */,
                                6E8E4E6E1C1A35EE0004F5CC /* test_lib.c */,
                                6E8E4E6F1C1A35EE0004F5CC /* test_lib.h */,
                E40041E4125E71150022B135 /* xcodeconfig */ = {
                        isa = PBXGroup;
                        children = (
-                               6E4130C91B431697001A152D /* backward-compat.xcconfig */,
                                E43D93F11097917E004F6A62 /* libdispatch.xcconfig */,
                                E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */,
                                E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */,
                                6E040C721C499C3600411A2E /* libfirehose_kernel.xcconfig */,
                                E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */,
                                E448727914C6215D00BB45C2 /* libdispatch.order */,
-                               E4B2D42E17A7F0F90034A18F /* libdispatch_iphoneos.order */,
-                               E4B2D42D17A7F0F90034A18F /* libdispatch-resolver_iphoneos.order */,
                                E421E5FD1716BEA70090DC9B /* libdispatch.interposable */,
                        );
                        path = xcodeconfig;
                        isa = PBXGroup;
                        children = (
                                E47D6BB5125F0F800070D91C /* resolved.h */,
-                               E44EBE371251656400645D88 /* resolver.c */,
                                E44EBE331251654000645D88 /* resolver.h */,
+                               E44EBE371251656400645D88 /* resolver.c */,
                        );
                        path = resolver;
                        sourceTree = "<group>";
                                E4128ED513BA9A1700ABB2CB /* hw_config.h */,
                                6EF2CAA41C88998A001ABE83 /* lock.h */,
                                FC1832A2109923C7003403D5 /* perfmon.h */,
+                               6EFBDA4A1D61A0D600282887 /* priority.h */,
                                FC1832A3109923C7003403D5 /* time.h */,
                                FC1832A4109923C7003403D5 /* tsd.h */,
                                E48EC97B1835BADD00EAC4F1 /* yield.h */,
                                E44757D917F4572600B82CA1 /* inline_internal.h */,
                                E4C1ED6E1263E714000D3C8B /* data_internal.h */,
                                5A0095A110F274B0000E2A31 /* io_internal.h */,
+                               6E4BACC91D48A89500B562AE /* mach_internal.h */,
                                965ECC200F3EAB71004DDD89 /* object_internal.h */,
                                96929D950F3EA2170041FF5D /* queue_internal.h */,
                                5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */,
                                E422A0D412A557B5005E5BDB /* trace.h */,
                                E44F9DA816543F79001DCD38 /* introspection_internal.h */,
                                96929D830F3EA1020041FF5D /* shims.h */,
+                               6E5ACCAE1D3BF27F007DA2B4 /* event */,
+                               6EF0B2661BA8C43D007FA4F6 /* firehose */,
                                FC1832A0109923B3003403D5 /* shims */,
                        );
                        name = "Dispatch Project Headers";
                                E4B3C3FE18C50D000039F49F /* voucher_activity_private.h in Headers */,
                                721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */,
                                FC5C9C1E0EADABE3006E462D /* group.h in Headers */,
+                               6EFBDA4B1D61A0D600282887 /* priority.h in Headers */,
                                96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */,
                                5AAB45C410D30CC7004407EA /* io.h in Headers */,
                                E44A8E7518066276009FFDB6 /* voucher_internal.h in Headers */,
                                E44A8E721805C473009FFDB6 /* voucher_private.h in Headers */,
                                5A0095A210F274B0000E2A31 /* io_internal.h in Headers */,
                                FC1832A8109923C7003403D5 /* tsd.h in Headers */,
+                               6EA793891D458A5800929B1B /* event_config.h in Headers */,
                                96929D840F3EA1020041FF5D /* atomic.h in Headers */,
                                96929D850F3EA1020041FF5D /* shims.h in Headers */,
                                FC1832A7109923C7003403D5 /* time.h in Headers */,
+                               6E4BACCA1D48A89500B562AE /* mach_internal.h in Headers */,
                                6ED64B511BBD8A2100C35F4D /* firehose_buffer_internal.h in Headers */,
                                E48EC97C1835BADD00EAC4F1 /* yield.h in Headers */,
                                2BE17C6418EA305E002CA4E8 /* layout_private.h in Headers */,
                                6EF2CAA51C88998A001ABE83 /* lock.h in Headers */,
                                E422A0D512A557B5005E5BDB /* trace.h in Headers */,
                                E4BA743B13A8911B0095BDF1 /* getprogname.h in Headers */,
+                               6E5ACCBA1D3C4D0B007DA2B4 /* event_internal.h in Headers */,
                                6ED64B571BBD8A3B00C35F4D /* firehose_inline_internal.h in Headers */,
                                E4128ED613BA9A1700ABB2CB /* hw_config.h in Headers */,
                                E454569314746F1B00106147 /* object_private.h in Headers */,
                        files = (
                                E49F24AB125D57FA0057C971 /* dispatch.h in Headers */,
                                E49F24AC125D57FA0057C971 /* base.h in Headers */,
+                               6E5ACCBB1D3C4D0E007DA2B4 /* event_internal.h in Headers */,
                                E49F24AD125D57FA0057C971 /* object.h in Headers */,
                                E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */,
                                E49F24AE125D57FA0057C971 /* queue.h in Headers */,
                                E49F24BE125D57FA0057C971 /* source_internal.h in Headers */,
                                E49F24BD125D57FA0057C971 /* semaphore_internal.h in Headers */,
                                E4C1ED701263E714000D3C8B /* data_internal.h in Headers */,
+                               6EA7938F1D458A5E00929B1B /* event_config.h in Headers */,
                                6ED64B501BBD8A1400C35F4D /* firehose_internal.h in Headers */,
                                E49F24BF125D57FA0057C971 /* io_internal.h in Headers */,
                                E44A8E731805C473009FFDB6 /* voucher_private.h in Headers */,
                                E44F9DB51654403F001DCD38 /* source_internal.h in Headers */,
                                E44F9DB41654403B001DCD38 /* semaphore_internal.h in Headers */,
                                E44F9DB01654402B001DCD38 /* data_internal.h in Headers */,
+                               6E5ACCBC1D3C4D0F007DA2B4 /* event_internal.h in Headers */,
                                6E9956081C3B21B30071D40C /* venture_internal.h in Headers */,
                                E44F9DB11654402E001DCD38 /* io_internal.h in Headers */,
                                E4630251176162D200E11F4C /* atomic_sfb.h in Headers */,
                                6ED64B591BBD8A3F00C35F4D /* firehose_inline_internal.h in Headers */,
                                6EF2CAB51C889D67001ABE83 /* lock.h in Headers */,
                                E44757DB17F4573500B82CA1 /* inline_internal.h in Headers */,
+                               6EA7938E1D458A5C00929B1B /* event_config.h in Headers */,
                                6ED64B4F1BBD8A1400C35F4D /* firehose_internal.h in Headers */,
                                E44F9DB71654404F001DCD38 /* shims.h in Headers */,
                                E44F9DBC1654405B001DCD38 /* perfmon.h in Headers */,
                        );
                        dependencies = (
                                6EF0B27E1BA8C5BF007FA4F6 /* PBXTargetDependency */,
-                               E47D6ECB125FEB9D0070D91C /* PBXTargetDependency */,
                                E47D6ECD125FEBA10070D91C /* PBXTargetDependency */,
+                               E47D6ECB125FEB9D0070D91C /* PBXTargetDependency */,
+                               E49BB6F81E7074C100868613 /* PBXTargetDependency */,
                                E4B515DB164B317700E003AF /* PBXTargetDependency */,
                                C01866C21C597AEA0040FC07 /* PBXTargetDependency */,
                                E437F0D614F7441F00F0B997 /* PBXTargetDependency */,
                        );
                        name = "libdispatch up static";
                        productName = libdispatch;
-                       productReference = E46DBC5714EE10C80001F9F6 /* libdispatch.a */;
+                       productReference = E46DBC5714EE10C80001F9F6 /* libdispatch_up.a */;
+                       productType = "com.apple.product-type.library.static";
+               };
+               E49BB6CE1E70748100868613 /* libdispatch alt resolved */ = {
+                       isa = PBXNativeTarget;
+                       buildConfigurationList = E49BB6EF1E70748100868613 /* Build configuration list for PBXNativeTarget "libdispatch alt resolved" */;
+                       buildPhases = (
+                               E49BB6CF1E70748100868613 /* Mig Headers */,
+                               E49BB6D01E70748100868613 /* Sources */,
+                               E49BB6EE1E70748100868613 /* Symlink normal variant */,
+                       );
+                       buildRules = (
+                       );
+                       dependencies = (
+                       );
+                       name = "libdispatch alt resolved";
+                       productName = libdispatch;
+                       productReference = E49BB6F21E70748100868613 /* libdispatch_alt.a */;
                        productType = "com.apple.product-type.library.static";
                };
                E49F24A9125D57FA0057C971 /* libdispatch no resolver */ = {
                        isa = PBXProject;
                        attributes = {
                                BuildIndependentTargetsInParallel = YES;
-                               LastUpgradeCheck = 0800;
+                               LastUpgradeCheck = 0900;
                                TargetAttributes = {
                                        3F3C9326128E637B0042B1F7 = {
                                                ProvisioningStyle = Manual;
                                E49F24A9125D57FA0057C971 /* libdispatch no resolver */,
                                E4EC121612514715000DDBD1 /* libdispatch mp resolved */,
                                E4EC118F12514302000DDBD1 /* libdispatch up resolved */,
+                               E49BB6CE1E70748100868613 /* libdispatch alt resolved */,
                                E4B51595164B2DA300E003AF /* libdispatch introspection */,
                                E46DBC1A14EE10C80001F9F6 /* libdispatch up static */,
                                C01866A41C5973210040FC07 /* libdispatch mp static */,
                                6E2ECAFD1C49C2FF00A30A32 /* libdispatch_kernel */,
                                C927F35A10FD7F0600C5AB8B /* libdispatch_tools */,
                                4552540A19B1389700B88766 /* libdispatch_tests */,
+                               92CBD7201BED924F006E0892 /* libdispatch_tests_legacy */,
+                               92F3FECA1BEC69E500025962 /* darwintests */,
                                6E040C621C499B1B00411A2E /* libfirehose_kernel */,
                                6EB4E4081BA8BCAD00D7B9D2 /* libfirehose_server */,
-                               92F3FECA1BEC69E500025962 /* darwintests */,
-                               92CBD7201BED924F006E0892 /* libdispatch_tests_legacy */,
                        );
                };
 /* End PBXProject section */
                        shellScript = ". \"${SCRIPT_INPUT_FILE_0}\"";
                        showEnvVarsInLog = 0;
                };
+               E49BB6CF1E70748100868613 /* Mig Headers */ = {
+                       isa = PBXShellScriptBuildPhase;
+                       buildActionMask = 2147483647;
+                       files = (
+                       );
+                       inputPaths = (
+                               "$(SRCROOT)/src/protocol.defs",
+                               "$(SRCROOT)/src/firehose/firehose.defs",
+                               "$(SRCROOT)/src/firehose/firehose_reply.defs",
+                               "$(SRCROOT)/xcodescripts/mig-headers.sh",
+                       );
+                       name = "Mig Headers";
+                       outputPaths = (
+                               "$(DERIVED_FILE_DIR)/protocol.h",
+                               "$(DERIVED_FILE_DIR)/protocolServer.h",
+                               "$(DERIVED_FILE_DIR)/firehose.h",
+                               "$(DERIVED_FILE_DIR)/firehoseServer.h",
+                               "$(DERIVED_FILE_DIR)/firehose_reply.h",
+                               "$(DERIVED_FILE_DIR)/firehose_replyServer.h",
+                       );
+                       runOnlyForDeploymentPostprocessing = 0;
+                       shellPath = "/bin/bash -e";
+                       shellScript = ". \"${SCRIPT_INPUT_FILE_3}\"";
+                       showEnvVarsInLog = 0;
+               };
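For reference, the new "Mig Headers" phase simply sources its fourth input (xcodescripts/mig-headers.sh, per SCRIPT_INPUT_FILE_3), which generates the client/server headers listed in outputPaths from the three .defs inputs. A rough bash sketch of the effect for one input; illustrative only, since the exact mig flags used live in mig-headers.sh and are not shown in this diff:

        # Emit the user-side and server-side MIG headers for protocol.defs
        # into DERIVED_FILE_DIR (mig also writes C stubs by default)
        mig -header "${DERIVED_FILE_DIR}/protocol.h" \
            -sheader "${DERIVED_FILE_DIR}/protocolServer.h" \
            "${SRCROOT}/src/protocol.defs"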
+               E49BB6EE1E70748100868613 /* Symlink normal variant */ = {
+                       isa = PBXShellScriptBuildPhase;
+                       buildActionMask = 2147483647;
+                       files = (
+                       );
+                       inputPaths = (
+                       );
+                       name = "Symlink normal variant";
+                       outputPaths = (
+                               "$(CONFIGURATION_BUILD_DIR)/$(PRODUCT_NAME)_normal.a",
+                       );
+                       runOnlyForDeploymentPostprocessing = 0;
+                       shellPath = "/bin/bash -e";
+                       shellScript = "ln -fs \"${PRODUCT_NAME}.a\" \"${SCRIPT_OUTPUT_FILE_0}\"";
+                       showEnvVarsInLog = 0;
+               };
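The "Symlink normal variant" phase is a one-liner that aliases the produced archive under a "_normal" suffix in the build products directory. Assuming PRODUCT_NAME resolves to libdispatch_alt for this target (its product file is libdispatch_alt.a), the script expands to:

        # Make libdispatch_alt_normal.a a symlink to libdispatch_alt.a
        ln -fs "libdispatch_alt.a" "${CONFIGURATION_BUILD_DIR}/libdispatch_alt_normal.a"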
                E49F24D7125D57FA0057C971 /* Install Manpages */ = {
                        isa = PBXShellScriptBuildPhase;
                        buildActionMask = 8;
                        isa = PBXSourcesBuildPhase;
                        buildActionMask = 2147483647;
                        files = (
-                               6EF0B27A1BA8C57D007FA4F6 /* firehose_server_object.m in Sources */,
                                6E90269C1BB9BD50004DC3AD /* firehose.defs in Sources */,
-                               6E21F2E91BBB240E0000C6A5 /* firehose_server.c in Sources */,
                                6EF0B2781BA8C56E007FA4F6 /* firehose_reply.defs in Sources */,
+                               6EF0B27A1BA8C57D007FA4F6 /* firehose_server_object.m in Sources */,
+                               6E21F2E91BBB240E0000C6A5 /* firehose_server.c in Sources */,
                        );
                        runOnlyForDeploymentPostprocessing = 0;
                };
                        buildActionMask = 2147483647;
                        files = (
                                C00B0DF21C5AEBBE000330B3 /* protocol.defs in Sources */,
+                               C00B0DF71C5AEBBE000330B3 /* firehose.defs in Sources */,
+                               C00B0DFA1C5AEBBE000330B3 /* firehose_reply.defs in Sources */,
                                C00B0DF31C5AEBBE000330B3 /* resolver.c in Sources */,
-                               6EF2CAB31C8899ED001ABE83 /* lock.c in Sources */,
                                C00B0DF41C5AEBBE000330B3 /* init.c in Sources */,
-                               C00B0DF51C5AEBBE000330B3 /* queue.c in Sources */,
-                               C00B0DF61C5AEBBE000330B3 /* firehose_buffer.c in Sources */,
-                               C00B0DF71C5AEBBE000330B3 /* firehose.defs in Sources */,
+                               C00B0DFE1C5AEBBE000330B3 /* object.c in Sources */,
                                C00B0DF81C5AEBBE000330B3 /* block.cpp in Sources */,
+                               6EF2CAB31C8899ED001ABE83 /* lock.c in Sources */,
                                C00B0DF91C5AEBBE000330B3 /* semaphore.c in Sources */,
-                               C00B0DFA1C5AEBBE000330B3 /* firehose_reply.defs in Sources */,
                                C00B0DFB1C5AEBBE000330B3 /* once.c in Sources */,
-                               C00B0DFC1C5AEBBE000330B3 /* voucher.c in Sources */,
+                               C00B0DF51C5AEBBE000330B3 /* queue.c in Sources */,
                                C00B0DFD1C5AEBBE000330B3 /* apply.c in Sources */,
-                               C00B0DFE1C5AEBBE000330B3 /* object.c in Sources */,
-                               C00B0DFF1C5AEBBE000330B3 /* benchmark.c in Sources */,
                                C00B0E001C5AEBBE000330B3 /* source.c in Sources */,
-                               C00B0E011C5AEBBE000330B3 /* time.c in Sources */,
-                               C00B0E021C5AEBBE000330B3 /* data.c in Sources */,
+                               6E4BACC81D48A42400B562AE /* mach.c in Sources */,
+                               6EA9629E1D48622C00759D53 /* event.c in Sources */,
+                               6EA962A61D48625500759D53 /* event_kevent.c in Sources */,
+                               6E4BACFC1D49A04A00B562AE /* event_epoll.c in Sources */,
+                               C00B0DFC1C5AEBBE000330B3 /* voucher.c in Sources */,
+                               C00B0DF61C5AEBBE000330B3 /* firehose_buffer.c in Sources */,
                                C00B0E031C5AEBBE000330B3 /* io.c in Sources */,
+                               C00B0E021C5AEBBE000330B3 /* data.c in Sources */,
                                C00B0E041C5AEBBE000330B3 /* transform.c in Sources */,
+                               C00B0E011C5AEBBE000330B3 /* time.c in Sources */,
                                C00B0E051C5AEBBE000330B3 /* allocator.c in Sources */,
+                               C00B0DFF1C5AEBBE000330B3 /* benchmark.c in Sources */,
+                               E49BB70A1E70A3B000868613 /* venture.c in Sources */,
                        );
                        runOnlyForDeploymentPostprocessing = 0;
                };
                        buildActionMask = 2147483647;
                        files = (
                                C01866A61C5973210040FC07 /* protocol.defs in Sources */,
+                               C01866AB1C5973210040FC07 /* firehose.defs in Sources */,
+                               C01866AE1C5973210040FC07 /* firehose_reply.defs in Sources */,
                                C01866A71C5973210040FC07 /* resolver.c in Sources */,
-                               6EF2CAB21C8899EC001ABE83 /* lock.c in Sources */,
                                C01866A81C5973210040FC07 /* init.c in Sources */,
-                               C01866A91C5973210040FC07 /* queue.c in Sources */,
-                               C01866AA1C5973210040FC07 /* firehose_buffer.c in Sources */,
-                               C01866AB1C5973210040FC07 /* firehose.defs in Sources */,
+                               C01866B21C5973210040FC07 /* object.c in Sources */,
                                C01866AC1C5973210040FC07 /* block.cpp in Sources */,
+                               6EF2CAB21C8899EC001ABE83 /* lock.c in Sources */,
                                C01866AD1C5973210040FC07 /* semaphore.c in Sources */,
-                               C01866AE1C5973210040FC07 /* firehose_reply.defs in Sources */,
                                C01866AF1C5973210040FC07 /* once.c in Sources */,
-                               C01866B01C5973210040FC07 /* voucher.c in Sources */,
+                               C01866A91C5973210040FC07 /* queue.c in Sources */,
                                C01866B11C5973210040FC07 /* apply.c in Sources */,
-                               C01866B21C5973210040FC07 /* object.c in Sources */,
-                               C01866B31C5973210040FC07 /* benchmark.c in Sources */,
                                C01866B41C5973210040FC07 /* source.c in Sources */,
-                               C01866B51C5973210040FC07 /* time.c in Sources */,
-                               C01866B61C5973210040FC07 /* data.c in Sources */,
+                               6E4BACC71D48A42300B562AE /* mach.c in Sources */,
+                               6EA9629D1D48622B00759D53 /* event.c in Sources */,
+                               6EA962A51D48625400759D53 /* event_kevent.c in Sources */,
+                               6E4BACFB1D49A04A00B562AE /* event_epoll.c in Sources */,
+                               C01866B01C5973210040FC07 /* voucher.c in Sources */,
+                               C01866AA1C5973210040FC07 /* firehose_buffer.c in Sources */,
                                C01866B71C5973210040FC07 /* io.c in Sources */,
+                               C01866B61C5973210040FC07 /* data.c in Sources */,
                                C01866B81C5973210040FC07 /* transform.c in Sources */,
+                               C01866B51C5973210040FC07 /* time.c in Sources */,
                                C01866B91C5973210040FC07 /* allocator.c in Sources */,
+                               C01866B31C5973210040FC07 /* benchmark.c in Sources */,
+                               E49BB7091E70A39700868613 /* venture.c in Sources */,
                        );
                        runOnlyForDeploymentPostprocessing = 0;
                };
                        files = (
                                E43570B9126E93380097AB9F /* provider.d in Sources */,
                                FC7BEDA40E8361E600161930 /* protocol.defs in Sources */,
-                               6E9955CF1C3B218E0071D40C /* venture.c in Sources */,
                                6ED64B471BBD89AF00C35F4D /* firehose.defs in Sources */,
-                               6ED64B441BBD898700C35F4D /* firehose_buffer.c in Sources */,
+                               6ED64B491BBD89BC00C35F4D /* firehose_reply.defs in Sources */,
                                E49F2499125D48D80057C971 /* resolver.c in Sources */,
                                E44EBE3E1251659900645D88 /* init.c in Sources */,
-                               FC7BED990E8361E600161930 /* queue.c in Sources */,
-                               721F5CCF0F15553500FF03A6 /* semaphore.c in Sources */,
+                               9661E56B0F3E7DDF00749F3E /* object.c in Sources */,
+                               E4FC3264145F46C9002FBDDB /* object.m in Sources */,
+                               E43A72501AF85BBC00BAA921 /* block.cpp in Sources */,
                                6EF2CAAC1C8899D5001ABE83 /* lock.c in Sources */,
-                               6ED64B491BBD89BC00C35F4D /* firehose_reply.defs in Sources */,
+                               721F5CCF0F15553500FF03A6 /* semaphore.c in Sources */,
                                96DF70BE0F38FE3C0074BD99 /* once.c in Sources */,
+                               FC7BED990E8361E600161930 /* queue.c in Sources */,
                                9676A0E10F3E755D00713ADB /* apply.c in Sources */,
-                               9661E56B0F3E7DDF00749F3E /* object.c in Sources */,
-                               965CD6350F3E806200D4E28D /* benchmark.c in Sources */,
                                96A8AA870F41E7A400CD570B /* source.c in Sources */,
-                               96032E4B0F5CC8C700241C5F /* time.c in Sources */,
-                               5AAB45C010D30B79004407EA /* data.c in Sources */,
+                               6E4BACBD1D48A41500B562AE /* mach.c in Sources */,
+                               6EA962971D48622600759D53 /* event.c in Sources */,
+                               6EA9629F1D48625000759D53 /* event_kevent.c in Sources */,
+                               6E4BACF51D49A04600B562AE /* event_epoll.c in Sources */,
+                               E44A8E6B1805C3E0009FFDB6 /* voucher.c in Sources */,
+                               6ED64B441BBD898700C35F4D /* firehose_buffer.c in Sources */,
                                5A27262610F26F1900751FBC /* io.c in Sources */,
-                               E43A72501AF85BBC00BAA921 /* block.cpp in Sources */,
+                               5AAB45C010D30B79004407EA /* data.c in Sources */,
+                               E420867016027AE500EEE210 /* data.m in Sources */,
                                C9C5F80E143C1771006DC718 /* transform.c in Sources */,
-                               E4FC3264145F46C9002FBDDB /* object.m in Sources */,
+                               96032E4B0F5CC8C700241C5F /* time.c in Sources */,
                                2BBF5A63154B64F5002B20F9 /* allocator.c in Sources */,
-                               E420867016027AE500EEE210 /* data.m in Sources */,
-                               E44A8E6B1805C3E0009FFDB6 /* voucher.c in Sources */,
+                               965CD6350F3E806200D4E28D /* benchmark.c in Sources */,
+                               6E9955CF1C3B218E0071D40C /* venture.c in Sources */,
                        );
                        runOnlyForDeploymentPostprocessing = 0;
                };
                        buildActionMask = 2147483647;
                        files = (
                                E46DBC4014EE10C80001F9F6 /* protocol.defs in Sources */,
+                               6EBEC7E71BBDD30F009B1596 /* firehose.defs in Sources */,
+                               6EBEC7EA1BBDD326009B1596 /* firehose_reply.defs in Sources */,
                                E46DBC4114EE10C80001F9F6 /* resolver.c in Sources */,
-                               6EF2CAB11C8899EC001ABE83 /* lock.c in Sources */,
                                E46DBC4214EE10C80001F9F6 /* init.c in Sources */,
-                               E46DBC4314EE10C80001F9F6 /* queue.c in Sources */,
-                               6EE664271BE2FD5C00ED7B1C /* firehose_buffer.c in Sources */,
-                               6EBEC7E71BBDD30F009B1596 /* firehose.defs in Sources */,
+                               E46DBC4714EE10C80001F9F6 /* object.c in Sources */,
                                E43A72881AF85BE900BAA921 /* block.cpp in Sources */,
+                               6EF2CAB11C8899EC001ABE83 /* lock.c in Sources */,
                                E46DBC4414EE10C80001F9F6 /* semaphore.c in Sources */,
-                               6E9956011C3B21980071D40C /* venture.c in Sources */,
-                               6EBEC7EA1BBDD326009B1596 /* firehose_reply.defs in Sources */,
                                E46DBC4514EE10C80001F9F6 /* once.c in Sources */,
-                               E44A8E701805C3E0009FFDB6 /* voucher.c in Sources */,
+                               E46DBC4314EE10C80001F9F6 /* queue.c in Sources */,
                                E46DBC4614EE10C80001F9F6 /* apply.c in Sources */,
-                               E46DBC4714EE10C80001F9F6 /* object.c in Sources */,
-                               E46DBC4814EE10C80001F9F6 /* benchmark.c in Sources */,
                                E46DBC4914EE10C80001F9F6 /* source.c in Sources */,
-                               E46DBC4A14EE10C80001F9F6 /* time.c in Sources */,
-                               E46DBC4B14EE10C80001F9F6 /* data.c in Sources */,
+                               6E4BACC61D48A42300B562AE /* mach.c in Sources */,
+                               6EA9629C1D48622A00759D53 /* event.c in Sources */,
+                               6EA962A41D48625300759D53 /* event_kevent.c in Sources */,
+                               6E4BACFA1D49A04900B562AE /* event_epoll.c in Sources */,
+                               E44A8E701805C3E0009FFDB6 /* voucher.c in Sources */,
+                               6EE664271BE2FD5C00ED7B1C /* firehose_buffer.c in Sources */,
                                E46DBC4C14EE10C80001F9F6 /* io.c in Sources */,
+                               E46DBC4B14EE10C80001F9F6 /* data.c in Sources */,
                                E46DBC4D14EE10C80001F9F6 /* transform.c in Sources */,
+                               E46DBC4A14EE10C80001F9F6 /* time.c in Sources */,
                                2BBF5A67154B64F5002B20F9 /* allocator.c in Sources */,
+                               E46DBC4814EE10C80001F9F6 /* benchmark.c in Sources */,
+                               6E9956011C3B21980071D40C /* venture.c in Sources */,
+                       );
+                       runOnlyForDeploymentPostprocessing = 0;
+               };
+               E49BB6D01E70748100868613 /* Sources */ = {
+                       isa = PBXSourcesBuildPhase;
+                       buildActionMask = 2147483647;
+                       files = (
+                               E49BB6D11E70748100868613 /* provider.d in Sources */,
+                               E49BB6D21E70748100868613 /* protocol.defs in Sources */,
+                               E49BB6D41E70748100868613 /* firehose.defs in Sources */,
+                               E49BB6DD1E70748100868613 /* firehose_reply.defs in Sources */,
+                               E49BB6D71E70748100868613 /* resolver.c in Sources */,
+                               E49BB6D91E70748100868613 /* init.c in Sources */,
+                               E49BB6E01E70748100868613 /* object.c in Sources */,
+                               E49BB6EA1E70748100868613 /* object.m in Sources */,
+                               E49BB6E71E70748100868613 /* block.cpp in Sources */,
+                               E49BB6DC1E70748100868613 /* lock.c in Sources */,
+                               E49BB6DB1E70748100868613 /* semaphore.c in Sources */,
+                               E49BB6DE1E70748100868613 /* once.c in Sources */,
+                               E49BB6D81E70748100868613 /* mach.c in Sources */,
+                               E49BB6DA1E70748100868613 /* queue.c in Sources */,
+                               E49BB6DF1E70748100868613 /* apply.c in Sources */,
+                               E49BB6E31E70748100868613 /* source.c in Sources */,
+                               E49BB6E81E70748100868613 /* event.c in Sources */,
+                               E49BB6D61E70748100868613 /* event_kevent.c in Sources */,
+                               E49BB6E21E70748100868613 /* event_epoll.c in Sources */,
+                               E49BB6ED1E70748100868613 /* voucher.c in Sources */,
+                               E49BB6D51E70748100868613 /* firehose_buffer.c in Sources */,
+                               E49BB6E61E70748100868613 /* io.c in Sources */,
+                               E49BB6E51E70748100868613 /* data.c in Sources */,
+                               E49BB6EC1E70748100868613 /* data.m in Sources */,
+                               E49BB6E91E70748100868613 /* transform.c in Sources */,
+                               E49BB6E41E70748100868613 /* time.c in Sources */,
+                               E49BB6EB1E70748100868613 /* allocator.c in Sources */,
+                               E49BB6E11E70748100868613 /* benchmark.c in Sources */,
+                               E49BB6D31E70748100868613 /* venture.c in Sources */,
                        );
                        runOnlyForDeploymentPostprocessing = 0;
                };
                        files = (
                                E43570BA126E93380097AB9F /* provider.d in Sources */,
                                E49F24C8125D57FA0057C971 /* protocol.defs in Sources */,
-                               6E9956051C3B219B0071D40C /* venture.c in Sources */,
                                6ED64B461BBD89AF00C35F4D /* firehose.defs in Sources */,
-                               6ED64B401BBD898300C35F4D /* firehose_buffer.c in Sources */,
+                               6ED64B4A1BBD89BD00C35F4D /* firehose_reply.defs in Sources */,
                                E49F24C9125D57FA0057C971 /* resolver.c in Sources */,
                                E49F24CA125D57FA0057C971 /* init.c in Sources */,
-                               E49F24CB125D57FA0057C971 /* queue.c in Sources */,
-                               E49F24CC125D57FA0057C971 /* semaphore.c in Sources */,
+                               E49F24CF125D57FA0057C971 /* object.c in Sources */,
+                               E4FC3265145F46C9002FBDDB /* object.m in Sources */,
+                               E43A72841AF85BCB00BAA921 /* block.cpp in Sources */,
                                6EF2CAAD1C8899E9001ABE83 /* lock.c in Sources */,
-                               6ED64B4A1BBD89BD00C35F4D /* firehose_reply.defs in Sources */,
+                               E49F24CC125D57FA0057C971 /* semaphore.c in Sources */,
                                E49F24CD125D57FA0057C971 /* once.c in Sources */,
+                               E49F24CB125D57FA0057C971 /* queue.c in Sources */,
                                E49F24CE125D57FA0057C971 /* apply.c in Sources */,
-                               E49F24CF125D57FA0057C971 /* object.c in Sources */,
-                               E49F24D0125D57FA0057C971 /* benchmark.c in Sources */,
                                E49F24D1125D57FA0057C971 /* source.c in Sources */,
-                               E49F24D2125D57FA0057C971 /* time.c in Sources */,
-                               E49F24D3125D57FA0057C971 /* data.c in Sources */,
+                               6E4BACC21D48A42000B562AE /* mach.c in Sources */,
+                               6EA962981D48622700759D53 /* event.c in Sources */,
+                               6EA962A01D48625100759D53 /* event_kevent.c in Sources */,
+                               6E4BACF61D49A04700B562AE /* event_epoll.c in Sources */,
+                               E44A8E6C1805C3E0009FFDB6 /* voucher.c in Sources */,
+                               6ED64B401BBD898300C35F4D /* firehose_buffer.c in Sources */,
                                E49F24D4125D57FA0057C971 /* io.c in Sources */,
-                               E43A72841AF85BCB00BAA921 /* block.cpp in Sources */,
+                               E49F24D3125D57FA0057C971 /* data.c in Sources */,
+                               E420867116027AE500EEE210 /* data.m in Sources */,
                                C93D6165143E190E00EB9023 /* transform.c in Sources */,
-                               E4FC3265145F46C9002FBDDB /* object.m in Sources */,
+                               E49F24D2125D57FA0057C971 /* time.c in Sources */,
                                2BBF5A64154B64F5002B20F9 /* allocator.c in Sources */,
-                               E420867116027AE500EEE210 /* data.m in Sources */,
-                               E44A8E6C1805C3E0009FFDB6 /* voucher.c in Sources */,
+                               E49F24D0125D57FA0057C971 /* benchmark.c in Sources */,
+                               6E9956051C3B219B0071D40C /* venture.c in Sources */,
                        );
                        runOnlyForDeploymentPostprocessing = 0;
                };
                        files = (
                                E4B515BD164B2DA300E003AF /* provider.d in Sources */,
                                E4B515BE164B2DA300E003AF /* protocol.defs in Sources */,
-                               E4B515BF164B2DA300E003AF /* resolver.c in Sources */,
-                               6ED64B4B1BBD89BE00C35F4D /* firehose_reply.defs in Sources */,
                                6ED64B481BBD89B100C35F4D /* firehose.defs in Sources */,
+                               6ED64B4B1BBD89BE00C35F4D /* firehose_reply.defs in Sources */,
+                               E4B515BF164B2DA300E003AF /* resolver.c in Sources */,
                                E4B515C0164B2DA300E003AF /* init.c in Sources */,
-                               E4B515C1164B2DA300E003AF /* queue.c in Sources */,
-                               6E9956021C3B21990071D40C /* venture.c in Sources */,
+                               E4B515C5164B2DA300E003AF /* object.c in Sources */,
+                               E4B515CC164B2DA300E003AF /* object.m in Sources */,
+                               E43A72871AF85BCD00BAA921 /* block.cpp in Sources */,
+                               6EF2CAB01C8899EB001ABE83 /* lock.c in Sources */,
                                E4B515C2164B2DA300E003AF /* semaphore.c in Sources */,
                                E4B515C3164B2DA300E003AF /* once.c in Sources */,
-                               E43A72871AF85BCD00BAA921 /* block.cpp in Sources */,
+                               E4B515C1164B2DA300E003AF /* queue.c in Sources */,
                                E4B515C4164B2DA300E003AF /* apply.c in Sources */,
-                               E4B515C5164B2DA300E003AF /* object.c in Sources */,
-                               6ED64B431BBD898600C35F4D /* firehose_buffer.c in Sources */,
-                               E4B515C6164B2DA300E003AF /* benchmark.c in Sources */,
                                E4B515C7164B2DA300E003AF /* source.c in Sources */,
-                               E4B515C8164B2DA300E003AF /* time.c in Sources */,
-                               E4B515C9164B2DA300E003AF /* data.c in Sources */,
-                               E4B515CA164B2DA300E003AF /* io.c in Sources */,
+                               6E4BACC51D48A42200B562AE /* mach.c in Sources */,
+                               6EA9629B1D48622900759D53 /* event.c in Sources */,
+                               6EA962A31D48625300759D53 /* event_kevent.c in Sources */,
+                               6E4BACF91D49A04800B562AE /* event_epoll.c in Sources */,
                                E44A8E6F1805C3E0009FFDB6 /* voucher.c in Sources */,
+                               6ED64B431BBD898600C35F4D /* firehose_buffer.c in Sources */,
+                               E4B515CA164B2DA300E003AF /* io.c in Sources */,
+                               E4B515C9164B2DA300E003AF /* data.c in Sources */,
+                               E4B515CE164B2DA300E003AF /* data.m in Sources */,
                                E4B515CB164B2DA300E003AF /* transform.c in Sources */,
-                               6EF2CAB01C8899EB001ABE83 /* lock.c in Sources */,
-                               E4B515CC164B2DA300E003AF /* object.m in Sources */,
+                               E4B515C8164B2DA300E003AF /* time.c in Sources */,
                                E4B515CD164B2DA300E003AF /* allocator.c in Sources */,
-                               E4B515CE164B2DA300E003AF /* data.m in Sources */,
+                               E4B515C6164B2DA300E003AF /* benchmark.c in Sources */,
+                               6E9956021C3B21990071D40C /* venture.c in Sources */,
                                E4B515DD164B32E000E003AF /* introspection.c in Sources */,
                        );
                        runOnlyForDeploymentPostprocessing = 0;
                        files = (
                                E417A38412A472C4004D659D /* provider.d in Sources */,
                                E44EBE5412517EBE00645D88 /* protocol.defs in Sources */,
-                               6E9956031C3B219A0071D40C /* venture.c in Sources */,
                                6EBEC7E61BBDD30D009B1596 /* firehose.defs in Sources */,
-                               6ED64B421BBD898500C35F4D /* firehose_buffer.c in Sources */,
+                               6EBEC7E91BBDD325009B1596 /* firehose_reply.defs in Sources */,
                                E49F2424125D3C970057C971 /* resolver.c in Sources */,
                                E44EBE5512517EBE00645D88 /* init.c in Sources */,
-                               E4EC11AE12514302000DDBD1 /* queue.c in Sources */,
-                               E4EC11AF12514302000DDBD1 /* semaphore.c in Sources */,
+                               E4EC11B212514302000DDBD1 /* object.c in Sources */,
+                               E4FC3266145F46C9002FBDDB /* object.m in Sources */,
+                               E43A72861AF85BCC00BAA921 /* block.cpp in Sources */,
                                6EF2CAAF1C8899EB001ABE83 /* lock.c in Sources */,
-                               6EBEC7E91BBDD325009B1596 /* firehose_reply.defs in Sources */,
+                               E4EC11AF12514302000DDBD1 /* semaphore.c in Sources */,
                                E4EC11B012514302000DDBD1 /* once.c in Sources */,
+                               E4EC11AE12514302000DDBD1 /* queue.c in Sources */,
                                E4EC11B112514302000DDBD1 /* apply.c in Sources */,
-                               E4EC11B212514302000DDBD1 /* object.c in Sources */,
-                               E4EC11B312514302000DDBD1 /* benchmark.c in Sources */,
                                E4EC11B412514302000DDBD1 /* source.c in Sources */,
-                               E4EC11B512514302000DDBD1 /* time.c in Sources */,
-                               E4EC11B712514302000DDBD1 /* data.c in Sources */,
+                               6E4BACC41D48A42200B562AE /* mach.c in Sources */,
+                               6EA9629A1D48622900759D53 /* event.c in Sources */,
+                               6EA962A21D48625200759D53 /* event_kevent.c in Sources */,
+                               6E4BACF81D49A04800B562AE /* event_epoll.c in Sources */,
+                               E44A8E6E1805C3E0009FFDB6 /* voucher.c in Sources */,
+                               6ED64B421BBD898500C35F4D /* firehose_buffer.c in Sources */,
                                E4EC11B812514302000DDBD1 /* io.c in Sources */,
-                               E43A72861AF85BCC00BAA921 /* block.cpp in Sources */,
+                               E4EC11B712514302000DDBD1 /* data.c in Sources */,
+                               E420867316027AE500EEE210 /* data.m in Sources */,
                                C93D6166143E190F00EB9023 /* transform.c in Sources */,
-                               E4FC3266145F46C9002FBDDB /* object.m in Sources */,
+                               E4EC11B512514302000DDBD1 /* time.c in Sources */,
                                2BBF5A65154B64F5002B20F9 /* allocator.c in Sources */,
-                               E420867316027AE500EEE210 /* data.m in Sources */,
-                               E44A8E6E1805C3E0009FFDB6 /* voucher.c in Sources */,
+                               E4EC11B312514302000DDBD1 /* benchmark.c in Sources */,
+                               6E9956031C3B219A0071D40C /* venture.c in Sources */,
                        );
                        runOnlyForDeploymentPostprocessing = 0;
                };
                        files = (
                                E417A38512A472C5004D659D /* provider.d in Sources */,
                                E44EBE5612517EBE00645D88 /* protocol.defs in Sources */,
-                               6E9956041C3B219B0071D40C /* venture.c in Sources */,
                                6EBEC7E51BBDD30C009B1596 /* firehose.defs in Sources */,
-                               6ED64B411BBD898400C35F4D /* firehose_buffer.c in Sources */,
+                               6EBEC7E81BBDD324009B1596 /* firehose_reply.defs in Sources */,
                                E49F2423125D3C960057C971 /* resolver.c in Sources */,
                                E44EBE5712517EBE00645D88 /* init.c in Sources */,
-                               E4EC121A12514715000DDBD1 /* queue.c in Sources */,
-                               E4EC121B12514715000DDBD1 /* semaphore.c in Sources */,
+                               E4EC121E12514715000DDBD1 /* object.c in Sources */,
+                               E4FC3267145F46C9002FBDDB /* object.m in Sources */,
+                               E43A72851AF85BCC00BAA921 /* block.cpp in Sources */,
                                6EF2CAAE1C8899EA001ABE83 /* lock.c in Sources */,
-                               6EBEC7E81BBDD324009B1596 /* firehose_reply.defs in Sources */,
+                               E4EC121B12514715000DDBD1 /* semaphore.c in Sources */,
                                E4EC121C12514715000DDBD1 /* once.c in Sources */,
+                               E4EC121A12514715000DDBD1 /* queue.c in Sources */,
                                E4EC121D12514715000DDBD1 /* apply.c in Sources */,
-                               E4EC121E12514715000DDBD1 /* object.c in Sources */,
-                               E4EC121F12514715000DDBD1 /* benchmark.c in Sources */,
                                E4EC122012514715000DDBD1 /* source.c in Sources */,
-                               E4EC122112514715000DDBD1 /* time.c in Sources */,
-                               E4EC122312514715000DDBD1 /* data.c in Sources */,
+                               6E4BACC31D48A42100B562AE /* mach.c in Sources */,
+                               6EA962991D48622800759D53 /* event.c in Sources */,
+                               6EA962A11D48625100759D53 /* event_kevent.c in Sources */,
+                               6E4BACF71D49A04700B562AE /* event_epoll.c in Sources */,
+                               E44A8E6D1805C3E0009FFDB6 /* voucher.c in Sources */,
+                               6ED64B411BBD898400C35F4D /* firehose_buffer.c in Sources */,
                                E4EC122412514715000DDBD1 /* io.c in Sources */,
-                               E43A72851AF85BCC00BAA921 /* block.cpp in Sources */,
+                               E4EC122312514715000DDBD1 /* data.c in Sources */,
+                               E420867216027AE500EEE210 /* data.m in Sources */,
                                C93D6167143E190F00EB9023 /* transform.c in Sources */,
-                               E4FC3267145F46C9002FBDDB /* object.m in Sources */,
+                               E4EC122112514715000DDBD1 /* time.c in Sources */,
                                2BBF5A66154B64F5002B20F9 /* allocator.c in Sources */,
-                               E420867216027AE500EEE210 /* data.m in Sources */,
-                               E44A8E6D1805C3E0009FFDB6 /* voucher.c in Sources */,
+                               E4EC121F12514715000DDBD1 /* benchmark.c in Sources */,
+                               6E9956041C3B219B0071D40C /* venture.c in Sources */,
                        );
                        runOnlyForDeploymentPostprocessing = 0;
                };
                        target = E4EC121612514715000DDBD1 /* libdispatch mp resolved */;
                        targetProxy = E47D6ECC125FEBA10070D91C /* PBXContainerItemProxy */;
                };
+               E49BB6F81E7074C100868613 /* PBXTargetDependency */ = {
+                       isa = PBXTargetDependency;
+                       target = E49BB6CE1E70748100868613 /* libdispatch alt resolved */;
+                       targetProxy = E49BB6F71E7074C100868613 /* PBXContainerItemProxy */;
+               };
                E4B515DB164B317700E003AF /* PBXTargetDependency */ = {
                        isa = PBXTargetDependency;
                        target = E4B51595164B2DA300E003AF /* libdispatch introspection */;
                        isa = XCBuildConfiguration;
                        baseConfigurationReference = C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */;
                        buildSettings = {
-                               PRODUCT_NAME = "$(PRODUCT_NAME)";
                        };
                        name = Release;
                };
                        isa = XCBuildConfiguration;
                        baseConfigurationReference = C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */;
                        buildSettings = {
-                               PRODUCT_NAME = "$(PRODUCT_NAME)";
                        };
                        name = Debug;
                };
                        isa = XCBuildConfiguration;
                        baseConfigurationReference = C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */;
                        buildSettings = {
-                               PRODUCT_NAME = "$(PRODUCT_NAME)";
                        };
                        name = Release;
                };
                        isa = XCBuildConfiguration;
                        baseConfigurationReference = C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */;
                        buildSettings = {
-                               PRODUCT_NAME = "$(PRODUCT_NAME)";
                        };
                        name = Debug;
                };
                        };
                        name = Debug;
                };
+               E49BB6F01E70748100868613 /* Release */ = {
+                       isa = XCBuildConfiguration;
+                       baseConfigurationReference = E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */;
+                       buildSettings = {
+                               DISPATCH_RESOLVED_VARIANT = alt;
+                       };
+                       name = Release;
+               };
+               E49BB6F11E70748100868613 /* Debug */ = {
+                       isa = XCBuildConfiguration;
+                       baseConfigurationReference = E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */;
+                       buildSettings = {
+                               DISPATCH_RESOLVED_VARIANT = alt;
+                       };
+                       name = Debug;
+               };
                E49F24D9125D57FA0057C971 /* Release */ = {
                        isa = XCBuildConfiguration;
                        buildSettings = {
+                               WARNING_CFLAGS = (
+                                       "-Weverything",
+                                       "$(inherited)",
+                               );
                        };
                        name = Release;
                };
                E49F24DA125D57FA0057C971 /* Debug */ = {
                        isa = XCBuildConfiguration;
                        buildSettings = {
+                               ONLY_ACTIVE_ARCH = YES;
+                               WARNING_CFLAGS = (
+                                       "-Weverything",
+                                       "$(inherited)",
+                               );
                        };
                        name = Debug;
                };
                        defaultConfigurationIsVisible = 0;
                        defaultConfigurationName = Release;
                };
+               E49BB6EF1E70748100868613 /* Build configuration list for PBXNativeTarget "libdispatch alt resolved" */ = {
+                       isa = XCConfigurationList;
+                       buildConfigurations = (
+                               E49BB6F01E70748100868613 /* Release */,
+                               E49BB6F11E70748100868613 /* Debug */,
+                       );
+                       defaultConfigurationIsVisible = 0;
+                       defaultConfigurationName = Release;
+               };
                E49F24D8125D57FA0057C971 /* Build configuration list for PBXNativeTarget "libdispatch no resolver" */ = {
                        isa = XCConfigurationList;
                        buildConfigurations = (
diff --git a/man/CMakeLists.txt b/man/CMakeLists.txt
new file mode 100644 (file)
index 0000000..e81b14b
--- /dev/null
@@ -0,0 +1,23 @@
+
+# TODO(compnerd) add symlinks
+if(NOT ENABLE_SWIFT)
+  install(FILES
+            dispatch.3
+            dispatch_after.3
+            dispatch_api.3
+            dispatch_apply.3
+            dispatch_async.3
+            dispatch_data_create.3
+            dispatch_group_create.3
+            dispatch_io_create.3
+            dispatch_io_read.3
+            dispatch_object.3
+            dispatch_once.3
+            dispatch_queue_create.3
+            dispatch_read.3
+            dispatch_semaphore_create.3
+            dispatch_source_create.3
+            dispatch_time.3
+          DESTINATION
+            "${CMAKE_INSTALL_FULL_MANDIR}/man3")
+endif()
diff --git a/man/dispatch_apply.3 b/man/dispatch_apply.3
index 5a43a0a13a9dee75b5fc0352f3a8e7be79024ebf..57c99a8a7416bd0c8a223b9b085e07f402c300da 100644 (file)
@@ -1,4 +1,4 @@
-.\" Copyright (c) 2008-2010 Apple Inc. All rights reserved.
+.\" Copyright (c) 2008-2017 Apple Inc. All rights reserved.
 .Dd May 1, 2009
 .Dt dispatch_apply 3
 .Os Darwin
@@ -20,21 +20,32 @@ The
 .Fn dispatch_apply
 function provides data-level concurrency through a "for (;;)" loop like primitive:
 .Bd -literal
-dispatch_queue_t the_queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
 size_t iterations = 10;
 
 // 'idx' is zero indexed, just like:
 // for (idx = 0; idx < iterations; idx++)
 
-dispatch_apply(iterations, the_queue, ^(size_t idx) {
+dispatch_apply(iterations, DISPATCH_APPLY_AUTO, ^(size_t idx) {
        printf("%zu\\n", idx);
 });
 .Ed
 .Pp
+Although any queue can be used, it is strongly recommended to use 
+.Vt DISPATCH_APPLY_AUTO
+as the 
+.Vt queue 
+argument to both
+.Fn dispatch_apply
+and
+.Fn dispatch_apply_f , 
+as shown in the example above, since this allows the system to automatically use worker threads
+that match the configuration of the current thread as closely as possible.
+No assumptions should be made about which global concurrent queue will be used.
+.Pp
 Like a "for (;;)" loop, the
 .Fn dispatch_apply
 function is synchronous.
-If asynchronous behavior is desired, please wrap the call to
+If asynchronous behavior is desired, wrap the call to
 .Fn dispatch_apply
 with a call to
 .Fn dispatch_async
@@ -49,7 +60,7 @@ achieved (perhaps using a power of two search):
 .Bd -literal
 #define STRIDE 3
 
-dispatch_apply(count / STRIDE, queue, ^(size_t idx) {
+dispatch_apply(count / STRIDE, DISPATCH_APPLY_AUTO, ^(size_t idx) {
        size_t j = idx * STRIDE;
        size_t j_stop = j + STRIDE;
        do {
@@ -74,12 +85,21 @@ This is in contrast to asynchronous functions which must retain both the block
 and target queue for the duration of the asynchronous operation (as the calling
 function may immediately release its interest in these objects).
 .Sh FUNDAMENTALS
-Conceptually,
 .Fn dispatch_apply
-is a convenient wrapper around
+and
+.Fn dispatch_apply_f
+attempt to quickly create enough worker threads to efficiently iterate work in parallel.
+By contrast, a loop that passes work items individually to
 .Fn dispatch_async
-and a semaphore to wait for completion.
-In practice, the dispatch library optimizes this function.
+or
+.Fn dispatch_async_f
+incurs more overhead and does not express the desired parallel execution semantics to
+the system, and so may not create an optimal number of worker threads for a parallel workload.
+For this reason, prefer to use 
+.Fn dispatch_apply
+or
+.Fn dispatch_apply_f
+when parallel execution is important.
 .Pp
 The
 .Fn dispatch_apply
@@ -99,5 +119,4 @@ use a for-loop around invocations of
 .Sh SEE ALSO
 .Xr dispatch 3 ,
 .Xr dispatch_async 3 ,
-.Xr dispatch_queue_create 3 ,
-.Xr dispatch_semaphore_create 3
+.Xr dispatch_queue_create 3
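A minimal sketch of the asynchronous wrapping recommended above; the choice of queue for the outer dispatch_async is an illustrative assumption:

    #include <dispatch/dispatch.h>
    #include <stdio.h>

    // Wrap the synchronous dispatch_apply in dispatch_async so the
    // caller does not block; DISPATCH_APPLY_AUTO picks the workers.
    static void
    apply_async(size_t iterations)
    {
        dispatch_async(dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0), ^{
            dispatch_apply(iterations, DISPATCH_APPLY_AUTO, ^(size_t idx) {
                printf("%zu\n", idx);
            });
            // Reached only after every iteration has completed.
        });
    }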
diff --git a/man/dispatch_queue_create.3 b/man/dispatch_queue_create.3
index f3c30514515a88c853b2b311c26130879ad9e5d4..833e564a091fb7744aab1801daf36127ed705482 100644 (file)
@@ -72,7 +72,8 @@ debugging and performance analysis. If a label is provided, it is copied.
 By convention, clients should pass a reverse DNS style label. For example:
 .Pp
 .Bd -literal -offset indent
-my_queue = dispatch_queue_create("com.example.subsystem.taskXYZ", NULL);
+my_queue = dispatch_queue_create("com.example.subsystem.taskXYZ",
+                                DISPATCH_QUEUE_SERIAL);
 .Ed
 .Pp
 The
diff --git a/man/dispatch_source_create.3 b/man/dispatch_source_create.3
index e9b0fb739f224af3a554c1d70696cbc20fc91d26..b4e9a7ad8e93796553f4fd417101abeb4b324efe 100644 (file)
@@ -113,6 +113,8 @@ DISPATCH_SOURCE_TYPE_DATA_ADD
 .It
 DISPATCH_SOURCE_TYPE_DATA_OR
 .It
+DISPATCH_SOURCE_TYPE_DATA_REPLACE
+.It
 DISPATCH_SOURCE_TYPE_MACH_SEND
 .It
 DISPATCH_SOURCE_TYPE_MACH_RECV
@@ -168,12 +170,34 @@ The result of calling this function from any other context is undefined.
 The
 .Fn dispatch_source_merge_data
 function is intended for use with the
-.Vt DISPATCH_SOURCE_TYPE_DATA_ADD
-and
+.Vt DISPATCH_SOURCE_TYPE_DATA_ADD ,
 .Vt DISPATCH_SOURCE_TYPE_DATA_OR
+and
+.Vt DISPATCH_SOURCE_TYPE_DATA_REPLACE
 source types. The result of using this function with any other source type is
-undefined. Calling this function will atomically add or bitwise OR the data
-into the source's data, and trigger the delivery of the source's event handler.
+undefined. Data merging is performed according to the source type:
+.Bl -tag -width "XXDISPATCH_SOURCE_TYPE_DATA_REPLACE" -compact -offset indent
+.It \(bu DISPATCH_SOURCE_TYPE_DATA_ADD
+.Vt data
+is atomically added to the source's data
+.It \(bu DISPATCH_SOURCE_TYPE_DATA_OR
+.Vt data
+is atomically bitwise ORed into the source's data
+.It \(bu DISPATCH_SOURCE_TYPE_DATA_REPLACE
+.Vt data
+atomically replaces the source's data.
+.El
+.Pp
+If the source data value resulting from the merge operation is 0, the source
+handler will not be invoked. This can happen if:
+.Bl -bullet -compact -offset indent
+.It
+the atomic addition wraps for sources of type
+.Vt DISPATCH_SOURCE_TYPE_DATA_ADD ,
+.It
+0 is merged for sources of type
+.Vt DISPATCH_SOURCE_TYPE_DATA_REPLACE .
+.El
 .Pp
 .Sh SOURCE EVENT HANDLERS
 In order to receive events from the dispatch source, an event handler should be
@@ -265,14 +289,15 @@ The following section contains a summary of supported dispatch event types and
 the interpretation of their parameters and returned data.
 .Pp
 .Vt DISPATCH_SOURCE_TYPE_DATA_ADD ,
-.Vt DISPATCH_SOURCE_TYPE_DATA_OR
+.Vt DISPATCH_SOURCE_TYPE_DATA_OR ,
+.Vt DISPATCH_SOURCE_TYPE_DATA_REPLACE
 .Pp
 Sources of this type allow applications to manually trigger the source's event
 handler via a call to
 .Fn dispatch_source_merge_data .
 The data will be merged with the source's pending data via an atomic add or
-atomic bitwise OR (based on the source's type), and the event handler block will
-be submitted to the source's target queue. The
+atomic bitwise OR, or direct replacement (based on the source's type), and the
+event handler block will be submitted to the source's target queue. The
 .Fa data
 is application defined. These sources have no
 .Fa handle
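A minimal sketch of the manual trigger semantics above, using a DISPATCH_SOURCE_TYPE_DATA_ADD source; the queue and the merged values are assumptions:

    #include <dispatch/dispatch.h>
    #include <stdio.h>

    // Create a manually triggered counter source on the given queue.
    static dispatch_source_t
    make_counter(dispatch_queue_t q)
    {
        dispatch_source_t ds = dispatch_source_create(
                DISPATCH_SOURCE_TYPE_DATA_ADD, 0, 0, q);
        dispatch_source_set_event_handler(ds, ^{
            // The value accumulated since the last handler invocation.
            printf("merged: %lu\n",
                    (unsigned long)dispatch_source_get_data(ds));
        });
        dispatch_resume(ds);
        return ds;
    }
    // Elsewhere: dispatch_source_merge_data(ds, 1); // atomic add.
    // Merging 0 into a DATA_REPLACE source would not invoke the handler.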
@@ -499,19 +524,6 @@ was created with
 the timer is based on
 .Xr gettimeofday 3 .
 .Pp
-.Em Note :
-Under the C language, untyped numbers default to the
-.Vt int
-type. This can lead to truncation bugs when arithmetic operations with other
-numbers are expected to generate a
-.Vt uint64_t
-sized result. When in doubt, use
-.Vt ull
-as a suffix. For example:
-.Bd -literal -offset indent
-3ull * NSEC_PER_SEC
-.Ed
-.Pp
 .Vt DISPATCH_SOURCE_TYPE_VNODE
 .Pp
 Sources of this type monitor the virtual filesystem nodes for state changes.
diff --git a/man/dispatch_time.3 b/man/dispatch_time.3
index 4b4f9d8635a07c0ec50bd7e2a4ad038d0a9e11d4..685898de06a4b7d4a8a6ccff86e786407b8f66ea 100644 (file)
@@ -80,28 +80,10 @@ parameter is ignored.
 .Pp
 Underflow causes the smallest representable value to be
 returned for a given clock.
-.Sh CAVEATS
-Under the C language, untyped numbers default to the
-.Vt int
-type. This can lead to truncation bugs when arithmetic operations with other
-numbers are expected to generate a
-.Vt int64_t
-sized result, such as the
-.Fa offset
-argument to
-.Fn dispatch_time
-and
-.Fn dispatch_walltime .
-When in doubt, use
-.Vt ull
-as a suffix. For example:
-.Bd -literal -offset indent
-3ull * NSEC_PER_SEC
-.Ed
 .Sh EXAMPLES
 Create a milestone two seconds in the future:
 .Bd -literal -offset indent
-milestone = dispatch_time(DISPATCH_TIME_NOW, 2LL * NSEC_PER_SEC);
+milestone = dispatch_time(DISPATCH_TIME_NOW, 2 * NSEC_PER_SEC);
 .Ed
 .Pp
 Create a milestone for use as an infinite timeout:
@@ -116,6 +98,11 @@ ts.tv_sec = 0x7FFFFFFF;
 ts.tv_nsec = 0;
 milestone = dispatch_walltime(&ts, 0);
 .Ed
+.Pp
+Use a negative delta to create a milestone an hour before the one above:
+.Bd -literal -offset indent
+milestone = dispatch_walltime(&ts, -60 * 60 * NSEC_PER_SEC);
+.Ed
 .Sh RETURN VALUE
 These functions return an abstract value for use with
 .Fn dispatch_after ,
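A short sketch combining the milestones above with dispatch_after(); the queue and block body are assumptions:

    #include <dispatch/dispatch.h>
    #include <stdio.h>

    // Fire a block two seconds from now on the main queue.
    static void
    print_later(void)
    {
        dispatch_time_t milestone = dispatch_time(DISPATCH_TIME_NOW,
                2 * NSEC_PER_SEC);
        dispatch_after(milestone, dispatch_get_main_queue(), ^{
            printf("two seconds elapsed\n");
        });
    }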
diff --git a/os/CMakeLists.txt b/os/CMakeLists.txt
new file mode 100644 (file)
index 0000000..6e2b415
--- /dev/null
@@ -0,0 +1,10 @@
+
+# TODO(compnerd) ensure that object_private.h voucher_activity_private.h
+# voucher_private.h are included in the source tarball
+
+install(FILES
+          object.h
+          linux_base.h
+        DESTINATION
+          "${CMAKE_INSTALL_FULL_INCLUDEDIR}/os")
+
diff --git a/os/firehose_buffer_private.h b/os/firehose_buffer_private.h
index b73b39bf6c61f8ab247ba7ef81a62afe838aaf8d..d131d6dc4f5c4063200ac613eb31a58024fce15c 100644 (file)
 #include <stdint.h>
 #else
 #include <os/base.h>
+#include <os/availability.h>
 #include <os/base_private.h>
 #include <dispatch/dispatch.h>
 #endif
 
-#define OS_FIREHOSE_SPI_VERSION 20160318
+#define OS_FIREHOSE_SPI_VERSION 20170222
 
 /*!
  * @group Firehose SPI
diff --git a/os/firehose_server_private.h b/os/firehose_server_private.h
index 441bb52fd948baf1323e17e72b712cd595b8cfcb..fc352da1c65fb847494309e18c3cbfe68896c605 100644 (file)
@@ -227,6 +227,23 @@ OS_NOTHROW OS_NONNULL1
 void *
 firehose_client_set_context(firehose_client_t client, void *ctxt);
 
+/*!
+ * @function firehose_client_initiate_quarantine
+ *
+ * @abstract
+ * Starts the procedure to move the given client to the high volume quarantine
+ *
+ * @discussion
+ * When the client is in the high volume quarantine, its firehose chunks
+ * have the fcp_quarantined bit set to 1.
+ *
+ * @param client
+ * The specified client.
+ */
+OS_NOTHROW OS_NONNULL1
+void
+firehose_client_initiate_quarantine(firehose_client_t client);
+
 /*!
  * @function firehose_client_metadata_stream_peek
  *
@@ -361,6 +378,36 @@ OS_NOTHROW OS_OBJECT_RETURNS_RETAINED
 dispatch_queue_t
 firehose_server_copy_queue(firehose_server_queue_t which);
 
+/*!
+ * @function firehose_server_quarantined_suspend
+ *
+ * @abstract
+ * Suspends processing of quarantined clients until
+ * firehose_server_quarantined_resume() is called for the same queue.
+ *
+ * @discussion
+ * Suspending processing of quarantined clients causes firehose_snapshot()
+ * to block until the processing is enabled again.
+ *
+ * However, if this is used to pace the processing, it is a good idea to disable
+ * this pacing until the snapshot has completed.
+ *
+ * Similarly, quarantine suspension must be off during shutdown.
+ */
+OS_NOTHROW
+void
+firehose_server_quarantined_suspend(firehose_server_queue_t q);
+
+/*!
+ * @function firehose_server_quarantined_resume
+ *
+ * @abstract
+ * Resumes processing of quarantined clients.
+ */
+OS_NOTHROW
+void
+firehose_server_quarantined_resume(firehose_server_queue_t q);
+
 #pragma mark - Firehose Snapshot
 
 /*!
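A minimal sketch of the suspend/resume pairing described above, assuming a valid firehose_server_queue_t value:

    #include <os/firehose_server_private.h>

    // Pace processing of quarantined clients. While suspended,
    // firehose_snapshot() blocks, so resume before snapshotting
    // (and keep suspension off during shutdown).
    static void
    paced_section(firehose_server_queue_t q)
    {
        firehose_server_quarantined_suspend(q);
        /* ... process quarantined clients at the desired pace ... */
        firehose_server_quarantined_resume(q);
    }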
diff --git a/os/linux_base.h b/os/linux_base.h
index 96a3c825b2fd00fc2870dde0df2a741fe6fcf6a2..c8b9cad7c08d3d52267a5ab0325194555eb1d0b7 100644 (file)
 #ifndef __OS_LINUX_BASE__
 #define __OS_LINUX_BASE__
 
-#include <sys/user.h>
 #include <sys/param.h>
 
+#if HAVE_SYS_CDEFS_H
+#include <sys/cdefs.h>
+#endif
+
+#ifndef API_AVAILABLE
+#define API_AVAILABLE(...)
+#endif
+#ifndef API_DEPRECATED
+#define API_DEPRECATED(...)
+#endif
+#ifndef API_UNAVAILABLE
+#define API_UNAVAILABLE(...)
+#endif
+#ifndef API_DEPRECATED_WITH_REPLACEMENT
+#define API_DEPRECATED_WITH_REPLACEMENT(...)
+#endif
+
 #if __GNUC__
 #define OS_EXPECT(x, v) __builtin_expect((x), (v))
+#define OS_UNUSED __attribute__((__unused__))
 #else
 #define OS_EXPECT(x, v) (x)
+#define OS_UNUSED
 #endif
 
 #ifndef os_likely
 #define __OS_CONCAT(x, y) x ## y
 #define OS_CONCAT(x, y) __OS_CONCAT(x, y)
 
+#if __has_feature(objc_fixed_enum) || __has_extension(cxx_strong_enums)
+#define OS_ENUM(_name, _type, ...) \
+typedef enum : _type { __VA_ARGS__ } _name##_t
+#else
+#define OS_ENUM(_name, _type, ...) \
+enum { __VA_ARGS__ }; typedef _type _name##_t
+#endif
+
 /*
  * Stub out misc linking and compilation attributes
  */
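For illustration, the OS_ENUM shim above expands as follows; my_flags and its enumerators are hypothetical names, not part of this header:

    #include <stdint.h>

    OS_ENUM(my_flags, uint32_t,
            MY_NONE = 0,
            MY_FAST = 1);
    // With fixed-enum support this becomes:
    //   typedef enum : uint32_t { MY_NONE = 0, MY_FAST = 1 } my_flags_t;
    // Otherwise it falls back to:
    //   enum { MY_NONE = 0, MY_FAST = 1 }; typedef uint32_t my_flags_t;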
diff --git a/os/object.h b/os/object.h
index f3faa62fd66decae859de5e07da63ebbef3b5c11..100721fc0cdb802f729b91758047df684b040bfa 100644 (file)
 
 #ifdef __APPLE__
 #include <Availability.h>
+#include <os/availability.h>
 #include <TargetConditionals.h>
-#endif
-#ifndef __linux__
 #include <os/base.h>
-#else
+#elif defined(__linux__)
 #include <os/linux_base.h>
 #endif
 
@@ -75,6 +74,9 @@
 #endif // OS_OBJECT_HAVE_OBJC_SUPPORT
 
 #if OS_OBJECT_HAVE_OBJC_SUPPORT
+#if defined(__swift__) && __swift__ && !OS_OBJECT_USE_OBJC
+#define OS_OBJECT_USE_OBJC 1
+#endif
 #ifndef OS_OBJECT_USE_OBJC
 #define OS_OBJECT_USE_OBJC 1
 #endif
@@ -232,7 +234,7 @@ __BEGIN_DECLS
  * @result
  * The retained object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_EXPORT OS_SWIFT_UNAVAILABLE("Can't be used with ARC")
 void*
 os_retain(void *object);
@@ -254,7 +256,7 @@ os_retain(void *object);
  * @param object
  * The object to release.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_EXPORT
 void OS_SWIFT_UNAVAILABLE("Can't be used with ARC")
 os_release(void *object);
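A small sketch pairing os_retain()/os_release() on a dispatch object when building without Objective-C ARC; the queue label is an assumption:

    #include <dispatch/dispatch.h>
    #include <os/object.h>

    static void
    retain_example(void)
    {
        dispatch_queue_t q = dispatch_queue_create("com.example.q",
                DISPATCH_QUEUE_SERIAL);
        dispatch_queue_t q2 = os_retain(q); // +1; returns the object
        /* ... use q2 independently of q ... */
        os_release(q2);                     // balances os_retain
        dispatch_release(q);                // balances the create
    }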
diff --git a/os/object_private.h b/os/object_private.h
index dc2af83456dd7dd0f517e979ab9238d4b1adace9..215c3d1469647252a2436773f934ec04a59ce400 100644 (file)
 #ifndef __OS_OBJECT_PRIVATE__
 #define __OS_OBJECT_PRIVATE__
 
-#include <sys/cdefs.h>
-#include <stddef.h>
 #include <os/object.h>
-
-#ifndef __OSX_AVAILABLE_STARTING
-#define __OSX_AVAILABLE_STARTING(x, y)
-#endif
+#include <stddef.h>
+#include <stdint.h>
 
 #if __GNUC__
 #define OS_OBJECT_NOTHROW __attribute__((__nothrow__))
 #define OS_OBJECT_NONNULL __attribute__((__nonnull__))
 #define OS_OBJECT_WARN_RESULT __attribute__((__warn_unused_result__))
 #define OS_OBJECT_MALLOC __attribute__((__malloc__))
+#ifndef OS_OBJECT_EXPORT
 #define OS_OBJECT_EXPORT extern __attribute__((visibility("default")))
+#endif
 #else
 /*! @parseOnly */
 #define OS_OBJECT_NOTHROW
 #define OS_OBJECT_WARN_RESULT
 /*! @parseOnly */
 #define OS_OBJECT_MALLOC
+#ifndef OS_OBJECT_EXPORT
+/*! @parseOnly */
 #define OS_OBJECT_EXPORT extern
 #endif
+#endif
 
 #if OS_OBJECT_USE_OBJC && __has_feature(objc_arc)
 #define _OS_OBJECT_OBJC_ARC 1
@@ -112,7 +113,7 @@ typedef OS_OBJECT_CLASS(object) *_os_object_t;
 #define _OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, super) \
                OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, super)
 #elif OS_OBJECT_USE_OBJC
-__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 OS_OBJECT_EXPORT
 @interface OS_OBJECT_CLASS(object) : NSObject
 - (void)_xref_dispose;
@@ -136,53 +137,65 @@ __BEGIN_DECLS
 
 #if !_OS_OBJECT_OBJC_ARC
 
-__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW
 OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
 _os_object_t
 _os_object_alloc(const void *cls, size_t size);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW
 OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
 _os_object_t
 _os_object_alloc_realized(const void *cls, size_t size);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW
 OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
 void _os_object_dealloc(_os_object_t object);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW
 OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
 _os_object_t
 _os_object_retain(_os_object_t object);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW
 OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
 _os_object_t
 _os_object_retain_with_resurrect(_os_object_t obj);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW
 OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
 void
 _os_object_release(_os_object_t object);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW
 OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
 _os_object_t
 _os_object_retain_internal(_os_object_t object);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW
 OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
 void
 _os_object_release_internal(_os_object_t object);
 
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW
+OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
+_os_object_t
+_os_object_retain_internal_n(_os_object_t object, uint16_t n);
+
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW
+OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
+void
+_os_object_release_internal_n(_os_object_t object, uint16_t n);
+
 #endif // !_OS_OBJECT_OBJC_ARC
 
 __END_DECLS
diff --git a/os/voucher_activity_private.h b/os/voucher_activity_private.h
index 456cb0c9349dd7039e5caa3fea296db63121f543..8ce0ef5836861972c1a1f8ab9be4af8c82488e86 100644 (file)
@@ -28,6 +28,7 @@
 #endif
 #ifndef __linux__
 #include <os/base.h>
+#include <os/availability.h>
 #endif
 #include <sys/uio.h>
 #include <os/object.h>
@@ -74,8 +75,7 @@ __BEGIN_DECLS
  * The current activity identifier, if any. When 0 is returned, parent_id will
  * also always be 0.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 OS_VOUCHER_EXPORT OS_NOTHROW
 firehose_activity_id_t
 voucher_get_activity_id(voucher_t voucher, firehose_activity_id_t *parent_id);
@@ -104,8 +104,7 @@ voucher_get_activity_id(voucher_t voucher, firehose_activity_id_t *parent_id);
  * The current activity identifier, if any. When 0 is returned, parent_id will
  * also always be 0.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 OS_VOUCHER_EXPORT OS_NOTHROW
 firehose_activity_id_t
 voucher_get_activity_id_and_creator(voucher_t voucher, uint64_t *creator_pid,
@@ -155,16 +154,15 @@ voucher_get_activity_id_and_creator(voucher_t voucher, uint64_t *creator_pid,
  * @result
  * A new voucher with an activity identifier.
  */
-__OSX_AVAILABLE(10.12.4) __IOS_AVAILABLE(10.3)
-__TVOS_AVAILABLE(10.2) __WATCHOS_AVAILABLE(3.2)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
 voucher_t
 voucher_activity_create_with_data(firehose_tracepoint_id_t *trace_id,
                voucher_t base, firehose_activity_flags_t flags,
                const void *pubdata, size_t publen);
 
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_DEPRECATED_WITH_REPLACEMENT("voucher_activity_create_with_data",
+               macos(10.12,10.12), ios(10.0,10.0), tvos(10.0,10.0), watchos(3.0,3.0))
 OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
 voucher_t
 voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id,
@@ -175,6 +173,21 @@ voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id,
  * SPI intended for libtrace only
  */
 
+/*!
+ * @function voucher_activity_id_allocate
+ *
+ * @abstract
+ * Allocate a new system-wide unique activity ID.
+ *
+ * @param flags
+ * The bottom-most 8 bits of the flags will be used to generate the ID.
+ * See firehose_activity_flags_t.
+ */
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+OS_VOUCHER_EXPORT OS_NOTHROW
+firehose_activity_id_t
+voucher_activity_id_allocate(firehose_activity_flags_t flags);
+
 /*!
  * @function voucher_activity_flush
  *
@@ -190,8 +203,7 @@ voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id,
  * @param stream
  * The stream to flush.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 OS_VOUCHER_EXPORT OS_NOTHROW
 void
 voucher_activity_flush(firehose_stream_t stream);
@@ -217,8 +229,7 @@ voucher_activity_flush(firehose_stream_t stream);
  * @param publen
  * Length of data at 'pubdata'.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4
 firehose_tracepoint_id_t
 voucher_activity_trace(firehose_stream_t stream,
@@ -253,8 +264,7 @@ voucher_activity_trace(firehose_stream_t stream,
  * Length of data to read from the iovec after the public data for the private
  * data.
  */
-__OSX_AVAILABLE(10.12.4) __IOS_AVAILABLE(10.3)
-__TVOS_AVAILABLE(10.2) __WATCHOS_AVAILABLE(3.2)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4
 firehose_tracepoint_id_t
 voucher_activity_trace_v(firehose_stream_t stream,
@@ -262,10 +272,8 @@ voucher_activity_trace_v(firehose_stream_t stream,
                const struct iovec *iov, size_t publen, size_t privlen);
 
 
-__OSX_DEPRECATED(10.12, 10.12.4, "Use voucher_activity_trace_v")
-__IOS_DEPRECATED(10.0, 10.3, "Use voucher_activity_trace_v")
-__TVOS_DEPRECATED(10.0, 10.2, "Use voucher_activity_trace_v")
-__WATCHOS_DEPRECATED(3.0, 3.2, "Use voucher_activity_trace_v")
+API_DEPRECATED_WITH_REPLACEMENT("voucher_activity_trace_v",
+               macos(10.12,10.12), ios(10.0,10.0), tvos(10.0,10.0), watchos(3.0,3.0))
 OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4 OS_NONNULL6
 firehose_tracepoint_id_t
 voucher_activity_trace_with_private_strings(firehose_stream_t stream,
@@ -274,12 +282,13 @@ voucher_activity_trace_with_private_strings(firehose_stream_t stream,
                const void *privdata, size_t privlen);
 
 typedef const struct voucher_activity_hooks_s {
-#define VOUCHER_ACTIVITY_HOOKS_VERSION     4
+#define VOUCHER_ACTIVITY_HOOKS_VERSION     5
        long vah_version;
        mach_port_t (*vah_get_logd_port)(void);
        dispatch_mach_handler_function_t vah_debug_channel_handler;
        kern_return_t (*vah_get_reconnect_info)(mach_vm_address_t *, mach_vm_size_t *);
        void (*vah_metadata_init)(void *metadata_buffer, size_t size);
+       void (*vah_quarantine_starts)(void);
 } *voucher_activity_hooks_t;
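For illustration, a libtrace-side hooks table at the new version 5, filling in the added quarantine callback; every value here is hypothetical:

    #include <mach/mach.h>

    static mach_port_t hook_logd_port(void) { return MACH_PORT_NULL; }
    static void hook_quarantine_starts(void) { /* adjust pacing */ }

    static const struct voucher_activity_hooks_s hooks = {
        .vah_version = VOUCHER_ACTIVITY_HOOKS_VERSION, // now 5
        .vah_get_logd_port = hook_logd_port,
        .vah_quarantine_starts = hook_quarantine_starts,
        // unset hooks remain NULL
    };
    // voucher_activity_initialize_4libtrace(&hooks);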
 
 /*!
@@ -291,8 +300,7 @@ typedef const struct voucher_activity_hooks_s {
  * @param hooks
  * A pointer to a voucher_activity_hooks_s structure.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL_ALL
 void
 voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks);
@@ -310,7 +318,7 @@ voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks);
  * @result
  * Address of metadata buffer.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL_ALL
 void*
 voucher_activity_get_metadata_buffer(size_t *length);
@@ -322,8 +330,7 @@ voucher_activity_get_metadata_buffer(size_t *length);
  * Return the current voucher activity ID. Available for the dyld client stub
  * only.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW
 firehose_activity_id_t
 voucher_get_activity_id_4dyld(void);
diff --git a/os/voucher_private.h b/os/voucher_private.h
index 562a704153880def32986d2bef884d32735fa01f..aecbbc9ffe448db60444b6f026fb6759a62e2d29 100644 (file)
@@ -23,6 +23,7 @@
 
 #ifndef __linux__
 #include <os/base.h>
+#include <os/availability.h>
 #endif
 #if __has_include(<mach/mach.h>)
 #include <os/object.h>
@@ -100,7 +101,7 @@ OS_OBJECT_DECL_CLASS(voucher);
  * @result
  * The previously adopted voucher object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT_NEEDS_RELEASE
 OS_NOTHROW
 voucher_t _Nullable
@@ -116,7 +117,7 @@ voucher_adopt(voucher_t _Nullable voucher OS_OBJECT_CONSUMED);
  * @result
  * The currently adopted voucher object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
 voucher_t _Nullable
 voucher_copy(void);
@@ -135,7 +136,7 @@ voucher_copy(void);
  * @result
  * A copy of the currently adopted voucher object, with importance removed.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
 voucher_t _Nullable
 voucher_copy_without_importance(void);
@@ -161,7 +162,7 @@ voucher_copy_without_importance(void);
  *
  * CAUTION: Do NOT use this SPI without contacting the Darwin Runtime team.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_VOUCHER_EXPORT OS_NOTHROW
 void
 voucher_replace_default_voucher(void);
@@ -179,7 +180,7 @@ voucher_replace_default_voucher(void);
  *
  * CAUTION: Do NOT use this SPI without contacting the Darwin Runtime team.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_VOUCHER_EXPORT OS_NOTHROW
 void
 voucher_decrement_importance_count4CF(voucher_t _Nullable voucher);
@@ -201,8 +202,23 @@ voucher_decrement_importance_count4CF(voucher_t _Nullable voucher);
  * This flag is ignored if a specific voucher object is assigned with the
  * dispatch_block_create_with_voucher* functions, and is equivalent to passing
  * the NULL voucher to these functions.
+ *
+ * @const DISPATCH_BLOCK_IF_LAST_RESET_QUEUE_QOS_OVERRIDE
+ * Flag indicating that this dispatch block object should try to reset the
+ * recorded maximum QoS of all currently enqueued items on a serial dispatch
+ * queue at the base of a queue hierarchy.
+ *
+ * This only works if the queue becomes empty by dequeuing the block in
+ * question, and then allows that block to enqueue more work on this hierarchy
+ * without perpetuating QoS overrides resulting from items previously executed
+ * on the hierarchy.
+ *
+ * A dispatch block object created with this flag set cannot be used with
+ * dispatch_block_wait() or dispatch_block_cancel().
  */
-#define DISPATCH_BLOCK_NO_VOUCHER (0x40)
+#define DISPATCH_BLOCK_NO_VOUCHER (0x40ul)
+
+#define DISPATCH_BLOCK_IF_LAST_RESET_QUEUE_QOS_OVERRIDE (0x80ul)
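A sketch of the new flag used with the voucher block SPI declared below; the queue and block body are assumptions:

    #include <dispatch/dispatch.h>
    #include <Block.h>

    // A voucher-less block that may reset the queue's recorded max QoS
    // when it drains the last enqueued item. Per the header, it must
    // not be passed to dispatch_block_wait() or dispatch_block_cancel().
    static void
    submit_reset_block(dispatch_queue_t serial_q)
    {
        dispatch_block_t b = dispatch_block_create_with_voucher(
                (dispatch_block_flags_t)
                DISPATCH_BLOCK_IF_LAST_RESET_QUEUE_QOS_OVERRIDE,
                NULL, ^{ /* work */ });
        dispatch_async(serial_q, b);
        Block_release(b); // dispatch_async retains its own copy
    }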
 
 /*!
  * @function dispatch_block_create_with_voucher
@@ -263,7 +279,7 @@ voucher_decrement_importance_count4CF(voucher_t _Nullable voucher);
  * When not building with Objective-C ARC, must be released with a -[release]
  * message or the Block_release() function.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_RETURNS_RETAINED_BLOCK
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_block_t
@@ -346,7 +362,7 @@ dispatch_block_create_with_voucher(dispatch_block_flags_t flags,
  * When not building with Objective-C ARC, must be released with a -[release]
  * message or the Block_release() function.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_NONNULL5 DISPATCH_RETURNS_RETAINED_BLOCK
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_block_t
@@ -362,52 +378,10 @@ dispatch_block_create_with_voucher_and_qos_class(dispatch_block_flags_t flags,
  * @function dispatch_queue_create_with_accounting_override_voucher
  *
  * @abstract
- * Creates a new dispatch queue with an accounting override voucher created
- * from the specified voucher.
- *
- * @discussion
- * See dispatch_queue_create() headerdoc for generic details on queue creation.
- *
- * The resource accounting attributes of the specified voucher are extracted
- * and used to create an accounting override voucher for the new queue.
- *
- * Every block executed on the returned queue will initially have this override
- * voucher adopted, any voucher automatically associated with or explicitly
- * assigned to the block will NOT be used and released immediately before block
- * execution starts.
- *
- * The accounting override voucher will be automatically propagated to any
- * asynchronous work generated from the queue following standard voucher
- * propagation rules.
- *
- * NOTE: this SPI should only be used in special circumstances when a subsystem
- * has complete control over all workitems submitted to a queue (e.g. no client
- * block is ever submitted to the queue) and if and only if such queues have a
- * one-to-one mapping with resource accounting identities.
- *
- * CAUTION: use of this SPI represents a potential voucher propagation hole. It
- * is the responsibility of the caller to ensure that any callbacks into client
- * code from the queue have the correct client voucher applied (rather than the
- * automatically propagated accounting override voucher), e.g. by use of the
- * dispatch_block_create() API to capture client state at the time the callback
- * is registered.
- *
- * @param label
- * A string label to attach to the queue.
- * This parameter is optional and may be NULL.
- *
- * @param attr
- * DISPATCH_QUEUE_SERIAL, DISPATCH_QUEUE_CONCURRENT, or the result of a call to
- * the function dispatch_queue_attr_make_with_qos_class().
- *
- * @param voucher
- * A voucher whose resource accounting attributes are used to create the
- * accounting override voucher attached to the queue.
- *
- * @result
- * The newly created dispatch queue.
+ * Deprecated; do not use. This function will abort the process if called.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0)
+API_DEPRECATED("removed SPI", \
+               macos(10.11,10.12), ios(9.0,10.0), watchos(2.0,3.0), tvos(9.0,10.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_queue_t
@@ -440,7 +414,7 @@ dispatch_queue_create_with_accounting_override_voucher(
  * The newly created voucher object or NULL if the message was not carrying a
  * mach voucher.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
 voucher_t _Nullable
 voucher_create_with_mach_msg(mach_msg_header_t *msg);
@@ -475,7 +449,7 @@ struct proc_persona_info;
  * or the persona identifier of the current process
  * or PERSONA_ID_NONE
  */
-__OSX_AVAILABLE_STARTING(__MAC_NA,__IPHONE_9_2)
+API_AVAILABLE(ios(9.2))
 OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW
 uid_t
 voucher_get_current_persona(void);
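A short sketch; PERSONA_ID_NONE is assumed to come from <sys/persona.h>, the
header that also declares the struct proc_persona_info used by the functions
below:

    #include <stdbool.h>
    #include <sys/persona.h>

    static bool
    running_in_persona(void)
    {
        // Persona of the adopted voucher, or of the process itself.
        uid_t persona = voucher_get_current_persona();
        return persona != PERSONA_ID_NONE;
    }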
@@ -498,7 +472,7 @@ voucher_get_current_persona(void);
  * 0 on success: currently adopted voucher has a PERSONA_TOKEN
  * -1 on failure: persona_info is untouched/uninitialized
  */
-__OSX_AVAILABLE_STARTING(__MAC_NA,__IPHONE_9_2)
+API_AVAILABLE(ios(9.2))
 OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1
 int
 voucher_get_current_persona_originator_info(
@@ -522,7 +496,7 @@ voucher_get_current_persona_originator_info(
  * 0 on success: currently adopted voucher has a PERSONA_TOKEN
  * -1 on failure: persona_info is untouched/uninitialized
  */
-__OSX_AVAILABLE_STARTING(__MAC_NA,__IPHONE_9_2)
+API_AVAILABLE(ios(9.2))
 OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1
 int
 voucher_get_current_persona_proximate_info(
diff --git a/private/CMakeLists.txt b/private/CMakeLists.txt
new file mode 100644 (file)
index 0000000..18788d7
--- /dev/null
@@ -0,0 +1,5 @@
+
+# TODO(compnerd) ensure that benchmark.h data_private.h introspection_private.h
+# io_private.h layout_private.h mach_private.h private.h queue_private.h
+# source_private.h are included in the source tarball
+
index ef3cdbd2fd7870bb011fdb70902f2539f8d6a27f..ab5715648fbca67622b96f1239de81c615730428 100644 (file)
@@ -70,13 +70,13 @@ __BEGIN_DECLS
  *     cache-line.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW
 uint64_t
 dispatch_benchmark(size_t count, dispatch_block_t block);
 #endif
 
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NOTHROW
 uint64_t
 dispatch_benchmark_f(size_t count, void *_Nullable ctxt,
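A sketch of the block-based variant; the count and workload are arbitrary, and
dispatch_benchmark() is understood to return the average number of nanoseconds
one execution of the block takes:

    #include <stdio.h>
    #include <dispatch/dispatch.h>

    static void
    measure(void)
    {
        uint64_t avg_ns = dispatch_benchmark(10000, ^{
            // ... code under test ...
        });
        printf("%llu ns per iteration\n", (unsigned long long)avg_ns);
    }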
index 62975a59b73755bba37377a2b24fdad558f27f0b..ceb963a1f41fe560b54c9eef172684964a76a74e 100644 (file)
@@ -1,7 +1,6 @@
 module DispatchPrivate [system] [extern_c] {
        umbrella header "private.h"
        exclude header "mach_private.h"
-       module * { export * }
        export *
 }
 
index 7485525a5cf64c99bc871ec388a430c65c8fd127..364a8ffe071ed5e5e2a99b9229d6a6ae44f485df 100644 (file)
@@ -43,7 +43,7 @@ __BEGIN_DECLS
  * encapsulate buffers that should not be copied or freed by the system.
  */
 #define DISPATCH_DATA_DESTRUCTOR_NONE (_dispatch_data_destructor_none)
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(none);
 
 /*!
@@ -53,7 +53,7 @@ DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(none);
  */
 #define DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE \
                (_dispatch_data_destructor_vm_deallocate)
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) DISPATCH_LINUX_UNAVAILABLE()
+API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_LINUX_UNAVAILABLE()
 DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(vm_deallocate);
 
 /*!
@@ -77,7 +77,7 @@ DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(vm_deallocate);
  *                     data buffer when it is no longer needed.
  * @result             A newly created dispatch data object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_data_t
 dispatch_data_create_f(const void *buffer,
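A sketch pairing dispatch_data_create_f() with a plain-function destructor.
The assumption (marked in the comment) is that the destructor receives the
buffer pointer as its argument, mirroring the block-based
dispatch_data_create() destructor semantics:

    #include <stdlib.h>
    #include <dispatch/dispatch.h>

    static void
    buffer_destructor(void *buffer)
    {
        free(buffer); // assumption: invoked with the buffer pointer
    }

    static dispatch_data_t
    wrap_malloced_buffer(size_t size)
    {
        void *buf = malloc(size);
        // ... fill buf ...
        return dispatch_data_create_f(buf, size,
                dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0),
                buffer_destructor);
    }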
@@ -100,7 +100,7 @@ dispatch_data_create_f(const void *buffer,
  *                     location of the newly allocated memory region, or NULL.
  * @result             A newly created dispatch data object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_data_t
@@ -142,7 +142,7 @@ typedef bool (*dispatch_data_applier_function_t)(void *_Nullable context,
  * @result             A Boolean indicating whether traversal completed
  *                     successfully.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 bool
 dispatch_data_apply_f(dispatch_data_t data, void *_Nullable context,
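A sketch of the function-pointer applier; the (context, region, offset,
buffer, size) parameter order is an assumption mirroring the public
dispatch_data_applier_t block type, since the typedef above is truncated by
the hunk:

    static bool
    sum_region_sizes(void *context, dispatch_data_t region, size_t offset,
            const void *buffer, size_t size)
    {
        (void)region; (void)offset; (void)buffer;
        *(size_t *)context += size;
        return true; // keep traversing
    }

    static size_t
    total_bytes(dispatch_data_t data)
    {
        size_t total = 0;
        dispatch_data_apply_f(data, &total, sum_region_sizes);
        return total;
    }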
@@ -163,7 +163,7 @@ dispatch_data_apply_f(dispatch_data_t data, void *_Nullable context,
  * @result             A mach port for the newly made memory entry, or
  *                     MACH_PORT_NULL if an error occurred.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 mach_port_t
 dispatch_data_make_memory_entry(dispatch_data_t data);
@@ -198,7 +198,7 @@ typedef const struct dispatch_data_format_type_s *dispatch_data_format_type_t;
  * or should be, comprised of raw data bytes with no given encoding.
  */
 #define DISPATCH_DATA_FORMAT_TYPE_NONE (&_dispatch_data_format_type_none)
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 DISPATCH_DATA_FORMAT_TYPE_DECL(none);
 
 /*!
@@ -209,7 +209,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(none);
  * types.
  */
 #define DISPATCH_DATA_FORMAT_TYPE_BASE32 (&_dispatch_data_format_type_base32)
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 DISPATCH_DATA_FORMAT_TYPE_DECL(base32);
 
 /*!
@@ -221,7 +221,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(base32);
  */
 #define DISPATCH_DATA_FORMAT_TYPE_BASE32HEX \
                (&_dispatch_data_format_type_base32hex)
-__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_DATA_FORMAT_TYPE_DECL(base32hex);
 
 /*!
@@ -232,7 +232,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(base32hex);
  * types.
  */
 #define DISPATCH_DATA_FORMAT_TYPE_BASE64 (&_dispatch_data_format_type_base64)
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 DISPATCH_DATA_FORMAT_TYPE_DECL(base64);
 
 /*!
@@ -242,7 +242,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(base64);
  * with other UTF format types.
  */
 #define DISPATCH_DATA_FORMAT_TYPE_UTF8 (&_dispatch_data_format_type_utf8)
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 DISPATCH_DATA_FORMAT_TYPE_DECL(utf8);
 
 /*!
@@ -252,7 +252,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(utf8);
  * conjunction with other UTF format types.
  */
 #define DISPATCH_DATA_FORMAT_TYPE_UTF16LE (&_dispatch_data_format_type_utf16le)
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 DISPATCH_DATA_FORMAT_TYPE_DECL(utf16le);
 
 /*!
@@ -262,7 +262,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(utf16le);
  * conjunction with other UTF format types.
  */
 #define DISPATCH_DATA_FORMAT_TYPE_UTF16BE (&_dispatch_data_format_type_utf16be)
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 DISPATCH_DATA_FORMAT_TYPE_DECL(utf16be);
 
 /*!
@@ -274,7 +274,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(utf16be);
  * format.
  */
 #define DISPATCH_DATA_FORMAT_TYPE_UTF_ANY (&_dispatch_data_format_type_utf_any)
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 DISPATCH_DATA_FORMAT_TYPE_DECL(utf_any);
 
 /*!
@@ -295,7 +295,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(utf_any);
  * produced, or NULL if an error occurred.
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_RETURNS_RETAINED
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_data_t
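The declaration truncated by this hunk is dispatch_data_create_with_transform
(data, input_type, output_type) in the shipped data_private.h; assuming that
signature, a base64 encoding sketch using the format types declared above:

    static dispatch_data_t
    base64_encode(dispatch_data_t raw)
    {
        return dispatch_data_create_with_transform(raw,
                DISPATCH_DATA_FORMAT_TYPE_NONE,    // input: raw bytes
                DISPATCH_DATA_FORMAT_TYPE_BASE64); // output: base64 text
    }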
index 62975a59b73755bba37377a2b24fdad558f27f0b..ceb963a1f41fe560b54c9eef172684964a76a74e 100644 (file)
@@ -1,7 +1,6 @@
 module DispatchPrivate [system] [extern_c] {
        umbrella header "private.h"
        exclude header "mach_private.h"
-       module * { export * }
        export *
 }
 
index fa8e49aeb90dc89f6de4f2125d17fe1fb0f8fd2f..972c688577d085677739bbfd0472938043c8cfdd 100644 (file)
@@ -68,8 +68,8 @@ typedef struct dispatch_queue_s *dispatch_queue_t;
 typedef struct dispatch_source_s *dispatch_source_t;
 typedef struct dispatch_group_s *dispatch_group_t;
 typedef struct dispatch_object_s *dispatch_object_t;
-#ifndef __OSX_AVAILABLE_STARTING
-#define __OSX_AVAILABLE_STARTING(x,y)
+#ifndef API_AVAILABLE
+#define API_AVAILABLE(...)
 #endif
 #ifndef DISPATCH_EXPORT
 #define DISPATCH_EXPORT extern
@@ -135,7 +135,7 @@ typedef struct dispatch_object_s *dispatch_object_t;
  * Size of dispatch_introspection_source_s structure.
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT const struct dispatch_introspection_versions_s {
        unsigned long introspection_version;
        unsigned long hooks_version;
@@ -716,7 +716,7 @@ dispatch_introspection_queue_item_get_info(dispatch_queue_t queue,
  * hooks on output.
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT void
 dispatch_introspection_hooks_install(dispatch_introspection_hooks_t hooks);
 
index 0bb1e3b25f25feaec634baa80103c653be037608..2932581610b17efbecb500e89b774fea88b3c36b 100644 (file)
@@ -79,7 +79,7 @@ __BEGIN_DECLS
  *             param error     An errno condition for the read operation or
  *                             zero if the read was successful.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NONNULL5 DISPATCH_NOTHROW
 void
 dispatch_read_f(dispatch_fd_t fd,
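A sketch of the function-pointer read. The handler's (context, data, error)
shape is taken from the headerdoc above, and passing SIZE_MAX is assumed to
request all bytes until EOF, as with the public dispatch_read():

    #include <stdint.h>
    #include <dispatch/dispatch.h>

    static void
    read_done(void *context, dispatch_data_t data, int error)
    {
        (void)context;
        if (error == 0) {
            // 'data' holds the bytes read before EOF.
        }
    }

    static void
    start_read(dispatch_fd_t fd)
    {
        dispatch_read_f(fd, SIZE_MAX,
                dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0),
                NULL, read_done);
    }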
@@ -121,7 +121,7 @@ dispatch_read_f(dispatch_fd_t fd,
  *             param error     An errno condition for the write operation or
  *                             zero if the write was successful.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NONNULL5
 DISPATCH_NOTHROW
 void
@@ -160,7 +160,7 @@ dispatch_write_f(dispatch_fd_t fd,
  * @result     The newly created dispatch I/O channel or NULL if an error
  *             occurred (invalid type specified).
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_io_t
@@ -200,7 +200,7 @@ dispatch_io_create_f(dispatch_io_type_t type,
  * @result     The newly created dispatch I/O channel or NULL if an error
  *             occurred (invalid type or non-absolute path specified).
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_io_t
@@ -244,7 +244,7 @@ dispatch_io_create_with_path_f(dispatch_io_type_t type,
  * @result     The newly created dispatch I/O channel or NULL if an error
  *             occurred (invalid type specified).
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_io_t
@@ -311,7 +311,7 @@ typedef void (*dispatch_io_handler_function_t)(void *_Nullable context,
  *     param error     An errno condition for the read operation or zero if
  *                     the read was successful.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL4 DISPATCH_NONNULL6
 DISPATCH_NOTHROW
 void
@@ -368,7 +368,7 @@ dispatch_io_read_f(dispatch_io_t channel,
  *     param error     An errno condition for the write operation or zero
  *                     if the write was successful.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NONNULL4
 DISPATCH_NONNULL6 DISPATCH_NOTHROW
 void
@@ -402,7 +402,7 @@ dispatch_io_write_f(dispatch_io_t channel,
  *                     the barrier function.
  * @param barrier      The barrier function.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_io_barrier_f(dispatch_io_t channel,
index bf93ee99937b8f1e29d070fd185bc3b239954be9..0c0cd942dc67e48eeeeeba4816bfc4aec8bc6bb2 100644 (file)
@@ -29,7 +29,7 @@
 __BEGIN_DECLS
 
 #if !TARGET_OS_WIN32
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT const struct dispatch_queue_offsets_s {
        // always add new fields at the end
        const uint16_t dqo_version;
@@ -60,7 +60,7 @@ DISPATCH_EXPORT const struct dispatch_queue_offsets_s {
  * SPI intended for CoreSymbolication only
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT const struct dispatch_tsd_indexes_s {
        // always add new fields at the end
        const uint16_t dti_version;
index 2228436a77d712d52e2f4215de0bbc8a355dad4b..bc5322332937150f393f80535dd27097f7032676 100644 (file)
@@ -36,7 +36,7 @@ __BEGIN_DECLS
 
 #if DISPATCH_MACH_SPI
 
-#define DISPATCH_MACH_SPI_VERSION 20160505
+#define DISPATCH_MACH_SPI_VERSION 20161026
 
 #include <mach/mach.h>
 
@@ -109,6 +109,23 @@ DISPATCH_DECL(dispatch_mach);
  * result operation and never passed to a channel handler. Indicates that the
  * message passed to the send operation must not be disposed of until it is
  * returned via the channel handler.
+ *
+ * @const DISPATCH_MACH_SIGTERM_RECEIVED
+ * A SIGTERM signal has been received. This notification is delivered at most
+ * once during the lifetime of the channel. This event is sent only for XPC
+ * channels (i.e. channels that were created by calling
+ * dispatch_mach_create_4libxpc()) and only if the
+ * dmxh_enable_sigterm_notification function in the XPC hooks structure is not
+ * set or it returned true when it was called at channel activation time.
+ *
+ * @const DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED
+ * The channel has been disconnected by a call to dispatch_mach_reconnect() or
+ * dispatch_mach_cancel(); an empty message is passed in the message parameter
+ * (so that associated port rights can be disposed of). The message header will
+ * contain a local port with the receive right previously allocated to receive
+ * an asynchronous reply to a message sent to the channel. Used only if the
+ * channel is disconnected while waiting for a reply to a message sent with
+ * dispatch_mach_send_with_result_and_async_reply_4libxpc().
  */
 DISPATCH_ENUM(dispatch_mach_reason, unsigned long,
        DISPATCH_MACH_CONNECTED = 1,
@@ -121,6 +138,8 @@ DISPATCH_ENUM(dispatch_mach_reason, unsigned long,
        DISPATCH_MACH_CANCELED,
        DISPATCH_MACH_REPLY_RECEIVED,
        DISPATCH_MACH_NEEDS_DEFERRED_SEND,
+       DISPATCH_MACH_SIGTERM_RECEIVED,
+       DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED,
        DISPATCH_MACH_REASON_LAST, /* unused */
 );
 
@@ -202,7 +221,7 @@ DISPATCH_ENUM(dispatch_mach_msg_destructor, unsigned int,
  *                                             buffer, or NULL.
  * @result                             A newly created dispatch mach message object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_mach_msg_t
@@ -219,7 +238,7 @@ dispatch_mach_msg_create(mach_msg_header_t *_Nullable msg, size_t size,
  *                                     size of the message buffer, or NULL.
  * @result                     Pointer to message buffer underlying the object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 mach_msg_header_t*
 dispatch_mach_msg_get_msg(dispatch_mach_msg_t message,
@@ -267,7 +286,7 @@ typedef void (^dispatch_mach_handler_t)(dispatch_mach_reason_t reason,
  * @result
  * The newly created dispatch mach channel.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 dispatch_mach_t
@@ -321,7 +340,7 @@ typedef void (*dispatch_mach_handler_function_t)(void *_Nullable context,
  * @result
  * The newly created dispatch mach channel.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NONNULL4 DISPATCH_NOTHROW
 dispatch_mach_t
@@ -354,7 +373,7 @@ dispatch_mach_create_f(const char *_Nullable label,
  * to channel cancellation or reconnection) and the channel handler has
  * returned. May be NULL.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_mach_connect(dispatch_mach_t channel, mach_port_t receive,
@@ -385,7 +404,7 @@ dispatch_mach_connect(dispatch_mach_t channel, mach_port_t receive,
 * is complete (or not performed due to channel cancellation or reconnection)
  * and the channel handler has returned. May be NULL.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_mach_reconnect(dispatch_mach_t channel, mach_port_t send,
@@ -408,7 +427,7 @@ dispatch_mach_reconnect(dispatch_mach_t channel, mach_port_t send,
  * @param channel
  * The mach channel to cancel.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_mach_cancel(dispatch_mach_t channel);
@@ -451,7 +470,7 @@ dispatch_mach_cancel(dispatch_mach_t channel);
  * Additional send options to pass to mach_msg() when performing the send
  * operation.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NOTHROW
 void
 dispatch_mach_send(dispatch_mach_t channel, dispatch_mach_msg_t message,
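A minimal channel lifecycle sketch using the function-pointer creation
variant; the ports are assumed to be valid rights owned by the caller, and a
string literal is used for the label since channel labels are not copied:

    static void
    channel_events(void *context, dispatch_mach_reason_t reason,
            dispatch_mach_msg_t message, mach_error_t error)
    {
        (void)context; (void)message; (void)error;
        // Handle DISPATCH_MACH_CONNECTED, _MESSAGE_RECEIVED, _CANCELED, ...
        (void)reason;
    }

    static dispatch_mach_t
    make_channel(mach_port_t receive, mach_port_t send)
    {
        dispatch_mach_t ch = dispatch_mach_create_f("com.example.channel",
                dispatch_get_main_queue(), NULL, channel_events);
        // Channels are created disconnected; connect to start send/receive.
        dispatch_mach_connect(ch, receive, send, NULL);
        return ch;
    }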
@@ -519,8 +538,7 @@ dispatch_mach_send(dispatch_mach_t channel, dispatch_mach_msg_t message,
  * Out parameter to return the error from the immediate send attempt.
  * If a deferred send is required, returns 0. Must not be NULL.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL5
 DISPATCH_NONNULL6 DISPATCH_NOTHROW
 void
@@ -580,7 +598,7 @@ dispatch_mach_send_with_result(dispatch_mach_t channel,
  * @result
  * The received reply message object, or NULL if the channel was canceled.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0)
+API_AVAILABLE(macos(10.11), ios(9.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NOTHROW
 dispatch_mach_msg_t _Nullable
@@ -662,8 +680,7 @@ dispatch_mach_send_and_wait_for_reply(dispatch_mach_t channel,
  * @result
  * The received reply message object, or NULL if the channel was canceled.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL5 DISPATCH_NONNULL6
 DISPATCH_NOTHROW
@@ -688,7 +705,7 @@ dispatch_mach_send_with_result_and_wait_for_reply(dispatch_mach_t channel,
  * @param barrier
  * The barrier block to submit to the channel target queue.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_mach_send_barrier(dispatch_mach_t channel, dispatch_block_t barrier);
@@ -711,7 +728,7 @@ dispatch_mach_send_barrier(dispatch_mach_t channel, dispatch_block_t barrier);
  * @param barrier
  * The barrier function to submit to the channel target queue.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_mach_send_barrier_f(dispatch_mach_t channel, void *_Nullable context,
@@ -731,7 +748,7 @@ dispatch_mach_send_barrier_f(dispatch_mach_t channel, void *_Nullable context,
  * @param barrier
  * The barrier block to submit to the channel target queue.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_mach_receive_barrier(dispatch_mach_t channel,
@@ -754,7 +771,7 @@ dispatch_mach_receive_barrier(dispatch_mach_t channel,
  * @param barrier
  * The barrier function to submit to the channel target queue.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_mach_receive_barrier_f(dispatch_mach_t channel, void *_Nullable context,
@@ -781,11 +798,231 @@ dispatch_mach_receive_barrier_f(dispatch_mach_t channel, void *_Nullable context
  * @result
  * The most recently specified check-in port for the channel.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 mach_port_t
 dispatch_mach_get_checkin_port(dispatch_mach_t channel);
 
+// SPI for libxpc
+/*
+ * Type for the callback for receipt of asynchronous replies to
+ * dispatch_mach_send_with_result_and_async_reply_4libxpc().
+ */
+typedef void (*_Nonnull dispatch_mach_async_reply_callback_t)(void *context,
+               dispatch_mach_reason_t reason, dispatch_mach_msg_t message);
+
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+typedef const struct dispatch_mach_xpc_hooks_s {
+#define DISPATCH_MACH_XPC_HOOKS_VERSION     3
+       unsigned long version;
+
+       /* Fields available in version 1. */
+
+       /*
+        * Called to handle a Mach message event inline if possible. Returns true
+        * if the event was handled, false if the event should be delivered to the
+        * channel event handler. The implementation should not make any assumptions
+        * about the thread in which the function is called and cannot assume that
+        * invocations of this function are serialized relative to each other or
+        * relative to the channel's event handler function. In addition, the
+        * handler must not throw an exception or call out to any code that might
+        * throw an exception.
+        */
+       bool (* _Nonnull dmxh_direct_message_handler)(void *_Nullable context,
+                       dispatch_mach_reason_t reason, dispatch_mach_msg_t message,
+                       mach_error_t error);
+
+       /* Fields available in version 2. */
+
+       /*
+        * Gets the queue to which a reply to a message sent using
+        * dispatch_mach_send_with_result_and_async_reply_4libxpc() should be
+        * delivered. The msg_context argument is the value of the do_ctxt field
+        * of the outgoing message, as returned by dispatch_get_context(). If this
+        * function returns NULL, the reply will be delivered to the channel queue.
+        * This function should not make any assumptions about the thread on which
+        * it is called and, since it may be called more than once per message, it
+        * should execute as quickly as possible and not attempt to synchronize with
+        * other code.
+        */
+       dispatch_queue_t _Nullable (*_Nonnull dmxh_msg_context_reply_queue)(
+                       void *_Nonnull msg_context);
+
+       /*
+        * Called when a reply to a message sent by
+        * dispatch_mach_send_with_result_and_async_reply_4libxpc() is received. The
+        * message argument points to the reply message and the context argument is
+        * the context value passed to dispatch_mach_create_4libxpc() when creating
+        * the Mach channel. The handler is called on the queue that is returned by
+        * dmxh_msg_context_reply_queue() when the reply is received or if the
+        * channel is disconnected. The reason argument is
+        * DISPATCH_MACH_MESSAGE_RECEIVED if a reply has been received or
+        * DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED if the channel has been
+        * disconnected. Refer to the documentation for
+        * dispatch_mach_send_with_result_and_async_reply_4libxpc() for more
+        * details.
+        */
+       dispatch_mach_async_reply_callback_t dmxh_async_reply_handler;
+
+       /* Fields available in version 3. */
+       /**
+        * Called once when the Mach channel has been activated. If this function
+        * returns true, a DISPATCH_MACH_SIGTERM_RECEIVED notification will be
+        * delivered to the channel's event handler when a SIGTERM is received.
+        */
+       bool (* _Nullable dmxh_enable_sigterm_notification)(
+                       void *_Nullable context);
+} *dispatch_mach_xpc_hooks_t;
+
+#define DISPATCH_MACH_XPC_SUPPORTS_ASYNC_REPLIES(hooks) ((hooks)->version >= 2)
+
+/*!
+ * @function dispatch_mach_hooks_install_4libxpc
+ *
+ * @abstract
+ * Installs XPC callbacks for dispatch Mach channels.
+ *
+ * @discussion
+ * In order to improve the performance of the XPC/dispatch interface, it is
+ * sometimes useful for dispatch to be able to call directly into XPC. The
+ * channel hooks structure should be initialized with pointers to XPC callback
+ * functions, or NULL for callbacks that XPC does not support. The version
+ * number in the structure must be set to reflect the fields that have been
+ * initialized. This function may be called only once.
+ *
+ * @param hooks
+ * A pointer to the channel hooks structure. This must remain valid once set.
+ */
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_mach_hooks_install_4libxpc(dispatch_mach_xpc_hooks_t hooks);
+
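A sketch of installing version-1 hooks only; the later fields are left unset,
so the version number reflects exactly what is initialized, as required above:

    static bool
    direct_handler(void *context, dispatch_mach_reason_t reason,
            dispatch_mach_msg_t message, mach_error_t error)
    {
        (void)context; (void)reason; (void)message; (void)error;
        return false; // deliver all events to the channel event handler
    }

    static const struct dispatch_mach_xpc_hooks_s hooks = {
        .version = 1,
        .dmxh_direct_message_handler = direct_handler,
    };

    static void
    install_hooks_once(void)
    {
        // May be called only once; the structure must remain valid forever.
        dispatch_mach_hooks_install_4libxpc(&hooks);
    }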
+/*!
+ * @function dispatch_mach_create_4libxpc
+ * Create a dispatch mach channel to asynchronously receive and send mach
+ * messages, specifically for libxpc.
+ *
+ * The specified handler will be called with the corresponding reason parameter
+ * for each message received and for each message that was successfully sent,
+ * that failed to be sent, or was not sent; as well as when a barrier block
+ * has completed, or when channel connection, reconnection or cancellation has
+ * taken effect. However, the handler will not be called for messages that 
+ * were passed to the XPC hooks dmxh_direct_message_handler function if that
+ * function returned true.
+ *
+ * Dispatch mach channels are created in a disconnected state; they must be
+ * connected via dispatch_mach_connect() to begin receiving and sending
+ * messages.
+ *
+ * @param label
+ * An optional string label to attach to the channel. The string is not copied;
+ * if it is non-NULL, it must point to storage that remains valid for the
+ * lifetime of the channel object. May be NULL.
+ *
+ * @param queue
+ * The target queue of the channel, where the handler and barrier blocks will
+ * be submitted.
+ *
+ * @param context
+ * The application-defined context to pass to the handler.
+ *
+ * @param handler
+ * The handler function to submit when a message has been sent or received.
+ *
+ * @result
+ * The newly created dispatch mach channel.
+ */
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
+DISPATCH_NONNULL4 DISPATCH_NOTHROW
+dispatch_mach_t
+dispatch_mach_create_4libxpc(const char *_Nullable label,
+               dispatch_queue_t _Nullable queue, void *_Nullable context,
+               dispatch_mach_handler_function_t handler);
+
+/*!
+ * @function dispatch_mach_send_with_result_and_async_reply_4libxpc
+ * SPI for XPC that asynchronously sends a message encapsulated in a dispatch
+ * mach message object to the specified mach channel. If an immediate send can
+ * be performed, returns its result via out parameters.
+ *
+ * The reply message is processed on the queue returned by the
+ * dmxh_msg_context_reply_queue function in the dispatch_mach_xpc_hooks_s
+ * structure, which is called with a single argument whose value is the
+ * do_ctxt field of the message argument to this function. The reply message is
+ * delivered to the dmxh_async_reply_handler hook function instead of being 
+ * passed to the channel event handler.
+ *
+ * If the dmxh_msg_context_reply_queue function is not implemented or returns
+ * NULL, the reply message is delivered to the channel event handler on the
+ * channel queue.
+ *
+ * Unless the message is being sent to a send-once right (as determined by the
+ * presence of MACH_MSG_TYPE_MOVE_SEND_ONCE in the message header remote bits),
+ * the message header remote port is set to the channel send right before the
+ * send operation is performed.
+ *
+ * The message is required to expect a direct reply (as determined by the
+ * presence of MACH_MSG_TYPE_MAKE_SEND_ONCE in the message header local bits).
+ * The receive right specified in the message header local port will be
+ * monitored until a reply message (or a send-once notification) is received, or
+ * the channel is canceled. Hence the application must wait for the reply
+ * to be received or for a DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED message
+ * before releasing that receive right.
+ *
+ * If the message send operation is attempted but the channel is canceled
+ * before the send operation successfully completes, the message returned to the
+ * channel handler with DISPATCH_MACH_MESSAGE_NOT_SENT may be the result of a
+ * pseudo-receive operation and the receive right originally specified in the
+ * message header local port will be returned in a
+ * DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED message.
+ *
+ * If an immediate send could be performed, returns the resulting reason
+ * (e.g. DISPATCH_MACH_MESSAGE_SENT) and possible error to the caller in the
+ * send_result and send_error out parameters (instead of via the channel
+ * handler), in which case the passed-in message and associated resources
+ * can be disposed of synchronously.
+ *
+ * If a deferred send is required, returns DISPATCH_MACH_NEEDS_DEFERRED_SEND
+ * in the send_result out parameter to indicate that the passed-in message has
+ * been retained and associated resources must not be disposed of until the
+ * message is returned asynchronously via the channel handler.
+ *
+ * @param channel
+ * The mach channel to which to send the message.
+ *
+ * @param message
+ * The message object encapsulating the message to send. Unless an immediate
+ * send could be performed, the object will be retained until the asynchronous
+ * send operation is complete and the channel handler has returned. The storage
+ * underlying the message object may be modified by the send operation.
+ *
+ * @param options
+ * Additional send options to pass to mach_msg() when performing the send
+ * operation.
+ *
+ * @param send_flags
+ * Flags to configure the send operation. Must be 0 for now.
+ *
+ * @param send_result
+ * Out parameter to return the result of the immediate send attempt.
+ * If a deferred send is required, returns DISPATCH_MACH_NEEDS_DEFERRED_SEND.
+ * Must not be NULL.
+ *
+ * @param send_error
+ * Out parameter to return the error from the immediate send attempt.
+ * If a deferred send is required, returns 0. Must not be NULL.
+ */
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL5
+DISPATCH_NONNULL6 DISPATCH_NOTHROW
+void
+dispatch_mach_send_with_result_and_async_reply_4libxpc(dispatch_mach_t channel,
+               dispatch_mach_msg_t message, mach_msg_option_t options,
+               dispatch_mach_send_flags_t send_flags,
+               dispatch_mach_reason_t *send_result, mach_error_t *send_error);
+
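A sketch of the deferred-send contract described above; channel and msg are
assumed to be a connected libxpc channel and a message whose header expects a
direct reply:

    static void
    send_expecting_async_reply(dispatch_mach_t channel, dispatch_mach_msg_t msg)
    {
        dispatch_mach_reason_t send_result;
        mach_error_t send_error;
        dispatch_mach_send_with_result_and_async_reply_4libxpc(channel, msg,
                0 /* options */, 0 /* send_flags must be 0 */,
                &send_result, &send_error);
        if (send_result != DISPATCH_MACH_NEEDS_DEFERRED_SEND) {
            // Immediate send attempted: 'msg' and associated resources may be
            // disposed of synchronously; send_error holds any mach error.
        }
    }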
 DISPATCH_ASSUME_NONNULL_END
 
 #endif // DISPATCH_MACH_SPI
index 7136e6d7bfda18910bf99eb4cd3999d9af41c896..ed9f876cce6343b67fa9589a872b4ec288415817 100644 (file)
@@ -43,6 +43,9 @@
 #include <sys/cdefs.h>
 #endif
 #include <pthread.h>
+#if TARGET_OS_MAC
+#include <pthread/qos.h>
+#endif
 
 #ifndef __DISPATCH_BUILDING_DISPATCH__
 #include <dispatch/dispatch.h>
@@ -66,7 +69,7 @@
 #endif /* !__DISPATCH_BUILDING_DISPATCH__ */
 
 // <rdar://problem/9627726> Check that public and private dispatch headers match
-#if DISPATCH_API_VERSION != 20160712 // Keep in sync with <dispatch/dispatch.h>
+#if DISPATCH_API_VERSION != 20170124 // Keep in sync with <dispatch/dispatch.h>
 #error "Dispatch header mismatch between /usr/include and /usr/local/include"
 #endif
 
@@ -93,7 +96,7 @@ __BEGIN_DECLS
  * Boolean indicating whether the process has used libdispatch and become
  * multithreaded.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW
 bool _dispatch_is_multithreaded(void);
 
@@ -117,7 +120,7 @@ bool _dispatch_is_multithreaded(void);
  * Boolean indicating whether the parent process had used libdispatch and
  * become multithreaded at the time of fork.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW
 bool _dispatch_is_fork_of_multithreaded_parent(void);
 
@@ -144,8 +147,7 @@ bool _dispatch_is_fork_of_multithreaded_parent(void);
  * If the program already used dispatch before the guard is enabled, then
  * this function will abort immediately.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW
 void _dispatch_prohibit_transition_to_multithreaded(bool prohibit);
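A sketch of how a daemon that must stay single-threaded during early setup
might use this guard (the initialization step is hypothetical):

    int
    main(void)
    {
        _dispatch_prohibit_transition_to_multithreaded(true);
        // ... initialization that must not spin up dispatch worker threads ...
        _dispatch_prohibit_transition_to_multithreaded(false);
        dispatch_main(); // never returns
    }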
 
@@ -187,24 +189,23 @@ typedef int dispatch_runloop_handle_t;
 #endif
 
 #if TARGET_OS_MAC
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_runloop_handle_t
 _dispatch_get_main_queue_port_4CF(void);
 #endif
 
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW
 dispatch_runloop_handle_t
 _dispatch_get_main_queue_handle_4CF(void);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW
 void
 _dispatch_main_queue_callback_4CF(void *_Null_unspecified msg);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_queue_t
@@ -212,38 +213,53 @@ _dispatch_runloop_root_queue_create_4CF(const char *_Nullable label,
                unsigned long flags);
 
 #if TARGET_OS_MAC
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 mach_port_t
 _dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t queue);
+
+#ifdef __BLOCKS__
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
+DISPATCH_NOTHROW
+dispatch_queue_t
+_dispatch_network_root_queue_create_4NW(const char *_Nullable label,
+               const pthread_attr_t *_Nullable attrs,
+               dispatch_block_t _Nullable configure);
+#endif
 #endif
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW
 void
 _dispatch_runloop_root_queue_wakeup_4CF(dispatch_queue_t queue);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 bool
 _dispatch_runloop_root_queue_perform_4CF(dispatch_queue_t queue);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 _dispatch_source_set_runloop_timer_4CF(dispatch_source_t source,
                dispatch_time_t start, uint64_t interval, uint64_t leeway);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT
 void *_Nonnull (*_Nullable _dispatch_begin_NSAutoReleasePool)(void);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT
 void (*_Nullable _dispatch_end_NSAutoReleasePool)(void *);
 
 #endif /* DISPATCH_COCOA_COMPAT */
 
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+DISPATCH_EXPORT DISPATCH_NOTHROW
+void
+_dispatch_poll_for_events_4launchd(void);
+
 __END_DECLS
 
 DISPATCH_ASSUME_NONNULL_END
index 33de371c87c5eb85b53f39ab57e4ede0e35da421..2b50eb89161f83497fc76334ebb68aaabd6dabf9 100644 (file)
@@ -79,7 +79,7 @@ enum {
  * This new value combines the attributes specified by the 'attr' parameter and
  * the overcommit flag.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW
 dispatch_queue_attr_t
 dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t _Nullable attr,
@@ -98,6 +98,39 @@ dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t _Nullable attr,
  */
 #define DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE INT8_MIN
 
+/*!
+ * @function dispatch_queue_set_label_nocopy
+ *
+ * @abstract
+ * Set the label for a given queue, without copying the input string.
+ *
+ * @discussion
+ * The queue must have been initially created with a NULL label; otherwise,
+ * the behavior of using this function to set the queue label is undefined.
+ *
+ * The caller of this function must make sure the label pointer remains valid
+ * while it is used as the queue label and while any callers to
+ * dispatch_queue_get_label() may have obtained it. Since the queue lifetime
+ * may extend past the last release, it is advised to call this function with
+ * a constant string or NULL before the queue is released, or to destroy the
+ * label from a finalizer for that queue.
+ *
+ * This function should be called before any work item could call
+ * dispatch_queue_get_label(DISPATCH_CURRENT_QUEUE_LABEL) or from the context of
+ * the queue itself.
+ *
+ * @param queue
+ * The queue to adjust. Attempts to set the label of the main queue or a global
+ * concurrent queue will be ignored.
+ *
+ * @param label
+ * The new label for the queue.
+ */
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
+void
+dispatch_queue_set_label_nocopy(dispatch_queue_t queue, const char *label);
+
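A sketch of the intended pattern: create the queue with a NULL label, then
attach a constant string before any work item can observe it:

    static dispatch_queue_t
    make_lazily_labeled_queue(void)
    {
        dispatch_queue_t q = dispatch_queue_create(NULL, DISPATCH_QUEUE_SERIAL);
        // String literal: valid for the queue's entire lifetime, never copied.
        dispatch_queue_set_label_nocopy(q, "com.example.worker");
        return q;
    }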
 /*!
  * @function dispatch_queue_set_width
  *
@@ -115,8 +148,8 @@ dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t _Nullable attr,
  * with the desired concurrency width.
  *
  * @param queue
- * The queue to adjust. Passing the main queue or a global concurrent queue
- * will be ignored.
+ * The queue to adjust. Attempts to set the width of the main queue or a global
+ * concurrent queue will be ignored.
  *
  * @param width
  * The new maximum width of concurrency depending on available resources.
@@ -128,8 +161,8 @@ dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t _Nullable attr,
 #define DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS -2
 #define DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS  -3
 
-__OSX_AVAILABLE_BUT_DEPRECATED_MSG(__MAC_10_6,__MAC_10_10,__IPHONE_4_0,__IPHONE_8_0, \
-               "Use dispatch_queue_create(name, DISPATCH_QUEUE_CONCURRENT) instead")
+API_DEPRECATED("Use dispatch_queue_create(name, DISPATCH_QUEUE_CONCURRENT)",
+               macos(10.6,10.10), ios(4.0,8.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_queue_set_width(dispatch_queue_t dq, long width);
@@ -189,7 +222,7 @@ dispatch_queue_set_width(dispatch_queue_t dq, long width);
  * @result
  * The newly created dispatch pthread root queue.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_queue_t
@@ -238,19 +271,19 @@ dispatch_pthread_root_queue_flags_pool_size(uint8_t pool_size)
  * @result
  * A new reference to a pthread root queue object or NULL.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_queue_t _Nullable
 dispatch_pthread_root_queue_copy_current(void);
 
 /*!
  * @constant DISPATCH_APPLY_CURRENT_ROOT_QUEUE
- * @discussion Constant to pass to the dispatch_apply() and dispatch_apply_f()
- * functions to indicate that the root queue for the current thread should be
- * used (i.e. one of the global concurrent queues or a queue created with
- * dispatch_pthread_root_queue_create()). If there is no such queue, the
- * default priority global concurrent queue will be used.
+ *
+ * @discussion
+ * This constant is deprecated; please use DISPATCH_APPLY_AUTO.
+ *
+ * DISPATCH_APPLY_AUTO also selects the current pthread root queue if
+ * applicable.
  */
 #define DISPATCH_APPLY_CURRENT_ROOT_QUEUE ((dispatch_queue_t _Nonnull)0)
 
@@ -284,13 +317,28 @@ dispatch_pthread_root_queue_copy_current(void);
  * dispatch_async_f().
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0)
+API_AVAILABLE(macos(10.11), ios(9.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_async_enforce_qos_class_f(dispatch_queue_t queue,
        void *_Nullable context, dispatch_function_t work);
 
 
+#ifdef __ANDROID__
+/*!
+ * @function _dispatch_install_thread_detach_callback
+ *
+ * @param cb
+ * Function to be called before each worker thread exits, to detach the JVM.
+ *
+ * Hook to allow threads to be detached from the Java JVM before they exit.
+ * If JNI has been used on a thread on Android, the thread needs to have been
+ * "detached" before it exits or the application will crash.
+ */
+DISPATCH_EXPORT
+void _dispatch_install_thread_detach_callback(dispatch_function_t cb);
+#endif
+
 __END_DECLS
 
 DISPATCH_ASSUME_NONNULL_END
index 1df0c1b30f9d76abda5f71754249461fba62af5a..ad22e6a6a2296cde90a770362444dfda26214dd5 100644 (file)
@@ -36,17 +36,6 @@ DISPATCH_ASSUME_NONNULL_BEGIN
 
 __BEGIN_DECLS
 
-/*!
- * @const DISPATCH_SOURCE_TYPE_TIMER_WITH_AGGREGATE
- * @discussion A dispatch timer source that is part of a timer aggregate.
- * The handle is the dispatch timer aggregate object.
- * The mask specifies which flags from dispatch_source_timer_flags_t to apply.
- */
-#define DISPATCH_SOURCE_TYPE_TIMER_WITH_AGGREGATE \
-               (&_dispatch_source_type_timer_with_aggregate)
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
-DISPATCH_SOURCE_TYPE_DECL(timer_with_aggregate);
-
 /*!
  * @const DISPATCH_SOURCE_TYPE_INTERVAL
  * @discussion A dispatch source that submits the event handler block at a
@@ -69,7 +58,7 @@ DISPATCH_SOURCE_TYPE_DECL(timer_with_aggregate);
  * The mask specifies which flags from dispatch_source_timer_flags_t to apply.
  */
 #define DISPATCH_SOURCE_TYPE_INTERVAL (&_dispatch_source_type_interval)
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_SOURCE_TYPE_DECL(interval);
 
 /*!
@@ -79,8 +68,8 @@ DISPATCH_SOURCE_TYPE_DECL(interval);
  * The handle is a process identifier (pid_t).
  */
 #define DISPATCH_SOURCE_TYPE_VFS (&_dispatch_source_type_vfs)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE()
-DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vfs;
+API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE()
+DISPATCH_SOURCE_TYPE_DECL(vfs);
 
 /*!
  * @const DISPATCH_SOURCE_TYPE_VM
@@ -89,10 +78,9 @@ DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vfs;
  * This type is deprecated, use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead.
  */
 #define DISPATCH_SOURCE_TYPE_VM (&_dispatch_source_type_vm)
-__OSX_AVAILABLE_BUT_DEPRECATED_MSG(__MAC_10_7, __MAC_10_10, __IPHONE_4_3,
-               __IPHONE_8_0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead")
-DISPATCH_LINUX_UNAVAILABLE()
-DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vm;
+API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_SOURCE_TYPE_MEMORYPRESSURE",
+               macos(10.7,10.10), ios(4.3,8.0)) DISPATCH_LINUX_UNAVAILABLE()
+DISPATCH_SOURCE_TYPE_DECL(vm);
 
 /*!
  * @const DISPATCH_SOURCE_TYPE_MEMORYSTATUS
@@ -101,21 +89,26 @@ DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vm;
  * dispatch_source_memorystatus_flags_t.
  */
 #define DISPATCH_SOURCE_TYPE_MEMORYSTATUS (&_dispatch_source_type_memorystatus)
-__OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead")
-__IOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead")
-__TVOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead")
-__WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead")
+API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_SOURCE_TYPE_MEMORYPRESSURE",
+               macos(10.9, 10.12), ios(6.0, 10.0), tvos(6.0, 10.0), watchos(1.0, 3.0))
 DISPATCH_LINUX_UNAVAILABLE()
-DISPATCH_EXPORT const struct dispatch_source_type_s
-               _dispatch_source_type_memorystatus;
+DISPATCH_SOURCE_TYPE_DECL(memorystatus);
 
 /*!
  * @const DISPATCH_SOURCE_TYPE_SOCK
  * @discussion A dispatch source that monitors events on socket state changes.
  */
 #define DISPATCH_SOURCE_TYPE_SOCK (&_dispatch_source_type_sock)
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) DISPATCH_LINUX_UNAVAILABLE()
-DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_sock;
+API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_LINUX_UNAVAILABLE()
+DISPATCH_SOURCE_TYPE_DECL(sock);
+
+/*!
+ * @const DISPATCH_SOURCE_TYPE_NW_CHANNEL
+ * @discussion A dispatch source that monitors events on a network channel.
+ */
+#define DISPATCH_SOURCE_TYPE_NW_CHANNEL (&_dispatch_source_type_nw_channel)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_LINUX_UNAVAILABLE()
+DISPATCH_SOURCE_TYPE_DECL(nw_channel);
 
 __END_DECLS
 
@@ -179,6 +172,16 @@ enum {
        DISPATCH_SOCK_NOTIFY_ACK = 0x00004000,
 };
 
+/*!
+ * @enum dispatch_source_nw_channel_flags_t
+ *
+ * @constant DISPATCH_NW_CHANNEL_FLOW_ADV_UPDATE
+ * Received network channel flow advisory.
+ */
+enum {
+       DISPATCH_NW_CHANNEL_FLOW_ADV_UPDATE = 0x00000001,
+};
+
 /*!
  * @enum dispatch_source_vfs_flags_t
  *
@@ -277,10 +280,20 @@ enum {
  * @constant DISPATCH_PROC_REAP
  * The process has been reaped by the parent process via wait*().
  * This flag is deprecated and will be removed in a future release.
+ *
+ * @constant DISPATCH_PROC_EXIT_STATUS
+ * The process has exited. Specifying this flag allows the process exit status
+ * to be retrieved from the source's status value, as returned by the
+ * dispatch_source_get_extended_data() function. The macros
+ * DISPATCH_PROC_EXIT_STATUS_EXITED(), DISPATCH_PROC_EXIT_STATUS_CODE(),
+ * DISPATCH_PROC_EXIT_STATUS_SIGNALED(), DISPATCH_PROC_EXIT_STATUS_TERMSIG() and
+ * DISPATCH_PROC_EXIT_STATUS_CORE_DUMPED() can be used to examine the status
+ * value.
  */
 enum {
-       DISPATCH_PROC_REAP __OSX_AVAILABLE_BUT_DEPRECATED(
-                       __MAC_10_6, __MAC_10_9, __IPHONE_4_0, __IPHONE_7_0) = 0x10000000,
+       DISPATCH_PROC_REAP DISPATCH_ENUM_API_DEPRECATED("unsupported flag",
+                       macos(10.6,10.9), ios(4.0,7.0)) = 0x10000000,
+       DISPATCH_PROC_EXIT_STATUS DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(2.0)) = 0x04000000,
 };
 
 /*!
@@ -291,9 +304,8 @@ enum {
  */
 
 enum {
-       DISPATCH_VM_PRESSURE __OSX_AVAILABLE_BUT_DEPRECATED_MSG(
-                       __MAC_10_7, __MAC_10_10, __IPHONE_4_3, __IPHONE_8_0,
-                       "Use DISPATCH_MEMORYPRESSURE_WARN instead") = 0x80000000,
+       DISPATCH_VM_PRESSURE DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_MEMORYPRESSURE_WARN", macos(10.7, 10.10), ios(4.3, 8.0))
+                       = 0x80000000,
 };
 
 /*!
@@ -305,8 +317,7 @@ enum {
  * Restricted to the root user.
  */
 enum {
-       DISPATCH_MEMORYPRESSURE_LOW_SWAP
-                       __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x08,
+       DISPATCH_MEMORYPRESSURE_LOW_SWAP DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x08,
 };
 
 /*!
@@ -315,29 +326,17 @@ enum {
  */
 enum {
        DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL
-                       __OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead")
-                       __IOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead")
-                       __TVOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead")
-                       __WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead")
-                       = 0x01,
+                       DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_MEMORYPRESSURE_NORMAL", macos(10.9, 10.12),
+                       ios(6.0, 10.0), tvos(6.0, 10.0), watchos(1.0, 3.0)) = 0x01,
        DISPATCH_MEMORYSTATUS_PRESSURE_WARN
-                       __OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_MEMORYPRESSURE_WARN instead")
-                       __IOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_WARN instead")
-                       __TVOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_WARN instead")
-                       __WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_WARN instead")
-                       = 0x02,
+                       DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_MEMORYPRESSURE_WARN", macos(10.9, 10.12),
+                       ios(6.0, 10.0), tvos(6.0, 10.0), watchos(1.0, 3.0)) = 0x02,
        DISPATCH_MEMORYSTATUS_PRESSURE_CRITICAL
-                       __OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead")
-                       __IOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead")
-                       __TVOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead")
-                       __WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead")
-                       = 0x04,
+                       DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_MEMORYPRESSURE_CRITICAL", macos(10.9, 10.12),
+                       ios(6.0, 10.0), tvos(6.0, 10.0), watchos(1.0, 3.0)) = 0x04,
        DISPATCH_MEMORYSTATUS_LOW_SWAP
-                       __OSX_DEPRECATED(10.10, 10.12, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead")
-                       __IOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead")
-                       __TVOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead")
-                       __WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead")
-                       = 0x08,
+                       DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_MEMORYPRESSURE_LOW_SWAP", macos(10.9, 10.12),
+                       ios(6.0, 10.0), tvos(6.0, 10.0), watchos(1.0, 3.0)) = 0x08,
 };
 
 /*!
@@ -349,20 +348,116 @@ enum {
  *
  * @constant DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL
  * The memory of the process has reached 100% of its high watermark limit.
+ *
+ * @constant DISPATCH_MEMORYPRESSURE_MSL_STATUS
+ * Mask for enabling/disabling malloc stack logging.
  */
 enum {
-       DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN
-                       __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.10)
-                       __TVOS_AVAILABLE(10.10) __WATCHOS_AVAILABLE(3.0) = 0x10,
+       DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 0x10,
 
-       DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL
-               __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.10)
-               __TVOS_AVAILABLE(10.10) __WATCHOS_AVAILABLE(3.0) = 0x20,
+       DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 0x20,
+       
+       DISPATCH_MEMORYPRESSURE_MSL_STATUS DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 0xf0000000,
 };
 
+/*!
+ * Macros to check the exit status obtained from the status field of the
+ * structure returned by the dispatch_source_get_extended_data() function for a
+ * source of type DISPATCH_SOURCE_TYPE_PROC when DISPATCH_PROC_EXIT_STATUS has
+ * been requested.
+ *
+ * DISPATCH_PROC_EXIT_STATUS_EXITED returns whether the process exited. If this
+ * is true, the exit status can be obtained from DISPATCH_PROC_EXIT_STATUS_CODE.
+ *
+ * DISPATCH_PROC_EXIT_STATUS_SIGNALED returns whether the process was terminated
+ * by a signal.
+ *
+ * DISPATCH_PROC_EXIT_STATUS_TERMSIG returns the signal that caused the process
+ * to terminate, or 0 if the process was not terminated by a signal.
+ *
+ * DISPATCH_PROC_EXIT_STATUS_CORE_DUMPED returns whether a core dump of the
+ * process was created.
+ */
+#define DISPATCH_PROC_EXIT_STATUS_EXITED(status) ((bool)WIFEXITED(status))
+#define DISPATCH_PROC_EXIT_STATUS_CODE(status) ((int)WEXITSTATUS(status))
+#define DISPATCH_PROC_EXIT_STATUS_SIGNALED(status) ((bool)WIFSIGNALED(status))
+#define DISPATCH_PROC_EXIT_STATUS_TERMSIG(status) ((int)WTERMSIG(status))
+#define DISPATCH_PROC_EXIT_STATUS_CORE_DUMPED(status) ((bool)WCOREDUMP(status))
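A sketch of a process source consuming the exit status; the extended-data
struct and getter names are assumed from this header's
dispatch_source_get_extended_data() SPI:

    #include <sys/wait.h>

    static dispatch_source_t
    watch_exit_status(pid_t pid)
    {
        dispatch_source_t src = dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC,
                (uintptr_t)pid, DISPATCH_PROC_EXIT | DISPATCH_PROC_EXIT_STATUS,
                dispatch_get_main_queue());
        dispatch_source_set_event_handler(src, ^{
            struct dispatch_source_extended_data_s ext = { 0 };
            dispatch_source_get_extended_data(src, &ext, sizeof(ext));
            if (DISPATCH_PROC_EXIT_STATUS_EXITED(ext.status)) {
                int code = DISPATCH_PROC_EXIT_STATUS_CODE(ext.status);
                (void)code; // normal exit with 'code'
            } else if (DISPATCH_PROC_EXIT_STATUS_SIGNALED(ext.status)) {
                int sig = DISPATCH_PROC_EXIT_STATUS_TERMSIG(ext.status);
                (void)sig;  // terminated by signal 'sig'
            }
        });
        dispatch_activate(src);
        return src;
    }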
 
 __BEGIN_DECLS
 
+/*!
+ * @function dispatch_source_set_mandatory_cancel_handler
+ *
+ * @abstract
+ * Sets the cancellation handler block for the given dispatch source, and
+ * indicates that calling dispatch_source_cancel() is mandatory for this
+ * source object.
+ *
+ * @discussion
+ * The cancellation handler (if specified) will be submitted to the source's
+ * target queue in response to a call to dispatch_source_cancel() once the
+ * system has released all references to the source's underlying handle and
+ * the source's event handler block has returned.
+ *
+ * When this function has been used to set a cancellation handler, each of
+ * the following results in an assertion and the process being terminated:
+ * - releasing the last reference on the dispatch source without having
+ *   cancelled it by calling dispatch_source_cancel();
+ * - changing any handler after the source has been activated;
+ * - changing the target queue of the source after it has been activated.
+ *
+ * IMPORTANT:
+ * Source cancellation and a cancellation handler are required for file
+ * descriptor and mach port based sources in order to safely close the
+ * descriptor or destroy the port. Making the cancellation handler of such
+ * sources mandatory is strongly recommended.
+ * Closing the descriptor or port before the cancellation handler is invoked may
+ * result in a race condition. If a new descriptor is allocated with the same
+ * value as the recently closed descriptor while the source's event handler is
+ * still running, the event handler may read/write data to the wrong descriptor.
+ *
+ * @param source
+ * The dispatch source to modify.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param handler
+ * The cancellation handler block to submit to the source's target queue.
+ * The result of passing NULL in this parameter is undefined.
+ */
+#ifdef __BLOCKS__
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_source_set_mandatory_cancel_handler(dispatch_source_t source,
+               dispatch_block_t handler);
+#endif /* __BLOCKS__ */
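A rough usage sketch under stated assumptions (`fd` and `queue` exist; this
private API otherwise behaves like dispatch_source_set_cancel_handler()): the
descriptor is closed only in the mandatory handler, and forgetting to cancel
traps instead of silently racing:

	// Hedged sketch: read source with enforced cancellation.
	dispatch_source_t s = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ,
			(uintptr_t)fd, 0, queue);
	dispatch_source_set_event_handler(s, ^{ /* read from fd */ });
	dispatch_source_set_mandatory_cancel_handler(s, ^{
		close(fd); // safe: the system is done with fd by this point
	});
	dispatch_activate(s);
	// ... later, before releasing the last reference:
	dispatch_source_cancel(s);
	dispatch_release(s);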
+
+/*!
+ * @function dispatch_source_set_mandatory_cancel_handler_f
+ *
+ * @abstract
+ * Sets the event handler function for the given dispatch source, and causes an
+ * assertion if this source is released before having been explicitly canceled.
+ *
+ * @discussion
+ * See dispatch_source_set_mandatory_cancel_handler() for more details.
+ *
+ * @param source
+ * The dispatch source to modify.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param handler
+ * The cancellation handler function to submit to the source's target queue.
+ * The context parameter passed to the cancellation handler function is the
+ * current context of the dispatch source at the time the handler call is made.
+ * The result of passing NULL in this parameter is undefined.
+ */
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_source_set_mandatory_cancel_handler_f(dispatch_source_t source,
+               dispatch_function_t handler);
+
 /*!
  * @function dispatch_source_cancel_and_wait
  *
@@ -408,64 +503,11 @@ __BEGIN_DECLS
  * The dispatch source to be canceled.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.10)
-__TVOS_AVAILABLE(10.10) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW
 void
 dispatch_source_cancel_and_wait(dispatch_source_t source);
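As a hedged sketch of the intended pattern (assuming, per the discussion
elided above, a source with no cancellation handler and no concurrent users),
synchronous cancellation lets the caller reclaim the handle inline:

	// Hedged sketch: cancel synchronously, then reclaim the descriptor.
	dispatch_source_cancel_and_wait(source);
	close(fd); // the source no longer references fd once the call returns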
 
-/*!
- * @typedef dispatch_timer_aggregate_t
- *
- * @abstract
- * Dispatch timer aggregates are sets of related timers.
- */
-DISPATCH_DECL(dispatch_timer_aggregate);
-
-/*!
- * @function dispatch_timer_aggregate_create
- *
- * @abstract
- * Creates a new dispatch timer aggregate.
- *
- * @discussion
- * A dispatch timer aggregate is a set of related timers whose overall timing
- * parameters can be queried.
- *
- * Timers are added to an aggregate when a timer source is created with type
- * DISPATCH_SOURCE_TYPE_TIMER_WITH_AGGREGATE.
- *
- * @result
- * The newly created dispatch timer aggregate.
- */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
-DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
-DISPATCH_NOTHROW
-dispatch_timer_aggregate_t
-dispatch_timer_aggregate_create(void);
-
-/*!
- * @function dispatch_timer_aggregate_get_delay
- *
- * @abstract
- * Retrieves the delay until a timer in the given aggregate will next fire.
- *
- * @param aggregate
- * The dispatch timer aggregate to query.
- *
- * @param leeway_ptr
- * Optional pointer to a variable filled with the leeway (in ns) that will be
- * applied to the return value. May be NULL.
- *
- * @result
- * Delay in ns from now.
- */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
-DISPATCH_EXPORT DISPATCH_NOTHROW
-uint64_t
-dispatch_timer_aggregate_get_delay(dispatch_timer_aggregate_t aggregate,
-               uint64_t *_Nullable leeway_ptr);
-
 #if __has_include(<mach/mach.h>)
 /*!
  * @typedef dispatch_mig_callback_t
@@ -476,7 +518,7 @@ dispatch_timer_aggregate_get_delay(dispatch_timer_aggregate_t aggregate,
 typedef boolean_t (*dispatch_mig_callback_t)(mach_msg_header_t *message,
                mach_msg_header_t *reply);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE()
+API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE()
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 mach_msg_return_t
 dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz,
@@ -488,13 +530,66 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz,
  * @abstract
  * Extract the context pointer from a mach message trailer.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE()
+API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE()
 DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NONNULL_ALL
 DISPATCH_NOTHROW
 void *_Nullable
 dispatch_mach_msg_get_context(mach_msg_header_t *msg);
 #endif
 
+/*!
+ * @typedef dispatch_source_extended_data_t
+ *
+ * @abstract
+ * Type used by dispatch_source_get_extended_data() to return a consistent
+ * snapshot of the data and status of a dispatch source.
+ */
+typedef struct dispatch_source_extended_data_s {
+    unsigned long data;
+    unsigned long status;
+} *dispatch_source_extended_data_t;
+
+/*!
+ * @function dispatch_source_get_extended_data
+ *
+ * @abstract
+ * Returns the current data and status values for a dispatch source.
+ *
+ * @discussion
+ * This function is intended to be called from within the event handler block.
+ * The result of calling this function outside of the event handler callback is
+ * undefined.
+ *
+ * @param source
+ * The dispatch source to query.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param data
+ * A pointer to a dispatch_source_extended_data_s in which the data and status
+ * will be returned. The data field is populated with the value that would be
+ * returned by dispatch_source_get_data(). The value of the status field should
+ * be interpreted according to the type of the dispatch source:
+ *
+ *  DISPATCH_SOURCE_TYPE_PROC:            dispatch_source_proc_exit_flags_t
+ *
+ * If called from the event handler of a source type not listed above, the
+ * status value is undefined.
+ *
+ * @param size
+ * The size of the specified structure. Should be set to
+ * sizeof(dispatch_source_extended_data_s).
+ *
+ * @result
+ * The size of the structure returned in *data, which will never be greater than
+ * the value of the size argument. If this is less than the value of the size
+ * argument, the remaining space in data will have been populated with zeroes.
+ */
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE
+DISPATCH_NOTHROW
+size_t
+dispatch_source_get_extended_data(dispatch_source_t source,
+               dispatch_source_extended_data_t data, size_t size);
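Within the event handler of a DISPATCH_SOURCE_TYPE_PROC source that requested
DISPATCH_PROC_EXIT_STATUS, usage might look like this hedged sketch (`source`
and <stdio.h> are assumed):

	dispatch_source_set_event_handler(source, ^{
		struct dispatch_source_extended_data_s ext = { 0, 0 };
		size_t n = dispatch_source_get_extended_data(source, &ext,
				sizeof(ext));
		// ext.data matches what dispatch_source_get_data() would return
		if (n == sizeof(ext) &&
				DISPATCH_PROC_EXIT_STATUS_EXITED(ext.status)) {
			printf("exit code %d\n",
					DISPATCH_PROC_EXIT_STATUS_CODE(ext.status));
		}
	});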
+
 __END_DECLS
 
 DISPATCH_ASSUME_NONNULL_END
index 08371764817858cdfc40fa6bc9bc3cf0b7e4d68f..dd36051d9dd7cc1deda7914afd2d263287da2da8 100644 (file)
@@ -14,7 +14,7 @@ We allocate space and export a symbol to be used as the Class for the on-stack a
 
 We keep these in a separate file so that we can include the runtime code in test subprojects but not include the data so that compiled code that sees the data in libSystem doesn't get confused by a second copy.  Somehow these don't get unified in a common block.
 **********************/
-#define BLOCK_EXPORT extern __attribute__((visibility("default")))
+#define BLOCK_EXPORT __attribute__((visibility("default")))
 
 BLOCK_EXPORT void * _NSConcreteStackBlock[32] = { 0 };
 BLOCK_EXPORT void * _NSConcreteMallocBlock[32] = { 0 };
index 1e1063684f3cf5ba981764d78b00a7e92b433189..8c98e8d1eda0322a8733440f769d5f5b6544c066 100644 (file)
 #include <stdlib.h>
 #include <string.h>
 #include <stdint.h>
+#if HAVE_OBJC
 #define __USE_GNU
 #include <dlfcn.h>
+#endif
 #if __has_include(<os/assumes.h>)
 #include <os/assumes.h>
 #else
@@ -51,9 +53,11 @@ static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile
 Globals
 ************************/
 
+#if HAVE_OBJC
 static void *_Block_copy_class = _NSConcreteMallocBlock;
 static void *_Block_copy_finalizing_class = _NSConcreteMallocBlock;
 static int _Block_copy_flag = BLOCK_NEEDS_FREE;
+#endif
 static int _Byref_flag_initial_value = BLOCK_BYREF_NEEDS_FREE | 4;  // logical 2
 
 static bool isGC = false;
@@ -144,6 +148,8 @@ GC support stub routines
 
 
 static void *_Block_alloc_default(const unsigned long size, const bool initialCountIsOne, const bool isObject) {
+       (void)initialCountIsOne;
+       (void)isObject;
     return malloc(size);
 }
 
@@ -152,14 +158,20 @@ static void _Block_assign_default(void *value, void **destptr) {
 }
 
 static void _Block_setHasRefcount_default(const void *ptr, const bool hasRefcount) {
+       (void)ptr;
+       (void)hasRefcount;
 }
 
+#if HAVE_OBJC
 static void _Block_do_nothing(const void *aBlock) { }
+#endif
 
 static void _Block_retain_object_default(const void *ptr) {
+       (void)ptr;
 }
 
 static void _Block_release_object_default(const void *ptr) {
+       (void)ptr;
 }
 
 static void _Block_assign_weak_default(const void *ptr, void *dest) {
@@ -174,6 +186,7 @@ static void _Block_memmove_default(void *dst, void *src, unsigned long size) {
     memmove(dst, src, (size_t)size);
 }
 
+#if HAVE_OBJC
 static void _Block_memmove_gc_broken(void *dest, void *src, unsigned long size) {
     void **destp = (void **)dest;
     void **srcp = (void **)src;
@@ -184,8 +197,11 @@ static void _Block_memmove_gc_broken(void *dest, void *src, unsigned long size)
         size -= sizeof(void *);
     }
 }
+#endif
 
-static void _Block_destructInstance_default(const void *aBlock) {}
+static void _Block_destructInstance_default(const void *aBlock) {
+       (void)aBlock;
+}
 
 /**************************************************************************
 GC support callout functions - initially set to stub routines
@@ -202,6 +218,7 @@ static void (*_Block_memmove)(void *dest, void *src, unsigned long size) = _Bloc
 static void (*_Block_destructInstance) (const void *aBlock) = _Block_destructInstance_default;
 
 
+#if HAVE_OBJC
 /**************************************************************************
 GC support SPI functions - called from ObjC runtime and CoreFoundation
 ***************************************************************************/
@@ -252,6 +269,7 @@ void _Block_use_RR( void (*retain)(const void *),
     _Block_release_object = release;
     _Block_destructInstance = dlsym(RTLD_DEFAULT, "objc_destructInstance");
 }
+#endif // HAVE_OBJC
 
 // Called from CF to indicate MRR. Newer version uses a versioned structure, so we can add more functions
 // without defining a new entry point.
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
new file mode 100644 (file)
index 0000000..2ec2691
--- /dev/null
@@ -0,0 +1,204 @@
+
+include(SwiftSupport)
+include(DTrace)
+
+add_library(dispatch
+              allocator.c
+              apply.c
+              benchmark.c
+              data.c
+              init.c
+              introspection.c
+              io.c
+              mach.c
+              object.c
+              once.c
+              queue.c
+              semaphore.c
+              source.c
+              time.c
+              transform.c
+              voucher.c
+              protocol.defs
+              provider.d
+              allocator_internal.h
+              data_internal.h
+              inline_internal.h
+              internal.h
+              introspection_internal.h
+              io_internal.h
+              mach_internal.h
+              object_internal.h
+              queue_internal.h
+              semaphore_internal.h
+              shims.h
+              source_internal.h
+              trace.h
+              voucher_internal.h
+              event/event.c
+              event/event_config.h
+              event/event_epoll.c
+              event/event_internal.h
+              event/event_kevent.c
+              firehose/firehose_internal.h
+              shims/android_stubs.h
+              shims/atomic.h
+              shims/atomic_sfb.h
+              shims/getprogname.h
+              shims/hw_config.h
+              shims/linux_stubs.c
+              shims/linux_stubs.h
+              shims/lock.c
+              shims/lock.h
+              shims/perfmon.h
+              shims/time.h
+              shims/tsd.h
+              shims/yield.h)
+if(DISPATCH_USE_INTERNAL_WORKQUEUE)
+  target_sources(dispatch
+                 PRIVATE
+                   event/workqueue.c
+                   event/workqueue_internal.h)
+endif()
+target_sources(dispatch
+               PRIVATE
+                 block.cpp)
+if(HAVE_OBJC)
+  target_sources(dispatch
+                 PRIVATE
+                   data.m
+                   object.m)
+endif()
+if(CMAKE_SWIFT_COMPILER)
+  set(swift_optimization_flags)
+  if(CMAKE_BUILD_TYPE MATCHES Release)
+    set(swift_optimization_flags -O)
+  endif()
+  add_swift_library(swiftDispatch
+                    MODULE_NAME
+                      Dispatch
+                    MODULE_LINK_NAME
+                      dispatch
+                    MODULE_PATH
+                      ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftmodule
+                    OUTPUT
+                      ${CMAKE_CURRENT_BINARY_DIR}/swiftDispatch.o
+                    SOURCES
+                      swift/Block.swift
+                      swift/Data.swift
+                      swift/Dispatch.swift
+                      swift/IO.swift
+                      swift/Private.swift
+                      swift/Queue.swift
+                      swift/Source.swift
+                      swift/Time.swift
+                      swift/Wrapper.swift
+                    CFLAGS
+                      -fblocks
+                      -fmodule-map-file=${CMAKE_SOURCE_DIR}/dispatch/module.modulemap
+                    SWIFT_FLAGS
+                      -I ${CMAKE_SOURCE_DIR}
+                      ${swift_optimization_flags})
+  target_sources(dispatch
+                 PRIVATE
+                   swift/DispatchStubs.cc
+                   ${CMAKE_CURRENT_BINARY_DIR}/swiftDispatch.o)
+endif()
+if(dtrace_EXECUTABLE)
+  dtrace_usdt_probe(${CMAKE_CURRENT_SOURCE_DIR}/provider.d
+                    OUTPUT_SOURCES
+                      dispatch_dtrace_provider_headers)
+  target_sources(dispatch
+                 PRIVATE
+                   ${dispatch_dtrace_provider_headers})
+endif()
+target_include_directories(dispatch
+                           PRIVATE
+                             ${CMAKE_BINARY_DIR}
+                             ${CMAKE_SOURCE_DIR}
+                             ${CMAKE_CURRENT_SOURCE_DIR}
+                             ${CMAKE_CURRENT_BINARY_DIR}
+                             ${CMAKE_SOURCE_DIR}/private)
+if(WITH_BLOCKS_RUNTIME)
+  target_include_directories(dispatch
+                             SYSTEM BEFORE PRIVATE
+                               "${WITH_BLOCKS_RUNTIME}")
+endif()
+if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC")
+  target_compile_options(dispatch PRIVATE /EHsc-)
+else()
+  target_compile_options(dispatch PRIVATE -fno-exceptions)
+endif()
+if(DISPATCH_ENABLE_ASSERTS)
+  target_compile_definitions(dispatch
+                             PRIVATE
+                               -DDISPATCH_DEBUG=1)
+endif()
+if(CMAKE_SYSTEM_NAME STREQUAL Windows)
+  target_compile_definitions(dispatch
+                             PRIVATE
+                               -D_CRT_SECURE_NO_WARNINGS)
+endif()
+if(BSD_OVERLAY_FOUND)
+  target_compile_options(dispatch
+                         PRIVATE
+                           ${BSD_OVERLAY_CFLAGS})
+endif()
+if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC")
+  target_compile_options(dispatch
+                         PRIVATE
+                           /W3)
+else()
+  target_compile_options(dispatch
+                         PRIVATE
+                           -Wall)
+endif()
+# FIXME(compnerd) add check for -fblocks?
+if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC")
+  target_compile_options(dispatch
+                         PRIVATE
+                           -Xclang -fblocks)
+else()
+  # FIXME(compnerd) add check for -momit-leaf-frame-pointer?
+  target_compile_options(dispatch
+                         PRIVATE
+                           -fblocks
+                           -momit-leaf-frame-pointer)
+endif()
+if(BSD_OVERLAY_FOUND)
+  target_link_libraries(dispatch PRIVATE ${BSD_OVERLAY_LDFLAGS})
+endif()
+target_link_libraries(dispatch PRIVATE Threads::Threads)
+if(WITH_BLOCKS_RUNTIME)
+  target_link_libraries(dispatch PRIVATE BlocksRuntime)
+endif()
+if(CMAKE_SYSTEM_NAME STREQUAL Darwin)
+  set_property(TARGET dispatch
+               APPEND_STRING
+               PROPERTY LINK_FLAGS
+                 "-Xlinker -compatibility_version -Xlinker 1"
+                 "-Xlinker -current_version -Xlinker ${VERSION}"
+                 "-Xlinker -dead_strip"
+                 "-Xlinker -alias_list -Xlinker ${CMAKE_SOURCE_DIR}/xcodeconfig/libdispatch.aliases")
+endif()
+if(USE_GOLD_LINKER)
+  set_property(TARGET dispatch
+               APPEND_STRING
+               PROPERTY LINK_FLAGS
+                 -fuse-ld=gold)
+endif()
+
+# Temporary staging; the various Swift projects that depend on libdispatch
+# all expect libdispatch.so to be in src/.libs/libdispatch.so.
+# So for now, make a copy so we don't have to do a coordinated commit across
+# all the Swift projects to change this assumption.
+add_custom_command(TARGET dispatch POST_BUILD
+                   COMMAND cmake -E make_directory .libs
+                   COMMAND cmake -E copy $<TARGET_FILE:dispatch> .libs
+                   COMMENT "Copying libdispatch to .libs")
+
+install(TARGETS
+          dispatch
+        DESTINATION
+          "${CMAKE_INSTALL_FULL_LIBDIR}")
+
index 9848c6baf8e123569658bdfe312265089e5236a0..8beaf1e8576baa3c3e257768323fc955e63bdb60 100644 (file)
@@ -3,44 +3,58 @@
 #
 
 if HAVE_SWIFT
-swiftlibdir=${prefix}/lib/swift/linux
+swiftlibdir=${prefix}/lib/swift/${OS_STRING}
 swiftlib_LTLIBRARIES=libdispatch.la
 else
 lib_LTLIBRARIES=libdispatch.la
 endif
 
-libdispatch_la_SOURCES=                \
-       allocator.c                             \
-       apply.c                                 \
-       benchmark.c                             \
-       data.c                                  \
+if DISPATCH_USE_INTERNAL_WORKQUEUE
+INTERNAL_WORKQUEUE_SOURCES=    \
+       event/workqueue.c               \
+       event/workqueue_internal.h
+endif
+
+libdispatch_la_SOURCES=                        \
+       allocator.c                     \
+       apply.c                         \
+       benchmark.c                     \
+       data.c                          \
+       init.c                          \
        introspection.c                 \
-       init.c                                  \
-       io.c                                    \
-       object.c                                \
-       once.c                                  \
-       queue.c                                 \
-       semaphore.c                             \
-       source.c                                \
-       time.c                                  \
-       transform.c                             \
-       voucher.c                               \
+       io.c                            \
+       mach.c                          \
+       object.c                        \
+       once.c                          \
+       queue.c                         \
+       semaphore.c                     \
+       source.c                        \
+       time.c                          \
+       transform.c                     \
+       voucher.c                       \
        protocol.defs                   \
-       provider.d                              \
-       allocator_internal.h    \
+       provider.d                      \
+       allocator_internal.h            \
        data_internal.h                 \
        inline_internal.h               \
-       internal.h                              \
+       internal.h                      \
        introspection_internal.h        \
        io_internal.h                   \
+       mach_internal.h                 \
        object_internal.h               \
        queue_internal.h                \
-       semaphore_internal.h    \
-       shims.h                                 \
+       semaphore_internal.h            \
+       shims.h                         \
        source_internal.h               \
-       trace.h                                 \
+       trace.h                         \
        voucher_internal.h              \
-       firehose/firehose_internal.h \
+       event/event.c                   \
+       event/event_config.h            \
+       event/event_epoll.c             \
+       event/event_internal.h          \
+       event/event_kevent.c            \
+       firehose/firehose_internal.h    \
+       shims/android_stubs.h   \
        shims/atomic.h                  \
        shims/atomic_sfb.h              \
        shims/getprogname.h             \
@@ -51,8 +65,9 @@ libdispatch_la_SOURCES=               \
        shims/lock.h                    \
        shims/perfmon.h                 \
        shims/time.h                    \
-       shims/tsd.h                             \
-       shims/yield.h
+       shims/tsd.h                     \
+       shims/yield.h                   \
+       $(INTERNAL_WORKQUEUE_SOURCES)
 
 EXTRA_libdispatch_la_SOURCES=
 EXTRA_libdispatch_la_DEPENDENCIES=
@@ -64,34 +79,27 @@ DISPATCH_CFLAGS=-Wall $(VISIBILITY_FLAGS) $(OMIT_LEAF_FP_FLAGS) \
 if DISPATCH_ENABLE_ASSERTS
 DISPATCH_CFLAGS+=-DDISPATCH_DEBUG=1
 endif
-AM_CFLAGS= $(KQUEUE_CFLAGS) $(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS)
+AM_CFLAGS= $(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS)
 AM_OBJCFLAGS=$(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS)
-AM_CXXFLAGS=$(KQUEUE_CFLAGS) $(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS)
+AM_CXXFLAGS=$(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS)
 AM_OBJCXXFLAGS=$(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS)
 
-if BUILD_OWN_KQUEUES
-  KQUEUE_LIBS+=$(top_builddir)/libkqueue/libkqueue.la
-  KQUEUE_CFLAGS+=-I$(top_srcdir)/libkqueue/include
-endif
-
-if BUILD_OWN_PTHREAD_WORKQUEUES
-  PTHREAD_WORKQUEUE_LIBS=$(top_builddir)/libpwq/libpthread_workqueue.la
-  PTHREAD_WORKQUEUE_CFLAGS=-I$(top_srcdir)/libpwq/include
-else
 if HAVE_PTHREAD_WORKQUEUES
   PTHREAD_WORKQUEUE_LIBS=-lpthread_workqueue
-endif
+  PTHREAD_WORKQUEUE_CFLAGS=
 endif
 
 if BUILD_OWN_BLOCKS_RUNTIME
 libdispatch_la_SOURCES+= BlocksRuntime/data.c BlocksRuntime/runtime.c
 CBLOCKS_FLAGS+= -I$(top_srcdir)/src/BlocksRuntime
 CXXBLOCKS_FLAGS+= -I$(top_srcdir)/src/BlocksRuntime
+if USE_OBJC
 BLOCKS_RUNTIME_LIBS=-ldl
 endif
+endif
 
 libdispatch_la_LDFLAGS=-avoid-version
-libdispatch_la_LIBADD=$(KQUEUE_LIBS) $(PTHREAD_WORKQUEUE_LIBS) $(BSD_OVERLAY_LIBS) $(BLOCKS_RUNTIME_LIBS)
+libdispatch_la_LIBADD=$(PTHREAD_WORKQUEUE_LIBS) $(BSD_OVERLAY_LIBS) $(BLOCKS_RUNTIME_LIBS)
 
 if HAVE_DARWIN_LD
 libdispatch_la_LDFLAGS+=-Wl,-compatibility_version,1 \
@@ -146,53 +154,47 @@ SWIFT_SRC_FILES=\
        swift/Wrapper.swift
 
 SWIFT_ABS_SRC_FILES = $(SWIFT_SRC_FILES:%=$(abs_srcdir)/%)
-SWIFT_OBJ_FILES = $(SWIFT_SRC_FILES:%.swift=$(abs_builddir)/%.o)
-
-libdispatch_la_SOURCES+=swift/DispatchStubs.cc
-EXTRA_libdispatch_la_SOURCES+=$(SWIFT_SRC_FILES)
-
-EXTRA_libdispatch_la_DEPENDENCIES+=$(SWIFT_OBJ_FILES) $(abs_builddir)/swift/Dispatch.swiftmodule
-libdispatch_la_LIBADD+=$(SWIFT_OBJ_FILES)
+SWIFT_OBJ_FILES = $(abs_builddir)/swift/swift_overlay.o
+SWIFT_LIBTOOL_OBJ_FILES = $(abs_builddir)/swift/swift_overlay.lo
 
-SWIFT_GEN_FILES=       \
-       $(abs_builddir)/swift/Dispatch.swiftmodule \
-       $(abs_builddir)/swift/Dispatch.swiftdoc \
-       $(SWIFT_OBJ_FILES) \
-       $(SWIFT_OBJ_FILES:%=%.d) \
-       $(SWIFT_OBJ_FILES:%=%.swiftdeps) \
-       $(SWIFT_OBJ_FILES:%=%.~partial.swiftmodule) \
-       $(SWIFT_OBJ_FILES:%=%.~partial.swiftdoc) \
-       $(SWIFT_OBJ_FILES:%=%.~partial.swiftdeps)
-
-SWIFTC_FLAGS = -Xcc -fmodule-map-file=$(abs_top_srcdir)/dispatch/module.modulemap -I$(abs_top_srcdir) -Xcc -fblocks
+SWIFTC_FLAGS+= -Xcc -fmodule-map-file=$(abs_top_srcdir)/dispatch/module.modulemap -I$(abs_top_srcdir) -Xcc -fblocks
 if DISPATCH_ENABLE_OPTIMIZATION
 SWIFTC_FLAGS+=-O
 endif
 
-$(abs_builddir)/swift/%.o:     $(abs_srcdir)/swift/%.swift $(SWIFTC)
+# This saves the object file, then tricks libtool into generating a .lo file,
+# and then moves the object file back into the places libtool expects it to be
+# for the PIC and non-PIC cases.
+$(abs_builddir)/swift/swift_overlay.lo:        $(abs_builddir)/swift/swift_overlay.o
+       mv $(abs_builddir)/swift/swift_overlay.o $(abs_builddir)/swift/.libs/swift_overlay.o.save
+       $(LIBTOOL) --mode=compile --tag=CC true -o $< -c /dev/null
+       cp $(abs_builddir)/swift/.libs/swift_overlay.o.save $(abs_builddir)/swift/.libs/swift_overlay.o
+       mv $(abs_builddir)/swift/.libs/swift_overlay.o.save $(abs_builddir)/swift/swift_overlay.o
+
+$(abs_builddir)/swift/swift_overlay.o: $(SWIFT_ABS_SRC_FILES) $(SWIFTC)
        @rm -f $@
-       $(SWIFTC) -frontend -c $(SWIFT_ABS_SRC_FILES) -primary-file $< \
+       $(SWIFTC) -whole-module-optimization -emit-library -c $(SWIFT_ABS_SRC_FILES) \
        $(SWIFTC_FLAGS) -module-name Dispatch -module-link-name dispatch \
-       -o $@ -emit-module-path $@.~partial.swiftmodule \
-       -emit-module-doc-path $@.~partial.swiftdoc -emit-dependencies-path $@.d \
-       -emit-reference-dependencies-path $@.swiftdeps \
-       -module-cache-path $(top_builddir)
+       -o $@ -emit-module-path $(abs_builddir)/swift/Dispatch.swiftmodule
 
-$(abs_builddir)/swift/Dispatch.swiftmodule: $(SWIFT_ABS_SRC_FILES) $(SWIFTC)
-       @rm -f $@
-       $(SWIFTC) -frontend -emit-module $(SWIFT_OBJ_FILES:%=%.~partial.swiftmodule) \
-       $(SWIFTC_FLAGS) -module-cache-path $(top_builddir) -module-link-name dispatch \
-       -o $@ -emit-module-doc-path $(@:%.swiftmodule=%.swiftdoc)
+libdispatch_la_SOURCES+=swift/DispatchStubs.cc
+EXTRA_libdispatch_la_SOURCES+=$(SWIFT_SRC_FILES)
+
+EXTRA_libdispatch_la_DEPENDENCIES+=$(SWIFT_OBJ_FILES) $(SWIFT_LIBTOOL_OBJ_FILES) $(abs_builddir)/swift/Dispatch.swiftmodule
+libdispatch_la_LIBADD+=$(SWIFT_LIBTOOL_OBJ_FILES)
 
-swiftmoddir=${prefix}/lib/swift/linux/${build_cpu}
+SWIFT_GEN_FILES=       \
+       $(abs_builddir)/swift/Dispatch.swiftmodule \
+       $(abs_builddir)/swift/Dispatch.swiftdoc \
+       $(SWIFT_OBJ_FILES)
+
+swiftmoddir=${prefix}/lib/swift/${OS_STRING}/${host_cpu}
 swiftmod_HEADERS=\
        $(abs_builddir)/swift/Dispatch.swiftmodule \
        $(abs_builddir)/swift/Dispatch.swiftdoc
-
 endif
 
 BUILT_SOURCES=$(MIG_SOURCES) $(DTRACE_SOURCES)
 nodist_libdispatch_la_SOURCES=$(BUILT_SOURCES)
 CLEANFILES=$(BUILT_SOURCES) $(SWIFT_GEN_FILES)
 DISTCLEANFILES=pthread_machdep.h pthread System mach objc
-
index a3a8c650af98836f0218092f919cdd77265d4782..e6ea77217854efe0c70c232ad193bd35ac1c2db7 100644 (file)
@@ -274,22 +274,16 @@ mark_bitmap_as_full_if_still_full(volatile bitmap_t *supermap,
        dispatch_assert(bitmap_index < BITMAPS_PER_SUPERMAP);
 #endif
        const bitmap_t mask = BITMAP_C(1) << bitmap_index;
-       bitmap_t s, s_new, s_masked;
+       bitmap_t s, s_new;
 
-       if (!bitmap_is_full(*bitmap)) {
-               return;
-       }
-       s_new = *supermap;
-       for (;;) {
-               // No barriers because supermaps are only advisory, they
-               // don't protect access to other memory.
-               s = s_new;
-               s_masked = s | mask;
-               if (os_atomic_cmpxchgvw(supermap, s, s_masked, &s_new, relaxed) ||
-                               !bitmap_is_full(*bitmap)) {
-                       return;
+       // No barriers because supermaps are only advisory, they
+       // don't protect access to other memory.
+       os_atomic_rmw_loop(supermap, s, s_new, relaxed, {
+               if (!bitmap_is_full(*bitmap)) {
+                       os_atomic_rmw_loop_give_up(return);
                }
-       }
+               s_new = s | mask;
+       });
 }
 
 #pragma mark -
index e051a1630f879294ad4dfb09fcf85eb450e339bb..6f44cf90b7fe1299994820970e93da646764f32a 100644 (file)
@@ -35,7 +35,7 @@ _dispatch_apply_invoke2(void *ctxt, long invoke_flags)
        size_t idx, done = 0;
 
        idx = os_atomic_inc_orig2o(da, da_index, acquire);
-       if (!fastpath(idx < iter)) goto out;
+       if (unlikely(idx >= iter)) goto out;
 
        // da_dc is only safe to access once the 'index lock' has been acquired
        dispatch_apply_function_t const func = (void *)da->da_dc->dc_func;
@@ -52,10 +52,10 @@ _dispatch_apply_invoke2(void *ctxt, long invoke_flags)
        _dispatch_thread_context_push(&apply_ctxt);
 
        dispatch_thread_frame_s dtf;
-       pthread_priority_t old_dp;
+       dispatch_priority_t old_dbp = 0;
        if (invoke_flags & DISPATCH_APPLY_INVOKE_REDIRECT) {
                _dispatch_thread_frame_push(&dtf, dq);
-               old_dp = _dispatch_set_defaultpriority(dq->dq_priority, NULL);
+               old_dbp = _dispatch_set_basepri(dq->dq_priority);
        }
        dispatch_invoke_flags_t flags = da->da_flags;
 
@@ -67,10 +67,10 @@ _dispatch_apply_invoke2(void *ctxt, long invoke_flags)
                        done++;
                        idx = os_atomic_inc_orig2o(da, da_index, relaxed);
                });
-       } while (fastpath(idx < iter));
+       } while (likely(idx < iter));
 
        if (invoke_flags & DISPATCH_APPLY_INVOKE_REDIRECT) {
-               _dispatch_reset_defaultpriority(old_dp);
+               _dispatch_reset_basepri(old_dbp);
                _dispatch_thread_frame_pop(&dtf);
        }
 
@@ -124,7 +124,7 @@ _dispatch_apply_autorelease_frequency(dispatch_queue_t dq)
 
        while (dq && !qaf) {
                qaf = _dispatch_queue_autorelease_frequency(dq);
-               dq = slowpath(dq->do_targetq);
+               dq = dq->do_targetq;
        }
        return qaf;
 }
@@ -159,11 +159,11 @@ static inline void
 _dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da,
                dispatch_function_t func)
 {
-       uint32_t i = 0;
+       int32_t i = 0;
        dispatch_continuation_t head = NULL, tail = NULL;
 
        // The current thread does not need a continuation
-       uint32_t continuation_cnt = da->da_thr_cnt - 1;
+       int32_t continuation_cnt = da->da_thr_cnt - 1;
 
        dispatch_assert(continuation_cnt);
 
@@ -181,9 +181,8 @@ _dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da,
        }
 
        _dispatch_thread_event_init(&da->da_event);
-
-       _dispatch_queue_push_list(dq, head, tail, head->dc_priority,
-                       continuation_cnt);
+       // FIXME: dq may not be the right queue for the priority of `head`
+       _dispatch_root_queue_push_inline(dq, head, tail, continuation_cnt);
        // Call the first element directly
        _dispatch_apply_invoke_and_wait(da);
 }
@@ -193,19 +192,19 @@ static void
 _dispatch_apply_redirect(void *ctxt)
 {
        dispatch_apply_t da = (dispatch_apply_t)ctxt;
-       uint32_t da_width = da->da_thr_cnt - 1;
+       int32_t da_width = da->da_thr_cnt - 1;
        dispatch_queue_t dq = da->da_dc->dc_data, rq = dq, tq;
 
        do {
-               uint32_t width = _dispatch_queue_try_reserve_apply_width(rq, da_width);
+               int32_t width = _dispatch_queue_try_reserve_apply_width(rq, da_width);
 
-               if (slowpath(da_width > width)) {
-                       uint32_t excess = da_width - width;
+               if (unlikely(da_width > width)) {
+                       int32_t excess = da_width - width;
                        for (tq = dq; tq != rq; tq = tq->do_targetq) {
                                _dispatch_queue_relinquish_width(tq, excess);
                        }
                        da_width -= excess;
-                       if (slowpath(!da_width)) {
+                       if (unlikely(!da_width)) {
                                return _dispatch_apply_serial(da);
                        }
                        da->da_thr_cnt -= excess;
@@ -217,45 +216,69 @@ _dispatch_apply_redirect(void *ctxt)
                        da->da_flags = _dispatch_queue_autorelease_frequency(dq);
                }
                rq = rq->do_targetq;
-       } while (slowpath(rq->do_targetq));
+       } while (unlikely(rq->do_targetq));
        _dispatch_apply_f2(rq, da, _dispatch_apply_redirect_invoke);
        do {
                _dispatch_queue_relinquish_width(dq, da_width);
                dq = dq->do_targetq;
-       } while (slowpath(dq->do_targetq));
+       } while (unlikely(dq->do_targetq));
 }
 
 #define DISPATCH_APPLY_MAX UINT16_MAX // must be < sqrt(SIZE_MAX)
 
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_queue_t
+_dispatch_apply_root_queue(dispatch_queue_t dq)
+{
+       if (dq) {
+               while (unlikely(dq->do_targetq)) {
+                       dq = dq->do_targetq;
+               }
+               // if the current root queue is a pthread root queue, select it
+               if (!_dispatch_priority_qos(dq->dq_priority)) {
+                       return dq;
+               }
+       }
+
+       pthread_priority_t pp = _dispatch_get_priority();
+       dispatch_qos_t qos = _dispatch_qos_from_pp(pp);
+       return _dispatch_get_root_queue(qos ? qos : DISPATCH_QOS_DEFAULT, false);
+}
+
 DISPATCH_NOINLINE
 void
 dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt,
                void (*func)(void *, size_t))
 {
-       if (slowpath(iterations == 0)) {
+       if (unlikely(iterations == 0)) {
                return;
        }
-       uint32_t thr_cnt = dispatch_hw_config(active_cpus);
-       dispatch_thread_context_t dtctxt = _dispatch_thread_context_find(_dispatch_apply_key);
+       dispatch_thread_context_t dtctxt =
+                       _dispatch_thread_context_find(_dispatch_apply_key);
        size_t nested = dtctxt ? dtctxt->dtc_apply_nesting : 0;
        dispatch_queue_t old_dq = _dispatch_queue_get_current();
 
-       if (!slowpath(nested)) {
+       if (likely(dq == DISPATCH_APPLY_AUTO)) {
+               dq = _dispatch_apply_root_queue(old_dq);
+       }
+       dispatch_qos_t qos = _dispatch_priority_qos(dq->dq_priority);
+       if (unlikely(dq->do_targetq)) {
+               // if the queue passed-in is not a root queue, use the current QoS
+               // since the caller participates in the work anyway
+               qos = _dispatch_qos_from_pp(_dispatch_get_priority());
+       }
+       int32_t thr_cnt = (int32_t)_dispatch_qos_max_parallelism(qos,
+                       DISPATCH_MAX_PARALLELISM_ACTIVE);
+
+       if (likely(!nested)) {
                nested = iterations;
        } else {
-               thr_cnt = nested < thr_cnt ? thr_cnt / nested : 1;
+               thr_cnt = nested < (size_t)thr_cnt ? thr_cnt / (int32_t)nested : 1;
                nested = nested < DISPATCH_APPLY_MAX && iterations < DISPATCH_APPLY_MAX
                                ? nested * iterations : DISPATCH_APPLY_MAX;
        }
-       if (iterations < thr_cnt) {
-               thr_cnt = (uint32_t)iterations;
-       }
-       if (slowpath(dq == DISPATCH_APPLY_CURRENT_ROOT_QUEUE)) {
-               dq = old_dq ? old_dq : _dispatch_get_root_queue(
-                               _DISPATCH_QOS_CLASS_DEFAULT, false);
-               while (slowpath(dq->do_targetq)) {
-                       dq = dq->do_targetq;
-               }
+       if (iterations < (size_t)thr_cnt) {
+               thr_cnt = (int32_t)iterations;
        }
        struct dispatch_continuation_s dc = {
                .dc_func = (void*)func,
@@ -276,11 +299,11 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt,
 #endif
        da->da_flags = 0;
 
-       if (slowpath(dq->dq_width == 1) || slowpath(thr_cnt <= 1)) {
+       if (unlikely(dq->dq_width == 1 || thr_cnt <= 1)) {
                return dispatch_sync_f(dq, da, _dispatch_apply_serial);
        }
-       if (slowpath(dq->do_targetq)) {
-               if (slowpath(dq == old_dq)) {
+       if (unlikely(dq->do_targetq)) {
+               if (unlikely(dq == old_dq)) {
                        return dispatch_sync_f(dq, da, _dispatch_apply_serial);
                } else {
                        return dispatch_sync_f(dq, da, _dispatch_apply_redirect);
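From the caller's side, the new DISPATCH_APPLY_AUTO path above reduces to a
one-liner; a hedged sketch (`count` and the per-iteration worker are assumed):

	// Hedged sketch: let libdispatch pick the queue and parallelism.
	dispatch_apply(count, DISPATCH_APPLY_AUTO, ^(size_t i) {
		process_chunk(i); // hypothetical per-iteration work
	});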
index 3060a2a4dd587f1af89c919402a4f4785661dcf6..2a6f00799b5f6dee5f1b9743148b3c0554cb2dbd 100644 (file)
@@ -32,6 +32,8 @@ extern "C" {
 #include "internal.h"
 }
 
+// NOTE: this file must not contain any atomic operations
+
 #if DISPATCH_DEBUG && DISPATCH_BLOCK_PRIVATE_DATA_DEBUG
 #define _dispatch_block_private_data_debug(msg, ...) \
                _dispatch_debug("block_private[%p]: " msg, (this), ##__VA_ARGS__)
@@ -83,7 +85,8 @@ struct dispatch_block_private_data_s {
                        ((void (*)(dispatch_group_t))dispatch_release)(dbpd_group);
                }
                if (dbpd_queue) {
-                       ((void (*)(os_mpsc_queue_t))_os_object_release_internal)(dbpd_queue);
+                       ((void (*)(os_mpsc_queue_t, uint16_t))
+                                       _os_object_release_internal_n)(dbpd_queue, 2);
                }
                if (dbpd_block) Block_release(dbpd_block);
                if (dbpd_voucher) voucher_release(dbpd_voucher);
index 644328911dc32d62755a1b51556561b9657f5f8f..3efab2f89bcbb2f029037b3c48974a5311a8cfa4 100644 (file)
 #define _dispatch_data_release(x) dispatch_release(x)
 #endif
 
-const dispatch_block_t _dispatch_data_destructor_free = ^{
-       DISPATCH_INTERNAL_CRASH(0, "free destructor called");
-};
-
-const dispatch_block_t _dispatch_data_destructor_none = ^{
-       DISPATCH_INTERNAL_CRASH(0, "none destructor called");
-};
-
-#if !HAVE_MACH
-const dispatch_block_t _dispatch_data_destructor_munmap = ^{
-       DISPATCH_INTERNAL_CRASH(0, "munmap destructor called");
-};
-#else
-// _dispatch_data_destructor_munmap is a linker alias to the following
-const dispatch_block_t _dispatch_data_destructor_vm_deallocate = ^{
-       DISPATCH_INTERNAL_CRASH(0, "vmdeallocate destructor called");
-};
-#endif
-
-const dispatch_block_t _dispatch_data_destructor_inline = ^{
-       DISPATCH_INTERNAL_CRASH(0, "inline destructor called");
-};
-
-struct dispatch_data_s _dispatch_data_empty = {
-#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA
-       .do_vtable = DISPATCH_DATA_EMPTY_CLASS,
-#else
-       DISPATCH_GLOBAL_OBJECT_HEADER(data),
-       .do_next = DISPATCH_OBJECT_LISTLESS,
-#endif
-};
-
 DISPATCH_ALWAYS_INLINE
 static inline dispatch_data_t
 _dispatch_data_alloc(size_t n, size_t extra)
 {
        dispatch_data_t data;
        size_t size;
+       size_t base_size;
 
-       if (os_mul_and_add_overflow(n, sizeof(range_record),
-                       sizeof(struct dispatch_data_s) + extra, &size)) {
+       if (os_add_overflow(sizeof(struct dispatch_data_s), extra, &base_size)) {
+               return DISPATCH_OUT_OF_MEMORY;
+       }
+       if (os_mul_and_add_overflow(n, sizeof(range_record), base_size, &size)) {
                return DISPATCH_OUT_OF_MEMORY;
        }
 
-       data = _dispatch_alloc(DISPATCH_DATA_CLASS, size);
+       data = _dispatch_object_alloc(DISPATCH_DATA_CLASS, size);
        data->num_records = n;
 #if !DISPATCH_DATA_IS_BRIDGED_TO_NSDATA
        data->do_targetq = dispatch_get_global_queue(
@@ -167,6 +138,8 @@ _dispatch_data_destroy_buffer(const void* buffer, size_t size,
                mach_vm_size_t vm_size = size;
                mach_vm_address_t vm_addr = (uintptr_t)buffer;
                mach_vm_deallocate(mach_task_self(), vm_addr, vm_size);
+#else
+               (void)size;
 #endif
        } else {
                if (!queue) {
@@ -192,8 +165,8 @@ _dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size,
 }
 
 void
-dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size,
-               dispatch_block_t destructor)
+_dispatch_data_init_with_bytes(dispatch_data_t data, const void *buffer,
+               size_t size, dispatch_block_t destructor)
 {
        if (!buffer || !size) {
                if (destructor) {
@@ -284,7 +257,7 @@ out:
 }
 
 void
-_dispatch_data_dispose(dispatch_data_t dd)
+_dispatch_data_dispose(dispatch_data_t dd, DISPATCH_UNUSED bool *allow_free)
 {
        if (_dispatch_data_leaf(dd)) {
                _dispatch_data_destroy_buffer(dd->buf, dd->size, dd->do_targetq,
@@ -298,6 +271,18 @@ _dispatch_data_dispose(dispatch_data_t dd)
        }
 }
 
+void
+_dispatch_data_set_target_queue(dispatch_data_t dd, dispatch_queue_t tq)
+{
+#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA
+       _dispatch_retain(tq);
+       tq = os_atomic_xchg2o(dd, do_targetq, tq, release);
+       if (tq) _dispatch_release(tq);
+#else
+       _dispatch_object_set_target_queue_inline(dd, tq);
+#endif
+}
+
 size_t
 _dispatch_data_debug(dispatch_data_t dd, char* buf, size_t bufsiz)
 {
@@ -433,7 +418,7 @@ dispatch_data_create_subrange(dispatch_data_t dd, size_t offset,
 
        // find the record containing the end of the current range
        // and optimize the case when you just remove bytes at the origin
-       size_t count, last_length;
+       size_t count, last_length = 0;
 
        if (to_the_end) {
                count = dd_num_records - i;
index 190b1edd1260e28eb1497d013655af9e77a3b8eb..1d024ffe7bd63dfae02e566ab85b1c4ea5efde62 100644 (file)
 
 #include <Foundation/NSString.h>
 
+// NOTE: this file must not contain any atomic operations
+
 @interface DISPATCH_CLASS(data) () <DISPATCH_CLASS(data)>
-@property (readonly) NSUInteger length;
-@property (readonly) const void *bytes NS_RETURNS_INNER_POINTER;
+@property (readonly,nonatomic) NSUInteger length;
+@property (readonly,nonatomic) const void *bytes NS_RETURNS_INNER_POINTER;
 
 - (id)initWithBytes:(void *)bytes length:(NSUInteger)length copy:(BOOL)copy
                freeWhenDone:(BOOL)freeBytes bytesAreVM:(BOOL)vm;
        } else {
                destructor = DISPATCH_DATA_DESTRUCTOR_NONE;
        }
-       dispatch_data_init(self, bytes, length, destructor);
+       _dispatch_data_init_with_bytes(self, bytes, length, destructor);
        return self;
 }
 
-#define _dispatch_data_objc_dispose(selector) \
-       struct dispatch_data_s *dd = (void*)self; \
-       _dispatch_data_dispose(self); \
-       dispatch_queue_t tq = dd->do_targetq; \
-       dispatch_function_t func = dd->finalizer; \
-       void *ctxt = dd->ctxt; \
-       [super selector]; \
-       if (func && ctxt) { \
-               if (!tq) { \
-                        tq = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,0);\
-               } \
-               dispatch_async_f(tq, ctxt, func); \
-       } \
-       if (tq) { \
-               _os_object_release_internal((_os_object_t)tq); \
-       }
-
 - (void)dealloc {
-       _dispatch_data_objc_dispose(dealloc);
+       struct dispatch_data_s *dd = (void*)self;
+       _dispatch_data_dispose(self, NULL);
+       dispatch_queue_t tq = dd->do_targetq;
+       dispatch_function_t func = dd->finalizer;
+       void *ctxt = dd->ctxt;
+       [super dealloc];
+       if (func && ctxt) {
+               if (!tq) {
+                        tq = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,0);
+               }
+               dispatch_async_f(tq, ctxt, func);
+       }
+       if (tq) {
+               _os_object_release_internal((_os_object_t)tq);
+       }
 }
 
 - (BOOL)_bytesAreVM {
 
 - (void)_setTargetQueue:(dispatch_queue_t)queue {
        struct dispatch_data_s *dd = (void*)self;
-       _os_object_retain_internal((_os_object_t)queue);
-       dispatch_queue_t prev;
-       prev = os_atomic_xchg2o(dd, do_targetq, queue, release);
-       if (prev) _os_object_release_internal((_os_object_t)prev);
+       return _dispatch_data_set_target_queue(dd, queue);
 }
 
 - (NSString *)debugDescription {
        if (!nsstring) return nil;
        char buf[2048];
        _dispatch_data_debug(self, buf, sizeof(buf));
-       return [nsstring stringWithFormat:
-                       [nsstring stringWithUTF8String:"<%s: %s>"],
-                       class_getName([self class]), buf];
+       NSString *format = [nsstring stringWithUTF8String:"<%s: %s>"];
+       if (!format) return nil;
+       return [nsstring stringWithFormat:format, class_getName([self class]), buf];
 }
 
 - (NSUInteger)length {
index bbef21e4108623b5c0579fdd1514c162304e5acb..19fc3d9adeb6d053650df50c6f36dc67baa95a19 100644 (file)
@@ -100,12 +100,13 @@ struct dispatch_data_format_type_s {
        dispatch_transform_t encode;
 };
 
-void dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size,
-               dispatch_block_t destructor);
-void _dispatch_data_dispose(dispatch_data_t data);
+void _dispatch_data_init_with_bytes(dispatch_data_t data, const void *buffer,
+               size_t size, dispatch_block_t destructor);
+void _dispatch_data_dispose(dispatch_data_t data, bool *allow_free);
+void _dispatch_data_set_target_queue(struct dispatch_data_s *dd,
+               dispatch_queue_t tq);
 size_t _dispatch_data_debug(dispatch_data_t data, char* buf, size_t bufsiz);
-const void*
-_dispatch_data_get_flattened_bytes(struct dispatch_data_s *dd);
+const void* _dispatch_data_get_flattened_bytes(struct dispatch_data_s *dd);
 
 #if !defined(__cplusplus)
 extern const dispatch_block_t _dispatch_data_destructor_inline;
diff --git a/src/event/event.c b/src/event/event.c
new file mode 100644 (file)
index 0000000..34abbf0
--- /dev/null
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#include "internal.h"
+
+DISPATCH_NOINLINE
+static dispatch_unote_t
+_dispatch_unote_create(dispatch_source_type_t dst,
+               uintptr_t handle, unsigned long mask)
+{
+       dispatch_unote_linkage_t dul;
+       dispatch_unote_class_t du;
+
+       if (mask & ~dst->dst_mask) {
+               return DISPATCH_UNOTE_NULL;
+       }
+
+       if (dst->dst_filter != DISPATCH_EVFILT_TIMER) {
+               if (dst->dst_mask && !mask) {
+                       return DISPATCH_UNOTE_NULL;
+               }
+       }
+
+       if ((dst->dst_flags & EV_UDATA_SPECIFIC) ||
+                       (dst->dst_filter == DISPATCH_EVFILT_TIMER)) {
+               du = _dispatch_calloc(1u, dst->dst_size);
+       } else {
+               dul = _dispatch_calloc(1u, sizeof(*dul) + dst->dst_size);
+               du = _dispatch_unote_linkage_get_unote(dul)._du;
+       }
+       du->du_type = dst;
+       du->du_can_be_wlh = dst->dst_per_trigger_qos;
+       du->du_ident = (uint32_t)handle;
+       du->du_filter = dst->dst_filter;
+       du->du_fflags = (typeof(du->du_fflags))mask;
+       if (dst->dst_flags & EV_UDATA_SPECIFIC) {
+               du->du_is_direct = true;
+       }
+       du->du_data_action = DISPATCH_UNOTE_ACTION_DATA_OR;
+       return (dispatch_unote_t){ ._du = du };
+}
+
+DISPATCH_NOINLINE
+dispatch_unote_t
+_dispatch_unote_create_with_handle(dispatch_source_type_t dst,
+               uintptr_t handle, unsigned long mask)
+{
+       if (!handle) {
+               return DISPATCH_UNOTE_NULL;
+       }
+       return _dispatch_unote_create(dst, handle, mask);
+}
+
+DISPATCH_NOINLINE
+dispatch_unote_t
+_dispatch_unote_create_with_fd(dispatch_source_type_t dst,
+               uintptr_t handle, unsigned long mask)
+{
+#if !TARGET_OS_MAC // <rdar://problem/27756657>
+       if (handle > INT_MAX) {
+               return DISPATCH_UNOTE_NULL;
+       }
+#endif
+       dispatch_unote_t du = _dispatch_unote_create(dst, handle, mask);
+       if (du._du) {
+               int16_t filter = dst->dst_filter;
+               du._du->du_data_action = (filter == EVFILT_READ||filter == EVFILT_WRITE)
+                       ? DISPATCH_UNOTE_ACTION_DATA_SET : DISPATCH_UNOTE_ACTION_DATA_OR;
+       }
+       return du;
+}
+
+DISPATCH_NOINLINE
+dispatch_unote_t
+_dispatch_unote_create_without_handle(dispatch_source_type_t dst,
+               uintptr_t handle, unsigned long mask)
+{
+       if (handle) {
+               return DISPATCH_UNOTE_NULL;
+       }
+       return _dispatch_unote_create(dst, handle, mask);
+}
+
+DISPATCH_NOINLINE
+void
+_dispatch_unote_dispose(dispatch_unote_t du)
+{
+       void *ptr = du._du;
+#if HAVE_MACH
+       if (du._du->dmrr_handler_is_block) {
+               Block_release(du._dmrr->dmrr_handler_ctxt);
+       }
+#endif
+       if (du._du->du_is_timer) {
+               if (unlikely(du._dt->dt_heap_entry[DTH_TARGET_ID] != DTH_INVALID_ID ||
+                               du._dt->dt_heap_entry[DTH_DEADLINE_ID] != DTH_INVALID_ID)) {
+                       DISPATCH_INTERNAL_CRASH(0, "Disposing of timer still in its heap");
+               }
+               if (unlikely(du._dt->dt_pending_config)) {
+                       free(du._dt->dt_pending_config);
+                       du._dt->dt_pending_config = NULL;
+               }
+       } else if (!du._du->du_is_direct) {
+               ptr = _dispatch_unote_get_linkage(du);
+       }
+       free(ptr);
+}
+
+#pragma mark data or / add
+
+static dispatch_unote_t
+_dispatch_source_data_create(dispatch_source_type_t dst, uintptr_t handle,
+               unsigned long mask)
+{
+       if (handle || mask) {
+               return DISPATCH_UNOTE_NULL;
+       }
+
+       // bypass _dispatch_unote_create() because this is always "direct"
+       // even when EV_UDATA_SPECIFIC is 0
+       dispatch_unote_class_t du = _dispatch_calloc(1u, dst->dst_size);
+       du->du_type = dst;
+       du->du_filter = dst->dst_filter;
+       du->du_is_direct = true;
+       return (dispatch_unote_t){ ._du = du };
+}
+
+const dispatch_source_type_s _dispatch_source_type_data_add = {
+       .dst_kind       = "data-add",
+       .dst_filter     = DISPATCH_EVFILT_CUSTOM_ADD,
+       .dst_flags      = EV_UDATA_SPECIFIC|EV_CLEAR,
+       .dst_size       = sizeof(struct dispatch_source_refs_s),
+
+       .dst_create     = _dispatch_source_data_create,
+       .dst_merge_evt  = NULL,
+};
+
+const dispatch_source_type_s _dispatch_source_type_data_or = {
+       .dst_kind       = "data-or",
+       .dst_filter     = DISPATCH_EVFILT_CUSTOM_OR,
+       .dst_flags      = EV_UDATA_SPECIFIC|EV_CLEAR,
+       .dst_size       = sizeof(struct dispatch_source_refs_s),
+
+       .dst_create     = _dispatch_source_data_create,
+       .dst_merge_evt  = NULL,
+};
+
+const dispatch_source_type_s _dispatch_source_type_data_replace = {
+       .dst_kind       = "data-replace",
+       .dst_filter     = DISPATCH_EVFILT_CUSTOM_REPLACE,
+       .dst_flags      = EV_UDATA_SPECIFIC|EV_CLEAR,
+       .dst_size       = sizeof(struct dispatch_source_refs_s),
+
+       .dst_create     = _dispatch_source_data_create,
+       .dst_merge_evt  = NULL,
+};
+
+#pragma mark file descriptors
+
+const dispatch_source_type_s _dispatch_source_type_read = {
+       .dst_kind       = "read",
+       .dst_filter     = EVFILT_READ,
+       .dst_flags      = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED,
+#if DISPATCH_EVENT_BACKEND_KEVENT
+#if HAVE_DECL_NOTE_LOWAT
+       .dst_fflags     = NOTE_LOWAT,
+#endif
+       .dst_data       = 1,
+#endif // DISPATCH_EVENT_BACKEND_KEVENT
+       .dst_size       = sizeof(struct dispatch_source_refs_s),
+
+       .dst_create     = _dispatch_unote_create_with_fd,
+       .dst_merge_evt  = _dispatch_source_merge_evt,
+};
+
+const dispatch_source_type_s _dispatch_source_type_write = {
+       .dst_kind       = "write",
+       .dst_filter     = EVFILT_WRITE,
+       .dst_flags      = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED,
+#if DISPATCH_EVENT_BACKEND_KEVENT
+#if HAVE_DECL_NOTE_LOWAT
+       .dst_fflags     = NOTE_LOWAT,
+#endif
+       .dst_data       = 1,
+#endif // DISPATCH_EVENT_BACKEND_KEVENT
+       .dst_size       = sizeof(struct dispatch_source_refs_s),
+
+       .dst_create     = _dispatch_unote_create_with_fd,
+       .dst_merge_evt  = _dispatch_source_merge_evt,
+};
+
+#pragma mark signals
+
+static dispatch_unote_t
+_dispatch_source_signal_create(dispatch_source_type_t dst, uintptr_t handle,
+               unsigned long mask)
+{
+       if (handle >= NSIG) {
+               return DISPATCH_UNOTE_NULL;
+       }
+       dispatch_unote_t du = _dispatch_unote_create_with_handle(dst, handle, mask);
+       if (du._du) {
+               du._du->du_data_action = DISPATCH_UNOTE_ACTION_DATA_ADD;
+       }
+       return du;
+}
+
+const dispatch_source_type_s _dispatch_source_type_signal = {
+       .dst_kind       = "signal",
+       .dst_filter     = EVFILT_SIGNAL,
+       .dst_flags      = DISPATCH_EV_DIRECT|EV_CLEAR,
+       .dst_size       = sizeof(struct dispatch_source_refs_s),
+
+       .dst_create     = _dispatch_source_signal_create,
+       .dst_merge_evt  = _dispatch_source_merge_evt,
+};
+
+#pragma mark timers
+
+bool _dispatch_timers_reconfigure, _dispatch_timers_expired;
+uint32_t _dispatch_timers_processing_mask;
+#if DISPATCH_USE_DTRACE
+uint32_t _dispatch_timers_will_wake;
+#endif
+#define DISPATCH_TIMER_HEAP_INITIALIZER(tidx) \
+       [tidx] = { \
+               .dth_target = UINT64_MAX, \
+               .dth_deadline = UINT64_MAX, \
+       }
+#define DISPATCH_TIMER_HEAP_INIT(kind, qos) \
+               DISPATCH_TIMER_HEAP_INITIALIZER(DISPATCH_TIMER_INDEX( \
+               DISPATCH_CLOCK_##kind, DISPATCH_TIMER_QOS_##qos))
+
+struct dispatch_timer_heap_s _dispatch_timers_heap[] =  {
+       DISPATCH_TIMER_HEAP_INIT(WALL, NORMAL),
+       DISPATCH_TIMER_HEAP_INIT(MACH, NORMAL),
+#if DISPATCH_HAVE_TIMER_QOS
+       DISPATCH_TIMER_HEAP_INIT(WALL, CRITICAL),
+       DISPATCH_TIMER_HEAP_INIT(MACH, CRITICAL),
+       DISPATCH_TIMER_HEAP_INIT(WALL, BACKGROUND),
+       DISPATCH_TIMER_HEAP_INIT(MACH, BACKGROUND),
+#endif
+};
+
+static dispatch_unote_t
+_dispatch_source_timer_create(dispatch_source_type_t dst,
+               uintptr_t handle, unsigned long mask)
+{
+       uint32_t fflags = dst->dst_fflags;
+       dispatch_unote_t du;
+
+       // normalize flags
+       if (mask & DISPATCH_TIMER_STRICT) {
+               mask &= ~(unsigned long)DISPATCH_TIMER_BACKGROUND;
+       }
+
+       if (fflags & DISPATCH_TIMER_INTERVAL) {
+               if (!handle) return DISPATCH_UNOTE_NULL;
+               du = _dispatch_unote_create_without_handle(dst, 0, mask);
+       } else {
+               du = _dispatch_unote_create_without_handle(dst, handle, mask);
+       }
+
+       if (du._dt) {
+               du._dt->du_is_timer = true;
+               du._dt->du_data_action = DISPATCH_UNOTE_ACTION_DATA_ADD;
+               du._dt->du_fflags |= fflags;
+               du._dt->du_ident = _dispatch_source_timer_idx(du);
+               du._dt->dt_timer.target = UINT64_MAX;
+               du._dt->dt_timer.deadline = UINT64_MAX;
+               du._dt->dt_timer.interval = UINT64_MAX;
+               du._dt->dt_heap_entry[DTH_TARGET_ID] = DTH_INVALID_ID;
+               du._dt->dt_heap_entry[DTH_DEADLINE_ID] = DTH_INVALID_ID;
+       }
+       return du;
+}
+
+const dispatch_source_type_s _dispatch_source_type_timer = {
+       .dst_kind       = "timer",
+       .dst_filter     = DISPATCH_EVFILT_TIMER,
+       .dst_flags      = EV_DISPATCH,
+       .dst_mask       = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND,
+       .dst_fflags     = 0,
+       .dst_size       = sizeof(struct dispatch_timer_source_refs_s),
+
+       .dst_create     = _dispatch_source_timer_create,
+};
+
+const dispatch_source_type_s _dispatch_source_type_after = {
+       .dst_kind       = "timer (after)",
+       .dst_filter     = DISPATCH_EVFILT_TIMER,
+       .dst_flags      = EV_DISPATCH,
+       .dst_mask       = 0,
+       .dst_fflags     = DISPATCH_TIMER_AFTER,
+       .dst_size       = sizeof(struct dispatch_timer_source_refs_s),
+
+       .dst_create     = _dispatch_source_timer_create,
+};
+
+const dispatch_source_type_s _dispatch_source_type_interval = {
+       .dst_kind       = "timer (interval)",
+       .dst_filter     = DISPATCH_EVFILT_TIMER,
+       .dst_flags      = EV_DISPATCH,
+       .dst_mask       = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND
+                       |DISPATCH_INTERVAL_UI_ANIMATION,
+       .dst_fflags     = DISPATCH_TIMER_INTERVAL|DISPATCH_TIMER_CLOCK_MACH,
+       .dst_size       = sizeof(struct dispatch_timer_source_refs_s),
+
+       .dst_create     = _dispatch_source_timer_create,
+};
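+
+// Illustrative sketch (not part of this change): the public timer API that
+// funnels into _dispatch_source_timer_create(). DISPATCH_TIMER_STRICT is the
+// public flag matched by dst_mask above; error handling is elided.
+#if 0
+static void
+_example_timer_source(dispatch_queue_t q)
+{
+       dispatch_source_t dt = dispatch_source_create(
+                       DISPATCH_SOURCE_TYPE_TIMER, 0, DISPATCH_TIMER_STRICT, q);
+       dispatch_source_set_timer(dt,
+                       dispatch_time(DISPATCH_TIME_NOW, (int64_t)NSEC_PER_SEC),
+                       NSEC_PER_SEC, NSEC_PER_MSEC); // start in 1s, 1s period, 1ms leeway
+       dispatch_source_set_event_handler(dt, ^{ /* fires on q */ });
+       dispatch_resume(dt);
+}
+#endif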
diff --git a/src/event/event_config.h b/src/event/event_config.h
new file mode 100644 (file)
index 0000000..60f776f
--- /dev/null
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __DISPATCH_EVENT_EVENT_CONFIG__
+#define __DISPATCH_EVENT_EVENT_CONFIG__
+
+#if defined(__linux__)
+#      include <sys/eventfd.h>
+#      define DISPATCH_EVENT_BACKEND_EPOLL 1
+#      define DISPATCH_EVENT_BACKEND_KEVENT 0
+#elif __has_include(<sys/event.h>)
+#      include <sys/event.h>
+#      define DISPATCH_EVENT_BACKEND_EPOLL 0
+#      define DISPATCH_EVENT_BACKEND_KEVENT 1
+#else
+#      error unsupported event loop
+#endif
+
+#if DISPATCH_DEBUG
+#define DISPATCH_MGR_QUEUE_DEBUG 1
+#define DISPATCH_WLH_DEBUG 1
+#endif
+
+#ifndef DISPATCH_MGR_QUEUE_DEBUG
+#define DISPATCH_MGR_QUEUE_DEBUG 0
+#endif
+
+#ifndef DISPATCH_WLH_DEBUG
+#define DISPATCH_WLH_DEBUG 0
+#endif
+
+#ifndef DISPATCH_MACHPORT_DEBUG
+#define DISPATCH_MACHPORT_DEBUG 0
+#endif
+
+#ifndef DISPATCH_TIMER_ASSERTIONS
+#if DISPATCH_DEBUG
+#define DISPATCH_TIMER_ASSERTIONS 1
+#else
+#define DISPATCH_TIMER_ASSERTIONS 0
+#endif
+#endif
+
+#if DISPATCH_TIMER_ASSERTIONS
+#define DISPATCH_TIMER_ASSERT(a, op, b, text) ({ \
+               typeof(a) _a = (a); \
+               if (unlikely(!(_a op (b)))) { \
+                       DISPATCH_CLIENT_CRASH(_a, "Timer: " text); \
+               } \
+       })
+#else
+#define DISPATCH_TIMER_ASSERT(a, op, b, text) ((void)0)
+#endif
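+// Hypothetical call site, for illustration: DISPATCH_TIMER_ASSERT(
+// dth->dth_count, >, 0, "heap underflow") crashes with the offending value
+// captured in the crash report when DISPATCH_TIMER_ASSERTIONS is enabled.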
+
+#ifndef EV_VANISHED
+#define EV_VANISHED 0x0200
+#endif
+
+#if DISPATCH_EVENT_BACKEND_KEVENT
+#      if defined(EV_SET_QOS)
+#              define DISPATCH_USE_KEVENT_QOS 1
+#              ifndef KEVENT_FLAG_IMMEDIATE
+#              define KEVENT_FLAG_IMMEDIATE 0x001
+#              endif
+#              ifndef KEVENT_FLAG_ERROR_EVENTS
+#              define KEVENT_FLAG_ERROR_EVENTS 0x002
+#              endif
+#      else
+#              define DISPATCH_USE_KEVENT_QOS 0
+#      endif
+
+#      ifdef NOTE_LEEWAY
+#              define DISPATCH_HAVE_TIMER_COALESCING 1
+#      else
+#              define NOTE_LEEWAY 0
+#              define DISPATCH_HAVE_TIMER_COALESCING 0
+#      endif // !NOTE_LEEWAY
+#      if defined(NOTE_CRITICAL) && defined(NOTE_BACKGROUND)
+#              define DISPATCH_HAVE_TIMER_QOS 1
+#      else
+#              undef  NOTE_CRITICAL
+#              define NOTE_CRITICAL 0
+#              undef  NOTE_BACKGROUND
+#              define NOTE_BACKGROUND 0
+#              define DISPATCH_HAVE_TIMER_QOS 0
+#      endif // !defined(NOTE_CRITICAL) || !defined(NOTE_BACKGROUND)
+
+#      ifndef NOTE_FUNLOCK
+#      define NOTE_FUNLOCK 0x00000100
+#      endif
+
+#      if HAVE_DECL_NOTE_REAP
+#      if defined(NOTE_REAP) && defined(__APPLE__)
+#      undef NOTE_REAP
+#      define NOTE_REAP 0x10000000 // <rdar://problem/13338526>
+#      endif
+#      endif // HAVE_DECL_NOTE_REAP
+
+#      ifndef VQ_QUOTA
+#      undef HAVE_DECL_VQ_QUOTA // rdar://problem/24160982
+#      endif // VQ_QUOTA
+
+#      ifndef VQ_NEARLOWDISK
+#      undef HAVE_DECL_VQ_NEARLOWDISK
+#      endif // VQ_NEARLOWDISK
+
+#      ifndef VQ_DESIRED_DISK
+#      undef HAVE_DECL_VQ_DESIRED_DISK
+#      endif // VQ_DESIRED_DISK
+
+#      if !defined(EVFILT_NW_CHANNEL) && defined(__APPLE__)
+#      define EVFILT_NW_CHANNEL       (-16)
+#      define NOTE_FLOW_ADV_UPDATE     0x1
+#      endif
+#else // DISPATCH_EVENT_BACKEND_KEVENT
+#      define EV_ADD                                   0x0001
+#      define EV_DELETE                                0x0002
+#      define EV_ENABLE                                0x0004
+
+#      define EV_ONESHOT                               0x0010
+#      define EV_CLEAR                                 0x0020
+#      define EV_DISPATCH                              0x0080
+
+#      define EVFILT_READ                              (-1)
+#      define EVFILT_WRITE                             (-2)
+#      define EVFILT_SIGNAL                    (-3)
+#      define EVFILT_TIMER                             (-4)
+#      define EVFILT_SYSCOUNT                  4
+
+#      define DISPATCH_HAVE_TIMER_QOS 0
+#      define DISPATCH_HAVE_TIMER_COALESCING 0
+#      define KEVENT_FLAG_IMMEDIATE 0x001
+#endif // !DISPATCH_EVENT_BACKEND_KEVENT
+
+#ifdef EV_UDATA_SPECIFIC
+#      define DISPATCH_EV_DIRECT               (EV_UDATA_SPECIFIC|EV_DISPATCH)
+#else
+#      define DISPATCH_EV_DIRECT               0x0000
+#      define EV_UDATA_SPECIFIC                0x0000
+#      undef  EV_VANISHED
+#      define EV_VANISHED                              0x0000
+#endif
+
+#define DISPATCH_EV_MSG_NEEDS_FREE     0x10000 // mach message needs to be free()d
+
+#define DISPATCH_EVFILT_TIMER                          (-EVFILT_SYSCOUNT - 1)
+#define DISPATCH_EVFILT_CUSTOM_ADD                     (-EVFILT_SYSCOUNT - 2)
+#define DISPATCH_EVFILT_CUSTOM_OR                      (-EVFILT_SYSCOUNT - 3)
+#define DISPATCH_EVFILT_CUSTOM_REPLACE         (-EVFILT_SYSCOUNT - 4)
+#define DISPATCH_EVFILT_MACH_NOTIFICATION      (-EVFILT_SYSCOUNT - 5)
+#define DISPATCH_EVFILT_SYSCOUNT                       ( EVFILT_SYSCOUNT + 5)
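+
+// The three CUSTOM pseudo-filters back the public DATA_ADD / DATA_OR /
+// DATA_REPLACE source types. Illustrative sketch (not part of this change),
+// using only public libdispatch API; error handling is elided.
+#if 0
+static void
+_example_data_source(dispatch_queue_t q)
+{
+       dispatch_source_t ds = dispatch_source_create(
+                       DISPATCH_SOURCE_TYPE_DATA_ADD, 0, 0, q);
+       dispatch_source_set_event_handler(ds, ^{
+               // coalesced sum of everything merged since the last invocation
+               (void)dispatch_source_get_data(ds);
+       });
+       dispatch_resume(ds);
+       dispatch_source_merge_data(ds, 1); // DISPATCH_EVFILT_CUSTOM_ADD path
+}
+#endif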
+
+#if HAVE_MACH
+#      if !EV_UDATA_SPECIFIC
+#      error mach support requires EV_UDATA_SPECIFIC
+#      endif
+
+#      ifndef MACH_RCV_VOUCHER
+#      define MACH_RCV_VOUCHER 0x00000800
+#      endif
+
+#      ifndef MACH_NOTIFY_SEND_POSSIBLE
+#      undef  MACH_NOTIFY_SEND_POSSIBLE
+#      define MACH_NOTIFY_SEND_POSSIBLE MACH_NOTIFY_DEAD_NAME
+#      endif
+
+#      ifndef NOTE_MACH_CONTINUOUS_TIME
+#      define NOTE_MACH_CONTINUOUS_TIME 0
+#      endif // NOTE_MACH_CONTINUOUS_TIME
+
+#      ifndef HOST_NOTIFY_CALENDAR_SET
+#      define HOST_NOTIFY_CALENDAR_SET HOST_NOTIFY_CALENDAR_CHANGE
+#      endif // HOST_NOTIFY_CALENDAR_SET
+
+#      ifndef HOST_CALENDAR_SET_REPLYID
+#      define HOST_CALENDAR_SET_REPLYID 951
+#      endif // HOST_CALENDAR_SET_REPLYID
+
+#      ifndef MACH_SEND_OVERRIDE
+#      define MACH_SEND_OVERRIDE 0x00000020
+typedef unsigned int mach_msg_priority_t;
+#      define MACH_MSG_PRIORITY_UNSPECIFIED ((mach_msg_priority_t)0)
+#      endif // MACH_SEND_OVERRIDE
+
+#      ifndef MACH_SEND_SYNC_OVERRIDE
+#      define MACH_SEND_SYNC_OVERRIDE 0x00100000
+#      endif // MACH_SEND_SYNC_OVERRIDE
+
+#      ifndef MACH_RCV_SYNC_WAIT
+#      define MACH_RCV_SYNC_WAIT 0x00004000
+#      endif // MACH_RCV_SYNC_WAIT
+
+#      define DISPATCH_MACH_TRAILER_SIZE sizeof(dispatch_mach_trailer_t)
+#      define DISPATCH_MACH_RCV_TRAILER MACH_RCV_TRAILER_CTX
+#      define DISPATCH_MACH_RCV_OPTIONS ( \
+               MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | \
+               MACH_RCV_TRAILER_ELEMENTS(DISPATCH_MACH_RCV_TRAILER) | \
+               MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | \
+               MACH_RCV_VOUCHER)
+#endif // HAVE_MACH
+
+#endif // __DISPATCH_EVENT_EVENT_CONFIG__
diff --git a/src/event/event_epoll.c b/src/event/event_epoll.c
new file mode 100644 (file)
index 0000000..add4dde
--- /dev/null
@@ -0,0 +1,650 @@
+/*
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+
+#include "internal.h"
+#if DISPATCH_EVENT_BACKEND_EPOLL
+#include <linux/sockios.h>
+#include <sys/epoll.h>
+#include <sys/eventfd.h>
+#include <sys/signalfd.h>
+#include <sys/timerfd.h>
+
+#ifndef EPOLLFREE
+#define EPOLLFREE 0x4000
+#endif
+
+#if !DISPATCH_USE_MGR_THREAD
+#error unsupported configuration
+#endif
+
+#define DISPATCH_EPOLL_MAX_EVENT_COUNT 16
+
+enum {
+       DISPATCH_EPOLL_EVENTFD    = 0x0001,
+       DISPATCH_EPOLL_CLOCK_WALL = 0x0002,
+       DISPATCH_EPOLL_CLOCK_MACH = 0x0003,
+};
+
+typedef struct dispatch_muxnote_s {
+       TAILQ_ENTRY(dispatch_muxnote_s) dmn_list;
+       TAILQ_HEAD(, dispatch_unote_linkage_s) dmn_readers_head;
+       TAILQ_HEAD(, dispatch_unote_linkage_s) dmn_writers_head;
+       int     dmn_fd;
+       uint32_t dmn_ident;
+       uint32_t dmn_events;
+       int16_t dmn_filter;
+       bool    dmn_skip_outq_ioctl;
+       bool    dmn_skip_inq_ioctl;
+} *dispatch_muxnote_t;
+
+typedef struct dispatch_epoll_timeout_s {
+       int       det_fd;
+       uint16_t  det_ident;
+       bool      det_registered;
+       bool      det_armed;
+} *dispatch_epoll_timeout_t;
+
+static int _dispatch_epfd, _dispatch_eventfd;
+
+static dispatch_once_t epoll_init_pred;
+static void _dispatch_epoll_init(void *);
+
+DISPATCH_CACHELINE_ALIGN
+static TAILQ_HEAD(dispatch_muxnote_bucket_s, dispatch_muxnote_s)
+_dispatch_sources[DSL_HASH_SIZE];
+
+#define DISPATCH_EPOLL_TIMEOUT_INITIALIZER(clock) \
+       [DISPATCH_CLOCK_##clock] = { \
+               .det_fd = -1, \
+               .det_ident = DISPATCH_EPOLL_CLOCK_##clock, \
+       }
+static struct dispatch_epoll_timeout_s _dispatch_epoll_timeout[] = {
+       DISPATCH_EPOLL_TIMEOUT_INITIALIZER(WALL),
+       DISPATCH_EPOLL_TIMEOUT_INITIALIZER(MACH),
+};
+
+#pragma mark dispatch_muxnote_t
+
+DISPATCH_ALWAYS_INLINE
+static inline struct dispatch_muxnote_bucket_s *
+_dispatch_muxnote_bucket(uint32_t ident)
+{
+       return &_dispatch_sources[DSL_HASH(ident)];
+}
+#define _dispatch_unote_muxnote_bucket(du) \
+       _dispatch_muxnote_bucket(du._du->du_ident)
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_muxnote_t
+_dispatch_muxnote_find(struct dispatch_muxnote_bucket_s *dmb,
+               uint32_t ident, int16_t filter)
+{
+       dispatch_muxnote_t dmn;
+       if (filter == EVFILT_WRITE) filter = EVFILT_READ;
+       TAILQ_FOREACH(dmn, dmb, dmn_list) {
+               if (dmn->dmn_ident == ident && dmn->dmn_filter == filter) {
+                       break;
+               }
+       }
+       return dmn;
+}
+#define _dispatch_unote_muxnote_find(dmb, du) \
+               _dispatch_muxnote_find(dmb, du._du->du_ident, du._du->du_filter)
+
+static void
+_dispatch_muxnote_dispose(dispatch_muxnote_t dmn)
+{
+       if (dmn->dmn_filter != EVFILT_READ || (uint32_t)dmn->dmn_fd != dmn->dmn_ident) {
+               close(dmn->dmn_fd);
+       }
+       free(dmn);
+}
+
+static pthread_t manager_thread;
+
+static void
+_dispatch_muxnote_signal_block_and_raise(int signo)
+{
+       // On Linux, for signals to be delivered to the signalfd, they must be
+       // blocked; otherwise any thread that hasn't blocked them may receive
+       // them through the legacy mechanism instead. Fix that lazily: block the
+       // signal in the offending thread and re-raise it at the manager thread.
+       _dispatch_sigmask();
+       pthread_kill(manager_thread, signo);
+}
+
+static dispatch_muxnote_t
+_dispatch_muxnote_create(dispatch_unote_t du, uint32_t events)
+{
+       static sigset_t signals_with_unotes;
+       static struct sigaction sa = {
+               .sa_handler = _dispatch_muxnote_signal_block_and_raise,
+               .sa_flags = SA_RESTART,
+       };
+
+       dispatch_muxnote_t dmn;
+       struct stat sb;
+       int fd = (int)du._du->du_ident;
+       int16_t filter = du._du->du_filter;
+       bool skip_outq_ioctl = false, skip_inq_ioctl = false;
+       sigset_t sigmask;
+
+       switch (filter) {
+       case EVFILT_SIGNAL: {
+               int signo = (int)du._du->du_ident;
+               if (!sigismember(&signals_with_unotes, signo)) {
+                       manager_thread = pthread_self();
+                       sigaddset(&signals_with_unotes, signo);
+                       sigaction(signo, &sa, NULL);
+               }
+               sigemptyset(&sigmask);
+               sigaddset(&sigmask, signo);
+               fd = signalfd(-1, &sigmask, SFD_NONBLOCK | SFD_CLOEXEC);
+               if (fd < 0) {
+                       return NULL;
+               }
+               break;
+       }
+       case EVFILT_WRITE:
+               filter = EVFILT_READ;
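+               // fallthrough: regular files and sockets are probed the same way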
+       case EVFILT_READ:
+               if (fstat(fd, &sb) < 0) {
+                       return NULL;
+               }
+               if (S_ISREG(sb.st_mode)) {
+                       // make a dummy fd that is both readable & writeable
+                       fd = eventfd(1, EFD_CLOEXEC | EFD_NONBLOCK);
+                       if (fd < 0) {
+                               return NULL;
+                       }
+                       // Linux doesn't support output queue size ioctls for regular files
+                       skip_outq_ioctl = true;
+               } else if (S_ISSOCK(sb.st_mode)) {
+                       socklen_t vlen = sizeof(int);
+                       int v;
+                       // Linux doesn't support saying how many clients are ready to be
+                       // accept()ed for sockets
+                       if (getsockopt(fd, SOL_SOCKET, SO_ACCEPTCONN, &v, &vlen) == 0) {
+                               skip_inq_ioctl = (bool)v;
+                       }
+               }
+               break;
+
+       default:
+               DISPATCH_INTERNAL_CRASH(0, "Unexpected filter");
+       }
+
+       dmn = _dispatch_calloc(1, sizeof(struct dispatch_muxnote_s));
+       TAILQ_INIT(&dmn->dmn_readers_head);
+       TAILQ_INIT(&dmn->dmn_writers_head);
+       dmn->dmn_fd = fd;
+       dmn->dmn_ident = du._du->du_ident;
+       dmn->dmn_filter = filter;
+       dmn->dmn_events = events;
+       dmn->dmn_skip_outq_ioctl = skip_outq_ioctl;
+       dmn->dmn_skip_inq_ioctl = skip_inq_ioctl;
+       return dmn;
+}
+
+#pragma mark dispatch_unote_t
+
+static int
+_dispatch_epoll_update(dispatch_muxnote_t dmn, int op)
+{
+       dispatch_once_f(&epoll_init_pred, NULL, _dispatch_epoll_init);
+       struct epoll_event ev = {
+               .events = dmn->dmn_events,
+               .data = { .ptr = dmn },
+       };
+       return epoll_ctl(_dispatch_epfd, op, dmn->dmn_fd, &ev);
+}
+
+bool
+_dispatch_unote_register(dispatch_unote_t du,
+               DISPATCH_UNUSED dispatch_wlh_t wlh, dispatch_priority_t pri)
+{
+       struct dispatch_muxnote_bucket_s *dmb;
+       dispatch_muxnote_t dmn;
+       uint32_t events = EPOLLFREE;
+
+       dispatch_assert(!_dispatch_unote_registered(du));
+       du._du->du_priority = pri;
+
+       switch (du._du->du_filter) {
+       case DISPATCH_EVFILT_CUSTOM_ADD:
+       case DISPATCH_EVFILT_CUSTOM_OR:
+       case DISPATCH_EVFILT_CUSTOM_REPLACE:
+               du._du->du_wlh = DISPATCH_WLH_ANON;
+               return true;
+       case EVFILT_WRITE:
+               events |= EPOLLOUT;
+               break;
+       default:
+               events |= EPOLLIN;
+               break;
+       }
+
+       if (du._du->du_type->dst_flags & EV_DISPATCH) {
+               events |= EPOLLONESHOT;
+       }
+
+       dmb = _dispatch_unote_muxnote_bucket(du);
+       dmn = _dispatch_unote_muxnote_find(dmb, du);
+       if (dmn) {
+               events &= ~dmn->dmn_events;
+               if (events) {
+                       dmn->dmn_events |= events;
+                       if (_dispatch_epoll_update(dmn, EPOLL_CTL_MOD) < 0) {
+                               dmn->dmn_events &= ~events;
+                               dmn = NULL;
+                       }
+               }
+       } else {
+               dmn = _dispatch_muxnote_create(du, events);
+               if (_dispatch_epoll_update(dmn, EPOLL_CTL_ADD) < 0) {
+                       _dispatch_muxnote_dispose(dmn);
+                       dmn = NULL;
+               } else {
+                       TAILQ_INSERT_TAIL(dmb, dmn, dmn_list);
+               }
+       }
+
+       if (dmn) {
+               dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
+               if (events & EPOLLOUT) {
+                       TAILQ_INSERT_TAIL(&dmn->dmn_writers_head, dul, du_link);
+               } else {
+                       TAILQ_INSERT_TAIL(&dmn->dmn_readers_head, dul, du_link);
+               }
+               dul->du_muxnote = dmn;
+               dispatch_assert(du._du->du_wlh == NULL);
+               du._du->du_wlh = DISPATCH_WLH_ANON;
+       }
+       return dmn != NULL;
+}
+
+void
+_dispatch_unote_resume(dispatch_unote_t du)
+{
+       dispatch_muxnote_t dmn = _dispatch_unote_get_linkage(du)->du_muxnote;
+       dispatch_assert(_dispatch_unote_registered(du));
+
+       _dispatch_epoll_update(dmn, EPOLL_CTL_MOD);
+}
+
+bool
+_dispatch_unote_unregister(dispatch_unote_t du, DISPATCH_UNUSED uint32_t flags)
+{
+       switch (du._du->du_filter) {
+       case DISPATCH_EVFILT_CUSTOM_ADD:
+       case DISPATCH_EVFILT_CUSTOM_OR:
+       case DISPATCH_EVFILT_CUSTOM_REPLACE:
+               du._du->du_wlh = NULL;
+               return true;
+       }
+       if (_dispatch_unote_registered(du)) {
+               dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
+               dispatch_muxnote_t dmn = dul->du_muxnote;
+               uint32_t events = dmn->dmn_events;
+
+               if (du._du->du_filter == EVFILT_WRITE) {
+                       TAILQ_REMOVE(&dmn->dmn_writers_head, dul, du_link);
+               } else {
+                       TAILQ_REMOVE(&dmn->dmn_readers_head, dul, du_link);
+               }
+               _TAILQ_TRASH_ENTRY(dul, du_link);
+               dul->du_muxnote = NULL;
+
+               if (TAILQ_EMPTY(&dmn->dmn_readers_head)) {
+                       events &= (uint32_t)(~EPOLLIN);
+               }
+               if (TAILQ_EMPTY(&dmn->dmn_writers_head)) {
+                       events &= (uint32_t)(~EPOLLOUT);
+               }
+
+               if (events == dmn->dmn_events) {
+                       // nothing to do
+               } else if (events & (EPOLLIN | EPOLLOUT)) {
+                       dmn->dmn_events = events;
+                       _dispatch_epoll_update(dmn, EPOLL_CTL_MOD);
+               } else {
+                       epoll_ctl(_dispatch_epfd, EPOLL_CTL_DEL, dmn->dmn_fd, NULL);
+                       TAILQ_REMOVE(_dispatch_unote_muxnote_bucket(du), dmn, dmn_list);
+                       _dispatch_muxnote_dispose(dmn);
+               }
+               dispatch_assert(du._du->du_wlh == DISPATCH_WLH_ANON);
+               du._du->du_wlh = NULL;
+       }
+       return true;
+}
+
+#pragma mark timers
+
+static void
+_dispatch_event_merge_timer(dispatch_clock_t clock)
+{
+       _dispatch_timers_expired = true;
+       _dispatch_timers_processing_mask |= 1 << DISPATCH_TIMER_INDEX(clock, 0);
+#if DISPATCH_USE_DTRACE
+       _dispatch_timers_will_wake |= 1 << 0;
+#endif
+       _dispatch_epoll_timeout[clock].det_armed = false;
+       _dispatch_timers_heap[clock].dth_flags &= ~DTH_ARMED;
+}
+
+static void
+_dispatch_timeout_program(uint32_t tidx, uint64_t target,
+               DISPATCH_UNUSED uint64_t leeway)
+{
+       dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(tidx);
+       dispatch_epoll_timeout_t timer = &_dispatch_epoll_timeout[clock];
+       struct epoll_event ev = {
+               .events = EPOLLONESHOT | EPOLLIN,
+               .data = { .u32 = timer->det_ident },
+       };
+       int op;
+
+       if (target >= INT64_MAX && !timer->det_registered) {
+               return;
+       }
+
+       if (unlikely(timer->det_fd < 0)) {
+               clockid_t clockid;
+               int fd;
+               switch (DISPATCH_TIMER_CLOCK(tidx)) {
+               case DISPATCH_CLOCK_MACH:
+                       clockid = CLOCK_MONOTONIC;
+                       break;
+               case DISPATCH_CLOCK_WALL:
+                       clockid = CLOCK_REALTIME;
+                       break;
+               }
+               fd = timerfd_create(clockid, TFD_NONBLOCK | TFD_CLOEXEC);
+               if (!dispatch_assume(fd >= 0)) {
+                       return;
+               }
+               timer->det_fd = fd;
+       }
+
+       if (target < INT64_MAX) {
+               struct itimerspec its = { .it_value = {
+                       .tv_sec  = target / NSEC_PER_SEC,
+                       .tv_nsec = target % NSEC_PER_SEC,
+               } };
+               dispatch_assume_zero(timerfd_settime(timer->det_fd, TFD_TIMER_ABSTIME,
+                               &its, NULL));
+               if (!timer->det_registered) {
+                       op = EPOLL_CTL_ADD;
+               } else if (!timer->det_armed) {
+                       op = EPOLL_CTL_MOD;
+               } else {
+                       return;
+               }
+       } else {
+               op = EPOLL_CTL_DEL;
+       }
+       dispatch_assume_zero(epoll_ctl(_dispatch_epfd, op, timer->det_fd, &ev));
+       timer->det_armed = timer->det_registered = (op != EPOLL_CTL_DEL);
+}
+
+void
+_dispatch_event_loop_timer_arm(uint32_t tidx, dispatch_timer_delay_s range,
+               dispatch_clock_now_cache_t nows)
+{
+       uint64_t target = range.delay;
+       target += _dispatch_time_now_cached(DISPATCH_TIMER_CLOCK(tidx), nows);
+       _dispatch_timers_heap[tidx].dth_flags |= DTH_ARMED;
+       _dispatch_timeout_program(tidx, target, range.leeway);
+}
+
+void
+_dispatch_event_loop_timer_delete(uint32_t tidx)
+{
+       _dispatch_timers_heap[tidx].dth_flags &= ~DTH_ARMED;
+       _dispatch_timeout_program(tidx, UINT64_MAX, UINT64_MAX);
+}
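+
+// Minimal standalone illustration (not part of this change) of the
+// timerfd + epoll arming done by _dispatch_timeout_program() above;
+// `epfd` stands in for _dispatch_epfd and error handling is elided.
+#if 0
+static void
+_example_timerfd_arm(int epfd)
+{
+       int tfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK | TFD_CLOEXEC);
+       struct itimerspec its = { .it_value = { .tv_sec = 1 } };
+       // relative 1s; _dispatch_timeout_program() instead passes
+       // TFD_TIMER_ABSTIME with absolute targets
+       timerfd_settime(tfd, 0, &its, NULL);
+       struct epoll_event ev = { .events = EPOLLONESHOT | EPOLLIN };
+       epoll_ctl(epfd, EPOLL_CTL_ADD, tfd, &ev);
+}
+#endif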
+
+#pragma mark dispatch_loop
+
+void
+_dispatch_event_loop_atfork_child(void)
+{
+}
+
+static void
+_dispatch_epoll_init(void *context DISPATCH_UNUSED)
+{
+       _dispatch_fork_becomes_unsafe();
+
+       unsigned int i;
+       for (i = 0; i < DSL_HASH_SIZE; i++) {
+               TAILQ_INIT(&_dispatch_sources[i]);
+       }
+
+       _dispatch_epfd = epoll_create1(EPOLL_CLOEXEC);
+       if (_dispatch_epfd < 0) {
+               DISPATCH_INTERNAL_CRASH(errno, "epoll_create1() failed");
+       }
+
+       _dispatch_eventfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+       if (_dispatch_eventfd < 0) {
+               DISPATCH_INTERNAL_CRASH(errno, "eventfd() failed");
+       }
+
+       struct epoll_event ev = {
+               .events = EPOLLIN | EPOLLFREE,
+               .data = { .u32 = DISPATCH_EPOLL_EVENTFD, },
+       };
+       int op = EPOLL_CTL_ADD;
+       if (epoll_ctl(_dispatch_epfd, op, _dispatch_eventfd, &ev) < 0) {
+               DISPATCH_INTERNAL_CRASH(errno, "epoll_ctl() failed");
+       }
+
+#if DISPATCH_USE_MGR_THREAD
+       dx_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0);
+#endif
+}
+
+void
+_dispatch_event_loop_poke(dispatch_wlh_t wlh DISPATCH_UNUSED,
+               uint64_t dq_state DISPATCH_UNUSED, uint32_t flags DISPATCH_UNUSED)
+{
+       dispatch_once_f(&epoll_init_pred, NULL, _dispatch_epoll_init);
+       dispatch_assume_zero(eventfd_write(_dispatch_eventfd, 1));
+}
+
+static void
+_dispatch_event_merge_signal(dispatch_muxnote_t dmn)
+{
+       dispatch_unote_linkage_t dul, dul_next;
+       struct signalfd_siginfo si;
+       ssize_t rc;
+
+       // Linux has surprisingly tricky semantics around signals: if the kernel
+       // finds a thread that has not masked a process-wide signal, it may
+       // deliver the signal to that thread, meaning that the signalfd may have
+       // been made readable, but the signal was consumed through the legacy
+       // delivery mechanism.
+       //
+       // Because of this we can get a misfire of the signalfd yielding EAGAIN the
+       // first time around. The _dispatch_muxnote_signal_block_and_raise() hack
+       // will kick in, the thread with the wrong mask will be fixed up, and the
+       // signal delivered to us again properly.
+       if ((rc = read(dmn->dmn_fd, &si, sizeof(si))) == sizeof(si)) {
+               TAILQ_FOREACH_SAFE(dul, &dmn->dmn_readers_head, du_link, dul_next) {
+                       dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul);
+                       dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_CLEAR, 1, 0, 0);
+               }
+       } else {
+               dispatch_assume(rc == -1 && errno == EAGAIN);
+       }
+}
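+
+// Minimal standalone illustration (not part of this change) of the signalfd
+// pattern relied on above: the signal must be blocked in the receiving
+// threads, or legacy delivery can consume it before the signalfd does.
+#if 0
+static void
+_example_signalfd(void)
+{
+       sigset_t mask;
+       sigemptyset(&mask);
+       sigaddset(&mask, SIGHUP);
+       pthread_sigmask(SIG_BLOCK, &mask, NULL); // block legacy delivery first
+       int sfd = signalfd(-1, &mask, SFD_NONBLOCK | SFD_CLOEXEC);
+       struct signalfd_siginfo si;
+       read(sfd, &si, sizeof(si)); // si.ssi_signo == SIGHUP once it fires
+}
+#endif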
+
+static uintptr_t
+_dispatch_get_buffer_size(dispatch_muxnote_t dmn, bool writer)
+{
+       int n;
+
+       if (writer ? dmn->dmn_skip_outq_ioctl : dmn->dmn_skip_inq_ioctl) {
+               return 1;
+       }
+
+       if (ioctl((int)dmn->dmn_ident, writer ? SIOCOUTQ : SIOCINQ, &n) != 0) {
+               switch (errno) {
+               case EINVAL:
+               case ENOTTY:
+                       // this file descriptor actually doesn't support the buffer
+                       // size ioctl; remember that for next time to avoid the syscall
+                       break;
+               default:
+                       dispatch_assume_zero(errno);
+                       break;
+               }
+               if (writer) {
+                       dmn->dmn_skip_outq_ioctl = true;
+               } else {
+                       dmn->dmn_skip_inq_ioctl = true;
+               }
+               return 1;
+       }
+       return (uintptr_t)n;
+}
+
+static void
+_dispatch_event_merge_fd(dispatch_muxnote_t dmn, uint32_t events)
+{
+       dispatch_unote_linkage_t dul, dul_next;
+       uintptr_t data;
+
+       if (events & EPOLLIN) {
+               data = _dispatch_get_buffer_size(dmn, false);
+               TAILQ_FOREACH_SAFE(dul, &dmn->dmn_readers_head, du_link, dul_next) {
+                       dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul);
+                       dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, ~data, 0, 0);
+               }
+       }
+
+       if (events & EPOLLOUT) {
+               data = _dispatch_get_buffer_size(dmn, true);
+               TAILQ_FOREACH_SAFE(dul, &dmn->dmn_writers_head, du_link, dul_next) {
+                       dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul);
+                       dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, ~data, 0, 0);
+               }
+       }
+}
+
+DISPATCH_NOINLINE
+void
+_dispatch_event_loop_drain(uint32_t flags)
+{
+       struct epoll_event ev[DISPATCH_EPOLL_MAX_EVENT_COUNT];
+       int i, r;
+       int timeout = (flags & KEVENT_FLAG_IMMEDIATE) ? 0 : -1;
+
+retry:
+       r = epoll_wait(_dispatch_epfd, ev, countof(ev), timeout);
+       if (unlikely(r == -1)) {
+               int err = errno;
+               switch (err) {
+               case EINTR:
+                       goto retry;
+               case EBADF:
+                       DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors");
+                       break;
+               default:
+                       (void)dispatch_assume_zero(err);
+                       break;
+               }
+               return;
+       }
+
+       for (i = 0; i < r; i++) {
+               dispatch_muxnote_t dmn;
+               eventfd_t value;
+
+               if (ev[i].events & EPOLLFREE) {
+                       DISPATCH_CLIENT_CRASH(0, "Do not close random Unix descriptors");
+               }
+
+               switch (ev[i].data.u32) {
+               case DISPATCH_EPOLL_EVENTFD:
+                       dispatch_assume_zero(eventfd_read(_dispatch_eventfd, &value));
+                       break;
+
+               case DISPATCH_EPOLL_CLOCK_WALL:
+                       _dispatch_event_merge_timer(DISPATCH_CLOCK_WALL);
+                       break;
+
+               case DISPATCH_EPOLL_CLOCK_MACH:
+                       _dispatch_event_merge_timer(DISPATCH_CLOCK_MACH);
+                       break;
+
+               default:
+                       dmn = ev[i].data.ptr;
+                       switch (dmn->dmn_filter) {
+                       case EVFILT_SIGNAL:
+                               _dispatch_event_merge_signal(dmn);
+                               break;
+
+                       case EVFILT_READ:
+                               _dispatch_event_merge_fd(dmn, ev[i].events);
+                               break;
+                       }
+               }
+       }
+}
+
+void
+_dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc,
+               dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state)
+{
+       (void)dsc; (void)wlh; (void)old_state; (void)new_state;
+}
+
+void
+_dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc)
+{
+       if (dsc->dsc_release_storage) {
+               _dispatch_queue_release_storage(dsc->dc_data);
+       }
+}
+
+void
+_dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state,
+               uint64_t new_state, uint32_t flags)
+{
+       (void)wlh; (void)old_state; (void)new_state; (void)flags;
+}
+
+#if DISPATCH_WLH_DEBUG
+void
+_dispatch_event_loop_assert_not_owned(dispatch_wlh_t wlh)
+{
+       (void)wlh;
+}
+#endif
+
+void
+_dispatch_event_loop_leave_immediate(dispatch_wlh_t wlh, uint64_t dq_state)
+{
+       (void)wlh; (void)dq_state;
+}
+
+#endif // DISPATCH_EVENT_BACKEND_EPOLL
diff --git a/src/event/event_internal.h b/src/event/event_internal.h
new file mode 100644 (file)
index 0000000..842c4ee
--- /dev/null
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+/*
+ * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
+ * which are subject to change in future releases of Mac OS X. Any applications
+ * relying on these interfaces WILL break.
+ */
+
+#ifndef __DISPATCH_EVENT_EVENT_INTERNAL__
+#define __DISPATCH_EVENT_EVENT_INTERNAL__
+
+#include "event_config.h"
+
+struct dispatch_sync_context_s;
+typedef struct dispatch_wlh_s *dispatch_wlh_t; // opaque handle
+#define DISPATCH_WLH_ANON       ((dispatch_wlh_t)(void*)(~0ul))
+#define DISPATCH_WLH_MANAGER    ((dispatch_wlh_t)(void*)(~2ul))
+
+#define DISPATCH_UNOTE_DATA_ACTION_SIZE 2
+
+#define DISPATCH_UNOTE_CLASS_HEADER() \
+       dispatch_source_type_t du_type; \
+       uintptr_t du_owner_wref; /* "weak" back reference to the owner object */ \
+       dispatch_wlh_t du_wlh; \
+       uint32_t  du_ident; \
+       int8_t    du_filter; \
+       os_atomic(bool) dmsr_notification_armed; \
+       uint16_t  du_data_action : DISPATCH_UNOTE_DATA_ACTION_SIZE; \
+       uint16_t  du_is_direct : 1; \
+       uint16_t  du_is_timer : 1; \
+       uint16_t  du_memorypressure_override : 1; \
+       uint16_t  du_vmpressure_override : 1; \
+       uint16_t  du_can_be_wlh : 1; \
+       uint16_t  dmr_async_reply : 1; \
+       uint16_t  dmrr_handler_is_block : 1; \
+       uint16_t  du_unused : 7; \
+       uint32_t  du_fflags; \
+       dispatch_priority_t du_priority
+
+#define _dispatch_ptr2wref(ptr) (~(uintptr_t)(ptr))
+#define _dispatch_wref2ptr(ref) ((void*)~(ref))
+#define _dispatch_source_from_refs(dr) \
+               ((dispatch_source_t)_dispatch_wref2ptr((dr)->du_owner_wref))
+
+DISPATCH_ENUM(dispatch_unote_action, uint8_t,
+    DISPATCH_UNOTE_ACTION_DATA_OR = 0,
+    DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET,
+    DISPATCH_UNOTE_ACTION_DATA_SET,
+    DISPATCH_UNOTE_ACTION_DATA_ADD,
+       DISPATCH_UNOTE_ACTION_LAST = DISPATCH_UNOTE_ACTION_DATA_ADD
+);
+_Static_assert(DISPATCH_UNOTE_ACTION_LAST <
+               (1 << DISPATCH_UNOTE_DATA_ACTION_SIZE),
+               "DISPATCH_UNOTE_ACTION_LAST too large for du_data_action field");
+
+typedef struct dispatch_unote_class_s {
+       DISPATCH_UNOTE_CLASS_HEADER();
+} *dispatch_unote_class_t;
+
+
+enum {
+       DS_EVENT_HANDLER = 0,
+       DS_CANCEL_HANDLER,
+       DS_REGISTN_HANDLER,
+};
+
+#define DISPATCH_SOURCE_REFS_HEADER() \
+       DISPATCH_UNOTE_CLASS_HEADER(); \
+       struct dispatch_continuation_s *volatile ds_handler[3]
+
+// Source state which may contain references to the source object
+// Separately allocated so that 'leaks' can see sources <rdar://problem/9050566>
+typedef struct dispatch_source_refs_s {
+       DISPATCH_SOURCE_REFS_HEADER();
+} *dispatch_source_refs_t;
+
+typedef struct dispatch_timer_delay_s {
+       uint64_t delay, leeway;
+} dispatch_timer_delay_s;
+
+#define DTH_INVALID_ID  (~0u)
+#define DTH_TARGET_ID   0u
+#define DTH_DEADLINE_ID 1u
+#define DTH_ID_COUNT    2u
+
+typedef struct dispatch_timer_source_s {
+       union {
+               struct {
+                       uint64_t target;
+                       uint64_t deadline;
+               };
+               uint64_t heap_key[DTH_ID_COUNT];
+       };
+       uint64_t interval;
+} *dispatch_timer_source_t;
+
+typedef struct dispatch_timer_config_s {
+       struct dispatch_timer_source_s dtc_timer;
+       dispatch_clock_t dtc_clock;
+} *dispatch_timer_config_t;
+
+typedef struct dispatch_timer_source_refs_s {
+       DISPATCH_SOURCE_REFS_HEADER();
+       struct dispatch_timer_source_s dt_timer;
+       struct dispatch_timer_config_s *dt_pending_config;
+       uint32_t dt_heap_entry[DTH_ID_COUNT];
+} *dispatch_timer_source_refs_t;
+
+typedef struct dispatch_timer_heap_s {
+       uint64_t dth_target, dth_deadline;
+       uint32_t dth_count;
+       uint16_t dth_segments;
+#define DTH_ARMED  1u
+       uint16_t dth_flags;
+       dispatch_timer_source_refs_t dth_min[DTH_ID_COUNT];
+       void **dth_heap;
+} *dispatch_timer_heap_t;
+
+#if HAVE_MACH
+#if DISPATCH_MACHPORT_DEBUG
+void dispatch_debug_machport(mach_port_t name, const char *str);
+#define _dispatch_debug_machport(name) \
+               dispatch_debug_machport((name), __func__)
+#else
+#define _dispatch_debug_machport(name) ((void)(name))
+#endif // DISPATCH_MACHPORT_DEBUG
+
+// Mach channel state which may contain references to the channel object
+// layout must match dispatch_source_refs_s
+struct dispatch_mach_recv_refs_s {
+       DISPATCH_UNOTE_CLASS_HEADER();
+       dispatch_mach_handler_function_t dmrr_handler_func;
+       void *dmrr_handler_ctxt;
+};
+typedef struct dispatch_mach_recv_refs_s *dispatch_mach_recv_refs_t;
+
+struct dispatch_mach_reply_refs_s {
+       DISPATCH_UNOTE_CLASS_HEADER();
+       dispatch_priority_t dmr_priority;
+       void *dmr_ctxt;
+       voucher_t dmr_voucher;
+       TAILQ_ENTRY(dispatch_mach_reply_refs_s) dmr_list;
+       mach_port_t dmr_waiter_tid;
+};
+typedef struct dispatch_mach_reply_refs_s *dispatch_mach_reply_refs_t;
+
+#define _DISPATCH_MACH_STATE_UNUSED_MASK        0xffffffa000000000ull
+#define DISPATCH_MACH_STATE_DIRTY               0x0000002000000000ull
+#define DISPATCH_MACH_STATE_PENDING_BARRIER     0x0000001000000000ull
+#define DISPATCH_MACH_STATE_RECEIVED_OVERRIDE   0x0000000800000000ull
+#define DISPATCH_MACH_STATE_MAX_QOS_MASK        0x0000000700000000ull
+#define DISPATCH_MACH_STATE_MAX_QOS_SHIFT       32
+#define DISPATCH_MACH_STATE_UNLOCK_MASK         0x00000000ffffffffull
+
+struct dispatch_mach_send_refs_s {
+       DISPATCH_UNOTE_CLASS_HEADER();
+       dispatch_mach_msg_t dmsr_checkin;
+       TAILQ_HEAD(, dispatch_mach_reply_refs_s) dmsr_replies;
+       dispatch_unfair_lock_s dmsr_replies_lock;
+#define DISPATCH_MACH_DISCONNECT_MAGIC_BASE (0x80000000)
+#define DISPATCH_MACH_NEVER_INSTALLED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 0)
+#define DISPATCH_MACH_NEVER_CONNECTED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 1)
+       uint32_t volatile dmsr_disconnect_cnt;
+       DISPATCH_UNION_LE(uint64_t volatile dmsr_state,
+               dispatch_unfair_lock_s dmsr_state_lock,
+               uint32_t dmsr_state_bits
+       ) DISPATCH_ATOMIC64_ALIGN;
+       struct dispatch_object_s *volatile dmsr_tail;
+       struct dispatch_object_s *volatile dmsr_head;
+       mach_port_t dmsr_send, dmsr_checkin_port;
+};
+typedef struct dispatch_mach_send_refs_s *dispatch_mach_send_refs_t;
+
+void _dispatch_mach_notification_set_armed(dispatch_mach_send_refs_t dmsr);
+
+struct dispatch_xpc_term_refs_s {
+       DISPATCH_UNOTE_CLASS_HEADER();
+};
+typedef struct dispatch_xpc_term_refs_s *dispatch_xpc_term_refs_t;
+#endif // HAVE_MACH
+
+typedef union dispatch_unote_u {
+       dispatch_unote_class_t _du;
+       dispatch_source_refs_t _dr;
+       dispatch_timer_source_refs_t _dt;
+#if HAVE_MACH
+       dispatch_mach_recv_refs_t _dmrr;
+       dispatch_mach_send_refs_t _dmsr;
+       dispatch_mach_reply_refs_t _dmr;
+       dispatch_xpc_term_refs_t _dxtr;
+#endif
+} dispatch_unote_t DISPATCH_TRANSPARENT_UNION;
+
+#define DISPATCH_UNOTE_NULL ((dispatch_unote_t){ ._du = NULL })
+
+#if TARGET_OS_EMBEDDED
+#define DSL_HASH_SIZE  64u // must be a power of two
+#else
+#define DSL_HASH_SIZE 256u // must be a power of two
+#endif
+#define DSL_HASH(x) ((x) & (DSL_HASH_SIZE - 1))
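+// e.g. with DSL_HASH_SIZE == 256, DSL_HASH(0x503) == 0x03: the low bits of
+// the ident (fd, signal number, ...) select one of the 256 buckets.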
+
+typedef struct dispatch_unote_linkage_s {
+       TAILQ_ENTRY(dispatch_unote_linkage_s) du_link;
+       struct dispatch_muxnote_s *du_muxnote;
+} DISPATCH_ATOMIC64_ALIGN *dispatch_unote_linkage_t;
+
+#define DU_UNREGISTER_IMMEDIATE_DELETE 0x01
+#define DU_UNREGISTER_ALREADY_DELETED  0x02
+#define DU_UNREGISTER_DISCONNECTED     0x04
+#define DU_UNREGISTER_REPLY_REMOVE     0x08
+
+typedef struct dispatch_source_type_s {
+       const char *dst_kind;
+       int8_t     dst_filter;
+       uint8_t    dst_per_trigger_qos : 1;
+       uint16_t   dst_flags;
+       uint32_t   dst_fflags;
+       uint32_t   dst_mask;
+       uint32_t   dst_size;
+#if DISPATCH_EVENT_BACKEND_KEVENT
+       uint32_t   dst_data;
+#endif
+
+       dispatch_unote_t (*dst_create)(dispatch_source_type_t dst,
+                       uintptr_t handle, unsigned long mask);
+#if DISPATCH_EVENT_BACKEND_KEVENT
+       bool (*dst_update_mux)(struct dispatch_muxnote_s *dmn);
+#endif
+       void (*dst_merge_evt)(dispatch_unote_t du, uint32_t flags, uintptr_t data,
+                       uintptr_t status, pthread_priority_t pp);
+#if HAVE_MACH
+       void (*dst_merge_msg)(dispatch_unote_t du, uint32_t flags,
+                       mach_msg_header_t *msg, mach_msg_size_t sz);
+#endif
+} dispatch_source_type_s;
+
+#define dux_create(dst, handle, mask)  (dst)->dst_create(dst, handle, mask)
+#define dux_merge_evt(du, ...) (du)->du_type->dst_merge_evt(du, __VA_ARGS__)
+#define dux_merge_msg(du, ...) (du)->du_type->dst_merge_msg(du, __VA_ARGS__)
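+// e.g. dux_create(&_dispatch_source_type_signal, SIGHUP, 0) dispatches to
+// _dispatch_source_signal_create() through the dst_create slot (illustration).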
+
+extern const dispatch_source_type_s _dispatch_source_type_after;
+
+#if HAVE_MACH
+extern const dispatch_source_type_s _dispatch_source_type_mach_recv_direct;
+extern const dispatch_source_type_s _dispatch_mach_type_send;
+extern const dispatch_source_type_s _dispatch_mach_type_recv;
+extern const dispatch_source_type_s _dispatch_mach_type_reply;
+extern const dispatch_source_type_s _dispatch_xpc_type_sigterm;
+#endif
+
+#pragma mark -
+#pragma mark deferred items
+
+#if DISPATCH_EVENT_BACKEND_KEVENT
+#if DISPATCH_USE_KEVENT_QOS
+typedef struct kevent_qos_s dispatch_kevent_s;
+#else
+typedef struct kevent dispatch_kevent_s;
+#endif
+typedef dispatch_kevent_s *dispatch_kevent_t;
+#endif // DISPATCH_EVENT_BACKEND_KEVENT
+
+#define DISPATCH_DEFERRED_ITEMS_EVENT_COUNT 16
+
+typedef struct dispatch_deferred_items_s {
+       dispatch_queue_t ddi_stashed_rq;
+       dispatch_object_t ddi_stashed_dou;
+       dispatch_qos_t ddi_stashed_qos;
+#if DISPATCH_EVENT_BACKEND_KEVENT
+       dispatch_kevent_t ddi_eventlist;
+       uint16_t ddi_nevents;
+       uint16_t ddi_maxevents;
+       bool     ddi_can_stash;
+       uint16_t ddi_wlh_needs_delete : 1;
+       uint16_t ddi_wlh_needs_update : 1;
+       uint16_t ddi_wlh_servicing : 1;
+#endif
+} dispatch_deferred_items_s, *dispatch_deferred_items_t;
+
+#pragma mark -
+#pragma mark inlines
+
+#if DISPATCH_PURE_C
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_deferred_items_set(dispatch_deferred_items_t ddi)
+{
+       _dispatch_thread_setspecific(dispatch_deferred_items_key, (void *)ddi);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_deferred_items_t
+_dispatch_deferred_items_get(void)
+{
+       return (dispatch_deferred_items_t)
+                       _dispatch_thread_getspecific(dispatch_deferred_items_key);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_needs_to_return_to_kernel(void)
+{
+       return (uintptr_t)_dispatch_thread_getspecific(dispatch_r2k_key) != 0;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_set_return_to_kernel(void)
+{
+       _dispatch_thread_setspecific(dispatch_r2k_key, (void *)1);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_clear_return_to_kernel(void)
+{
+       _dispatch_thread_setspecific(dispatch_r2k_key, (void *)0);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_unote_registered(dispatch_unote_t du)
+{
+       return du._du->du_wlh != NULL;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_unote_wlh_changed(dispatch_unote_t du, dispatch_wlh_t expected_wlh)
+{
+       dispatch_wlh_t wlh = du._du->du_wlh;
+       return wlh && wlh != DISPATCH_WLH_ANON && wlh != expected_wlh;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_unote_linkage_t
+_dispatch_unote_get_linkage(dispatch_unote_t du)
+{
+       dispatch_assert(!du._du->du_is_direct);
+       return (dispatch_unote_linkage_t)((char *)du._du
+                       - sizeof(struct dispatch_unote_linkage_s));
+}
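+
+// Layout note: a non-direct unote is allocated as
+// [ dispatch_unote_linkage_s ][ unote ... ], so the linkage sits immediately
+// before the unote; this converter and _dispatch_unote_linkage_get_unote()
+// below are the two directions of that pointer arithmetic.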
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_unote_needs_rearm(dispatch_unote_t du)
+{
+       return du._du->du_type->dst_flags & (EV_ONESHOT | EV_DISPATCH);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_unote_t
+_dispatch_unote_linkage_get_unote(dispatch_unote_linkage_t dul)
+{
+       return (dispatch_unote_t){ ._du = (dispatch_unote_class_t)(dul + 1) };
+}
+
+#endif // DISPATCH_PURE_C
+
+#pragma mark -
+#pragma mark prototypes
+
+#if DISPATCH_HAVE_TIMER_QOS
+#define DISPATCH_TIMER_QOS_NORMAL       0u
+#define DISPATCH_TIMER_QOS_CRITICAL     1u
+#define DISPATCH_TIMER_QOS_BACKGROUND   2u
+#define DISPATCH_TIMER_QOS_COUNT        3u
+#else
+#define DISPATCH_TIMER_QOS_NORMAL       0u
+#define DISPATCH_TIMER_QOS_COUNT        1u
+#endif
+
+#define DISPATCH_TIMER_QOS(tidx)   (((uintptr_t)(tidx) >> 1) & 3u)
+#define DISPATCH_TIMER_CLOCK(tidx) (dispatch_clock_t)((tidx) & 1u)
+
+#define DISPATCH_TIMER_INDEX(clock, qos) ((qos) << 1 | (clock))
+#define DISPATCH_TIMER_COUNT \
+               DISPATCH_TIMER_INDEX(0, DISPATCH_TIMER_QOS_COUNT)
+#define DISPATCH_TIMER_IDENT_CANCELED    (~0u)
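+// Worked example, given DISPATCH_CLOCK_WALL == 0 and DISPATCH_CLOCK_MACH == 1
+// (the ordering implied by the heap initializers): DISPATCH_TIMER_INDEX(
+// DISPATCH_CLOCK_MACH, DISPATCH_TIMER_QOS_BACKGROUND) == (2u << 1 | 1u) == 5,
+// and decoding 5 gives DISPATCH_TIMER_CLOCK(5) == DISPATCH_CLOCK_MACH and
+// DISPATCH_TIMER_QOS(5) == DISPATCH_TIMER_QOS_BACKGROUND.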
+
+extern struct dispatch_timer_heap_s _dispatch_timers_heap[DISPATCH_TIMER_COUNT];
+extern bool _dispatch_timers_reconfigure, _dispatch_timers_expired;
+extern uint32_t _dispatch_timers_processing_mask;
+#if DISPATCH_USE_DTRACE
+extern uint32_t _dispatch_timers_will_wake;
+#endif
+
+dispatch_unote_t _dispatch_unote_create_with_handle(dispatch_source_type_t dst,
+               uintptr_t handle, unsigned long mask);
+dispatch_unote_t _dispatch_unote_create_with_fd(dispatch_source_type_t dst,
+               uintptr_t handle, unsigned long mask);
+dispatch_unote_t _dispatch_unote_create_without_handle(
+               dispatch_source_type_t dst, uintptr_t handle, unsigned long mask);
+
+bool _dispatch_unote_register(dispatch_unote_t du, dispatch_wlh_t wlh,
+               dispatch_priority_t pri);
+void _dispatch_unote_resume(dispatch_unote_t du);
+bool _dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags);
+void _dispatch_unote_dispose(dispatch_unote_t du);
+
+void _dispatch_event_loop_atfork_child(void);
+#define DISPATCH_EVENT_LOOP_CONSUME_2 DISPATCH_WAKEUP_CONSUME_2
+#define DISPATCH_EVENT_LOOP_OVERRIDE  0x80000000
+void _dispatch_event_loop_poke(dispatch_wlh_t wlh, uint64_t dq_state,
+               uint32_t flags);
+void _dispatch_event_loop_wake_owner(struct dispatch_sync_context_s *dsc,
+               dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state);
+void _dispatch_event_loop_wait_for_ownership(
+               struct dispatch_sync_context_s *dsc);
+void _dispatch_event_loop_end_ownership(dispatch_wlh_t wlh,
+               uint64_t old_state, uint64_t new_state, uint32_t flags);
+#if DISPATCH_WLH_DEBUG
+void _dispatch_event_loop_assert_not_owned(dispatch_wlh_t wlh);
+#else
+#undef _dispatch_event_loop_assert_not_owned
+#define _dispatch_event_loop_assert_not_owned(wlh) ((void)wlh)
+#endif
+void _dispatch_event_loop_leave_immediate(dispatch_wlh_t wlh, uint64_t dq_state);
+#if DISPATCH_EVENT_BACKEND_KEVENT
+void _dispatch_event_loop_leave_deferred(dispatch_wlh_t wlh,
+               uint64_t dq_state);
+void _dispatch_event_loop_merge(dispatch_kevent_t events, int nevents);
+#endif
+void _dispatch_event_loop_drain(uint32_t flags);
+void _dispatch_event_loop_timer_arm(unsigned int tidx,
+               dispatch_timer_delay_s range, dispatch_clock_now_cache_t nows);
+void _dispatch_event_loop_timer_delete(unsigned int tidx);
+
+#endif /* __DISPATCH_EVENT_EVENT_INTERNAL__ */
diff --git a/src/event/event_kevent.c b/src/event/event_kevent.c
new file mode 100644 (file)
index 0000000..8fe76d5
--- /dev/null
@@ -0,0 +1,2208 @@
+/*
+ * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#include "internal.h"
+#if DISPATCH_EVENT_BACKEND_KEVENT
+#if HAVE_MACH
+#include "protocol.h"
+#include "protocolServer.h"
+#endif
+
+#if DISPATCH_USE_KEVENT_WORKQUEUE && !DISPATCH_USE_KEVENT_QOS
+#error unsupported configuration
+#endif
+
+#define DISPATCH_KEVENT_MUXED_MARKER  1ul
+#define DISPATCH_MACH_AUDIT_TOKEN_PID (5)
+
+typedef struct dispatch_muxnote_s {
+       TAILQ_ENTRY(dispatch_muxnote_s) dmn_list;
+       TAILQ_HEAD(, dispatch_unote_linkage_s) dmn_unotes_head;
+       dispatch_wlh_t dmn_wlh;
+       dispatch_kevent_s dmn_kev;
+} *dispatch_muxnote_t;
+
+static bool _dispatch_timers_force_max_leeway;
+static int _dispatch_kq = -1;
+static struct {
+       dispatch_once_t pred;
+       dispatch_unfair_lock_s lock;
+} _dispatch_muxnotes;
+#if !DISPATCH_USE_KEVENT_WORKQUEUE
+#define _dispatch_muxnotes_lock() \
+               _dispatch_unfair_lock_lock(&_dispatch_muxnotes.lock)
+#define _dispatch_muxnotes_unlock() \
+               _dispatch_unfair_lock_unlock(&_dispatch_muxnotes.lock)
+#else
+#define _dispatch_muxnotes_lock()
+#define _dispatch_muxnotes_unlock()
+#endif // !DISPATCH_USE_KEVENT_WORKQUEUE
+
+DISPATCH_CACHELINE_ALIGN
+static TAILQ_HEAD(dispatch_muxnote_bucket_s, dispatch_muxnote_s)
+_dispatch_sources[DSL_HASH_SIZE];
+
+#define DISPATCH_NOTE_CLOCK_WALL NOTE_MACH_CONTINUOUS_TIME
+#define DISPATCH_NOTE_CLOCK_MACH 0
+
+static const uint32_t _dispatch_timer_index_to_fflags[] = {
+#define DISPATCH_TIMER_FFLAGS_INIT(kind, qos, note) \
+       [DISPATCH_TIMER_INDEX(DISPATCH_CLOCK_##kind, DISPATCH_TIMER_QOS_##qos)] = \
+                       DISPATCH_NOTE_CLOCK_##kind | NOTE_ABSOLUTE | \
+                       NOTE_NSECONDS | NOTE_LEEWAY | (note)
+       DISPATCH_TIMER_FFLAGS_INIT(WALL, NORMAL, 0),
+       DISPATCH_TIMER_FFLAGS_INIT(MACH, NORMAL, 0),
+#if DISPATCH_HAVE_TIMER_QOS
+       DISPATCH_TIMER_FFLAGS_INIT(WALL, CRITICAL, NOTE_CRITICAL),
+       DISPATCH_TIMER_FFLAGS_INIT(MACH, CRITICAL, NOTE_CRITICAL),
+       DISPATCH_TIMER_FFLAGS_INIT(WALL, BACKGROUND, NOTE_BACKGROUND),
+       DISPATCH_TIMER_FFLAGS_INIT(MACH, BACKGROUND, NOTE_BACKGROUND),
+#endif
+#undef DISPATCH_TIMER_FFLAGS_INIT
+};
+
+static void _dispatch_kevent_timer_drain(dispatch_kevent_t ke);
+
+#pragma mark -
+#pragma mark kevent debug
+
+DISPATCH_NOINLINE
+static const char *
+_evfiltstr(short filt)
+{
+       switch (filt) {
+#define _evfilt2(f) case (f): return #f
+       _evfilt2(EVFILT_READ);
+       _evfilt2(EVFILT_WRITE);
+       _evfilt2(EVFILT_SIGNAL);
+       _evfilt2(EVFILT_TIMER);
+
+#ifdef DISPATCH_EVENT_BACKEND_KEVENT
+       _evfilt2(EVFILT_AIO);
+       _evfilt2(EVFILT_VNODE);
+       _evfilt2(EVFILT_PROC);
+#if HAVE_MACH
+       _evfilt2(EVFILT_MACHPORT);
+       _evfilt2(DISPATCH_EVFILT_MACH_NOTIFICATION);
+#endif
+       _evfilt2(EVFILT_FS);
+       _evfilt2(EVFILT_USER);
+#ifdef EVFILT_SOCK
+       _evfilt2(EVFILT_SOCK);
+#endif
+#ifdef EVFILT_MEMORYSTATUS
+       _evfilt2(EVFILT_MEMORYSTATUS);
+#endif
+#endif // DISPATCH_EVENT_BACKEND_KEVENT
+
+       _evfilt2(DISPATCH_EVFILT_TIMER);
+       _evfilt2(DISPATCH_EVFILT_CUSTOM_ADD);
+       _evfilt2(DISPATCH_EVFILT_CUSTOM_OR);
+       _evfilt2(DISPATCH_EVFILT_CUSTOM_REPLACE);
+       default:
+               return "EVFILT_missing";
+       }
+}
+
+#if DISPATCH_DEBUG
+static const char *
+_evflagstr2(uint16_t *flagsp)
+{
+#define _evflag2(f) \
+       if ((*flagsp & (f)) == (f) && (f)) { \
+               *flagsp &= ~(f); \
+               return #f "|"; \
+       }
+       _evflag2(EV_ADD);
+       _evflag2(EV_DELETE);
+       _evflag2(EV_ENABLE);
+       _evflag2(EV_DISABLE);
+       _evflag2(EV_ONESHOT);
+       _evflag2(EV_CLEAR);
+       _evflag2(EV_RECEIPT);
+       _evflag2(EV_DISPATCH);
+       _evflag2(EV_UDATA_SPECIFIC);
+#ifdef EV_POLL
+       _evflag2(EV_POLL);
+#endif
+#ifdef EV_OOBAND
+       _evflag2(EV_OOBAND);
+#endif
+       _evflag2(EV_ERROR);
+       _evflag2(EV_EOF);
+       _evflag2(EV_VANISHED);
+       *flagsp = 0;
+       return "EV_UNKNOWN ";
+}
+
+DISPATCH_NOINLINE
+static const char *
+_evflagstr(uint16_t flags, char *str, size_t strsize)
+{
+       str[0] = 0;
+       while (flags) {
+               strlcat(str, _evflagstr2(&flags), strsize);
+       }
+       size_t sz = strlen(str);
+       if (sz) str[sz-1] = 0;
+       return str;
+}
+
+DISPATCH_NOINLINE
+static void
+dispatch_kevent_debug(const char *verb, const dispatch_kevent_s *kev,
+               int i, int n, const char *function, unsigned int line)
+{
+       char flagstr[256];
+       char i_n[31];
+
+       if (n > 1) {
+               snprintf(i_n, sizeof(i_n), "%d/%d ", i + 1, n);
+       } else {
+               i_n[0] = '\0';
+       }
+       if (verb == NULL) {
+               if (kev->flags & EV_DELETE) {
+                       verb = "deleting";
+               } else if (kev->flags & EV_ADD) {
+                       verb = "adding";
+               } else {
+                       verb = "updating";
+               }
+       }
+#if DISPATCH_USE_KEVENT_QOS
+       _dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, "
+                       "flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx, "
+                       "qos = 0x%x, ext[0] = 0x%llx, ext[1] = 0x%llx, ext[2] = 0x%llx, "
+                       "ext[3] = 0x%llx }: %s #%u", verb, kev, i_n, kev->ident,
+                       _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr,
+                       sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata,
+                       kev->qos, kev->ext[0], kev->ext[1], kev->ext[2], kev->ext[3],
+                       function, line);
+#else
+       _dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, "
+                       "flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx}: "
+                       "%s #%u", verb, kev, i_n,
+                       kev->ident, _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr,
+                       sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata,
+                       function, line);
+#endif
+}
+#else
+static inline void
+dispatch_kevent_debug(const char *verb, const dispatch_kevent_s *kev,
+               int i, int n, const char *function, unsigned int line)
+{
+       (void)verb; (void)kev; (void)i; (void)n; (void)function; (void)line;
+}
+#endif // DISPATCH_DEBUG
+#define _dispatch_kevent_debug_n(verb, _kev, i, n) \
+               dispatch_kevent_debug(verb, _kev, i, n, __FUNCTION__, __LINE__)
+#define _dispatch_kevent_debug(verb, _kev) \
+               _dispatch_kevent_debug_n(verb, _kev, 0, 0)
+#if DISPATCH_MGR_QUEUE_DEBUG
+#define _dispatch_kevent_mgr_debug(verb, kev) _dispatch_kevent_debug(verb, kev)
+#else
+#define _dispatch_kevent_mgr_debug(verb, kev) ((void)verb, (void)kev)
+#endif // DISPATCH_MGR_QUEUE_DEBUG
+#if DISPATCH_WLH_DEBUG
+#define _dispatch_kevent_wlh_debug(verb, kev) _dispatch_kevent_debug(verb, kev)
+#else
+#define _dispatch_kevent_wlh_debug(verb, kev)  ((void)verb, (void)kev)
+#endif // DISPATCH_WLH_DEBUG
+
+#if DISPATCH_MACHPORT_DEBUG
+#ifndef MACH_PORT_TYPE_SPREQUEST
+#define MACH_PORT_TYPE_SPREQUEST 0x40000000
+#endif
+
+DISPATCH_NOINLINE
+void
+dispatch_debug_machport(mach_port_t name, const char* str)
+{
+       mach_port_type_t type;
+       mach_msg_bits_t ns = 0, nr = 0, nso = 0, nd = 0;
+       unsigned int dnreqs = 0, dnrsiz;
+       kern_return_t kr = mach_port_type(mach_task_self(), name, &type);
+       if (kr) {
+               _dispatch_log("machport[0x%08x] = { error(0x%x) \"%s\" }: %s", name,
+                               kr, mach_error_string(kr), str);
+               return;
+       }
+       if (type & MACH_PORT_TYPE_SEND) {
+               (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
+                               MACH_PORT_RIGHT_SEND, &ns));
+       }
+       if (type & MACH_PORT_TYPE_SEND_ONCE) {
+               (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
+                               MACH_PORT_RIGHT_SEND_ONCE, &nso));
+       }
+       if (type & MACH_PORT_TYPE_DEAD_NAME) {
+               (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
+                               MACH_PORT_RIGHT_DEAD_NAME, &nd));
+       }
+       if (type & (MACH_PORT_TYPE_RECEIVE|MACH_PORT_TYPE_SEND)) {
+               kr = mach_port_dnrequest_info(mach_task_self(), name, &dnrsiz, &dnreqs);
+               if (kr != KERN_INVALID_RIGHT) (void)dispatch_assume_zero(kr);
+       }
+       if (type & MACH_PORT_TYPE_RECEIVE) {
+               mach_port_status_t status = { .mps_pset = 0, };
+               mach_msg_type_number_t cnt = MACH_PORT_RECEIVE_STATUS_COUNT;
+               (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
+                               MACH_PORT_RIGHT_RECEIVE, &nr));
+               (void)dispatch_assume_zero(mach_port_get_attributes(mach_task_self(),
+                               name, MACH_PORT_RECEIVE_STATUS, (void*)&status, &cnt));
+               _dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) "
+                               "dnreqs(%03u) spreq(%s) nsreq(%s) pdreq(%s) srights(%s) "
+                               "sorights(%03u) qlim(%03u) msgcount(%03u) mkscount(%03u) "
+                               "seqno(%03u) }: %s", name, nr, ns, nso, nd, dnreqs,
+                               type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N",
+                               status.mps_nsrequest ? "Y":"N", status.mps_pdrequest ? "Y":"N",
+                               status.mps_srights ? "Y":"N", status.mps_sorights,
+                               status.mps_qlimit, status.mps_msgcount, status.mps_mscount,
+                               status.mps_seqno, str);
+       } else if (type & (MACH_PORT_TYPE_SEND|MACH_PORT_TYPE_SEND_ONCE|
+                       MACH_PORT_TYPE_DEAD_NAME)) {
+               _dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) "
+                               "dnreqs(%03u) spreq(%s) }: %s", name, nr, ns, nso, nd, dnreqs,
+                               type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N", str);
+       } else {
+               _dispatch_log("machport[0x%08x] = { type(0x%08x) }: %s", name, type,
+                               str);
+       }
+}
+#endif
+
+#pragma mark dispatch_kevent_t
+
+#if HAVE_MACH
+
+static dispatch_once_t _dispatch_mach_host_port_pred;
+static mach_port_t _dispatch_mach_host_port;
+
+static inline void*
+_dispatch_kevent_mach_msg_buf(dispatch_kevent_t ke)
+{
+       return (void*)ke->ext[0];
+}
+
+static inline mach_msg_size_t
+_dispatch_kevent_mach_msg_size(dispatch_kevent_t ke)
+{
+       // ext[1] holds the buffer size in the successful receive case, but the
+       // message size (like msgh_size) in the MACH_RCV_TOO_LARGE case, i.e.
+       // the trailer size still needs to be added.
+       return (mach_msg_size_t)ke->ext[1];
+}
+
+static void _dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke);
+static inline void _dispatch_mach_host_calendar_change_register(void);
+
+// DISPATCH_MACH_NOTIFICATION_ARMED muxnotes aren't registered with kevent
+// for real, but with mach_port_request_notification()
+//
+// the kevent structure is used for bookkeeping:
+// - ident, filter, flags and fflags have their usual meaning
+// - data is used to monitor the actual state of the
+//   mach_port_request_notification()
+// - ext[0] is a boolean that tracks whether the notification is armed or not
+#define DISPATCH_MACH_NOTIFICATION_ARMED(dk) ((dk)->ext[0])
+#endif
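+
+// Illustrative sketch (not compiled): how the repurposed kevent fields above
+// are read and written for a mach-notification muxnote. Assumes a muxnote
+// obtained via _dispatch_mach_muxnote_find(); the helper name is
+// hypothetical.
+#if 0
+static void
+_example_mach_notification_bookkeeping(dispatch_muxnote_t dmn)
+{
+       // data tracks which notifications are currently requested from the
+       // kernel; ext[0] doubles as the "armed" flag
+       if (!DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev)) {
+               dmn->dmn_kev.data |= DISPATCH_MACH_SEND_POSSIBLE;
+               DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev) = 1;
+       }
+}
+#endif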
+
+DISPATCH_ALWAYS_INLINE
+static dispatch_muxnote_t
+_dispatch_kevent_get_muxnote(dispatch_kevent_t ke)
+{
+       uintptr_t dmn_addr = (uintptr_t)ke->udata & ~DISPATCH_KEVENT_MUXED_MARKER;
+       return (dispatch_muxnote_t)dmn_addr;
+}
+
+DISPATCH_ALWAYS_INLINE
+static dispatch_unote_t
+_dispatch_kevent_get_unote(dispatch_kevent_t ke)
+{
+       dispatch_assert((ke->udata & DISPATCH_KEVENT_MUXED_MARKER) == 0);
+       return (dispatch_unote_t){ ._du = (dispatch_unote_class_t)ke->udata };
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_kevent_print_error(dispatch_kevent_t ke)
+{
+       _dispatch_debug("kevent[0x%llx]: handling error",
+                       (unsigned long long)ke->udata);
+       if (ke->flags & EV_DELETE) {
+               if (ke->flags & EV_UDATA_SPECIFIC) {
+                       if (ke->data == EINPROGRESS) {
+                               // deferred EV_DELETE
+                               return;
+                       }
+               }
+               // for EV_DELETE, if the update was deferred, we may have reclaimed
+               // the udata already, and it is unsafe to dereference it now.
+       } else if (ke->udata & DISPATCH_KEVENT_MUXED_MARKER) {
+               ke->flags |= _dispatch_kevent_get_muxnote(ke)->dmn_kev.flags;
+       } else if (ke->udata) {
+               if (!_dispatch_unote_registered(_dispatch_kevent_get_unote(ke))) {
+                       ke->flags |= EV_ADD;
+               }
+       }
+
+#if HAVE_MACH
+       if (ke->filter == EVFILT_MACHPORT && ke->data == ENOTSUP &&
+                       (ke->flags & EV_ADD) && (ke->fflags & MACH_RCV_MSG)) {
+               DISPATCH_INTERNAL_CRASH(ke->ident,
+                               "Missing EVFILT_MACHPORT support for ports");
+       }
+#endif
+
+       if (ke->data) {
+               // log the unexpected error
+               _dispatch_bug_kevent_client("kevent", _evfiltstr(ke->filter),
+                               !ke->udata ? NULL :
+                               ke->flags & EV_DELETE ? "delete" :
+                               ke->flags & EV_ADD ? "add" :
+                               ke->flags & EV_ENABLE ? "enable" : "monitor",
+                               (int)ke->data);
+       }
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_kevent_merge(dispatch_unote_t du, dispatch_kevent_t ke)
+{
+       uintptr_t data;
+       uintptr_t status = 0;
+       pthread_priority_t pp = 0;
+#if DISPATCH_USE_KEVENT_QOS
+       pp = ((pthread_priority_t)ke->qos) & ~_PTHREAD_PRIORITY_FLAGS_MASK;
+#endif
+       dispatch_unote_action_t action = du._du->du_data_action;
+       if (action == DISPATCH_UNOTE_ACTION_DATA_SET) {
+               // ke->data is signed and "negative available data" makes no sense;
+               // zero bytes can happen when EV_EOF is set
+               dispatch_assert(ke->data >= 0l);
+               data = ~(unsigned long)ke->data;
+#if HAVE_MACH
+       } else if (du._du->du_filter == EVFILT_MACHPORT) {
+               data = DISPATCH_MACH_RECV_MESSAGE;
+#endif
+       } else if (action == DISPATCH_UNOTE_ACTION_DATA_ADD) {
+               data = (unsigned long)ke->data;
+       } else if (action == DISPATCH_UNOTE_ACTION_DATA_OR) {
+               data = ke->fflags & du._du->du_fflags;
+       } else if (action == DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET) {
+               data = ke->fflags & du._du->du_fflags;
+               status = (unsigned long)ke->data;
+       } else {
+               DISPATCH_INTERNAL_CRASH(action, "Corrupt unote action");
+       }
+       return dux_merge_evt(du._du, ke->flags, data, status, pp);
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_kevent_merge_muxed(dispatch_kevent_t ke)
+{
+       dispatch_muxnote_t dmn = _dispatch_kevent_get_muxnote(ke);
+       dispatch_unote_linkage_t dul, dul_next;
+
+       TAILQ_FOREACH_SAFE(dul, &dmn->dmn_unotes_head, du_link, dul_next) {
+               _dispatch_kevent_merge(_dispatch_unote_linkage_get_unote(dul), ke);
+       }
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_kevent_drain(dispatch_kevent_t ke)
+{
+       if (ke->filter == EVFILT_USER) {
+               _dispatch_kevent_mgr_debug("received", ke);
+               return;
+       }
+       _dispatch_kevent_debug("received", ke);
+       if (unlikely(ke->flags & EV_ERROR)) {
+               if (ke->filter == EVFILT_PROC && ke->data == ESRCH) {
+                       // EVFILT_PROC may fail with ESRCH when the process exists but
+                       // is a zombie <rdar://problem/5067725>. As a workaround, we
+                       // simulate an exit event for any EVFILT_PROC with an invalid
+                       // pid <rdar://problem/6626350>.
+                       ke->flags &= ~(EV_ERROR | EV_ADD | EV_ENABLE | EV_UDATA_SPECIFIC);
+                       ke->flags |= EV_ONESHOT;
+                       ke->fflags = NOTE_EXIT;
+                       ke->data = 0;
+                       _dispatch_kevent_debug("synthetic NOTE_EXIT", ke);
+               } else {
+                       return _dispatch_kevent_print_error(ke);
+               }
+       }
+       if (ke->filter == EVFILT_TIMER) {
+               return _dispatch_kevent_timer_drain(ke);
+       }
+
+#if HAVE_MACH
+       if (ke->filter == EVFILT_MACHPORT) {
+               if (_dispatch_kevent_mach_msg_size(ke)) {
+                       return _dispatch_kevent_mach_msg_drain(ke);
+               }
+       }
+#endif
+
+       if (ke->udata & DISPATCH_KEVENT_MUXED_MARKER) {
+               return _dispatch_kevent_merge_muxed(ke);
+       }
+       return _dispatch_kevent_merge(_dispatch_kevent_get_unote(ke), ke);
+}
+
+#pragma mark dispatch_kq
+
+#if DISPATCH_USE_MGR_THREAD
+DISPATCH_NOINLINE
+static int
+_dispatch_kq_create(const void *guard_ptr)
+{
+       static const dispatch_kevent_s kev = {
+               .ident = 1,
+               .filter = EVFILT_USER,
+               .flags = EV_ADD|EV_CLEAR,
+               .udata = (uintptr_t)DISPATCH_WLH_MANAGER,
+       };
+       int kqfd;
+
+       _dispatch_fork_becomes_unsafe();
+#if DISPATCH_USE_GUARDED_FD
+       guardid_t guard = (uintptr_t)guard_ptr;
+       kqfd = guarded_kqueue_np(&guard, GUARD_CLOSE | GUARD_DUP);
+#else
+       (void)guard_ptr;
+       kqfd = kqueue();
+#endif
+       if (kqfd == -1) {
+               int err = errno;
+               switch (err) {
+               case EMFILE:
+                       DISPATCH_CLIENT_CRASH(err, "kqueue() failure: "
+                                       "process is out of file descriptors");
+                       break;
+               case ENFILE:
+                       DISPATCH_CLIENT_CRASH(err, "kqueue() failure: "
+                                       "system is out of file descriptors");
+                       break;
+               case ENOMEM:
+                       DISPATCH_CLIENT_CRASH(err, "kqueue() failure: "
+                                       "kernel is out of memory");
+                       break;
+               default:
+                       DISPATCH_INTERNAL_CRASH(err, "kqueue() failure");
+                       break;
+               }
+       }
+#if DISPATCH_USE_KEVENT_QOS
+       dispatch_assume_zero(kevent_qos(kqfd, &kev, 1, NULL, 0, NULL, NULL, 0));
+#else
+       dispatch_assume_zero(kevent(kqfd, &kev, 1, NULL, 0, NULL));
+#endif
+       return kqfd;
+}
+#endif
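+
+// Illustrative sketch (not compiled): the standalone EVFILT_USER self-wakeup
+// pattern that _dispatch_kq_create() sets up. A user event is pre-added with
+// EV_CLEAR so a later NOTE_TRIGGER (see _dispatch_event_loop_poke()) wakes a
+// thread blocked in kevent(). Error handling elided; the helper name is
+// hypothetical.
+#if 0
+#include <sys/event.h>
+static void
+_example_evfilt_user_wakeup(void)
+{
+       int kq = kqueue();
+       struct kevent ke;
+       // register the user event once
+       EV_SET(&ke, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
+       kevent(kq, &ke, 1, NULL, 0, NULL);
+       // ... later, from any thread: trigger it
+       EV_SET(&ke, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
+       kevent(kq, &ke, 1, NULL, 0, NULL);
+       // a kevent() blocked on kq now returns the user event
+}
+#endif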
+
+static void
+_dispatch_kq_init(void *context)
+{
+       bool *kq_initialized = context;
+
+       _dispatch_fork_becomes_unsafe();
+       if (unlikely(getenv("LIBDISPATCH_TIMERS_FORCE_MAX_LEEWAY"))) {
+               _dispatch_timers_force_max_leeway = true;
+       }
+       *kq_initialized = true;
+
+#if DISPATCH_USE_KEVENT_WORKQUEUE
+       _dispatch_kevent_workqueue_init();
+       if (_dispatch_kevent_workqueue_enabled) {
+               int r;
+               int kqfd = _dispatch_kq;
+               const dispatch_kevent_s ke = {
+                       .ident = 1,
+                       .filter = EVFILT_USER,
+                       .flags = EV_ADD|EV_CLEAR,
+                       .qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG,
+                       .udata = (uintptr_t)DISPATCH_WLH_MANAGER,
+               };
+retry:
+               r = kevent_qos(kqfd, &ke, 1, NULL, 0, NULL, NULL,
+                               KEVENT_FLAG_WORKQ|KEVENT_FLAG_IMMEDIATE);
+               if (unlikely(r == -1)) {
+                       int err = errno;
+                       switch (err) {
+                       case EINTR:
+                               goto retry;
+                       default:
+                               DISPATCH_CLIENT_CRASH(err,
+                                               "Failed to initalize workqueue kevent");
+                               break;
+                       }
+               }
+               return;
+       }
+#endif // DISPATCH_USE_KEVENT_WORKQUEUE
+#if DISPATCH_USE_MGR_THREAD
+       _dispatch_kq = _dispatch_kq_create(&_dispatch_mgr_q);
+       dx_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0);
+#endif // DISPATCH_USE_MGR_THREAD
+}
+
+#if DISPATCH_USE_MEMORYPRESSURE_SOURCE
+static void _dispatch_memorypressure_init(void);
+#else
+#define _dispatch_memorypressure_init() ((void)0)
+#endif
+
+DISPATCH_NOINLINE
+static int
+_dispatch_kq_poll(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n,
+               dispatch_kevent_t ke_out, int n_out, void *buf, size_t *avail,
+               uint32_t flags)
+{
+       static dispatch_once_t pred;
+       bool kq_initialized = false;
+       int r = 0;
+
+       dispatch_once_f(&pred, &kq_initialized, _dispatch_kq_init);
+       if (unlikely(kq_initialized)) {
+               // The calling thread was the one doing the initialization
+               //
+               // The event loop needs the memory pressure source and debug channel,
+               // however creating these will recursively call _dispatch_kq_poll(),
+               // so we can't quite initialize them under the dispatch once.
+               _dispatch_memorypressure_init();
+               _voucher_activity_debug_channel_init();
+       }
+
+#if !DISPATCH_USE_KEVENT_QOS
+       if (flags & KEVENT_FLAG_ERROR_EVENTS) {
+               // emulate KEVENT_FLAG_ERROR_EVENTS
+               for (r = 0; r < n; r++) {
+                       ke[r].flags |= EV_RECEIPT;
+               }
+               n_out = n;
+       }
+#endif
+
+retry:
+       if (wlh == DISPATCH_WLH_ANON) {
+               int kqfd = _dispatch_kq;
+#if DISPATCH_USE_KEVENT_QOS
+               if (_dispatch_kevent_workqueue_enabled) {
+                       flags |= KEVENT_FLAG_WORKQ;
+               }
+               r = kevent_qos(kqfd, ke, n, ke_out, n_out, buf, avail, flags);
+#else
+               const struct timespec timeout_immediately = {}, *timeout = NULL;
+               if (flags & KEVENT_FLAG_IMMEDIATE) timeout = &timeout_immediately;
+               r = kevent(kqfd, ke, n, ke_out, n_out, timeout);
+#endif
+       }
+       if (unlikely(r == -1)) {
+               int err = errno;
+               switch (err) {
+               case ENOMEM:
+                       _dispatch_temporary_resource_shortage();
+                       /* FALLTHROUGH */
+               case EINTR:
+                       goto retry;
+               case EBADF:
+                       DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors");
+               default:
+                       DISPATCH_CLIENT_CRASH(err, "Unexpected error from kevent");
+               }
+       }
+       return r;
+}
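+
+// Illustrative sketch (not compiled): the EV_RECEIPT emulation used above
+// when KEVENT_FLAG_ERROR_EVENTS is unavailable. With EV_RECEIPT, kevent()
+// reports one receipt per change: EV_ERROR is set and `data` carries the
+// per-change status (0 on success, an errno otherwise). The helper name is
+// hypothetical.
+#if 0
+#include <sys/event.h>
+static int
+_example_apply_changes_with_receipts(int kq, struct kevent *chg, int n)
+{
+       struct kevent out[n];
+       for (int i = 0; i < n; i++) chg[i].flags |= EV_RECEIPT;
+       int rc = kevent(kq, chg, n, out, n, NULL);
+       for (int i = 0; i < rc; i++) {
+               if ((out[i].flags & EV_ERROR) && out[i].data) {
+                       return (int)out[i].data; // first per-change error
+               }
+       }
+       return 0;
+}
+#endif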
+
+DISPATCH_NOINLINE
+static int
+_dispatch_kq_drain(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n,
+               uint32_t flags)
+{
+       dispatch_kevent_s ke_out[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT];
+       bool poll_for_events = !(flags & KEVENT_FLAG_ERROR_EVENTS);
+       int i, n_out = countof(ke_out), r = 0;
+       size_t *avail = NULL;
+       void *buf = NULL;
+
+#if DISPATCH_USE_KEVENT_QOS
+       size_t size;
+       if (poll_for_events) {
+               size = DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE +
+                               DISPATCH_MACH_TRAILER_SIZE;
+               buf = alloca(size);
+               avail = &size;
+       }
+#endif
+
+#if DISPATCH_DEBUG
+       for (r = 0; r < n; r++) {
+               if (ke[r].filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) {
+                       _dispatch_kevent_debug_n(NULL, ke + r, r, n);
+               }
+       }
+#endif
+
+       if (poll_for_events) _dispatch_clear_return_to_kernel();
+       n = _dispatch_kq_poll(wlh, ke, n, ke_out, n_out, buf, avail, flags);
+       if (n == 0) {
+               r = 0;
+       } else if (flags & KEVENT_FLAG_ERROR_EVENTS) {
+               for (i = 0, r = 0; i < n; i++) {
+                       if ((ke_out[i].flags & EV_ERROR) && ke_out[i].data) {
+                               _dispatch_kevent_drain(&ke_out[i]);
+                               r = (int)ke_out[i].data;
+                       }
+               }
+       } else {
+               for (i = 0, r = 0; i < n; i++) {
+                       _dispatch_kevent_drain(&ke_out[i]);
+               }
+       }
+       return r;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline int
+_dispatch_kq_update_one(dispatch_wlh_t wlh, dispatch_kevent_t ke)
+{
+       return _dispatch_kq_drain(wlh, ke, 1,
+                       KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_kq_update_all(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n)
+{
+       (void)_dispatch_kq_drain(wlh, ke, n,
+                       KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_kq_unote_set_kevent(dispatch_unote_t _du, dispatch_kevent_t dk,
+               uint16_t action)
+{
+       dispatch_unote_class_t du = _du._du;
+       dispatch_source_type_t dst = du->du_type;
+       uint16_t flags = dst->dst_flags | action;
+
+       if ((flags & EV_VANISHED) && !(flags & EV_ADD)) {
+               flags &= ~EV_VANISHED;
+       }
+       pthread_priority_t pp = _dispatch_priority_to_pp(du->du_priority);
+       *dk = (dispatch_kevent_s){
+               .ident  = du->du_ident,
+               .filter = dst->dst_filter,
+               .flags  = flags,
+               .udata  = (uintptr_t)du,
+               .fflags = du->du_fflags | dst->dst_fflags,
+               .data   = (typeof(dk->data))dst->dst_data,
+#if DISPATCH_USE_KEVENT_QOS
+               .qos    = (typeof(dk->qos))pp,
+#endif
+       };
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline int
+_dispatch_kq_deferred_find_slot(dispatch_deferred_items_t ddi,
+               int16_t filter, uint64_t ident, uint64_t udata)
+{
+       dispatch_kevent_t events = ddi->ddi_eventlist;
+       int i;
+
+       for (i = 0; i < ddi->ddi_nevents; i++) {
+               if (events[i].filter == filter && events[i].ident == ident &&
+                               events[i].udata == udata) {
+                       break;
+               }
+       }
+       return i;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_kevent_t
+_dispatch_kq_deferred_reuse_slot(dispatch_wlh_t wlh,
+               dispatch_deferred_items_t ddi, int slot)
+{
+       if (wlh != DISPATCH_WLH_ANON) _dispatch_set_return_to_kernel();
+       if (unlikely(slot == ddi->ddi_maxevents)) {
+               int nevents = ddi->ddi_nevents;
+               ddi->ddi_nevents = 1;
+               _dispatch_kq_update_all(wlh, ddi->ddi_eventlist, nevents);
+               dispatch_assert(ddi->ddi_nevents == 1);
+               slot = 0;
+       } else if (slot == ddi->ddi_nevents) {
+               ddi->ddi_nevents++;
+       }
+       return ddi->ddi_eventlist + slot;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_kq_deferred_discard_slot(dispatch_deferred_items_t ddi, int slot)
+{
+       if (slot < ddi->ddi_nevents) {
+               int last = --ddi->ddi_nevents;
+               if (slot != last) {
+                       ddi->ddi_eventlist[slot] = ddi->ddi_eventlist[last];
+               }
+       }
+}
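+
+// Illustrative sketch (not compiled): how the three deferred-slot helpers
+// above compose. The deferred list is a small array scanned linearly, keyed
+// by (filter, ident, udata); "reuse" coalesces into an existing slot,
+// appends a new one, or flushes a full list via _dispatch_kq_update_all().
+// The helper name is hypothetical.
+#if 0
+static void
+_example_defer_or_coalesce(dispatch_wlh_t wlh, dispatch_deferred_items_t ddi,
+               dispatch_kevent_t ke)
+{
+       int slot = _dispatch_kq_deferred_find_slot(ddi, ke->filter, ke->ident,
+                       ke->udata);
+       // a pending update for the same knote is simply overwritten
+       *_dispatch_kq_deferred_reuse_slot(wlh, ddi, slot) = *ke;
+}
+#endif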
+
+DISPATCH_NOINLINE
+static void
+_dispatch_kq_deferred_update(dispatch_wlh_t wlh, dispatch_kevent_t ke)
+{
+       dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
+
+       if (ddi && ddi->ddi_maxevents && wlh == _dispatch_get_wlh()) {
+               int slot = _dispatch_kq_deferred_find_slot(ddi, ke->filter, ke->ident,
+                               ke->udata);
+               dispatch_kevent_t dk = _dispatch_kq_deferred_reuse_slot(wlh, ddi, slot);
+               *dk = *ke;
+               if (ke->filter != EVFILT_USER) {
+                       _dispatch_kevent_mgr_debug("deferred", ke);
+               }
+       } else {
+               _dispatch_kq_update_one(wlh, ke);
+       }
+}
+
+DISPATCH_NOINLINE
+static int
+_dispatch_kq_immediate_update(dispatch_wlh_t wlh, dispatch_kevent_t ke)
+{
+       dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
+       if (ddi && wlh == _dispatch_get_wlh()) {
+               int slot = _dispatch_kq_deferred_find_slot(ddi, ke->filter, ke->ident,
+                               ke->udata);
+               _dispatch_kq_deferred_discard_slot(ddi, slot);
+       }
+       return _dispatch_kq_update_one(wlh, ke);
+}
+
+DISPATCH_NOINLINE
+static bool
+_dispatch_kq_unote_update(dispatch_wlh_t wlh, dispatch_unote_t _du,
+               uint16_t action_flags)
+{
+       dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
+       dispatch_unote_class_t du = _du._du;
+       dispatch_kevent_t ke;
+       int r = 0;
+
+       if (action_flags & EV_ADD) {
+               // as soon as we register, we may get an event delivery, and it has
+               // to see du_wlh already set, else it will not unregister the kevent
+               dispatch_assert(du->du_wlh == NULL);
+               _dispatch_wlh_retain(wlh);
+               du->du_wlh = wlh;
+       }
+
+       if (ddi && wlh == _dispatch_get_wlh()) {
+               int slot = _dispatch_kq_deferred_find_slot(ddi,
+                               du->du_filter, du->du_ident, (uintptr_t)du);
+               if (slot < ddi->ddi_nevents) {
+                       // <rdar://problem/26202376> when deleting and an enable is pending,
+                       // we must merge EV_ENABLE to do an immediate deletion
+                       action_flags |= (ddi->ddi_eventlist[slot].flags & EV_ENABLE);
+               }
+
+               if (!(action_flags & EV_ADD) && (action_flags & EV_ENABLE)) {
+                       // can be deferred, so do it!
+                       ke = _dispatch_kq_deferred_reuse_slot(wlh, ddi, slot);
+                       _dispatch_kq_unote_set_kevent(du, ke, action_flags);
+                       _dispatch_kevent_debug("deferred", ke);
+                       goto done;
+               }
+
+               // get rid of the deferred item if any, we can't wait
+               _dispatch_kq_deferred_discard_slot(ddi, slot);
+       }
+
+       if (action_flags) {
+               dispatch_kevent_s dk;
+               _dispatch_kq_unote_set_kevent(du, &dk, action_flags);
+               r = _dispatch_kq_update_one(wlh, &dk);
+       }
+
+done:
+       if (action_flags & EV_ADD) {
+               if (unlikely(r)) {
+                       _dispatch_wlh_release(du->du_wlh);
+                       du->du_wlh = NULL;
+               }
+               return r == 0;
+       }
+
+       if (action_flags & EV_DELETE) {
+               if (r == EINPROGRESS) {
+                       return false;
+               }
+               _dispatch_wlh_release(du->du_wlh);
+               du->du_wlh = NULL;
+       }
+
+       dispatch_assume_zero(r);
+       return true;
+}
+
+#pragma mark dispatch_muxnote_t
+
+static void
+_dispatch_muxnotes_init(void *ctxt DISPATCH_UNUSED)
+{
+       uint32_t i;
+       for (i = 0; i < DSL_HASH_SIZE; i++) {
+               TAILQ_INIT(&_dispatch_sources[i]);
+       }
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline struct dispatch_muxnote_bucket_s *
+_dispatch_muxnote_bucket(uint64_t ident, int16_t filter)
+{
+       switch (filter) {
+#if HAVE_MACH
+       case EVFILT_MACHPORT:
+       case DISPATCH_EVFILT_MACH_NOTIFICATION:
+               ident = MACH_PORT_INDEX(ident);
+               break;
+#endif
+       case EVFILT_SIGNAL: // signo
+       case EVFILT_PROC: // pid_t
+       default: // fd
+               break;
+       }
+
+       dispatch_once_f(&_dispatch_muxnotes.pred, NULL, _dispatch_muxnotes_init);
+       return &_dispatch_sources[DSL_HASH((uintptr_t)ident)];
+}
+#define _dispatch_unote_muxnote_bucket(du) \
+       _dispatch_muxnote_bucket(du._du->du_ident, du._du->du_filter)
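+
+// Illustrative sketch (not compiled): the bucketing above, assuming the
+// usual libdispatch definition of DSL_HASH as a power-of-two mask over the
+// ident. Mach port names keep per-entry bits in their low byte, hence the
+// MACH_PORT_INDEX() reduction first. The helper name is hypothetical.
+#if 0
+static struct dispatch_muxnote_bucket_s *
+_example_bucket_for_fd(int fd)
+{
+       // for fd-based filters the ident is the descriptor itself
+       return _dispatch_muxnote_bucket((uint64_t)fd, EVFILT_READ);
+}
+#endif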
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_muxnote_t
+_dispatch_muxnote_find(struct dispatch_muxnote_bucket_s *dmb,
+               dispatch_wlh_t wlh, uint64_t ident, int16_t filter)
+{
+       dispatch_muxnote_t dmn;
+       _dispatch_muxnotes_lock();
+       TAILQ_FOREACH(dmn, dmb, dmn_list) {
+               if (dmn->dmn_wlh == wlh && dmn->dmn_kev.ident == ident &&
+                               dmn->dmn_kev.filter == filter) {
+                       break;
+               }
+       }
+       _dispatch_muxnotes_unlock();
+       return dmn;
+}
+#define _dispatch_unote_muxnote_find(dmb, du, wlh) \
+               _dispatch_muxnote_find(dmb, wlh, du._du->du_ident, du._du->du_filter)
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_muxnote_t
+_dispatch_mach_muxnote_find(mach_port_t name, int16_t filter)
+{
+       struct dispatch_muxnote_bucket_s *dmb;
+       dmb = _dispatch_muxnote_bucket(name, filter);
+       return _dispatch_muxnote_find(dmb, DISPATCH_WLH_ANON, name, filter);
+}
+
+DISPATCH_NOINLINE
+static bool
+_dispatch_unote_register_muxed(dispatch_unote_t du, dispatch_wlh_t wlh)
+{
+       struct dispatch_muxnote_bucket_s *dmb = _dispatch_unote_muxnote_bucket(du);
+       dispatch_muxnote_t dmn;
+       bool installed = true;
+
+       dmn = _dispatch_unote_muxnote_find(dmb, du, wlh);
+       if (dmn) {
+               uint32_t flags = du._du->du_fflags & ~dmn->dmn_kev.fflags;
+               if (flags) {
+                       dmn->dmn_kev.fflags |= flags;
+                       if (unlikely(du._du->du_type->dst_update_mux)) {
+                               installed = du._du->du_type->dst_update_mux(dmn);
+                       } else {
+                               installed = !_dispatch_kq_immediate_update(dmn->dmn_wlh,
+                                               &dmn->dmn_kev);
+                       }
+                       if (!installed) dmn->dmn_kev.fflags &= ~flags;
+               }
+       } else {
+               dmn = _dispatch_calloc(1, sizeof(struct dispatch_muxnote_s));
+               TAILQ_INIT(&dmn->dmn_unotes_head);
+               _dispatch_kq_unote_set_kevent(du, &dmn->dmn_kev, EV_ADD | EV_ENABLE);
+#if DISPATCH_USE_KEVENT_QOS
+               dmn->dmn_kev.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
+#endif
+               dmn->dmn_kev.udata = (uintptr_t)dmn | DISPATCH_KEVENT_MUXED_MARKER;
+               dmn->dmn_wlh = wlh;
+               if (unlikely(du._du->du_type->dst_update_mux)) {
+                       installed = du._du->du_type->dst_update_mux(dmn);
+               } else {
+                       installed = !_dispatch_kq_immediate_update(dmn->dmn_wlh,
+                                       &dmn->dmn_kev);
+               }
+               if (installed) {
+                       dmn->dmn_kev.flags &= ~(EV_ADD | EV_VANISHED);
+                       _dispatch_muxnotes_lock();
+                       TAILQ_INSERT_TAIL(dmb, dmn, dmn_list);
+                       _dispatch_muxnotes_unlock();
+               } else {
+                       free(dmn);
+               }
+       }
+
+       if (installed) {
+               dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
+               TAILQ_INSERT_TAIL(&dmn->dmn_unotes_head, dul, du_link);
+               dul->du_muxnote = dmn;
+
+               if (du._du->du_filter == DISPATCH_EVFILT_MACH_NOTIFICATION) {
+                       bool armed = DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev);
+                       os_atomic_store2o(du._dmsr, dmsr_notification_armed, armed,
+                                       relaxed);
+               }
+               du._du->du_wlh = DISPATCH_WLH_ANON;
+       }
+       return installed;
+}
+
+bool
+_dispatch_unote_register(dispatch_unote_t du, dispatch_wlh_t wlh,
+               dispatch_priority_t pri)
+{
+       dispatch_assert(!_dispatch_unote_registered(du));
+       du._du->du_priority = pri;
+       switch (du._du->du_filter) {
+       case DISPATCH_EVFILT_CUSTOM_ADD:
+       case DISPATCH_EVFILT_CUSTOM_OR:
+       case DISPATCH_EVFILT_CUSTOM_REPLACE:
+               du._du->du_wlh = DISPATCH_WLH_ANON;
+               return true;
+       }
+       if (!du._du->du_is_direct) {
+               return _dispatch_unote_register_muxed(du, DISPATCH_WLH_ANON);
+       }
+       return _dispatch_kq_unote_update(wlh, du, EV_ADD | EV_ENABLE);
+}
+
+void
+_dispatch_unote_resume(dispatch_unote_t du)
+{
+       dispatch_assert(_dispatch_unote_registered(du));
+
+       if (du._du->du_is_direct) {
+               dispatch_wlh_t wlh = du._du->du_wlh;
+               _dispatch_kq_unote_update(wlh, du, EV_ENABLE);
+       } else if (unlikely(du._du->du_type->dst_update_mux)) {
+               dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
+               du._du->du_type->dst_update_mux(dul->du_muxnote);
+       } else {
+               dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
+               dispatch_muxnote_t dmn = dul->du_muxnote;
+               _dispatch_kq_deferred_update(dmn->dmn_wlh, &dmn->dmn_kev);
+       }
+}
+
+DISPATCH_NOINLINE
+static bool
+_dispatch_unote_unregister_muxed(dispatch_unote_t du, uint32_t flags)
+{
+       dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
+       dispatch_muxnote_t dmn = dul->du_muxnote;
+       bool update = false, dispose = false;
+
+       if (dmn->dmn_kev.filter == DISPATCH_EVFILT_MACH_NOTIFICATION) {
+               os_atomic_store2o(du._dmsr, dmsr_notification_armed, false, relaxed);
+       }
+       dispatch_assert(du._du->du_wlh == DISPATCH_WLH_ANON);
+       du._du->du_wlh = NULL;
+       TAILQ_REMOVE(&dmn->dmn_unotes_head, dul, du_link);
+       _TAILQ_TRASH_ENTRY(dul, du_link);
+       dul->du_muxnote = NULL;
+
+       if (TAILQ_EMPTY(&dmn->dmn_unotes_head)) {
+               dmn->dmn_kev.flags |= EV_DELETE;
+               update = dispose = true;
+       } else {
+               uint32_t fflags = du._du->du_type->dst_fflags;
+               TAILQ_FOREACH(dul, &dmn->dmn_unotes_head, du_link) {
+                       du = _dispatch_unote_linkage_get_unote(dul);
+                       fflags |= du._du->du_fflags;
+               }
+               if (dmn->dmn_kev.fflags & ~fflags) {
+                       dmn->dmn_kev.fflags &= fflags;
+                       update = true;
+               }
+       }
+       if (update && !(flags & DU_UNREGISTER_ALREADY_DELETED)) {
+               if (unlikely(du._du->du_type->dst_update_mux)) {
+                       dispatch_assume(du._du->du_type->dst_update_mux(dmn));
+               } else {
+                       _dispatch_kq_deferred_update(dmn->dmn_wlh, &dmn->dmn_kev);
+               }
+       }
+       if (dispose) {
+               struct dispatch_muxnote_bucket_s *dmb;
+               dmb = _dispatch_muxnote_bucket(dmn->dmn_kev.ident, dmn->dmn_kev.filter);
+               _dispatch_muxnotes_lock();
+               TAILQ_REMOVE(dmb, dmn, dmn_list);
+               _dispatch_muxnotes_unlock();
+               free(dmn);
+       }
+       return true;
+}
+
+bool
+_dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags)
+{
+       switch (du._du->du_filter) {
+       case DISPATCH_EVFILT_CUSTOM_ADD:
+       case DISPATCH_EVFILT_CUSTOM_OR:
+       case DISPATCH_EVFILT_CUSTOM_REPLACE:
+               du._du->du_wlh = NULL;
+               return true;
+       }
+       dispatch_wlh_t wlh = du._du->du_wlh;
+       if (wlh) {
+               if (!du._du->du_is_direct) {
+                       return _dispatch_unote_unregister_muxed(du, flags);
+               }
+               uint16_t action_flags;
+               if (flags & DU_UNREGISTER_ALREADY_DELETED) {
+                       action_flags = 0;
+               } else if (flags & DU_UNREGISTER_IMMEDIATE_DELETE) {
+                       action_flags = EV_DELETE | EV_ENABLE;
+               } else {
+                       action_flags = EV_DELETE;
+               }
+               return _dispatch_kq_unote_update(wlh, du, action_flags);
+       }
+       return true;
+}
+
+#pragma mark -
+#pragma mark dispatch_event_loop
+
+void
+_dispatch_event_loop_atfork_child(void)
+{
+#if HAVE_MACH
+       _dispatch_mach_host_port_pred = 0;
+       _dispatch_mach_host_port = MACH_PORT_NULL;
+#endif
+}
+
+DISPATCH_NOINLINE
+void
+_dispatch_event_loop_poke(dispatch_wlh_t wlh, uint64_t dq_state, uint32_t flags)
+{
+       if (wlh == DISPATCH_WLH_MANAGER) {
+               dispatch_kevent_s ke = (dispatch_kevent_s){
+                       .ident  = 1,
+                       .filter = EVFILT_USER,
+                       .fflags = NOTE_TRIGGER,
+                       .udata = (uintptr_t)DISPATCH_WLH_MANAGER,
+               };
+               return _dispatch_kq_deferred_update(DISPATCH_WLH_ANON, &ke);
+       } else if (wlh && wlh != DISPATCH_WLH_ANON) {
+               (void)dq_state; (void)flags;
+       }
+       DISPATCH_INTERNAL_CRASH(wlh, "Unsupported wlh configuration");
+}
+
+DISPATCH_NOINLINE
+void
+_dispatch_event_loop_drain(uint32_t flags)
+{
+       dispatch_wlh_t wlh = _dispatch_get_wlh();
+       dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
+       int n;
+
+again:
+       n = ddi->ddi_nevents;
+       ddi->ddi_nevents = 0;
+       _dispatch_kq_drain(wlh, ddi->ddi_eventlist, n, flags);
+
+       if ((flags & KEVENT_FLAG_IMMEDIATE) &&
+                       !(flags & KEVENT_FLAG_ERROR_EVENTS) &&
+                       _dispatch_needs_to_return_to_kernel()) {
+               goto again;
+       }
+}
+
+void
+_dispatch_event_loop_merge(dispatch_kevent_t events, int nevents)
+{
+       dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
+       dispatch_kevent_s kev[nevents];
+
+       // now we can re-use the whole event list, but we need to save one slot
+       // for the event loop poke
+       memcpy(kev, events, sizeof(kev));
+       ddi->ddi_maxevents = DISPATCH_DEFERRED_ITEMS_EVENT_COUNT - 1;
+
+       for (int i = 0; i < nevents; i++) {
+               _dispatch_kevent_drain(&kev[i]);
+       }
+
+       dispatch_wlh_t wlh = _dispatch_get_wlh();
+       if (wlh == DISPATCH_WLH_ANON && ddi->ddi_stashed_dou._do) {
+               if (ddi->ddi_nevents) {
+                       // We will drain the stashed item and not return to the kernel
+                       // right away. As a consequence, do not delay these updates.
+                       _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE |
+                                       KEVENT_FLAG_ERROR_EVENTS);
+               }
+               _dispatch_trace_continuation_push(ddi->ddi_stashed_rq,
+                               ddi->ddi_stashed_dou);
+       }
+}
+
+void
+_dispatch_event_loop_leave_immediate(dispatch_wlh_t wlh, uint64_t dq_state)
+{
+       (void)wlh; (void)dq_state;
+}
+
+void
+_dispatch_event_loop_leave_deferred(dispatch_wlh_t wlh, uint64_t dq_state)
+{
+       (void)wlh; (void)dq_state;
+}
+
+void
+_dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc,
+               dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state)
+{
+       (void)dsc; (void)wlh; (void)old_state; (void)new_state;
+}
+
+void
+_dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc)
+{
+       if (dsc->dsc_release_storage) {
+               _dispatch_queue_release_storage(dsc->dc_data);
+       }
+}
+
+void
+_dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state,
+               uint64_t new_state, uint32_t flags)
+{
+       (void)wlh; (void)old_state; (void)new_state; (void)flags;
+}
+
+#if DISPATCH_WLH_DEBUG
+void
+_dispatch_event_loop_assert_not_owned(dispatch_wlh_t wlh)
+{
+       (void)wlh;
+}
+#endif // DISPATCH_WLH_DEBUG
+
+#pragma mark -
+#pragma mark dispatch_event_loop timers
+
+#define DISPATCH_KEVENT_TIMEOUT_IDENT_MASK (~0ull << 8)
+
+DISPATCH_NOINLINE
+static void
+_dispatch_kevent_timer_drain(dispatch_kevent_t ke)
+{
+       dispatch_assert(ke->data > 0);
+       dispatch_assert((ke->ident & DISPATCH_KEVENT_TIMEOUT_IDENT_MASK) ==
+                       DISPATCH_KEVENT_TIMEOUT_IDENT_MASK);
+       uint32_t tidx = ke->ident & ~DISPATCH_KEVENT_TIMEOUT_IDENT_MASK;
+
+       dispatch_assert(tidx < DISPATCH_TIMER_COUNT);
+       _dispatch_timers_expired = true;
+       _dispatch_timers_processing_mask |= 1 << tidx;
+       _dispatch_timers_heap[tidx].dth_flags &= ~DTH_ARMED;
+#if DISPATCH_USE_DTRACE
+       _dispatch_timers_will_wake |= 1 << DISPATCH_TIMER_QOS(tidx);
+#endif
+}
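+
+// Illustrative sketch (not compiled): the ident encoding asserted above.
+// The high 56 bits are all ones (the mask) and the timer heap index lives
+// in the low 8 bits, so decoding is a single mask. The helper name is
+// hypothetical.
+#if 0
+static void
+_example_timer_ident_roundtrip(uint32_t tidx)
+{
+       uint64_t ident = DISPATCH_KEVENT_TIMEOUT_IDENT_MASK | tidx;
+       uint32_t decoded = (uint32_t)(ident & ~DISPATCH_KEVENT_TIMEOUT_IDENT_MASK);
+       dispatch_assert(decoded == tidx); // holds for any tidx < 256
+}
+#endif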
+
+DISPATCH_NOINLINE
+static void
+_dispatch_event_loop_timer_program(uint32_t tidx,
+               uint64_t target, uint64_t leeway, uint16_t action)
+{
+       dispatch_kevent_s ke = {
+               .ident = DISPATCH_KEVENT_TIMEOUT_IDENT_MASK | tidx,
+               .filter = EVFILT_TIMER,
+               .flags = action | EV_ONESHOT,
+               .fflags = _dispatch_timer_index_to_fflags[tidx],
+               .data = (int64_t)target,
+               .udata = (uintptr_t)&_dispatch_timers_heap[tidx],
+#if DISPATCH_HAVE_TIMER_COALESCING
+               .ext[1] = leeway,
+#endif
+#if DISPATCH_USE_KEVENT_QOS
+               .qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG,
+#endif
+       };
+
+       _dispatch_kq_deferred_update(DISPATCH_WLH_ANON, &ke);
+}
+
+void
+_dispatch_event_loop_timer_arm(uint32_t tidx, dispatch_timer_delay_s range,
+               dispatch_clock_now_cache_t nows)
+{
+       if (unlikely(_dispatch_timers_force_max_leeway)) {
+               range.delay += range.leeway;
+               range.leeway = 0;
+       }
+#if HAVE_MACH
+       if (DISPATCH_TIMER_CLOCK(tidx) == DISPATCH_CLOCK_WALL) {
+               _dispatch_mach_host_calendar_change_register();
+       }
+#endif
+
+       // <rdar://problem/13186331> EVFILT_TIMER NOTE_ABSOLUTE always expects
+       // a WALL deadline
+       uint64_t now = _dispatch_time_now_cached(DISPATCH_CLOCK_WALL, nows);
+       _dispatch_timers_heap[tidx].dth_flags |= DTH_ARMED;
+       _dispatch_event_loop_timer_program(tidx, now + range.delay, range.leeway,
+                       EV_ADD | EV_ENABLE);
+}
+
+void
+_dispatch_event_loop_timer_delete(uint32_t tidx)
+{
+       _dispatch_timers_heap[tidx].dth_flags &= ~DTH_ARMED;
+       _dispatch_event_loop_timer_program(tidx, 0, 0, EV_DELETE);
+}
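+
+// Illustrative sketch (not compiled): the oneshot absolute timer
+// registration performed by the helpers above, expressed with the public
+// Darwin kqueue API (NOTE_ABSOLUTE/NOTE_NSECONDS fflags, coalescing leeway
+// in ext[1] via NOTE_LEEWAY). Error handling elided; the helper name is
+// hypothetical.
+#if 0
+#include <sys/event.h>
+static void
+_example_arm_wall_deadline(int kq, uint64_t deadline_ns, uint64_t leeway_ns)
+{
+       struct kevent64_s ke;
+       EV_SET64(&ke, /* ident */ 0xffull, EVFILT_TIMER,
+                       EV_ADD | EV_ENABLE | EV_ONESHOT,
+                       NOTE_ABSOLUTE | NOTE_NSECONDS | NOTE_LEEWAY,
+                       (int64_t)deadline_ns, /* udata */ 0, /* ext[0] */ 0, leeway_ns);
+       kevent64(kq, &ke, 1, NULL, 0, 0, NULL);
+}
+#endif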
+
+#pragma mark -
+#pragma mark kevent specific sources
+
+static dispatch_unote_t
+_dispatch_source_proc_create(dispatch_source_type_t dst DISPATCH_UNUSED,
+               uintptr_t handle, unsigned long mask DISPATCH_UNUSED)
+{
+       dispatch_unote_t du = _dispatch_unote_create_with_handle(dst, handle, mask);
+       if (du._du && (mask & DISPATCH_PROC_EXIT_STATUS)) {
+               du._du->du_data_action = DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET;
+       }
+       return du;
+}
+
+const dispatch_source_type_s _dispatch_source_type_proc = {
+       .dst_kind       = "proc",
+       .dst_filter     = EVFILT_PROC,
+       .dst_flags      = DISPATCH_EV_DIRECT|EV_CLEAR,
+       .dst_fflags     = NOTE_EXIT, // rdar://16655831
+       .dst_mask       = NOTE_EXIT|NOTE_FORK|NOTE_EXEC|NOTE_EXITSTATUS
+#if HAVE_DECL_NOTE_SIGNAL
+                       |NOTE_SIGNAL
+#endif
+#if HAVE_DECL_NOTE_REAP
+                       |NOTE_REAP
+#endif
+                       ,
+       .dst_size       = sizeof(struct dispatch_source_refs_s),
+
+       .dst_create     = _dispatch_source_proc_create,
+       .dst_merge_evt  = _dispatch_source_merge_evt,
+};
+
+const dispatch_source_type_s _dispatch_source_type_vnode = {
+       .dst_kind       = "vnode",
+       .dst_filter     = EVFILT_VNODE,
+       .dst_flags      = DISPATCH_EV_DIRECT|EV_CLEAR|EV_VANISHED,
+       .dst_mask       = NOTE_DELETE|NOTE_WRITE|NOTE_EXTEND|NOTE_ATTRIB|NOTE_LINK
+                       |NOTE_RENAME|NOTE_FUNLOCK
+#if HAVE_DECL_NOTE_REVOKE
+                       |NOTE_REVOKE
+#endif
+#if HAVE_DECL_NOTE_NONE
+                       |NOTE_NONE
+#endif
+                       ,
+       .dst_size       = sizeof(struct dispatch_source_refs_s),
+
+       .dst_create     = _dispatch_unote_create_with_fd,
+       .dst_merge_evt  = _dispatch_source_merge_evt,
+};
+
+const dispatch_source_type_s _dispatch_source_type_vfs = {
+       .dst_kind       = "vfs",
+       .dst_filter     = EVFILT_FS,
+       .dst_flags      = DISPATCH_EV_DIRECT|EV_CLEAR,
+       .dst_mask       = VQ_NOTRESP|VQ_NEEDAUTH|VQ_LOWDISK|VQ_MOUNT|VQ_UNMOUNT
+                       |VQ_DEAD|VQ_ASSIST|VQ_NOTRESPLOCK
+#if HAVE_DECL_VQ_UPDATE
+                       |VQ_UPDATE
+#endif
+#if HAVE_DECL_VQ_VERYLOWDISK
+                       |VQ_VERYLOWDISK
+#endif
+#if HAVE_DECL_VQ_QUOTA
+                       |VQ_QUOTA
+#endif
+#if HAVE_DECL_VQ_NEARLOWDISK
+                       |VQ_NEARLOWDISK
+#endif
+#if HAVE_DECL_VQ_DESIRED_DISK
+                       |VQ_DESIRED_DISK
+#endif
+                       ,
+       .dst_size       = sizeof(struct dispatch_source_refs_s),
+
+       .dst_create     = _dispatch_unote_create_without_handle,
+       .dst_merge_evt  = _dispatch_source_merge_evt,
+};
+
+#ifdef EVFILT_SOCK
+const dispatch_source_type_s _dispatch_source_type_sock = {
+       .dst_kind       = "sock",
+       .dst_filter     = EVFILT_SOCK,
+       .dst_flags      = DISPATCH_EV_DIRECT|EV_CLEAR|EV_VANISHED,
+       .dst_mask       = NOTE_CONNRESET|NOTE_READCLOSED|NOTE_WRITECLOSED
+                       |NOTE_TIMEOUT|NOTE_NOSRCADDR|NOTE_IFDENIED|NOTE_SUSPEND|NOTE_RESUME
+                       |NOTE_KEEPALIVE
+#ifdef NOTE_ADAPTIVE_WTIMO
+                       |NOTE_ADAPTIVE_WTIMO|NOTE_ADAPTIVE_RTIMO
+#endif
+#ifdef NOTE_CONNECTED
+                       |NOTE_CONNECTED|NOTE_DISCONNECTED|NOTE_CONNINFO_UPDATED
+#endif
+#ifdef NOTE_NOTIFY_ACK
+                       |NOTE_NOTIFY_ACK
+#endif
+               ,
+       .dst_size       = sizeof(struct dispatch_source_refs_s),
+
+       .dst_create     = _dispatch_unote_create_with_fd,
+       .dst_merge_evt  = _dispatch_source_merge_evt,
+};
+#endif // EVFILT_SOCK
+
+#ifdef EVFILT_NW_CHANNEL
+const dispatch_source_type_s _dispatch_source_type_nw_channel = {
+       .dst_kind       = "nw_channel",
+       .dst_filter     = EVFILT_NW_CHANNEL,
+       .dst_flags      = DISPATCH_EV_DIRECT|EV_CLEAR|EV_VANISHED,
+       .dst_mask       = NOTE_FLOW_ADV_UPDATE,
+       .dst_size       = sizeof(struct dispatch_source_refs_s),
+       .dst_create     = _dispatch_unote_create_with_fd,
+       .dst_merge_evt  = _dispatch_source_merge_evt,
+};
+#endif // EVFILT_NW_CHANNEL
+
+#if DISPATCH_USE_MEMORYSTATUS
+
+#if DISPATCH_USE_MEMORYPRESSURE_SOURCE
+#define DISPATCH_MEMORYPRESSURE_SOURCE_MASK ( \
+               DISPATCH_MEMORYPRESSURE_NORMAL | \
+               DISPATCH_MEMORYPRESSURE_WARN | \
+               DISPATCH_MEMORYPRESSURE_CRITICAL | \
+               DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \
+               DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL | \
+               DISPATCH_MEMORYPRESSURE_MSL_STATUS)
+
+#define DISPATCH_MEMORYPRESSURE_MALLOC_MASK ( \
+               DISPATCH_MEMORYPRESSURE_WARN | \
+               DISPATCH_MEMORYPRESSURE_CRITICAL | \
+               DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \
+               DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL | \
+               DISPATCH_MEMORYPRESSURE_MSL_STATUS)
+
+static void
+_dispatch_memorypressure_handler(void *context)
+{
+       dispatch_source_t ds = context;
+       unsigned long memorypressure = dispatch_source_get_data(ds);
+
+       if (memorypressure & DISPATCH_MEMORYPRESSURE_NORMAL) {
+               _dispatch_memory_warn = false;
+               _dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT;
+#if VOUCHER_USE_MACH_VOUCHER
+               if (_firehose_task_buffer) {
+                       firehose_buffer_clear_bank_flags(_firehose_task_buffer,
+                                       FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY);
+               }
+#endif
+       }
+       if (memorypressure & DISPATCH_MEMORYPRESSURE_WARN) {
+               _dispatch_memory_warn = true;
+               _dispatch_continuation_cache_limit =
+                               DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN;
+#if VOUCHER_USE_MACH_VOUCHER
+               if (_firehose_task_buffer) {
+                       firehose_buffer_set_bank_flags(_firehose_task_buffer,
+                                       FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY);
+               }
+#endif
+       }
+       memorypressure &= DISPATCH_MEMORYPRESSURE_MALLOC_MASK;
+       if (memorypressure) {
+               malloc_memory_event_handler(memorypressure);
+       }
+}
+
+static void
+_dispatch_memorypressure_init(void)
+{
+       dispatch_source_t ds = dispatch_source_create(
+                       DISPATCH_SOURCE_TYPE_MEMORYPRESSURE, 0,
+                       DISPATCH_MEMORYPRESSURE_SOURCE_MASK, &_dispatch_mgr_q);
+       dispatch_set_context(ds, ds);
+       dispatch_source_set_event_handler_f(ds, _dispatch_memorypressure_handler);
+       dispatch_activate(ds);
+}
+#endif // DISPATCH_USE_MEMORYPRESSURE_SOURCE
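+
+// Illustrative sketch (not compiled): the same source type through the
+// public API; clients observe the events that
+// _dispatch_memorypressure_handler() consumes internally above. The helper
+// name is hypothetical.
+#if 0
+#include <dispatch/dispatch.h>
+static void
+_example_observe_memorypressure(void)
+{
+       dispatch_source_t ds = dispatch_source_create(
+                       DISPATCH_SOURCE_TYPE_MEMORYPRESSURE, 0,
+                       DISPATCH_MEMORYPRESSURE_WARN | DISPATCH_MEMORYPRESSURE_CRITICAL,
+                       dispatch_get_global_queue(QOS_CLASS_UTILITY, 0));
+       dispatch_source_set_event_handler(ds, ^{
+               unsigned long pressure = dispatch_source_get_data(ds);
+               (void)pressure; // react: trim caches, drop freeable memory, ...
+       });
+       dispatch_activate(ds);
+}
+#endif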
+
+#if TARGET_OS_SIMULATOR // rdar://problem/9219483
+static int _dispatch_ios_simulator_memory_warnings_fd = -1;
+static void
+_dispatch_ios_simulator_memorypressure_init(void *context DISPATCH_UNUSED)
+{
+       char *e = getenv("SIMULATOR_MEMORY_WARNINGS");
+       if (!e) return;
+       _dispatch_ios_simulator_memory_warnings_fd = open(e, O_EVTONLY);
+       if (_dispatch_ios_simulator_memory_warnings_fd == -1) {
+               (void)dispatch_assume_zero(errno);
+       }
+}
+
+static dispatch_unote_t
+_dispatch_source_memorypressure_create(dispatch_source_type_t dst,
+       uintptr_t handle, unsigned long mask)
+{
+       static dispatch_once_t pred;
+       dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_memorypressure_init);
+
+       if (handle) {
+               return DISPATCH_UNOTE_NULL;
+       }
+
+       dst = &_dispatch_source_type_vnode;
+       handle = (uintptr_t)_dispatch_ios_simulator_memory_warnings_fd;
+       mask = NOTE_ATTRIB;
+
+       dispatch_unote_t du = dux_create(dst, handle, mask);
+       if (du._du) {
+               du._du->du_memorypressure_override = true;
+       }
+       return du;
+}
+#endif // TARGET_OS_SIMULATOR
+
+const dispatch_source_type_s _dispatch_source_type_memorypressure = {
+       .dst_kind       = "memorystatus",
+       .dst_filter     = EVFILT_MEMORYSTATUS,
+       .dst_flags      = EV_UDATA_SPECIFIC|EV_DISPATCH,
+       .dst_mask       = NOTE_MEMORYSTATUS_PRESSURE_NORMAL
+                       |NOTE_MEMORYSTATUS_PRESSURE_WARN|NOTE_MEMORYSTATUS_PRESSURE_CRITICAL
+                       |NOTE_MEMORYSTATUS_LOW_SWAP|NOTE_MEMORYSTATUS_PROC_LIMIT_WARN
+                       |NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL
+                       |NOTE_MEMORYSTATUS_MSL_STATUS,
+       .dst_size       = sizeof(struct dispatch_source_refs_s),
+
+#if TARGET_OS_SIMULATOR
+       .dst_create     = _dispatch_source_memorypressure_create,
+       // redirected to _dispatch_source_type_vnode
+#else
+       .dst_create     = _dispatch_unote_create_without_handle,
+       .dst_merge_evt  = _dispatch_source_merge_evt,
+#endif
+};
+
+static dispatch_unote_t
+_dispatch_source_vm_create(dispatch_source_type_t dst DISPATCH_UNUSED,
+               uintptr_t handle, unsigned long mask DISPATCH_UNUSED)
+{
+       // Map legacy vm pressure to memorypressure warning rdar://problem/15907505
+       dispatch_unote_t du = dux_create(&_dispatch_source_type_memorypressure,
+                       handle, NOTE_MEMORYSTATUS_PRESSURE_WARN);
+       if (du._du) {
+               du._du->du_vmpressure_override = 1;
+       }
+       return du;
+}
+
+const dispatch_source_type_s _dispatch_source_type_vm = {
+       .dst_kind       = "vm (deprecated)",
+       .dst_filter     = EVFILT_MEMORYSTATUS,
+       .dst_flags      = EV_UDATA_SPECIFIC|EV_DISPATCH,
+       .dst_mask       = NOTE_VM_PRESSURE,
+       .dst_size       = sizeof(struct dispatch_source_refs_s),
+
+       .dst_create     = _dispatch_source_vm_create,
+       // redirected to _dispatch_source_type_memorypressure
+};
+#endif // DISPATCH_USE_MEMORYSTATUS
+
+#pragma mark mach send / notifications
+#if HAVE_MACH
+
+// Flags for all notifications that are registered/unregistered when a
+// send-possible notification is requested/delivered
+#define _DISPATCH_MACH_SP_FLAGS (DISPATCH_MACH_SEND_POSSIBLE| \
+               DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_DELETED)
+
+static void _dispatch_mach_host_notify_update(void *context);
+
+static mach_port_t _dispatch_mach_notify_port;
+static dispatch_source_t _dispatch_mach_notify_source;
+
+static void
+_dispatch_timers_calendar_change(void)
+{
+       uint32_t qos;
+
+       // calendar change may have gone past the wallclock deadline
+       _dispatch_timers_expired = true;
+       for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) {
+               _dispatch_timers_processing_mask |=
+                               1 << DISPATCH_TIMER_INDEX(DISPATCH_CLOCK_WALL, qos);
+       }
+}
+
+static mach_msg_audit_trailer_t *
+_dispatch_mach_msg_get_audit_trailer(mach_msg_header_t *hdr)
+{
+       mach_msg_trailer_t *tlr = NULL;
+       mach_msg_audit_trailer_t *audit_tlr = NULL;
+       tlr = (mach_msg_trailer_t *)((unsigned char *)hdr +
+                       round_msg(hdr->msgh_size));
+       // The trailer should always be of format zero.
+       if (tlr->msgh_trailer_type == MACH_MSG_TRAILER_FORMAT_0) {
+               if (tlr->msgh_trailer_size >= sizeof(mach_msg_audit_trailer_t)) {
+                       audit_tlr = (mach_msg_audit_trailer_t *)tlr;
+               }
+       }
+       return audit_tlr;
+}
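+
+// Illustrative sketch (not compiled): the receive options a caller must pass
+// for the audit trailer parsed above to be present; the kernel appends it
+// after the rounded-up message body. `size` must include room for the
+// trailer. The helper name is hypothetical.
+#if 0
+static mach_msg_return_t
+_example_receive_with_audit_trailer(mach_port_t port, mach_msg_header_t *hdr,
+               mach_msg_size_t size)
+{
+       const mach_msg_option_t opts = MACH_RCV_MSG |
+                       MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) |
+                       MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AUDIT);
+       return mach_msg(hdr, opts, 0, size, port, MACH_MSG_TIMEOUT_NONE,
+                       MACH_PORT_NULL);
+}
+#endif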
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr)
+{
+       mig_reply_error_t reply;
+       mach_msg_audit_trailer_t *tlr = NULL;
+       dispatch_assert(sizeof(mig_reply_error_t) == sizeof(union
+               __ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem));
+       dispatch_assert(sizeof(mig_reply_error_t) <
+                       DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE);
+       tlr = _dispatch_mach_msg_get_audit_trailer(hdr);
+       if (!tlr) {
+               DISPATCH_INTERNAL_CRASH(0, "message received without expected trailer");
+       }
+       if (hdr->msgh_id <= MACH_NOTIFY_LAST
+                       && dispatch_assume_zero(tlr->msgh_audit.val[
+                       DISPATCH_MACH_AUDIT_TOKEN_PID])) {
+               mach_msg_destroy(hdr);
+               return;
+       }
+       boolean_t success = libdispatch_internal_protocol_server(hdr, &reply.Head);
+       if (!success && reply.RetCode == MIG_BAD_ID &&
+                       (hdr->msgh_id == HOST_CALENDAR_SET_REPLYID ||
+                        hdr->msgh_id == HOST_CALENDAR_CHANGED_REPLYID)) {
+               _dispatch_debug("calendar-change notification");
+               _dispatch_timers_calendar_change();
+               _dispatch_mach_host_notify_update(NULL);
+               success = TRUE;
+               reply.RetCode = KERN_SUCCESS;
+       }
+       if (dispatch_assume(success) && reply.RetCode != MIG_NO_REPLY) {
+               (void)dispatch_assume_zero(reply.RetCode);
+       }
+       if (!success || (reply.RetCode && reply.RetCode != MIG_NO_REPLY)) {
+               mach_msg_destroy(hdr);
+       }
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_notify_port_init(void *context DISPATCH_UNUSED)
+{
+       kern_return_t kr;
+#if HAVE_MACH_PORT_CONSTRUCT
+       mach_port_options_t opts = { .flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT };
+#if DISPATCH_SIZEOF_PTR == 8
+       const mach_port_context_t guard = 0xfeed09071f1ca7edull;
+#else
+       const mach_port_context_t guard = 0xff1ca7edull;
+#endif
+       kr = mach_port_construct(mach_task_self(), &opts, guard,
+                       &_dispatch_mach_notify_port);
+#else
+       kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
+                       &_dispatch_mach_notify_port);
+#endif
+       DISPATCH_VERIFY_MIG(kr);
+       if (unlikely(kr)) {
+               DISPATCH_CLIENT_CRASH(kr,
+                               "mach_port_construct() failed: cannot create receive right");
+       }
+
+       static const struct dispatch_continuation_s dc = {
+               .dc_func = (void*)_dispatch_mach_notify_source_invoke,
+       };
+       _dispatch_mach_notify_source = _dispatch_source_create_mach_msg_direct_recv(
+                       _dispatch_mach_notify_port, &dc);
+       dispatch_assert(_dispatch_mach_notify_source);
+       dispatch_activate(_dispatch_mach_notify_source);
+}
+
+static void
+_dispatch_mach_host_port_init(void *ctxt DISPATCH_UNUSED)
+{
+       kern_return_t kr;
+       mach_port_t mp, mhp = mach_host_self();
+       kr = host_get_host_port(mhp, &mp);
+       DISPATCH_VERIFY_MIG(kr);
+       if (likely(!kr)) {
+               // mach_host_self returned the HOST_PRIV port
+               kr = mach_port_deallocate(mach_task_self(), mhp);
+               DISPATCH_VERIFY_MIG(kr);
+               mhp = mp;
+       } else if (kr != KERN_INVALID_ARGUMENT) {
+               (void)dispatch_assume_zero(kr);
+       }
+       if (unlikely(!mhp)) {
+               DISPATCH_CLIENT_CRASH(kr, "Could not get unprivileged host port");
+       }
+       _dispatch_mach_host_port = mhp;
+}
+
+mach_port_t
+_dispatch_get_mach_host_port(void)
+{
+       dispatch_once_f(&_dispatch_mach_host_port_pred, NULL,
+                       _dispatch_mach_host_port_init);
+       return _dispatch_mach_host_port;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline mach_port_t
+_dispatch_get_mach_notify_port(void)
+{
+       static dispatch_once_t pred;
+       dispatch_once_f(&pred, NULL, _dispatch_mach_notify_port_init);
+       return _dispatch_mach_notify_port;
+}
+
+static void
+_dispatch_mach_host_notify_update(void *context DISPATCH_UNUSED)
+{
+       static int notify_type = HOST_NOTIFY_CALENDAR_SET;
+       kern_return_t kr;
+       _dispatch_debug("registering for calendar-change notification");
+retry:
+       kr = host_request_notification(_dispatch_get_mach_host_port(),
+                       notify_type, _dispatch_get_mach_notify_port());
+       // Fall back to HOST_NOTIFY_CALENDAR_CHANGE when the newer _SET variant
+       // is unsupported; it fires strictly more often
+       if (kr == KERN_INVALID_ARGUMENT &&
+                       notify_type != HOST_NOTIFY_CALENDAR_CHANGE) {
+               notify_type = HOST_NOTIFY_CALENDAR_CHANGE;
+               goto retry;
+       }
+       DISPATCH_VERIFY_MIG(kr);
+       (void)dispatch_assume_zero(kr);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_mach_host_calendar_change_register(void)
+{
+       static dispatch_once_t pred;
+       dispatch_once_f(&pred, NULL, _dispatch_mach_host_notify_update);
+}
+
+static kern_return_t
+_dispatch_mach_notify_update(dispatch_muxnote_t dmn, uint32_t new_flags,
+               uint32_t del_flags, uint32_t mask, mach_msg_id_t notify_msgid,
+               mach_port_mscount_t notify_sync)
+{
+       mach_port_t previous, port = (mach_port_t)dmn->dmn_kev.ident;
+       typeof(dmn->dmn_kev.data) prev = dmn->dmn_kev.data;
+       kern_return_t kr, krr = 0;
+
+       // Update notification registration state.
+       dmn->dmn_kev.data |= (new_flags | dmn->dmn_kev.fflags) & mask;
+       dmn->dmn_kev.data &= ~(del_flags & mask);
+
+       _dispatch_debug_machport(port);
+       if ((dmn->dmn_kev.data & mask) && !(prev & mask)) {
+               _dispatch_debug("machport[0x%08x]: registering for send-possible "
+                               "notification", port);
+               previous = MACH_PORT_NULL;
+               krr = mach_port_request_notification(mach_task_self(), port,
+                               notify_msgid, notify_sync, _dispatch_get_mach_notify_port(),
+                               MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
+               DISPATCH_VERIFY_MIG(krr);
+
+               switch (krr) {
+               case KERN_INVALID_NAME:
+               case KERN_INVALID_RIGHT:
+                       // Suppress errors & clear registration state
+                       dmn->dmn_kev.data &= ~mask;
+                       break;
+               default:
+               // Otherwise, we don't expect any errors from Mach; log any that occur
+                       if (dispatch_assume_zero(krr)) {
+                               // log the error & clear registration state
+                               dmn->dmn_kev.data &= ~mask;
+                       } else if (dispatch_assume_zero(previous)) {
+                               // Another subsystem has beat libdispatch to requesting the
+                               // specified Mach notification on this port. We should
+                               // technically cache the previous port and message it when the
+                               // kernel messages our port. Or we can just say screw those
+                               // subsystems and deallocate the previous port.
+                               // They should adopt libdispatch :-P
+                               kr = mach_port_deallocate(mach_task_self(), previous);
+                               DISPATCH_VERIFY_MIG(kr);
+                               (void)dispatch_assume_zero(kr);
+                               previous = MACH_PORT_NULL;
+                       }
+               }
+       } else if (!(dmn->dmn_kev.data & mask) && (prev & mask)) {
+               _dispatch_debug("machport[0x%08x]: unregistering for send-possible "
+                               "notification", port);
+               previous = MACH_PORT_NULL;
+               kr = mach_port_request_notification(mach_task_self(), port,
+                               notify_msgid, notify_sync, MACH_PORT_NULL,
+                               MACH_MSG_TYPE_MOVE_SEND_ONCE, &previous);
+               DISPATCH_VERIFY_MIG(kr);
+
+               switch (kr) {
+               case KERN_INVALID_NAME:
+               case KERN_INVALID_RIGHT:
+               case KERN_INVALID_ARGUMENT:
+                       break;
+               default:
+                       if (dispatch_assume_zero(kr)) {
+                               // log the error
+                       }
+               }
+       } else {
+               return 0;
+       }
+       if (unlikely(previous)) {
+               // the kernel has not consumed the send-once right yet
+               (void)dispatch_assume_zero(
+                               _dispatch_send_consume_send_once_right(previous));
+       }
+       return krr;
+}
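The registration above rides on the general Mach primitive; a standalone sketch of the dead-name case (the helper name is an assumption):

    #include <mach/mach.h>
    #include <mach/notify.h>

    // Ask for a dead-name notification on `port`, delivered to `notify_port`
    // as a send-once message. *previous receives any prior registration,
    // which the caller then owns and must consume or deallocate.
    static kern_return_t
    request_dead_name_notify(mach_port_t port, mach_port_t notify_port,
                    mach_port_t *previous)
    {
            *previous = MACH_PORT_NULL;
            return mach_port_request_notification(mach_task_self(), port,
                            MACH_NOTIFY_DEAD_NAME, 0, notify_port,
                            MACH_MSG_TYPE_MAKE_SEND_ONCE, previous);
    }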
+
+static bool
+_dispatch_kevent_mach_notify_resume(dispatch_muxnote_t dmn, uint32_t new_flags,
+               uint32_t del_flags)
+{
+       kern_return_t kr = KERN_SUCCESS;
+       dispatch_assert_zero(new_flags & del_flags);
+       if ((new_flags & _DISPATCH_MACH_SP_FLAGS) ||
+                       (del_flags & _DISPATCH_MACH_SP_FLAGS)) {
+               // Requesting a (delayed) non-sync send-possible notification
+               // registers for both immediate dead-name notification and delayed-arm
+               // send-possible notification for the port.
+       // The send-possible notification is armed when a mach_msg() to the
+       // port with MACH_SEND_NOTIFY times out.
+               // If send-possible is unavailable, fall back to immediate dead-name
+               // registration rdar://problem/2527840&9008724
+               kr = _dispatch_mach_notify_update(dmn, new_flags, del_flags,
+                               _DISPATCH_MACH_SP_FLAGS, MACH_NOTIFY_SEND_POSSIBLE,
+                               MACH_NOTIFY_SEND_POSSIBLE == MACH_NOTIFY_DEAD_NAME);
+       }
+       return kr == KERN_SUCCESS;
+}
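Per the comment above, arming is a side effect of a timed send; a hedged sketch of that pattern (function name is hypothetical):

    #include <mach/mach.h>
    #include <stdbool.h>

    // Attempt a non-blocking send. MACH_SEND_TIMED_OUT means the queue was
    // full, and any previously requested send-possible notification is now
    // armed to fire once the queue drains.
    static bool
    send_or_arm_send_possible(mach_msg_header_t *hdr)
    {
            kern_return_t kr = mach_msg(hdr,
                            MACH_SEND_MSG | MACH_SEND_TIMEOUT | MACH_SEND_NOTIFY,
                            hdr->msgh_size, 0, MACH_PORT_NULL,
                            MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
            return kr != MACH_SEND_TIMED_OUT;
    }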
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_notify_merge(mach_port_t name, uint32_t data, bool final)
+{
+       dispatch_unote_linkage_t dul, dul_next;
+       dispatch_muxnote_t dmn;
+
+       _dispatch_debug_machport(name);
+       dmn = _dispatch_mach_muxnote_find(name, DISPATCH_EVFILT_MACH_NOTIFICATION);
+       if (!dmn) {
+               return;
+       }
+
+       dmn->dmn_kev.data &= ~_DISPATCH_MACH_SP_FLAGS;
+       if (!final) {
+               // Re-register for notification before delivery
+               final = !_dispatch_kevent_mach_notify_resume(dmn, data, 0);
+       }
+
+       uint32_t flags = final ? EV_ONESHOT : EV_ENABLE;
+       DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev) = 0;
+       TAILQ_FOREACH_SAFE(dul, &dmn->dmn_unotes_head, du_link, dul_next) {
+               dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul);
+               os_atomic_store2o(du._dmsr, dmsr_notification_armed, false, relaxed);
+               dux_merge_evt(du._du, flags, (data & du._du->du_fflags), 0, 0);
+               if (!dul_next || DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev)) {
+                       // current merge is last in list (dmn might have been freed)
+                       // or it re-armed the notification
+                       break;
+               }
+       }
+}
+
+kern_return_t
+_dispatch_mach_notify_port_deleted(mach_port_t notify DISPATCH_UNUSED,
+               mach_port_name_t name)
+{
+#if DISPATCH_DEBUG
+       _dispatch_log("Corruption: Mach send/send-once/dead-name right 0x%x "
+                       "deleted prematurely", name);
+#endif
+       _dispatch_debug_machport(name);
+       _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DELETED, true);
+       return KERN_SUCCESS;
+}
+
+kern_return_t
+_dispatch_mach_notify_dead_name(mach_port_t notify DISPATCH_UNUSED,
+               mach_port_name_t name)
+{
+       kern_return_t kr;
+
+       _dispatch_debug("machport[0x%08x]: dead-name notification", name);
+       _dispatch_debug_machport(name);
+       _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DEAD, true);
+
+       // the act of receiving a dead name notification allocates a dead-name
+       // right that must be deallocated
+       kr = mach_port_deallocate(mach_task_self(), name);
+       DISPATCH_VERIFY_MIG(kr);
+       //(void)dispatch_assume_zero(kr);
+       return KERN_SUCCESS;
+}
+
+kern_return_t
+_dispatch_mach_notify_send_possible(mach_port_t notify DISPATCH_UNUSED,
+               mach_port_name_t name)
+{
+       _dispatch_debug("machport[0x%08x]: send-possible notification", name);
+       _dispatch_debug_machport(name);
+       _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_POSSIBLE, false);
+       return KERN_SUCCESS;
+}
+
+void
+_dispatch_mach_notification_set_armed(dispatch_mach_send_refs_t dmsr)
+{
+       dispatch_muxnote_t dmn = _dispatch_unote_get_linkage(dmsr)->du_muxnote;
+       dispatch_unote_linkage_t dul;
+       dispatch_unote_t du;
+
+       if (!_dispatch_unote_registered(dmsr)) {
+               return;
+       }
+
+       DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev) = true;
+       TAILQ_FOREACH(dul, &dmn->dmn_unotes_head, du_link) {
+               du = _dispatch_unote_linkage_get_unote(dul);
+               os_atomic_store2o(du._dmsr, dmsr_notification_armed, true, relaxed);
+       }
+}
+
+static dispatch_unote_t
+_dispatch_source_mach_send_create(dispatch_source_type_t dst,
+       uintptr_t handle, unsigned long mask)
+{
+       if (!mask) {
+               // Preserve legacy behavior that (mask == 0) => DISPATCH_MACH_SEND_DEAD
+               mask = DISPATCH_MACH_SEND_DEAD;
+       }
+       if (!handle) {
+               handle = MACH_PORT_DEAD; // <rdar://problem/27651332>
+       }
+       return _dispatch_unote_create_with_handle(dst, handle, mask);
+}
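For context, the client-side view of this source type; per the code above, passing mask 0 behaves like DISPATCH_MACH_SEND_DEAD (send_port and queue are assumptions):

    dispatch_source_t ds = dispatch_source_create(
                    DISPATCH_SOURCE_TYPE_MACH_SEND, send_port,
                    DISPATCH_MACH_SEND_DEAD, queue);
    dispatch_source_set_event_handler(ds, ^{
            // the peer's receive right died; tear the connection down
            dispatch_source_cancel(ds);
    });
    dispatch_activate(ds);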
+
+static bool
+_dispatch_mach_send_update(dispatch_muxnote_t dmn)
+{
+       if (dmn->dmn_kev.flags & EV_DELETE) {
+               return _dispatch_kevent_mach_notify_resume(dmn, 0, dmn->dmn_kev.fflags);
+       } else {
+               return _dispatch_kevent_mach_notify_resume(dmn, dmn->dmn_kev.fflags, 0);
+       }
+}
+
+const dispatch_source_type_s _dispatch_source_type_mach_send = {
+       .dst_kind       = "mach_send",
+       .dst_filter     = DISPATCH_EVFILT_MACH_NOTIFICATION,
+       .dst_flags      = EV_CLEAR,
+       .dst_mask       = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE,
+       .dst_size       = sizeof(struct dispatch_source_refs_s),
+
+       .dst_create     = _dispatch_source_mach_send_create,
+       .dst_update_mux = _dispatch_mach_send_update,
+       .dst_merge_evt  = _dispatch_source_merge_evt,
+};
+
+static dispatch_unote_t
+_dispatch_mach_send_create(dispatch_source_type_t dst,
+       uintptr_t handle, unsigned long mask)
+{
+       // without handle because the mach code will set the ident later
+       dispatch_unote_t du =
+                       _dispatch_unote_create_without_handle(dst, handle, mask);
+       if (du._dmsr) {
+               du._dmsr->dmsr_disconnect_cnt = DISPATCH_MACH_NEVER_CONNECTED;
+               TAILQ_INIT(&du._dmsr->dmsr_replies);
+       }
+       return du;
+}
+
+const dispatch_source_type_s _dispatch_mach_type_send = {
+       .dst_kind       = "mach_send (mach)",
+       .dst_filter     = DISPATCH_EVFILT_MACH_NOTIFICATION,
+       .dst_flags      = EV_CLEAR,
+       .dst_mask       = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE,
+       .dst_size       = sizeof(struct dispatch_mach_send_refs_s),
+
+       .dst_create     = _dispatch_mach_send_create,
+       .dst_update_mux = _dispatch_mach_send_update,
+       .dst_merge_evt  = _dispatch_mach_merge_notification,
+};
+
+#endif // HAVE_MACH
+#pragma mark mach recv / reply
+#if HAVE_MACH
+
+static void
+_dispatch_kevent_mach_msg_recv(dispatch_unote_t du, uint32_t flags,
+               mach_msg_header_t *hdr)
+{
+       mach_msg_size_t siz = hdr->msgh_size + DISPATCH_MACH_TRAILER_SIZE;
+       mach_port_t name = hdr->msgh_local_port;
+
+       if (!dispatch_assume(hdr->msgh_size <= UINT_MAX -
+                       DISPATCH_MACH_TRAILER_SIZE)) {
+               _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: "
+                               "received overlarge message");
+       } else if (!dispatch_assume(name)) {
+               _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: "
+                               "received message with MACH_PORT_NULL port");
+       } else {
+               _dispatch_debug_machport(name);
+               if (likely(du._du)) {
+                       return dux_merge_msg(du._du, flags, hdr, siz);
+               }
+               _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: "
+                               "received message with no listeners");
+       }
+
+       mach_msg_destroy(hdr);
+       if (flags & DISPATCH_EV_MSG_NEEDS_FREE) {
+               free(hdr);
+       }
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke)
+{
+       mach_msg_header_t *hdr = _dispatch_kevent_mach_msg_buf(ke);
+       mach_msg_size_t siz;
+       mach_msg_return_t kr = (mach_msg_return_t)ke->fflags;
+       uint32_t flags = ke->flags;
+       dispatch_unote_t du = _dispatch_kevent_get_unote(ke);
+
+       if (unlikely(!hdr)) {
+               DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message");
+       }
+       if (likely(!kr)) {
+               _dispatch_kevent_mach_msg_recv(du, flags, hdr);
+               goto out;
+       } else if (kr != MACH_RCV_TOO_LARGE) {
+               goto out;
+       } else if (!ke->data) {
+               DISPATCH_INTERNAL_CRASH(0, "MACH_RCV_LARGE_IDENTITY with no identity");
+       }
+       if (unlikely(ke->ext[1] > (UINT_MAX - DISPATCH_MACH_TRAILER_SIZE))) {
+               DISPATCH_INTERNAL_CRASH(ke->ext[1],
+                               "EVFILT_MACHPORT with overlarge message");
+       }
+       siz = _dispatch_kevent_mach_msg_size(ke) + DISPATCH_MACH_TRAILER_SIZE;
+       hdr = malloc(siz);
+       if (dispatch_assume(hdr)) {
+               flags |= DISPATCH_EV_MSG_NEEDS_FREE;
+       } else {
+               // Kernel will discard message too large to fit
+               hdr = NULL;
+               siz = 0;
+       }
+       mach_port_t name = (mach_port_name_t)ke->data;
+       const mach_msg_option_t options = ((DISPATCH_MACH_RCV_OPTIONS |
+                       MACH_RCV_TIMEOUT) & ~MACH_RCV_LARGE);
+       kr = mach_msg(hdr, options, 0, siz, name, MACH_MSG_TIMEOUT_NONE,
+                       MACH_PORT_NULL);
+       if (likely(!kr)) {
+               _dispatch_kevent_mach_msg_recv(du, flags, hdr);
+               goto out;
+       } else if (kr == MACH_RCV_TOO_LARGE) {
+               _dispatch_log("BUG in libdispatch client: "
+                               "_dispatch_kevent_mach_msg_drain: dropped message too "
+                               "large to fit in memory: id = 0x%x, size = %u",
+                               hdr->msgh_id, _dispatch_kevent_mach_msg_size(ke));
+               kr = MACH_MSG_SUCCESS;
+       }
+       if (flags & DISPATCH_EV_MSG_NEEDS_FREE) {
+               free(hdr);
+       }
+out:
+       if (unlikely(kr)) {
+               _dispatch_bug_mach_client("_dispatch_kevent_mach_msg_drain: "
+                               "message reception failed", kr);
+       }
+}
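A condensed sketch of the recovery path above (helper is hypothetical; MAX_TRAILER_SIZE stands in for DISPATCH_MACH_TRAILER_SIZE): after a MACH_RCV_TOO_LARGE kevent, the queued message is polled again into a right-sized buffer instead of being dropped:

    #include <mach/mach.h>
    #include <stdlib.h>

    static mach_msg_header_t *
    receive_oversized(mach_port_t port, mach_msg_size_t msg_size)
    {
            mach_msg_size_t size = msg_size + MAX_TRAILER_SIZE;
            mach_msg_header_t *hdr = malloc(size);
            if (!hdr) return NULL;
            // MACH_RCV_TIMEOUT with a zero timeout polls the queued message
            kern_return_t kr = mach_msg(hdr, MACH_RCV_MSG | MACH_RCV_TIMEOUT,
                            0, size, port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
            if (kr != MACH_MSG_SUCCESS) {
                    free(hdr);
                    return NULL;
            }
            return hdr; // caller must mach_msg_destroy() and free()
    }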
+
+const dispatch_source_type_s _dispatch_source_type_mach_recv = {
+       .dst_kind       = "mach_recv",
+       .dst_filter     = EVFILT_MACHPORT,
+       .dst_flags      = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED,
+       .dst_fflags     = 0,
+       .dst_size       = sizeof(struct dispatch_source_refs_s),
+
+       .dst_create     = _dispatch_unote_create_with_handle,
+       .dst_merge_evt  = _dispatch_source_merge_evt,
+       .dst_merge_msg  = NULL, // never receives messages directly
+
+       .dst_per_trigger_qos = true,
+};
+
+static void
+_dispatch_source_mach_recv_direct_merge_msg(dispatch_unote_t du, uint32_t flags,
+               mach_msg_header_t *msg, mach_msg_size_t msgsz DISPATCH_UNUSED)
+{
+       dispatch_continuation_t dc = du._dr->ds_handler[DS_EVENT_HANDLER];
+       dispatch_source_t ds = _dispatch_source_from_refs(du._dr);
+       dispatch_queue_t cq = _dispatch_queue_get_current();
+
+       // see firehose_client_push_notify_async
+       _dispatch_queue_set_current(ds->_as_dq);
+       dc->dc_func(msg);
+       _dispatch_queue_set_current(cq);
+       if (flags & DISPATCH_EV_MSG_NEEDS_FREE) {
+               free(msg);
+       }
+       if ((ds->dq_atomic_flags & DSF_CANCELED) ||
+                       (flags & (EV_ONESHOT | EV_DELETE))) {
+               return _dispatch_source_merge_evt(du, flags, 0, 0, 0);
+       }
+       if (_dispatch_unote_needs_rearm(du)) {
+               return _dispatch_unote_resume(du);
+       }
+}
+
+static void
+_dispatch_mach_recv_direct_merge(dispatch_unote_t du,
+               uint32_t flags, uintptr_t data,
+               uintptr_t status DISPATCH_UNUSED,
+               pthread_priority_t pp)
+{
+       if (flags & EV_VANISHED) {
+               DISPATCH_CLIENT_CRASH(du._du->du_ident,
+                               "Unexpected EV_VANISHED (do not destroy random mach ports)");
+       }
+       return _dispatch_source_merge_evt(du, flags, data, 0, pp);
+}
+
+const dispatch_source_type_s _dispatch_source_type_mach_recv_direct = {
+       .dst_kind       = "direct mach_recv",
+       .dst_filter     = EVFILT_MACHPORT,
+       .dst_flags      = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED,
+       .dst_fflags     = DISPATCH_MACH_RCV_OPTIONS,
+       .dst_size       = sizeof(struct dispatch_source_refs_s),
+
+       .dst_create     = _dispatch_unote_create_with_handle,
+       .dst_merge_evt  = _dispatch_mach_recv_direct_merge,
+       .dst_merge_msg  = _dispatch_source_mach_recv_direct_merge_msg,
+
+       .dst_per_trigger_qos = true,
+};
+
+const dispatch_source_type_s _dispatch_mach_type_recv = {
+       .dst_kind       = "mach_recv (channel)",
+       .dst_filter     = EVFILT_MACHPORT,
+       .dst_flags      = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED,
+       .dst_fflags     = DISPATCH_MACH_RCV_OPTIONS,
+       .dst_size       = sizeof(struct dispatch_mach_recv_refs_s),
+
+       // without handle because the mach code will set the ident after connect
+       .dst_create     = _dispatch_unote_create_without_handle,
+       .dst_merge_evt  = _dispatch_mach_recv_direct_merge,
+       .dst_merge_msg  = _dispatch_mach_merge_msg,
+
+       .dst_per_trigger_qos = true,
+};
+
+DISPATCH_NORETURN
+static void
+_dispatch_mach_reply_merge_evt(dispatch_unote_t du,
+               uint32_t flags DISPATCH_UNUSED, uintptr_t data DISPATCH_UNUSED,
+               uintptr_t status DISPATCH_UNUSED,
+               pthread_priority_t pp DISPATCH_UNUSED)
+{
+       DISPATCH_INTERNAL_CRASH(du._du->du_ident, "Unexpected event");
+}
+
+const dispatch_source_type_s _dispatch_mach_type_reply = {
+       .dst_kind       = "mach reply",
+       .dst_filter     = EVFILT_MACHPORT,
+       .dst_flags      = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_ONESHOT|EV_VANISHED,
+       .dst_fflags     = DISPATCH_MACH_RCV_OPTIONS,
+       .dst_size       = sizeof(struct dispatch_mach_reply_refs_s),
+
+       .dst_create     = _dispatch_unote_create_with_handle,
+       .dst_merge_evt  = _dispatch_mach_reply_merge_evt,
+       .dst_merge_msg  = _dispatch_mach_reply_merge_msg,
+};
+
+#pragma mark Mach channel SIGTERM notification (for XPC channels only)
+
+const dispatch_source_type_s _dispatch_xpc_type_sigterm = {
+       .dst_kind       = "sigterm (xpc)",
+       .dst_filter     = EVFILT_SIGNAL,
+       .dst_flags      = DISPATCH_EV_DIRECT|EV_CLEAR|EV_ONESHOT,
+       .dst_fflags     = 0,
+       .dst_size       = sizeof(struct dispatch_xpc_term_refs_s),
+
+       .dst_create     = _dispatch_unote_create_with_handle,
+       .dst_merge_evt  = _dispatch_xpc_sigterm_merge,
+};
+
+#endif // HAVE_MACH
+
+#endif // DISPATCH_EVENT_BACKEND_KEVENT
diff --git a/src/event/workqueue.c b/src/event/workqueue.c
new file mode 100644 (file)
index 0000000..73362a5
--- /dev/null
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2017-2017 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#include "internal.h"
+
+#if DISPATCH_USE_INTERNAL_WORKQUEUE
+
+/*
+ * dispatch_workq monitors the thread pool that is
+ * executing the work enqueued on libdispatch's pthread
+ * root queues and dynamically adjusts its size.
+ *
+ * The dynamic monitoring could be implemented using either
+ *   (a) low-frequency user-level approximation of the number of runnable
+ *       worker threads via reading the /proc file system
+ *   (b) a Linux kernel extension that hooks the process change handler
+ *       to accurately track the number of runnable normal worker threads
+ * This file provides an implementation of option (a).
+ *
+ * Using either form of monitoring, if (i) there appears to be
+ * work available in the monitored pthread root queue, (ii) the
+ * number of runnable workers is below the target size for the pool,
+ * and (iii) the total number of worker threads is below an upper limit,
+ * then an additional worker thread will be added to the pool.
+ */
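Conditions (i)-(iii) condensed into a single predicate (a sketch; all names are hypothetical):

    #include <stdbool.h>

    static bool
    should_add_worker(bool work_available, int runnable, int target,
                    int total_workers, int max_workers)
    {
            return work_available                   // (i)   queue has work
                            && runnable < target            // (ii)  below pool target
                            && total_workers < max_workers; // (iii) below upper limit
    }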
+
+#pragma mark static data for monitoring subsystem
+
+/*
+ * State for the user-level monitoring of a workqueue.
+ */
+typedef struct dispatch_workq_monitor_s {
+       /* The dispatch_queue we are monitoring */
+       dispatch_queue_t dq;
+
+       /* The observed number of runnable worker threads */
+       int32_t num_runnable;
+
+       /* The desired number of runnable worker threads */
+       int32_t target_runnable;
+
+       /*
+        * Tracking of registered workers; all accesses must hold the lock.
+        * Invariant: registered_tids[0]...registered_tids[num_registered_tids-1]
+        *   contain the dispatch_tids of the worker threads we are monitoring.
+        */
+       dispatch_unfair_lock_s registered_tid_lock;
+       dispatch_tid *registered_tids;
+       int num_registered_tids;
+} dispatch_workq_monitor_s, *dispatch_workq_monitor_t;
+
+static dispatch_workq_monitor_s _dispatch_workq_monitors[DISPATCH_QOS_MAX];
+
+#pragma mark Implementation of the monitoring subsystem.
+
+#define WORKQ_MAX_TRACKED_TIDS DISPATCH_WORKQ_MAX_PTHREAD_COUNT
+#define WORKQ_OVERSUBSCRIBE_FACTOR 2
+
+static void _dispatch_workq_init_once(void *context DISPATCH_UNUSED);
+static dispatch_once_t _dispatch_workq_init_once_pred;
+
+void
+_dispatch_workq_worker_register(dispatch_queue_t root_q, qos_class_t cls)
+{
+       dispatch_once_f(&_dispatch_workq_init_once_pred, NULL, &_dispatch_workq_init_once);
+
+#if HAVE_DISPATCH_WORKQ_MONITORING
+       dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls);
+       dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[qos-1];
+       dispatch_assert(mon->dq == root_q);
+       dispatch_tid tid = _dispatch_tid_self();
+       _dispatch_unfair_lock_lock(&mon->registered_tid_lock);
+       dispatch_assert(mon->num_registered_tids < WORKQ_MAX_TRACKED_TIDS-1);
+       int worker_id = mon->num_registered_tids++;
+       mon->registered_tids[worker_id] = tid;
+       _dispatch_unfair_lock_unlock(&mon->registered_tid_lock);
+#endif // HAVE_DISPATCH_WORKQ_MONITORING
+}
+
+void
+_dispatch_workq_worker_unregister(dispatch_queue_t root_q, qos_class_t cls)
+{
+#if HAVE_DISPATCH_WORKQ_MONITORING
+       dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls);
+       dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[qos-1];
+       dispatch_assert(mon->dq == root_q);
+       dispatch_tid tid = _dispatch_tid_self();
+       _dispatch_unfair_lock_lock(&mon->registered_tid_lock);
+       for (int i = 0; i < mon->num_registered_tids; i++) {
+               if (mon->registered_tids[i] == tid) {
+                       int last = mon->num_registered_tids - 1;
+                       mon->registered_tids[i] = mon->registered_tids[last];
+                       mon->registered_tids[last] = 0;
+                       mon->num_registered_tids--;
+                       break;
+               }
+       }
+       _dispatch_unfair_lock_unlock(&mon->registered_tid_lock);
+#endif // HAVE_DISPATCH_WORKQ_MONITORING
+}
+
+
+#if HAVE_DISPATCH_WORKQ_MONITORING
+#if defined(__linux__)
+/*
+ * For each registered worker thread, read its /proc/[pid]/stat entry
+ * to count how many of the workers are actually runnable.
+ * See the proc(5) man page for the format of the contents of /proc/[pid]/stat
+ */
+static void
+_dispatch_workq_count_runnable_workers(dispatch_workq_monitor_t mon)
+{
+       char path[128];
+       char buf[4096];
+       int running_count = 0;
+
+       _dispatch_unfair_lock_lock(&mon->registered_tid_lock);
+
+       for (int i = 0; i < mon->num_registered_tids; i++) {
+               dispatch_tid tid = mon->registered_tids[i];
+               int fd;
+               ssize_t bytes_read = -1;
+
+               int r = snprintf(path, sizeof(path), "/proc/%d/stat", tid);
+               dispatch_assert(r > 0 && r < (int)sizeof(path));
+
+               fd = open(path, O_RDONLY | O_NONBLOCK);
+               if (unlikely(fd == -1)) {
+                       DISPATCH_CLIENT_CRASH(tid,
+                                       "workq: registered worker exited prematurely");
+               } else {
+                       bytes_read = read(fd, buf, sizeof(buf)-1);
+                       (void)close(fd);
+               }
+
+               if (bytes_read > 0) {
+                       buf[bytes_read] = '\0';
+                       char state;
+                       if (sscanf(buf, "%*d %*s %c", &state) == 1) {
+                               // _dispatch_debug("workq: Worker %d, state %c\n", tid, state);
+                               if (state == 'R') {
+                                       running_count++;
+                               }
+                       } else {
+                               _dispatch_debug("workq: sscanf of state failed for %d", tid);
+                       }
+               } else {
+                       _dispatch_debug("workq: Failed to read %s", path);
+               }
+       }
+
+       mon->num_runnable = running_count;
+
+       _dispatch_unfair_lock_unlock(&mon->registered_tid_lock);
+}
+#else
+#error must define _dispatch_workq_count_runnable_workers
+#endif
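For illustration, the first three fields of /proc/[pid]/stat are pid, comm, and state, which is why the parse above only needs "%*d %*s %c" (sample data made up):

    #include <stdio.h>

    char sample[] = "1234 (dispatch-worker) R 1 1234";
    char state;
    if (sscanf(sample, "%*d %*s %c", &state) == 1 && state == 'R') {
            // this worker is currently runnable
    }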
+
+static void
+_dispatch_workq_monitor_pools(void *context DISPATCH_UNUSED)
+{
+       int global_soft_max = WORKQ_OVERSUBSCRIBE_FACTOR * (int)dispatch_hw_config(active_cpus);
+       int global_runnable = 0;
+       for (dispatch_qos_t i = DISPATCH_QOS_MAX; i > DISPATCH_QOS_UNSPECIFIED; i--) {
+               dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[i-1];
+               dispatch_queue_t dq = mon->dq;
+
+               if (!_dispatch_queue_class_probe(dq)) {
+                       _dispatch_debug("workq: %s is empty.", dq->dq_label);
+                       continue;
+               }
+
+               _dispatch_workq_count_runnable_workers(mon);
+               _dispatch_debug("workq: %s has %d runnable workers (target is %d)",
+                               dq->dq_label, mon->num_runnable, mon->target_runnable);
+
+               global_runnable += mon->num_runnable;
+
+               if (mon->num_runnable == 0) {
+                       // We have work, but no worker is runnable.
+                       // It is likely the program is stalled. Therefore treat
+                       // this as if dq were an overcommit queue and call poke
+                       // with the limit being the maximum number of workers for dq.
+                       int32_t floor = mon->target_runnable - WORKQ_MAX_TRACKED_TIDS;
+                       _dispatch_debug("workq: %s has no runnable workers; poking with floor %d",
+                                       dq->dq_label, floor);
+                       _dispatch_global_queue_poke(dq, 1, floor);
+                       global_runnable += 1; // account for poke in global estimate
+               } else if (mon->num_runnable < mon->target_runnable &&
+                                  global_runnable < global_soft_max) {
+                       // We are below target, but some workers are still runnable.
+                       // We want to oversubscribe to hit the desired load target.
+                       // However, this under-utilization may be transitory so set the
+                       // floor as a small multiple of threads per core.
+                       int32_t floor = (1 - WORKQ_OVERSUBSCRIBE_FACTOR) * mon->target_runnable;
+                       int32_t floor2 = mon->target_runnable - WORKQ_MAX_TRACKED_TIDS;
+                       floor = MAX(floor, floor2);
+                       _dispatch_debug("workq: %s under utilization target; poking with floor %d",
+                                       dq->dq_label, floor);
+                       _dispatch_global_queue_poke(dq, 1, floor);
+                       global_runnable += 1; // account for poke in global estimate
+               }
+       }
+}
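Worked numbers for the poke floors above, assuming WORKQ_OVERSUBSCRIBE_FACTOR == 2 and an 8-CPU machine (so target_runnable == 8):

    // stalled (num_runnable == 0):  floor = 8 - 255 = -247
    // under target, some runnable:  floor = MAX((1 - 2) * 8, 8 - 255)
    //                                     = MAX(-8, -247) = -8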
+#endif // HAVE_DISPATCH_WORKQ_MONITORING
+
+static void
+_dispatch_workq_init_once(void *context DISPATCH_UNUSED)
+{
+#if HAVE_DISPATCH_WORKQ_MONITORING
+       int target_runnable = (int)dispatch_hw_config(active_cpus);
+       for (dispatch_qos_t i = DISPATCH_QOS_MAX; i > DISPATCH_QOS_UNSPECIFIED; i--) {
+               dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[i-1];
+               mon->dq = _dispatch_get_root_queue(i, false);
+               void *buf = _dispatch_calloc(WORKQ_MAX_TRACKED_TIDS, sizeof(dispatch_tid));
+               mon->registered_tids = buf;
+               mon->target_runnable = target_runnable;
+       }
+
+       // Create monitoring timer that will periodically run on dispatch_mgr_q
+       dispatch_source_t ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER,
+                       0, 0, &_dispatch_mgr_q);
+       dispatch_source_set_timer(ds, dispatch_time(DISPATCH_TIME_NOW, 0),
+                       NSEC_PER_SEC, 0);
+       dispatch_source_set_event_handler_f(ds, _dispatch_workq_monitor_pools);
+       dispatch_set_context(ds, ds); // avoid appearing as leaked
+       dispatch_activate(ds);
+#endif // HAVE_DISPATCH_WORKQ_MONITORING
+}
+
+#endif // DISPATCH_USE_INTERNAL_WORKQUEUE
diff --git a/src/event/workqueue_internal.h b/src/event/workqueue_internal.h
new file mode 100644 (file)
index 0000000..94dfe4e
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2017-2017 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+/*
+ * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
+ * which are subject to change in future releases of Mac OS X. Any applications
+ * relying on these interfaces WILL break.
+ */
+
+#ifndef __DISPATCH_WORKQUEUE_INTERNAL__
+#define __DISPATCH_WORKQUEUE_INTERNAL__
+
+#define WORKQ_ADDTHREADS_OPTION_OVERCOMMIT 0x1
+
+#define DISPATCH_WORKQ_MAX_PTHREAD_COUNT 255
+
+void _dispatch_workq_worker_register(dispatch_queue_t root_q, qos_class_t cls);
+void _dispatch_workq_worker_unregister(dispatch_queue_t root_q, qos_class_t cls);
+
+#if defined(__linux__)
+#define HAVE_DISPATCH_WORKQ_MONITORING 1
+#else
+#define HAVE_DISPATCH_WORKQ_MONITORING 0
+#endif
+
+#endif /* __DISPATCH_WORKQUEUE_INTERNAL__ */
+
index 7ed7958274685ccd625dbf227d30ba7e4b895d03..e4fdf3324918db49ef7b9a9d777f6032a28d35a5 100644 (file)
@@ -40,12 +40,13 @@ register(
 );
 
 routine
-push(
+push_and_wait(
 RequestPort    comm_port               : mach_port_t;
 SReplyPort     reply_port              : mach_port_make_send_once_t;
                        qos_class               : qos_class_t;
                        for_io                  : boolean_t;
-out                    push_reply              : firehose_push_reply_t
+out                    push_reply              : firehose_push_reply_t;
+out                    quarantinedOut  : boolean_t
 );
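For this routine, MIG generates a synchronous client stub roughly shaped as below (a sketch; the exact typedefs come from the generated header), matching the firehose_send_push_and_wait() call sites later in this change:

    kern_return_t
    firehose_send_push_and_wait(mach_port_t comm_port, qos_class_t qos_class,
                    boolean_t for_io, firehose_push_reply_t *push_reply,
                    boolean_t *quarantinedOut);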
 
 simpleroutine
index a9b5af27669fa7229601b6d7b0adb9c0f820a633..3bb790c7cdde85a36dc6be76eec001c3d5b60a3d 100644 (file)
 #define likely(x)   __builtin_expect(!!(x), 1)
 #define unlikely(x) __builtin_expect(!!(x), 0)
 
+#ifndef OS_FALLTHROUGH
+#define OS_FALLTHROUGH
+#endif
+
 #define DISPATCH_INTERNAL_CRASH(ac, msg) ({ panic(msg); __builtin_trap(); })
 
 #if defined(__x86_64__) || defined(__i386__)
 #define dispatch_hardware_pause() __asm__("")
 #endif
 
-#define _dispatch_wait_until(c) do { \
-               while (!fastpath(c)) { \
+#define _dispatch_wait_until(c) ({ \
+               typeof(c) _c; \
+               for (;;) { \
+                       if (likely(_c = (c))) break; \
                        dispatch_hardware_pause(); \
-               } } while (0)
+               } \
+               _c; })
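Because the macro is now a statement expression, the awaited value can be consumed without a second load; a hedged usage sketch (variable names are hypothetical):

    // spin until a peer publishes a nonzero word, then keep the value
    uint32_t v = _dispatch_wait_until(os_atomic_load(&shared_word, relaxed));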
 #define dispatch_compiler_barrier()  __asm__ __volatile__("" ::: "memory")
 
 typedef uint32_t dispatch_lock;
@@ -62,6 +69,8 @@ typedef struct dispatch_gate_s {
 #define DLOCK_LOCK_DATA_CONTENTION 0
 static void _dispatch_gate_wait(dispatch_gate_t l, uint32_t flags);
 
+#define fcp_quarntined fcp_quarantined
+
 #include <kern/debug.h>
 #include <machine/cpu_number.h>
 #include <kern/thread.h>
@@ -443,23 +452,58 @@ firehose_client_send_push_async(firehose_buffer_t fb, qos_class_t qos,
                }
        }
 }
+
+OS_NOINLINE
+static void
+firehose_client_start_quarantine(firehose_buffer_t fb)
+{
+       if (_voucher_libtrace_hooks->vah_version < 5) return;
+       if (!_voucher_libtrace_hooks->vah_quarantine_starts) return;
+
+       _voucher_libtrace_hooks->vah_quarantine_starts();
+
+       fb->fb_header.fbh_quarantined = true;
+       firehose_buffer_stream_flush(fb, firehose_stream_special);
+       firehose_buffer_stream_flush(fb, firehose_stream_persist);
+       firehose_buffer_stream_flush(fb, firehose_stream_memory);
+}
 #endif // !KERNEL
 
 static void
 firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif,
-               firehose_push_reply_t reply, firehose_bank_state_u *state_out)
+               firehose_push_reply_t reply, bool quarantined,
+               firehose_bank_state_u *state_out)
 {
+       firehose_buffer_header_t fbh = &fb->fb_header;
        firehose_bank_state_u state;
        firehose_ring_tail_u otail, ntail;
        uint64_t old_flushed_pos, bank_updates;
        uint16_t io_delta = 0;
        uint16_t mem_delta = 0;
 
-       if (firehose_atomic_maxv2o(&fb->fb_header, fbh_bank.fbb_mem_flushed,
+       if (quarantined) {
+#ifndef KERNEL
+               // this isn't a dispatch_once so that the upcall to libtrace
+               // can actually log itself without blocking on the gate.
+               if (async_notif) {
+                       if (os_atomic_xchg(&fbh->fbh_quarantined_state,
+                                       FBH_QUARANTINE_STARTED, relaxed) !=
+                                       FBH_QUARANTINE_STARTED) {
+                               firehose_client_start_quarantine(fb);
+                       }
+               } else if (os_atomic_load(&fbh->fbh_quarantined_state, relaxed) ==
+                               FBH_QUARANTINE_NONE) {
+                       os_atomic_cmpxchg(&fbh->fbh_quarantined_state, FBH_QUARANTINE_NONE,
+                                       FBH_QUARANTINE_PENDING, relaxed);
+               }
+#endif
+       }
+
+       if (firehose_atomic_maxv2o(fbh, fbh_bank.fbb_mem_flushed,
                        reply.fpr_mem_flushed_pos, &old_flushed_pos, relaxed)) {
                mem_delta = (uint16_t)(reply.fpr_mem_flushed_pos - old_flushed_pos);
        }
-       if (firehose_atomic_maxv2o(&fb->fb_header, fbh_bank.fbb_io_flushed,
+       if (firehose_atomic_maxv2o(fbh, fbh_bank.fbb_io_flushed,
                        reply.fpr_io_flushed_pos, &old_flushed_pos, relaxed)) {
                io_delta = (uint16_t)(reply.fpr_io_flushed_pos - old_flushed_pos);
        }
@@ -471,14 +515,14 @@ firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif,
 
        if (!mem_delta && !io_delta) {
                if (state_out) {
-                       state_out->fbs_atomic_state = os_atomic_load2o(&fb->fb_header,
+                       state_out->fbs_atomic_state = os_atomic_load2o(fbh,
                                        fbh_bank.fbb_state.fbs_atomic_state, relaxed);
                }
                return;
        }
 
        __firehose_critical_region_enter();
-       os_atomic_rmw_loop2o(&fb->fb_header, fbh_ring_tail.frp_atomic_tail,
+       os_atomic_rmw_loop2o(fbh, fbh_ring_tail.frp_atomic_tail,
                        otail.frp_atomic_tail, ntail.frp_atomic_tail, relaxed, {
                ntail = otail;
                // overflow handles the generation wraps
@@ -488,7 +532,7 @@ firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif,
 
        bank_updates = ((uint64_t)mem_delta << FIREHOSE_BANK_SHIFT(0)) |
                        ((uint64_t)io_delta << FIREHOSE_BANK_SHIFT(1));
-       state.fbs_atomic_state = os_atomic_sub2o(&fb->fb_header,
+       state.fbs_atomic_state = os_atomic_sub2o(fbh,
                        fbh_bank.fbb_state.fbs_atomic_state, bank_updates, release);
        __firehose_critical_region_leave();
 
@@ -496,29 +540,32 @@ firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif,
 
        if (async_notif) {
                if (io_delta) {
-                       os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_io_notifs, relaxed);
+                       os_atomic_inc2o(fbh, fbh_bank.fbb_io_notifs, relaxed);
                }
                if (mem_delta) {
-                       os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_mem_notifs, relaxed);
+                       os_atomic_inc2o(fbh, fbh_bank.fbb_mem_notifs, relaxed);
                }
        }
 }
 
 #ifndef KERNEL
+OS_NOT_TAIL_CALLED OS_NOINLINE
 static void
-firehose_client_send_push(firehose_buffer_t fb, bool for_io,
+firehose_client_send_push_and_wait(firehose_buffer_t fb, bool for_io,
                firehose_bank_state_u *state_out)
 {
        mach_port_t sendp = fb->fb_header.fbh_sendp;
        firehose_push_reply_t push_reply = { };
        qos_class_t qos = qos_class_self();
+       boolean_t quarantined = false;
        kern_return_t kr;
 
        if (slowpath(sendp == MACH_PORT_DEAD)) {
                return;
        }
        if (fastpath(sendp)) {
-               kr = firehose_send_push(sendp, qos, for_io, &push_reply);
+               kr = firehose_send_push_and_wait(sendp, qos, for_io,
+                               &push_reply, &quarantined);
                if (likely(kr == KERN_SUCCESS)) {
                        goto success;
                }
@@ -530,7 +577,8 @@ firehose_client_send_push(firehose_buffer_t fb, bool for_io,
 
        sendp = firehose_client_reconnect(fb, sendp);
        if (fastpath(MACH_PORT_VALID(sendp))) {
-               kr = firehose_send_push(sendp, qos, for_io, &push_reply);
+               kr = firehose_send_push_and_wait(sendp, qos, for_io,
+                               &push_reply, &quarantined);
                if (likely(kr == KERN_SUCCESS)) {
                        goto success;
                }
@@ -566,12 +614,22 @@ success:
        // There only is a point for multithreaded clients if:
        // - enough samples (total_flushes above some limits)
        // - the ratio is really bad (a push per cycle is definitely a problem)
-       return firehose_client_merge_updates(fb, false, push_reply, state_out);
+       return firehose_client_merge_updates(fb, false, push_reply, quarantined,
+                       state_out);
+}
+
+OS_NOT_TAIL_CALLED OS_NOINLINE
+static void
+__FIREHOSE_CLIENT_THROTTLED_DUE_TO_HEAVY_LOGGING__(firehose_buffer_t fb,
+               bool for_io, firehose_bank_state_u *state_out)
+{
+       firehose_client_send_push_and_wait(fb, for_io, state_out);
 }
 
 kern_return_t
 firehose_client_push_reply(mach_port_t req_port OS_UNUSED,
-       kern_return_t rtc, firehose_push_reply_t push_reply OS_UNUSED)
+       kern_return_t rtc, firehose_push_reply_t push_reply OS_UNUSED,
+       boolean_t quarantined OS_UNUSED)
 {
        DISPATCH_INTERNAL_CRASH(rtc, "firehose_push_reply should never be sent "
                        "to the buffer receive port");
@@ -579,12 +637,12 @@ firehose_client_push_reply(mach_port_t req_port OS_UNUSED,
 
 kern_return_t
 firehose_client_push_notify_async(mach_port_t server_port OS_UNUSED,
-       firehose_push_reply_t push_reply)
+       firehose_push_reply_t push_reply, boolean_t quarantined)
 {
        // see _dispatch_source_merge_mach_msg_direct
        dispatch_queue_t dq = _dispatch_queue_get_current();
        firehose_buffer_t fb = dispatch_get_context(dq);
-       firehose_client_merge_updates(fb, true, push_reply, NULL);
+       firehose_client_merge_updates(fb, true, push_reply, quarantined, NULL);
        return KERN_SUCCESS;
 }
 
@@ -646,6 +704,7 @@ firehose_buffer_chunk_init(firehose_chunk_t fc,
                .fcp_qos = firehose_buffer_qos_bits_propagate(),
                .fcp_stream = ask->stream,
                .fcp_flag_io = ask->for_io,
+               .fcp_quarantined = ask->quarantined,
        };
 
        if (privptr) {
@@ -661,7 +720,8 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb,
 {
        firehose_stream_state_u state, new_state;
        firehose_tracepoint_t ft;
-       firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[ask->stream];
+       firehose_buffer_header_t fbh = &fb->fb_header;
+       firehose_buffer_stream_t fbs = &fbh->fbh_stream[ask->stream];
        uint64_t stamp_and_len;
 
        if (fastpath(ref)) {
@@ -678,7 +738,7 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb,
                ft->ft_thread = _pthread_threadid_self_np_direct();
 #endif
                if (ask->stream == firehose_stream_metadata) {
-                       os_atomic_or2o(fb, fb_header.fbh_bank.fbb_metadata_bitmap,
+                       os_atomic_or2o(fbh, fbh_bank.fbb_metadata_bitmap,
                                        1ULL << ref, relaxed);
                }
                // release barrier to make the chunk init visible
@@ -709,8 +769,11 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb,
                ft = NULL;
        }
 
+       // pairs with the one in firehose_buffer_tracepoint_reserve()
+       __firehose_critical_region_leave();
+
 #ifndef KERNEL
-       if (unlikely(state.fss_gate.dgl_lock != _dispatch_tid_self())) {
+       if (unlikely(_dispatch_lock_is_locked_by_self(state.fss_gate.dgl_lock))) {
                _dispatch_gate_broadcast_slow(&fbs->fbs_state.fss_gate,
                                state.fss_gate.dgl_lock);
        }
@@ -718,10 +781,16 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb,
        if (unlikely(state.fss_current == FIREHOSE_STREAM_STATE_PRISTINE)) {
                firehose_buffer_update_limits(fb);
        }
+
+       if (unlikely(os_atomic_load2o(fbh, fbh_quarantined_state, relaxed) ==
+                       FBH_QUARANTINE_PENDING)) {
+               if (os_atomic_cmpxchg2o(fbh, fbh_quarantined_state,
+                               FBH_QUARANTINE_PENDING, FBH_QUARANTINE_STARTED, relaxed)) {
+                       firehose_client_start_quarantine(fb);
+               }
+       }
 #endif // KERNEL
 
-       // pairs with the one in firehose_buffer_tracepoint_reserve()
-       __firehose_critical_region_leave();
        return ft;
 }
 
@@ -807,7 +876,7 @@ firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref)
        gen = head & FIREHOSE_RING_POS_GEN_MASK;
        idx = head & FIREHOSE_RING_POS_IDX_MASK;
 
-       while (unlikely(!os_atomic_cmpxchgvw(&fbh_ring[idx], gen, gen | ref, &dummy,
+       while (unlikely(!os_atomic_cmpxchgv(&fbh_ring[idx], gen, gen | ref, &dummy,
                        relaxed))) {
                // can only ever happen if a recycler is slow, this requires having
                // enough cores (>5 for I/O e.g.)
@@ -847,7 +916,7 @@ firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref)
                // a thread being preempted here for GEN_MASK worth of ring rotations,
                // it could lead to the cmpxchg succeed, and have a bogus enqueue
                // (confused enqueuer)
-               if (fastpath(os_atomic_cmpxchgvw(&fbh_ring[idx], gen, gen | ref, &dummy,
+               if (fastpath(os_atomic_cmpxchgv(&fbh_ring[idx], gen, gen | ref, &dummy,
                                relaxed))) {
                        if (fastpath(os_atomic_cmpxchgv(fbh_ring_head, head, head + 1,
                                        &head, release))) {
@@ -946,7 +1015,7 @@ firehose_buffer_ring_try_recycle(firehose_buffer_t fb)
 #ifndef KERNEL
 OS_NOINLINE
 static firehose_tracepoint_t
-firehose_buffer_tracepoint_reserve_slow2(firehose_buffer_t fb,
+firehose_buffer_tracepoint_reserve_wait_for_chunks_from_logd(firehose_buffer_t fb,
                firehose_tracepoint_query_t ask, uint8_t **privptr, uint16_t ref)
 {
        const uint64_t bank_unavail_mask = FIREHOSE_BANK_UNAVAIL_MASK(ask->for_io);
@@ -960,7 +1029,12 @@ firehose_buffer_tracepoint_reserve_slow2(firehose_buffer_t fb,
                state.fbs_atomic_state =
                                os_atomic_load2o(fbb, fbb_state.fbs_atomic_state, relaxed);
                while ((state.fbs_atomic_state - bank_inc) & bank_unavail_mask) {
-                       firehose_client_send_push(fb, ask->for_io, &state);
+                       if (ask->quarantined) {
+                               __FIREHOSE_CLIENT_THROTTLED_DUE_TO_HEAVY_LOGGING__(fb,
+                                               ask->for_io, &state);
+                       } else {
+                               firehose_client_send_push_and_wait(fb, ask->for_io, &state);
+                       }
                        if (slowpath(fb->fb_header.fbh_sendp == MACH_PORT_DEAD)) {
                                // logd was unloaded, give up
                                return NULL;
@@ -992,7 +1066,12 @@ firehose_buffer_tracepoint_reserve_slow2(firehose_buffer_t fb,
                if (fastpath(ref = firehose_buffer_ring_try_grow(fbb, fbs_max_ref))) {
                        break;
                }
-               firehose_client_send_push(fb, ask->for_io, NULL);
+               if (ask->quarantined) {
+                       __FIREHOSE_CLIENT_THROTTLED_DUE_TO_HEAVY_LOGGING__(fb,
+                                       ask->for_io, &state);
+               } else {
+                       firehose_client_send_push_and_wait(fb, ask->for_io, NULL);
+               }
                if (slowpath(fb->fb_header.fbh_sendp == MACH_PORT_DEAD)) {
                        // logd was unloaded, give up
                        break;
@@ -1038,7 +1117,8 @@ firehose_buffer_tracepoint_reserve_slow(firehose_buffer_t fb,
                        }
                }
        }
-       return firehose_buffer_tracepoint_reserve_slow2(fb, ask, privptr, ref);
+       return firehose_buffer_tracepoint_reserve_wait_for_chunks_from_logd(fb, ask,
+                       privptr, ref);
 #else
        firehose_bank_state_u value;
        ask->is_bank_ok = os_atomic_rmw_loop2o(fbb, fbb_state.fbs_atomic_state,
@@ -1100,7 +1180,7 @@ __firehose_merge_updates(firehose_push_reply_t update)
 {
        firehose_buffer_t fb = kernel_firehose_buffer;
        if (fastpath(fb)) {
-               firehose_client_merge_updates(fb, true, update, NULL);
+               firehose_client_merge_updates(fb, true, update, false, NULL);
        }
 }
 #endif // KERNEL
index 7679c8c0d2f15462122d8d6a408fbb009be97c73..e41d9cb29805f72a5cf914f8131f09f2835ed5ed 100644 (file)
@@ -171,6 +171,11 @@ typedef struct firehose_buffer_header_s {
        dispatch_once_t                                 fbh_notifs_pred OS_ALIGNED(64);
        dispatch_source_t                               fbh_notifs_source;
        dispatch_unfair_lock_s                  fbh_logd_lock;
+#define FBH_QUARANTINE_NONE            0
+#define FBH_QUARANTINE_PENDING 1
+#define FBH_QUARANTINE_STARTED 2
+       uint8_t volatile                                fbh_quarantined_state;
+       bool                                                    fbh_quarantined;
 #endif
        uint64_t                                                fbh_unused[0];
 } OS_ALIGNED(FIREHOSE_CHUNK_SIZE) *firehose_buffer_header_t;
@@ -187,6 +192,7 @@ typedef struct firehose_tracepoint_query_s {
        firehose_stream_t stream;
        bool     is_bank_ok;
        bool     for_io;
+       bool     quarantined;
        uint64_t stamp;
 } *firehose_tracepoint_query_t;
 
index 5f89e0d18de188ad9b8aa811f78db25618c1e86d..3939ee25b633400a6c06318aa8f2acef74e0a3f3 100644 (file)
@@ -55,17 +55,11 @@ firehose_mach_port_allocate(uint32_t flags, void *ctx)
        mach_port_options_t opts = {
                .flags = flags,
        };
-       kern_return_t kr;
-
-       for (;;) {
-               kr = mach_port_construct(mach_task_self(), &opts,
-                               (mach_port_context_t)ctx, &port);
-               if (fastpath(kr == KERN_SUCCESS)) {
-                       break;
-               }
+       kern_return_t kr = mach_port_construct(mach_task_self(), &opts,
+                       (mach_port_context_t)ctx, &port);
+       if (unlikely(kr)) {
                DISPATCH_VERIFY_MIG(kr);
-               dispatch_assume_zero(kr);
-               _dispatch_temporary_resource_shortage();
+               DISPATCH_CLIENT_CRASH(kr, "Unable to allocate mach port");
        }
        return port;
 }
@@ -325,9 +319,9 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp,
 #if KERNEL
                new_state.fss_allocator = (uint32_t)cpu_number();
 #else
-               new_state.fss_allocator = _dispatch_tid_self();
+               new_state.fss_allocator = _dispatch_lock_value_for_self();
 #endif
-               success = os_atomic_cmpxchgvw2o(fbs, fbs_state.fss_atomic_state,
+               success = os_atomic_cmpxchgv2o(fbs, fbs_state.fss_atomic_state,
                                old_state.fss_atomic_state, new_state.fss_atomic_state,
                                &old_state.fss_atomic_state, relaxed);
                if (likely(success)) {
@@ -341,6 +335,9 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp,
                .privsize = privsize,
                .stream = stream,
                .for_io = (firehose_stream_uses_io_bank & (1UL << stream)) != 0,
+#ifndef KERNEL
+               .quarantined = fb->fb_header.fbh_quarantined,
+#endif
                .stamp = stamp,
        };
        return firehose_buffer_tracepoint_reserve_slow(fb, &ask, privptr);
index 29d1ad240f786b89bbb4dd7524e24338d0b2c658..7040995d1de346f34fbb94af8cfea637f8e1bd74 100644 (file)
@@ -29,6 +29,8 @@
 #define __MigTypeCheck 1
 #endif
 
+#define fcp_quarntined fcp_quarantined
+
 #include <limits.h>
 #include <machine/endian.h>
 #include <mach/mach_types.h>
index 124defa590acdd127124ccf6ad106f31fa8e00c4..c080545162cdd69141226a5bbb5057913cdb0470 100644 (file)
@@ -33,11 +33,13 @@ skip; // firehose_register
 simpleroutine push_reply(
 RequestPort    req_port                : mach_port_move_send_once_t;
 in                     rtc                             : kern_return_t;
-in                     push_reply              : firehose_push_reply_t
+in                     push_reply              : firehose_push_reply_t;
+in                     quarantined             : boolean_t
 );
 
 simpleroutine push_notify_async(
 RequestPort    comm_port               : mach_port_t;
 in                     push_reply              : firehose_push_reply_t;
+in                     quarantined             : boolean_t;
 WaitTime       timeout                 : natural_t
 );
index e27293e10dde2f572737633d279499604017e6ae..ba335dbe30631088c93a7e3edc75c763643d78b5 100644 (file)
@@ -31,6 +31,11 @@ _Static_assert(offsetof(struct firehose_client_s, fc_mem_sent_flushed_pos)
                % 8 == 0, "Make sure atomic fields are properly aligned");
 #endif
 
+typedef struct fs_client_queue_s {
+       struct firehose_client_s *volatile fs_client_head;
+       struct firehose_client_s *volatile fs_client_tail;
+} fs_client_queue_s, *fs_client_queue_t;
+
 static struct firehose_server_s {
        mach_port_t                     fs_bootstrap_port;
        dispatch_mach_t         fs_mach_channel;
@@ -41,26 +46,161 @@ static struct firehose_server_s {
        firehose_handler_t      fs_handler;
 
        firehose_snapshot_t fs_snapshot;
-       bool                fs_io_snapshot_started;
-       bool                fs_mem_snapshot_started;
-
        int                                     fs_kernel_fd;
        firehose_client_t       fs_kernel_client;
 
        TAILQ_HEAD(, firehose_client_s) fs_clients;
+       os_unfair_lock      fs_clients_lock;
+       fs_client_queue_s       fs_queues[4];
+       dispatch_source_t       fs_sources[4];
 } server_config = {
        .fs_clients = TAILQ_HEAD_INITIALIZER(server_config.fs_clients),
+       .fs_clients_lock = OS_UNFAIR_LOCK_INIT,
        .fs_kernel_fd = -1,
 };
 
-#pragma mark -
-#pragma mark firehose client state machine
+OS_ALWAYS_INLINE
+static inline void
+fs_clients_lock(void)
+{
+       os_unfair_lock_lock_with_options(&server_config.fs_clients_lock,
+                       OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
+}
+
+OS_ALWAYS_INLINE
+static inline void
+fs_clients_unlock(void)
+{
+       os_unfair_lock_unlock(&server_config.fs_clients_lock);
+}
 
 static void firehose_server_demux(firehose_client_t fc,
                mach_msg_header_t *msg_hdr);
 static void firehose_client_cancel(firehose_client_t fc);
 static void firehose_client_snapshot_finish(firehose_client_t fc,
                firehose_snapshot_t snapshot, bool for_io);
+static void firehose_client_handle_death(void *ctxt);
+
+#pragma mark -
+#pragma mark firehose client enqueueing
+
+OS_ALWAYS_INLINE
+static inline bool
+fs_idx_is_for_io(size_t idx)
+{
+       return idx & 1;
+}
+
+OS_ALWAYS_INLINE
+static inline bool
+fs_queue_is_for_io(fs_client_queue_t q)
+{
+       return (q - server_config.fs_queues) & 1;
+}
+
+OS_ALWAYS_INLINE
+static inline bool
+fs_queue_is_for_quarantined(fs_client_queue_t q)
+{
+       return (q - server_config.fs_queues) & 2;
+}
+
+OS_ALWAYS_INLINE
+static inline fs_client_queue_t
+fs_queue(bool quarantined, bool for_io)
+{
+       return &server_config.fs_queues[quarantined * 2 + for_io];
+}
+
+OS_ALWAYS_INLINE
+static inline dispatch_source_t
+fs_source(bool quarantined, bool for_io)
+{
+       return server_config.fs_sources[quarantined * 2 + for_io];
+}
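The queue/source index encodes the (quarantined, for_io) pair as quarantined * 2 + for_io; spelled out as checks (a sketch):

    #include <assert.h>
    #include <stdbool.h>

    // [0] mem, [1] io, [2] quarantined mem, [3] quarantined io
    assert(fs_queue(false, false) == &server_config.fs_queues[0]);
    assert(fs_queue(false, true)  == &server_config.fs_queues[1]);
    assert(fs_queue(true,  false) == &server_config.fs_queues[2]);
    assert(fs_queue(true,  true)  == &server_config.fs_queues[3]);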
+
+OS_ALWAYS_INLINE
+static inline void
+firehose_client_push(firehose_client_t fc, pthread_priority_t pp,
+               bool quarantined, bool for_io)
+{
+       fs_client_queue_t queue = fs_queue(quarantined, for_io);
+       if (fc && os_mpsc_push_update_tail(queue, fs_client, fc, fc_next[for_io])) {
+               os_mpsc_push_update_head(queue, fs_client, fc);
+               _dispatch_source_merge_data(fs_source(quarantined, for_io), pp, 1);
+       } else if (pp) {
+               _dispatch_source_merge_data(fs_source(quarantined, for_io), pp, 1);
+       }
+}
+
+OS_ALWAYS_INLINE
+static inline bool
+firehose_client_wakeup(firehose_client_t fc, pthread_priority_t pp,
+               bool for_io)
+{
+       uintptr_t canceled_bit = FC_STATE_CANCELED(for_io);
+       uintptr_t enqueued_bit = FC_STATE_ENQUEUED(for_io);
+       uintptr_t old_state, new_state;
+
+       os_atomic_rmw_loop(&fc->fc_state, old_state, new_state, relaxed, {
+               if (old_state & canceled_bit) {
+                       os_atomic_rmw_loop_give_up(return false);
+               }
+               if (old_state & enqueued_bit) {
+                       os_atomic_rmw_loop_give_up(break);
+               }
+               new_state = old_state | enqueued_bit;
+       });
+       firehose_client_push(old_state & enqueued_bit ? NULL : fc, pp,
+                       fc->fc_quarantined, for_io);
+       return true;
+}
+
+OS_ALWAYS_INLINE
+static inline void
+firehose_client_start_cancel(firehose_client_t fc, bool for_io)
+{
+       uintptr_t canceling_bit = FC_STATE_CANCELING(for_io);
+       uintptr_t canceled_bit = FC_STATE_CANCELED(for_io);
+       uintptr_t enqueued_bit = FC_STATE_ENQUEUED(for_io);
+       uintptr_t old_state, new_state;
+
+       os_atomic_rmw_loop(&fc->fc_state, old_state, new_state, relaxed, {
+               if (old_state & (canceled_bit | canceling_bit)) {
+                       os_atomic_rmw_loop_give_up(return);
+               }
+               new_state = old_state | enqueued_bit | canceling_bit;
+       });
+       firehose_client_push(old_state & enqueued_bit ? NULL : fc, 0,
+                       fc->fc_quarantined, for_io);
+}
+
+OS_ALWAYS_INLINE
+static inline bool
+firehose_client_dequeue(firehose_client_t fc, bool for_io)
+{
+       uintptr_t canceling_bit = FC_STATE_CANCELING(for_io);
+       uintptr_t canceled_bit = FC_STATE_CANCELED(for_io);
+       uintptr_t enqueued_bit = FC_STATE_ENQUEUED(for_io);
+       uintptr_t old_state, new_state;
+
+       os_atomic_rmw_loop(&fc->fc_state, old_state, new_state, relaxed, {
+               new_state = old_state & ~(canceling_bit | enqueued_bit);
+               if (old_state & canceling_bit) {
+                       new_state |= canceled_bit;
+               }
+       });
+
+       if (((old_state ^ new_state) & FC_STATE_CANCELED_MASK) &&
+                       (new_state & FC_STATE_CANCELED_MASK) == FC_STATE_CANCELED_MASK) {
+               dispatch_async_f(server_config.fs_io_drain_queue, fc,
+                               firehose_client_handle_death);
+       }
+       return !(new_state & canceled_bit);
+}
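Taken together, wakeup/start_cancel/dequeue implement a small per-direction state machine (the bit macros are assumed to live in firehose_server_internal.h):

    // per-direction lifecycle, one instance each for mem and io:
    //   0 --wakeup--> ENQUEUED --dequeue--> 0                      (normal drain)
    //   * --start_cancel--> ENQUEUED|CANCELING --dequeue--> CANCELED
    // firehose_client_handle_death() is queued exactly once, when the
    // dequeue above sets the last of the two CANCELED bits.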
+
+#pragma mark -
+#pragma mark firehose client state machine
 
 static void
 firehose_client_notify(firehose_client_t fc, mach_port_t reply_port)
@@ -82,9 +222,11 @@ firehose_client_notify(firehose_client_t fc, mach_port_t reply_port)
                }
        } else {
                if (reply_port == fc->fc_sendp) {
-                       kr = firehose_send_push_notify_async(reply_port, push_reply, 0);
+                       kr = firehose_send_push_notify_async(reply_port, push_reply,
+                                       fc->fc_quarantined, 0);
                } else {
-                       kr = firehose_send_push_reply(reply_port, KERN_SUCCESS, push_reply);
+                       kr = firehose_send_push_reply(reply_port, KERN_SUCCESS, push_reply,
+                                       fc->fc_quarantined);
                }
                if (kr != MACH_SEND_INVALID_DEST) {
                        DISPATCH_VERIFY_MIG(kr);
@@ -106,18 +248,6 @@ firehose_client_acquire_head(firehose_buffer_t fb, bool for_io)
        return head;
 }
 
-OS_ALWAYS_INLINE
-static inline void
-firehose_client_push_async_merge(firehose_client_t fc, pthread_priority_t pp,
-               bool for_io)
-{
-       if (for_io) {
-               _dispatch_source_merge_data(fc->fc_io_source, pp, 1);
-       } else {
-               _dispatch_source_merge_data(fc->fc_mem_source, pp, 1);
-       }
-}
-
 OS_NOINLINE OS_COLD
 static void
 firehose_client_mark_corrupted(firehose_client_t fc, mach_port_t reply_port)
@@ -131,7 +261,7 @@ firehose_client_mark_corrupted(firehose_client_t fc, mach_port_t reply_port)
 
        if (reply_port) {
                kern_return_t kr = firehose_send_push_reply(reply_port, 0,
-                               FIREHOSE_PUSH_REPLY_CORRUPTED);
+                               FIREHOSE_PUSH_REPLY_CORRUPTED, false);
                DISPATCH_VERIFY_MIG(kr);
                dispatch_assume_zero(kr);
        }
@@ -156,7 +286,7 @@ firehose_client_snapshot_mark_done(firehose_client_t fc,
 
 OS_NOINLINE
 static void
-firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags)
+firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags)
 {
        firehose_buffer_t fb = fc->fc_buffer;
        firehose_chunk_t fbc;
@@ -174,9 +304,7 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags)
                fbh_ring = fb->fb_header.fbh_io_ring;
                sent_flushed = (uint16_t)fc->fc_io_sent_flushed_pos;
                flushed = (uint16_t)fc->fc_io_flushed_pos;
-               if (fc->fc_needs_io_snapshot && server_config.fs_io_snapshot_started) {
-                       snapshot = server_config.fs_snapshot;
-               }
+               if (fc->fc_needs_io_snapshot) snapshot = server_config.fs_snapshot;
        } else {
                evt = FIREHOSE_EVENT_MEM_BUFFER_RECEIVED;
                _Static_assert(FIREHOSE_EVENT_MEM_BUFFER_RECEIVED ==
@@ -184,9 +312,7 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags)
                fbh_ring = fb->fb_header.fbh_mem_ring;
                sent_flushed = (uint16_t)fc->fc_mem_sent_flushed_pos;
                flushed = (uint16_t)fc->fc_mem_flushed_pos;
-               if (fc->fc_needs_mem_snapshot && server_config.fs_mem_snapshot_started) {
-                       snapshot = server_config.fs_snapshot;
-               }
+               if (fc->fc_needs_mem_snapshot) snapshot = server_config.fs_snapshot;
        }
 
        if (slowpath(fc->fc_memory_corrupted)) {
@@ -273,12 +399,12 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags)
                        // and there's more to drain, so optimistically schedule draining
                        // again; this is cheap since the queue is hot, and is fair to the
                        // other clients
-                       firehose_client_push_async_merge(fc, 0, for_io);
+                       firehose_client_wakeup(fc, 0, for_io);
                }
                if (count && server_config.fs_kernel_client) {
                        // the kernel is special because it can drop messages, so if we're
                        // draining, poll the kernel each time while we're bound to a thread
-                       firehose_client_drain(server_config.fs_kernel_client,
+                       firehose_client_drain_one(server_config.fs_kernel_client,
                                        MACH_PORT_NULL, flags | FIREHOSE_DRAIN_POLL);
                }
        }
@@ -293,20 +419,36 @@ corrupt:
        // (needs_<for_io>_snapshot: false, memory_corrupted: true). we can safely
        // silence the corresponding source of drain wake-ups.
        if (fc->fc_pid) {
-               dispatch_source_cancel(for_io ? fc->fc_io_source : fc->fc_mem_source);
+               firehose_client_start_cancel(fc, for_io);
        }
 }
 
 static void
-firehose_client_drain_io_async(void *ctx)
-{
-       firehose_client_drain(ctx, MACH_PORT_NULL, FIREHOSE_DRAIN_FOR_IO);
-}
-
-static void
-firehose_client_drain_mem_async(void *ctx)
+firehose_client_drain(void *ctxt)
 {
-       firehose_client_drain(ctx, MACH_PORT_NULL, 0);
+       fs_client_queue_t queue = ctxt;
+       bool for_io = fs_queue_is_for_io(queue);
+       bool quarantined = fs_queue_is_for_quarantined(queue);
+       firehose_client_t fc, fc_next;
+       size_t clients = 0;
+
+       while (queue->fs_client_tail) {
+               fc = os_mpsc_get_head(queue, fs_client);
+               do {
+                       fc_next = os_mpsc_pop_head(queue, fs_client, fc, fc_next[for_io]);
+                       if (firehose_client_dequeue(fc, for_io)) {
+                               firehose_client_drain_one(fc, MACH_PORT_NULL,
+                                               for_io ? FIREHOSE_DRAIN_FOR_IO : 0);
+                       }
+                       // process quarantined clients 4 times slower than the other
+                       // ones; re-asyncing every 4 clients also lets a quarantine
+                       // suspension take effect sooner
+                       if (++clients == (quarantined ? 1 : 4)) {
+                               dispatch_source_merge_data(fs_source(quarantined, for_io), 1);
+                               return;
+                       }
+               } while ((fc = fc_next));
+       }
 }
 
 OS_NOINLINE
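firehose_client_drain is now the handler of a per-(quarantined, for_io) DISPATCH_SOURCE_TYPE_DATA_OR source and re-arms itself with dispatch_source_merge_data instead of looping, so other work on the serial drain queue is not starved. A standalone sketch of that self-rearming pattern, with a hypothetical work_pop() standing in for the os_mpsc client list:

    #include <dispatch/dispatch.h>

    extern int work_pop(void); // hypothetical: nonzero while work remains

    static dispatch_source_t drain_source;

    static void
    drain_handler(void *ctxt)
    {
        (void)ctxt;
        for (int batch = 0; batch < 4; batch++) { // bounded batch, like the
            if (!work_pop()) return;              // 4-client cap above
        }
        // more work left: merge instead of looping, so other handlers on
        // the same serial queue get to run between batches
        dispatch_source_merge_data(drain_source, 1);
    }

    int
    main(void)
    {
        dispatch_queue_t q = dispatch_queue_create("drain",
                DISPATCH_QUEUE_SERIAL);
        drain_source = dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA_OR,
                0, 0, q);
        dispatch_source_set_event_handler_f(drain_source, drain_handler);
        dispatch_activate(drain_source);
        dispatch_source_merge_data(drain_source, 1); // initial kick
        dispatch_main();
    }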
@@ -335,7 +477,10 @@ firehose_client_finalize(firehose_client_t fc OS_OBJECT_CONSUMED)
        }
        server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_DIED, NULL);
 
+       fs_clients_lock();
        TAILQ_REMOVE(&server_config.fs_clients, fc, fc_entry);
+       fs_clients_unlock();
+
        dispatch_release(fc->fc_mach_channel);
        fc->fc_mach_channel = NULL;
        fc->fc_entry.tqe_next = DISPATCH_OBJECT_LISTLESS;
@@ -413,7 +558,7 @@ firehose_client_handle_death(void *ctxt)
                        continue;
                }
                server_config.fs_handler(fc, FIREHOSE_EVENT_IO_BUFFER_RECEIVED, fbc);
-               if (fc->fc_needs_io_snapshot && server_config.fs_io_snapshot_started) {
+               if (fc->fc_needs_io_snapshot) {
                        snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER, fbc);
                }
        }
@@ -431,7 +576,7 @@ firehose_client_handle_death(void *ctxt)
 
                        mem_bitmap_copy &= ~(1ULL << ref);
                        server_config.fs_handler(fc, FIREHOSE_EVENT_MEM_BUFFER_RECEIVED, fbc);
-                       if (fc->fc_needs_mem_snapshot && server_config.fs_mem_snapshot_started) {
+                       if (fc->fc_needs_mem_snapshot) {
                                snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER, fbc);
                        }
                }
@@ -445,8 +590,9 @@ static void
 firehose_client_handle_mach_event(void *ctx, dispatch_mach_reason_t reason,
                dispatch_mach_msg_t dmsg, mach_error_t error OS_UNUSED)
 {
-       mach_msg_header_t *msg_hdr;
+       mach_msg_header_t *msg_hdr = NULL;
        firehose_client_t fc = ctx;
+       mach_port_t port;
 
        switch (reason) {
        case DISPATCH_MACH_MESSAGE_RECEIVED:
@@ -460,7 +606,33 @@ firehose_client_handle_mach_event(void *ctx, dispatch_mach_reason_t reason,
                }
                break;
 
+       case DISPATCH_MACH_DISCONNECTED:
+               msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL);
+               port = msg_hdr->msgh_remote_port;
+               if (MACH_PORT_VALID(port)) {
+                       if (port != fc->fc_sendp) {
+                               DISPATCH_INTERNAL_CRASH(port, "Unknown send-right");
+                       }
+                       firehose_mach_port_send_release(fc->fc_sendp);
+                       fc->fc_sendp = MACH_PORT_NULL;
+               }
+               port = msg_hdr->msgh_local_port;
+               if (MACH_PORT_VALID(port)) {
+                       if (port != fc->fc_recvp) {
+                               DISPATCH_INTERNAL_CRASH(port, "Unknown recv-right");
+                       }
+                       firehose_mach_port_recv_dispose(fc->fc_recvp, fc);
+                       fc->fc_recvp = MACH_PORT_NULL;
+               }
+               break;
+
        case DISPATCH_MACH_CANCELED:
+               if (MACH_PORT_VALID(fc->fc_sendp)) {
+                       DISPATCH_INTERNAL_CRASH(fc->fc_sendp, "send-right leak");
+               }
+               if (MACH_PORT_VALID(fc->fc_recvp)) {
+                       DISPATCH_INTERNAL_CRASH(fc->fc_recvp, "recv-right leak");
+               }
                firehose_client_cancel(fc);
                break;
        }
@@ -475,10 +647,8 @@ firehose_client_kernel_source_handle_event(void *ctxt)
        // resumed in firehose_client_drain for both memory and I/O
        dispatch_suspend(fc->fc_kernel_source);
        dispatch_suspend(fc->fc_kernel_source);
-       dispatch_async_f(server_config.fs_mem_drain_queue,
-                       fc, firehose_client_drain_mem_async);
-       dispatch_async_f(server_config.fs_io_drain_queue,
-                       fc, firehose_client_drain_io_async);
+       firehose_client_wakeup(fc, 0, false);
+       firehose_client_wakeup(fc, 0, true);
 }
 #endif
 
@@ -487,23 +657,23 @@ firehose_client_resume(firehose_client_t fc,
                const struct firehose_client_connected_info_s *fcci)
 {
        dispatch_assert_queue(server_config.fs_io_drain_queue);
+
+       fs_clients_lock();
        TAILQ_INSERT_TAIL(&server_config.fs_clients, fc, fc_entry);
+       fs_clients_unlock();
+
        server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_CONNECTED, (void *)fcci);
        if (!fc->fc_pid) {
                dispatch_activate(fc->fc_kernel_source);
        } else {
                dispatch_mach_connect(fc->fc_mach_channel,
                                fc->fc_recvp, fc->fc_sendp, NULL);
-               dispatch_activate(fc->fc_io_source);
-               dispatch_activate(fc->fc_mem_source);
        }
 }
 
 static void
 firehose_client_cancel(firehose_client_t fc)
 {
-       dispatch_block_t block;
-
        _dispatch_debug("client died (unique_pid: 0x%llx)",
                        firehose_client_get_unique_pid(fc, NULL));
 
@@ -516,15 +686,8 @@ firehose_client_cancel(firehose_client_t fc)
                fc->fc_recvp = MACH_PORT_NULL;
        }
        fc->fc_use_notifs = false;
-       dispatch_source_cancel(fc->fc_io_source);
-       dispatch_source_cancel(fc->fc_mem_source);
-
-       block = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
-               dispatch_async_f(server_config.fs_io_drain_queue, fc,
-                               firehose_client_handle_death);
-       });
-       dispatch_async(server_config.fs_mem_drain_queue, block);
-       _Block_release(block);
+       firehose_client_start_cancel(fc, false);
+       firehose_client_start_cancel(fc, true);
 }
 
 static firehose_client_t
@@ -562,28 +725,10 @@ firehose_client_create(firehose_buffer_t fb, firehose_token_t token,
        uint64_t unique_pid = fb->fb_header.fbh_uniquepid;
        firehose_client_t fc = _firehose_client_create(fb);
        dispatch_mach_t dm;
-       dispatch_source_t ds;
 
        fc->fc_pid = token->pid ? token->pid : ~0;
        fc->fc_euid = token->euid;
        fc->fc_pidversion = token->execcnt;
-       ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA_OR, 0, 0,
-                       server_config.fs_mem_drain_queue);
-       _os_object_retain_internal_inline(&fc->fc_as_os_object);
-       dispatch_set_context(ds, fc);
-       dispatch_set_finalizer_f(ds,
-                       (dispatch_function_t)_os_object_release_internal);
-       dispatch_source_set_event_handler_f(ds, firehose_client_drain_mem_async);
-       fc->fc_mem_source = ds;
-
-       ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA_OR, 0, 0,
-                       server_config.fs_io_drain_queue);
-       _os_object_retain_internal_inline(&fc->fc_as_os_object);
-       dispatch_set_context(ds, fc);
-       dispatch_set_finalizer_f(ds,
-                       (dispatch_function_t)_os_object_release_internal);
-       dispatch_source_set_event_handler_f(ds, firehose_client_drain_io_async);
-       fc->fc_io_source = ds;
 
        _dispatch_debug("FIREHOSE_REGISTER (unique_pid: 0x%llx)", unique_pid);
        fc->fc_recvp = comm_recvp;
@@ -656,12 +801,6 @@ _firehose_client_xref_dispose(firehose_client_t fc)
 {
        _dispatch_debug("Cleaning up client info for unique_pid 0x%llx",
                        firehose_client_get_unique_pid(fc, NULL));
-
-       dispatch_release(fc->fc_io_source);
-       fc->fc_io_source = NULL;
-
-       dispatch_release(fc->fc_mem_source);
-       fc->fc_mem_source = NULL;
 }
 
 uint64_t
@@ -706,6 +845,12 @@ firehose_client_set_context(firehose_client_t fc, void *ctxt)
        return os_atomic_xchg2o(fc, fc_ctxt, ctxt, relaxed);
 }
 
+void
+firehose_client_initiate_quarantine(firehose_client_t fc)
+{
+       fc->fc_quarantined = true;
+}
+
 #pragma mark -
 #pragma mark firehose server
 
@@ -734,22 +879,24 @@ void
 firehose_server_init(mach_port_t comm_port, firehose_handler_t handler)
 {
        struct firehose_server_s *fs = &server_config;
-       dispatch_queue_attr_t attr;
+       dispatch_queue_attr_t attr = DISPATCH_QUEUE_SERIAL_WITH_AUTORELEASE_POOL;
+       dispatch_queue_attr_t attr_ui;
        dispatch_mach_t dm;
+       dispatch_source_t ds;
 
        // just reference the string so that it's captured
        (void)os_atomic_load(&__libfirehose_serverVersionString[0], relaxed);
 
-       attr = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_SERIAL,
+       attr_ui = dispatch_queue_attr_make_with_qos_class(attr,
                        QOS_CLASS_USER_INITIATED, 0);
        fs->fs_ipc_queue = dispatch_queue_create_with_target(
-                       "com.apple.firehose.ipc", attr, NULL);
+                       "com.apple.firehose.ipc", attr_ui, NULL);
        fs->fs_snapshot_gate_queue = dispatch_queue_create_with_target(
-                       "com.apple.firehose.snapshot-gate", DISPATCH_QUEUE_SERIAL, NULL);
+                       "com.apple.firehose.snapshot-gate", attr, NULL);
        fs->fs_io_drain_queue = dispatch_queue_create_with_target(
-                       "com.apple.firehose.drain-io", DISPATCH_QUEUE_SERIAL, NULL);
+                       "com.apple.firehose.drain-io", attr, NULL);
        fs->fs_mem_drain_queue = dispatch_queue_create_with_target(
-                       "com.apple.firehose.drain-mem", DISPATCH_QUEUE_SERIAL, NULL);
+                       "com.apple.firehose.drain-mem", attr, NULL);
 
        dm = dispatch_mach_create_f("com.apple.firehose.listener",
                        fs->fs_ipc_queue, NULL, firehose_server_handle_mach_event);
@@ -757,6 +904,15 @@ firehose_server_init(mach_port_t comm_port, firehose_handler_t handler)
        fs->fs_mach_channel = dm;
        fs->fs_handler = _Block_copy(handler);
        firehose_kernel_client_create();
+
+       for (size_t i = 0; i < countof(fs->fs_sources); i++) {
+               ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA_OR, 0, 0,
+                               fs_idx_is_for_io(i) ? server_config.fs_io_drain_queue :
+                               server_config.fs_mem_drain_queue);
+               dispatch_set_context(ds, &fs->fs_queues[i]);
+               dispatch_source_set_event_handler_f(ds, firehose_client_drain);
+               fs->fs_sources[i] = ds;
+       }
 }
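All four queues now use DISPATCH_QUEUE_SERIAL_WITH_AUTORELEASE_POOL, and the IPC queue further derives a USER_INITIATED variant from it. A usage sketch of that derivation with the public API (label hypothetical):

    #include <dispatch/dispatch.h>

    static dispatch_queue_t
    make_ipc_queue(void)
    {
        // serial + an autorelease pool popped around every work item...
        dispatch_queue_attr_t attr = DISPATCH_QUEUE_SERIAL_WITH_AUTORELEASE_POOL;
        // ...further qualified with USER_INITIATED QoS, as for fs_ipc_queue
        dispatch_queue_attr_t attr_ui = dispatch_queue_attr_make_with_qos_class(
                attr, QOS_CLASS_USER_INITIATED, 0);
        return dispatch_queue_create("com.example.ipc", attr_ui);
    }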
 
 void
@@ -800,24 +956,23 @@ firehose_server_resume(void)
        }
        dispatch_mach_connect(fs->fs_mach_channel, fs->fs_bootstrap_port,
                        MACH_PORT_NULL, NULL);
-}
-
-OS_NOINLINE
-static void
-_firehose_server_cancel(void *ctxt OS_UNUSED)
-{
-       firehose_client_t fc;
-       TAILQ_FOREACH(fc, &server_config.fs_clients, fc_entry) {
-               dispatch_mach_cancel(fc->fc_mach_channel);
+       for (size_t i = 0; i < countof(fs->fs_sources); i++) {
+               dispatch_activate(fs->fs_sources[i]);
        }
 }
 
 void
 firehose_server_cancel(void)
 {
+       firehose_client_t fc;
+
        dispatch_mach_cancel(server_config.fs_mach_channel);
-       dispatch_async_f(server_config.fs_io_drain_queue, NULL,
-                       _firehose_server_cancel);
+
+       fs_clients_lock();
+       TAILQ_FOREACH(fc, &server_config.fs_clients, fc_entry) {
+               dispatch_mach_cancel(fc->fc_mach_channel);
+       }
+       fs_clients_unlock();
 }
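firehose_server_cancel above walks fs_clients from an arbitrary thread, so the list is now guarded by the fs_clients_lock()/fs_clients_unlock() helpers everywhere it is mutated or enumerated, instead of by confinement to the IO drain queue. A sketch of the idiom, assuming the helpers wrap an os_unfair_lock (struct and function names hypothetical):

    #include <os/lock.h>
    #include <sys/queue.h>

    struct client { TAILQ_ENTRY(client) entry; };
    static TAILQ_HEAD(, client) clients = TAILQ_HEAD_INITIALIZER(clients);
    static os_unfair_lock clients_lock = OS_UNFAIR_LOCK_INIT;

    static void
    for_each_client(void (*fn)(struct client *))
    {
        struct client *c;
        os_unfair_lock_lock(&clients_lock);
        TAILQ_FOREACH(c, &clients, entry) {
            fn(c); // must not block or take the lock again
        }
        os_unfair_lock_unlock(&clients_lock);
    }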
 
 dispatch_queue_t
@@ -838,6 +993,37 @@ firehose_server_copy_queue(firehose_server_queue_t which)
        return dq;
 }
 
+void
+firehose_server_quarantined_suspend(firehose_server_queue_t which)
+{
+       switch (which) {
+       case FIREHOSE_SERVER_QUEUE_IO:
+               dispatch_suspend(fs_source(true, true));
+               break;
+       case FIREHOSE_SERVER_QUEUE_MEMORY:
+               dispatch_suspend(fs_source(true, false));
+               break;
+       default:
+               DISPATCH_INTERNAL_CRASH(which, "Invalid firehose server queue type");
+       }
+}
+
+void
+firehose_server_quarantined_resume(firehose_server_queue_t which)
+{
+       switch (which) {
+       case FIREHOSE_SERVER_QUEUE_IO:
+               dispatch_resume(fs_source(true, true));
+               break;
+       case FIREHOSE_SERVER_QUEUE_MEMORY:
+               dispatch_resume(fs_source(true, false));
+               break;
+       default:
+               DISPATCH_INTERNAL_CRASH(which, "Invalid firehose server queue type");
+       }
+}
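Dispatch suspension counts nest, so every firehose_server_quarantined_suspend() needs exactly one matching resume, or the quarantined drain source stays dormant. Usage sketch (declarations come from os/firehose_server_private.h):

    // throttle quarantined IO draining while the consumer catches up
    firehose_server_quarantined_suspend(FIREHOSE_SERVER_QUEUE_IO);
    /* ... consume the backlog ... */
    firehose_server_quarantined_resume(FIREHOSE_SERVER_QUEUE_IO);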
+
+
 #pragma mark -
 #pragma mark firehose snapshot and peeking
 
@@ -950,73 +1136,35 @@ firehose_client_snapshot_finish(firehose_client_t fc,
 }
 
 static void
-firehose_snapshot_start(void *ctxt)
+firehose_snapshot_tickle_clients(firehose_snapshot_t fs, bool for_io)
 {
-       firehose_snapshot_t snapshot = ctxt;
-       firehose_client_t fci;
+       firehose_client_t fc;
        long n = 0;
 
-       // 0. we need to be on the IO queue so that client connection and/or death
-       //    cannot happen concurrently
-       dispatch_assert_queue(server_config.fs_io_drain_queue);
-       server_config.fs_snapshot = snapshot;
-
-       // 1. mark all the clients participating in the current snapshot
-       //    and enter the group for each bit set
-       TAILQ_FOREACH(fci, &server_config.fs_clients, fc_entry) {
-               if (!fci->fc_pid) {
+       fs_clients_lock();
+       TAILQ_FOREACH(fc, &server_config.fs_clients, fc_entry) {
+               if (slowpath(fc->fc_memory_corrupted)) {
+                       continue;
+               }
+               if (!fc->fc_pid) {
 #if TARGET_OS_SIMULATOR
                        continue;
 #endif
-               }
-               if (slowpath(fci->fc_memory_corrupted)) {
+               } else if (!firehose_client_wakeup(fc, 0, for_io)) {
                        continue;
                }
-               fci->fc_needs_io_snapshot = true;
-               fci->fc_needs_mem_snapshot = true;
-               n += 2;
-       }
-       if (n) {
-               // cheating: equivalent to dispatch_group_enter() n times
-               // without the acquire barriers that we don't need
-               os_atomic_add2o(snapshot->fs_group, dg_value, n, relaxed);
+               n++;
+               if (for_io) {
+                       fc->fc_needs_io_snapshot = true;
+               } else {
+                       fc->fc_needs_mem_snapshot = true;
+               }
        }
+       fs_clients_unlock();
 
-       dispatch_async(server_config.fs_mem_drain_queue, ^{
-               // 2. start the fs_mem_snapshot, this is what triggers the snapshot
-               //    logic from _drain() or handle_death()
-               server_config.fs_mem_snapshot_started = true;
-               snapshot->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_MEM_START, NULL);
-
-               dispatch_async(server_config.fs_io_drain_queue, ^{
-                       firehose_client_t fcj;
-
-                       // 3. start the fs_io_snapshot, this is what triggers the snapshot
-                       //    logic from _drain() or handle_death()
-                       //    29868879: must always happen after the memory snapshot started
-                       server_config.fs_io_snapshot_started = true;
-                       snapshot->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_IO_START, NULL);
-
-                       // match group_enter from firehose_snapshot() after MEM+IO_START
-                       dispatch_group_leave(snapshot->fs_group);
-
-                       // 3. tickle all the clients. the list of clients may have changed
-                       //    since step 1, but worry not - new clients don't have
-                       //    fc_needs_*_snapshot set so drain is harmless; clients that
-                       //    were removed from the list have already left the group
-                       //    (see firehose_client_finalize())
-                       TAILQ_FOREACH(fcj, &server_config.fs_clients, fc_entry) {
-                               if (!fcj->fc_pid) {
-#if !TARGET_OS_SIMULATOR
-                                       firehose_client_kernel_source_handle_event(fcj);
-#endif
-                               } else {
-                                       dispatch_source_merge_data(fcj->fc_io_source, 1);
-                                       dispatch_source_merge_data(fcj->fc_mem_source, 1);
-                               }
-                       }
-               });
-       });
+       // cheating: equivalent to calling dispatch_group_enter() n times,
+       // minus the acquire barriers, which we don't need here
+       if (n) os_atomic_add2o(fs->fs_group, dg_value, n, relaxed);
 }
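The os_atomic_add2o above is, as the comment says, a bulk dispatch_group_enter(); the portable but barrier-heavier equivalent would be one enter per tickled client, each balanced later by the dispatch_group_leave() in the drain or death path:

    for (long i = 0; i < n; i++) {
        dispatch_group_enter(fs->fs_group);
    }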
 
 static void
@@ -1026,8 +1174,6 @@ firehose_snapshot_finish(void *ctxt)
 
        fs->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_COMPLETE, NULL);
        server_config.fs_snapshot = NULL;
-       server_config.fs_mem_snapshot_started = false;
-       server_config.fs_io_snapshot_started = false;
 
        dispatch_release(fs->fs_group);
        Block_release(fs->handler);
@@ -1040,10 +1186,37 @@ firehose_snapshot_finish(void *ctxt)
 static void
 firehose_snapshot_gate(void *ctxt)
 {
+       firehose_snapshot_t fs = ctxt;
+
        // prevent other snapshots from running until done
+
        dispatch_suspend(server_config.fs_snapshot_gate_queue);
-       dispatch_async_f(server_config.fs_io_drain_queue, ctxt,
-                       firehose_snapshot_start);
+
+       server_config.fs_snapshot = fs;
+       dispatch_group_async(fs->fs_group, server_config.fs_mem_drain_queue, ^{
+               // start the fs_mem_snapshot, this is what triggers the snapshot
+               // logic from _drain() or handle_death()
+               fs->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_MEM_START, NULL);
+               firehose_snapshot_tickle_clients(fs, false);
+
+               dispatch_group_async(fs->fs_group, server_config.fs_io_drain_queue, ^{
+                       // start the fs_io_snapshot, this is what triggers the snapshot
+                       // logic from _drain() or handle_death()
+                       // 29868879: must always happen after the memory snapshot started
+                       fs->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_IO_START, NULL);
+                       firehose_snapshot_tickle_clients(fs, true);
+
+#if !TARGET_OS_SIMULATOR
+                       if (server_config.fs_kernel_client) {
+                               firehose_client_kernel_source_handle_event(
+                                               server_config.fs_kernel_client);
+                       }
+#endif
+               });
+       });
+
+       dispatch_group_notify_f(fs->fs_group, server_config.fs_io_drain_queue,
+                       fs, firehose_snapshot_finish);
 }
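The gate queue serializes snapshots by suspending itself: later firehose_snapshot() requests queue up behind the suspension until firehose_snapshot_finish() resumes the gate. A self-contained sketch of the idiom (names and the async kick-off are hypothetical):

    #include <dispatch/dispatch.h>

    extern void start_async_job(void *ctxt, dispatch_function_t done);

    static dispatch_queue_t gate;

    static void
    job_finished(void *ctxt)
    {
        (void)ctxt;
        dispatch_resume(gate); // unblock the next queued request
    }

    static void
    gate_fn(void *ctxt)
    {
        // suspension takes effect once this handler returns: the gate
        // queue runs nothing else until job_finished() resumes it
        dispatch_suspend(gate);
        start_async_job(ctxt, job_finished);
    }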
 
 void
@@ -1054,12 +1227,6 @@ firehose_snapshot(firehose_snapshot_handler_t handler)
        snapshot->handler = Block_copy(handler);
        snapshot->fs_group = dispatch_group_create();
 
-       // keep the group entered until IO_START and MEM_START have been sent
-       // See firehose_snapshot_start()
-       dispatch_group_enter(snapshot->fs_group);
-       dispatch_group_notify_f(snapshot->fs_group, server_config.fs_io_drain_queue,
-                       snapshot, firehose_snapshot_finish);
-
        dispatch_async_f(server_config.fs_snapshot_gate_queue, snapshot,
                        firehose_snapshot_gate);
 }
@@ -1150,15 +1317,16 @@ firehose_server_push_async(mach_port_t server_port OS_UNUSED,
                if (expects_notifs && !fc->fc_use_notifs) {
                        fc->fc_use_notifs = true;
                }
-               firehose_client_push_async_merge(fc, pp, for_io);
+               firehose_client_wakeup(fc, pp, for_io);
        }
        return KERN_SUCCESS;
 }
 
 kern_return_t
-firehose_server_push(mach_port_t server_port OS_UNUSED,
+firehose_server_push_and_wait(mach_port_t server_port OS_UNUSED,
                mach_port_t reply_port, qos_class_t qos, boolean_t for_io,
-               firehose_push_reply_t *push_reply OS_UNUSED)
+               firehose_push_reply_t *push_reply OS_UNUSED,
+               boolean_t *quarantinedOut OS_UNUSED)
 {
        firehose_client_t fc = cur_client_info;
        dispatch_block_flags_t flags = DISPATCH_BLOCK_ENFORCE_QOS_CLASS;
@@ -1180,7 +1348,7 @@ firehose_server_push(mach_port_t server_port OS_UNUSED,
        }
 
        block = dispatch_block_create_with_qos_class(flags, qos, 0, ^{
-               firehose_client_drain(fc, reply_port,
+               firehose_client_drain_one(fc, reply_port,
                                for_io ? FIREHOSE_DRAIN_FOR_IO : 0);
        });
        dispatch_async(q, block);
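DISPATCH_BLOCK_ENFORCE_QOS_CLASS makes the block's QoS win over the target queue's own priority, so the drain runs at whatever QoS the pushing client requested. A usage sketch of the same API (the queue q is assumed to exist):

    #include <dispatch/dispatch.h>
    #include <Block.h>

    void
    push_at_qos(dispatch_queue_t q, qos_class_t qos)
    {
        dispatch_block_t block = dispatch_block_create_with_qos_class(
                DISPATCH_BLOCK_ENFORCE_QOS_CLASS, qos, 0, ^{
            /* drain at the requester's QoS, not the queue's */
        });
        dispatch_async(q, block); // the queue takes its own reference
        Block_release(block);
    }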
index d805167607c8103ddd11e3e1e689c3d6c42122d4..13f52b880bd474d6cf6b60c006c5d6c919eb8f5a 100644 (file)
@@ -36,6 +36,7 @@ struct firehose_client_s {
                struct _os_object_s fc_as_os_object;
        };
        TAILQ_ENTRY(firehose_client_s) fc_entry;
+       struct firehose_client_s *volatile fc_next[2];
 
        firehose_buffer_t       fc_buffer;
        uint64_t volatile       fc_mem_sent_flushed_pos;
@@ -43,14 +44,27 @@ struct firehose_client_s {
        uint64_t volatile       fc_io_sent_flushed_pos;
        uint64_t volatile       fc_io_flushed_pos;
 
+#define FC_STATE_ENQUEUED(for_io)      (0x0001u << (for_io))
+#define FC_STATE_MEM_ENQUEUED           0x0001
+#define FC_STATE_IO_ENQUEUED            0x0002
+
+#define FC_STATE_CANCELING(for_io)     (0x0010u << (for_io))
+#define FC_STATE_MEM_CANCELING          0x0010
+#define FC_STATE_IO_CANCELING           0x0020
+
+#define FC_STATE_CANCELED(for_io)      (0x0100u << (for_io))
+#define FC_STATE_MEM_CANCELED           0x0100
+#define FC_STATE_IO_CANCELED            0x0200
+#define FC_STATE_CANCELED_MASK          0x0300
+
+       uintptr_t volatile      fc_state;
+
        void *volatile          fc_ctxt;
 
        union {
                dispatch_mach_t fc_mach_channel;
                dispatch_source_t fc_kernel_source;
        };
-       dispatch_source_t       fc_io_source;
-       dispatch_source_t       fc_mem_source;
        mach_port_t                     fc_recvp;
        mach_port_t                     fc_sendp;
        os_unfair_lock      fc_lock;
@@ -61,6 +75,7 @@ struct firehose_client_s {
        bool                            fc_memory_corrupted;
        bool                            fc_needs_io_snapshot;
        bool                            fc_needs_mem_snapshot;
+       bool                            fc_quarantined;
 };
 
 void
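The parameterized macros and the spelled-out constants above must stay in lockstep; a compile-time check one could add next to the defines:

    _Static_assert(FC_STATE_ENQUEUED(0) == FC_STATE_MEM_ENQUEUED, "");
    _Static_assert(FC_STATE_ENQUEUED(1) == FC_STATE_IO_ENQUEUED, "");
    _Static_assert(FC_STATE_CANCELING(1) == FC_STATE_IO_CANCELING, "");
    _Static_assert((FC_STATE_MEM_CANCELED | FC_STATE_IO_CANCELED) ==
            FC_STATE_CANCELED_MASK, "");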
index 5b8d809ca025850ca1fe0344f68aed89be007303..6672fac45dbbb2fb37391bfd420e832ce6bcf14c 100644 (file)
@@ -21,6 +21,8 @@
 // Contains exported global data and initialization & other routines that must
 // only exist once in the shared library even when resolvers are used.
 
+// NOTE: this file must not contain any atomic operations
+
 #include "internal.h"
 
 #if HAVE_MACH
@@ -63,6 +65,7 @@ dispatch_atfork_child(void)
 {
        _os_object_atfork_child();
        _voucher_atfork_child();
+       _dispatch_event_loop_atfork_child();
        if (_dispatch_is_multithreaded_inline()) {
                _dispatch_child_of_unsafe_fork = true;
        }
@@ -71,6 +74,29 @@ dispatch_atfork_child(void)
        _dispatch_unsafe_fork = 0;
 }
 
+int
+_dispatch_sigmask(void)
+{
+       sigset_t mask;
+       int r = 0;
+
+       /* Workaround: 6269619 Not all signals can be delivered on any thread */
+       r |= sigfillset(&mask);
+       r |= sigdelset(&mask, SIGILL);
+       r |= sigdelset(&mask, SIGTRAP);
+#if HAVE_DECL_SIGEMT
+       r |= sigdelset(&mask, SIGEMT);
+#endif
+       r |= sigdelset(&mask, SIGFPE);
+       r |= sigdelset(&mask, SIGBUS);
+       r |= sigdelset(&mask, SIGSEGV);
+       r |= sigdelset(&mask, SIGSYS);
+       r |= sigdelset(&mask, SIGPIPE);
+       r |= sigdelset(&mask, SIGPROF);
+       r |= pthread_sigmask(SIG_BLOCK, &mask, NULL);
+       return dispatch_assume_zero(r);
+}
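The signals left out of the mask above are the synchronous, fault-style ones, which must remain deliverable on the thread that triggers them; everything asynchronous gets routed to other threads. A standalone sketch of the same masking for a generic worker thread:

    #include <pthread.h>
    #include <signal.h>

    // Block every asynchronous signal on this worker thread; fault-style
    // signals (SIGSEGV, SIGBUS, ...) stay deliverable since they target
    // the faulting thread itself.
    static void
    worker_block_async_signals(void)
    {
        sigset_t mask;
        sigfillset(&mask);
        sigdelset(&mask, SIGILL);
        sigdelset(&mask, SIGTRAP);
        sigdelset(&mask, SIGFPE);
        sigdelset(&mask, SIGBUS);
        sigdelset(&mask, SIGSEGV);
        sigdelset(&mask, SIGSYS);
        sigdelset(&mask, SIGPIPE);
        (void)pthread_sigmask(SIG_BLOCK, &mask, NULL);
    }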
+
 #pragma mark -
 #pragma mark dispatch_globals
 
@@ -92,13 +118,13 @@ pthread_key_t dispatch_frame_key;
 pthread_key_t dispatch_cache_key;
 pthread_key_t dispatch_context_key;
 pthread_key_t dispatch_pthread_root_queue_observer_hooks_key;
-pthread_key_t dispatch_defaultpriority_key;
+pthread_key_t dispatch_basepri_key;
 #if DISPATCH_INTROSPECTION
 pthread_key_t dispatch_introspection_key;
 #elif DISPATCH_PERF_MON
 pthread_key_t dispatch_bcounter_key;
 #endif
-pthread_key_t dispatch_sema4_key;
+pthread_key_t dispatch_wlh_key;
 pthread_key_t dispatch_voucher_key;
 pthread_key_t dispatch_deferred_items_key;
 #endif // !DISPATCH_USE_DIRECT_TSD && !DISPATCH_USE_THREAD_LOCAL_STORAGE
@@ -122,10 +148,6 @@ int _dispatch_set_qos_class_enabled;
 #if DISPATCH_USE_KEVENT_WORKQUEUE && DISPATCH_USE_MGR_THREAD
 int _dispatch_kevent_workqueue_enabled;
 #endif
-#if DISPATCH_USE_EVFILT_MACHPORT_DIRECT && \
-               DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-int _dispatch_evfilt_machport_direct_enabled;
-#endif
 
 DISPATCH_HW_CONFIG();
 uint8_t _dispatch_unsafe_fork;
@@ -149,33 +171,6 @@ _dispatch_is_fork_of_multithreaded_parent(void)
        return _dispatch_child_of_unsafe_fork;
 }
 
-DISPATCH_NOINLINE
-void
-_dispatch_fork_becomes_unsafe_slow(void)
-{
-       uint8_t value = os_atomic_or(&_dispatch_unsafe_fork,
-                       _DISPATCH_UNSAFE_FORK_MULTITHREADED, relaxed);
-       if (value & _DISPATCH_UNSAFE_FORK_PROHIBIT) {
-               DISPATCH_CLIENT_CRASH(0, "Transition to multithreaded is prohibited");
-       }
-}
-
-DISPATCH_NOINLINE
-void
-_dispatch_prohibit_transition_to_multithreaded(bool prohibit)
-{
-       if (prohibit) {
-               uint8_t value = os_atomic_or(&_dispatch_unsafe_fork,
-                               _DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed);
-               if (value & _DISPATCH_UNSAFE_FORK_MULTITHREADED) {
-                       DISPATCH_CLIENT_CRASH(0, "The executable is already multithreaded");
-               }
-       } else {
-               os_atomic_and(&_dispatch_unsafe_fork,
-                               (uint8_t)~_DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed);
-       }
-}
-
 const struct dispatch_queue_offsets_s dispatch_queue_offsets = {
        .dqo_version = 6,
        .dqo_label = offsetof(struct dispatch_queue_s, dq_label),
@@ -192,8 +187,8 @@ const struct dispatch_queue_offsets_s dispatch_queue_offsets = {
        .dqo_suspend_cnt_size = 0,
        .dqo_target_queue = offsetof(struct dispatch_queue_s, do_targetq),
        .dqo_target_queue_size = sizeof(((dispatch_queue_t)NULL)->do_targetq),
-       .dqo_priority = offsetof(struct dispatch_queue_s, dq_priority),
-       .dqo_priority_size = sizeof(((dispatch_queue_t)NULL)->dq_priority),
+       .dqo_priority = 0,
+       .dqo_priority_size = 0,
 };
 
 #if DISPATCH_USE_DIRECT_TSD
@@ -214,85 +209,92 @@ struct dispatch_queue_s _dispatch_main_q = {
        .do_targetq = &_dispatch_root_queues[
                        DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT],
 #endif
-       .dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1),
+       .dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1) |
+                       DISPATCH_QUEUE_ROLE_BASE_ANON,
        .dq_label = "com.apple.main-thread",
-       .dq_width = 1,
-       .dq_atomic_bits = DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC,
-       .dq_override_voucher = DISPATCH_NO_VOUCHER,
+       .dq_atomic_flags = DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC | DQF_WIDTH(1),
        .dq_serialnum = 1,
 };
 
 #pragma mark -
 #pragma mark dispatch_queue_attr_t
 
-#define DISPATCH_QUEUE_ATTR_INIT(qos, prio, overcommit, freq, concurrent, inactive) \
-               { \
-                       DISPATCH_GLOBAL_OBJECT_HEADER(queue_attr), \
-                       .dqa_qos_class = (qos), \
-                       .dqa_relative_priority = (qos) ? (prio) : 0, \
-                       .dqa_overcommit = _dispatch_queue_attr_overcommit_##overcommit, \
-                       .dqa_autorelease_frequency = DISPATCH_AUTORELEASE_FREQUENCY_##freq, \
-                       .dqa_concurrent = (concurrent), \
-                       .dqa_inactive = (inactive), \
-               }
+#define DISPATCH_QUEUE_ATTR_INIT(qos, prio, overcommit, freq, concurrent, \
+                       inactive) \
+       { \
+               DISPATCH_GLOBAL_OBJECT_HEADER(queue_attr), \
+               .dqa_qos_and_relpri = (_dispatch_priority_make(qos, prio) & \
+                               DISPATCH_PRIORITY_REQUESTED_MASK), \
+               .dqa_overcommit = _dispatch_queue_attr_overcommit_##overcommit, \
+               .dqa_autorelease_frequency = DISPATCH_AUTORELEASE_FREQUENCY_##freq, \
+               .dqa_concurrent = (concurrent), \
+               .dqa_inactive = (inactive), \
+       }
 
-#define DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, freq, concurrent) \
-               { \
-                       [DQA_INDEX_ACTIVE] = DISPATCH_QUEUE_ATTR_INIT(\
-                                       qos, prio, overcommit, freq, concurrent, false), \
-                       [DQA_INDEX_INACTIVE] = DISPATCH_QUEUE_ATTR_INIT(\
-                                       qos, prio, overcommit, freq, concurrent, true), \
-               }
+#define DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, freq, \
+                       concurrent) \
+       { \
+               [DQA_INDEX_ACTIVE] = DISPATCH_QUEUE_ATTR_INIT( \
+                               qos, prio, overcommit, freq, concurrent, false), \
+               [DQA_INDEX_INACTIVE] = DISPATCH_QUEUE_ATTR_INIT( \
+                               qos, prio, overcommit, freq, concurrent, true), \
+       }
 
 #define DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, prio, overcommit) \
-               { \
-                       [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_CONCURRENT] = \
-                                       DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, INHERIT, 1), \
-                       [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_SERIAL] = \
-                                       DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, INHERIT, 0), \
-                       [DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_CONCURRENT] = \
-                                       DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, WORK_ITEM, 1), \
-                       [DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_SERIAL] = \
-                                       DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, WORK_ITEM, 0), \
-                       [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_CONCURRENT] = \
-                                       DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, NEVER, 1), \
-                       [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_SERIAL] = \
-                                       DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, NEVER, 0), \
-               }
+       { \
+               [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_CONCURRENT] = \
+                               DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \
+                                               qos, prio, overcommit, INHERIT, 1), \
+               [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_SERIAL] = \
+                               DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \
+                                               qos, prio, overcommit, INHERIT, 0), \
+               [DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_CONCURRENT] = \
+                               DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \
+                                               qos, prio, overcommit, WORK_ITEM, 1), \
+               [DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_SERIAL] = \
+                               DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \
+                                               qos, prio, overcommit, WORK_ITEM, 0), \
+               [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_CONCURRENT] = \
+                               DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \
+                                               qos, prio, overcommit, NEVER, 1), \
+               [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_SERIAL] = \
+                               DISPATCH_QUEUE_ATTR_ACTIVE_INIT(\
+                                               qos, prio, overcommit, NEVER, 0), \
+       }
 
 #define DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, prio) \
-               [prio] = { \
-                       [DQA_INDEX_UNSPECIFIED_OVERCOMMIT] = \
-                                       DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), unspecified), \
-                       [DQA_INDEX_NON_OVERCOMMIT] = \
-                                       DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), disabled), \
-                       [DQA_INDEX_OVERCOMMIT] = \
-                                       DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), enabled), \
-               }
+       [prio] = { \
+               [DQA_INDEX_UNSPECIFIED_OVERCOMMIT] = \
+                               DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), unspecified),\
+               [DQA_INDEX_NON_OVERCOMMIT] = \
+                               DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), disabled), \
+               [DQA_INDEX_OVERCOMMIT] = \
+                               DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), enabled), \
+       }
 
 #define DISPATCH_QUEUE_ATTR_PRIO_INIT(qos) \
-               { \
-                       DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 0), \
-                       DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 1), \
-                       DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 2), \
-                       DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 3), \
-                       DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 4), \
-                       DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 5), \
-                       DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 6), \
-                       DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 7), \
-                       DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 8), \
-                       DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 9), \
-                       DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 10), \
-                       DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 11), \
-                       DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 12), \
-                       DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 13), \
-                       DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 14), \
-                       DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 15), \
-               }
+       { \
+               DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 0), \
+               DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 1), \
+               DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 2), \
+               DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 3), \
+               DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 4), \
+               DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 5), \
+               DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 6), \
+               DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 7), \
+               DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 8), \
+               DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 9), \
+               DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 10), \
+               DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 11), \
+               DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 12), \
+               DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 13), \
+               DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 14), \
+               DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 15), \
+       }
 
 #define DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(qos) \
-               [DQA_INDEX_QOS_CLASS_##qos] = \
-                               DISPATCH_QUEUE_ATTR_PRIO_INIT(_DISPATCH_QOS_CLASS_##qos)
+       [DQA_INDEX_QOS_CLASS_##qos] = \
+                       DISPATCH_QUEUE_ATTR_PRIO_INIT(DISPATCH_QOS_##qos)
 
 // DISPATCH_QUEUE_CONCURRENT resp. _dispatch_queue_attr_concurrent is aliased
 // to array member [0][0][0][0][0][0] and their properties must match!
@@ -314,7 +316,7 @@ const struct dispatch_queue_attr_s _dispatch_queue_attrs[]
 #if DISPATCH_VARIANT_STATIC
 // <rdar://problem/16778703>
 struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent =
-       DISPATCH_QUEUE_ATTR_INIT(_DISPATCH_QOS_CLASS_UNSPECIFIED, 0,
+       DISPATCH_QUEUE_ATTR_INIT(QOS_CLASS_UNSPECIFIED, 0,
                        unspecified, INHERIT, 1, false);
 #endif // DISPATCH_VARIANT_STATIC
 
@@ -349,6 +351,7 @@ DISPATCH_VTABLE_INSTANCE(queue,
        .do_dispose = _dispatch_queue_dispose,
        .do_suspend = _dispatch_queue_suspend,
        .do_resume = _dispatch_queue_resume,
+       .do_push = _dispatch_queue_push,
        .do_invoke = _dispatch_queue_invoke,
        .do_wakeup = _dispatch_queue_wakeup,
        .do_debug = dispatch_queue_debug,
@@ -362,6 +365,7 @@ DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_serial, queue,
        .do_suspend = _dispatch_queue_suspend,
        .do_resume = _dispatch_queue_resume,
        .do_finalize_activation = _dispatch_queue_finalize_activation,
+       .do_push = _dispatch_queue_push,
        .do_invoke = _dispatch_queue_invoke,
        .do_wakeup = _dispatch_queue_wakeup,
        .do_debug = dispatch_queue_debug,
@@ -375,6 +379,7 @@ DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_concurrent, queue,
        .do_suspend = _dispatch_queue_suspend,
        .do_resume = _dispatch_queue_resume,
        .do_finalize_activation = _dispatch_queue_finalize_activation,
+       .do_push = _dispatch_queue_push,
        .do_invoke = _dispatch_queue_invoke,
        .do_wakeup = _dispatch_queue_wakeup,
        .do_debug = dispatch_queue_debug,
@@ -386,14 +391,18 @@ DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_root, queue,
        .do_type = DISPATCH_QUEUE_GLOBAL_ROOT_TYPE,
        .do_kind = "global-queue",
        .do_dispose = _dispatch_pthread_root_queue_dispose,
+       .do_push = _dispatch_root_queue_push,
+       .do_invoke = NULL,
        .do_wakeup = _dispatch_root_queue_wakeup,
        .do_debug = dispatch_queue_debug,
 );
 
+
 DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_main, queue,
        .do_type = DISPATCH_QUEUE_SERIAL_TYPE,
        .do_kind = "main-queue",
        .do_dispose = _dispatch_queue_dispose,
+       .do_push = _dispatch_queue_push,
        .do_invoke = _dispatch_queue_invoke,
        .do_wakeup = _dispatch_main_queue_wakeup,
        .do_debug = dispatch_queue_debug,
@@ -403,6 +412,7 @@ DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_runloop, queue,
        .do_type = DISPATCH_QUEUE_RUNLOOP_TYPE,
        .do_kind = "runloop-queue",
        .do_dispose = _dispatch_runloop_queue_dispose,
+       .do_push = _dispatch_queue_push,
        .do_invoke = _dispatch_queue_invoke,
        .do_wakeup = _dispatch_runloop_queue_wakeup,
        .do_debug = dispatch_queue_debug,
@@ -411,6 +421,7 @@ DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_runloop, queue,
 DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_mgr, queue,
        .do_type = DISPATCH_QUEUE_MGR_TYPE,
        .do_kind = "mgr-queue",
+       .do_push = _dispatch_mgr_queue_push,
        .do_invoke = _dispatch_mgr_thread,
        .do_wakeup = _dispatch_mgr_queue_wakeup,
        .do_debug = dispatch_queue_debug,
@@ -420,6 +431,7 @@ DISPATCH_VTABLE_INSTANCE(queue_specific_queue,
        .do_type = DISPATCH_QUEUE_SPECIFIC_TYPE,
        .do_kind = "queue-context",
        .do_dispose = _dispatch_queue_specific_queue_dispose,
+       .do_push = (void *)_dispatch_queue_push,
        .do_invoke = (void *)_dispatch_queue_invoke,
        .do_wakeup = (void *)_dispatch_queue_wakeup,
        .do_debug = (void *)dispatch_queue_debug,
@@ -437,6 +449,7 @@ DISPATCH_VTABLE_INSTANCE(source,
        .do_suspend = (void *)_dispatch_queue_suspend,
        .do_resume = (void *)_dispatch_queue_resume,
        .do_finalize_activation = _dispatch_source_finalize_activation,
+       .do_push = (void *)_dispatch_queue_push,
        .do_invoke = _dispatch_source_invoke,
        .do_wakeup = _dispatch_source_wakeup,
        .do_debug = _dispatch_source_debug,
@@ -451,6 +464,7 @@ DISPATCH_VTABLE_INSTANCE(mach,
        .do_suspend = (void *)_dispatch_queue_suspend,
        .do_resume = (void *)_dispatch_queue_resume,
        .do_finalize_activation = _dispatch_mach_finalize_activation,
+       .do_push = (void *)_dispatch_queue_push,
        .do_invoke = _dispatch_mach_invoke,
        .do_wakeup = _dispatch_mach_wakeup,
        .do_debug = _dispatch_mach_debug,
@@ -472,6 +486,7 @@ DISPATCH_VTABLE_INSTANCE(data,
        .do_kind = "data",
        .do_dispose = _dispatch_data_dispose,
        .do_debug = _dispatch_data_debug,
+       .do_set_targetq = (void*)_dispatch_data_set_target_queue,
 );
 #endif
 
@@ -497,31 +512,6 @@ DISPATCH_VTABLE_INSTANCE(disk,
 );
 
 
-const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = {
-       DC_VTABLE_ENTRY(ASYNC_REDIRECT,
-               .do_kind = "dc-redirect",
-               .do_invoke = _dispatch_async_redirect_invoke),
-#if HAVE_MACH
-       DC_VTABLE_ENTRY(MACH_SEND_BARRRIER_DRAIN,
-               .do_kind = "dc-mach-send-drain",
-               .do_invoke = _dispatch_mach_send_barrier_drain_invoke),
-       DC_VTABLE_ENTRY(MACH_SEND_BARRIER,
-               .do_kind = "dc-mach-send-barrier",
-               .do_invoke = _dispatch_mach_barrier_invoke),
-       DC_VTABLE_ENTRY(MACH_RECV_BARRIER,
-               .do_kind = "dc-mach-recv-barrier",
-               .do_invoke = _dispatch_mach_barrier_invoke),
-#endif
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-       DC_VTABLE_ENTRY(OVERRIDE_STEALING,
-               .do_kind = "dc-override-stealing",
-               .do_invoke = _dispatch_queue_override_invoke),
-       DC_VTABLE_ENTRY(OVERRIDE_OWNING,
-               .do_kind = "dc-override-owning",
-               .do_invoke = _dispatch_queue_override_invoke),
-#endif
-};
-
 void
 _dispatch_vtable_init(void)
 {
@@ -534,6 +524,41 @@ _dispatch_vtable_init(void)
 #endif // USE_OBJC
 }
 
+#pragma mark -
+#pragma mark dispatch_data globals
+
+const dispatch_block_t _dispatch_data_destructor_free = ^{
+       DISPATCH_INTERNAL_CRASH(0, "free destructor called");
+};
+
+const dispatch_block_t _dispatch_data_destructor_none = ^{
+       DISPATCH_INTERNAL_CRASH(0, "none destructor called");
+};
+
+#if !HAVE_MACH
+const dispatch_block_t _dispatch_data_destructor_munmap = ^{
+       DISPATCH_INTERNAL_CRASH(0, "munmap destructor called");
+};
+#else
+// _dispatch_data_destructor_munmap is a linker alias to the following
+const dispatch_block_t _dispatch_data_destructor_vm_deallocate = ^{
+       DISPATCH_INTERNAL_CRASH(0, "vmdeallocate destructor called");
+};
+#endif
+
+const dispatch_block_t _dispatch_data_destructor_inline = ^{
+       DISPATCH_INTERNAL_CRASH(0, "inline destructor called");
+};
+
+struct dispatch_data_s _dispatch_data_empty = {
+#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA
+       .do_vtable = DISPATCH_DATA_EMPTY_CLASS,
+#else
+       DISPATCH_GLOBAL_OBJECT_HEADER(data),
+       .do_next = DISPATCH_OBJECT_LISTLESS,
+#endif
+};
+
 #pragma mark -
 #pragma mark dispatch_bug
 
@@ -872,6 +897,7 @@ void
 _dispatch_temporary_resource_shortage(void)
 {
        sleep(1);
+       asm("");  // prevent tailcall
 }
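The empty asm statement is load-bearing: without it the compiler may compile sleep(1) as a tail call, and this frame would vanish from backtraces of threads parked in the retry path. A minimal illustration (function names hypothetical):

    extern void wait_a_bit(void);

    void
    retry_path(void)
    {
        wait_a_bit();  // eligible for tail-call optimization...
        __asm__("");   // ...unless any statement follows the call
    }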
 
 void *
@@ -978,6 +1004,22 @@ _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t))
 }
 
 #if HAVE_MACH
+
+#undef _dispatch_client_callout3
+DISPATCH_NOINLINE
+void
+_dispatch_client_callout3(void *ctxt, dispatch_mach_reason_t reason,
+               dispatch_mach_msg_t dmsg, dispatch_mach_async_reply_callback_t f)
+{
+       _dispatch_get_tsd_base();
+       void *u = _dispatch_get_unwind_tsd();
+       if (fastpath(!u)) return f(ctxt, reason, dmsg);
+       _dispatch_set_unwind_tsd(NULL);
+       f(ctxt, reason, dmsg);
+       _dispatch_free_unwind_tsd();
+       _dispatch_set_unwind_tsd(u);
+}
+
 #undef _dispatch_client_callout4
 void
 _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
@@ -1114,392 +1156,25 @@ _dispatch_autorelease_pool_pop(void *pool)
        }
 }
 
-void*
-_dispatch_last_resort_autorelease_pool_push(void)
+void
+_dispatch_last_resort_autorelease_pool_push(dispatch_invoke_context_t dic)
 {
-       return _dispatch_autorelease_pool_push();
+       dic->dic_autorelease_pool = _dispatch_autorelease_pool_push();
 }
 
 void
-_dispatch_last_resort_autorelease_pool_pop(void *pool)
+_dispatch_last_resort_autorelease_pool_pop(dispatch_invoke_context_t dic)
 {
-       _dispatch_autorelease_pool_pop(pool);
+       _dispatch_autorelease_pool_pop(dic->dic_autorelease_pool);
+       dic->dic_autorelease_pool = NULL;
 }
 
 #endif // DISPATCH_COCOA_COMPAT
 #endif // !USE_OBJC
 
-#pragma mark -
-#pragma mark dispatch_source_types
-
-static void
-dispatch_source_type_timer_init(dispatch_source_t ds,
-       dispatch_source_type_t type DISPATCH_UNUSED,
-       uintptr_t handle DISPATCH_UNUSED,
-       unsigned long mask)
-{
-       if (fastpath(!ds->ds_refs)) {
-               ds->ds_refs = _dispatch_calloc(1ul,
-                               sizeof(struct dispatch_timer_source_refs_s));
-       }
-       ds->ds_needs_rearm = true;
-       ds->ds_is_timer = true;
-       ds_timer(ds->ds_refs).flags = mask;
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_timer = {
-       .ke = {
-               .filter = DISPATCH_EVFILT_TIMER,
-       },
-       .mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND|
-                       DISPATCH_TIMER_WALL_CLOCK,
-       .init = dispatch_source_type_timer_init,
-};
-
-static void
-dispatch_source_type_after_init(dispatch_source_t ds,
-       dispatch_source_type_t type, uintptr_t handle, unsigned long mask)
-{
-       dispatch_source_type_timer_init(ds, type, handle, mask);
-       ds->ds_needs_rearm = false;
-       ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_AFTER;
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_after = {
-       .ke = {
-               .filter = DISPATCH_EVFILT_TIMER,
-       },
-       .init = dispatch_source_type_after_init,
-};
-
-static void
-dispatch_source_type_timer_with_aggregate_init(dispatch_source_t ds,
-       dispatch_source_type_t type, uintptr_t handle, unsigned long mask)
-{
-       ds->ds_refs = _dispatch_calloc(1ul,
-                       sizeof(struct dispatch_timer_source_aggregate_refs_s));
-       dispatch_source_type_timer_init(ds, type, handle, mask);
-       ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_WITH_AGGREGATE;
-       ds->dq_specific_q = (void*)handle;
-       _dispatch_retain(ds->dq_specific_q);
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_timer_with_aggregate={
-       .ke = {
-               .filter = DISPATCH_EVFILT_TIMER,
-               .ident = ~0ull,
-       },
-       .mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND,
-       .init = dispatch_source_type_timer_with_aggregate_init,
-};
-
-static void
-dispatch_source_type_interval_init(dispatch_source_t ds,
-       dispatch_source_type_t type, uintptr_t handle, unsigned long mask)
-{
-       dispatch_source_type_timer_init(ds, type, handle, mask);
-       ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_INTERVAL;
-       unsigned long ident = _dispatch_source_timer_idx(ds->ds_refs);
-       ds->ds_dkev->dk_kevent.ident = ds->ds_ident_hack = ident;
-       _dispatch_source_set_interval(ds, handle);
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_interval = {
-       .ke = {
-               .filter = DISPATCH_EVFILT_TIMER,
-               .ident = ~0ull,
-       },
-       .mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND|
-                       DISPATCH_INTERVAL_UI_ANIMATION,
-       .init = dispatch_source_type_interval_init,
-};
-
-static void
-dispatch_source_type_readwrite_init(dispatch_source_t ds,
-       dispatch_source_type_t type DISPATCH_UNUSED,
-       uintptr_t handle DISPATCH_UNUSED,
-       unsigned long mask DISPATCH_UNUSED)
-{
-       ds->ds_is_level = true;
-#if HAVE_DECL_NOTE_LOWAT
-       // bypass kernel check for device kqueue support rdar://19004921
-       ds->ds_dkev->dk_kevent.fflags = NOTE_LOWAT;
-#endif
-       ds->ds_dkev->dk_kevent.data = 1;
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_read = {
-       .ke = {
-               .filter = EVFILT_READ,
-               .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC,
-       },
-       .init = dispatch_source_type_readwrite_init,
-};
-
-const struct dispatch_source_type_s _dispatch_source_type_write = {
-       .ke = {
-               .filter = EVFILT_WRITE,
-               .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC,
-       },
-       .init = dispatch_source_type_readwrite_init,
-};
-
-#if DISPATCH_USE_MEMORYSTATUS
-
-#if TARGET_IPHONE_SIMULATOR // rdar://problem/9219483
-static int _dispatch_ios_simulator_memory_warnings_fd = -1;
-static void
-_dispatch_ios_simulator_memorypressure_init(void *context DISPATCH_UNUSED)
-{
-       char *e = getenv("SIMULATOR_MEMORY_WARNINGS");
-       if (!e) return;
-       _dispatch_ios_simulator_memory_warnings_fd = open(e, O_EVTONLY);
-       if (_dispatch_ios_simulator_memory_warnings_fd == -1) {
-               (void)dispatch_assume_zero(errno);
-       }
-}
-#endif
-
-#if TARGET_IPHONE_SIMULATOR
-static void
-dispatch_source_type_memorypressure_init(dispatch_source_t ds,
-       dispatch_source_type_t type DISPATCH_UNUSED,
-       uintptr_t handle DISPATCH_UNUSED,
-       unsigned long mask DISPATCH_UNUSED)
-{
-       static dispatch_once_t pred;
-       dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_memorypressure_init);
-       handle = (uintptr_t)_dispatch_ios_simulator_memory_warnings_fd;
-       mask = NOTE_ATTRIB;
-       ds->ds_dkev->dk_kevent.filter = EVFILT_VNODE;
-       ds->ds_dkev->dk_kevent.ident = handle;
-       ds->ds_dkev->dk_kevent.flags |= EV_CLEAR;
-       ds->ds_dkev->dk_kevent.fflags = (uint32_t)mask;
-       ds->ds_ident_hack = handle;
-       ds->ds_pending_data_mask = mask;
-       ds->ds_memorypressure_override = 1;
-}
-#else
-#define dispatch_source_type_memorypressure_init NULL
-#endif
-
-#ifndef NOTE_MEMORYSTATUS_LOW_SWAP
-#define NOTE_MEMORYSTATUS_LOW_SWAP 0x8
-#endif
-
-const struct dispatch_source_type_s _dispatch_source_type_memorypressure = {
-       .ke = {
-               .filter = EVFILT_MEMORYSTATUS,
-               .flags = EV_DISPATCH|EV_UDATA_SPECIFIC,
-       },
-       .mask = NOTE_MEMORYSTATUS_PRESSURE_NORMAL|NOTE_MEMORYSTATUS_PRESSURE_WARN
-                       |NOTE_MEMORYSTATUS_PRESSURE_CRITICAL|NOTE_MEMORYSTATUS_LOW_SWAP
-                       |NOTE_MEMORYSTATUS_PROC_LIMIT_WARN|NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL,
-       .init = dispatch_source_type_memorypressure_init,
-};
-
-static void
-dispatch_source_type_vm_init(dispatch_source_t ds,
-       dispatch_source_type_t type DISPATCH_UNUSED,
-       uintptr_t handle DISPATCH_UNUSED,
-       unsigned long mask DISPATCH_UNUSED)
-{
-       // Map legacy vm pressure to memorypressure warning rdar://problem/15907505
-       mask = NOTE_MEMORYSTATUS_PRESSURE_WARN;
-       ds->ds_dkev->dk_kevent.fflags = (uint32_t)mask;
-       ds->ds_pending_data_mask = mask;
-       ds->ds_vmpressure_override = 1;
-#if TARGET_IPHONE_SIMULATOR
-       dispatch_source_type_memorypressure_init(ds, type, handle, mask);
-#endif
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_vm = {
-       .ke = {
-               .filter = EVFILT_MEMORYSTATUS,
-               .flags = EV_DISPATCH|EV_UDATA_SPECIFIC,
-       },
-       .mask = NOTE_VM_PRESSURE,
-       .init = dispatch_source_type_vm_init,
-};
-
-#endif // DISPATCH_USE_MEMORYSTATUS
-
-const struct dispatch_source_type_s _dispatch_source_type_signal = {
-       .ke = {
-               .filter = EVFILT_SIGNAL,
-               .flags = EV_UDATA_SPECIFIC,
-       },
-};
-
-#if !defined(__linux__)
-static void
-dispatch_source_type_proc_init(dispatch_source_t ds,
-       dispatch_source_type_t type DISPATCH_UNUSED,
-       uintptr_t handle DISPATCH_UNUSED,
-       unsigned long mask DISPATCH_UNUSED)
-{
-       ds->ds_dkev->dk_kevent.fflags |= NOTE_EXIT; // rdar://16655831
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_proc = {
-       .ke = {
-               .filter = EVFILT_PROC,
-               .flags = EV_CLEAR|EV_UDATA_SPECIFIC,
-       },
-       .mask = NOTE_EXIT|NOTE_FORK|NOTE_EXEC
-#if HAVE_DECL_NOTE_SIGNAL
-                       |NOTE_SIGNAL
-#endif
-#if HAVE_DECL_NOTE_REAP
-                       |NOTE_REAP
-#endif
-                       ,
-       .init = dispatch_source_type_proc_init,
-};
-
-const struct dispatch_source_type_s _dispatch_source_type_vnode = {
-       .ke = {
-               .filter = EVFILT_VNODE,
-               .flags = EV_CLEAR|EV_VANISHED|EV_UDATA_SPECIFIC,
-       },
-       .mask = NOTE_DELETE|NOTE_WRITE|NOTE_EXTEND|NOTE_ATTRIB|NOTE_LINK|
-                       NOTE_RENAME|NOTE_FUNLOCK
-#if HAVE_DECL_NOTE_REVOKE
-                       |NOTE_REVOKE
-#endif
-#if HAVE_DECL_NOTE_NONE
-                       |NOTE_NONE
-#endif
-                       ,
-};
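
A matching sketch for the vnode type via the public DISPATCH_SOURCE_TYPE_VNODE (illustrative, not part of this commit; watch_file is invented). O_EVTONLY is the same low-overhead open mode the simulator shim above uses:

#include <dispatch/dispatch.h>
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static dispatch_source_t
watch_file(const char *path)
{
	int fd = open(path, O_EVTONLY);
	if (fd < 0) return NULL;
	dispatch_source_t ds = dispatch_source_create(
			DISPATCH_SOURCE_TYPE_VNODE, (uintptr_t)fd,
			DISPATCH_VNODE_WRITE | DISPATCH_VNODE_DELETE | DISPATCH_VNODE_RENAME,
			dispatch_get_main_queue());
	if (!ds) { close(fd); return NULL; }
	dispatch_source_set_event_handler(ds, ^{
		// dispatch_source_get_data(ds) returns the pending DISPATCH_VNODE_* bits
	});
	dispatch_source_set_cancel_handler(ds, ^{ close(fd); });
	dispatch_activate(ds);
	return ds;
}
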
-
-const struct dispatch_source_type_s _dispatch_source_type_vfs = {
-       .ke = {
-               .filter = EVFILT_FS,
-               .flags = EV_CLEAR|EV_UDATA_SPECIFIC,
-       },
-       .mask = VQ_NOTRESP|VQ_NEEDAUTH|VQ_LOWDISK|VQ_MOUNT|VQ_UNMOUNT|VQ_DEAD|
-                       VQ_ASSIST|VQ_NOTRESPLOCK
-#if HAVE_DECL_VQ_UPDATE
-                       |VQ_UPDATE
-#endif
-#if HAVE_DECL_VQ_VERYLOWDISK
-                       |VQ_VERYLOWDISK
-#endif
-#if HAVE_DECL_VQ_QUOTA
-                       |VQ_QUOTA
-#endif
-#if HAVE_DECL_VQ_NEARLOWDISK
-                       |VQ_NEARLOWDISK
-#endif
-#if HAVE_DECL_VQ_DESIRED_DISK
-                       |VQ_DESIRED_DISK
-#endif
-                       ,
-};
-
-const struct dispatch_source_type_s _dispatch_source_type_sock = {
-#ifdef EVFILT_SOCK
-       .ke = {
-               .filter = EVFILT_SOCK,
-               .flags = EV_CLEAR|EV_VANISHED|EV_UDATA_SPECIFIC,
-       },
-       .mask = NOTE_CONNRESET |  NOTE_READCLOSED | NOTE_WRITECLOSED |
-               NOTE_TIMEOUT | NOTE_NOSRCADDR |  NOTE_IFDENIED | NOTE_SUSPEND |
-               NOTE_RESUME | NOTE_KEEPALIVE
-#ifdef NOTE_ADAPTIVE_WTIMO
-               | NOTE_ADAPTIVE_WTIMO | NOTE_ADAPTIVE_RTIMO
-#endif
-#ifdef NOTE_CONNECTED
-               | NOTE_CONNECTED | NOTE_DISCONNECTED | NOTE_CONNINFO_UPDATED
-#endif
-#ifdef NOTE_NOTIFY_ACK
-               | NOTE_NOTIFY_ACK
-#endif
-               ,
-#endif // EVFILT_SOCK
-};
-#endif // !defined(__linux__)
-
-static void
-dispatch_source_type_data_init(dispatch_source_t ds,
-       dispatch_source_type_t type DISPATCH_UNUSED,
-       uintptr_t handle DISPATCH_UNUSED,
-       unsigned long mask DISPATCH_UNUSED)
-{
-       ds->ds_is_installed = true;
-       ds->ds_is_custom_source = true;
-       ds->ds_is_direct_kevent = true;
-       ds->ds_pending_data_mask = ~0ul;
-       ds->ds_needs_rearm = false; // not registered with kevent
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_data_add = {
-       .ke = {
-               .filter = DISPATCH_EVFILT_CUSTOM_ADD,
-               .flags = EV_UDATA_SPECIFIC,
-       },
-       .init = dispatch_source_type_data_init,
-};
-
-const struct dispatch_source_type_s _dispatch_source_type_data_or = {
-       .ke = {
-               .filter = DISPATCH_EVFILT_CUSTOM_OR,
-               .flags = EV_CLEAR|EV_UDATA_SPECIFIC,
-               .fflags = ~0u,
-       },
-       .init = dispatch_source_type_data_init,
-};
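
Note that dispatch_source_type_data_init marks these sources as custom and never registers them with kevent; the DISPATCH_EVFILT_CUSTOM_* filters exist purely in user space. Public-API sketch of the ADD flavor's coalescing (illustrative; make_counter is invented):

#include <dispatch/dispatch.h>

static dispatch_source_t
make_counter(void)
{
	dispatch_source_t ds = dispatch_source_create(
			DISPATCH_SOURCE_TYPE_DATA_ADD, 0, 0, dispatch_get_main_queue());
	dispatch_source_set_event_handler(ds, ^{
		// sum of all values merged since the handler last ran
		unsigned long total = dispatch_source_get_data(ds);
		(void)total;
	});
	dispatch_activate(ds);
	return ds;
}
// Producers on any thread: dispatch_source_merge_data(ds, 1);
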
-
-#if HAVE_MACH
-
-static void
-dispatch_source_type_mach_send_init(dispatch_source_t ds,
-       dispatch_source_type_t type DISPATCH_UNUSED,
-       uintptr_t handle DISPATCH_UNUSED, unsigned long mask)
-{
-       if (!mask) {
-               // Preserve legacy behavior that (mask == 0) => DISPATCH_MACH_SEND_DEAD
-               ds->ds_dkev->dk_kevent.fflags = DISPATCH_MACH_SEND_DEAD;
-               ds->ds_pending_data_mask = DISPATCH_MACH_SEND_DEAD;
-       }
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_mach_send = {
-       .ke = {
-               .filter = DISPATCH_EVFILT_MACH_NOTIFICATION,
-               .flags = EV_CLEAR,
-       },
-       .mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE,
-       .init = dispatch_source_type_mach_send_init,
-};
-
-static void
-dispatch_source_type_mach_recv_init(dispatch_source_t ds,
-       dispatch_source_type_t type DISPATCH_UNUSED,
-       uintptr_t handle DISPATCH_UNUSED,
-       unsigned long mask DISPATCH_UNUSED)
-{
-       ds->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE;
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-       if (_dispatch_evfilt_machport_direct_enabled) return;
-       ds->ds_dkev->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE;
-       ds->ds_dkev->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED);
-       ds->ds_is_direct_kevent = false;
-#endif
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_mach_recv = {
-       .ke = {
-               .filter = EVFILT_MACHPORT,
-               .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC,
-       },
-       .init = dispatch_source_type_mach_recv_init,
-};
-
 #pragma mark -
 #pragma mark dispatch_mig
+#if HAVE_MACH
 
 void *
 dispatch_mach_msg_get_context(mach_msg_header_t *msg)
@@ -1534,22 +1209,16 @@ kern_return_t
 _dispatch_mach_notify_port_destroyed(mach_port_t notify DISPATCH_UNUSED,
                mach_port_t name)
 {
-       kern_return_t kr;
-       // this function should never be called
-       (void)dispatch_assume_zero(name);
-       kr = mach_port_mod_refs(mach_task_self(), name, MACH_PORT_RIGHT_RECEIVE,-1);
-       DISPATCH_VERIFY_MIG(kr);
-       (void)dispatch_assume_zero(kr);
-       return KERN_SUCCESS;
+       DISPATCH_INTERNAL_CRASH(name, "unexpected receipt of port-destroyed");
+       return KERN_FAILURE;
 }
 
 kern_return_t
-_dispatch_mach_notify_no_senders(mach_port_t notify,
-               mach_port_mscount_t mscnt DISPATCH_UNUSED)
+_dispatch_mach_notify_no_senders(mach_port_t notify DISPATCH_UNUSED,
+               mach_port_mscount_t mscnt)
 {
-       // this function should never be called
-       (void)dispatch_assume_zero(notify);
-       return KERN_SUCCESS;
+       DISPATCH_INTERNAL_CRASH(mscnt, "unexpected receipt of no-more-senders");
+       return KERN_FAILURE;
 }
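
The MIG handlers above now crash loudly instead of silently consuming stray notifications. For context, this is the standard Mach call that arms a no-more-senders notification on a receive right (not from this diff; arm_no_senders is an invented wrapper):

#include <mach/mach.h>
#include <mach/notify.h>

static kern_return_t
arm_no_senders(mach_port_t recv_port, mach_port_t notify_port)
{
	mach_port_t previous = MACH_PORT_NULL;
	kern_return_t kr = mach_port_request_notification(mach_task_self(),
			recv_port, MACH_NOTIFY_NO_SENDERS, /* sync */ 0,
			notify_port, MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
	if (kr == KERN_SUCCESS && MACH_PORT_VALID(previous)) {
		(void)mach_port_deallocate(mach_task_self(), previous);
	}
	return kr;
}
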
 
 kern_return_t
index 79f496f594739a6fcd4d4ec20156dfe619429f90..1279874d40d543da825b5f5c77d785365c6f004c 100644
@@ -40,6 +40,9 @@ DISPATCH_NOTHROW void
 _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t));
 #if HAVE_MACH
 DISPATCH_NOTHROW void
+_dispatch_client_callout3(void *ctxt, dispatch_mach_reason_t reason,
+               dispatch_mach_msg_t dmsg, dispatch_mach_async_reply_callback_t f);
+DISPATCH_NOTHROW void
 _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
                dispatch_mach_msg_t dmsg, mach_error_t error,
                dispatch_mach_handler_function_t f);
@@ -62,6 +65,14 @@ _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t))
 }
 
 #if HAVE_MACH
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_client_callout3(void *ctxt, dispatch_mach_reason_t reason,
+               dispatch_mach_msg_t dmsg, dispatch_mach_async_reply_callback_t f)
+{
+       return f(ctxt, reason, dmsg);
+}
+
 DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
@@ -88,6 +99,13 @@ _dispatch_object_has_vtable(dispatch_object_t dou)
        return dc_flags > 0xffful;
 }
 
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_object_is_queue(dispatch_object_t dou)
+{
+       return _dispatch_object_has_vtable(dou) && dx_vtable(dou._do)->do_push;
+}
+
 DISPATCH_ALWAYS_INLINE
 static inline bool
 _dispatch_object_is_continuation(dispatch_object_t dou)
@@ -134,43 +152,31 @@ _dispatch_object_is_barrier(dispatch_object_t dou)
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dispatch_object_is_slow_item(dispatch_object_t dou)
+_dispatch_object_is_sync_waiter(dispatch_object_t dou)
 {
        if (_dispatch_object_has_vtable(dou)) {
                return false;
        }
-       return (dou._dc->dc_flags & DISPATCH_OBJ_SYNC_SLOW_BIT);
+       return (dou._dc->dc_flags & DISPATCH_OBJ_SYNC_WAITER_BIT);
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dispatch_object_is_slow_non_barrier(dispatch_object_t dou)
+_dispatch_object_is_sync_waiter_non_barrier(dispatch_object_t dou)
 {
        if (_dispatch_object_has_vtable(dou)) {
                return false;
        }
        return ((dou._dc->dc_flags &
-                               (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) ==
-                               (DISPATCH_OBJ_SYNC_SLOW_BIT));
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_object_is_slow_barrier(dispatch_object_t dou)
-{
-       if (_dispatch_object_has_vtable(dou)) {
-               return false;
-       }
-       return ((dou._dc->dc_flags &
-                               (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) ==
-                               (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT));
+                               (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_WAITER_BIT)) ==
+                               (DISPATCH_OBJ_SYNC_WAITER_BIT));
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline _os_object_t
-_os_object_retain_internal_inline(_os_object_t obj)
+_os_object_retain_internal_n_inline(_os_object_t obj, int n)
 {
-       int ref_cnt = _os_object_refcnt_inc(obj);
+       int ref_cnt = _os_object_refcnt_add(obj, n);
        if (unlikely(ref_cnt <= 0)) {
                _OS_OBJECT_CLIENT_CRASH("Resurrection of an object");
        }
@@ -179,23 +185,20 @@ _os_object_retain_internal_inline(_os_object_t obj)
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_os_object_release_internal_inline_no_dispose(_os_object_t obj)
+_os_object_release_internal_n_no_dispose_inline(_os_object_t obj, int n)
 {
-       int ref_cnt = _os_object_refcnt_dec(obj);
+       int ref_cnt = _os_object_refcnt_sub(obj, n);
        if (likely(ref_cnt >= 0)) {
                return;
        }
-       if (ref_cnt == 0) {
-               _OS_OBJECT_CLIENT_CRASH("Unexpected release of an object");
-       }
        _OS_OBJECT_CLIENT_CRASH("Over-release of an object");
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_os_object_release_internal_inline(_os_object_t obj)
+_os_object_release_internal_n_inline(_os_object_t obj, int n)
 {
-       int ref_cnt = _os_object_refcnt_dec(obj);
+       int ref_cnt = _os_object_refcnt_sub(obj, n);
        if (likely(ref_cnt >= 0)) {
                return;
        }
@@ -217,74 +220,110 @@ DISPATCH_ALWAYS_INLINE_NDEBUG
 static inline void
 _dispatch_retain(dispatch_object_t dou)
 {
-       (void)_os_object_retain_internal_inline(dou._os_obj);
+       (void)_os_object_retain_internal_n_inline(dou._os_obj, 1);
+}
+
+DISPATCH_ALWAYS_INLINE_NDEBUG
+static inline void
+_dispatch_retain_2(dispatch_object_t dou)
+{
+       (void)_os_object_retain_internal_n_inline(dou._os_obj, 2);
+}
+
+DISPATCH_ALWAYS_INLINE_NDEBUG
+static inline void
+_dispatch_retain_n(dispatch_object_t dou, int n)
+{
+       (void)_os_object_retain_internal_n_inline(dou._os_obj, n);
 }
 
 DISPATCH_ALWAYS_INLINE_NDEBUG
 static inline void
 _dispatch_release(dispatch_object_t dou)
 {
-       _os_object_release_internal_inline(dou._os_obj);
+       _os_object_release_internal_n_inline(dou._os_obj, 1);
 }
 
 DISPATCH_ALWAYS_INLINE_NDEBUG
 static inline void
-_dispatch_release_tailcall(dispatch_object_t dou)
+_dispatch_release_2(dispatch_object_t dou)
 {
-       _os_object_release_internal(dou._os_obj);
+       _os_object_release_internal_n_inline(dou._os_obj, 2);
 }
 
-DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL
+DISPATCH_ALWAYS_INLINE_NDEBUG
 static inline void
-_dispatch_object_set_target_queue_inline(dispatch_object_t dou,
-               dispatch_queue_t tq)
+_dispatch_release_n(dispatch_object_t dou, int n)
 {
-       _dispatch_retain(tq);
-       tq = os_atomic_xchg2o(dou._do, do_targetq, tq, release);
-       if (tq) _dispatch_release(tq);
-       _dispatch_object_debug(dou._do, "%s", __func__);
+       _os_object_release_internal_n_inline(dou._os_obj, n);
 }
 
-#endif // DISPATCH_PURE_C
-#pragma mark -
-#pragma mark dispatch_thread
-#if DISPATCH_PURE_C
+DISPATCH_ALWAYS_INLINE_NDEBUG
+static inline void
+_dispatch_release_no_dispose(dispatch_object_t dou)
+{
+       _os_object_release_internal_n_no_dispose_inline(dou._os_obj, 1);
+}
 
-#define DISPATCH_DEFERRED_ITEMS_MAGIC  0xdefe55edul /* deferred */
-#define DISPATCH_DEFERRED_ITEMS_EVENT_COUNT 8
-#ifdef WORKQ_KEVENT_EVENT_BUFFER_LEN
-_Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN >=
-               DISPATCH_DEFERRED_ITEMS_EVENT_COUNT,
-               "our list should not be longer than the kernel's");
-#endif
+DISPATCH_ALWAYS_INLINE_NDEBUG
+static inline void
+_dispatch_release_2_no_dispose(dispatch_object_t dou)
+{
+       _os_object_release_internal_n_no_dispose_inline(dou._os_obj, 2);
+}
+
+DISPATCH_ALWAYS_INLINE_NDEBUG
+static inline void
+_dispatch_release_tailcall(dispatch_object_t dou)
+{
+       _os_object_release_internal(dou._os_obj);
+}
 
-typedef struct dispatch_deferred_items_s {
-       uint32_t ddi_magic;
-       dispatch_queue_t ddi_stashed_dq;
-       struct dispatch_object_s *ddi_stashed_dou;
-       dispatch_priority_t ddi_stashed_pp;
-       int ddi_nevents;
-       int ddi_maxevents;
-       _dispatch_kevent_qos_s ddi_eventlist[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT];
-} dispatch_deferred_items_s, *dispatch_deferred_items_t;
+DISPATCH_ALWAYS_INLINE_NDEBUG
+static inline void
+_dispatch_release_2_tailcall(dispatch_object_t dou)
+{
+       _os_object_release_internal_n(dou._os_obj, 2);
+}
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_deferred_items_set(dispatch_deferred_items_t ddi)
+_dispatch_queue_retain_storage(dispatch_queue_t dq)
 {
-       _dispatch_thread_setspecific(dispatch_deferred_items_key, (void *)ddi);
+       int ref_cnt = os_atomic_inc2o(dq, dq_sref_cnt, relaxed);
+       if (unlikely(ref_cnt <= 0)) {
+               _OS_OBJECT_CLIENT_CRASH("Resurrection of an object");
+       }
 }
 
 DISPATCH_ALWAYS_INLINE
-static inline dispatch_deferred_items_t
-_dispatch_deferred_items_get(void)
-{
-       dispatch_deferred_items_t ddi = (dispatch_deferred_items_t)
-                       _dispatch_thread_getspecific(dispatch_deferred_items_key);
-       if (ddi && ddi->ddi_magic == DISPATCH_DEFERRED_ITEMS_MAGIC) {
-               return ddi;
+static inline void
+_dispatch_queue_release_storage(dispatch_queue_t dq)
+{
+       // this refcount only delays the _dispatch_object_dealloc() and there's no
+       // need for visibility wrt the allocation, the internal refcount already
+       // gives us that, and the object becomes immutable after the last internal
+       // refcount release.
+       int ref_cnt = os_atomic_dec2o(dq, dq_sref_cnt, relaxed);
+       if (unlikely(ref_cnt >= 0)) {
+               return;
        }
-       return NULL;
+       if (unlikely(ref_cnt < -1)) {
+               _OS_OBJECT_CLIENT_CRASH("Over-release of an object");
+       }
+       dq->dq_state = 0xdead000000000000;
+       _dispatch_object_dealloc(dq);
+}
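
A portable sketch of the biased-refcount convention these helpers rely on: the stored value is (references - 1), so decrementing to -1 means the last reference is gone and anything below that is an over-release. obj_t and its functions are invented; this is not the _os_object_refcnt_* implementation:

#include <stdatomic.h>
#include <stdlib.h>

typedef struct { atomic_int ref_cnt; /* 0 == exactly one reference */ } obj_t;

static void obj_retain(obj_t *o) {
	int old = atomic_fetch_add_explicit(&o->ref_cnt, 1, memory_order_relaxed);
	if (old < 0) abort();            // resurrection of a dead object
}

static void obj_release(obj_t *o) {
	int old = atomic_fetch_sub_explicit(&o->ref_cnt, 1, memory_order_release);
	if (old > 0) return;             // still referenced
	if (old < 0) abort();            // over-release
	atomic_thread_fence(memory_order_acquire); // pairs with the release above
	free(o);                         // old == 0: we dropped the last reference
}

int main(void) {
	obj_t *o = calloc(1, sizeof(*o)); // one reference, stored count 0
	obj_retain(o);                    // two references
	obj_release(o);
	obj_release(o);                   // last release frees
	return 0;
}
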
+
+DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL
+static inline void
+_dispatch_object_set_target_queue_inline(dispatch_object_t dou,
+               dispatch_queue_t tq)
+{
+       _dispatch_retain(tq);
+       tq = os_atomic_xchg2o(dou._do, do_targetq, tq, release);
+       if (tq) _dispatch_release(tq);
+       _dispatch_object_debug(dou._do, "%s", __func__);
 }
 
 #endif // DISPATCH_PURE_C
@@ -345,12 +384,12 @@ _dispatch_thread_frame_iterate_next(dispatch_thread_frame_iterator_t it)
        dispatch_queue_t dq = it->dtfi_queue;
 
        if (dtf) {
-               if (dq->do_targetq) {
-                       // redirections and trysync_f may skip some frames,
-                       // so we need to simulate seeing the missing links
-                       // however the bottom root queue is always present
-                       it->dtfi_queue = dq->do_targetq;
-                       if (it->dtfi_queue == dtf->dtf_queue) {
+               dispatch_queue_t tq = dq->do_targetq;
+               if (tq) {
+                       // redirections, dispatch_sync and dispatch_trysync_f may skip
+                       // frames, so we need to simulate seeing the missing links
+                       it->dtfi_queue = tq;
+                       if (dq == dtf->dtf_queue) {
                                it->dtfi_frame = dtf->dtf_prev;
                        }
                } else {
@@ -385,13 +424,6 @@ _dispatch_thread_frame_get_current(void)
        return _dispatch_thread_getspecific(dispatch_frame_key);
 }
 
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_thread_frame_set_current(dispatch_thread_frame_t dtf)
-{
-       _dispatch_thread_setspecific(dispatch_frame_key, dtf);
-}
-
 DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_thread_frame_save_state(dispatch_thread_frame_t dtf)
@@ -407,7 +439,6 @@ _dispatch_thread_frame_push(dispatch_thread_frame_t dtf, dispatch_queue_t dq)
        _dispatch_thread_frame_save_state(dtf);
        _dispatch_thread_setspecific_pair(dispatch_queue_key, dq,
                        dispatch_frame_key, dtf);
-       dtf->dtf_deferred = NULL;
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -418,7 +449,6 @@ _dispatch_thread_frame_push_and_rebase(dispatch_thread_frame_t dtf,
        _dispatch_thread_frame_save_state(dtf);
        _dispatch_thread_setspecific_pair(dispatch_queue_key, dq,
                        dispatch_frame_key, new_base);
-       dtf->dtf_deferred = NULL;
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -450,28 +480,28 @@ _dispatch_thread_frame_unstash(dispatch_thread_frame_t dtf)
 DISPATCH_ALWAYS_INLINE
 static inline int
 _dispatch_wqthread_override_start_check_owner(mach_port_t thread,
-               pthread_priority_t pp, mach_port_t *ulock_addr)
+               dispatch_qos_t qos, mach_port_t *ulock_addr)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
        if (!_dispatch_set_qos_class_enabled) return 0;
        return _pthread_workqueue_override_start_direct_check_owner(thread,
-                       pp, ulock_addr);
+                       _dispatch_qos_to_pp(qos), ulock_addr);
 #else
-       (void)thread; (void)pp; (void)ulock_addr;
+       (void)thread; (void)qos; (void)ulock_addr;
        return 0;
 #endif
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_wqthread_override_start(mach_port_t thread,
-               pthread_priority_t pp)
+_dispatch_wqthread_override_start(mach_port_t thread, dispatch_qos_t qos)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
        if (!_dispatch_set_qos_class_enabled) return;
-       (void)_pthread_workqueue_override_start_direct(thread, pp);
+       (void)_pthread_workqueue_override_start_direct(thread,
+                       _dispatch_qos_to_pp(qos));
 #else
-       (void)thread; (void)pp;
+       (void)thread; (void)qos;
 #endif
 }
 
@@ -510,43 +540,6 @@ _dispatch_thread_override_end(mach_port_t thread, void *resource)
 #endif
 }
 
-#if DISPATCH_DEBUG_QOS && HAVE_PTHREAD_WORKQUEUE_QOS
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_qos_class_is_valid(pthread_priority_t pp)
-{
-       pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-       if (pp > (1UL << (DISPATCH_QUEUE_QOS_COUNT +
-                       _PTHREAD_PRIORITY_QOS_CLASS_SHIFT))) {
-               return false;
-       }
-       return true;
-}
-#define _dispatch_assert_is_valid_qos_class(pp)  ({ typeof(pp) _pp = (pp); \
-               if (unlikely(!_dispatch_qos_class_is_valid(_pp))) { \
-                       DISPATCH_INTERNAL_CRASH(_pp, "Invalid qos class"); \
-               } \
-       })
-
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_qos_override_is_valid(pthread_priority_t pp)
-{
-       if (pp & (pthread_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK) {
-               return false;
-       }
-       return _dispatch_qos_class_is_valid(pp);
-}
-#define _dispatch_assert_is_valid_qos_override(pp)  ({ typeof(pp) _pp = (pp); \
-               if (unlikely(!_dispatch_qos_override_is_valid(_pp))) { \
-                       DISPATCH_INTERNAL_CRASH(_pp, "Invalid override"); \
-               } \
-       })
-#else
-#define _dispatch_assert_is_valid_qos_override(pp) (void)(pp)
-#define _dispatch_assert_is_valid_qos_class(pp) (void)(pp)
-#endif
-
 #endif // DISPATCH_PURE_C
 #pragma mark -
 #pragma mark dispatch_queue_t state accessors
@@ -658,12 +651,116 @@ _dispatch_queue_merge_autorelease_frequency(dispatch_queue_t dq,
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dispatch_queue_has_immutable_target(dispatch_queue_t dq)
+_dispatch_queue_is_legacy(dispatch_queue_t dq)
 {
-       if (dx_metatype(dq) != _DISPATCH_QUEUE_TYPE) {
-               return false;
+       return _dispatch_queue_atomic_flags(dq) & DQF_LEGACY;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_wlh_retain(dispatch_wlh_t wlh)
+{
+       if (wlh && wlh != DISPATCH_WLH_ANON) {
+               _dispatch_queue_retain_storage((dispatch_queue_t)wlh);
+       }
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_wlh_release(dispatch_wlh_t wlh)
+{
+       if (wlh && wlh != DISPATCH_WLH_ANON) {
+               _dispatch_queue_release_storage((dispatch_queue_t)wlh);
+       }
+}
+
+#define DISPATCH_WLH_STORAGE_REF 1ul
+
+DISPATCH_ALWAYS_INLINE DISPATCH_PURE
+static inline dispatch_wlh_t
+_dispatch_get_wlh(void)
+{
+       return _dispatch_thread_getspecific(dispatch_wlh_key);
+}
+
+DISPATCH_ALWAYS_INLINE DISPATCH_PURE
+static inline dispatch_wlh_t
+_dispatch_get_wlh_reference(void)
+{
+       dispatch_wlh_t wlh = _dispatch_thread_getspecific(dispatch_wlh_key);
+       if (wlh != DISPATCH_WLH_ANON) {
+               wlh = (dispatch_wlh_t)((uintptr_t)wlh & ~DISPATCH_WLH_STORAGE_REF);
+       }
+       return wlh;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_adopt_wlh_anon_recurse(void)
+{
+       dispatch_wlh_t cur_wlh = _dispatch_get_wlh_reference();
+       if (cur_wlh == DISPATCH_WLH_ANON) return false;
+       _dispatch_debug("wlh[anon]: set current (releasing %p)", cur_wlh);
+       _dispatch_wlh_release(cur_wlh);
+       _dispatch_thread_setspecific(dispatch_wlh_key, (void *)DISPATCH_WLH_ANON);
+       return true;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_adopt_wlh_anon(void)
+{
+       if (unlikely(!_dispatch_adopt_wlh_anon_recurse())) {
+               DISPATCH_INTERNAL_CRASH(0, "Lingering DISPATCH_WLH_ANON");
        }
-       return dx_type(dq) != DISPATCH_QUEUE_LEGACY_TYPE;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_adopt_wlh(dispatch_wlh_t wlh)
+{
+       dispatch_wlh_t cur_wlh = _dispatch_get_wlh_reference();
+       _dispatch_debug("wlh[%p]: adopt current (releasing %p)", wlh, cur_wlh);
+       if (cur_wlh == DISPATCH_WLH_ANON) {
+               DISPATCH_INTERNAL_CRASH(0, "Lingering DISPATCH_WLH_ANON");
+       }
+       if (cur_wlh != wlh) {
+               dispatch_assert(wlh);
+               _dispatch_wlh_release(cur_wlh);
+               _dispatch_wlh_retain(wlh);
+       }
+       _dispatch_thread_setspecific(dispatch_wlh_key, (void *)wlh);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_preserve_wlh_storage_reference(dispatch_wlh_t wlh)
+{
+       dispatch_assert(wlh != DISPATCH_WLH_ANON);
+       dispatch_assert(wlh == _dispatch_get_wlh());
+       _dispatch_thread_setspecific(dispatch_wlh_key,
+                       (void *)((uintptr_t)wlh | DISPATCH_WLH_STORAGE_REF));
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_reset_wlh(void)
+{
+       dispatch_assert(_dispatch_get_wlh() == DISPATCH_WLH_ANON);
+       _dispatch_debug("wlh[anon]: clear current");
+       _dispatch_thread_setspecific(dispatch_wlh_key, NULL);
+       _dispatch_clear_return_to_kernel();
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_wlh_should_poll_unote(dispatch_unote_t du)
+{
+       if (likely(_dispatch_needs_to_return_to_kernel())) {
+               dispatch_wlh_t wlh = _dispatch_get_wlh();
+               return wlh != DISPATCH_WLH_ANON && du._du->du_wlh == wlh;
+       }
+       return false;
 }
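
The DISPATCH_WLH_STORAGE_REF flag rides in the low bit of the pointer stored in the TSD slot, which works because queue pointers are always at least 2-byte aligned. A self-contained sketch of the tagging trick (names invented):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define STORAGE_REF_BIT 1ul

static void *tag(void *p)     { return (void *)((uintptr_t)p | STORAGE_REF_BIT); }
static void *untag(void *p)   { return (void *)((uintptr_t)p & ~STORAGE_REF_BIT); }
static int   has_ref(void *p) { return ((uintptr_t)p & STORAGE_REF_BIT) != 0; }

int main(void) {
	long dummy;
	void *slot = tag(&dummy);      // what _dispatch_preserve_wlh_storage_reference stores
	assert(has_ref(slot));
	assert(untag(slot) == &dummy); // what _dispatch_get_wlh_reference recovers
	printf("tagged %p -> %p\n", (void *)&dummy, slot);
	return 0;
}
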
 
 #endif // DISPATCH_PURE_C
@@ -684,30 +781,30 @@ _dq_state_has_side_suspend_cnt(uint64_t dq_state)
 }
 
 DISPATCH_ALWAYS_INLINE
-static inline uint32_t
+static inline int32_t
 _dq_state_extract_width_bits(uint64_t dq_state)
 {
        dq_state &= DISPATCH_QUEUE_WIDTH_MASK;
-       return (uint32_t)(dq_state >> DISPATCH_QUEUE_WIDTH_SHIFT);
+       return (int32_t)(dq_state >> DISPATCH_QUEUE_WIDTH_SHIFT);
 }
 
 DISPATCH_ALWAYS_INLINE
-static inline uint32_t
+static inline int32_t
 _dq_state_available_width(uint64_t dq_state)
 {
-       uint32_t full = DISPATCH_QUEUE_WIDTH_FULL;
-       if (fastpath(!(dq_state & DISPATCH_QUEUE_WIDTH_FULL_BIT))) {
+       int32_t full = DISPATCH_QUEUE_WIDTH_FULL;
+       if (likely(!(dq_state & DISPATCH_QUEUE_WIDTH_FULL_BIT))) {
                return full - _dq_state_extract_width_bits(dq_state);
        }
        return 0;
 }
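
Worked example of the width bit-field arithmetic: reserving width is a single 64-bit add of WIDTH_INTERVAL, and the in-use count is recovered by mask and shift. The shift/mask/interval values below are invented for illustration; the real DISPATCH_QUEUE_WIDTH_* constants live in queue_internal.h:

#include <stdint.h>
#include <stdio.h>

#define WIDTH_SHIFT    41                        // invented
#define WIDTH_MASK     (0xfffull << WIDTH_SHIFT) // invented
#define WIDTH_INTERVAL (1ull << WIDTH_SHIFT)
#define WIDTH_FULL     0xfff                     // invented

static int extract_width(uint64_t state) {
	return (int)((state & WIDTH_MASK) >> WIDTH_SHIFT);
}

int main(void) {
	uint64_t state = 0;
	state += 3 * WIDTH_INTERVAL; // three units of width reserved
	printf("used %d, available %d\n", extract_width(state),
			WIDTH_FULL - extract_width(state));
	return 0;
}
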
 
 DISPATCH_ALWAYS_INLINE
-static inline uint32_t
+static inline int32_t
 _dq_state_used_width(uint64_t dq_state, uint16_t dq_width)
 {
-       uint32_t full = DISPATCH_QUEUE_WIDTH_FULL;
-       uint32_t width = _dq_state_extract_width_bits(dq_state);
+       int32_t full = DISPATCH_QUEUE_WIDTH_FULL;
+       int32_t width = _dq_state_extract_width_bits(dq_state);
 
        if (dq_state & DISPATCH_QUEUE_PENDING_BARRIER) {
                // DISPATCH_QUEUE_PENDING_BARRIER means (dq_width - 1) of the used width
@@ -723,7 +820,8 @@ _dq_state_is_suspended(uint64_t dq_state)
 {
        return dq_state >= DISPATCH_QUEUE_NEEDS_ACTIVATION;
 }
-#define DISPATCH_QUEUE_IS_SUSPENDED(x)  _dq_state_is_suspended((x)->dq_state)
+#define DISPATCH_QUEUE_IS_SUSPENDED(x) \
+               _dq_state_is_suspended(os_atomic_load2o(x, dq_state, relaxed))
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
@@ -767,58 +865,129 @@ _dq_state_is_dirty(uint64_t dq_state)
        return dq_state & DISPATCH_QUEUE_DIRTY;
 }
 
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dq_state_is_base_wlh(uint64_t dq_state)
+{
+       return dq_state & DISPATCH_QUEUE_ROLE_BASE_WLH;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dq_state_is_base_anon(uint64_t dq_state)
+{
+       return dq_state & DISPATCH_QUEUE_ROLE_BASE_ANON;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dq_state_is_inner_queue(uint64_t dq_state)
+{
+       return (dq_state & DISPATCH_QUEUE_ROLE_MASK) == DISPATCH_QUEUE_ROLE_INNER;
+}
+
 DISPATCH_ALWAYS_INLINE
 static inline bool
 _dq_state_is_enqueued(uint64_t dq_state)
+{
+       return dq_state & (DISPATCH_QUEUE_ENQUEUED|DISPATCH_QUEUE_ENQUEUED_ON_MGR);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dq_state_is_enqueued_on_target(uint64_t dq_state)
 {
        return dq_state & DISPATCH_QUEUE_ENQUEUED;
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dq_state_has_override(uint64_t dq_state)
+_dq_state_is_enqueued_on_manager(uint64_t dq_state)
 {
-       return dq_state & DISPATCH_QUEUE_HAS_OVERRIDE;
+       return dq_state & DISPATCH_QUEUE_ENQUEUED_ON_MGR;
 }
 
 DISPATCH_ALWAYS_INLINE
-static inline dispatch_lock_owner
-_dq_state_drain_owner(uint64_t dq_state)
+static inline bool
+_dq_state_in_sync_transfer(uint64_t dq_state)
 {
-       return _dispatch_lock_owner((dispatch_lock)dq_state);
+       return dq_state & DISPATCH_QUEUE_SYNC_TRANSFER;
 }
-#define DISPATCH_QUEUE_DRAIN_OWNER(dq) \
-       _dq_state_drain_owner(os_atomic_load2o(dq, dq_state, relaxed))
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dq_state_drain_pended(uint64_t dq_state)
+_dq_state_received_override(uint64_t dq_state)
 {
-       return (dq_state & DISPATCH_QUEUE_DRAIN_PENDED);
+       return _dq_state_is_base_anon(dq_state) &&
+                       (dq_state & DISPATCH_QUEUE_RECEIVED_OVERRIDE);
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dq_state_drain_locked_by(uint64_t dq_state, uint32_t owner)
+_dq_state_received_sync_wait(uint64_t dq_state)
 {
-       if (_dq_state_drain_pended(dq_state)) {
-               return false;
+       return _dq_state_is_base_wlh(dq_state) &&
+                       (dq_state & DISPATCH_QUEUE_RECEIVED_SYNC_WAIT);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_qos_t
+_dq_state_max_qos(uint64_t dq_state)
+{
+       dq_state &= DISPATCH_QUEUE_MAX_QOS_MASK;
+       return (dispatch_qos_t)(dq_state >> DISPATCH_QUEUE_MAX_QOS_SHIFT);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline uint64_t
+_dq_state_from_qos(dispatch_qos_t qos)
+{
+       return (uint64_t)(qos) << DISPATCH_QUEUE_MAX_QOS_SHIFT;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline uint64_t
+_dq_state_merge_qos(uint64_t dq_state, dispatch_qos_t qos)
+{
+       uint64_t qos_bits = _dq_state_from_qos(qos);
+       if ((dq_state & DISPATCH_QUEUE_MAX_QOS_MASK) < qos_bits) {
+               dq_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
+               dq_state |= qos_bits;
+               if (unlikely(_dq_state_is_base_anon(dq_state))) {
+                       dq_state |= DISPATCH_QUEUE_RECEIVED_OVERRIDE;
+               }
        }
-       return _dq_state_drain_owner(dq_state) == owner;
+       return dq_state;
 }
 
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_tid
+_dq_state_drain_owner(uint64_t dq_state)
+{
+       return _dispatch_lock_owner((dispatch_lock)dq_state);
+}
+#define DISPATCH_QUEUE_DRAIN_OWNER(dq) \
+       _dq_state_drain_owner(os_atomic_load2o(dq, dq_state, relaxed))
+
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dq_state_drain_locked(uint64_t dq_state)
+_dq_state_drain_locked_by(uint64_t dq_state, dispatch_tid tid)
 {
-       return (dq_state & DISPATCH_QUEUE_DRAIN_OWNER_MASK) != 0;
+       return _dispatch_lock_is_locked_by((dispatch_lock)dq_state, tid);
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dq_state_has_waiters(uint64_t dq_state)
+_dq_state_drain_locked_by_self(uint64_t dq_state)
 {
-       return _dispatch_lock_has_waiters((dispatch_lock)dq_state);
+       return _dispatch_lock_is_locked_by_self((dispatch_lock)dq_state);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dq_state_drain_locked(uint64_t dq_state)
+{
+       return _dispatch_lock_is_locked((dispatch_lock)dq_state);
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -837,64 +1006,58 @@ _dq_state_is_runnable(uint64_t dq_state)
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dq_state_should_wakeup(uint64_t dq_state)
+_dq_state_should_override(uint64_t dq_state)
 {
-       return _dq_state_is_runnable(dq_state) &&
-                       !_dq_state_is_enqueued(dq_state) &&
-                       !_dq_state_drain_locked(dq_state);
+       if (_dq_state_is_suspended(dq_state) ||
+                       _dq_state_is_enqueued_on_manager(dq_state)) {
+               return false;
+       }
+       if (_dq_state_is_enqueued_on_target(dq_state)) {
+               return true;
+       }
+       if (_dq_state_is_base_wlh(dq_state)) {
+               return false;
+       }
+       return _dq_state_drain_locked(dq_state);
 }
 
+
 #endif // __cplusplus
 #pragma mark -
 #pragma mark dispatch_queue_t state machine
-#ifndef __cplusplus
 
-static inline bool _dispatch_queue_need_override(dispatch_queue_class_t dqu,
-               pthread_priority_t pp);
-static inline bool _dispatch_queue_need_override_retain(
-               dispatch_queue_class_t dqu, pthread_priority_t pp);
-static inline dispatch_priority_t _dispatch_queue_reset_override_priority(
-               dispatch_queue_class_t dqu, bool qp_is_floor);
-static inline bool _dispatch_queue_reinstate_override_priority(dispatch_queue_class_t dqu,
-               dispatch_priority_t new_op);
-static inline pthread_priority_t _dispatch_get_defaultpriority(void);
-static inline void _dispatch_set_defaultpriority_override(void);
-static inline void _dispatch_reset_defaultpriority(pthread_priority_t pp);
 static inline pthread_priority_t _dispatch_get_priority(void);
-static inline pthread_priority_t _dispatch_set_defaultpriority(
-               pthread_priority_t pp, pthread_priority_t *new_pp);
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_queue_xref_dispose(struct dispatch_queue_s *dq)
-{
-       if (slowpath(DISPATCH_QUEUE_IS_SUSPENDED(dq))) {
-               // Arguments for and against this assert are within 6705399
-               DISPATCH_CLIENT_CRASH(dq, "Release of a suspended object");
-       }
-       os_atomic_or2o(dq, dq_atomic_flags, DQF_RELEASED, relaxed);
-}
+static inline dispatch_priority_t _dispatch_get_basepri(void);
+static inline dispatch_qos_t _dispatch_get_basepri_override_qos_floor(void);
+static inline void _dispatch_set_basepri_override_qos(dispatch_qos_t qos);
+static inline void _dispatch_reset_basepri(dispatch_priority_t dbp);
+static inline dispatch_priority_t _dispatch_set_basepri(dispatch_priority_t dbp);
+static inline bool _dispatch_queue_need_override_retain(
+               dispatch_queue_class_t dqu, dispatch_qos_t qos);
 
-#endif
 #if DISPATCH_PURE_C
 
 // Note to later developers: ensure that any initialization changes are
 // made for statically allocated queues (i.e. _dispatch_main_q).
 static inline void
 _dispatch_queue_init(dispatch_queue_t dq, dispatch_queue_flags_t dqf,
-               uint16_t width, bool inactive)
+               uint16_t width, uint64_t initial_state_bits)
 {
        uint64_t dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(width);
 
-       if (inactive) {
-               dq_state += DISPATCH_QUEUE_INACTIVE + DISPATCH_QUEUE_NEEDS_ACTIVATION;
-               dq->do_ref_cnt++; // rdar://8181908 see _dispatch_queue_resume
+       dispatch_assert((initial_state_bits & ~(DISPATCH_QUEUE_ROLE_MASK |
+                       DISPATCH_QUEUE_INACTIVE)) == 0);
+
+       if (initial_state_bits & DISPATCH_QUEUE_INACTIVE) {
+               dq_state |= DISPATCH_QUEUE_INACTIVE + DISPATCH_QUEUE_NEEDS_ACTIVATION;
+               dq->do_ref_cnt += 2; // rdar://8181908 see _dispatch_queue_resume
        }
+
+       dq_state |= (initial_state_bits & DISPATCH_QUEUE_ROLE_MASK);
        dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS;
-       dqf |= (dispatch_queue_flags_t)width << DQF_WIDTH_SHIFT;
+       dqf |= DQF_WIDTH(width);
        os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed);
        dq->dq_state = dq_state;
-       dq->dq_override_voucher = DISPATCH_NO_VOUCHER;
        dq->dq_serialnum =
                        os_atomic_inc_orig(&_dispatch_queue_serial_numbers, relaxed);
 }
@@ -909,16 +1072,16 @@ DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
 static inline bool
 _dispatch_queue_try_inactive_suspend(dispatch_queue_t dq)
 {
-       uint64_t dq_state, value;
+       uint64_t old_state, new_state;
 
-       (void)os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
-               if (!fastpath(_dq_state_is_inactive(dq_state))) {
+       (void)os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
+               if (unlikely(!_dq_state_is_inactive(old_state))) {
                        os_atomic_rmw_loop_give_up(return false);
                }
-               value = dq_state + DISPATCH_QUEUE_SUSPEND_INTERVAL;
+               new_state = old_state + DISPATCH_QUEUE_SUSPEND_INTERVAL;
        });
-       if (slowpath(!_dq_state_is_suspended(dq_state)) ||
-                       slowpath(_dq_state_has_side_suspend_cnt(dq_state))) {
+       if (unlikely(!_dq_state_is_suspended(old_state) ||
+                       _dq_state_has_side_suspend_cnt(old_state))) {
                // Crashing here means that 128+ dispatch_suspend() calls have been
                // made on an inactive object and then dispatch_set_target_queue() or
                // dispatch_set_*_handler() has been called.
@@ -932,98 +1095,157 @@ _dispatch_queue_try_inactive_suspend(dispatch_queue_t dq)
        return true;
 }
 
-/* Must be used by any caller meaning to do a speculative wakeup when the caller
- * was preventing other wakeups (for example dispatch_resume() or a drainer not
- * doing a drain_try_unlock() and not observing DIRTY)
- *
- * In that case this call loads DIRTY with an acquire barrier so that when
- * other threads have made changes (such as dispatch_source_cancel()) the
- * caller can take these state machine changes into account in its decision to
- * wake up the object.
- */
 DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_queue_try_wakeup(dispatch_queue_t dq, uint64_t dq_state,
-               dispatch_wakeup_flags_t flags)
+static inline bool
+_dq_state_needs_lock_override(uint64_t dq_state, dispatch_qos_t qos)
 {
-       if (_dq_state_is_runnable(dq_state) &&
-                       !_dq_state_drain_locked(dq_state) &&
-                       (!_dq_state_is_enqueued(dq_state) ||
-                       (flags & DISPATCH_WAKEUP_WAITER_HANDOFF))) {
-               if (slowpath(_dq_state_is_dirty(dq_state))) {
-                       // <rdar://problem/14637483>
-                       // seq_cst wrt state changes that were flushed and not acted upon
-                       os_atomic_thread_fence(acquire);
-               }
-               return dx_wakeup(dq, 0, flags);
-       }
-       if (flags & DISPATCH_WAKEUP_CONSUME) {
-               return _dispatch_release_tailcall(dq);
-       }
+       return _dq_state_is_base_anon(dq_state) &&
+                       qos < _dq_state_max_qos(dq_state);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_qos_t
+_dispatch_queue_override_self(uint64_t dq_state)
+{
+       dispatch_qos_t qos = _dq_state_max_qos(dq_state);
+       _dispatch_wqthread_override_start(_dispatch_tid_self(), qos);
+       // ensure that the root queue sees
+       // that this thread was overridden.
+       _dispatch_set_basepri_override_qos(qos);
+       return qos;
 }
 
-/* Used by:
- * - _dispatch_queue_class_invoke (normal path)
- * - _dispatch_queue_override_invoke (stealer)
- *
- * Initial state must be { sc:0, ib:0, qf:0, dl:0 }
- * Final state forces { dl:self, qf:1, d: 0 }
- *    ib:1 is forced when the width acquired is equivalent to the barrier width
- */
 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
 static inline uint64_t
 _dispatch_queue_drain_try_lock(dispatch_queue_t dq,
-               dispatch_invoke_flags_t flags, uint64_t *dq_state)
+               dispatch_invoke_flags_t flags)
 {
        uint64_t pending_barrier_width =
                        (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL;
-       uint64_t xor_owner_and_set_full_width =
-                       _dispatch_tid_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT;
-       uint64_t clear_enqueued_bit, old_state, new_state;
+       uint64_t set_owner_and_set_full_width =
+                       _dispatch_lock_value_for_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT;
+       uint64_t lock_fail_mask, old_state, new_state, dequeue_mask;
+
+       // same as !_dq_state_is_runnable()
+       lock_fail_mask  = ~(DISPATCH_QUEUE_WIDTH_FULL_BIT - 1);
+       // same as _dq_state_drain_locked()
+       lock_fail_mask |= DISPATCH_QUEUE_DRAIN_OWNER_MASK;
 
        if (flags & DISPATCH_INVOKE_STEALING) {
-               clear_enqueued_bit = 0;
+               lock_fail_mask |= DISPATCH_QUEUE_ENQUEUED_ON_MGR;
+               dequeue_mask = 0;
+       } else if (flags & DISPATCH_INVOKE_MANAGER_DRAIN) {
+               dequeue_mask = DISPATCH_QUEUE_ENQUEUED_ON_MGR;
        } else {
-               clear_enqueued_bit = DISPATCH_QUEUE_ENQUEUED;
+               lock_fail_mask |= DISPATCH_QUEUE_ENQUEUED_ON_MGR;
+               dequeue_mask = DISPATCH_QUEUE_ENQUEUED;
        }
+       dispatch_assert(!(flags & DISPATCH_INVOKE_WLH));
 
+       dispatch_qos_t oq_floor = _dispatch_get_basepri_override_qos_floor();
+retry:
        os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
                new_state = old_state;
-               new_state ^= clear_enqueued_bit;
-               if (likely(_dq_state_is_runnable(old_state) &&
-                               !_dq_state_drain_locked(old_state))) {
+               if (likely(!(old_state & lock_fail_mask))) {
+                       if (unlikely(_dq_state_needs_lock_override(old_state, oq_floor))) {
+                               os_atomic_rmw_loop_give_up({
+                                       oq_floor = _dispatch_queue_override_self(old_state);
+                                       goto retry;
+                               });
+                       }
                        //
-                       // Only keep the HAS_WAITER bit (and ENQUEUED if stealing).
-                       // In particular acquiring the drain lock clears the DIRTY bit
+                       // Only keep the HAS_WAITER, MAX_QOS and ENQUEUED bits
+                       // In particular acquiring the drain lock clears the DIRTY and
+                       // RECEIVED_OVERRIDE bits.
                        //
                        new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
-                       //
-                       // For the NOWAITERS_BIT case, the thread identity
-                       // has NOWAITERS_BIT set, and NOWAITERS_BIT was kept above,
-                       // so the xor below flips the NOWAITERS_BIT to 0 as expected.
-                       //
-                       // For the non inverted WAITERS_BIT case, WAITERS_BIT is not set in
-                       // the thread identity, and the xor leaves the bit alone.
-                       //
-                       new_state ^= xor_owner_and_set_full_width;
+                       new_state |= set_owner_and_set_full_width;
                        if (_dq_state_has_pending_barrier(old_state) ||
                                        old_state + pending_barrier_width <
                                        DISPATCH_QUEUE_WIDTH_FULL_BIT) {
                                new_state |= DISPATCH_QUEUE_IN_BARRIER;
                        }
-               } else if (!clear_enqueued_bit) {
+               } else if (dequeue_mask) {
+                       // dequeue_mask is in a register, xor yields better assembly
+                       new_state ^= dequeue_mask;
+               } else {
+                       os_atomic_rmw_loop_give_up(break);
+               }
+       });
+
+       dispatch_assert((old_state & dequeue_mask) == dequeue_mask);
+       if (likely(!(old_state & lock_fail_mask))) {
+               new_state &= DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_FULL_BIT |
+                               dequeue_mask;
+               old_state &= DISPATCH_QUEUE_WIDTH_MASK;
+               return new_state - old_state;
+       }
+       return 0;
+}
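
Nearly every state transition in this hunk goes through os_atomic_rmw_loop2o(): load the state, compute a candidate, retry the CAS until it lands, or bail out early. A portable C11 rendition of that pattern (try_lock_state and its parameters are invented, not libdispatch API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool
try_lock_state(_Atomic uint64_t *state, uint64_t lock_fail_mask,
		uint64_t owner_bits)
{
	uint64_t old_state = atomic_load_explicit(state, memory_order_relaxed);
	uint64_t new_state;
	do {
		if (old_state & lock_fail_mask) {
			return false;                 // the os_atomic_rmw_loop_give_up path
		}
		new_state = old_state | owner_bits;
		// on failure, compare_exchange reloads old_state and we recompute
	} while (!atomic_compare_exchange_weak_explicit(state, &old_state,
			new_state, memory_order_acquire, memory_order_relaxed));
	return true;
}
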
+
+DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
+static inline bool
+_dispatch_queue_drain_try_lock_wlh(dispatch_queue_t dq, uint64_t *dq_state)
+{
+       uint64_t old_state, new_state;
+       uint64_t lock_bits = _dispatch_lock_value_for_self() |
+                       DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER;
+
+       os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
+               new_state = old_state;
+               if (unlikely(_dq_state_is_suspended(old_state))) {
+                       new_state &= ~DISPATCH_QUEUE_ENQUEUED;
+               } else if (unlikely(_dq_state_drain_locked(old_state))) {
                        os_atomic_rmw_loop_give_up(break);
+               } else {
+                       new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
+                       new_state |= lock_bits;
+               }
+       });
+       if (unlikely(!_dq_state_is_base_wlh(old_state) ||
+                       !_dq_state_is_enqueued_on_target(old_state) ||
+                       _dq_state_is_enqueued_on_manager(old_state))) {
+#if !__LP64__
+               old_state >>= 32;
+#endif
+               DISPATCH_INTERNAL_CRASH(old_state, "Invalid wlh state");
+       }
+
+       if (dq_state) *dq_state = new_state;
+       return !_dq_state_is_suspended(old_state) &&
+                       !_dq_state_drain_locked(old_state);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_queue_mgr_lock(dispatch_queue_t dq)
+{
+       uint64_t old_state, new_state, set_owner_and_set_full_width =
+                       _dispatch_lock_value_for_self() | DISPATCH_QUEUE_SERIAL_DRAIN_OWNED;
+
+       os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
+               new_state = old_state;
+               if (unlikely(!_dq_state_is_runnable(old_state) ||
+                               _dq_state_drain_locked(old_state))) {
+                       DISPATCH_INTERNAL_CRASH((uintptr_t)old_state,
+                                       "Locking the manager should not fail");
                }
+               new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
+               new_state |= set_owner_and_set_full_width;
        });
+}
 
-       if (dq_state) *dq_state = new_state;
-       if (likely(_dq_state_is_runnable(old_state) &&
-                       !_dq_state_drain_locked(old_state))) {
-               new_state &= DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_FULL_BIT;
-               old_state &= DISPATCH_QUEUE_WIDTH_MASK;
-               return new_state - old_state;
-       }
-       return 0;
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_queue_mgr_unlock(dispatch_queue_t dq)
+{
+       uint64_t old_state, new_state;
+       os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
+               new_state = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED;
+               new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
+               new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
+       });
+       return _dq_state_is_dirty(old_state);
 }
 
 /* Used by _dispatch_barrier_{try,}sync
@@ -1039,13 +1261,29 @@ _dispatch_queue_drain_try_lock(dispatch_queue_t dq,
  */
 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
 static inline bool
-_dispatch_queue_try_acquire_barrier_sync(dispatch_queue_t dq)
-{
-       uint64_t value = DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER;
-       value |= _dispatch_tid_self();
+_dispatch_queue_try_acquire_barrier_sync_and_suspend(dispatch_queue_t dq,
+               uint32_t tid, uint64_t suspend_count)
+{
+       uint64_t init  = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width);
+       uint64_t value = DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER |
+                       _dispatch_lock_value_from_tid(tid) |
+                       (suspend_count * DISPATCH_QUEUE_SUSPEND_INTERVAL);
+       uint64_t old_state, new_state;
+
+       return os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
+               uint64_t role = old_state & DISPATCH_QUEUE_ROLE_MASK;
+               if (old_state != (init | role)) {
+                       os_atomic_rmw_loop_give_up(break);
+               }
+               new_state = value | role;
+       });
+}
 
-       return os_atomic_cmpxchg2o(dq, dq_state,
-                       DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width), value, acquire);
+DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
+static inline bool
+_dispatch_queue_try_acquire_barrier_sync(dispatch_queue_t dq, uint32_t tid)
+{
+       return _dispatch_queue_try_acquire_barrier_sync_and_suspend(dq, tid, 0);
 }
 
 /* Used by _dispatch_sync for root queues and some drain codepaths
@@ -1073,15 +1311,23 @@ DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
 static inline bool
 _dispatch_queue_try_reserve_sync_width(dispatch_queue_t dq)
 {
-       uint64_t dq_state, value;
+       uint64_t old_state, new_state;
+
+       // <rdar://problem/24738102&24743140> reserving non-barrier width
+       // doesn't fail if only the ENQUEUED bit is set (unlike its barrier width
+       // equivalent), so we have to check that this thread hasn't enqueued
+       // anything ahead of this call or we can break ordering
+       if (unlikely(dq->dq_items_tail)) {
+               return false;
+       }
 
-       return os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
-               if (!fastpath(_dq_state_is_sync_runnable(dq_state)) ||
-                               slowpath(_dq_state_is_dirty(dq_state)) ||
-                               slowpath(_dq_state_has_pending_barrier(dq_state))) {
+       return os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
+               if (unlikely(!_dq_state_is_sync_runnable(old_state)) ||
+                               _dq_state_is_dirty(old_state) ||
+                               _dq_state_has_pending_barrier(old_state)) {
                        os_atomic_rmw_loop_give_up(return false);
                }
-               value = dq_state + DISPATCH_QUEUE_WIDTH_INTERVAL;
+               new_state = old_state + DISPATCH_QUEUE_WIDTH_INTERVAL;
        });
 }
 
@@ -1091,21 +1337,21 @@ _dispatch_queue_try_reserve_sync_width(dispatch_queue_t dq)
  * possibly 0
  */
 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
-static inline uint32_t
-_dispatch_queue_try_reserve_apply_width(dispatch_queue_t dq, uint32_t da_width)
+static inline int32_t
+_dispatch_queue_try_reserve_apply_width(dispatch_queue_t dq, int32_t da_width)
 {
-       uint64_t dq_state, value;
-       uint32_t width;
+       uint64_t old_state, new_state;
+       int32_t width;
 
-       (void)os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
-               width = _dq_state_available_width(dq_state);
-               if (!fastpath(width)) {
+       (void)os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
+               width = (int32_t)_dq_state_available_width(old_state);
+               if (unlikely(!width)) {
                        os_atomic_rmw_loop_give_up(return 0);
                }
                if (width > da_width) {
                        width = da_width;
                }
-               value = dq_state + width * DISPATCH_QUEUE_WIDTH_INTERVAL;
+               new_state = old_state + (uint64_t)width * DISPATCH_QUEUE_WIDTH_INTERVAL;
        });
        return width;
 }
@@ -1116,10 +1362,10 @@ _dispatch_queue_try_reserve_apply_width(dispatch_queue_t dq, uint32_t da_width)
  */
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_queue_relinquish_width(dispatch_queue_t dq, uint32_t da_width)
+_dispatch_queue_relinquish_width(dispatch_queue_t dq, int32_t da_width)
 {
        (void)os_atomic_sub2o(dq, dq_state,
-                       da_width * DISPATCH_QUEUE_WIDTH_INTERVAL, relaxed);
+                       (uint64_t)da_width * DISPATCH_QUEUE_WIDTH_INTERVAL, relaxed);
 }
 
 /* Used by target-queue recursing code
@@ -1131,16 +1377,49 @@ DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
 static inline bool
 _dispatch_queue_try_acquire_async(dispatch_queue_t dq)
 {
-       uint64_t dq_state, value;
+       uint64_t old_state, new_state;
 
-       return os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, acquire, {
-               if (!fastpath(_dq_state_is_runnable(dq_state)) ||
-                               slowpath(_dq_state_is_dirty(dq_state)) ||
-                               slowpath(_dq_state_has_pending_barrier(dq_state))) {
+       return os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
+               if (unlikely(!_dq_state_is_runnable(old_state) ||
+                               _dq_state_is_dirty(old_state) ||
+                               _dq_state_has_pending_barrier(old_state))) {
                        os_atomic_rmw_loop_give_up(return false);
                }
-               value = dq_state + DISPATCH_QUEUE_WIDTH_INTERVAL;
+               new_state = old_state + DISPATCH_QUEUE_WIDTH_INTERVAL;
+       });
+}
+
+/* Used by concurrent drain
+ *
+ * Either acquires the full barrier width, in which case the Final state is:
+ *   { ib:1 qf:1 pb:0 d:0 }
+ * Or if there isn't enough width prepare the queue with the PENDING_BARRIER bit
+ *   { ib:0 pb:1 d:0 }
+ *
+ * This always clears the dirty bit as we know for sure we shouldn't reevaluate
+ * the state machine here
+ */
+DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
+static inline bool
+_dispatch_queue_try_upgrade_full_width(dispatch_queue_t dq, uint64_t owned)
+{
+       uint64_t old_state, new_state;
+       uint64_t pending_barrier_width = DISPATCH_QUEUE_PENDING_BARRIER +
+                       (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL;
+
+       os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
+               new_state = old_state - owned;
+               if (likely(!_dq_state_has_pending_barrier(old_state))) {
+                       new_state += pending_barrier_width;
+               }
+               if (likely(_dq_state_is_runnable(new_state))) {
+                       new_state += DISPATCH_QUEUE_WIDTH_INTERVAL;
+                       new_state += DISPATCH_QUEUE_IN_BARRIER;
+                       new_state -= DISPATCH_QUEUE_PENDING_BARRIER;
+               }
+               new_state &= ~DISPATCH_QUEUE_DIRTY;
        });
+       return new_state & DISPATCH_QUEUE_IN_BARRIER;
 }
 
 /* Used at the end of Drainers
@@ -1155,7 +1434,7 @@ _dispatch_queue_adjust_owned(dispatch_queue_t dq, uint64_t owned,
 {
        uint64_t reservation;
 
-       if (slowpath(dq->dq_width > 1)) {
+       if (unlikely(dq->dq_width > 1)) {
                if (next_dc && _dispatch_object_is_barrier(next_dc)) {
                        reservation  = DISPATCH_QUEUE_PENDING_BARRIER;
                        reservation += (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL;
@@ -1171,112 +1450,42 @@ _dispatch_queue_adjust_owned(dispatch_queue_t dq, uint64_t owned,
  * In that case, only the DIRTY bit is cleared. The DIRTY bit is therefore used
  * as a signal to renew the drain lock instead of releasing it.
  *
- * Successful unlock forces { dl:0, d:0, qo:0 } and gives back `owned`
+ * Successful unlock forces { dl:0, d:!done, qo:0 } and gives back `owned`
  */
 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
 static inline bool
-_dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned)
-{
-       uint64_t old_state = os_atomic_load2o(dq, dq_state, relaxed);
-       uint64_t new_state;
-       dispatch_priority_t pp = 0, op;
-
-       do {
-               if (unlikely(_dq_state_is_dirty(old_state) &&
-                               !_dq_state_is_suspended(old_state))) {
-                       // just renew the drain lock with an acquire barrier, to see
-                       // what the enqueuer that set DIRTY has done.
-                       os_atomic_and2o(dq, dq_state, ~DISPATCH_QUEUE_DIRTY, acquire);
-                       _dispatch_queue_reinstate_override_priority(dq, pp);
-                       return false;
-               }
-               new_state = old_state - owned;
-               if ((new_state & DISPATCH_QUEUE_WIDTH_FULL_BIT) ||
-                               _dq_state_is_suspended(old_state)) {
-                       // the test for the WIDTH_FULL_BIT is about narrow concurrent queues
-                       // releasing the drain lock while being at the width limit
-                       //
-                       // _non_barrier_complete() will set the DIRTY bit when going back
-                       // under the limit which will cause the try_unlock to fail
-                       new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state);
+_dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done)
+{
+       uint64_t old_state, new_state;
+
+       os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
+               new_state  = old_state - owned;
+               new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
+               if (unlikely(_dq_state_is_suspended(old_state))) {
+                       // nothing to do
+               } else if (unlikely(_dq_state_is_dirty(old_state))) {
+                       os_atomic_rmw_loop_give_up({
+                               // just renew the drain lock with an acquire barrier, to see
+                               // what the enqueuer that set DIRTY has done.
+                               // the xor generates better assembly as DISPATCH_QUEUE_DIRTY
+                               // is already in a register
+                               os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire);
+                               return false;
+                       });
+               } else if (likely(done)) {
+                       new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
                } else {
-                       new_state &= ~DISPATCH_QUEUE_DIRTY;
-                       new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
-                       // This current owner is the only one that can clear HAS_OVERRIDE,
-                       // so accumulating reset overrides here is valid.
-                       if (unlikely(_dq_state_has_override(new_state))) {
-                               new_state &= ~DISPATCH_QUEUE_HAS_OVERRIDE;
-                               dispatch_assert(!_dispatch_queue_is_thread_bound(dq));
-                               op = _dispatch_queue_reset_override_priority(dq, false);
-                               if (op > pp) pp = op;
-                       }
+                       new_state |= DISPATCH_QUEUE_DIRTY;
                }
-       } while (!fastpath(os_atomic_cmpxchgvw2o(dq, dq_state,
-                       old_state, new_state, &old_state, release)));
-
-       if (_dq_state_has_override(old_state)) {
-               // Ensure that the root queue sees that this thread was overridden.
-               _dispatch_set_defaultpriority_override();
-       }
-       return true;
-}
-
-/* Used at the end of Drainers when the next work item is known
- * and that the dirty-head check isn't needed.
- *
- * This releases `owned`, clears DIRTY, and handles HAS_OVERRIDE when seen.
- */
-DISPATCH_ALWAYS_INLINE
-static inline uint64_t
-_dispatch_queue_drain_lock_transfer_or_unlock(dispatch_queue_t dq,
-               uint64_t owned, mach_port_t next_owner, uint64_t *orig_state)
-{
-       uint64_t dq_state, value;
-
-#ifdef DLOCK_NOWAITERS_BIT
-       // The NOWAITERS_BIT state must not change through the transfer. It means
-       // that if next_owner is 0 the bit must be flipped in the rmw_loop below,
-       // and if next_owner is set, then the bit must be left unchanged.
-       //
-       // - when next_owner is 0, the xor below sets NOWAITERS_BIT in next_owner,
-       //   which causes the second xor to flip the bit as expected.
-       // - if next_owner is not 0, it has the NOWAITERS_BIT set, so we have to
-       //   clear it so that the second xor leaves the NOWAITERS_BIT alone.
-       next_owner ^= DLOCK_NOWAITERS_BIT;
-#endif
-       os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, release, {
-               value = dq_state - owned;
-               // same as DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT
-               // but we want to be more efficient wrt the WAITERS_BIT
-               value &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK;
-               value &= ~DISPATCH_QUEUE_DRAIN_PENDED;
-               value &= ~DISPATCH_QUEUE_DIRTY;
-               value ^= next_owner;
        });
 
-       if (_dq_state_has_override(dq_state)) {
+       if (_dq_state_received_override(old_state)) {
                // Ensure that the root queue sees that this thread was overridden.
-               _dispatch_set_defaultpriority_override();
-       }
-       if (orig_state) *orig_state = dq_state;
-       return value;
-}
-#define _dispatch_queue_drain_unlock(dq, owned, orig) \
-               _dispatch_queue_drain_lock_transfer_or_unlock(dq, owned, 0, orig)
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_queue_drain_transfer_lock(dispatch_queue_t dq,
-               uint64_t to_unlock, dispatch_object_t dou)
-{
-       mach_port_t th_next = 0;
-       if (dou._dc->dc_flags & DISPATCH_OBJ_BARRIER_BIT) {
-               th_next = (mach_port_t)dou._dc->dc_data;
+               _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state));
        }
-       _dispatch_queue_drain_lock_transfer_or_unlock(dq, to_unlock, th_next, NULL);
+       return true;
 }
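
A minimal, self-contained sketch of this try-unlock-or-renew pattern in portable C11 atomics follows. The Q_DIRTY/Q_OWNED bits and the try_unlock name are illustrative; the real dq_state word also packs the queue width, suspend count and the drain owner lock word.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define Q_DIRTY 0x1ull  // illustrative: an enqueuer published new work
#define Q_OWNED 0x2ull  // illustrative: drain lock held by this thread

// Try to drop the drain lock. If an enqueuer set DIRTY while we held it,
// keep the lock, clear DIRTY with acquire ordering so the newly pushed
// items become visible, and tell the caller to drain again.
static bool
try_unlock(_Atomic uint64_t *state)
{
        uint64_t old_s = atomic_load_explicit(state, memory_order_relaxed);
        for (;;) {
                if (old_s & Q_DIRTY) {
                        atomic_fetch_and_explicit(state, ~Q_DIRTY,
                                        memory_order_acquire);
                        return false;  // lock renewed, caller drains again
                }
                // release ordering publishes everything we drained
                if (atomic_compare_exchange_weak_explicit(state, &old_s,
                                old_s & ~Q_OWNED, memory_order_release,
                                memory_order_relaxed)) {
                        return true;
                }
        }
}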
 
-
 #pragma mark -
 #pragma mark os_mpsc_queue
 
@@ -1297,7 +1506,7 @@ _dispatch_queue_drain_transfer_lock(dispatch_queue_t dq,
                os_mpsc_node_type(q, _ns) _head = (head), _tail = (tail), _prev; \
                _tail->_o_next = NULL; \
                _prev = os_atomic_xchg2o((q), _ns##_tail, _tail, release); \
-               if (fastpath(_prev)) { \
+               if (likely(_prev)) { \
                        os_atomic_store2o(_prev, _o_next, _head, relaxed); \
                } \
                (_prev == NULL); \
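
The push side above serializes producers through a single atomic exchange on the tail. A self-contained sketch of the same idea, using hypothetical node/queue types rather than libdispatch's macro-generated ones:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

// Hypothetical intrusive MPSC node/queue, not libdispatch's real types.
struct node { _Atomic(struct node *) next; };
struct mpsc { _Atomic(struct node *) head, tail; };

// Multi-producer push: one atomic exchange on the tail serializes the
// producers. Returns true when the queue was empty, i.e. the caller must
// publish the head and wake the consumer (the `_prev == NULL` result).
static bool
mpsc_push(struct mpsc *q, struct node *n)
{
        atomic_store_explicit(&n->next, NULL, memory_order_relaxed);
        struct node *prev = atomic_exchange_explicit(&q->tail, n,
                        memory_order_release);
        if (prev) {
                atomic_store_explicit(&prev->next, n, memory_order_relaxed);
                return false;
        }
        return true;  // queue went non-empty
}
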
@@ -1317,20 +1526,22 @@ _dispatch_queue_drain_transfer_lock(dispatch_queue_t dq,
 // Single Consumer calls, can NOT be used safely concurrently
 //
 
-#define os_mpsc_get_head(q, _ns)  ({ \
-               os_mpsc_node_type(q, _ns) _head; \
-               _dispatch_wait_until(_head = (q)->_ns##_head); \
-               _head; \
-       })
+#define os_mpsc_get_head(q, _ns) \
+               _dispatch_wait_until(os_atomic_load2o(q, _ns##_head, dependency))
+
+#define os_mpsc_get_next(_n, _o_next) \
+               _dispatch_wait_until(os_atomic_load2o(_n, _o_next, dependency))
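
These `dependency` loads are consume-like: the consumer's subsequent reads of node fields are address-dependent on the loaded pointer, which mainstream hardware orders without a fence. A rough portable equivalent, reusing the hypothetical struct node from the push sketch, spells this memory_order_consume (which compilers typically strengthen to acquire):

#include <stdatomic.h>
#include <stddef.h>

struct node { _Atomic(struct node *) next; };

static struct node *
get_next_dep(struct node *n)
{
        struct node *next;
        while ((next = atomic_load_explicit(&n->next,
                        memory_order_consume)) == NULL) {
                // spin: a producer exchanged the tail but has not linked yet
        }
        return next;
}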
 
 #define os_mpsc_pop_head(q, _ns, head, _o_next)  ({ \
                typeof(q) _q = (q); \
-               os_mpsc_node_type(_q, _ns) _head = (head), _n = fastpath(_head->_o_next); \
+               os_mpsc_node_type(_q, _ns) _head = (head), _n; \
+               _n = os_atomic_load2o(_head, _o_next, dependency); \
                os_atomic_store2o(_q, _ns##_head, _n, relaxed); \
                /* 22708742: set tail to NULL with release, so that NULL write */ \
                /* to head above doesn't clobber head from concurrent enqueuer */ \
-               if (!_n && !os_atomic_cmpxchg2o(_q, _ns##_tail, _head, NULL, release)) { \
-                       _dispatch_wait_until(_n = fastpath(_head->_o_next)); \
+               if (unlikely(!_n && \
+                               !os_atomic_cmpxchg2o(_q, _ns##_tail, _head, NULL, release))) { \
+                       _n = os_mpsc_get_next(_head, _o_next); \
                        os_atomic_store2o(_q, _ns##_head, _n, relaxed); \
                } \
                _n; \
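
The subtle case in os_mpsc_pop_head is a producer that has already exchanged the tail but not yet linked `_o_next`. A sketch of the same dance, with the hypothetical types from the push sketch:

#include <stdatomic.h>
#include <stddef.h>

struct node { _Atomic(struct node *) next; };
struct mpsc { _Atomic(struct node *) head, tail; };

// Single-consumer pop: if the head has no successor, try to swing the
// tail back to NULL. If that CAS fails, a producer already exchanged the
// tail and is about to link `next`; spin until the link appears. The
// release on the tail CAS keeps the NULL head write from clobbering a
// concurrent enqueuer's head (the 22708742 comment above).
static struct node *
mpsc_pop_head(struct mpsc *q, struct node *head)
{
        struct node *n = atomic_load_explicit(&head->next,
                        memory_order_acquire);
        atomic_store_explicit(&q->head, n, memory_order_relaxed);
        if (n == NULL) {
                struct node *expected = head;
                if (!atomic_compare_exchange_strong_explicit(&q->tail,
                                &expected, NULL, memory_order_release,
                                memory_order_relaxed)) {
                        while ((n = atomic_load_explicit(&head->next,
                                        memory_order_acquire)) == NULL) {
                                // spin: producer in flight
                        }
                        atomic_store_explicit(&q->head, n,
                                        memory_order_relaxed);
                }
        }
        return n;
}
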
@@ -1339,17 +1550,17 @@ _dispatch_queue_drain_transfer_lock(dispatch_queue_t dq,
 #define os_mpsc_undo_pop_head(q, _ns, head, next, _o_next)  ({ \
                typeof(q) _q = (q); \
                os_mpsc_node_type(_q, _ns) _head = (head), _n = (next); \
-               if (!_n && !os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _head, relaxed)) { \
-                       _dispatch_wait_until(_n = _q->_ns##_head); \
-                       _head->_o_next = _n; \
+               if (unlikely(!_n && \
+                               !os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _head, relaxed))) { \
+                       _n = os_mpsc_get_head(q, _ns); \
+                       os_atomic_store2o(_head, _o_next, _n, relaxed); \
                } \
                os_atomic_store2o(_q, _ns##_head, _head, relaxed); \
        })
 
 #define os_mpsc_capture_snapshot(q, _ns, tail)  ({ \
                typeof(q) _q = (q); \
-               os_mpsc_node_type(_q, _ns) _head; \
-               _dispatch_wait_until(_head = _q->_ns##_head); \
+               os_mpsc_node_type(_q, _ns) _head = os_mpsc_get_head(q, _ns); \
                os_atomic_store2o(_q, _ns##_head, NULL, relaxed); \
                /* 22708742: set tail to NULL with release, so that NULL write */ \
                /* to head above doesn't clobber head from concurrent enqueuer */ \
@@ -1360,17 +1571,17 @@ _dispatch_queue_drain_transfer_lock(dispatch_queue_t dq,
 #define os_mpsc_pop_snapshot_head(head, tail, _o_next) ({ \
                os_unqualified_pointer_type(head) _head = (head), _n = NULL; \
                if (_head != (tail)) { \
-                       _dispatch_wait_until(_n = _head->_o_next); \
+                       _n = os_mpsc_get_next(_head, _o_next); \
                }; \
                _n; })
 
 #define os_mpsc_prepend(q, _ns, head, tail, _o_next)  ({ \
                typeof(q) _q = (q); \
                os_mpsc_node_type(_q, _ns) _head = (head), _tail = (tail), _n; \
-               _tail->_o_next = NULL; \
-               if (!os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _tail, release)) { \
-                       _dispatch_wait_until(_n = _q->_ns##_head); \
-                       _tail->_o_next = _n; \
+               os_atomic_store2o(_tail, _o_next, NULL, relaxed); \
+               if (unlikely(!os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _tail, release))) { \
+                       _n = os_mpsc_get_head(q, _ns); \
+                       os_atomic_store2o(_tail, _o_next, _n, relaxed); \
                } \
                os_atomic_store2o(_q, _ns##_head, _head, relaxed); \
        })
@@ -1380,13 +1591,13 @@ _dispatch_queue_drain_transfer_lock(dispatch_queue_t dq,
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dispatch_queue_sidelock_trylock(dispatch_queue_t dq, pthread_priority_t pp)
+_dispatch_queue_sidelock_trylock(dispatch_queue_t dq, dispatch_qos_t qos)
 {
-       dispatch_lock_owner owner;
+       dispatch_tid owner;
        if (_dispatch_unfair_lock_trylock(&dq->dq_sidelock, &owner)) {
                return true;
        }
-       _dispatch_wqthread_override_start_check_owner(owner, pp,
+       _dispatch_wqthread_override_start_check_owner(owner, qos,
                        &dq->dq_sidelock.dul_lock);
        return false;
 }
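
The shape of this trylock-with-owner-handoff, sketched with illustrative types (the real code stores the owner tid inside the unfair lock word):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint32_t tid_t;  // illustrative thread id, like dispatch_tid
struct unfair_lock { _Atomic tid_t owner; };

// When the lock is contended, the failed CAS hands back the owning
// thread id so the caller can apply a QoS override to that thread
// instead of priority-inverting behind it.
static bool
trylock_or_boost(struct unfair_lock *l, tid_t self,
                void (*boost_owner)(tid_t owner))
{
        tid_t expected = 0;
        if (atomic_compare_exchange_strong_explicit(&l->owner, &expected,
                        self, memory_order_acquire, memory_order_relaxed)) {
                return true;
        }
        boost_owner(expected);  // expected now holds the owner's tid
        return false;
}
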
@@ -1406,7 +1617,9 @@ _dispatch_queue_sidelock_tryunlock(dispatch_queue_t dq)
                return true;
        }
        // Ensure that the root queue sees that this thread was overridden.
-       _dispatch_set_defaultpriority_override();
+       // Since we don't know which override QoS was used, use MAINTENANCE
+       // as a marker for _dispatch_reset_basepri_override()
+       _dispatch_set_basepri_override_qos(DISPATCH_QOS_MAINTENANCE);
        return false;
 }
 
@@ -1416,7 +1629,9 @@ _dispatch_queue_sidelock_unlock(dispatch_queue_t dq)
 {
        if (_dispatch_unfair_lock_unlock_had_failed_trylock(&dq->dq_sidelock)) {
                // Ensure that the root queue sees that this thread was overridden.
-               _dispatch_set_defaultpriority_override();
+               // Since we don't know which override QoS was used, use MAINTENANCE
+               // as a marker for _dispatch_reset_basepri_override()
+               _dispatch_set_basepri_override_qos(DISPATCH_QOS_MAINTENANCE);
        }
 }
 
@@ -1476,141 +1691,85 @@ _dispatch_queue_push_update_tail_list(dispatch_queue_t dq,
 DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_queue_push_update_head(dispatch_queue_t dq,
-               struct dispatch_object_s *head, bool retained)
+               struct dispatch_object_s *head)
 {
-       if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) {
-               dispatch_assert(!retained);
-               // Lie about "retained" here, it generates better assembly in this
-               // hotpath, and _dispatch_root_queue_wakeup knows to ignore this
-               // fake "WAKEUP_CONSUME" bit when it also sees WAKEUP_FLUSH.
-               //
-               // We need to bypass the retain below because pthread root queues
-               // are not global and retaining them would be wrong.
-               //
-               // We should eventually have a typeflag for "POOL" kind of root queues.
-               retained = true;
-       }
-       // The queue must be retained before dq_items_head is written in order
-       // to ensure that the reference is still valid when _dispatch_queue_wakeup
-       // is called. Otherwise, if preempted between the assignment to
-       // dq_items_head and _dispatch_queue_wakeup, the blocks submitted to the
-       // queue may release the last reference to the queue when invoked by
-       // _dispatch_queue_drain. <rdar://problem/6932776>
-       if (!retained) _dispatch_retain(dq);
        os_mpsc_push_update_head(dq, dq_items, head);
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head,
-               dispatch_object_t _tail, pthread_priority_t pp, unsigned int n)
+_dispatch_root_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _head,
+               dispatch_object_t _tail, int n)
 {
        struct dispatch_object_s *head = _head._do, *tail = _tail._do;
-       bool override = _dispatch_queue_need_override_retain(dq, pp);
-       dispatch_queue_flags_t flags;
-       if (slowpath(_dispatch_queue_push_update_tail_list(dq, head, tail))) {
-               _dispatch_queue_push_update_head(dq, head, override);
-               if (fastpath(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE)) {
-                       return _dispatch_queue_push_list_slow(dq, n);
-               }
-               flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_FLUSH;
-       } else if (override) {
-               flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_OVERRIDING;
-       } else {
-               return;
+       if (unlikely(_dispatch_queue_push_update_tail_list(dq, head, tail))) {
+               _dispatch_queue_push_update_head(dq, head);
+               return _dispatch_global_queue_poke(dq, n, 0);
        }
-       dx_wakeup(dq, pp, flags);
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _tail,
-               pthread_priority_t pp, dispatch_wakeup_flags_t flags)
+               dispatch_qos_t qos)
 {
        struct dispatch_object_s *tail = _tail._do;
-       bool override = _dispatch_queue_need_override(dq, pp);
-       if (flags & DISPATCH_WAKEUP_SLOW_WAITER) {
-               // when SLOW_WAITER is set, we borrow the reference of the caller
-               if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) {
-                       _dispatch_queue_push_update_head(dq, tail, true);
-                       flags = DISPATCH_WAKEUP_SLOW_WAITER | DISPATCH_WAKEUP_FLUSH;
-               } else if (override) {
-                       flags = DISPATCH_WAKEUP_SLOW_WAITER | DISPATCH_WAKEUP_OVERRIDING;
-               } else {
-                       flags = DISPATCH_WAKEUP_SLOW_WAITER;
-               }
+       dispatch_wakeup_flags_t flags = 0;
+       // If we are going to call dx_wakeup(), the queue must be retained before
+       // the item we're pushing can be dequeued, which means:
+       // - before we exchange the tail if we may have to override
+       // - before we set the head if we made the queue non-empty.
+       // Otherwise, if preempted between one of these and the call to dx_wakeup(),
+       // the blocks submitted to the queue may release the last reference to the
+       // queue when invoked by _dispatch_queue_drain. <rdar://problem/6932776>
+       bool overriding = _dispatch_queue_need_override_retain(dq, qos);
+       if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) {
+               if (!overriding) _dispatch_retain_2(dq->_as_os_obj);
+               _dispatch_queue_push_update_head(dq, tail);
+               flags = DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY;
+       } else if (overriding) {
+               flags = DISPATCH_WAKEUP_CONSUME_2;
        } else {
-               if (override) _dispatch_retain(dq);
-               if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) {
-                       _dispatch_queue_push_update_head(dq, tail, override);
-                       flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_FLUSH;
-               } else if (override) {
-                       flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_OVERRIDING;
-               } else {
-                       return;
-               }
+               return;
        }
-       return dx_wakeup(dq, pp, flags);
+       return dx_wakeup(dq, qos, flags);
 }
 
-struct _dispatch_identity_s {
-       pthread_priority_t old_pp;
-};
-
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_root_queue_identity_assume(struct _dispatch_identity_s *di,
-               pthread_priority_t pp)
+_dispatch_queue_push_queue(dispatch_queue_t tq, dispatch_queue_t dq,
+               uint64_t dq_state)
 {
-       // assumed_rq was set by the caller, we need to fake the priorities
-       dispatch_queue_t assumed_rq = _dispatch_queue_get_current();
-
-       dispatch_assert(dx_type(assumed_rq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE);
-
-       di->old_pp = _dispatch_get_defaultpriority();
-
-       if (!(assumed_rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG)) {
-               if (!pp) {
-                       pp = _dispatch_get_priority();
-                       // _dispatch_root_queue_drain_deferred_item() may turn a manager
-                       // thread into a regular root queue, and we must never try to
-                       // restore the manager flag once we became a regular work queue
-                       // thread.
-                       pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
-               }
-               if ((pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK) >
-                               (assumed_rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
-                       _dispatch_wqthread_override_start(_dispatch_tid_self(), pp);
-                       // Ensure that the root queue sees that this thread was overridden.
-                       _dispatch_set_defaultpriority_override();
-               }
-       }
-       _dispatch_reset_defaultpriority(assumed_rq->dq_priority);
+       return dx_push(tq, dq, _dq_state_max_qos(dq_state));
 }
 
 DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_root_queue_identity_restore(struct _dispatch_identity_s *di)
+static inline dispatch_priority_t
+_dispatch_root_queue_identity_assume(dispatch_queue_t assumed_rq)
 {
-       _dispatch_reset_defaultpriority(di->old_pp);
+       dispatch_priority_t old_dbp = _dispatch_get_basepri();
+       dispatch_assert(dx_hastypeflag(assumed_rq, QUEUE_ROOT));
+       _dispatch_reset_basepri(assumed_rq->dq_priority);
+       _dispatch_queue_set_current(assumed_rq);
+       return old_dbp;
 }
 
-typedef dispatch_queue_t
+typedef dispatch_queue_wakeup_target_t
 _dispatch_queue_class_invoke_handler_t(dispatch_object_t,
-               dispatch_invoke_flags_t, uint64_t *owned, struct dispatch_object_s **);
+               dispatch_invoke_context_t dic, dispatch_invoke_flags_t,
+               uint64_t *owned);
 
 DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_queue_class_invoke(dispatch_object_t dou,
-               dispatch_invoke_flags_t flags,
+               dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
+               dispatch_invoke_flags_t const_restrict_flags,
                _dispatch_queue_class_invoke_handler_t invoke)
 {
        dispatch_queue_t dq = dou._dq;
-       struct dispatch_object_s *dc = NULL;
-       dispatch_queue_t tq = NULL;
-       uint64_t dq_state, to_unlock = 0;
-       bool owning = !slowpath(flags & DISPATCH_INVOKE_STEALING);
-       bool overriding = slowpath(flags & DISPATCH_INVOKE_OVERRIDING);
+       dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE;
+       bool owning = !(flags & DISPATCH_INVOKE_STEALING);
+       uint64_t owned = 0;
 
        // When called from a plain _dispatch_queue_drain:
        //   overriding = false
@@ -1619,39 +1778,45 @@ _dispatch_queue_class_invoke(dispatch_object_t dou,
        // When called from an override continuation:
        //   overriding = true
        //   owning depends on whether the override embedded the queue or steals
-       DISPATCH_COMPILER_CAN_ASSUME(owning || overriding);
 
-       if (owning) {
+       if (!(flags & (DISPATCH_INVOKE_STEALING | DISPATCH_INVOKE_WLH))) {
                dq->do_next = DISPATCH_OBJECT_LISTLESS;
        }
-       to_unlock = _dispatch_queue_drain_try_lock(dq, flags, &dq_state);
-       if (likely(to_unlock)) {
-               struct _dispatch_identity_s di;
-               pthread_priority_t old_dp;
-
-drain_pending_barrier:
-               if (overriding) {
-                       _dispatch_object_debug(dq, "stolen onto thread 0x%x, 0x%lx",
-                                       _dispatch_tid_self(), _dispatch_get_defaultpriority());
-                       _dispatch_root_queue_identity_assume(&di, 0);
-               }
-
+       flags |= const_restrict_flags;
+       if (likely(flags & DISPATCH_INVOKE_WLH)) {
+               owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED | DISPATCH_QUEUE_ENQUEUED;
+       } else {
+               owned = _dispatch_queue_drain_try_lock(dq, flags);
+       }
+       if (likely(owned)) {
+               dispatch_priority_t old_dbp;
                if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) {
-                       pthread_priority_t op, dp;
-
-                       old_dp = _dispatch_set_defaultpriority(dq->dq_priority, &dp);
-                       op = dq->dq_override;
-                       if (op > (dp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
-                               _dispatch_wqthread_override_start(_dispatch_tid_self(), op);
-                               // Ensure that the root queue sees that this thread was overridden.
-                               _dispatch_set_defaultpriority_override();
-                       }
+                       old_dbp = _dispatch_set_basepri(dq->dq_priority);
+               } else {
+                       old_dbp = 0;
                }
 
                flags = _dispatch_queue_merge_autorelease_frequency(dq, flags);
 attempt_running_slow_head:
-               tq = invoke(dq, flags, &to_unlock, &dc);
-               if (slowpath(tq)) {
+#if DISPATCH_COCOA_COMPAT
+               if ((flags & DISPATCH_INVOKE_WLH) &&
+                               !(flags & DISPATCH_INVOKE_AUTORELEASE_ALWAYS)) {
+                       _dispatch_last_resort_autorelease_pool_push(dic);
+               }
+#endif // DISPATCH_COCOA_COMPAT
+               tq = invoke(dq, dic, flags, &owned);
+#if DISPATCH_COCOA_COMPAT
+               if ((flags & DISPATCH_INVOKE_WLH) &&
+                               !(flags & DISPATCH_INVOKE_AUTORELEASE_ALWAYS)) {
+                       dispatch_thread_frame_s dtf;
+                       _dispatch_thread_frame_push(&dtf, dq);
+                       _dispatch_last_resort_autorelease_pool_pop(dic);
+                       _dispatch_thread_frame_pop(&dtf);
+               }
+#endif // DISPATCH_COCOA_COMPAT
+               dispatch_assert(tq != DISPATCH_QUEUE_WAKEUP_TARGET);
+               if (unlikely(tq != DISPATCH_QUEUE_WAKEUP_NONE &&
+                               tq != DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT)) {
                        // Either dc is set, which is a deferred invoke case
                        //
                        // or only tq is set, which means a reenqueue is required, because of:
@@ -1660,78 +1825,60 @@ attempt_running_slow_head:
                        // In both cases, we want to bypass the check for DIRTY.
                        // That may cause us to leave DIRTY in place but all drain lock
                        // acquirers clear it
-               } else {
-                       if (!_dispatch_queue_drain_try_unlock(dq, to_unlock)) {
+               } else if (!_dispatch_queue_drain_try_unlock(dq, owned,
+                               tq == DISPATCH_QUEUE_WAKEUP_NONE)) {
+                       tq = _dispatch_queue_get_current();
+                       if (dx_hastypeflag(tq, QUEUE_ROOT) || !owning) {
                                goto attempt_running_slow_head;
                        }
-                       to_unlock = 0;
-               }
-               if (overriding) {
-                       _dispatch_root_queue_identity_restore(&di);
+                       DISPATCH_COMPILER_CAN_ASSUME(tq != DISPATCH_QUEUE_WAKEUP_NONE);
+               } else {
+                       owned = 0;
+                       tq = NULL;
                }
                if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) {
-                       _dispatch_reset_defaultpriority(old_dp);
-               }
-       } else if (overriding) {
-               uint32_t owner = _dq_state_drain_owner(dq_state);
-               pthread_priority_t p = dq->dq_override;
-               if (owner && p) {
-                       _dispatch_object_debug(dq, "overriding thr 0x%x to priority 0x%lx",
-                                       owner, p);
-                       _dispatch_wqthread_override_start_check_owner(owner, p,
-                                       &dq->dq_state_lock);
+                       _dispatch_reset_basepri(old_dbp);
                }
        }
-
-       if (owning) {
+       if (likely(owning)) {
                _dispatch_introspection_queue_item_complete(dq);
        }
 
-       if (tq && dc) {
-               return _dispatch_queue_drain_deferred_invoke(dq, flags, to_unlock, dc);
-       }
-
        if (tq) {
-               bool full_width_upgrade_allowed = (tq == _dispatch_queue_get_current());
-               uint64_t old_state, new_state;
+               if (const_restrict_flags & DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS) {
+                       dispatch_assert(dic->dic_deferred == NULL);
+               } else if (dic->dic_deferred) {
+                       return _dispatch_queue_drain_sync_waiter(dq, dic,
+                                       flags, owned);
+               }
 
+               uint64_t old_state, new_state, enqueued = DISPATCH_QUEUE_ENQUEUED;
+               if (tq == DISPATCH_QUEUE_WAKEUP_MGR) {
+                       enqueued = DISPATCH_QUEUE_ENQUEUED_ON_MGR;
+               }
                os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
-                       new_state = old_state - to_unlock;
-                       if (full_width_upgrade_allowed && _dq_state_is_runnable(new_state) &&
-                                       _dq_state_has_pending_barrier(new_state)) {
-                               new_state += DISPATCH_QUEUE_IN_BARRIER;
-                               new_state += DISPATCH_QUEUE_WIDTH_INTERVAL;
-                               new_state -= DISPATCH_QUEUE_PENDING_BARRIER;
-                               new_state += to_unlock & DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
-                       } else {
-                               new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state);
-                               if (_dq_state_should_wakeup(new_state)) {
-                               // drain was not interrupted for suspension
-                                       // we will reenqueue right away, just put ENQUEUED back
-                                       new_state |= DISPATCH_QUEUE_ENQUEUED;
-                                       new_state |= DISPATCH_QUEUE_DIRTY;
-                               }
+                       new_state  = old_state - owned;
+                       new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
+                       new_state |= DISPATCH_QUEUE_DIRTY;
+                       if (_dq_state_is_runnable(new_state) &&
+                                       !_dq_state_is_enqueued(new_state)) {
+                       // drain was not interrupted for suspension
+                               // we will reenqueue right away, just put ENQUEUED back
+                               new_state |= enqueued;
                        }
                });
-               if (_dq_state_is_in_barrier(new_state)) {
-                       // we did a "full width upgrade" and just added IN_BARRIER
-                       // so adjust what we own and drain again
-                       to_unlock &= DISPATCH_QUEUE_ENQUEUED;
-                       to_unlock += DISPATCH_QUEUE_IN_BARRIER;
-                       to_unlock += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
-                       goto drain_pending_barrier;
-               }
-               if (_dq_state_has_override(old_state)) {
+               old_state -= owned;
+               if (_dq_state_received_override(old_state)) {
                        // Ensure that the root queue sees that this thread was overridden.
-                       _dispatch_set_defaultpriority_override();
+                       _dispatch_set_basepri_override_qos(_dq_state_max_qos(new_state));
                }
-
-               if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) {
-                       return _dispatch_queue_push(tq, dq, 0);
+               if ((old_state ^ new_state) & enqueued) {
+                       dispatch_assert(_dq_state_is_enqueued(new_state));
+                       return _dispatch_queue_push_queue(tq, dq, new_state);
                }
        }
 
-       return _dispatch_release_tailcall(dq);
+       _dispatch_release_2_tailcall(dq);
 }
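
The reenqueue epilogue above compresses several rules into one rmw loop. A standalone sketch with illustrative state bits (the real DISPATCH_QUEUE_* mask layout differs): give back what we owned, force DIRTY so the next drainer re-checks for work, and put ENQUEUED back only when the queue is still runnable and nobody enqueued it meanwhile.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

// Illustrative bit layout, not the real dq_state encoding.
#define S_DRAIN_LOCK_MASK 0x00ffull
#define S_DIRTY           0x0100ull
#define S_ENQUEUED        0x0200ull
#define S_SUSPENDED       0x0400ull

static bool s_is_runnable(uint64_t s) { return !(s & S_SUSPENDED); }

static uint64_t
unlock_for_reenqueue(_Atomic uint64_t *state, uint64_t owned)
{
        uint64_t old_s = atomic_load_explicit(state, memory_order_relaxed);
        uint64_t new_s;
        do {
                new_s = (old_s - owned) & ~S_DRAIN_LOCK_MASK;
                new_s |= S_DIRTY;
                if (s_is_runnable(new_s) && !(new_s & S_ENQUEUED)) {
                        new_s |= S_ENQUEUED;  // we will reenqueue right away
                }
        } while (!atomic_compare_exchange_weak_explicit(state, &old_s,
                        new_s, memory_order_release, memory_order_relaxed));
        return new_s;
}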
 
 DISPATCH_ALWAYS_INLINE
@@ -1742,7 +1889,7 @@ _dispatch_queue_class_probe(dispatch_queue_class_t dqu)
        // seq_cst wrt atomic store to dq_state <rdar://problem/14637483>
        // seq_cst wrt atomic store to dq_flags <rdar://problem/22623242>
        tail = os_atomic_load2o(dqu._oq, oq_items_tail, ordered);
-       return slowpath(tail != NULL);
+       return unlikely(tail != NULL);
 }
 
 DISPATCH_ALWAYS_INLINE DISPATCH_CONST
@@ -1755,87 +1902,12 @@ _dispatch_is_in_root_queues_array(dispatch_queue_t dq)
 
 DISPATCH_ALWAYS_INLINE DISPATCH_CONST
 static inline dispatch_queue_t
-_dispatch_get_root_queue(qos_class_t priority, bool overcommit)
-{
-       if (overcommit) switch (priority) {
-       case _DISPATCH_QOS_CLASS_MAINTENANCE:
-               return &_dispatch_root_queues[
-                               DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT];
-       case _DISPATCH_QOS_CLASS_BACKGROUND:
-               return &_dispatch_root_queues[
-                               DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT];
-       case _DISPATCH_QOS_CLASS_UTILITY:
-               return &_dispatch_root_queues[
-                               DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT];
-       case _DISPATCH_QOS_CLASS_DEFAULT:
-               return &_dispatch_root_queues[
-                               DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT];
-       case _DISPATCH_QOS_CLASS_USER_INITIATED:
-               return &_dispatch_root_queues[
-                               DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT];
-       case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
-               return &_dispatch_root_queues[
-                               DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT];
-       } else switch (priority) {
-       case _DISPATCH_QOS_CLASS_MAINTENANCE:
-               return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS];
-       case _DISPATCH_QOS_CLASS_BACKGROUND:
-               return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS];
-       case _DISPATCH_QOS_CLASS_UTILITY:
-               return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS];
-       case _DISPATCH_QOS_CLASS_DEFAULT:
-               return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS];
-       case _DISPATCH_QOS_CLASS_USER_INITIATED:
-               return &_dispatch_root_queues[
-                               DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS];
-       case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
-               return &_dispatch_root_queues[
-                               DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS];
-       }
-       return NULL;
-}
-
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-DISPATCH_ALWAYS_INLINE DISPATCH_CONST
-static inline dispatch_queue_t
-_dispatch_get_root_queue_for_priority(pthread_priority_t pp, bool overcommit)
-{
-       uint32_t idx;
-
-       pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-       idx = (uint32_t)__builtin_ffs((int)pp);
-       if (unlikely(!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS]
-                       .dq_priority)) {
-               // If kernel doesn't support maintenance, bottom bit is background.
-               // Shift to our idea of where background bit is.
-               idx++;
-       }
-       // ffs starts at 1, and account for the QOS_CLASS_SHIFT
-       // if pp is 0, idx is 0 or 1 and this will wrap to a value larger than
-       // DISPATCH_QOS_COUNT
-       idx -= (_PTHREAD_PRIORITY_QOS_CLASS_SHIFT + 1);
-       if (unlikely(idx >= DISPATCH_QUEUE_QOS_COUNT)) {
-               DISPATCH_CLIENT_CRASH(pp, "Corrupted priority");
-       }
-       return &_dispatch_root_queues[2 * idx + overcommit];
-}
-#endif
-
-DISPATCH_ALWAYS_INLINE DISPATCH_CONST
-static inline dispatch_queue_t
-_dispatch_get_root_queue_with_overcommit(dispatch_queue_t rq, bool overcommit)
+_dispatch_get_root_queue(dispatch_qos_t qos, bool overcommit)
 {
-       bool rq_overcommit = (rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
-       // root queues in _dispatch_root_queues are not overcommit for even indices
-       // and overcommit for odd ones, so fixing overcommit is either returning
-       // the same queue, or picking its neighbour in _dispatch_root_queues
-       if (overcommit && !rq_overcommit) {
-               return rq + 1;
-       }
-       if (!overcommit && rq_overcommit) {
-               return rq - 1;
+       if (unlikely(qos == DISPATCH_QOS_UNSPECIFIED || qos > DISPATCH_QOS_MAX)) {
+               DISPATCH_CLIENT_CRASH(qos, "Corrupted priority");
        }
-       return rq;
+       return &_dispatch_root_queues[2 * (qos - 1) + overcommit];
 }
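
The root queue table interleaves an overcommit variant after each QoS level, so the lookup is pure arithmetic. A worked example, assuming a hypothetical numbering where DISPATCH_QOS_MAINTENANCE == 1:

#include <stdbool.h>

// index layout: even = non-overcommit, odd = overcommit
//   qos 1 (maintenance), overcommit 0 -> index 0
//   qos 1 (maintenance), overcommit 1 -> index 1
//   qos 4 (default),     overcommit 1 -> index 7
static inline unsigned
root_queue_index(unsigned qos, bool overcommit)
{
        return 2 * (qos - 1) + overcommit;
}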
 
 DISPATCH_ALWAYS_INLINE
@@ -1844,23 +1916,21 @@ _dispatch_queue_set_bound_thread(dispatch_queue_t dq)
 {
        // Tag thread-bound queues with the owning thread
        dispatch_assert(_dispatch_queue_is_thread_bound(dq));
-       mach_port_t old_owner, self = _dispatch_tid_self();
-       uint64_t dq_state = os_atomic_or_orig2o(dq, dq_state, self, relaxed);
-       if (unlikely(old_owner = _dq_state_drain_owner(dq_state))) {
-               DISPATCH_INTERNAL_CRASH(old_owner, "Queue bound twice");
-       }
+       uint64_t old_state, new_state;
+       os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
+               new_state = old_state;
+               new_state &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK;
+               new_state |= _dispatch_lock_value_for_self();
+       });
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_queue_clear_bound_thread(dispatch_queue_t dq)
 {
-       uint64_t dq_state, value;
-
        dispatch_assert(_dispatch_queue_is_thread_bound(dq));
-       os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
-               value = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(dq_state);
-       });
+       _dispatch_queue_atomic_flags_clear(dq, DQF_THREAD_BOUND|DQF_CANNOT_TRYSYNC);
+       os_atomic_and2o(dq, dq_state, ~DISPATCH_QUEUE_DRAIN_OWNER_MASK, relaxed);
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -1884,13 +1954,12 @@ _dispatch_set_pthread_root_queue_observer_hooks(
 #pragma mark dispatch_priority
 
 DISPATCH_ALWAYS_INLINE
-static inline pthread_priority_t
-_dispatch_get_defaultpriority(void)
+static inline dispatch_priority_t
+_dispatch_get_basepri(void)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-       pthread_priority_t pp = (uintptr_t)_dispatch_thread_getspecific(
-                       dispatch_defaultpriority_key);
-       return pp;
+       return (dispatch_priority_t)(uintptr_t)_dispatch_thread_getspecific(
+                       dispatch_basepri_key);
 #else
        return 0;
 #endif
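
The base priority travels in thread-specific data as a packed 32-bit value smuggled through a pointer-sized slot. A sketch using plain pthread TSD (libdispatch actually uses reserved direct keys; the key setup here is hypothetical and omitted):

#include <pthread.h>
#include <stdint.h>

static pthread_key_t basepri_key;  // assume created at process start

static uint32_t
get_basepri(void)
{
        return (uint32_t)(uintptr_t)pthread_getspecific(basepri_key);
}

static void
set_basepri(uint32_t dbp)
{
        (void)pthread_setspecific(basepri_key, (void *)(uintptr_t)dbp);
}
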
@@ -1898,99 +1967,107 @@ _dispatch_get_defaultpriority(void)
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_reset_defaultpriority(pthread_priority_t pp)
+_dispatch_reset_basepri(dispatch_priority_t dbp)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-       pthread_priority_t old_pp = _dispatch_get_defaultpriority();
+       dispatch_priority_t old_dbp = _dispatch_get_basepri();
        // If an inner-loop or'd in the override flag to the per-thread priority,
        // it needs to be propagated up the chain.
-       pp |= old_pp & _PTHREAD_PRIORITY_OVERRIDE_FLAG;
-       _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp);
+       dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK;
+       dbp |= (old_dbp & DISPATCH_PRIORITY_OVERRIDE_MASK);
+       _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp);
 #else
-       (void)pp;
+       (void)dbp;
 #endif
 }
 
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_qos_t
+_dispatch_get_basepri_override_qos_floor(void)
+{
+       dispatch_priority_t dbp = _dispatch_get_basepri();
+       dispatch_qos_t qos = _dispatch_priority_qos(dbp);
+       dispatch_qos_t oqos = _dispatch_priority_override_qos(dbp);
+       dispatch_qos_t qos_floor = MAX(qos, oqos);
+       return qos_floor ? qos_floor : DISPATCH_QOS_SATURATED;
+}
+
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_set_defaultpriority_override(void)
+_dispatch_set_basepri_override_qos(dispatch_qos_t qos)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-       pthread_priority_t old_pp = _dispatch_get_defaultpriority();
-       pthread_priority_t pp = old_pp | _PTHREAD_PRIORITY_OVERRIDE_FLAG;
-
-       _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp);
+       dispatch_priority_t dbp = _dispatch_get_basepri();
+       if (_dispatch_priority_override_qos(dbp) >= qos) return;
+       dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK;
+       dbp |= qos << DISPATCH_PRIORITY_OVERRIDE_SHIFT;
+       _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp);
+#else
+       (void)qos;
 #endif
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dispatch_reset_defaultpriority_override(void)
+_dispatch_reset_basepri_override(void)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-       pthread_priority_t old_pp = _dispatch_get_defaultpriority();
-       pthread_priority_t pp = old_pp &
-                       ~((pthread_priority_t)_PTHREAD_PRIORITY_OVERRIDE_FLAG);
-
-       _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp);
-       return unlikely(pp != old_pp);
+       dispatch_priority_t dbp = _dispatch_get_basepri();
+       dispatch_qos_t oqos = _dispatch_priority_override_qos(dbp);
+       if (oqos) {
+               dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK;
+               _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp);
+               return oqos != DISPATCH_QOS_SATURATED;
+       }
 #endif
        return false;
 }
 
 DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq,
-               dispatch_queue_t tq)
+static inline dispatch_priority_t
+_dispatch_set_basepri(dispatch_priority_t dbp)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-       const dispatch_priority_t rootqueue_flag = _PTHREAD_PRIORITY_ROOTQUEUE_FLAG;
-       const dispatch_priority_t inherited_flag = _PTHREAD_PRIORITY_INHERIT_FLAG;
-       const dispatch_priority_t defaultqueue_flag =
-                       _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
-       dispatch_priority_t dqp = dq->dq_priority, tqp = tq->dq_priority;
-       if ((!(dqp & ~_PTHREAD_PRIORITY_FLAGS_MASK) || (dqp & inherited_flag)) &&
-                       (tqp & rootqueue_flag)) {
-               if (tqp & defaultqueue_flag) {
-                       dq->dq_priority = 0;
-               } else {
-                       dq->dq_priority = (tqp & ~rootqueue_flag) | inherited_flag;
+       const dispatch_priority_t preserved_mask =
+                       DISPATCH_PRIORITY_OVERRIDE_MASK | DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
+       dispatch_priority_t old_dbp = _dispatch_get_basepri();
+       if (old_dbp) {
+               dispatch_priority_t flags, defaultqueue, basepri;
+               flags = (dbp & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE);
+               defaultqueue = (old_dbp & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE);
+               basepri = old_dbp & DISPATCH_PRIORITY_REQUESTED_MASK;
+               dbp &= DISPATCH_PRIORITY_REQUESTED_MASK;
+               if (!dbp) {
+                       flags = DISPATCH_PRIORITY_FLAG_INHERIT | defaultqueue;
+                       dbp = basepri;
+               } else if (dbp < basepri && !defaultqueue) { // rdar://16349734
+                       dbp = basepri;
                }
+               dbp |= flags | (old_dbp & preserved_mask);
+       } else {
+               dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK;
        }
+       _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp);
+       return old_dbp;
 #else
-       (void)dq; (void)tq;
+       (void)dbp;
+       return 0;
 #endif
 }
 
 DISPATCH_ALWAYS_INLINE
-static inline pthread_priority_t
-_dispatch_set_defaultpriority(pthread_priority_t pp, pthread_priority_t *new_pp)
+static inline dispatch_priority_t
+_dispatch_set_basepri_wlh(dispatch_priority_t dbp)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-       const pthread_priority_t default_priority_preserved_flags =
-                       _PTHREAD_PRIORITY_OVERRIDE_FLAG|_PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
-       pthread_priority_t old_pp = _dispatch_get_defaultpriority();
-       if (old_pp) {
-               pthread_priority_t flags, defaultqueue, basepri;
-               flags = (pp & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
-               defaultqueue = (old_pp & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
-               basepri = (old_pp & ~_PTHREAD_PRIORITY_FLAGS_MASK);
-               pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
-               if (!pp) {
-                       flags = _PTHREAD_PRIORITY_INHERIT_FLAG | defaultqueue;
-                       pp = basepri;
-               } else if (pp < basepri && !defaultqueue) { // rdar://16349734
-                       pp = basepri;
-               }
-               pp |= flags | (old_pp & default_priority_preserved_flags);
-       }
-       _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp);
-       if (new_pp) *new_pp = pp;
-       return old_pp;
+       dispatch_assert(!_dispatch_get_basepri());
+       // _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED)
+       dbp |= DISPATCH_QOS_SATURATED << DISPATCH_PRIORITY_OVERRIDE_SHIFT;
+       _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp);
 #else
-       (void)pp; (void)new_pp;
-       return 0;
+       (void)dbp;
 #endif
+       return 0;
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -1998,25 +2075,24 @@ static inline pthread_priority_t
 _dispatch_priority_adopt(pthread_priority_t pp, unsigned long flags)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-       pthread_priority_t defaultpri = _dispatch_get_defaultpriority();
-       bool enforce, inherited, defaultqueue;
-       enforce = (flags & DISPATCH_PRIORITY_ENFORCE) ||
+       dispatch_priority_t inherited, defaultqueue, dbp = _dispatch_get_basepri();
+       pthread_priority_t basepp = _dispatch_priority_to_pp_strip_flags(dbp);
+       bool enforce = (flags & DISPATCH_PRIORITY_ENFORCE) ||
                        (pp & _PTHREAD_PRIORITY_ENFORCE_FLAG);
-       inherited = (defaultpri & _PTHREAD_PRIORITY_INHERIT_FLAG);
-       defaultqueue = (defaultpri & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
-       defaultpri &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
+       inherited = (dbp & DISPATCH_PRIORITY_FLAG_INHERIT);
+       defaultqueue = (dbp & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE);
        pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
 
        if (!pp) {
-               return defaultpri;
+               return basepp;
        } else if (defaultqueue) { // rdar://16349734
                return pp;
-       } else if (pp < defaultpri) {
-               return defaultpri;
+       } else if (pp < basepp) {
+               return basepp;
        } else if (enforce || inherited) {
                return pp;
        } else {
-               return defaultpri;
+               return basepp;
        }
 #else
        (void)pp; (void)flags;
@@ -2025,22 +2101,61 @@ _dispatch_priority_adopt(pthread_priority_t pp, unsigned long flags)
 }
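
The adopt rules above reduce to a small decision function. This sketch restates them with the flags pre-decoded (names illustrative, QoS bits assumed already stripped of flag bits):

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t pp_t;  // qos bits only, flags already stripped

static pp_t
adopt(pp_t pp, pp_t basepp, bool enforce, bool inherited, bool defaultqueue)
{
        if (!pp) return basepp;          // nothing requested: use the base
        if (defaultqueue) return pp;     // default root queue, rdar://16349734
        if (pp < basepp) return basepp;  // the base priority is a floor
        if (enforce || inherited) return pp;
        return basepp;
}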
 
 DISPATCH_ALWAYS_INLINE
-static inline pthread_priority_t
-_dispatch_priority_inherit_from_root_queue(pthread_priority_t pp,
+static inline void
+_dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq,
+               dispatch_queue_t tq)
+{
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+       const dispatch_priority_t rootqueue_flag = DISPATCH_PRIORITY_FLAG_ROOTQUEUE;
+       const dispatch_priority_t inherited_flag = DISPATCH_PRIORITY_FLAG_INHERIT;
+       const dispatch_priority_t defaultqueue_flag =
+                       DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE;
+       dispatch_priority_t pri = dq->dq_priority, tpri = tq->dq_priority;
+
+       if ((!_dispatch_priority_qos(pri) || (pri & inherited_flag)) &&
+                       (tpri & rootqueue_flag)) {
+               if (_dispatch_priority_override_qos(pri) == DISPATCH_QOS_SATURATED) {
+                       pri &= DISPATCH_PRIORITY_OVERRIDE_MASK;
+               } else {
+                       pri = 0;
+               }
+               if (tpri & defaultqueue_flag) {
+                       // <rdar://problem/32921639> base queues need to know they target
+               // in _dispatch_queue_class_wakeup() can fall back to QOS_DEFAULT
+                       // in _dispatch_queue_class_wakeup() can fallback to QOS_DEFAULT
+                       // if no other priority was provided.
+                       pri |= defaultqueue_flag;
+               } else {
+                       pri |= (tpri & ~rootqueue_flag) | inherited_flag;
+               }
+               dq->dq_priority = pri;
+       } else if (pri & defaultqueue_flag) {
+               // the DEFAULTQUEUE flag is only set on queues due to the code above,
+               // and must never be kept if we don't target a global root queue.
+               dq->dq_priority = (pri & ~defaultqueue_flag);
+       }
+#else
+       (void)dq; (void)tq;
+#endif
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_priority_t
+_dispatch_priority_inherit_from_root_queue(dispatch_priority_t pri,
                dispatch_queue_t rq)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-       pthread_priority_t p = pp & ~_PTHREAD_PRIORITY_FLAGS_MASK;
-       pthread_priority_t rqp = rq->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK;
-       pthread_priority_t defaultqueue =
-                       rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
+       dispatch_priority_t p = pri & DISPATCH_PRIORITY_REQUESTED_MASK;
+       dispatch_priority_t rqp = rq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK;
+       dispatch_priority_t defaultqueue =
+                       rq->dq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE;
 
        if (!p || (!defaultqueue && p < rqp)) {
                p = rqp | defaultqueue;
        }
-       return p | (rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
+       return p | (rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT);
 #else
-       (void)rq; (void)pp;
+       (void)rq; (void)pri;
        return 0;
 #endif
 }
@@ -2078,7 +2193,7 @@ _dispatch_priority_compute_update(pthread_priority_t pp)
        pthread_priority_t overcommit = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
        if (unlikely(cur_priority & unbind)) {
                // else we always need an update if the NEEDS_UNBIND flag is set
-               // the slowpath in _dispatch_set_priority_and_voucher_slow() will
+               // the slow path in _dispatch_set_priority_and_voucher_slow() will
                // adjust the priority further with the proper overcommitness
                return pp ? pp : (cur_priority & ~unbind);
        } else {
@@ -2092,7 +2207,7 @@ _dispatch_priority_compute_update(pthread_priority_t pp)
 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
 static inline voucher_t
 _dispatch_set_priority_and_voucher(pthread_priority_t pp,
-               voucher_t v, _dispatch_thread_set_self_t flags)
+               voucher_t v, dispatch_thread_set_self_t flags)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
        pp = _dispatch_priority_compute_update(pp);
@@ -2121,7 +2236,7 @@ _dispatch_set_priority_and_voucher(pthread_priority_t pp,
 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
 static inline voucher_t
 _dispatch_adopt_priority_and_set_voucher(pthread_priority_t pp,
-               voucher_t v, _dispatch_thread_set_self_t flags)
+               voucher_t v, dispatch_thread_set_self_t flags)
 {
        pthread_priority_t p = 0;
        if (pp != DISPATCH_NO_PRIORITY) {
@@ -2141,7 +2256,7 @@ _dispatch_reset_priority_and_voucher(pthread_priority_t pp, voucher_t v)
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_reset_voucher(voucher_t v, _dispatch_thread_set_self_t flags)
+_dispatch_reset_voucher(voucher_t v, dispatch_thread_set_self_t flags)
 {
        flags |= DISPATCH_VOUCHER_CONSUME | DISPATCH_VOUCHER_REPLACE;
        (void)_dispatch_set_priority_and_voucher(0, v, flags);
@@ -2149,135 +2264,73 @@ _dispatch_reset_voucher(voucher_t v, _dispatch_thread_set_self_t flags)
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dispatch_queue_need_override(dispatch_queue_class_t dqu, pthread_priority_t pp)
-{
-       // global queues have their override set to DISPATCH_SATURATED_OVERRIDE
-       // which makes this test always return false for them.
-       return dqu._oq->oq_override < (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_queue_received_override(dispatch_queue_class_t dqu,
-               pthread_priority_t pp)
+_dispatch_queue_need_override(dispatch_queue_class_t dqu, dispatch_qos_t qos)
 {
-       dispatch_assert(dqu._oq->oq_override != DISPATCH_SATURATED_OVERRIDE);
-       return dqu._oq->oq_override > (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
+       uint64_t dq_state = os_atomic_load2o(dqu._dq, dq_state, relaxed);
+       // dq_priority "override qos" contains the priority at which the queue
+       // is already running for thread-bound queues.
+       // For non thread-bound queues, the qos of the queue may not be observed
+       // when the first work item is dispatched synchronously.
+       return _dq_state_max_qos(dq_state) < qos &&
+                       _dispatch_priority_override_qos(dqu._dq->dq_priority) < qos;
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
 _dispatch_queue_need_override_retain(dispatch_queue_class_t dqu,
-               pthread_priority_t pp)
+               dispatch_qos_t qos)
 {
-       if (_dispatch_queue_need_override(dqu, pp)) {
-               _os_object_retain_internal_inline(dqu._oq->_as_os_obj);
+       if (_dispatch_queue_need_override(dqu, qos)) {
+               _os_object_retain_internal_n_inline(dqu._oq->_as_os_obj, 2);
                return true;
        }
        return false;
 }
 
 DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_queue_reinstate_override_priority(dispatch_queue_class_t dqu,
-               dispatch_priority_t new_op)
-{
-       dispatch_priority_t old_op;
-       new_op &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-       if (!new_op) return false;
-       os_atomic_rmw_loop2o(dqu._oq, oq_override, old_op, new_op, relaxed, {
-               if (new_op <= old_op) {
-                       os_atomic_rmw_loop_give_up(return false);
-               }
-       });
-       return true;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_queue_override_priority(dispatch_queue_class_t dqu,
-               pthread_priority_t *pp, dispatch_wakeup_flags_t *flags)
+static inline dispatch_qos_t
+_dispatch_queue_override_qos(dispatch_queue_class_t dqu, dispatch_qos_t qos)
 {
-       os_mpsc_queue_t oq = dqu._oq;
-       dispatch_priority_t qp = oq->oq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-       dispatch_priority_t np = (*pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
-       dispatch_priority_t o;
-
-       _dispatch_assert_is_valid_qos_override(np);
-       if (oq->oq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG) {
-               qp = 0;
-       } else if (*flags & DISPATCH_WAKEUP_SLOW_WAITER) {
-               // when a queue is used as a lock its priority doesn't count
-       } else if (np < qp) {
-               // for asynchronous workitems, queue priority is the floor for overrides
-               np = qp;
-       }
-       *flags &= ~_DISPATCH_WAKEUP_OVERRIDE_BITS;
-
-       // this optimizes for the case when no update of the override is required
-       // os_atomic_rmw_loop2o optimizes for the case when the update happens,
-       // and can't be used.
-       o = os_atomic_load2o(oq, oq_override, relaxed);
-       do {
-               if (likely(np <= o)) break;
-       } while (unlikely(!os_atomic_cmpxchgvw2o(oq, oq_override, o, np, &o, relaxed)));
-
-       if (np <= o) {
-               *pp = o;
-       } else {
-               *flags |= DISPATCH_WAKEUP_OVERRIDING;
-               *pp = np;
-       }
-       if (o > qp) {
-               *flags |= DISPATCH_WAKEUP_WAS_OVERRIDDEN;
+       if (dqu._oq->oq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE) {
+               // queues targeting the default root queue use any asynchronous
+               // workitem priority available and fall back to QOS_DEFAULT otherwise.
+               return qos ? qos : DISPATCH_QOS_DEFAULT;
        }
+       // for asynchronous workitems, queue priority is the floor for overrides
+       return MAX(qos, _dispatch_priority_qos(dqu._oq->oq_priority));
 }
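
A worked restatement of this override floor, with illustrative QoS values (DISPATCH_QOS_DEFAULT assumed to be 4): an async item at qos 2 on a queue whose requested qos is 4 wakes the queue at 4, while on a queue carrying the DEFAULTQUEUE flag an unspecified item qos falls back to DEFAULT.

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t qos_t;
enum { QOS_UNSPECIFIED = 0, QOS_DEFAULT = 4 };  // illustrative values

static qos_t
override_qos(qos_t item_qos, qos_t queue_qos, bool defaultqueue)
{
        if (defaultqueue) {
                // default root queue: take the item's qos, else DEFAULT
                return item_qos ? item_qos : QOS_DEFAULT;
        }
        // queue priority is the floor for asynchronous overrides
        return item_qos > queue_qos ? item_qos : queue_qos;
}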
 
-DISPATCH_ALWAYS_INLINE
-static inline dispatch_priority_t
-_dispatch_queue_reset_override_priority(dispatch_queue_class_t dqu,
-               bool qp_is_floor)
-{
-       os_mpsc_queue_t oq = dqu._oq;
-       dispatch_priority_t p = 0;
-       if (qp_is_floor) {
-               // thread bound queues floor their dq_override to their
-               // priority to avoid receiving useless overrides
-               p = oq->oq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-       }
-       dispatch_priority_t o = os_atomic_xchg2o(oq, oq_override, p, relaxed);
-       dispatch_assert(o != DISPATCH_SATURATED_OVERRIDE);
-       return (o > p) ? o : 0;
-}
+#define DISPATCH_PRIORITY_PROPAGATE_CURRENT 0x1
+#define DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC 0x2
 
 DISPATCH_ALWAYS_INLINE
 static inline pthread_priority_t
-_dispatch_priority_propagate(void)
+_dispatch_priority_compute_propagated(pthread_priority_t pp,
+               unsigned int flags)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-       pthread_priority_t pp = _dispatch_get_priority();
+       if (flags & DISPATCH_PRIORITY_PROPAGATE_CURRENT) {
+               pp = _dispatch_get_priority();
+       }
        pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
-       if (pp > _dispatch_user_initiated_priority) {
+       if (!(flags & DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC) &&
+                       pp > _dispatch_qos_to_pp(DISPATCH_QOS_USER_INITIATED)) {
                // Cap QOS for propagation at user-initiated <rdar://16681262&16998036>
-               pp = _dispatch_user_initiated_priority;
+               return _dispatch_qos_to_pp(DISPATCH_QOS_USER_INITIATED);
        }
        return pp;
 #else
+       (void)pp; (void)flags;
        return 0;
 #endif
 }
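
For example, work submitted from a USER_INTERACTIVE thread propagates at most USER_INITIATED to asynchronous work, while sync IPC keeps the full priority. A sketch with illustrative QoS numbering (the real code works on pthread_priority_t with the flag bits stripped):

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t qos_t;
enum { QOS_USER_INITIATED = 5, QOS_USER_INTERACTIVE = 6 };  // illustrative

static qos_t
propagated_qos(qos_t current, bool for_sync_ipc)
{
        if (!for_sync_ipc && current > QOS_USER_INITIATED) {
                return QOS_USER_INITIATED;  // cap async propagation
        }
        return current;  // sync IPC keeps the full priority
}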
 
-// including maintenance
 DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_is_background_priority(pthread_priority_t pp)
+static inline pthread_priority_t
+_dispatch_priority_propagate(void)
 {
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-       pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
-       return pp && (pp <= _dispatch_background_priority);
-#else
-       return false;
-#endif
+       return _dispatch_priority_compute_propagated(0,
+                       DISPATCH_PRIORITY_PROPAGATE_CURRENT);
 }
 
 // including maintenance
@@ -2285,7 +2338,12 @@ DISPATCH_ALWAYS_INLINE
 static inline bool
 _dispatch_is_background_thread(void)
 {
-       return _dispatch_is_background_priority(_dispatch_get_priority());
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+       pthread_priority_t pp = _dispatch_get_priority();
+       return _dispatch_qos_is_background(_dispatch_qos_from_pp(pp));
+#else
+       return false;
+#endif
 }
 
 #pragma mark -
@@ -2301,16 +2359,21 @@ _dispatch_block_has_private_data(const dispatch_block_t block)
        return (_dispatch_Block_invoke(block) == _dispatch_block_special_invoke);
 }
 
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_block_sync_should_enforce_qos_class(dispatch_block_flags_t flags)
+DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
+static inline pthread_priority_t
+_dispatch_block_invoke_should_set_priority(dispatch_block_flags_t flags,
+        pthread_priority_t new_pri)
 {
-       /*
-        * Generates better assembly than the actual readable test:
-        *       (flags & ENFORCE_QOS_CLASS) || !(flags & INHERIT_QOS_FLAGS)
-        */
-       flags &= DISPATCH_BLOCK_ENFORCE_QOS_CLASS | DISPATCH_BLOCK_INHERIT_QOS_CLASS;
-       return flags != DISPATCH_BLOCK_INHERIT_QOS_CLASS;
+       pthread_priority_t old_pri, p = 0;  // 0 means do not change priority.
+       if ((flags & DISPATCH_BLOCK_HAS_PRIORITY)
+                       && ((flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) ||
+                       !(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS))) {
+               old_pri = _dispatch_get_priority();
+               new_pri &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
+               p = old_pri & ~_PTHREAD_PRIORITY_FLAGS_MASK;
+               if (!p || p >= new_pri) p = 0;
+       }
+       return p;
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -2452,12 +2515,14 @@ _dispatch_continuation_invoke_inline(dispatch_object_t dou, voucher_t ov,
                        _dispatch_continuation_free_to_cache_limit(dc1);
                }
        });
+       _dispatch_perfmon_workitem_inc();
 }
 
 DISPATCH_ALWAYS_INLINE_NDEBUG
 static inline void
-_dispatch_continuation_pop_inline(dispatch_object_t dou, dispatch_queue_t dq,
-               dispatch_invoke_flags_t flags)
+_dispatch_continuation_pop_inline(dispatch_object_t dou,
+               dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
+               dispatch_queue_t dq)
 {
        dispatch_pthread_root_queue_observer_hooks_t observer_hooks =
                        _dispatch_get_pthread_root_queue_observer_hooks();
@@ -2465,10 +2530,9 @@ _dispatch_continuation_pop_inline(dispatch_object_t dou, dispatch_queue_t dq,
        _dispatch_trace_continuation_pop(dq, dou);
        flags &= _DISPATCH_INVOKE_PROPAGATE_MASK;
        if (_dispatch_object_has_vtable(dou)) {
-               dx_invoke(dou._do, flags);
+               dx_invoke(dou._do, dic, flags);
        } else {
-               voucher_t ov = dq->dq_override_voucher;
-               _dispatch_continuation_invoke_inline(dou, ov, flags);
+               _dispatch_continuation_invoke_inline(dou, DISPATCH_NO_VOUCHER, flags);
        }
        if (observer_hooks) observer_hooks->queue_did_execute(dq);
 }
@@ -2511,21 +2575,21 @@ _dispatch_continuation_priority_set(dispatch_continuation_t dc,
 }
 
 DISPATCH_ALWAYS_INLINE
-static inline pthread_priority_t
-_dispatch_continuation_get_override_priority(dispatch_queue_t dq,
+static inline dispatch_qos_t
+_dispatch_continuation_override_qos(dispatch_queue_t dq,
                dispatch_continuation_t dc)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-       pthread_priority_t p = dc->dc_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
+       dispatch_qos_t dc_qos = _dispatch_qos_from_pp(dc->dc_priority);
        bool enforce = dc->dc_priority & _PTHREAD_PRIORITY_ENFORCE_FLAG;
-       pthread_priority_t dqp = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-       bool defaultqueue = dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
+       dispatch_qos_t dq_qos = _dispatch_priority_qos(dq->dq_priority);
+       bool defaultqueue = dq->dq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE;
 
        dispatch_assert(dc->dc_priority != DISPATCH_NO_PRIORITY);
-       if (p && (enforce || !dqp || defaultqueue)) {
-               return p;
+       if (dc_qos && (enforce || !dq_qos || defaultqueue)) {
+               return dc_qos;
        }
-       return dqp;
+       return dq_qos;
 #else
        (void)dq; (void)dc;
        return 0;
@@ -2569,6 +2633,36 @@ _dispatch_continuation_init(dispatch_continuation_t dc,
        _dispatch_continuation_voucher_set(dc, dqu, flags);
 }
 
+#if HAVE_MACH
+#pragma mark dispatch_mach_reply_refs_t
+
+// assumes low bit of mach port names is always set
+#define DISPATCH_MACH_REPLY_PORT_UNOWNED 0x1u
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_mach_reply_mark_reply_port_owned(dispatch_mach_reply_refs_t dmr)
+{
+       dmr->du_ident &= ~DISPATCH_MACH_REPLY_PORT_UNOWNED;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_mach_reply_is_reply_port_owned(dispatch_mach_reply_refs_t dmr)
+{
+       mach_port_t reply_port = (mach_port_t)dmr->du_ident;
+       return reply_port ? !(reply_port & DISPATCH_MACH_REPLY_PORT_UNOWNED)
+                       : false;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline mach_port_t
+_dispatch_mach_reply_get_reply_port(mach_port_t reply_port)
+{
+       return reply_port ? (reply_port | DISPATCH_MACH_REPLY_PORT_UNOWNED) : 0;
+}
+
+#endif // HAVE_MACH
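+
The three helpers above steal the low bit of the stored reply-port name as an ownership flag, leaning on the stated invariant that valid Mach port names have that bit set. A toy round-trip with a hypothetical port value, showing how marking, querying and recovering the name interact:

    #include <assert.h>
    #include <stdint.h>

    #define PORT_UNOWNED 0x1u /* same trick as DISPATCH_MACH_REPLY_PORT_UNOWNED */

    int main(void)
    {
        uint32_t name  = 0x1103;              /* hypothetical name; low bit set */
        uint32_t ident = name | PORT_UNOWNED; /* stored form: unowned by default */

        assert(ident & PORT_UNOWNED);         /* is_reply_port_owned() == false */
        ident &= ~PORT_UNOWNED;               /* mark owned: clear the low bit */
        assert(ident && !(ident & PORT_UNOWNED)); /* now owned */

        uint32_t port = ident | PORT_UNOWNED; /* get_reply_port(): restore bit */
        assert(port == name);
        return 0;
    }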
+
 #endif // DISPATCH_PURE_C
 
 #endif /* __DISPATCH_INLINE_INTERNAL__ */
index 1f63cceaf81410dea4ad35b6c61c30a16b0b04bf..286e53458d3adca6db76e6766ee37fa7c940b58a 100644 (file)
@@ -38,6 +38,7 @@
 
 #ifdef __APPLE__
 #include <Availability.h>
+#include <os/availability.h>
 #include <TargetConditionals.h>
 
 #ifndef TARGET_OS_MAC_DESKTOP
 #endif
 
 #if TARGET_OS_MAC_DESKTOP
-#  define DISPATCH_HOST_SUPPORTS_OSX(x) \
+#  define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) \
                (__MAC_OS_X_VERSION_MIN_REQUIRED >= (x))
-#  if !DISPATCH_HOST_SUPPORTS_OSX(101000)
-#    error "OS X hosts older than OS X 10.10 aren't supported anymore"
-#  endif // !DISPATCH_HOST_SUPPORTS_OSX(101000)
+#  if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200)
+#    error "OS X hosts older than OS X 10.12 aren't supported anymore"
+#  endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200)
 #elif TARGET_OS_SIMULATOR
-#  define DISPATCH_HOST_SUPPORTS_OSX(x) \
+#  define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) \
                (IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED >= (x))
-#  if !DISPATCH_HOST_SUPPORTS_OSX(101000)
-#    error "Simulator hosts older than OS X 10.10 aren't supported anymore"
-#  endif // !DISPATCH_HOST_SUPPORTS_OSX(101000)
+#  if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200)
+#    error "Simulator hosts older than OS X 10.12 aren't supported anymore"
+#  endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200)
 #else
-#  define DISPATCH_HOST_SUPPORTS_OSX(x) 1
-#  if __IPHONE_OS_VERSION_MIN_REQUIRED < 70000
-#    error "iOS hosts older than iOS 7.0 aren't supported anymore"
+#  define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) 1
+#  if __IPHONE_OS_VERSION_MIN_REQUIRED < 90000
+#    error "iOS hosts older than iOS 9.0 aren't supported anymore"
 #  endif
 #endif
 
 #else // !__APPLE__
-#define DISPATCH_HOST_SUPPORTS_OSX(x) 0
+#define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) 0
 #endif // !__APPLE__
 
 
 #endif
 
 /* private.h must be included last to avoid picking up installed headers. */
+#include <pthread.h>
 #include "os/object_private.h"
 #include "queue_private.h"
 #include "source_private.h"
@@ -187,6 +189,8 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void);
 #define DISPATCH_USE_CLIENT_CALLOUT 1
 #endif
 
+#define DISPATCH_ALLOW_NON_LEAF_RETARGET 1
+
 /* The "_debug" library build */
 #ifndef DISPATCH_DEBUG
 #define DISPATCH_DEBUG 0
@@ -238,28 +242,29 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void);
 #if HAVE_MALLOC_MALLOC_H
 #include <malloc/malloc.h>
 #endif
-#if __has_include(<malloc_private.h>)
-#include <malloc_private.h>
-#endif // __has_include(<malloc_private.h)
 
 #include <sys/stat.h>
 
 #if !TARGET_OS_WIN32
-#include <sys/event.h>
 #include <sys/mount.h>
 #include <sys/queue.h>
+#ifdef __ANDROID__
+#include <linux/sysctl.h>
+#else
 #include <sys/sysctl.h>
+#endif /* __ANDROID__ */
 #include <sys/socket.h>
 #include <sys/time.h>
 #include <sys/mman.h>
 #include <netinet/in.h>
 #endif
-#if defined(__linux__)
-#include <sys/eventfd.h>
-#endif
 
 #ifdef __BLOCKS__
+#if __has_include(<Block_private.h>)
 #include <Block_private.h>
+#else
+#include "BlocksRuntime/Block_private.h"
+#endif // __has_include(<Block_private.h>)
 #include <Block.h>
 #endif /* __BLOCKS__ */
 
@@ -307,6 +312,31 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void);
 #define DISPATCH_CONCAT(x,y) DISPATCH_CONCAT1(x,y)
 #define DISPATCH_CONCAT1(x,y) x ## y
 
+#define DISPATCH_COUNT_ARGS(...) DISPATCH_COUNT_ARGS1(, ## __VA_ARGS__, \
+               _8, _7, _6, _5, _4, _3, _2, _1, _0)
+#define DISPATCH_COUNT_ARGS1(z, a, b, c, d, e, f, g, h, cnt, ...) cnt
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define DISPATCH_STRUCT_LE_2(a, b)        struct { a; b; }
+#define DISPATCH_STRUCT_LE_3(a, b, c)     struct { a; b; c; }
+#define DISPATCH_STRUCT_LE_4(a, b, c, d)  struct { a; b; c; d; }
+#else
+#define DISPATCH_STRUCT_LE_2(a, b)        struct { b; a; }
+#define DISPATCH_STRUCT_LE_3(a, b, c)     struct { c; b; a; }
+#define DISPATCH_STRUCT_LE_4(a, b, c, d)  struct { d; c; b; a; }
+#endif
+#if __has_feature(c_static_assert)
+#define DISPATCH_UNION_ASSERT(alias, st) \
+               _Static_assert(sizeof(struct { alias; }) == sizeof(st), "bogus union");
+#else
+#define DISPATCH_UNION_ASSERT(alias, st)
+#endif
+#define DISPATCH_UNION_LE(alias, ...) \
+               DISPATCH_UNION_ASSERT(alias, DISPATCH_CONCAT(DISPATCH_STRUCT_LE, \
+                               DISPATCH_COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)) \
+               union { alias; DISPATCH_CONCAT(DISPATCH_STRUCT_LE, \
+                               DISPATCH_COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__); }
+
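DISPATCH_UNION_LE arranges for the first-listed field to overlay the low-order bytes of the alias on either byte order, and, when _Static_assert is available (the feature test fixed above), checks that the aliased struct is exactly the size of the alias. A hedged illustration of the little-endian expansion for a 64-bit word split into two invented 32-bit halves; the asserts hold on a little-endian host:

    #include <assert.h>
    #include <stdint.h>

    /* Hand expansion (little-endian case) of something like:
     *   DISPATCH_UNION_LE(uint64_t dq_state,
     *           uint32_t dq_state_lo, uint32_t dq_state_hi)
     * On a big-endian target the struct members would be emitted in reverse
     * order so that dq_state_lo still overlays the low 32 bits. */
    typedef union {
        uint64_t dq_state;
        struct { uint32_t dq_state_lo; uint32_t dq_state_hi; };
    } dq_state_u;

    int main(void)
    {
        dq_state_u s = { .dq_state = 0x1122334455667788ull };
        assert(s.dq_state_lo == (uint32_t)s.dq_state);         /* 0x55667788 */
        assert(s.dq_state_hi == (uint32_t)(s.dq_state >> 32)); /* 0x11223344 */
        return 0;
    }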
 // workaround 6368156
 #ifdef NSEC_PER_SEC
 #undef NSEC_PER_SEC
@@ -338,16 +368,6 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void);
 #define unlikely(x) (!!(x))
 #endif // __GNUC__
 
-#if BYTE_ORDER == LITTLE_ENDIAN
-#define DISPATCH_STRUCT_LITTLE_ENDIAN_2(a, b)        struct { a; b; }
-#define DISPATCH_STRUCT_LITTLE_ENDIAN_3(a, b, c)     struct { a; b; c; }
-#define DISPATCH_STRUCT_LITTLE_ENDIAN_4(a, b, c, d)  struct { a; b; c; d; }
-#else
-#define DISPATCH_STRUCT_LITTLE_ENDIAN_2(a, b)        struct { b; a; }
-#define DISPATCH_STRUCT_LITTLE_ENDIAN_3(a, b, c)     struct { c; b; a; }
-#define DISPATCH_STRUCT_LITTLE_ENDIAN_4(a, b, c, d)  struct { d; c; b; a; }
-#endif
-
 #define _TAILQ_IS_ENQUEUED(elm, field) \
                ((elm)->field.tqe_prev != NULL)
 #define _TAILQ_MARK_NOT_ENQUEUED(elm, field) \
@@ -374,9 +394,9 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void);
 DISPATCH_EXPORT DISPATCH_NOINLINE
 void _dispatch_bug(size_t line, long val);
 
-#if HAVE_MACH
 DISPATCH_NOINLINE
 void _dispatch_bug_client(const char* msg);
+#if HAVE_MACH
 DISPATCH_NOINLINE
 void _dispatch_bug_mach_client(const char *msg, mach_msg_return_t kr);
 #endif // HAVE_MACH
@@ -434,25 +454,27 @@ void _dispatch_log(const char *msg, ...);
  * For reporting bugs within libdispatch when using the "_debug" version of the
  * library.
  */
-#if __GNUC__
+#if __APPLE__
 #define dispatch_assert(e) do { \
                if (__builtin_constant_p(e)) { \
                        dispatch_static_assert(e); \
                } else { \
-                       typeof(e) _e = fastpath(e); /* always eval 'e' */ \
-                       if (DISPATCH_DEBUG && !_e) { \
+                       typeof(e) _e = (e); /* always eval 'e' */ \
+                       if (unlikely(DISPATCH_DEBUG && !_e)) { \
                                _dispatch_abort(__LINE__, (long)_e); \
                        } \
                } \
        } while (0)
 #else
-static inline void _dispatch_assert(long e, long line) {
+static inline void
+_dispatch_assert(long e, size_t line)
+{
        if (DISPATCH_DEBUG && !e) _dispatch_abort(line, e);
 }
 #define dispatch_assert(e) _dispatch_assert((long)(e), __LINE__)
 #endif /* __GNUC__ */
 
-#if __GNUC__
+#if __APPLE__
 /*
  * A lot of API return zero upon success and not-zero on fail. Let's capture
  * and log the non-zero value
@@ -461,17 +483,19 @@ static inline void _dispatch_assert(long e, long line) {
                if (__builtin_constant_p(e)) { \
                        dispatch_static_assert(e); \
                } else { \
-                       typeof(e) _e = slowpath(e); /* always eval 'e' */ \
-                       if (DISPATCH_DEBUG && _e) { \
+                       typeof(e) _e = (e); /* always eval 'e' */ \
+                       if (unlikely(DISPATCH_DEBUG && _e)) { \
                                _dispatch_abort(__LINE__, (long)_e); \
                        } \
                } \
        } while (0)
 #else
-static inline void _dispatch_assert_zero(long e, long line) {
+static inline void
+_dispatch_assert_zero(long e, size_t line)
+{
        if (DISPATCH_DEBUG && e) _dispatch_abort(line, e);
 }
-#define dispatch_assert_zero(e) _dispatch_assert((long)(e), __LINE__)
+#define dispatch_assert_zero(e) _dispatch_assert_zero((long)(e), __LINE__)
 #endif /* __GNUC__ */
 
 /*
@@ -482,8 +506,8 @@ static inline void _dispatch_assert_zero(long e, long line) {
  */
 #if __GNUC__
 #define dispatch_assume(e) ({ \
-               typeof(e) _e = fastpath(e); /* always eval 'e' */ \
-               if (!_e) { \
+               typeof(e) _e = (e); /* always eval 'e' */ \
+               if (unlikely(!_e)) { \
                        if (__builtin_constant_p(e)) { \
                                dispatch_static_assert(e); \
                        } \
@@ -492,7 +516,9 @@ static inline void _dispatch_assert_zero(long e, long line) {
                _e; \
        })
 #else
-static inline long _dispatch_assume(long e, long line) {
+static inline long
+_dispatch_assume(long e, long line)
+{
        if (!e) _dispatch_bug(line, e);
        return e;
 }
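
dispatch_assume evaluates its argument exactly once, reports an unexpected falsy value through _dispatch_bug, and passes the value through, so a caller can log and keep the result in one expression. A hypothetical usage sketch with the bug-reporting path stubbed out:

    #include <stdio.h>
    #include <stdlib.h>

    static void my_bug(long line, long val) { /* stand-in for _dispatch_bug() */
        fprintf(stderr, "unexpected value 0x%lx at line %ld\n", val, line);
    }

    /* Same shape as the non-GNU fallback above: log once, pass it through. */
    static long my_assume(long e, long line) {
        if (!e) my_bug(line, e);
        return e;
    }
    #define assume(e) my_assume((long)(e), __LINE__)

    int main(void) {
        void *buf = malloc(64);
        if (!assume(buf)) return 1; /* logs and bails instead of crashing */
        free(buf);
        return 0;
    }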
@@ -505,8 +531,8 @@ static inline long _dispatch_assume(long e, long line) {
  */
 #if __GNUC__
 #define dispatch_assume_zero(e) ({ \
-               typeof(e) _e = slowpath(e); /* always eval 'e' */ \
-               if (_e) { \
+               typeof(e) _e = (e); /* always eval 'e' */ \
+               if (unlikely(_e)) { \
                        if (__builtin_constant_p(e)) { \
                                dispatch_static_assert(e); \
                        } \
@@ -515,7 +541,9 @@ static inline long _dispatch_assume(long e, long line) {
                _e; \
        })
 #else
-static inline long _dispatch_assume_zero(long e, long line) {
+static inline long
+_dispatch_assume_zero(long e, long line)
+{
        if (e) _dispatch_bug(line, e);
        return e;
 }
@@ -530,8 +558,8 @@ static inline long _dispatch_assume_zero(long e, long line) {
                if (__builtin_constant_p(e)) { \
                        dispatch_static_assert(e); \
                } else { \
-                       typeof(e) _e = fastpath(e); /* always eval 'e' */ \
-                       if (DISPATCH_DEBUG && !_e) { \
+                       typeof(e) _e = (e); /* always eval 'e' */ \
+                       if (unlikely(DISPATCH_DEBUG && !_e)) { \
                                _dispatch_log("%s() 0x%lx: " msg, __func__, (long)_e, ##args); \
                                abort(); \
                        } \
@@ -539,8 +567,8 @@ static inline long _dispatch_assume_zero(long e, long line) {
        } while (0)
 #else
 #define dispatch_debug_assert(e, msg, args...) do { \
-       long _e = (long)fastpath(e); /* always eval 'e' */ \
-       if (DISPATCH_DEBUG && !_e) { \
+       typeof(e) _e = (e); /* always eval 'e' */ \
+       if (unlikely(DISPATCH_DEBUG && !_e)) { \
                _dispatch_log("%s() 0x%lx: " msg, __FUNCTION__, _e, ##args); \
                abort(); \
        } \
@@ -555,13 +583,6 @@ static inline long _dispatch_assume_zero(long e, long line) {
        } \
 } while (0)
 
-#if DISPATCH_DEBUG
-#if HAVE_MACH
-DISPATCH_NOINLINE DISPATCH_USED
-void dispatch_debug_machport(mach_port_t name, const char* str);
-#endif
-#endif
-
 #if DISPATCH_DEBUG
 /* This is the private version of the deprecated dispatch_debug() */
 DISPATCH_NONNULL2 DISPATCH_NOTHROW
@@ -587,6 +608,7 @@ void *_dispatch_calloc(size_t num_items, size_t size);
 const char *_dispatch_strdup_if_mutable(const char *str);
 void _dispatch_vtable_init(void);
 char *_dispatch_get_build(void);
+int _dispatch_sigmask(void);
 
 uint64_t _dispatch_timeout(dispatch_time_t when);
 uint64_t _dispatch_time_nanoseconds_since_epoch(dispatch_time_t when);
@@ -604,51 +626,40 @@ DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_fork_becomes_unsafe(void)
 {
-       if (!fastpath(_dispatch_is_multithreaded_inline())) {
+       if (unlikely(!_dispatch_is_multithreaded_inline())) {
                _dispatch_fork_becomes_unsafe_slow();
                DISPATCH_COMPILER_CAN_ASSUME(_dispatch_is_multithreaded_inline());
        }
 }
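
_dispatch_fork_becomes_unsafe is the classic set-once latch: a cheap inline test, a slow path taken only until the flag is set, then a compiler assumption so later inline checks fold away. A rough portable sketch of the same shape (the assume intrinsic is clang-specific, and the real flag lives in TSD-adjacent state rather than a plain bool):

    #include <stdbool.h>

    static bool g_multithreaded; /* set once, never cleared */

    static void become_unsafe_slow(void) { g_multithreaded = true; }

    /* Fast path stays branch-only; after the slow call the compiler may
     * assume the flag is set, as DISPATCH_COMPILER_CAN_ASSUME does above. */
    static inline void fork_becomes_unsafe(void)
    {
        if (__builtin_expect(!g_multithreaded, 0)) {
            become_unsafe_slow();
    #if defined(__clang__)
            __builtin_assume(g_multithreaded);
    #endif
        }
    }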
 
+#if DISPATCH_INTROSPECTION
+#undef DISPATCH_PERF_MON
+#define DISPATCH_PERF_MON 0
+#endif
+
 /* #includes dependent on internal.h */
 #include "shims.h"
+#include "event/event_internal.h"
 
 // Older Mac OS X and iOS Simulator fallbacks
 
-#if HAVE_PTHREAD_WORKQUEUES
-#ifndef WORKQ_ADDTHREADS_OPTION_OVERCOMMIT
-#define WORKQ_ADDTHREADS_OPTION_OVERCOMMIT 0x00000001
-#endif
-#endif // HAVE_PTHREAD_WORKQUEUES
 #if HAVE__PTHREAD_WORKQUEUE_INIT && PTHREAD_WORKQUEUE_SPI_VERSION >= 20140213 \
                && !defined(HAVE_PTHREAD_WORKQUEUE_QOS)
 #define HAVE_PTHREAD_WORKQUEUE_QOS 1
 #endif
-#if HAVE__PTHREAD_WORKQUEUE_INIT && (PTHREAD_WORKQUEUE_SPI_VERSION >= 20150304 \
-               || (PTHREAD_WORKQUEUE_SPI_VERSION == 20140730 && \
-                       defined(WORKQ_FEATURE_KEVENT))) \
+#if HAVE__PTHREAD_WORKQUEUE_INIT && PTHREAD_WORKQUEUE_SPI_VERSION >= 20150304 \
                && !defined(HAVE_PTHREAD_WORKQUEUE_KEVENT)
-#if PTHREAD_WORKQUEUE_SPI_VERSION == 20140730
-// rdar://problem/20609877
-typedef pthread_worqueue_function_kevent_t pthread_workqueue_function_kevent_t;
-#endif
 #define HAVE_PTHREAD_WORKQUEUE_KEVENT 1
 #endif
 
-#ifndef PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK
-#if HAVE_PTHREAD_WORKQUEUE_QOS && DISPATCH_HOST_SUPPORTS_OSX(101200)
-#define PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK 1
+
+#ifndef HAVE_PTHREAD_WORKQUEUE_NARROWING
+#if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(109900)
+#define HAVE_PTHREAD_WORKQUEUE_NARROWING 0
 #else
-#define PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK 0
+#define HAVE_PTHREAD_WORKQUEUE_NARROWING 1
 #endif
-#endif // PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK
-
-#if HAVE_MACH
-#if !defined(MACH_NOTIFY_SEND_POSSIBLE)
-#undef MACH_NOTIFY_SEND_POSSIBLE
-#define MACH_NOTIFY_SEND_POSSIBLE MACH_NOTIFY_DEAD_NAME
 #endif
-#endif // HAVE_MACH
 
 #ifdef EVFILT_MEMORYSTATUS
 #ifndef DISPATCH_USE_MEMORYSTATUS
@@ -664,153 +675,30 @@ typedef pthread_worqueue_function_kevent_t pthread_workqueue_function_kevent_t;
 #define DISPATCH_USE_MEMORYPRESSURE_SOURCE 1
 #endif
 #if DISPATCH_USE_MEMORYPRESSURE_SOURCE
-extern bool _dispatch_memory_warn;
-#endif
-
-#if !defined(NOTE_LEEWAY)
-#undef NOTE_LEEWAY
-#define NOTE_LEEWAY 0
-#undef NOTE_CRITICAL
-#define NOTE_CRITICAL 0
-#undef NOTE_BACKGROUND
-#define NOTE_BACKGROUND 0
-#endif // NOTE_LEEWAY
-
-#if !defined(NOTE_FUNLOCK)
-#define NOTE_FUNLOCK 0x00000100
-#endif
-
-#if !defined(NOTE_MACH_CONTINUOUS_TIME)
-#define NOTE_MACH_CONTINUOUS_TIME 0
-#endif // NOTE_MACH_CONTINUOUS_TIME
-
-#if !defined(HOST_NOTIFY_CALENDAR_SET)
-#define HOST_NOTIFY_CALENDAR_SET HOST_NOTIFY_CALENDAR_CHANGE
-#endif // HOST_NOTIFY_CALENDAR_SET
-
-#if !defined(HOST_CALENDAR_SET_REPLYID)
-#define HOST_CALENDAR_SET_REPLYID 951
-#endif // HOST_CALENDAR_SET_REPLYID
-
-#if HAVE_DECL_NOTE_REAP
-#if defined(NOTE_REAP) && defined(__APPLE__)
-#undef NOTE_REAP
-#define NOTE_REAP 0x10000000 // <rdar://problem/13338526>
-#endif
-#endif // HAVE_DECL_NOTE_REAP
-
-#ifndef VQ_QUOTA
-#undef HAVE_DECL_VQ_QUOTA // rdar://problem/24160982
-#endif // VQ_QUOTA
-
-#ifndef VQ_NEARLOWDISK
-#undef HAVE_DECL_VQ_NEARLOWDISK
-#endif // VQ_NEARLOWDISK
-
-#ifndef VQ_DESIRED_DISK
-#undef HAVE_DECL_VQ_DESIRED_DISK
-#endif // VQ_DESIRED_DISK
-
-#if !defined(NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) || \
-               !DISPATCH_HOST_SUPPORTS_OSX(101200)
-#undef NOTE_MEMORYSTATUS_PROC_LIMIT_WARN
-#define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN 0
-#endif // NOTE_MEMORYSTATUS_PROC_LIMIT_WARN
-
-#if !defined(NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL) || \
-               !DISPATCH_HOST_SUPPORTS_OSX(101200)
-#undef NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL
-#define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL 0
-#endif // NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL
-
-#if !defined(EV_UDATA_SPECIFIC) || !DISPATCH_HOST_SUPPORTS_OSX(101100)
-#undef DISPATCH_USE_EV_UDATA_SPECIFIC
-#define DISPATCH_USE_EV_UDATA_SPECIFIC 0
-#elif !defined(DISPATCH_USE_EV_UDATA_SPECIFIC)
-#define DISPATCH_USE_EV_UDATA_SPECIFIC 1
-#endif // EV_UDATA_SPECIFIC
-
-#if !DISPATCH_USE_EV_UDATA_SPECIFIC
-#undef EV_UDATA_SPECIFIC
-#define EV_UDATA_SPECIFIC 0
-#undef EV_VANISHED
-#define EV_VANISHED 0
-#endif // !DISPATCH_USE_EV_UDATA_SPECIFIC
-
-#ifndef EV_VANISHED
-#define EV_VANISHED 0x0200
-#endif
-
-#ifndef DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS
-#if TARGET_OS_MAC && !DISPATCH_HOST_SUPPORTS_OSX(101200)
-// deferred delete can return bogus ENOENTs on older kernels
-#define DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS 1
+#if __has_include(<malloc_private.h>)
+#include <malloc_private.h>
 #else
-#define DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS 0
-#endif
+extern void malloc_memory_event_handler(unsigned long);
+#endif // __has_include(<malloc_private.h>)
+extern bool _dispatch_memory_warn;
 #endif
 
-#if !defined(EV_SET_QOS) || !DISPATCH_HOST_SUPPORTS_OSX(101100)
-#undef DISPATCH_USE_KEVENT_QOS
-#define DISPATCH_USE_KEVENT_QOS 0
-#elif !defined(DISPATCH_USE_KEVENT_QOS)
-#define DISPATCH_USE_KEVENT_QOS 1
-#endif // EV_SET_QOS
-
 #if HAVE_PTHREAD_WORKQUEUE_KEVENT && defined(KEVENT_FLAG_WORKQ) && \
-               DISPATCH_USE_EV_UDATA_SPECIFIC && DISPATCH_USE_KEVENT_QOS && \
-               DISPATCH_HOST_SUPPORTS_OSX(101200) && \
                !defined(DISPATCH_USE_KEVENT_WORKQUEUE)
 #define DISPATCH_USE_KEVENT_WORKQUEUE 1
 #endif
 
-
-#if (!DISPATCH_USE_KEVENT_WORKQUEUE || DISPATCH_DEBUG) && \
+#if (!DISPATCH_USE_KEVENT_WORKQUEUE || DISPATCH_DEBUG || DISPATCH_PROFILE) && \
                !defined(DISPATCH_USE_MGR_THREAD)
 #define DISPATCH_USE_MGR_THREAD 1
 #endif
 
-#if DISPATCH_USE_KEVENT_WORKQUEUE && DISPATCH_USE_EV_UDATA_SPECIFIC && \
-               DISPATCH_HOST_SUPPORTS_OSX(101200) && \
-               !defined(DISPATCH_USE_EVFILT_MACHPORT_DIRECT)
-#define DISPATCH_USE_EVFILT_MACHPORT_DIRECT 1
-#endif
-
-#ifndef MACH_SEND_OVERRIDE
-#define MACH_SEND_OVERRIDE 0x00000020
-typedef unsigned int mach_msg_priority_t;
-#define MACH_MSG_PRIORITY_UNSPECIFIED ((mach_msg_priority_t)0)
-#endif // MACH_SEND_OVERRIDE
-
-
-#if (!DISPATCH_USE_EVFILT_MACHPORT_DIRECT || DISPATCH_DEBUG) && \
-               !defined(DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK)
-#define DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK 1
-#endif
-
-#if DISPATCH_USE_KEVENT_QOS
-typedef struct kevent_qos_s _dispatch_kevent_qos_s;
-typedef typeof(((struct kevent_qos_s*)NULL)->qos) _dispatch_kevent_priority_t;
-#else // DISPATCH_USE_KEVENT_QOS
-#ifndef KEVENT_FLAG_IMMEDIATE
-#define KEVENT_FLAG_NONE 0x00
-#define KEVENT_FLAG_IMMEDIATE 0x01
-#define KEVENT_FLAG_ERROR_EVENTS 0x02
-#endif // KEVENT_FLAG_IMMEDIATE
-typedef struct kevent64_s _dispatch_kevent_qos_s;
-#define kevent_qos(_kq, _changelist, _nchanges, _eventlist, _nevents, \
-               _data_out, _data_available, _flags) \
-               ({ unsigned int _f = (_flags); _dispatch_kevent_qos_s _kev_copy; \
-               const _dispatch_kevent_qos_s *_cl = (_changelist); \
-               int _n = (_nchanges); const struct timespec _timeout_immediately = {}; \
-               dispatch_static_assert(!(_data_out) && !(_data_available)); \
-               if (_f & KEVENT_FLAG_ERROR_EVENTS) { \
-                       dispatch_static_assert(_n == 1); \
-                       _kev_copy = *_cl; _kev_copy.flags |= EV_RECEIPT; } \
-               kevent64((_kq), _f & KEVENT_FLAG_ERROR_EVENTS ? &_kev_copy : _cl, _n, \
-                       (_eventlist), (_nevents), 0, \
-                       _f & KEVENT_FLAG_IMMEDIATE ? &_timeout_immediately : NULL); })
-#endif // DISPATCH_USE_KEVENT_QOS
+
+#if defined(MACH_SEND_SYNC_OVERRIDE) && defined(MACH_RCV_SYNC_WAIT) && \
+               DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(109900) && \
+               !defined(DISPATCH_USE_MACH_SEND_SYNC_OVERRIDE)
+#define DISPATCH_USE_MACH_SEND_SYNC_OVERRIDE 1
+#endif
 
 #if defined(F_SETNOSIGPIPE) && defined(F_GETNOSIGPIPE)
 #ifndef DISPATCH_USE_SETNOSIGPIPE
@@ -844,6 +732,14 @@ typedef struct kevent64_s _dispatch_kevent_qos_s;
 #endif // HAVE_SYS_GUARDED_H
 
 
+#if DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION
+typedef struct dispatch_trace_timer_params_s {
+       int64_t deadline, interval, leeway;
+} *dispatch_trace_timer_params_t;
+
+#include "provider.h"
+#endif // DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION
+
 #if __has_include(<sys/kdebug.h>)
 #include <sys/kdebug.h>
 #ifndef DBG_DISPATCH
@@ -851,15 +747,22 @@ typedef struct kevent64_s _dispatch_kevent_qos_s;
 #endif
 #ifndef KDBG_CODE
 #define KDBG_CODE(...) 0
+#define DBG_FUNC_START 0
+#define DBG_FUNC_END 0
 #endif
 #define DISPATCH_CODE(subclass, code) \
                KDBG_CODE(DBG_DISPATCH, DISPATCH_TRACE_SUBCLASS_##subclass, code)
+#define DISPATCH_CODE_START(subclass, code) \
+               (DISPATCH_CODE(subclass, code) | DBG_FUNC_START)
+#define DISPATCH_CODE_END(subclass, code) \
+               (DISPATCH_CODE(subclass, code) | DBG_FUNC_END)
 #ifdef ARIADNEDBG_CODE
 #define ARIADNE_ENTER_DISPATCH_MAIN_CODE ARIADNEDBG_CODE(220, 2)
 #else
 #define ARIADNE_ENTER_DISPATCH_MAIN_CODE 0
 #endif
-#if !defined(DISPATCH_USE_VOUCHER_KDEBUG_TRACE) && DISPATCH_INTROSPECTION
+#if !defined(DISPATCH_USE_VOUCHER_KDEBUG_TRACE) && \
+               (DISPATCH_INTROSPECTION || DISPATCH_PROFILE || DISPATCH_DEBUG)
 #define DISPATCH_USE_VOUCHER_KDEBUG_TRACE 1
 #endif
 
@@ -867,15 +770,21 @@ typedef struct kevent64_s _dispatch_kevent_qos_s;
 #define DISPATCH_TRACE_SUBCLASS_VOUCHER 1
 #define DISPATCH_TRACE_SUBCLASS_PERF 2
 #define DISPATCH_TRACE_SUBCLASS_MACH_MSG 3
+#define DISPATCH_TRACE_SUBCLASS_PERF_MON 4
 
 #define DISPATCH_PERF_non_leaf_retarget DISPATCH_CODE(PERF, 1)
 #define DISPATCH_PERF_post_activate_retarget DISPATCH_CODE(PERF, 2)
 #define DISPATCH_PERF_post_activate_mutation DISPATCH_CODE(PERF, 3)
 #define DISPATCH_PERF_delayed_registration DISPATCH_CODE(PERF, 4)
 #define DISPATCH_PERF_mutable_target DISPATCH_CODE(PERF, 5)
+#define DISPATCH_PERF_strict_bg_timer DISPATCH_CODE(PERF, 6)
 
 #define DISPATCH_MACH_MSG_hdr_move DISPATCH_CODE(MACH_MSG, 1)
 
+#define DISPATCH_PERF_MON_worker_thread_start DISPATCH_CODE_START(PERF_MON, 1)
+#define DISPATCH_PERF_MON_worker_thread_end DISPATCH_CODE_END(PERF_MON, 1)
+#define DISPATCH_PERF_MON_worker_useless DISPATCH_CODE(PERF_MON, 2)
+
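Assuming the conventional kdebug encoding (class in the top byte, subclass below it, the code shifted left by two, and the two low bits reserved for the start/end function qualifiers), the DISPATCH_CODE values above line up with the constants in src/libdispatch.codes, which is why those entries step by 4. A small arithmetic check; treat the exact shifts as an assumption about <sys/kdebug.h>:

    #include <assert.h>
    #include <stdint.h>

    /* Conventional kdebug layout: class<<24 | subclass<<16 | code<<2. */
    #define MY_KDBG_CODE(cls, sub, code) \
            (((uint32_t)(cls) << 24) | ((uint32_t)(sub) << 16) | \
             ((uint32_t)(code) << 2))
    #define MY_DBG_FUNC_START 0x1u
    #define MY_DBG_FUNC_END   0x2u

    #define MY_DBG_DISPATCH      46 /* 0x2e */
    #define MY_SUBCLASS_PERF_MON  4

    int main(void)
    {
        uint32_t tp = MY_KDBG_CODE(MY_DBG_DISPATCH, MY_SUBCLASS_PERF_MON, 1);
        assert(tp == 0x2e040004); /* matches DISPATCH_PERF_MON_worker_thread */
        assert((tp | MY_DBG_FUNC_START) == 0x2e040005); /* _start variant */
        assert((tp | MY_DBG_FUNC_END)   == 0x2e040006); /* _end variant */
        return 0;
    }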
 DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b,
@@ -922,42 +831,18 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b,
 #define MACH_SEND_INVALID_VOUCHER 0x10000005
 #endif
 
-#if TARGET_OS_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101100
-#undef VOUCHER_USE_MACH_VOUCHER
-#define VOUCHER_USE_MACH_VOUCHER 0
-#endif
 #ifndef VOUCHER_USE_MACH_VOUCHER
 #if __has_include(<mach/mach_voucher.h>)
 #define VOUCHER_USE_MACH_VOUCHER 1
 #endif
-#endif
+#endif // VOUCHER_USE_MACH_VOUCHER
 
+#ifndef VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER
 #if RDAR_24272659 // FIXME: <rdar://problem/24272659>
-#if !VOUCHER_USE_MACH_VOUCHER || !DISPATCH_HOST_SUPPORTS_OSX(101200)
-#undef VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER
-#define VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER 0
-#elif !defined(VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER)
 #define VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER 1
-#endif
 #else // RDAR_24272659
-#undef VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER
 #define VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER 0
 #endif // RDAR_24272659
-
-#if !VOUCHER_USE_MACH_VOUCHER || !DISPATCH_HOST_SUPPORTS_OSX(101200)
-#undef VOUCHER_USE_BANK_AUTOREDEEM
-#define VOUCHER_USE_BANK_AUTOREDEEM 0
-#elif !defined(VOUCHER_USE_BANK_AUTOREDEEM)
-#define VOUCHER_USE_BANK_AUTOREDEEM 1
-#endif
-
-#if !VOUCHER_USE_MACH_VOUCHER || \
-               !__has_include(<voucher/ipc_pthread_priority_types.h>) || \
-               !DISPATCH_HOST_SUPPORTS_OSX(101200)
-#undef VOUCHER_USE_MACH_VOUCHER_PRIORITY
-#define VOUCHER_USE_MACH_VOUCHER_PRIORITY 0
-#elif !defined(VOUCHER_USE_MACH_VOUCHER_PRIORITY)
-#define VOUCHER_USE_MACH_VOUCHER_PRIORITY 1
 #endif
 
 #ifndef VOUCHER_USE_PERSONA
@@ -981,7 +866,7 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b,
 #define _dispatch_hardware_crash() \
                __asm__(""); __builtin_trap() // <rdar://problem/17464981>
 
-#define _dispatch_set_crash_log_cause_and_message(ac, msg)
+#define _dispatch_set_crash_log_cause_and_message(ac, msg) ((void)(ac))
 #define _dispatch_set_crash_log_message(msg)
 #define _dispatch_set_crash_log_message_dynamic(msg)
 
@@ -1036,7 +921,7 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b,
 
 #define DISPATCH_NO_VOUCHER ((voucher_t)(void*)~0ul)
 #define DISPATCH_NO_PRIORITY ((pthread_priority_t)~0ul)
-DISPATCH_ENUM(_dispatch_thread_set_self, unsigned long,
+DISPATCH_ENUM(dispatch_thread_set_self, unsigned long,
        DISPATCH_PRIORITY_ENFORCE = 0x1,
        DISPATCH_VOUCHER_REPLACE = 0x2,
        DISPATCH_VOUCHER_CONSUME = 0x4,
@@ -1045,7 +930,7 @@ DISPATCH_ENUM(_dispatch_thread_set_self, unsigned long,
 DISPATCH_WARN_RESULT
 static inline voucher_t _dispatch_adopt_priority_and_set_voucher(
                pthread_priority_t priority, voucher_t voucher,
-               _dispatch_thread_set_self_t flags);
+               dispatch_thread_set_self_t flags);
 #if HAVE_MACH
 mach_port_t _dispatch_get_mach_host_port(void);
 #endif
@@ -1058,8 +943,7 @@ extern int _dispatch_set_qos_class_enabled;
 #endif
 #endif // HAVE_PTHREAD_WORKQUEUE_QOS
 #if DISPATCH_USE_KEVENT_WORKQUEUE
-#if !HAVE_PTHREAD_WORKQUEUE_QOS || !DISPATCH_USE_KEVENT_QOS || \
-               !DISPATCH_USE_EV_UDATA_SPECIFIC
+#if !HAVE_PTHREAD_WORKQUEUE_QOS || !EV_UDATA_SPECIFIC
 #error Invalid build configuration
 #endif
 #if DISPATCH_USE_MGR_THREAD
@@ -1067,20 +951,9 @@ extern int _dispatch_kevent_workqueue_enabled;
 #else
 #define _dispatch_kevent_workqueue_enabled (1)
 #endif
-#endif // DISPATCH_USE_KEVENT_WORKQUEUE
-
-#if DISPATCH_USE_EVFILT_MACHPORT_DIRECT
-#if !DISPATCH_USE_KEVENT_WORKQUEUE || !DISPATCH_USE_EV_UDATA_SPECIFIC
-#error Invalid build configuration
-#endif
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-extern int _dispatch_evfilt_machport_direct_enabled;
-#else
-#define _dispatch_evfilt_machport_direct_enabled (1)
-#endif
 #else
-#define _dispatch_evfilt_machport_direct_enabled (0)
-#endif // DISPATCH_USE_EVFILT_MACHPORT_DIRECT
+#define _dispatch_kevent_workqueue_enabled (0)
+#endif // DISPATCH_USE_KEVENT_WORKQUEUE
 
 
 /* #includes dependent on internal.h */
@@ -1089,6 +962,7 @@ extern int _dispatch_evfilt_machport_direct_enabled;
 #include "introspection_internal.h"
 #include "queue_internal.h"
 #include "source_internal.h"
+#include "mach_internal.h"
 #include "voucher_internal.h"
 #include "data_internal.h"
 #if !TARGET_OS_WIN32
index d847cb91a26da96ef4228c29fff10cc1f4af4845..8692a8bc53542a0f2319504143f5250958b9e82c 100644 (file)
@@ -193,7 +193,7 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq,
                case DC_OVERRIDE_STEALING_TYPE:
                case DC_OVERRIDE_OWNING_TYPE:
                        dc = dc->dc_data;
-                       if (_dispatch_object_has_vtable(dc)) {
+                       if (!_dispatch_object_is_continuation(dc)) {
                                // these really wrap queues so we should hide the continuation type
                                dq = (dispatch_queue_t)dc;
                                diqi->type = dispatch_introspection_queue_item_type_queue;
@@ -204,6 +204,8 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq,
 #endif
                case DC_ASYNC_REDIRECT_TYPE:
                        DISPATCH_INTERNAL_CRASH(0, "Handled by the caller");
+               case DC_MACH_ASYNC_REPLY_TYPE:
+                       break;
                case DC_MACH_SEND_BARRRIER_DRAIN_TYPE:
                        break;
                case DC_MACH_SEND_BARRIER_TYPE:
@@ -211,23 +213,17 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq,
                        flags = (uintptr_t)dc->dc_data;
                        dq = dq->do_targetq;
                        break;
+               default:
+                       DISPATCH_INTERNAL_CRASH(dc->do_vtable, "Unknown dc vtable type");
                }
        } else {
-               if (flags & DISPATCH_OBJ_SYNC_SLOW_BIT) {
-                       waiter = pthread_from_mach_thread_np((mach_port_t)dc->dc_data);
-                       if (flags & DISPATCH_OBJ_BARRIER_BIT) {
-                               dc = dc->dc_ctxt;
-                               dq = dc->dc_data;
-                       }
-                       ctxt = dc->dc_ctxt;
-                       func = dc->dc_func;
+               if (flags & DISPATCH_OBJ_SYNC_WAITER_BIT) {
+                       dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc;
+                       waiter = pthread_from_mach_thread_np(dsc->dsc_waiter);
+                       ctxt = dsc->dsc_ctxt;
+                       func = dsc->dsc_func;
                }
-               if (func == _dispatch_sync_recurse_invoke) {
-                       dc = dc->dc_ctxt;
-                       dq = dc->dc_data;
-                       ctxt = dc->dc_ctxt;
-                       func = dc->dc_func;
-               } else if (func == _dispatch_apply_invoke ||
+               if (func == _dispatch_apply_invoke ||
                                func == _dispatch_apply_redirect_invoke) {
                        dispatch_apply_t da = ctxt;
                        if (da->da_todo) {
@@ -252,7 +248,7 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq,
                .function = func,
                .waiter = waiter,
                .barrier = (flags & DISPATCH_OBJ_BARRIER_BIT) || dq->dq_width == 1,
-               .sync = flags & DISPATCH_OBJ_SYNC_SLOW_BIT,
+               .sync = flags & DISPATCH_OBJ_SYNC_WAITER_BIT,
                .apply = apply,
        };
        if (flags & DISPATCH_OBJ_GROUP_BIT) {
@@ -300,16 +296,11 @@ _dispatch_introspection_source_get_info(dispatch_source_t ds)
                .suspend_count = _dq_state_suspend_cnt(dq_state) + ds->dq_side_suspend_cnt,
                .enqueued = _dq_state_is_enqueued(dq_state),
                .handler_is_block = hdlr_is_block,
-               .timer = ds->ds_is_timer,
-               .after = ds->ds_is_timer && (bool)(ds_timer(ds).flags & DISPATCH_TIMER_AFTER),
+               .timer = dr->du_is_timer,
+               .after = dr->du_is_timer && (dr->du_fflags & DISPATCH_TIMER_AFTER),
+               .type = (unsigned long)dr->du_filter,
+               .handle = (unsigned long)dr->du_ident,
        };
-       dispatch_kevent_t dk = ds->ds_dkev;
-       if (ds->ds_is_custom_source) {
-               dis.type = (unsigned long)dk;
-       } else if (dk) {
-               dis.type = (unsigned long)dk->dk_kevent.filter;
-               dis.handle = (unsigned long)dk->dk_kevent.ident;
-       }
        return dis;
 }
 
@@ -739,7 +730,7 @@ struct dispatch_order_frame_s {
        dispatch_queue_order_entry_t dof_e;
 };
 
-DISPATCH_NOINLINE
+DISPATCH_NOINLINE DISPATCH_NORETURN
 static void
 _dispatch_introspection_lock_inversion_fail(dispatch_order_frame_t dof,
                dispatch_queue_t top_q, dispatch_queue_t bottom_q)
index 06504a8ba46ab465f8a24310f681f07054469ed0..e2fa6d18b8136e2af5c7e78a409da487221c522e 100644 (file)
@@ -66,7 +66,6 @@ void _dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f);
 
 #if DISPATCH_PURE_C
 
-void _dispatch_sync_recurse_invoke(void *ctxt);
 static dispatch_queue_t _dispatch_queue_get_current(void);
 
 DISPATCH_ALWAYS_INLINE
@@ -100,24 +99,10 @@ _dispatch_introspection_target_queue_changed(dispatch_queue_t dq);
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_introspection_barrier_sync_begin(dispatch_queue_t dq,
-       dispatch_function_t func)
+_dispatch_introspection_sync_begin(dispatch_queue_t dq)
 {
        if (!_dispatch_introspection.debug_queue_inversions) return;
-       if (func != _dispatch_sync_recurse_invoke) {
-               _dispatch_introspection_order_record(dq, _dispatch_queue_get_current());
-       }
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_introspection_non_barrier_sync_begin(dispatch_queue_t dq,
-       dispatch_function_t func)
-{
-       if (!_dispatch_introspection.debug_queue_inversions) return;
-       if (func != _dispatch_sync_recurse_invoke) {
-               _dispatch_introspection_order_record(dq, _dispatch_queue_get_current());
-       }
+       _dispatch_introspection_order_record(dq, _dispatch_queue_get_current());
 }
 
 #endif // DISPATCH_PURE_C
@@ -129,7 +114,6 @@ _dispatch_introspection_non_barrier_sync_begin(dispatch_queue_t dq,
 
 #define _dispatch_introspection_init()
 #define _dispatch_introspection_thread_add()
-#define _dispatch_introspection_thread_remove()
 
 DISPATCH_ALWAYS_INLINE
 static inline dispatch_queue_t
@@ -177,13 +161,7 @@ _dispatch_introspection_target_queue_changed(
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_introspection_barrier_sync_begin(dispatch_queue_t dq DISPATCH_UNUSED,
-       dispatch_function_t func DISPATCH_UNUSED) {}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_introspection_non_barrier_sync_begin(dispatch_queue_t dq DISPATCH_UNUSED,
-       dispatch_function_t func DISPATCH_UNUSED) {}
+_dispatch_introspection_sync_begin(dispatch_queue_t dq DISPATCH_UNUSED) {}
 
 #endif // DISPATCH_INTROSPECTION
 
index 0a00e6e63a0939c3361ad4255ec1e94d42a38fff..155b6cf02e95c1c3a9deeeb507a030b4fc156112 100644 (file)
--- a/src/io.c
+++ b/src/io.c
@@ -25,7 +25,7 @@
 #endif
 
 #ifndef PAGE_SIZE
-#define PAGE_SIZE getpagesize()
+#define PAGE_SIZE ((size_t)getpagesize())
 #endif
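
getpagesize() returns an int, so casting once inside the macro keeps later page arithmetic unsigned and free of sign-compare warnings. A minimal sketch (assuming, as usual, a power-of-two page size):

    #include <stddef.h>
    #include <unistd.h>

    #define PAGE_SZ ((size_t)getpagesize()) /* mirrors the macro above */

    /* Round len up to a whole number of pages without mixed-sign math. */
    static size_t
    round_to_pages(size_t len)
    {
        return (len + PAGE_SZ - 1) & ~(PAGE_SZ - 1);
    }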
 
 #if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA
@@ -233,11 +233,10 @@ _dispatch_iocntl(uint32_t param, uint64_t value)
 static dispatch_io_t
 _dispatch_io_create(dispatch_io_type_t type)
 {
-       dispatch_io_t channel = _dispatch_alloc(DISPATCH_VTABLE(io),
+       dispatch_io_t channel = _dispatch_object_alloc(DISPATCH_VTABLE(io),
                        sizeof(struct dispatch_io_s));
        channel->do_next = DISPATCH_OBJECT_LISTLESS;
-       channel->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,
-                       true);
+       channel->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true);
        channel->params.type = type;
        channel->params.high = SIZE_MAX;
        channel->params.low = dispatch_io_defaults.low_water_chunks *
@@ -279,7 +278,7 @@ _dispatch_io_init(dispatch_io_t channel, dispatch_fd_entry_t fd_entry,
 }
 
 void
-_dispatch_io_dispose(dispatch_io_t channel)
+_dispatch_io_dispose(dispatch_io_t channel, DISPATCH_UNUSED bool *allow_free)
 {
        _dispatch_object_debug(channel, "%s", __func__);
        if (channel->fd_entry &&
@@ -683,6 +682,9 @@ _dispatch_io_stop(dispatch_io_t channel)
                                _dispatch_channel_debug("stop cleanup", channel);
                                _dispatch_fd_entry_cleanup_operations(fd_entry, channel);
                                if (!(channel->atomic_flags & DIO_CLOSED)) {
+                                       if (fd_entry->path_data) {
+                                               fd_entry->path_data->channel = NULL;
+                                       }
                                        channel->fd_entry = NULL;
                                        _dispatch_fd_entry_release(fd_entry);
                                }
@@ -733,9 +735,10 @@ dispatch_io_close(dispatch_io_t channel, unsigned long flags)
                                                relaxed);
                                dispatch_fd_entry_t fd_entry = channel->fd_entry;
                                if (fd_entry) {
-                                       if (!fd_entry->path_data) {
-                                               channel->fd_entry = NULL;
+                                       if (fd_entry->path_data) {
+                                               fd_entry->path_data->channel = NULL;
                                        }
+                                       channel->fd_entry = NULL;
                                        _dispatch_fd_entry_release(fd_entry);
                                }
                        }
@@ -889,7 +892,7 @@ dispatch_read(dispatch_fd_t fd, size_t length, dispatch_queue_t queue,
                dispatch_operation_t op =
                        _dispatch_operation_create(DOP_DIR_READ, channel, 0,
                                        length, dispatch_data_empty,
-                                       _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,false),
+                                       _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false),
                                        ^(bool done, dispatch_data_t data, int error) {
                                if (data) {
                                        data = dispatch_data_create_concat(deliver_data, data);
@@ -960,7 +963,7 @@ dispatch_write(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue,
                dispatch_operation_t op =
                        _dispatch_operation_create(DOP_DIR_WRITE, channel, 0,
                                        dispatch_data_get_size(data), data,
-                                       _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,false),
+                                       _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false),
                                        ^(bool done, dispatch_data_t d, int error) {
                                if (done) {
                                        if (d) {
@@ -1020,14 +1023,13 @@ _dispatch_operation_create(dispatch_op_direction_t direction,
                });
                return NULL;
        }
-       dispatch_operation_t op = _dispatch_alloc(DISPATCH_VTABLE(operation),
+       dispatch_operation_t op = _dispatch_object_alloc(DISPATCH_VTABLE(operation),
                        sizeof(struct dispatch_operation_s));
        _dispatch_channel_debug("operation create: %p", channel, op);
        op->do_next = DISPATCH_OBJECT_LISTLESS;
        op->do_xref_cnt = -1; // operation object is not exposed externally
-       op->op_q = dispatch_queue_create("com.apple.libdispatch-io.opq", NULL);
-       op->op_q->do_targetq = queue;
-       _dispatch_retain(queue);
+       op->op_q = dispatch_queue_create_with_target("com.apple.libdispatch-io.opq",
+                       NULL, queue);
        op->active = false;
        op->direction = direction;
        op->offset = offset + channel->f_ptr;
@@ -1048,7 +1050,8 @@ _dispatch_operation_create(dispatch_op_direction_t direction,
 }
 
 void
-_dispatch_operation_dispose(dispatch_operation_t op)
+_dispatch_operation_dispose(dispatch_operation_t op,
+               DISPATCH_UNUSED bool *allow_free)
 {
        _dispatch_object_debug(op, "%s", __func__);
        _dispatch_op_debug("dispose", op);
@@ -1155,8 +1158,9 @@ _dispatch_operation_timer(dispatch_queue_t tq, dispatch_operation_t op)
        }
        dispatch_source_t timer = dispatch_source_create(
                        DISPATCH_SOURCE_TYPE_TIMER, 0, 0, tq);
-       dispatch_source_set_timer(timer, dispatch_time(DISPATCH_TIME_NOW,
-                       (int64_t)op->params.interval), op->params.interval, 0);
+       dispatch_source_set_timer(timer,
+                       dispatch_time(DISPATCH_TIME_NOW, (int64_t)op->params.interval),
+                       op->params.interval, 0);
        dispatch_source_set_event_handler(timer, ^{
                // On stream queue or pick queue
                if (dispatch_source_testcancel(timer)) {
@@ -1236,9 +1240,10 @@ _dispatch_fd_entry_guarded_open(dispatch_fd_entry_t fd_entry, const char *path,
                return fd;
        }
        errno = 0;
+#else
+       (void)fd_entry;
 #endif
        return open(path, oflag, mode);
-       (void)fd_entry;
 }
 
 static inline int
@@ -1248,11 +1253,12 @@ _dispatch_fd_entry_guarded_close(dispatch_fd_entry_t fd_entry, int fd) {
                guardid_t guard = (uintptr_t)fd_entry;
                return guarded_close_np(fd, &guard);
        } else
+#else
+       (void)fd_entry;
 #endif
        {
                return close(fd);
        }
-       (void)fd_entry;
 }
 
 static inline void
@@ -1303,12 +1309,10 @@ _dispatch_fd_entry_create(dispatch_queue_t q)
 {
        dispatch_fd_entry_t fd_entry;
        fd_entry = _dispatch_calloc(1ul, sizeof(struct dispatch_fd_entry_s));
-       fd_entry->close_queue = dispatch_queue_create(
-                       "com.apple.libdispatch-io.closeq", NULL);
        // Use target queue to ensure that no concurrent lookups are going on when
        // the close queue is running
-       fd_entry->close_queue->do_targetq = q;
-       _dispatch_retain(q);
+       fd_entry->close_queue = dispatch_queue_create_with_target(
+                       "com.apple.libdispatch-io.closeq", NULL, q);
        // Suspend the cleanup queue until closing
        _dispatch_fd_entry_retain(fd_entry);
        return fd_entry;
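
This file repeatedly replaces the create-then-retarget pattern with dispatch_queue_create_with_target, which takes the target queue at creation time instead of poking do_targetq and retaining the target by hand. A hedged usage sketch with the public API (the label is invented); the call is available on macOS 10.12 and later:

    #include <dispatch/dispatch.h>

    /* Create a serial queue that drains on `target` from the start. */
    static dispatch_queue_t
    make_subqueue(dispatch_queue_t target)
    {
        return dispatch_queue_create_with_target("com.example.subq",
                DISPATCH_QUEUE_SERIAL, target);
    }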
@@ -1368,7 +1372,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash)
                                                break;
                                );
                        }
-                       int32_t dev = major(st.st_dev);
+                       dev_t dev = major(st.st_dev);
                        // We have to get the disk on the global dev queue. The
                        // barrier queue cannot continue until that is complete
                        dispatch_suspend(fd_entry->barrier_queue);
@@ -1388,8 +1392,9 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash)
                                                break;
                                );
                        }
-                       _dispatch_stream_init(fd_entry, _dispatch_get_root_queue(
-                                       _DISPATCH_QOS_CLASS_DEFAULT, false));
+
+                       _dispatch_stream_init(fd_entry,
+                                       _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false));
                }
                fd_entry->orig_flags = orig_flags;
                fd_entry->orig_nosigpipe = orig_nosigpipe;
@@ -1456,8 +1461,8 @@ _dispatch_fd_entry_create_with_path(dispatch_io_path_data_t path_data,
        if (S_ISREG(mode)) {
                _dispatch_disk_init(fd_entry, major(dev));
        } else {
-               _dispatch_stream_init(fd_entry, _dispatch_get_root_queue(
-                               _DISPATCH_QOS_CLASS_DEFAULT, false));
+               _dispatch_stream_init(fd_entry,
+                               _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false));
        }
        fd_entry->fd = -1;
        fd_entry->orig_flags = -1;
@@ -1581,11 +1586,9 @@ _dispatch_stream_init(dispatch_fd_entry_t fd_entry, dispatch_queue_t tq)
        for (direction = 0; direction < DOP_DIR_MAX; direction++) {
                dispatch_stream_t stream;
                stream = _dispatch_calloc(1ul, sizeof(struct dispatch_stream_s));
-               stream->dq = dispatch_queue_create("com.apple.libdispatch-io.streamq",
-                               NULL);
+               stream->dq = dispatch_queue_create_with_target(
+                               "com.apple.libdispatch-io.streamq", NULL, tq);
                dispatch_set_context(stream->dq, stream);
-               _dispatch_retain(tq);
-               stream->dq->do_targetq = tq;
                TAILQ_INIT(&stream->operations[DISPATCH_IO_RANDOM]);
                TAILQ_INIT(&stream->operations[DISPATCH_IO_STREAM]);
                fd_entry->streams[direction] = stream;
@@ -1630,14 +1633,13 @@ _dispatch_disk_init(dispatch_fd_entry_t fd_entry, dev_t dev)
        }
        // Otherwise create a new entry
        size_t pending_reqs_depth = dispatch_io_defaults.max_pending_io_reqs;
-       disk = _dispatch_alloc(DISPATCH_VTABLE(disk),
+       disk = _dispatch_object_alloc(DISPATCH_VTABLE(disk),
                        sizeof(struct dispatch_disk_s) +
                        (pending_reqs_depth * sizeof(dispatch_operation_t)));
        disk->do_next = DISPATCH_OBJECT_LISTLESS;
        disk->do_xref_cnt = -1;
        disk->advise_list_depth = pending_reqs_depth;
-       disk->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,
-                       false);
+       disk->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false);
        disk->dev = dev;
        TAILQ_INIT(&disk->operations);
        disk->cur_rq = TAILQ_FIRST(&disk->operations);
@@ -1652,7 +1654,7 @@ out:
 }
 
 void
-_dispatch_disk_dispose(dispatch_disk_t disk)
+_dispatch_disk_dispose(dispatch_disk_t disk, DISPATCH_UNUSED bool *allow_free)
 {
        uintptr_t hash = DIO_HASH(disk->dev);
        TAILQ_REMOVE(&_dispatch_io_devs[hash], disk, disk_list);
@@ -1897,7 +1899,7 @@ _dispatch_stream_source(dispatch_stream_t stream, dispatch_operation_t op)
        // Close queue must not run user cleanup handlers until sources are fully
        // unregistered
        dispatch_queue_t close_queue = op->fd_entry->close_queue;
-       dispatch_source_set_cancel_handler(source, ^{
+       dispatch_source_set_mandatory_cancel_handler(source, ^{
                _dispatch_op_debug("stream source cancel", op);
                dispatch_resume(close_queue);
        });
@@ -2165,7 +2167,7 @@ _dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size)
        op->advise_offset += advise.ra_count;
 #ifdef __linux__
        _dispatch_io_syscall_switch(err,
-               readahead(op->fd_entry->fd, advise.ra_offset, advise.ra_count),
+                       readahead(op->fd_entry->fd, advise.ra_offset, (size_t)advise.ra_count),
                case EINVAL: break; // fd does refer to a non-supported filetype
                default: (void)dispatch_assume_zero(err); break;
        );
index ad8259a1de0332ea8f9e8aee559fd9938c064b51..672727fae6f574f0343f428b28a518804712f56e 100644 (file)
@@ -178,10 +178,11 @@ struct dispatch_io_s {
 
 void _dispatch_io_set_target_queue(dispatch_io_t channel, dispatch_queue_t dq);
 size_t _dispatch_io_debug(dispatch_io_t channel, char* buf, size_t bufsiz);
-void _dispatch_io_dispose(dispatch_io_t channel);
+void _dispatch_io_dispose(dispatch_io_t channel, bool *allow_free);
 size_t _dispatch_operation_debug(dispatch_operation_t op, char* buf,
                size_t bufsiz);
-void _dispatch_operation_dispose(dispatch_operation_t operation);
-void _dispatch_disk_dispose(dispatch_disk_t disk);
+void _dispatch_operation_dispose(dispatch_operation_t operation,
+               bool *allow_free);
+void _dispatch_disk_dispose(dispatch_disk_t disk, bool *allow_free);
 
 #endif // __DISPATCH_IO_INTERNAL__
index 9aca7e16c7c87516c605b7bb1be55e1739c4b690..0ecc3331f9f9169047e885168a04fd9bd232419a 100644 (file)
@@ -11,3 +11,9 @@
 0x2e02000c     DISPATCH_PERF_post_activate_mutation
 0x2e020010     DISPATCH_PERF_delayed_registration
 0x2e020014     DISPATCH_PERF_mutable_target
+0x2e020018     DISPATCH_PERF_strict_bg_timer
+
+0x2e030004     DISPATCH_MACH_MSG_hdr_move
+
+0x2e040004     DISPATCH_PERF_MON_worker_thread
+0x2e040008     DISPATCH_PERF_MON_worker_useless
diff --git a/src/mach.c b/src/mach.c
new file mode 100644 (file)
index 0000000..699492d
--- /dev/null
@@ -0,0 +1,2982 @@
+/*
+ * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#include "internal.h"
+#if HAVE_MACH
+
+#define DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT 0x1
+#define DISPATCH_MACH_REGISTER_FOR_REPLY 0x2
+#define DISPATCH_MACH_WAIT_FOR_REPLY 0x4
+#define DISPATCH_MACH_OWNED_REPLY_PORT 0x8
+#define DISPATCH_MACH_ASYNC_REPLY 0x10
+#define DISPATCH_MACH_OPTIONS_MASK 0xffff
+
+#define DM_SEND_STATUS_SUCCESS 0x1
+#define DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT 0x2
+
+DISPATCH_ENUM(dispatch_mach_send_invoke_flags, uint32_t,
+       DM_SEND_INVOKE_NONE            = 0x0,
+       DM_SEND_INVOKE_MAKE_DIRTY      = 0x1,
+       DM_SEND_INVOKE_NEEDS_BARRIER   = 0x2,
+       DM_SEND_INVOKE_CANCEL          = 0x4,
+       DM_SEND_INVOKE_CAN_RUN_BARRIER = 0x8,
+       DM_SEND_INVOKE_IMMEDIATE_SEND  = 0x10,
+);
+#define DM_SEND_INVOKE_IMMEDIATE_SEND_MASK \
+               ((dispatch_mach_send_invoke_flags_t)DM_SEND_INVOKE_IMMEDIATE_SEND)
+
+static inline mach_msg_option_t _dispatch_mach_checkin_options(void);
+static mach_port_t _dispatch_mach_msg_get_remote_port(dispatch_object_t dou);
+static mach_port_t _dispatch_mach_msg_get_reply_port(dispatch_object_t dou);
+static void _dispatch_mach_msg_disconnected(dispatch_mach_t dm,
+               mach_port_t local_port, mach_port_t remote_port);
+static inline void _dispatch_mach_msg_reply_received(dispatch_mach_t dm,
+               dispatch_mach_reply_refs_t dmr, mach_port_t local_port);
+static dispatch_mach_msg_t _dispatch_mach_msg_create_reply_disconnected(
+               dispatch_object_t dou, dispatch_mach_reply_refs_t dmr,
+               dispatch_mach_reason_t reason);
+static bool _dispatch_mach_reconnect_invoke(dispatch_mach_t dm,
+               dispatch_object_t dou);
+static inline mach_msg_header_t* _dispatch_mach_msg_get_msg(
+               dispatch_mach_msg_t dmsg);
+static void _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou,
+               dispatch_qos_t qos);
+static void _dispatch_mach_cancel(dispatch_mach_t dm);
+static void _dispatch_mach_push_send_barrier_drain(dispatch_mach_t dm,
+               dispatch_qos_t qos);
+static void _dispatch_mach_handle_or_push_received_msg(dispatch_mach_t dm,
+               dispatch_mach_msg_t dmsg);
+static void _dispatch_mach_push_async_reply_msg(dispatch_mach_t dm,
+               dispatch_mach_msg_t dmsg, dispatch_queue_t drq);
+static dispatch_queue_t _dispatch_mach_msg_context_async_reply_queue(
+               void *ctxt);
+static dispatch_continuation_t _dispatch_mach_msg_async_reply_wrap(
+               dispatch_mach_msg_t dmsg, dispatch_mach_t dm);
+static void _dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm);
+static void _dispatch_mach_notification_kevent_register(dispatch_mach_t dm,
+               mach_port_t send);
+
+// For tests only.
+DISPATCH_EXPORT void _dispatch_mach_hooks_install_default(void);
+
+dispatch_source_t
+_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp,
+               const struct dispatch_continuation_s *dc)
+{
+       dispatch_source_t ds;
+       ds = dispatch_source_create(&_dispatch_source_type_mach_recv_direct,
+                       recvp, 0, &_dispatch_mgr_q);
+       os_atomic_store(&ds->ds_refs->ds_handler[DS_EVENT_HANDLER],
+                       (dispatch_continuation_t)dc, relaxed);
+       return ds;
+}
+
+#pragma mark -
+#pragma mark dispatch to XPC callbacks
+
+static dispatch_mach_xpc_hooks_t _dispatch_mach_xpc_hooks;
+
+// Default dmxh_direct_message_handler callback that does not handle
+// messages inline.
+static bool
+_dispatch_mach_xpc_no_handle_message(
+               void *_Nullable context DISPATCH_UNUSED,
+               dispatch_mach_reason_t reason DISPATCH_UNUSED,
+               dispatch_mach_msg_t message DISPATCH_UNUSED,
+               mach_error_t error DISPATCH_UNUSED)
+{
+       return false;
+}
+
+// Default dmxh_msg_context_reply_queue callback that returns a NULL queue.
+static dispatch_queue_t
+_dispatch_mach_msg_context_no_async_reply_queue(
+               void *_Nonnull msg_context DISPATCH_UNUSED)
+{
+       return NULL;
+}
+
+// Default dmxh_async_reply_handler callback that crashes when called.
+DISPATCH_NORETURN
+static void
+_dispatch_mach_default_async_reply_handler(void *context DISPATCH_UNUSED,
+               dispatch_mach_reason_t reason DISPATCH_UNUSED,
+               dispatch_mach_msg_t message DISPATCH_UNUSED)
+{
+       DISPATCH_CLIENT_CRASH(_dispatch_mach_xpc_hooks,
+                       "_dispatch_mach_default_async_reply_handler called");
+}
+
+// Default dmxh_enable_sigterm_notification callback that enables delivery of
+// SIGTERM notifications (for backwards compatibility).
+static bool
+_dispatch_mach_enable_sigterm(void *_Nullable context DISPATCH_UNUSED)
+{
+       return true;
+}
+
+// Callbacks from dispatch to XPC. The default is to not support any callbacks.
+static const struct dispatch_mach_xpc_hooks_s _dispatch_mach_xpc_hooks_default
+               = {
+       .version = DISPATCH_MACH_XPC_HOOKS_VERSION,
+       .dmxh_direct_message_handler = &_dispatch_mach_xpc_no_handle_message,
+       .dmxh_msg_context_reply_queue =
+                       &_dispatch_mach_msg_context_no_async_reply_queue,
+       .dmxh_async_reply_handler = &_dispatch_mach_default_async_reply_handler,
+       .dmxh_enable_sigterm_notification = &_dispatch_mach_enable_sigterm,
+};
+
+static dispatch_mach_xpc_hooks_t _dispatch_mach_xpc_hooks
+               = &_dispatch_mach_xpc_hooks_default;
+
+void
+dispatch_mach_hooks_install_4libxpc(dispatch_mach_xpc_hooks_t hooks)
+{
+       if (!os_atomic_cmpxchg(&_dispatch_mach_xpc_hooks,
+                       &_dispatch_mach_xpc_hooks_default, hooks, relaxed)) {
+               DISPATCH_CLIENT_CRASH(_dispatch_mach_xpc_hooks,
+                               "dispatch_mach_hooks_install_4libxpc called twice");
+       }
+}
+
+void
+_dispatch_mach_hooks_install_default(void)
+{
+       os_atomic_store(&_dispatch_mach_xpc_hooks,
+                       &_dispatch_mach_xpc_hooks_default, relaxed);
+}
+
+#pragma mark -
+#pragma mark dispatch_mach_t
+
+static dispatch_mach_t
+_dispatch_mach_create(const char *label, dispatch_queue_t q, void *context,
+               dispatch_mach_handler_function_t handler, bool handler_is_block,
+               bool is_xpc)
+{
+       dispatch_mach_recv_refs_t dmrr;
+       dispatch_mach_send_refs_t dmsr;
+       dispatch_mach_t dm;
+       dm = _dispatch_object_alloc(DISPATCH_VTABLE(mach),
+                       sizeof(struct dispatch_mach_s));
+       _dispatch_queue_init(dm->_as_dq, DQF_LEGACY, 1,
+                       DISPATCH_QUEUE_INACTIVE | DISPATCH_QUEUE_ROLE_INNER);
+
+       dm->dq_label = label;
+       dm->do_ref_cnt++; // the reference _dispatch_mach_cancel_invoke holds
+       dm->dm_is_xpc = is_xpc;
+
+       dmrr = dux_create(&_dispatch_mach_type_recv, 0, 0)._dmrr;
+       dispatch_assert(dmrr->du_is_direct);
+       dmrr->du_owner_wref = _dispatch_ptr2wref(dm);
+       dmrr->dmrr_handler_func = handler;
+       dmrr->dmrr_handler_ctxt = context;
+       dmrr->dmrr_handler_is_block = handler_is_block;
+       dm->dm_recv_refs = dmrr;
+
+       dmsr = dux_create(&_dispatch_mach_type_send, 0,
+                       DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD)._dmsr;
+       dmsr->du_owner_wref = _dispatch_ptr2wref(dm);
+       dm->dm_send_refs = dmsr;
+
+       if (slowpath(!q)) {
+               q = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true);
+       } else {
+               _dispatch_retain(q);
+       }
+       dm->do_targetq = q;
+       _dispatch_object_debug(dm, "%s", __func__);
+       return dm;
+}
+
+dispatch_mach_t
+dispatch_mach_create(const char *label, dispatch_queue_t q,
+               dispatch_mach_handler_t handler)
+{
+       dispatch_block_t bb = _dispatch_Block_copy((void*)handler);
+       return _dispatch_mach_create(label, q, bb,
+                       (dispatch_mach_handler_function_t)_dispatch_Block_invoke(bb), true,
+                       false);
+}
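+
+// Illustrative sketch (not part of this change) of the Block-based creation
+// path from the private API's perspective; the label and my_queue are
+// hypothetical:
+//
+//     dispatch_mach_t dm = dispatch_mach_create("com.example.channel",
+//             my_queue, ^(dispatch_mach_reason_t reason,
+//             dispatch_mach_msg_t message, mach_error_t error) {
+//         // receives DISPATCH_MACH_MESSAGE_RECEIVED, _MESSAGE_SENT,
+//         // _DISCONNECTED, _CANCELED, ... events for the channel
+//     });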
+
+dispatch_mach_t
+dispatch_mach_create_f(const char *label, dispatch_queue_t q, void *context,
+               dispatch_mach_handler_function_t handler)
+{
+       return _dispatch_mach_create(label, q, context, handler, false, false);
+}
+
+dispatch_mach_t
+dispatch_mach_create_4libxpc(const char *label, dispatch_queue_t q,
+               void *context, dispatch_mach_handler_function_t handler)
+{
+       return _dispatch_mach_create(label, q, context, handler, false, true);
+}
+
+void
+_dispatch_mach_dispose(dispatch_mach_t dm, bool *allow_free)
+{
+       _dispatch_object_debug(dm, "%s", __func__);
+       _dispatch_unote_dispose(dm->dm_recv_refs);
+       dm->dm_recv_refs = NULL;
+       _dispatch_unote_dispose(dm->dm_send_refs);
+       dm->dm_send_refs = NULL;
+       if (dm->dm_xpc_term_refs) {
+               _dispatch_unote_dispose(dm->dm_xpc_term_refs);
+               dm->dm_xpc_term_refs = NULL;
+       }
+       _dispatch_queue_destroy(dm->_as_dq, allow_free);
+}
+
+void
+dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive,
+               mach_port_t send, dispatch_mach_msg_t checkin)
+{
+       dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+       uint32_t disconnect_cnt;
+
+       if (MACH_PORT_VALID(receive)) {
+               dm->dm_recv_refs->du_ident = receive;
+               _dispatch_retain(dm); // the reference the manager queue holds
+       }
+       dmsr->dmsr_send = send;
+       if (MACH_PORT_VALID(send)) {
+               if (checkin) {
+                       dispatch_mach_msg_t dmsg = checkin;
+                       dispatch_retain(dmsg);
+                       dmsg->dmsg_options = _dispatch_mach_checkin_options();
+                       dmsr->dmsr_checkin_port = _dispatch_mach_msg_get_remote_port(dmsg);
+               }
+               dmsr->dmsr_checkin = checkin;
+       }
+       dispatch_assert(DISPATCH_MACH_NEVER_CONNECTED - 1 ==
+                       DISPATCH_MACH_NEVER_INSTALLED);
+       disconnect_cnt = os_atomic_dec2o(dmsr, dmsr_disconnect_cnt, release);
+       if (unlikely(disconnect_cnt != DISPATCH_MACH_NEVER_INSTALLED)) {
+               DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel already connected");
+       }
+       _dispatch_object_debug(dm, "%s", __func__);
+       return dispatch_activate(dm);
+}
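+
+// Illustrative sketch (not part of this change): connecting a channel to a
+// freshly allocated receive right. mach_port_allocate() is standard Mach
+// API; `dm` would come from one of the create functions above.
+//
+//     mach_port_t recvp = MACH_PORT_NULL;
+//     kern_return_t kr = mach_port_allocate(mach_task_self(),
+//             MACH_PORT_RIGHT_RECEIVE, &recvp);
+//     if (kr == KERN_SUCCESS) {
+//         // the checkin message is optional; connecting also activates
+//         // the channel, and connecting twice crashes by design
+//         dispatch_mach_connect(dm, recvp, MACH_PORT_NULL, NULL);
+//     }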
+
+static inline bool
+_dispatch_mach_reply_tryremove(dispatch_mach_t dm,
+               dispatch_mach_reply_refs_t dmr)
+{
+       bool removed;
+       _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
+       if ((removed = _TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
+               TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
+               _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
+       }
+       _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
+       return removed;
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_reply_waiter_unregister(dispatch_mach_t dm,
+               dispatch_mach_reply_refs_t dmr, uint32_t options)
+{
+       dispatch_mach_msg_t dmsgr = NULL;
+       bool disconnected = (options & DU_UNREGISTER_DISCONNECTED);
+       if (options & DU_UNREGISTER_REPLY_REMOVE) {
+               _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
+               if (unlikely(!_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
+                       DISPATCH_INTERNAL_CRASH(0, "Could not find reply registration");
+               }
+               TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
+               _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
+               _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
+       }
+       if (disconnected) {
+               dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr,
+                               DISPATCH_MACH_DISCONNECTED);
+       } else if (dmr->dmr_voucher) {
+               _voucher_release(dmr->dmr_voucher);
+               dmr->dmr_voucher = NULL;
+       }
+       _dispatch_debug("machport[0x%08x]: unregistering for sync reply%s, ctxt %p",
+                       _dispatch_mach_reply_get_reply_port((mach_port_t)dmr->du_ident),
+                       disconnected ? " (disconnected)" : "", dmr->dmr_ctxt);
+       if (dmsgr) {
+               return _dispatch_mach_handle_or_push_received_msg(dm, dmsgr);
+       }
+}
+
+DISPATCH_NOINLINE
+static bool
+_dispatch_mach_reply_list_remove(dispatch_mach_t dm,
+               dispatch_mach_reply_refs_t dmr)
+{
+       // dmsr_replies_lock must be held by the caller.
+       bool removed = false;
+       if (likely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
+               TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
+               _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
+               removed = true;
+       }
+       return removed;
+}
+
+DISPATCH_NOINLINE
+static bool
+_dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm,
+               dispatch_mach_reply_refs_t dmr, uint32_t options)
+{
+       dispatch_assert(!_TAILQ_IS_ENQUEUED(dmr, dmr_list));
+
+       bool disconnected = (options & DU_UNREGISTER_DISCONNECTED);
+       _dispatch_debug("machport[0x%08x]: unregistering for reply%s, ctxt %p",
+                       (mach_port_t)dmr->du_ident, disconnected ? " (disconnected)" : "",
+                       dmr->dmr_ctxt);
+       if (!_dispatch_unote_unregister(dmr, options)) {
+               _dispatch_debug("machport[0x%08x]: deferred delete kevent[%p]",
+                                               (mach_port_t)dmr->du_ident, dmr);
+               dispatch_assert(options == DU_UNREGISTER_DISCONNECTED);
+               return false;
+       }
+
+       dispatch_mach_msg_t dmsgr = NULL;
+       dispatch_queue_t drq = NULL;
+       if (disconnected) {
+               // The next call always transfers or consumes the voucher in
+               // the dmr, if there is one.
+               dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr,
+                       dmr->dmr_async_reply ? DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED
+                       : DISPATCH_MACH_DISCONNECTED);
+               if (dmr->dmr_ctxt) {
+                       drq = _dispatch_mach_msg_context_async_reply_queue(dmr->dmr_ctxt);
+               }
+               dispatch_assert(dmr->dmr_voucher == NULL);
+       } else if (dmr->dmr_voucher) {
+               _voucher_release(dmr->dmr_voucher);
+               dmr->dmr_voucher = NULL;
+       }
+       _dispatch_unote_dispose(dmr);
+
+       if (dmsgr) {
+               if (drq) {
+                       _dispatch_mach_push_async_reply_msg(dm, dmsgr, drq);
+               } else {
+                       _dispatch_mach_handle_or_push_received_msg(dm, dmsgr);
+               }
+       }
+       return true;
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_reply_waiter_register(dispatch_mach_t dm,
+               dispatch_mach_reply_refs_t dmr, mach_port_t reply_port,
+               dispatch_mach_msg_t dmsg, mach_msg_option_t msg_opts)
+{
+       dmr->du_owner_wref = _dispatch_ptr2wref(dm);
+       dmr->du_wlh = NULL;
+       dmr->du_filter = EVFILT_MACHPORT;
+       dmr->du_ident = reply_port;
+       if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) {
+               _dispatch_mach_reply_mark_reply_port_owned(dmr);
+       } else {
+               if (dmsg->dmsg_voucher) {
+                       dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher);
+               }
+               dmr->dmr_priority = _dispatch_priority_from_pp(dmsg->dmsg_priority);
+               // make reply context visible to the leaks tool rdar://11777199
+               dmr->dmr_ctxt = dmsg->do_ctxt;
+       }
+
+       _dispatch_debug("machport[0x%08x]: registering for sync reply, ctxt %p",
+                       reply_port, dmsg->do_ctxt);
+       _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
+       if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
+               DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev,
+                               "Reply already registered");
+       }
+       TAILQ_INSERT_TAIL(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
+       _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply_port,
+               dispatch_mach_msg_t dmsg)
+{
+       dispatch_mach_reply_refs_t dmr;
+       dispatch_priority_t mpri, pri, overcommit;
+       dispatch_wlh_t wlh;
+
+       dmr = dux_create(&_dispatch_mach_type_reply, reply_port, 0)._dmr;
+       dispatch_assert(dmr->du_is_direct);
+       dmr->du_owner_wref = _dispatch_ptr2wref(dm);
+       if (dmsg->dmsg_voucher) {
+               dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher);
+       }
+       dmr->dmr_priority = _dispatch_priority_from_pp(dmsg->dmsg_priority);
+       // make reply context visible to the leaks tool rdar://11777199
+       dmr->dmr_ctxt = dmsg->do_ctxt;
+
+       dispatch_queue_t drq = NULL;
+       if (dmsg->dmsg_options & DISPATCH_MACH_ASYNC_REPLY) {
+               dmr->dmr_async_reply = true;
+               drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt);
+       }
+
+       if (!drq) {
+               pri = dm->dq_priority;
+               wlh = dm->dm_recv_refs->du_wlh;
+       } else if (dx_type(drq) == DISPATCH_QUEUE_NETWORK_EVENT_TYPE) {
+               pri = DISPATCH_PRIORITY_FLAG_MANAGER;
+               wlh = (dispatch_wlh_t)drq;
+       } else if (dx_hastypeflag(drq, QUEUE_ROOT)) {
+               pri = drq->dq_priority;
+               wlh = DISPATCH_WLH_ANON;
+       } else if (drq == dm->do_targetq) {
+               pri = dm->dq_priority;
+               wlh = dm->dm_recv_refs->du_wlh;
+       } else if (!(pri = _dispatch_queue_compute_priority_and_wlh(drq, &wlh))) {
+               pri = drq->dq_priority;
+               wlh = DISPATCH_WLH_ANON;
+       }
+       if (pri & DISPATCH_PRIORITY_REQUESTED_MASK) {
+               overcommit = pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
+               pri &= DISPATCH_PRIORITY_REQUESTED_MASK;
+               mpri = _dispatch_priority_from_pp_strip_flags(dmsg->dmsg_priority);
+               if (pri < mpri) pri = mpri;
+               pri |= overcommit;
+       } else {
+               pri = DISPATCH_PRIORITY_FLAG_MANAGER;
+       }
+
+       _dispatch_debug("machport[0x%08x]: registering for reply, ctxt %p",
+                       reply_port, dmsg->do_ctxt);
+       _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
+       if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
+               DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev,
+                               "Reply already registered");
+       }
+       TAILQ_INSERT_TAIL(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
+       _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
+
+       if (!_dispatch_unote_register(dmr, wlh, pri)) {
+               _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
+               _dispatch_mach_reply_list_remove(dm, dmr);
+               _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
+               _dispatch_mach_reply_kevent_unregister(dm, dmr,
+                               DU_UNREGISTER_DISCONNECTED);
+       }
+}
+
+#pragma mark -
+#pragma mark dispatch_mach_msg
+
+DISPATCH_ALWAYS_INLINE DISPATCH_CONST
+static inline bool
+_dispatch_use_mach_special_reply_port(void)
+{
+#if DISPATCH_USE_MACH_SEND_SYNC_OVERRIDE
+       return true;
+#else
+#define thread_get_special_reply_port() ({__builtin_trap(); MACH_PORT_NULL;})
+       return false;
+#endif
+}
+
+static mach_port_t
+_dispatch_get_thread_reply_port(void)
+{
+       mach_port_t reply_port, mrp;
+       if (_dispatch_use_mach_special_reply_port()) {
+               mrp = _dispatch_get_thread_special_reply_port();
+       } else {
+               mrp = _dispatch_get_thread_mig_reply_port();
+       }
+       if (mrp) {
+               reply_port = mrp;
+               _dispatch_debug("machport[0x%08x]: borrowed thread sync reply port",
+                               reply_port);
+       } else {
+               if (_dispatch_use_mach_special_reply_port()) {
+                       reply_port = thread_get_special_reply_port();
+                       _dispatch_set_thread_special_reply_port(reply_port);
+               } else {
+                       reply_port = mach_reply_port();
+                       _dispatch_set_thread_mig_reply_port(reply_port);
+               }
+               if (unlikely(!MACH_PORT_VALID(reply_port))) {
+                       DISPATCH_CLIENT_CRASH(_dispatch_use_mach_special_reply_port(),
+                               "Unable to allocate reply port, possible port leak");
+               }
+               _dispatch_debug("machport[0x%08x]: allocated thread sync reply port",
+                               reply_port);
+       }
+       _dispatch_debug_machport(reply_port);
+       return reply_port;
+}
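+
+// The helpers above and below cache one sync reply port per thread (either
+// the special reply port or the MIG reply port, depending on
+// DISPATCH_USE_MACH_SEND_SYNC_OVERRIDE): _get borrows or allocates it,
+// _clear detaches it while an owned-port send-and-wait is in flight, and
+// _set restores it afterwards, or deallocates it if the thread has
+// re-allocated a port in the meantime.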
+
+static void
+_dispatch_clear_thread_reply_port(mach_port_t reply_port)
+{
+       mach_port_t mrp;
+       if (_dispatch_use_mach_special_reply_port()) {
+               mrp = _dispatch_get_thread_special_reply_port();
+       } else {
+               mrp = _dispatch_get_thread_mig_reply_port();
+       }
+       if (reply_port != mrp) {
+               if (mrp) {
+                       _dispatch_debug("machport[0x%08x]: did not clear thread sync reply "
+                                       "port (found 0x%08x)", reply_port, mrp);
+               }
+               return;
+       }
+       if (_dispatch_use_mach_special_reply_port()) {
+               _dispatch_set_thread_special_reply_port(MACH_PORT_NULL);
+       } else {
+               _dispatch_set_thread_mig_reply_port(MACH_PORT_NULL);
+       }
+       _dispatch_debug_machport(reply_port);
+       _dispatch_debug("machport[0x%08x]: cleared thread sync reply port",
+                       reply_port);
+}
+
+static void
+_dispatch_set_thread_reply_port(mach_port_t reply_port)
+{
+       _dispatch_debug_machport(reply_port);
+       mach_port_t mrp;
+       if (_dispatch_use_mach_special_reply_port()) {
+               mrp = _dispatch_get_thread_special_reply_port();
+       } else {
+               mrp = _dispatch_get_thread_mig_reply_port();
+       }
+       if (mrp) {
+               kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port,
+                               MACH_PORT_RIGHT_RECEIVE, -1);
+               DISPATCH_VERIFY_MIG(kr);
+               dispatch_assume_zero(kr);
+               _dispatch_debug("machport[0x%08x]: deallocated sync reply port "
+                               "(found 0x%08x)", reply_port, mrp);
+       } else {
+               if (_dispatch_use_mach_special_reply_port()) {
+                       _dispatch_set_thread_special_reply_port(reply_port);
+               } else {
+                       _dispatch_set_thread_mig_reply_port(reply_port);
+               }
+               _dispatch_debug("machport[0x%08x]: restored thread sync reply port",
+                               reply_port);
+       }
+}
+
+static inline mach_port_t
+_dispatch_mach_msg_get_remote_port(dispatch_object_t dou)
+{
+       mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg);
+       mach_port_t remote = hdr->msgh_remote_port;
+       return remote;
+}
+
+static inline mach_port_t
+_dispatch_mach_msg_get_reply_port(dispatch_object_t dou)
+{
+       mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg);
+       mach_port_t local = hdr->msgh_local_port;
+       if (!MACH_PORT_VALID(local) || MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) !=
+                       MACH_MSG_TYPE_MAKE_SEND_ONCE) return MACH_PORT_NULL;
+       return local;
+}
+
+static inline void
+_dispatch_mach_msg_set_reason(dispatch_mach_msg_t dmsg, mach_error_t err,
+               unsigned long reason)
+{
+       dispatch_assert_zero(reason & ~(unsigned long)code_emask);
+       dmsg->dmsg_error = ((err || !reason) ? err :
+                        err_local|err_sub(0x3e0)|(mach_error_t)reason);
+}
+
+static inline unsigned long
+_dispatch_mach_msg_get_reason(dispatch_mach_msg_t dmsg, mach_error_t *err_ptr)
+{
+       mach_error_t err = dmsg->dmsg_error;
+
+       if ((err & system_emask) == err_local && err_get_sub(err) == 0x3e0) {
+               *err_ptr = 0;
+               return err_get_code(err);
+       }
+       *err_ptr = err;
+       return err ? DISPATCH_MACH_MESSAGE_SEND_FAILED : DISPATCH_MACH_MESSAGE_SENT;
+}
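+
+// Worked example of the encoding above: for err == 0 and reason ==
+// DISPATCH_MACH_MESSAGE_NOT_SENT, _set_reason() stores
+// err_local|err_sub(0x3e0)|reason, i.e. the reason packed into the code
+// field of a Mach error in the private (local, 0x3e0) subsystem.
+// _get_reason() recognizes that subsystem, clears *err_ptr and returns the
+// reason; any other nonzero value is reported as
+// DISPATCH_MACH_MESSAGE_SEND_FAILED with the raw error in *err_ptr.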
+
+static inline dispatch_mach_msg_t
+_dispatch_mach_msg_create_recv(mach_msg_header_t *hdr, mach_msg_size_t siz,
+               dispatch_mach_reply_refs_t dmr, uint32_t flags)
+{
+       dispatch_mach_msg_destructor_t destructor;
+       dispatch_mach_msg_t dmsg;
+       voucher_t voucher;
+       pthread_priority_t pp;
+
+       if (dmr) {
+               _voucher_mach_msg_clear(hdr, false); // deallocate reply message voucher
+               pp = _dispatch_priority_to_pp(dmr->dmr_priority);
+               voucher = dmr->dmr_voucher;
+               dmr->dmr_voucher = NULL; // transfer reference
+       } else {
+               voucher = voucher_create_with_mach_msg(hdr);
+               pp = _dispatch_priority_compute_propagated(
+                               _voucher_get_priority(voucher), 0);
+       }
+
+       destructor = (flags & DISPATCH_EV_MSG_NEEDS_FREE) ?
+                       DISPATCH_MACH_MSG_DESTRUCTOR_FREE :
+                       DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT;
+       dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL);
+       if (!(flags & DISPATCH_EV_MSG_NEEDS_FREE)) {
+               _dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move,
+                               (uint64_t)hdr, (uint64_t)dmsg->dmsg_buf);
+       }
+       dmsg->dmsg_voucher = voucher;
+       dmsg->dmsg_priority = pp;
+       dmsg->do_ctxt = dmr ? dmr->dmr_ctxt : NULL;
+       _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_MESSAGE_RECEIVED);
+       _dispatch_voucher_debug("mach-msg[%p] create", voucher, dmsg);
+       _dispatch_voucher_ktrace_dmsg_push(dmsg);
+       return dmsg;
+}
+
+void
+_dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags,
+               mach_msg_header_t *hdr, mach_msg_size_t siz)
+{
+       // this function is very similar to what _dispatch_source_merge_evt
+       // does, but it can't reuse it: handling the message must be protected
+       // by the internal refcount between the first half and the trailer of
+       // what _dispatch_source_merge_evt does.
+
+       dispatch_mach_recv_refs_t dmrr = du._dmrr;
+       dispatch_mach_t dm = _dispatch_wref2ptr(dmrr->du_owner_wref);
+       dispatch_queue_flags_t dqf;
+       dispatch_mach_msg_t dmsg;
+
+       dispatch_assert(_dispatch_unote_needs_rearm(du));
+
+       if (flags & EV_VANISHED) {
+               DISPATCH_CLIENT_CRASH(du._du->du_ident,
+                               "Unexpected EV_VANISHED (do not destroy random mach ports)");
+       }
+
+       // Once we modify the queue atomic flags below, concurrent threads
+       // running _dispatch_mach_invoke2 may dispose of the source, so we
+       // can't safely borrow the reference we get from the muxnote udata
+       // anymore and need our own.
+       dispatch_wakeup_flags_t wflags = DISPATCH_WAKEUP_CONSUME_2;
+       _dispatch_retain_2(dm); // rdar://20382435
+
+       if (unlikely((flags & EV_ONESHOT) && !(flags & EV_DELETE))) {
+               dqf = _dispatch_queue_atomic_flags_set_and_clear(dm->_as_dq,
+                               DSF_DEFERRED_DELETE, DSF_ARMED);
+               _dispatch_debug("kevent-source[%p]: deferred delete oneshot kevent[%p]",
+                               dm, dmrr);
+       } else if (unlikely(flags & (EV_ONESHOT | EV_DELETE))) {
+               _dispatch_source_refs_unregister(dm->_as_ds,
+                               DU_UNREGISTER_ALREADY_DELETED);
+               dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
+               _dispatch_debug("kevent-source[%p]: deleted kevent[%p]", dm, dmrr);
+       } else {
+               dqf = _dispatch_queue_atomic_flags_clear(dm->_as_dq, DSF_ARMED);
+               _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", dm, dmrr);
+       }
+
+       _dispatch_debug_machport(hdr->msgh_remote_port);
+       _dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x",
+                       hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port);
+
+       if (dqf & DSF_CANCELED) {
+               _dispatch_debug("machport[0x%08x]: drop msg id 0x%x, reply on 0x%08x",
+                               hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port);
+               mach_msg_destroy(hdr);
+               if (flags & DISPATCH_EV_MSG_NEEDS_FREE) {
+                       free(hdr);
+               }
+               return dx_wakeup(dm, 0, wflags | DISPATCH_WAKEUP_MAKE_DIRTY);
+       }
+
+       // Once the mach channel disarming is visible, cancellation will switch to
+       // immediate deletion.  If we're preempted here, then the whole cancellation
+       // sequence may be complete by the time we really enqueue the message.
+       //
+       // _dispatch_mach_msg_invoke_with_mach() is responsible for filtering it out
+       // to keep the promise that DISPATCH_MACH_DISCONNECTED is the last
+       // event sent.
+
+       dmsg = _dispatch_mach_msg_create_recv(hdr, siz, NULL, flags);
+       _dispatch_mach_handle_or_push_received_msg(dm, dmsg);
+       return _dispatch_release_2_tailcall(dm);
+}
+
+void
+_dispatch_mach_reply_merge_msg(dispatch_unote_t du, uint32_t flags,
+               mach_msg_header_t *hdr, mach_msg_size_t siz)
+{
+       dispatch_mach_reply_refs_t dmr = du._dmr;
+       dispatch_mach_t dm = _dispatch_wref2ptr(dmr->du_owner_wref);
+       bool canceled = (_dispatch_queue_atomic_flags(dm->_as_dq) & DSF_CANCELED);
+       dispatch_mach_msg_t dmsg = NULL;
+
+       _dispatch_debug_machport(hdr->msgh_remote_port);
+       _dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x",
+                       hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port);
+
+       if (!canceled) {
+               dmsg = _dispatch_mach_msg_create_recv(hdr, siz, dmr, flags);
+       }
+
+       if (dmsg) {
+               dispatch_queue_t drq = NULL;
+               if (dmsg->do_ctxt) {
+                       drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt);
+               }
+               if (drq) {
+                       _dispatch_mach_push_async_reply_msg(dm, dmsg, drq);
+               } else {
+                       _dispatch_mach_handle_or_push_received_msg(dm, dmsg);
+               }
+       } else {
+               _dispatch_debug("machport[0x%08x]: drop msg id 0x%x, reply on 0x%08x",
+                               hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port);
+               mach_msg_destroy(hdr);
+               if (flags & DISPATCH_EV_MSG_NEEDS_FREE) {
+                       free(hdr);
+               }
+       }
+
+       dispatch_wakeup_flags_t wflags = 0;
+       uint32_t options = DU_UNREGISTER_IMMEDIATE_DELETE;
+       if (canceled) {
+               options |= DU_UNREGISTER_DISCONNECTED;
+       }
+
+       _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
+       bool removed = _dispatch_mach_reply_list_remove(dm, dmr);
+       dispatch_assert(removed);
+       if (TAILQ_EMPTY(&dm->dm_send_refs->dmsr_replies) &&
+                       (dm->dm_send_refs->dmsr_disconnect_cnt ||
+                       (dm->dq_atomic_flags & DSF_CANCELED))) {
+               // When the list is empty, _dispatch_mach_disconnect() may release the
+               // last reference count on the Mach channel. To avoid this, take our
+               // own reference before releasing the lock.
+               wflags = DISPATCH_WAKEUP_MAKE_DIRTY | DISPATCH_WAKEUP_CONSUME_2;
+               _dispatch_retain_2(dm);
+       }
+       _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
+
+       bool result = _dispatch_mach_reply_kevent_unregister(dm, dmr, options);
+       dispatch_assert(result);
+       if (wflags) dx_wakeup(dm, 0, wflags);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_mach_msg_t
+_dispatch_mach_msg_reply_recv(dispatch_mach_t dm,
+               dispatch_mach_reply_refs_t dmr, mach_port_t reply_port,
+               mach_port_t send)
+{
+       if (slowpath(!MACH_PORT_VALID(reply_port))) {
+               DISPATCH_CLIENT_CRASH(reply_port, "Invalid reply port");
+       }
+       void *ctxt = dmr->dmr_ctxt;
+       mach_msg_header_t *hdr, *hdr2 = NULL;
+       void *hdr_copyout_addr;
+       mach_msg_size_t siz, msgsiz = 0;
+       mach_msg_return_t kr;
+       mach_msg_option_t options;
+       mach_port_t notify = MACH_PORT_NULL;
+       siz = mach_vm_round_page(DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE +
+                       DISPATCH_MACH_TRAILER_SIZE);
+       hdr = alloca(siz);
+       for (mach_vm_address_t p = mach_vm_trunc_page(
+                       (mach_vm_address_t)hdr + vm_page_size);
+                       p < (mach_vm_address_t)hdr + siz; p += vm_page_size) {
+               *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard
+       }
+       options = DISPATCH_MACH_RCV_OPTIONS & (~MACH_RCV_VOUCHER);
+       if (MACH_PORT_VALID(send)) {
+               notify = send;
+               options |= MACH_RCV_SYNC_WAIT;
+       }
+
+retry:
+       _dispatch_debug_machport(reply_port);
+       _dispatch_debug("machport[0x%08x]: MACH_RCV_MSG %s", reply_port,
+                       (options & MACH_RCV_TIMEOUT) ? "poll" : "wait");
+       kr = mach_msg(hdr, options, 0, siz, reply_port, MACH_MSG_TIMEOUT_NONE,
+                       notify);
+       hdr_copyout_addr = hdr;
+       _dispatch_debug_machport(reply_port);
+       _dispatch_debug("machport[0x%08x]: MACH_RCV_MSG (size %u, opts 0x%x) "
+                       "returned: %s - 0x%x", reply_port, siz, options,
+                       mach_error_string(kr), kr);
+       switch (kr) {
+       case MACH_RCV_TOO_LARGE:
+               if (!fastpath(hdr->msgh_size <= UINT_MAX -
+                               DISPATCH_MACH_TRAILER_SIZE)) {
+                       DISPATCH_CLIENT_CRASH(hdr->msgh_size, "Overlarge message");
+               }
+               if (options & MACH_RCV_LARGE) {
+                       msgsiz = hdr->msgh_size + DISPATCH_MACH_TRAILER_SIZE;
+                       hdr2 = malloc(msgsiz);
+                       if (dispatch_assume(hdr2)) {
+                               hdr = hdr2;
+                               siz = msgsiz;
+                       }
+                       options |= MACH_RCV_TIMEOUT;
+                       options &= ~MACH_RCV_LARGE;
+                       goto retry;
+               }
+               _dispatch_log("BUG in libdispatch client: "
+                               "dispatch_mach_send_and_wait_for_reply: dropped message too "
+                               "large to fit in memory: id = 0x%x, size = %u", hdr->msgh_id,
+                               hdr->msgh_size);
+               break;
+       case MACH_RCV_INVALID_NAME: // rdar://problem/21963848
+       case MACH_RCV_PORT_CHANGED: // rdar://problem/21885327
+       case MACH_RCV_PORT_DIED:
+               // channel was disconnected/canceled and reply port destroyed
+               _dispatch_debug("machport[0x%08x]: sync reply port destroyed, ctxt %p: "
+                               "%s - 0x%x", reply_port, ctxt, mach_error_string(kr), kr);
+               goto out;
+       case MACH_MSG_SUCCESS:
+               if (hdr->msgh_remote_port) {
+                       _dispatch_debug_machport(hdr->msgh_remote_port);
+               }
+               _dispatch_debug("machport[0x%08x]: received msg id 0x%x, size = %u, "
+                               "reply on 0x%08x", hdr->msgh_local_port, hdr->msgh_id,
+                               hdr->msgh_size, hdr->msgh_remote_port);
+               siz = hdr->msgh_size + DISPATCH_MACH_TRAILER_SIZE;
+               if (hdr2 && siz < msgsiz) {
+                       // shrink the buffer to the size actually received
+                       void *shrink = realloc(hdr2, siz);
+                       if (shrink) hdr = hdr2 = shrink;
+               }
+               break;
+       case MACH_RCV_INVALID_NOTIFY:
+       default:
+               DISPATCH_INTERNAL_CRASH(kr, "Unexpected error from mach_msg_receive");
+               break;
+       }
+       _dispatch_mach_msg_reply_received(dm, dmr, hdr->msgh_local_port);
+       hdr->msgh_local_port = MACH_PORT_NULL;
+       if (slowpath((dm->dq_atomic_flags & DSF_CANCELED) || kr)) {
+               if (!kr) mach_msg_destroy(hdr);
+               goto out;
+       }
+       dispatch_mach_msg_t dmsg;
+       dispatch_mach_msg_destructor_t destructor = (!hdr2) ?
+                       DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT :
+                       DISPATCH_MACH_MSG_DESTRUCTOR_FREE;
+       dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL);
+       if (!hdr2 || hdr != hdr_copyout_addr) {
+               _dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move,
+                               (uint64_t)hdr_copyout_addr,
+                               (uint64_t)_dispatch_mach_msg_get_msg(dmsg));
+       }
+       dmsg->do_ctxt = ctxt;
+       return dmsg;
+out:
+       free(hdr2);
+       return NULL;
+}
+
+static inline void
+_dispatch_mach_msg_reply_received(dispatch_mach_t dm,
+               dispatch_mach_reply_refs_t dmr, mach_port_t local_port)
+{
+       bool removed = _dispatch_mach_reply_tryremove(dm, dmr);
+       if (!MACH_PORT_VALID(local_port) || !removed) {
+               // port moved/destroyed during receive, or reply waiter was never
+               // registered or already removed (disconnected)
+               return;
+       }
+       mach_port_t reply_port = _dispatch_mach_reply_get_reply_port(
+                       (mach_port_t)dmr->du_ident);
+       _dispatch_debug("machport[0x%08x]: unregistered for sync reply, ctxt %p",
+                       reply_port, dmr->dmr_ctxt);
+       if (_dispatch_mach_reply_is_reply_port_owned(dmr)) {
+               _dispatch_set_thread_reply_port(reply_port);
+               if (local_port != reply_port) {
+                       DISPATCH_CLIENT_CRASH(local_port,
+                                       "Reply received on unexpected port");
+               }
+               return;
+       }
+       mach_msg_header_t *hdr;
+       dispatch_mach_msg_t dmsg;
+       dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t),
+                       DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
+       hdr->msgh_local_port = local_port;
+       dmsg->dmsg_voucher = dmr->dmr_voucher;
+       dmr->dmr_voucher = NULL;  // transfer reference
+       dmsg->dmsg_priority = _dispatch_priority_to_pp(dmr->dmr_priority);
+       dmsg->do_ctxt = dmr->dmr_ctxt;
+       _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_REPLY_RECEIVED);
+       return _dispatch_mach_handle_or_push_received_msg(dm, dmsg);
+}
+
+static inline void
+_dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port,
+               mach_port_t remote_port)
+{
+       mach_msg_header_t *hdr;
+       dispatch_mach_msg_t dmsg;
+       dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t),
+                       DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
+       if (local_port) hdr->msgh_local_port = local_port;
+       if (remote_port) hdr->msgh_remote_port = remote_port;
+       _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_DISCONNECTED);
+       _dispatch_debug("machport[0x%08x]: %s right disconnected", local_port ?
+                       local_port : remote_port, local_port ? "receive" : "send");
+       return _dispatch_mach_handle_or_push_received_msg(dm, dmsg);
+}
+
+static inline dispatch_mach_msg_t
+_dispatch_mach_msg_create_reply_disconnected(dispatch_object_t dou,
+               dispatch_mach_reply_refs_t dmr, dispatch_mach_reason_t reason)
+{
+       dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr;
+       mach_port_t reply_port = dmsg ? dmsg->dmsg_reply :
+                       _dispatch_mach_reply_get_reply_port((mach_port_t)dmr->du_ident);
+       voucher_t v;
+
+       if (!reply_port) {
+               if (!dmsg) {
+                       v = dmr->dmr_voucher;
+                       dmr->dmr_voucher = NULL; // transfer reference
+                       if (v) _voucher_release(v);
+               }
+               return NULL;
+       }
+
+       if (dmsg) {
+               v = dmsg->dmsg_voucher;
+               if (v) _voucher_retain(v);
+       } else {
+               v = dmr->dmr_voucher;
+               dmr->dmr_voucher = NULL; // transfer reference
+       }
+
+       if ((dmsg && (dmsg->dmsg_options & DISPATCH_MACH_WAIT_FOR_REPLY) &&
+                       (dmsg->dmsg_options & DISPATCH_MACH_OWNED_REPLY_PORT)) ||
+                       (dmr && !_dispatch_unote_registered(dmr) &&
+                       _dispatch_mach_reply_is_reply_port_owned(dmr))) {
+               if (v) _voucher_release(v);
+               // deallocate owned reply port to break _dispatch_mach_msg_reply_recv
+               // out of waiting in mach_msg(MACH_RCV_MSG)
+               kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port,
+                               MACH_PORT_RIGHT_RECEIVE, -1);
+               DISPATCH_VERIFY_MIG(kr);
+               dispatch_assume_zero(kr);
+               return NULL;
+       }
+
+       mach_msg_header_t *hdr;
+       dmsgr = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t),
+                       DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
+       dmsgr->dmsg_voucher = v;
+       hdr->msgh_local_port = reply_port;
+       if (dmsg) {
+               dmsgr->dmsg_priority = dmsg->dmsg_priority;
+               dmsgr->do_ctxt = dmsg->do_ctxt;
+       } else {
+               dmsgr->dmsg_priority = _dispatch_priority_to_pp(dmr->dmr_priority);
+               dmsgr->do_ctxt = dmr->dmr_ctxt;
+       }
+       _dispatch_mach_msg_set_reason(dmsgr, 0, reason);
+       _dispatch_debug("machport[0x%08x]: reply disconnected, ctxt %p",
+                       hdr->msgh_local_port, dmsgr->do_ctxt);
+       return dmsgr;
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou)
+{
+       dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr;
+       dispatch_queue_t drq = NULL;
+       mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg);
+       mach_msg_option_t msg_opts = dmsg->dmsg_options;
+       _dispatch_debug("machport[0x%08x]: not sent msg id 0x%x, ctxt %p, "
+                       "msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x",
+                       msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt,
+                       msg_opts, msg->msgh_voucher_port, dmsg->dmsg_reply);
+       unsigned long reason = (msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY) ?
+                       0 : DISPATCH_MACH_MESSAGE_NOT_SENT;
+       dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL,
+                       msg_opts & DISPATCH_MACH_ASYNC_REPLY
+                       ? DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED
+                       : DISPATCH_MACH_DISCONNECTED);
+       if (dmsg->do_ctxt) {
+               drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt);
+       }
+       _dispatch_mach_msg_set_reason(dmsg, 0, reason);
+       _dispatch_mach_handle_or_push_received_msg(dm, dmsg);
+       if (dmsgr) {
+               if (drq) {
+                       _dispatch_mach_push_async_reply_msg(dm, dmsgr, drq);
+               } else {
+                       _dispatch_mach_handle_or_push_received_msg(dm, dmsgr);
+               }
+       }
+}
+
+DISPATCH_NOINLINE
+static uint32_t
+_dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou,
+               dispatch_mach_reply_refs_t dmr, dispatch_qos_t qos,
+               dispatch_mach_send_invoke_flags_t send_flags)
+{
+       dispatch_mach_send_refs_t dsrr = dm->dm_send_refs;
+       dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr = NULL;
+       voucher_t voucher = dmsg->dmsg_voucher;
+       dispatch_queue_t drq = NULL;
+       mach_voucher_t ipc_kvoucher = MACH_VOUCHER_NULL;
+       uint32_t send_status = 0;
+       bool clear_voucher = false, kvoucher_move_send = false;
+       mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg);
+       bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) ==
+                       MACH_MSG_TYPE_MOVE_SEND_ONCE);
+       mach_port_t reply_port = dmsg->dmsg_reply;
+       if (!is_reply) {
+               dm->dm_needs_mgr = 0;
+               if (unlikely(dsrr->dmsr_checkin && dmsg != dsrr->dmsr_checkin)) {
+                       // send initial checkin message
+                       if (unlikely(_dispatch_unote_registered(dsrr) &&
+                                       _dispatch_queue_get_current() != &_dispatch_mgr_q)) {
+                               // send kevent must be uninstalled on the manager queue
+                               dm->dm_needs_mgr = 1;
+                               goto out;
+                       }
+                       if (unlikely(!_dispatch_mach_msg_send(dm,
+                                       dsrr->dmsr_checkin, NULL, qos, DM_SEND_INVOKE_NONE))) {
+                               goto out;
+                       }
+                       dsrr->dmsr_checkin = NULL;
+               }
+       }
+       mach_msg_return_t kr = 0;
+       mach_msg_option_t opts = 0, msg_opts = dmsg->dmsg_options;
+       if (!(msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY)) {
+               mach_msg_priority_t msg_priority = MACH_MSG_PRIORITY_UNSPECIFIED;
+               opts = MACH_SEND_MSG | (msg_opts & ~DISPATCH_MACH_OPTIONS_MASK);
+               if (!is_reply) {
+                       if (dmsg != dsrr->dmsr_checkin) {
+                               msg->msgh_remote_port = dsrr->dmsr_send;
+                       }
+                       if (_dispatch_queue_get_current() == &_dispatch_mgr_q) {
+                               if (unlikely(!_dispatch_unote_registered(dsrr))) {
+                                       _dispatch_mach_notification_kevent_register(dm,
+                                                       msg->msgh_remote_port);
+                               }
+                               if (likely(_dispatch_unote_registered(dsrr))) {
+                                       if (os_atomic_load2o(dsrr, dmsr_notification_armed,
+                                                       relaxed)) {
+                                               goto out;
+                                       }
+                                       opts |= MACH_SEND_NOTIFY;
+                               }
+                       }
+                       opts |= MACH_SEND_TIMEOUT;
+                       if (dmsg->dmsg_priority != _voucher_get_priority(voucher)) {
+                               ipc_kvoucher = _voucher_create_mach_voucher_with_priority(
+                                               voucher, dmsg->dmsg_priority);
+                       }
+                       _dispatch_voucher_debug("mach-msg[%p] msg_set", voucher, dmsg);
+                       if (ipc_kvoucher) {
+                               kvoucher_move_send = true;
+                               clear_voucher = _voucher_mach_msg_set_mach_voucher(msg,
+                                               ipc_kvoucher, kvoucher_move_send);
+                       } else {
+                               clear_voucher = _voucher_mach_msg_set(msg, voucher);
+                       }
+                       if (qos) {
+                               opts |= MACH_SEND_OVERRIDE;
+                               msg_priority = (mach_msg_priority_t)
+                                               _dispatch_priority_compute_propagated(
+                                               _dispatch_qos_to_pp(qos), 0);
+                       }
+               }
+               _dispatch_debug_machport(msg->msgh_remote_port);
+               if (reply_port) _dispatch_debug_machport(reply_port);
+               if (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) {
+                       if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) {
+                               if (_dispatch_use_mach_special_reply_port()) {
+                                       opts |= MACH_SEND_SYNC_OVERRIDE;
+                               }
+                               _dispatch_clear_thread_reply_port(reply_port);
+                       }
+                       _dispatch_mach_reply_waiter_register(dm, dmr, reply_port, dmsg,
+                                       msg_opts);
+               }
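+               // N.B. with MACH_SEND_OVERRIDE set, the final (notify)
+               // argument of mach_msg() is repurposed to carry the
+               // mach_msg_priority_t override computed above.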
+               kr = mach_msg(msg, opts, msg->msgh_size, 0, MACH_PORT_NULL, 0,
+                               msg_priority);
+               _dispatch_debug("machport[0x%08x]: sent msg id 0x%x, ctxt %p, "
+                               "opts 0x%x, msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x: "
+                               "%s - 0x%x", msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt,
+                               opts, msg_opts, msg->msgh_voucher_port, reply_port,
+                               mach_error_string(kr), kr);
+               if (unlikely(kr && (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY))) {
+                       _dispatch_mach_reply_waiter_unregister(dm, dmr,
+                                       DU_UNREGISTER_REPLY_REMOVE);
+               }
+               if (clear_voucher) {
+                       if (kr == MACH_SEND_INVALID_VOUCHER && msg->msgh_voucher_port) {
+                               DISPATCH_CLIENT_CRASH(kr, "Voucher port corruption");
+                       }
+                       mach_voucher_t kv;
+                       kv = _voucher_mach_msg_clear(msg, kvoucher_move_send);
+                       if (kvoucher_move_send) ipc_kvoucher = kv;
+               }
+       }
+       if (kr == MACH_SEND_TIMED_OUT && (opts & MACH_SEND_TIMEOUT)) {
+               if (opts & MACH_SEND_NOTIFY) {
+                       _dispatch_debug("machport[0x%08x]: send-possible notification "
+                                       "armed", (mach_port_t)dsrr->du_ident);
+                       _dispatch_mach_notification_set_armed(dsrr);
+               } else {
+                       // send kevent must be installed on the manager queue
+                       dm->dm_needs_mgr = 1;
+               }
+               if (ipc_kvoucher) {
+                       _dispatch_kvoucher_debug("reuse on re-send", ipc_kvoucher);
+                       voucher_t ipc_voucher;
+                       ipc_voucher = _voucher_create_with_priority_and_mach_voucher(
+                                       voucher, dmsg->dmsg_priority, ipc_kvoucher);
+                       _dispatch_voucher_debug("mach-msg[%p] replace voucher[%p]",
+                                       ipc_voucher, dmsg, voucher);
+                       if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher);
+                       dmsg->dmsg_voucher = ipc_voucher;
+               }
+               goto out;
+       } else if (ipc_kvoucher && (kr || !kvoucher_move_send)) {
+               _voucher_dealloc_mach_voucher(ipc_kvoucher);
+       }
+       dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
+       if (!(msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) && !kr && reply_port &&
+                       !(_dispatch_unote_registered(dmrr) &&
+                       dmrr->du_ident == reply_port)) {
+               _dispatch_mach_reply_kevent_register(dm, reply_port, dmsg);
+       }
+       if (unlikely(!is_reply && dmsg == dsrr->dmsr_checkin &&
+                       _dispatch_unote_registered(dsrr))) {
+               _dispatch_mach_notification_kevent_unregister(dm);
+       }
+       if (slowpath(kr)) {
+               // Send failed, so reply was never registered <rdar://problem/14309159>
+               dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL,
+                               msg_opts & DISPATCH_MACH_ASYNC_REPLY
+                               ? DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED
+                               : DISPATCH_MACH_DISCONNECTED);
+               if (dmsg->do_ctxt) {
+                       drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt);
+               }
+       }
+       _dispatch_mach_msg_set_reason(dmsg, kr, 0);
+       if ((send_flags & DM_SEND_INVOKE_IMMEDIATE_SEND) &&
+                       (msg_opts & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT)) {
+               // Return sent message synchronously <rdar://problem/25947334>
+               send_status |= DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT;
+       } else {
+               _dispatch_mach_handle_or_push_received_msg(dm, dmsg);
+       }
+       if (dmsgr) {
+               if (drq) {
+                       _dispatch_mach_push_async_reply_msg(dm, dmsgr, drq);
+               } else {
+                       _dispatch_mach_handle_or_push_received_msg(dm, dmsgr);
+               }
+       }
+       send_status |= DM_SEND_STATUS_SUCCESS;
+out:
+       return send_status;
+}
+
+#pragma mark -
+#pragma mark dispatch_mach_send_refs_t
+
+#define _dmsr_state_needs_lock_override(dq_state, qos) \
+               unlikely(qos < _dq_state_max_qos(dq_state))
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_qos_t
+_dmsr_state_max_qos(uint64_t dmsr_state)
+{
+       return _dq_state_max_qos(dmsr_state);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dmsr_state_needs_override(uint64_t dmsr_state, dispatch_qos_t qos)
+{
+       dmsr_state &= DISPATCH_MACH_STATE_MAX_QOS_MASK;
+       return dmsr_state < _dq_state_from_qos(qos);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline uint64_t
+_dmsr_state_merge_override(uint64_t dmsr_state, dispatch_qos_t qos)
+{
+       if (_dmsr_state_needs_override(dmsr_state, qos)) {
+               dmsr_state &= ~DISPATCH_MACH_STATE_MAX_QOS_MASK;
+               dmsr_state |= _dq_state_from_qos(qos);
+               dmsr_state |= DISPATCH_MACH_STATE_DIRTY;
+               dmsr_state |= DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
+       }
+       return dmsr_state;
+}
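+
+// Worked example of the merge above: if dmsr_state currently encodes a max
+// QoS of utility and an override to user-initiated arrives, the QoS bits
+// are replaced with the higher class and both DIRTY and RECEIVED_OVERRIDE
+// are set, so the draining thread re-evaluates its work and propagates the
+// override to its root queue on unlock (see _dispatch_mach_send_drain).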
+
+#define _dispatch_mach_send_push_update_tail(dmsr, tail) \
+               os_mpsc_push_update_tail(dmsr, dmsr, tail, do_next)
+#define _dispatch_mach_send_push_update_head(dmsr, head) \
+               os_mpsc_push_update_head(dmsr, dmsr, head)
+#define _dispatch_mach_send_get_head(dmsr) \
+               os_mpsc_get_head(dmsr, dmsr)
+#define _dispatch_mach_send_unpop_head(dmsr, dc, dc_next) \
+               os_mpsc_undo_pop_head(dmsr, dmsr, dc, dc_next, do_next)
+#define _dispatch_mach_send_pop_head(dmsr, head) \
+               os_mpsc_pop_head(dmsr, dmsr, head, do_next)
+
+#define dm_push(dm, dc, qos) \
+               _dispatch_queue_push((dm)->_as_dq, dc, qos)
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_mach_send_push_inline(dispatch_mach_send_refs_t dmsr,
+               dispatch_object_t dou)
+{
+       if (_dispatch_mach_send_push_update_tail(dmsr, dou._do)) {
+               _dispatch_mach_send_push_update_head(dmsr, dou._do);
+               return true;
+       }
+       return false;
+}
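+
+// A short sketch of the os_mpsc protocol used above, going by the macros:
+// dmsr_head/dmsr_tail form a multi-producer single-consumer list linked
+// through do_next. _dispatch_mach_send_push_update_tail() atomically swaps
+// the tail and reports whether the previous tail was empty; only that
+// producer then publishes the head, and the boolean returned by
+// _dispatch_mach_send_push_inline() tells callers the queue went from empty
+// to non-empty, i.e. that a wakeup/drain is needed.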
+
+DISPATCH_NOINLINE
+static bool
+_dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags,
+               dispatch_mach_send_invoke_flags_t send_flags)
+{
+       dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+       dispatch_mach_reply_refs_t dmr;
+       dispatch_mach_msg_t dmsg;
+       struct dispatch_object_s *dc = NULL, *next_dc = NULL;
+       dispatch_qos_t qos = _dmsr_state_max_qos(dmsr->dmsr_state);
+       uint64_t old_state, new_state;
+       uint32_t send_status;
+       bool needs_mgr, disconnecting, returning_send_result = false;
+
+again:
+       needs_mgr = false; disconnecting = false;
+       while (dmsr->dmsr_tail) {
+               dc = _dispatch_mach_send_get_head(dmsr);
+               do {
+                       dispatch_mach_send_invoke_flags_t sf = send_flags;
+                       // Only request immediate send result for the first message
+                       send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK;
+                       next_dc = _dispatch_mach_send_pop_head(dmsr, dc);
+                       if (_dispatch_object_has_type(dc,
+                                       DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) {
+                               if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) {
+                                       goto partial_drain;
+                               }
+                               _dispatch_continuation_pop(dc, NULL, flags, dm->_as_dq);
+                               continue;
+                       }
+                       if (_dispatch_object_is_sync_waiter(dc)) {
+                               dmsg = ((dispatch_continuation_t)dc)->dc_data;
+                               dmr = ((dispatch_continuation_t)dc)->dc_other;
+                       } else if (_dispatch_object_has_vtable(dc)) {
+                               dmsg = (dispatch_mach_msg_t)dc;
+                               dmr = NULL;
+                       } else {
+                               if (_dispatch_unote_registered(dmsr) &&
+                                               (_dispatch_queue_get_current() != &_dispatch_mgr_q)) {
+                                       // send kevent must be uninstalled on the manager queue
+                                       needs_mgr = true;
+                                       goto partial_drain;
+                               }
+                               if (unlikely(!_dispatch_mach_reconnect_invoke(dm, dc))) {
+                                       disconnecting = true;
+                                       goto partial_drain;
+                               }
+                               _dispatch_perfmon_workitem_inc();
+                               continue;
+                       }
+                       _dispatch_voucher_ktrace_dmsg_pop(dmsg);
+                       if (unlikely(dmsr->dmsr_disconnect_cnt ||
+                                       (dm->dq_atomic_flags & DSF_CANCELED))) {
+                               _dispatch_mach_msg_not_sent(dm, dmsg);
+                               _dispatch_perfmon_workitem_inc();
+                               continue;
+                       }
+                       send_status = _dispatch_mach_msg_send(dm, dmsg, dmr, qos, sf);
+                       if (unlikely(!send_status)) {
+                               goto partial_drain;
+                       }
+                       if (send_status & DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT) {
+                               returning_send_result = true;
+                       }
+                       _dispatch_perfmon_workitem_inc();
+               } while ((dc = next_dc));
+       }
+
+       os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, {
+               if (old_state & DISPATCH_MACH_STATE_DIRTY) {
+                       new_state = old_state;
+                       new_state &= ~DISPATCH_MACH_STATE_DIRTY;
+                       new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
+                       new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
+               } else {
+                       // unlock
+                       new_state = 0;
+               }
+       });
+       goto out;
+
+partial_drain:
+       // if this is not a complete drain, we must undo some things
+       _dispatch_mach_send_unpop_head(dmsr, dc, next_dc);
+
+       if (_dispatch_object_has_type(dc,
+                       DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) {
+               os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, {
+                       new_state = old_state;
+                       new_state |= DISPATCH_MACH_STATE_DIRTY;
+                       new_state |= DISPATCH_MACH_STATE_PENDING_BARRIER;
+                       new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK;
+                       new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
+               });
+       } else {
+               os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, {
+                       new_state = old_state;
+                       if (old_state & (DISPATCH_MACH_STATE_DIRTY |
+                                       DISPATCH_MACH_STATE_RECEIVED_OVERRIDE)) {
+                               new_state &= ~DISPATCH_MACH_STATE_DIRTY;
+                               new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
+                               new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
+                       } else {
+                               new_state |= DISPATCH_MACH_STATE_DIRTY;
+                               new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK;
+                       }
+               });
+       }
+
+out:
+       if (old_state & DISPATCH_MACH_STATE_RECEIVED_OVERRIDE) {
+               // Ensure that the root queue sees that this thread was overridden.
+               _dispatch_set_basepri_override_qos(_dmsr_state_max_qos(old_state));
+       }
+
+       if (unlikely(new_state & DISPATCH_MACH_STATE_UNLOCK_MASK)) {
+               qos = _dmsr_state_max_qos(new_state);
+               os_atomic_thread_fence(dependency);
+               dmsr = os_atomic_force_dependency_on(dmsr, new_state);
+               goto again;
+       }
+
+       if (new_state & DISPATCH_MACH_STATE_PENDING_BARRIER) {
+               qos = _dmsr_state_max_qos(new_state);
+               _dispatch_mach_push_send_barrier_drain(dm, qos);
+       } else {
+               if (needs_mgr || dm->dm_needs_mgr) {
+                       qos = _dmsr_state_max_qos(new_state);
+               } else {
+                       qos = 0;
+               }
+               if (!disconnecting) dx_wakeup(dm, qos, DISPATCH_WAKEUP_MAKE_DIRTY);
+       }
+       return returning_send_result;
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_send_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags,
+               dispatch_mach_send_invoke_flags_t send_flags)
+{
+       dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+       dispatch_lock owner_self = _dispatch_lock_value_for_self();
+       uint64_t old_state, new_state;
+
+       uint64_t canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK;
+       uint64_t canlock_state = 0;
+
+       if (send_flags & DM_SEND_INVOKE_NEEDS_BARRIER) {
+               canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER;
+               canlock_state = DISPATCH_MACH_STATE_PENDING_BARRIER;
+       } else if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) {
+               canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER;
+       }
+
+       dispatch_qos_t oq_floor = _dispatch_get_basepri_override_qos_floor();
+retry:
+       os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, acquire, {
+               new_state = old_state;
+               if (unlikely((old_state & canlock_mask) != canlock_state)) {
+                       if (!(send_flags & DM_SEND_INVOKE_MAKE_DIRTY)) {
+                               os_atomic_rmw_loop_give_up(break);
+                       }
+                       new_state |= DISPATCH_MACH_STATE_DIRTY;
+               } else {
+                       if (_dmsr_state_needs_lock_override(old_state, oq_floor)) {
+                               os_atomic_rmw_loop_give_up({
+                                       oq_floor = _dispatch_queue_override_self(old_state);
+                                       goto retry;
+                               });
+                       }
+                       new_state |= owner_self;
+                       new_state &= ~DISPATCH_MACH_STATE_DIRTY;
+                       new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
+                       new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
+               }
+       });
+
+       if (unlikely((old_state & canlock_mask) != canlock_state)) {
+               return;
+       }
+       if (send_flags & DM_SEND_INVOKE_CANCEL) {
+               _dispatch_mach_cancel(dm);
+       }
+       _dispatch_mach_send_drain(dm, flags, send_flags);
+}
+
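+/*
+ * Editorial note: the lock-acquisition predicate above reduces to
+ *
+ *	can_lock  <=>  (old_state & canlock_mask) == canlock_state
+ *
+ * which, per send_flags, works out to:
+ *
+ *	DM_SEND_INVOKE_NEEDS_BARRIER    lock free AND a barrier is pending
+ *	DM_SEND_INVOKE_CAN_RUN_BARRIER  lock free (pending barrier is fine)
+ *	neither                         lock free AND no barrier pending
+ *
+ * When the lock cannot be taken and DM_SEND_INVOKE_MAKE_DIRTY is set,
+ * the DIRTY bit is left behind so that the current owner redrains.
+ */
+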
+DISPATCH_NOINLINE
+void
+_dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc,
+               DISPATCH_UNUSED dispatch_invoke_context_t dic,
+               dispatch_invoke_flags_t flags)
+{
+       dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current();
+       uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT;
+       dispatch_thread_frame_s dtf;
+
+       DISPATCH_COMPILER_CAN_ASSUME(dc->dc_priority == DISPATCH_NO_PRIORITY);
+       DISPATCH_COMPILER_CAN_ASSUME(dc->dc_voucher == DISPATCH_NO_VOUCHER);
+       // hide the mach channel (see _dispatch_mach_barrier_invoke comment)
+       _dispatch_thread_frame_stash(&dtf);
+       _dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, dc_flags,{
+               _dispatch_mach_send_invoke(dm, flags,
+                               DM_SEND_INVOKE_NEEDS_BARRIER | DM_SEND_INVOKE_CAN_RUN_BARRIER);
+       });
+       _dispatch_thread_frame_unstash(&dtf);
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_push_send_barrier_drain(dispatch_mach_t dm, dispatch_qos_t qos)
+{
+       dispatch_continuation_t dc = _dispatch_continuation_alloc();
+
+       dc->do_vtable = DC_VTABLE(MACH_SEND_BARRRIER_DRAIN);
+       dc->dc_func = NULL;
+       dc->dc_ctxt = NULL;
+       dc->dc_voucher = DISPATCH_NO_VOUCHER;
+       dc->dc_priority = DISPATCH_NO_PRIORITY;
+       dm_push(dm, dc, qos);
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_continuation_t dc,
+               dispatch_qos_t qos)
+{
+       dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+       uint64_t old_state, new_state, state_flags = 0;
+       dispatch_tid owner;
+       bool wakeup;
+
+       // <rdar://problem/25896179> when pushing a send barrier that destroys
+       // the last reference to this channel, and the send queue is already
+       // draining on another thread, the send barrier may run as soon as
+       // _dispatch_mach_send_push_inline() returns.
+       _dispatch_retain_2(dm);
+
+       wakeup = _dispatch_mach_send_push_inline(dmsr, dc);
+       if (wakeup) {
+               state_flags = DISPATCH_MACH_STATE_DIRTY;
+               if (dc->do_vtable == DC_VTABLE(MACH_SEND_BARRIER)) {
+                       state_flags |= DISPATCH_MACH_STATE_PENDING_BARRIER;
+               }
+       }
+
+       if (state_flags) {
+               os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, {
+                       new_state = _dmsr_state_merge_override(old_state, qos);
+                       new_state |= state_flags;
+               });
+       } else {
+               os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, relaxed, {
+                       new_state = _dmsr_state_merge_override(old_state, qos);
+                       if (old_state == new_state) {
+                               os_atomic_rmw_loop_give_up(break);
+                       }
+               });
+       }
+
+       qos = _dmsr_state_max_qos(new_state);
+       owner = _dispatch_lock_owner((dispatch_lock)old_state);
+       if (owner) {
+               if (_dmsr_state_needs_override(old_state, qos)) {
+                       _dispatch_wqthread_override_start_check_owner(owner, qos,
+                                       &dmsr->dmsr_state_lock.dul_lock);
+               }
+               return _dispatch_release_2_tailcall(dm);
+       }
+
+       dispatch_wakeup_flags_t wflags = 0;
+       if (state_flags & DISPATCH_MACH_STATE_PENDING_BARRIER) {
+               _dispatch_mach_push_send_barrier_drain(dm, qos);
+       } else if (wakeup || dmsr->dmsr_disconnect_cnt ||
+                       (dm->dq_atomic_flags & DSF_CANCELED)) {
+               wflags = DISPATCH_WAKEUP_MAKE_DIRTY | DISPATCH_WAKEUP_CONSUME_2;
+       } else if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) {
+               wflags = DISPATCH_WAKEUP_CONSUME_2;
+       }
+       if (wflags) {
+               return dx_wakeup(dm, qos, wflags);
+       }
+       return _dispatch_release_2_tailcall(dm);
+}
+
+DISPATCH_NOINLINE
+static bool
+_dispatch_mach_send_push_and_trydrain(dispatch_mach_t dm,
+               dispatch_object_t dou, dispatch_qos_t qos,
+               dispatch_mach_send_invoke_flags_t send_flags)
+{
+       dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+       dispatch_lock owner_self = _dispatch_lock_value_for_self();
+       uint64_t old_state, new_state, canlock_mask, state_flags = 0;
+       dispatch_tid owner;
+
+       bool wakeup = _dispatch_mach_send_push_inline(dmsr, dou);
+       if (wakeup) {
+               state_flags = DISPATCH_MACH_STATE_DIRTY;
+       }
+
+       if (unlikely(dmsr->dmsr_disconnect_cnt ||
+                       (dm->dq_atomic_flags & DSF_CANCELED))) {
+               os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, {
+                       new_state = _dmsr_state_merge_override(old_state, qos);
+                       new_state |= state_flags;
+               });
+               dx_wakeup(dm, qos, DISPATCH_WAKEUP_MAKE_DIRTY);
+               return false;
+       }
+
+       canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK |
+                       DISPATCH_MACH_STATE_PENDING_BARRIER;
+       if (state_flags) {
+               os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, seq_cst, {
+                       new_state = _dmsr_state_merge_override(old_state, qos);
+                       new_state |= state_flags;
+                       if (likely((old_state & canlock_mask) == 0)) {
+                               new_state |= owner_self;
+                               new_state &= ~DISPATCH_MACH_STATE_DIRTY;
+                               new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
+                               new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
+                       }
+               });
+       } else {
+               os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, acquire, {
+                       new_state = _dmsr_state_merge_override(old_state, qos);
+                       if (new_state == old_state) {
+                               os_atomic_rmw_loop_give_up(return false);
+                       }
+                       if (likely((old_state & canlock_mask) == 0)) {
+                               new_state |= owner_self;
+                               new_state &= ~DISPATCH_MACH_STATE_DIRTY;
+                               new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
+                               new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
+                       }
+               });
+       }
+
+       owner = _dispatch_lock_owner((dispatch_lock)old_state);
+       if (owner) {
+               if (_dmsr_state_needs_override(old_state, qos)) {
+                       _dispatch_wqthread_override_start_check_owner(owner, qos,
+                                       &dmsr->dmsr_state_lock.dul_lock);
+               }
+               return false;
+       }
+
+       if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) {
+               dx_wakeup(dm, qos, 0);
+               return false;
+       }
+
+       // Ensure our message is still at the head of the queue and has not already
+       // been dequeued by another thread that raced us to the send queue lock.
+       // A plain load of the head and comparison against our object pointer is
+       // sufficient.
+       if (unlikely(!(wakeup && dou._do == dmsr->dmsr_head))) {
+               // Don't request immediate send result for messages we don't own
+               send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK;
+       }
+       return _dispatch_mach_send_drain(dm, DISPATCH_INVOKE_NONE, send_flags);
+}
+
+#pragma mark -
+#pragma mark dispatch_mach
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm)
+{
+       DISPATCH_ASSERT_ON_MANAGER_QUEUE();
+       if (_dispatch_unote_registered(dm->dm_send_refs)) {
+               dispatch_assume(_dispatch_unote_unregister(dm->dm_send_refs, 0));
+       }
+       dm->dm_send_refs->du_ident = 0;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_mach_notification_kevent_register(dispatch_mach_t dm,
+		mach_port_t send)
+{
+       DISPATCH_ASSERT_ON_MANAGER_QUEUE();
+       dm->dm_send_refs->du_ident = send;
+       dispatch_assume(_dispatch_unote_register(dm->dm_send_refs,
+                       DISPATCH_WLH_ANON, 0));
+}
+
+void
+_dispatch_mach_merge_notification(dispatch_unote_t du,
+               uint32_t flags DISPATCH_UNUSED, uintptr_t data,
+               uintptr_t status DISPATCH_UNUSED,
+               pthread_priority_t pp DISPATCH_UNUSED)
+{
+       dispatch_mach_send_refs_t dmsr = du._dmsr;
+       dispatch_mach_t dm = _dispatch_wref2ptr(dmsr->du_owner_wref);
+
+       if (data & dmsr->du_fflags) {
+               _dispatch_mach_send_invoke(dm, DISPATCH_INVOKE_MANAGER_DRAIN,
+                               DM_SEND_INVOKE_MAKE_DIRTY);
+       }
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_handle_or_push_received_msg(dispatch_mach_t dm,
+               dispatch_mach_msg_t dmsg)
+{
+       mach_error_t error;
+       dispatch_mach_reason_t reason = _dispatch_mach_msg_get_reason(dmsg, &error);
+       if (reason == DISPATCH_MACH_MESSAGE_RECEIVED || !dm->dm_is_xpc ||
+                       !_dispatch_mach_xpc_hooks->dmxh_direct_message_handler(
+                       dm->dm_recv_refs->dmrr_handler_ctxt, reason, dmsg, error)) {
+               // Not XPC client or not a message that XPC can handle inline - push
+               // it onto the channel queue.
+               dm_push(dm, dmsg, _dispatch_qos_from_pp(dmsg->dmsg_priority));
+       } else {
+               // XPC handled the message inline. Do the cleanup that would otherwise
+               // have happened in _dispatch_mach_msg_invoke(), leaving out steps that
+               // are not required in this context.
+               dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
+               dispatch_release(dmsg);
+       }
+}
+
+DISPATCH_ALWAYS_INLINE
+static void
+_dispatch_mach_push_async_reply_msg(dispatch_mach_t dm,
+		dispatch_mach_msg_t dmsg, dispatch_queue_t drq)
+{
+       // Push the message onto the given queue. This function is only used for
+       // replies to messages sent by
+       // dispatch_mach_send_with_result_and_async_reply_4libxpc().
+       dispatch_continuation_t dc = _dispatch_mach_msg_async_reply_wrap(dmsg, dm);
+       _dispatch_trace_continuation_push(drq, dc);
+       dx_push(drq, dc, _dispatch_qos_from_pp(dmsg->dmsg_priority));
+}
+
+#pragma mark -
+#pragma mark dispatch_mach_t
+
+static inline mach_msg_option_t
+_dispatch_mach_checkin_options(void)
+{
+       mach_msg_option_t options = 0;
+#if DISPATCH_USE_CHECKIN_NOIMPORTANCE
+       options = MACH_SEND_NOIMPORTANCE; // <rdar://problem/16996737>
+#endif
+       return options;
+}
+
+static inline mach_msg_option_t
+_dispatch_mach_send_options(void)
+{
+       mach_msg_option_t options = 0;
+       return options;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_qos_t
+_dispatch_mach_priority_propagate(mach_msg_option_t options,
+               pthread_priority_t *msg_pp)
+{
+#if DISPATCH_USE_NOIMPORTANCE_QOS
+       if (options & MACH_SEND_NOIMPORTANCE) {
+               *msg_pp = 0;
+               return 0;
+       }
+#endif
+       unsigned int flags = DISPATCH_PRIORITY_PROPAGATE_CURRENT;
+       if ((options & DISPATCH_MACH_WAIT_FOR_REPLY) &&
+                       (options & DISPATCH_MACH_OWNED_REPLY_PORT) &&
+                       _dispatch_use_mach_special_reply_port()) {
+               flags |= DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC;
+       }
+       *msg_pp = _dispatch_priority_compute_propagated(0, flags);
+       // TODO: remove QoS contribution of sync IPC messages to send queue
+       // rdar://31848737
+       return _dispatch_qos_from_pp(*msg_pp);
+}
+
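+/*
+ * Editorial sketch of the propagation rules above, as inferred from the
+ * code (not normative):
+ *
+ *	MACH_SEND_NOIMPORTANCE (when built with
+ *	DISPATCH_USE_NOIMPORTANCE_QOS)   -> msg_pp = 0, qos = 0
+ *	sync wait on an owned special
+ *	reply port                       -> propagate with the SYNC_IPC flag
+ *	otherwise                        -> propagate the current priority
+ *
+ * The returned qos only drives the send queue; the message itself
+ * carries msg_pp in dmsg_priority (see _dispatch_mach_send_msg()).
+ */
+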
+DISPATCH_NOINLINE
+static bool
+_dispatch_mach_send_msg(dispatch_mach_t dm, dispatch_mach_msg_t dmsg,
+               dispatch_continuation_t dc_wait, mach_msg_option_t options)
+{
+       dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+       if (slowpath(dmsg->do_next != DISPATCH_OBJECT_LISTLESS)) {
+               DISPATCH_CLIENT_CRASH(dmsg->do_next, "Message already enqueued");
+       }
+       dispatch_retain(dmsg);
+       pthread_priority_t msg_pp;
+       dispatch_qos_t qos = _dispatch_mach_priority_propagate(options, &msg_pp);
+       options |= _dispatch_mach_send_options();
+       dmsg->dmsg_options = options;
+       mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg);
+       dmsg->dmsg_reply = _dispatch_mach_msg_get_reply_port(dmsg);
+       bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) ==
+                       MACH_MSG_TYPE_MOVE_SEND_ONCE);
+       dmsg->dmsg_priority = msg_pp;
+       dmsg->dmsg_voucher = _voucher_copy();
+       _dispatch_voucher_debug("mach-msg[%p] set", dmsg->dmsg_voucher, dmsg);
+
+       uint32_t send_status;
+       bool returning_send_result = false;
+       dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE;
+       if (options & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT) {
+               send_flags = DM_SEND_INVOKE_IMMEDIATE_SEND;
+       }
+       if (is_reply && !dmsg->dmsg_reply && !dmsr->dmsr_disconnect_cnt &&
+                       !(dm->dq_atomic_flags & DSF_CANCELED)) {
+               // replies are sent to a send-once right and don't need the send queue
+               dispatch_assert(!dc_wait);
+               send_status = _dispatch_mach_msg_send(dm, dmsg, NULL, 0, send_flags);
+               dispatch_assert(send_status);
+               returning_send_result = !!(send_status &
+                               DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT);
+       } else {
+               _dispatch_voucher_ktrace_dmsg_push(dmsg);
+               dispatch_object_t dou = { ._dmsg = dmsg };
+               if (dc_wait) dou._dc = dc_wait;
+               returning_send_result = _dispatch_mach_send_push_and_trydrain(dm, dou,
+                               qos, send_flags);
+       }
+       if (returning_send_result) {
+               _dispatch_voucher_debug("mach-msg[%p] clear", dmsg->dmsg_voucher, dmsg);
+               if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher);
+               dmsg->dmsg_voucher = NULL;
+               dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
+               dispatch_release(dmsg);
+       }
+       return returning_send_result;
+}
+
+DISPATCH_NOINLINE
+void
+dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg,
+               mach_msg_option_t options)
+{
+       dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
+       options &= ~DISPATCH_MACH_OPTIONS_MASK;
+	bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,
+			options);
+       dispatch_assert(!returned_send_result);
+}
+
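+/*
+ * Illustrative usage sketch (editorial, not shipping code). Assumes a
+ * channel created and connected with dispatch_mach_create() /
+ * dispatch_mach_connect() from the private mach API; `server_port` is
+ * a hypothetical send right and 0x1000 a hypothetical msgh_id:
+ *
+ *	mach_msg_header_t *hdr;
+ *	dispatch_mach_msg_t dmsg = dispatch_mach_msg_create(NULL,
+ *			sizeof(mach_msg_header_t),
+ *			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
+ *	hdr->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
+ *	hdr->msgh_remote_port = server_port;
+ *	hdr->msgh_size = sizeof(mach_msg_header_t);
+ *	hdr->msgh_id = 0x1000;
+ *	dispatch_mach_send(dm, dmsg, 0);
+ *	dispatch_release(dmsg); // the channel retains the message itself
+ */
+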
+DISPATCH_NOINLINE
+void
+dispatch_mach_send_with_result(dispatch_mach_t dm, dispatch_mach_msg_t dmsg,
+               mach_msg_option_t options, dispatch_mach_send_flags_t send_flags,
+               dispatch_mach_reason_t *send_result, mach_error_t *send_error)
+{
+       if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) {
+               DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags");
+       }
+       dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
+       options &= ~DISPATCH_MACH_OPTIONS_MASK;
+       options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT;
+	bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,
+			options);
+       unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND;
+       mach_error_t err = 0;
+       if (returned_send_result) {
+               reason = _dispatch_mach_msg_get_reason(dmsg, &err);
+       }
+       *send_result = reason;
+       *send_error = err;
+}
+
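+/*
+ * Illustrative usage sketch for the immediate-send-result variant above
+ * (editorial; dm and dmsg as in the previous sketch):
+ *
+ *	dispatch_mach_reason_t reason;
+ *	mach_error_t error;
+ *	dispatch_mach_send_with_result(dm, dmsg, 0,
+ *			DISPATCH_MACH_SEND_DEFAULT, &reason, &error);
+ *	if (reason == DISPATCH_MACH_NEEDS_DEFERRED_SEND) {
+ *		// not sent synchronously: the send result will be delivered
+ *		// to the channel handler instead
+ *	} else {
+ *		// reason/error describe the immediate send attempt
+ *	}
+ */
+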
+static inline dispatch_mach_msg_t
+_dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm,
+               dispatch_mach_msg_t dmsg, mach_msg_option_t options,
+               bool *returned_send_result)
+{
+       mach_port_t send = MACH_PORT_NULL;
+       mach_port_t reply_port = _dispatch_mach_msg_get_reply_port(dmsg);
+       if (!reply_port) {
+               // use per-thread mach reply port <rdar://24597802>
+               reply_port = _dispatch_get_thread_reply_port();
+               mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg);
+               dispatch_assert(MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) ==
+                               MACH_MSG_TYPE_MAKE_SEND_ONCE);
+               hdr->msgh_local_port = reply_port;
+               options |= DISPATCH_MACH_OWNED_REPLY_PORT;
+       }
+       options |= DISPATCH_MACH_WAIT_FOR_REPLY;
+
+       dispatch_mach_reply_refs_t dmr;
+#if DISPATCH_DEBUG
+       dmr = _dispatch_calloc(1, sizeof(*dmr));
+#else
+       struct dispatch_mach_reply_refs_s dmr_buf = { };
+       dmr = &dmr_buf;
+#endif
+       struct dispatch_continuation_s dc_wait = {
+               .dc_flags = DISPATCH_OBJ_SYNC_WAITER_BIT,
+               .dc_data = dmsg,
+               .dc_other = dmr,
+               .dc_priority = DISPATCH_NO_PRIORITY,
+               .dc_voucher = DISPATCH_NO_VOUCHER,
+       };
+       dmr->dmr_ctxt = dmsg->do_ctxt;
+       dmr->dmr_waiter_tid = _dispatch_tid_self();
+	*returned_send_result = _dispatch_mach_send_msg(dm, dmsg, &dc_wait,
+			options);
+       if (options & DISPATCH_MACH_OWNED_REPLY_PORT) {
+               _dispatch_clear_thread_reply_port(reply_port);
+               if (_dispatch_use_mach_special_reply_port()) {
+                       // link special reply port to send right for remote receive right
+                       // TODO: extend to pre-connect phase <rdar://problem/31823384>
+                       send = dm->dm_send_refs->dmsr_send;
+               }
+       }
+       dmsg = _dispatch_mach_msg_reply_recv(dm, dmr, reply_port, send);
+#if DISPATCH_DEBUG
+       free(dmr);
+#endif
+       return dmsg;
+}
+
+DISPATCH_NOINLINE
+dispatch_mach_msg_t
+dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm,
+               dispatch_mach_msg_t dmsg, mach_msg_option_t options)
+{
+       bool returned_send_result;
+       dispatch_mach_msg_t reply;
+       dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
+       options &= ~DISPATCH_MACH_OPTIONS_MASK;
+       reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options,
+                       &returned_send_result);
+       dispatch_assert(!returned_send_result);
+       return reply;
+}
+
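+/*
+ * Illustrative usage sketch (editorial). If the message carries no
+ * reply port, its local bits must be MACH_MSG_TYPE_MAKE_SEND_ONCE so
+ * the per-thread reply port can be installed as done above:
+ *
+ *	dispatch_mach_msg_t reply =
+ *			dispatch_mach_send_and_wait_for_reply(dm, dmsg, 0);
+ *	if (reply) {
+ *		size_t size;
+ *		mach_msg_header_t *hdr = dispatch_mach_msg_get_msg(reply, &size);
+ *		// ... consume the reply ...
+ *		dispatch_release(reply);
+ *	}
+ */
+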
+DISPATCH_NOINLINE
+dispatch_mach_msg_t
+dispatch_mach_send_with_result_and_wait_for_reply(dispatch_mach_t dm,
+               dispatch_mach_msg_t dmsg, mach_msg_option_t options,
+               dispatch_mach_send_flags_t send_flags,
+               dispatch_mach_reason_t *send_result, mach_error_t *send_error)
+{
+       if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) {
+               DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags");
+       }
+       bool returned_send_result;
+       dispatch_mach_msg_t reply;
+       dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
+       options &= ~DISPATCH_MACH_OPTIONS_MASK;
+       options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT;
+       reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options,
+                       &returned_send_result);
+       unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND;
+       mach_error_t err = 0;
+       if (returned_send_result) {
+               reason = _dispatch_mach_msg_get_reason(dmsg, &err);
+       }
+       *send_result = reason;
+       *send_error = err;
+       return reply;
+}
+
+DISPATCH_NOINLINE
+void
+dispatch_mach_send_with_result_and_async_reply_4libxpc(dispatch_mach_t dm,
+               dispatch_mach_msg_t dmsg, mach_msg_option_t options,
+               dispatch_mach_send_flags_t send_flags,
+               dispatch_mach_reason_t *send_result, mach_error_t *send_error)
+{
+       if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) {
+               DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags");
+       }
+	if (unlikely(!dm->dm_is_xpc)) {
+		DISPATCH_CLIENT_CRASH(0,
+			"dispatch_mach_send_with_result_and_async_reply_4libxpc "
+			"is XPC only");
+	}
+
+       dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
+       options &= ~DISPATCH_MACH_OPTIONS_MASK;
+       options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT;
+       mach_port_t reply_port = _dispatch_mach_msg_get_reply_port(dmsg);
+       if (!reply_port) {
+               DISPATCH_CLIENT_CRASH(0, "Reply port needed for async send with reply");
+       }
+       options |= DISPATCH_MACH_ASYNC_REPLY;
+	bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,
+			options);
+       unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND;
+       mach_error_t err = 0;
+       if (returned_send_result) {
+               reason = _dispatch_mach_msg_get_reason(dmsg, &err);
+       }
+       *send_result = reason;
+       *send_error = err;
+}
+
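+/*
+ * Editorial note: unlike the wait-for-reply variants, the reply here is
+ * delivered asynchronously. A hypothetical libxpc-side flow:
+ *
+ *	dispatch_mach_reason_t reason;
+ *	mach_error_t err;
+ *	dispatch_mach_send_with_result_and_async_reply_4libxpc(dm, dmsg, 0,
+ *			DISPATCH_MACH_SEND_DEFAULT, &reason, &err);
+ *	// later, the reply is wrapped by _dispatch_mach_msg_async_reply_wrap()
+ *	// and pushed onto the queue returned by the
+ *	// dmxh_msg_context_reply_queue hook, then handed to
+ *	// dmxh_async_reply_handler rather than to this caller
+ */
+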
+DISPATCH_NOINLINE
+static bool
+_dispatch_mach_disconnect(dispatch_mach_t dm)
+{
+       dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+       bool disconnected;
+       if (_dispatch_unote_registered(dmsr)) {
+               _dispatch_mach_notification_kevent_unregister(dm);
+       }
+       if (MACH_PORT_VALID(dmsr->dmsr_send)) {
+               _dispatch_mach_msg_disconnected(dm, MACH_PORT_NULL, dmsr->dmsr_send);
+               dmsr->dmsr_send = MACH_PORT_NULL;
+       }
+       if (dmsr->dmsr_checkin) {
+               _dispatch_mach_msg_not_sent(dm, dmsr->dmsr_checkin);
+               dmsr->dmsr_checkin = NULL;
+       }
+       _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
+       dispatch_mach_reply_refs_t dmr, tmp;
+       TAILQ_FOREACH_SAFE(dmr, &dm->dm_send_refs->dmsr_replies, dmr_list, tmp) {
+               TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
+               _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
+               if (_dispatch_unote_registered(dmr)) {
+                       if (!_dispatch_mach_reply_kevent_unregister(dm, dmr,
+                                       DU_UNREGISTER_DISCONNECTED)) {
+                               TAILQ_INSERT_HEAD(&dm->dm_send_refs->dmsr_replies, dmr,
+                                       dmr_list);
+                       }
+               } else {
+                       _dispatch_mach_reply_waiter_unregister(dm, dmr,
+                               DU_UNREGISTER_DISCONNECTED);
+               }
+       }
+       disconnected = TAILQ_EMPTY(&dm->dm_send_refs->dmsr_replies);
+       _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
+       return disconnected;
+}
+
+static void
+_dispatch_mach_cancel(dispatch_mach_t dm)
+{
+       _dispatch_object_debug(dm, "%s", __func__);
+       if (!_dispatch_mach_disconnect(dm)) return;
+
+       bool uninstalled = true;
+       dispatch_assert(!dm->dm_uninstalled);
+
+       if (dm->dm_xpc_term_refs) {
+               uninstalled = _dispatch_unote_unregister(dm->dm_xpc_term_refs, 0);
+       }
+
+       dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
+       mach_port_t local_port = (mach_port_t)dmrr->du_ident;
+       if (local_port) {
+               // handle the deferred delete case properly, similar to what
+               // _dispatch_source_invoke2() does
+               dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
+               if ((dqf & DSF_DEFERRED_DELETE) && !(dqf & DSF_ARMED)) {
+                       _dispatch_source_refs_unregister(dm->_as_ds,
+                                       DU_UNREGISTER_IMMEDIATE_DELETE);
+                       dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
+               } else if (!(dqf & DSF_DEFERRED_DELETE) && !(dqf & DSF_DELETED)) {
+                       _dispatch_source_refs_unregister(dm->_as_ds, 0);
+                       dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
+               }
+               if ((dqf & DSF_STATE_MASK) == DSF_DELETED) {
+                       _dispatch_mach_msg_disconnected(dm, local_port, MACH_PORT_NULL);
+                       dmrr->du_ident = 0;
+               } else {
+                       uninstalled = false;
+               }
+       } else {
+               _dispatch_queue_atomic_flags_set_and_clear(dm->_as_dq, DSF_DELETED,
+                               DSF_ARMED | DSF_DEFERRED_DELETE);
+       }
+
+       if (dm->dm_send_refs->dmsr_disconnect_cnt) {
+               uninstalled = false; // <rdar://problem/31233110>
+       }
+       if (uninstalled) dm->dm_uninstalled = uninstalled;
+}
+
+DISPATCH_NOINLINE
+static bool
+_dispatch_mach_reconnect_invoke(dispatch_mach_t dm, dispatch_object_t dou)
+{
+       if (!_dispatch_mach_disconnect(dm)) return false;
+       dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+       dmsr->dmsr_checkin = dou._dc->dc_data;
+       dmsr->dmsr_send = (mach_port_t)dou._dc->dc_other;
+       _dispatch_continuation_free(dou._dc);
+       (void)os_atomic_dec2o(dmsr, dmsr_disconnect_cnt, relaxed);
+       _dispatch_object_debug(dm, "%s", __func__);
+       _dispatch_release(dm); // <rdar://problem/26266265>
+       return true;
+}
+
+DISPATCH_NOINLINE
+void
+dispatch_mach_reconnect(dispatch_mach_t dm, mach_port_t send,
+               dispatch_mach_msg_t checkin)
+{
+       dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+       (void)os_atomic_inc2o(dmsr, dmsr_disconnect_cnt, relaxed);
+       if (MACH_PORT_VALID(send) && checkin) {
+               dispatch_mach_msg_t dmsg = checkin;
+               dispatch_retain(dmsg);
+               dmsg->dmsg_options = _dispatch_mach_checkin_options();
+               dmsr->dmsr_checkin_port = _dispatch_mach_msg_get_remote_port(dmsg);
+       } else {
+               checkin = NULL;
+               dmsr->dmsr_checkin_port = MACH_PORT_NULL;
+       }
+       dispatch_continuation_t dc = _dispatch_continuation_alloc();
+       dc->dc_flags = DISPATCH_OBJ_CONSUME_BIT;
+	// dc_func is not called directly; _dispatch_mach_send_drain invokes it
+       dc->dc_func = (void*)_dispatch_mach_reconnect_invoke;
+       dc->dc_ctxt = dc;
+       dc->dc_data = checkin;
+       dc->dc_other = (void*)(uintptr_t)send;
+       dc->dc_voucher = DISPATCH_NO_VOUCHER;
+       dc->dc_priority = DISPATCH_NO_PRIORITY;
+       _dispatch_retain(dm); // <rdar://problem/26266265>
+       return _dispatch_mach_send_push(dm, dc, 0);
+}
+
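+/*
+ * Illustrative usage sketch (editorial): re-targeting a channel at a
+ * new send right, with a hypothetical check-in message the peer uses
+ * to re-register the connection:
+ *
+ *	mach_msg_header_t *hdr;
+ *	dispatch_mach_msg_t checkin = dispatch_mach_msg_create(NULL,
+ *			sizeof(mach_msg_header_t),
+ *			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
+ *	// ... fill in *hdr for the new server port ...
+ *	dispatch_mach_reconnect(dm, new_send_right, checkin);
+ *	dispatch_release(checkin); // reconnect retains it
+ *
+ * Until the check-in message is actually sent,
+ * dispatch_mach_get_checkin_port() below reports its remote port.
+ */
+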
+DISPATCH_NOINLINE
+mach_port_t
+dispatch_mach_get_checkin_port(dispatch_mach_t dm)
+{
+       dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+       if (slowpath(dm->dq_atomic_flags & DSF_CANCELED)) {
+               return MACH_PORT_DEAD;
+       }
+       return dmsr->dmsr_checkin_port;
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_connect_invoke(dispatch_mach_t dm)
+{
+       dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
+       _dispatch_client_callout4(dmrr->dmrr_handler_ctxt,
+                       DISPATCH_MACH_CONNECTED, NULL, 0, dmrr->dmrr_handler_func);
+       dm->dm_connect_handler_called = 1;
+       _dispatch_perfmon_workitem_inc();
+}
+
+DISPATCH_ALWAYS_INLINE
+static void
+_dispatch_mach_msg_invoke_with_mach(dispatch_mach_msg_t dmsg,
+               dispatch_invoke_flags_t flags, dispatch_mach_t dm)
+{
+       dispatch_mach_recv_refs_t dmrr;
+       mach_error_t err;
+       unsigned long reason = _dispatch_mach_msg_get_reason(dmsg, &err);
+       dispatch_thread_set_self_t adopt_flags = DISPATCH_PRIORITY_ENFORCE|
+                       DISPATCH_VOUCHER_CONSUME|DISPATCH_VOUCHER_REPLACE;
+
+       dmrr = dm->dm_recv_refs;
+       dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
+       _dispatch_voucher_ktrace_dmsg_pop(dmsg);
+       _dispatch_voucher_debug("mach-msg[%p] adopt", dmsg->dmsg_voucher, dmsg);
+       (void)_dispatch_adopt_priority_and_set_voucher(dmsg->dmsg_priority,
+                       dmsg->dmsg_voucher, adopt_flags);
+       dmsg->dmsg_voucher = NULL;
+       dispatch_invoke_with_autoreleasepool(flags, {
+               if (flags & DISPATCH_INVOKE_ASYNC_REPLY) {
+                       _dispatch_client_callout3(dmrr->dmrr_handler_ctxt, reason, dmsg,
+                                       _dispatch_mach_xpc_hooks->dmxh_async_reply_handler);
+               } else {
+                       if (slowpath(!dm->dm_connect_handler_called)) {
+                               _dispatch_mach_connect_invoke(dm);
+                       }
+                       if (reason == DISPATCH_MACH_MESSAGE_RECEIVED &&
+                                       (_dispatch_queue_atomic_flags(dm->_as_dq) & DSF_CANCELED)) {
+				// <rdar://problem/32184699> Do not deliver a message received
+				// after cancellation: _dispatch_mach_merge_msg can be
+				// preempted for a long time between clearing DSF_ARMED and
+				// enqueuing the message, allowing cancellation to complete
+				// and the message event to be delivered afterwards.
+				//
+				// This makes XPC unhappy because some of these messages are
+				// port-destroyed notifications that can cause it to try to
+				// reconnect on a channel that is almost fully canceled.
+                       } else {
+                               _dispatch_client_callout4(dmrr->dmrr_handler_ctxt, reason, dmsg,
+                                               err, dmrr->dmrr_handler_func);
+                       }
+               }
+               _dispatch_perfmon_workitem_inc();
+       });
+       _dispatch_introspection_queue_item_complete(dmsg);
+       dispatch_release(dmsg);
+}
+
+DISPATCH_NOINLINE
+void
+_dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg,
+               DISPATCH_UNUSED dispatch_invoke_context_t dic,
+               dispatch_invoke_flags_t flags)
+{
+       dispatch_thread_frame_s dtf;
+
+       // hide mach channel
+       dispatch_mach_t dm = (dispatch_mach_t)_dispatch_thread_frame_stash(&dtf);
+       _dispatch_mach_msg_invoke_with_mach(dmsg, flags, dm);
+       _dispatch_thread_frame_unstash(&dtf);
+}
+
+DISPATCH_NOINLINE
+void
+_dispatch_mach_barrier_invoke(dispatch_continuation_t dc,
+               DISPATCH_UNUSED dispatch_invoke_context_t dic,
+               dispatch_invoke_flags_t flags)
+{
+       dispatch_thread_frame_s dtf;
+       dispatch_mach_t dm = dc->dc_other;
+       dispatch_mach_recv_refs_t dmrr;
+       uintptr_t dc_flags = (uintptr_t)dc->dc_data;
+       unsigned long type = dc_type(dc);
+
+       // hide mach channel from clients
+       if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) {
+		// on the send queue, the mach channel isn't the current queue;
+		// its target queue is already the current one
+               _dispatch_thread_frame_stash(&dtf);
+       }
+       dmrr = dm->dm_recv_refs;
+       DISPATCH_COMPILER_CAN_ASSUME(dc_flags & DISPATCH_OBJ_CONSUME_BIT);
+       _dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, dc_flags, {
+               dispatch_invoke_with_autoreleasepool(flags, {
+                       if (slowpath(!dm->dm_connect_handler_called)) {
+                               _dispatch_mach_connect_invoke(dm);
+                       }
+                       _dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
+                       _dispatch_client_callout4(dmrr->dmrr_handler_ctxt,
+                                       DISPATCH_MACH_BARRIER_COMPLETED, NULL, 0,
+                                       dmrr->dmrr_handler_func);
+               });
+       });
+       if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) {
+               _dispatch_thread_frame_unstash(&dtf);
+       }
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_mach_barrier_set_vtable(dispatch_continuation_t dc,
+               dispatch_mach_t dm, dispatch_continuation_vtable_t vtable)
+{
+       dc->dc_data = (void *)dc->dc_flags;
+       dc->dc_other = dm;
+	dc->do_vtable = vtable; // must be after the dc_flags load: they alias
+}
+
+DISPATCH_NOINLINE
+void
+dispatch_mach_send_barrier_f(dispatch_mach_t dm, void *context,
+               dispatch_function_t func)
+{
+       dispatch_continuation_t dc = _dispatch_continuation_alloc();
+       uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER;
+       dispatch_qos_t qos;
+
+       _dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags);
+       _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_SEND_BARRIER));
+       _dispatch_trace_continuation_push(dm->_as_dq, dc);
+       qos = _dispatch_continuation_override_qos(dm->_as_dq, dc);
+       return _dispatch_mach_send_push(dm, dc, qos);
+}
+
+DISPATCH_NOINLINE
+void
+dispatch_mach_send_barrier(dispatch_mach_t dm, dispatch_block_t barrier)
+{
+       dispatch_continuation_t dc = _dispatch_continuation_alloc();
+       uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER;
+       dispatch_qos_t qos;
+
+       _dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags);
+       _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_SEND_BARRIER));
+       _dispatch_trace_continuation_push(dm->_as_dq, dc);
+       qos = _dispatch_continuation_override_qos(dm->_as_dq, dc);
+       return _dispatch_mach_send_push(dm, dc, qos);
+}
+
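+/*
+ * Illustrative usage sketch (editorial): a send barrier executes once
+ * everything ahead of it on the send queue has been disposed of, after
+ * which the channel handler receives DISPATCH_MACH_BARRIER_COMPLETED
+ * (see _dispatch_mach_barrier_invoke() above):
+ *
+ *	dispatch_mach_send_barrier(dm, ^{
+ *		// all prior sends have been handed to the kernel (or failed)
+ *	});
+ */
+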
+DISPATCH_NOINLINE
+void
+dispatch_mach_receive_barrier_f(dispatch_mach_t dm, void *context,
+               dispatch_function_t func)
+{
+       dispatch_continuation_t dc = _dispatch_continuation_alloc();
+       uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER;
+
+       _dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags);
+       _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_RECV_BARRIER));
+       return _dispatch_continuation_async(dm->_as_dq, dc);
+}
+
+DISPATCH_NOINLINE
+void
+dispatch_mach_receive_barrier(dispatch_mach_t dm, dispatch_block_t barrier)
+{
+       dispatch_continuation_t dc = _dispatch_continuation_alloc();
+       uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER;
+
+       _dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags);
+       _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_RECV_BARRIER));
+       return _dispatch_continuation_async(dm->_as_dq, dc);
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_cancel_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags)
+{
+       dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
+
+       dispatch_invoke_with_autoreleasepool(flags, {
+               if (slowpath(!dm->dm_connect_handler_called)) {
+                       _dispatch_mach_connect_invoke(dm);
+               }
+               _dispatch_client_callout4(dmrr->dmrr_handler_ctxt,
+                               DISPATCH_MACH_CANCELED, NULL, 0, dmrr->dmrr_handler_func);
+               _dispatch_perfmon_workitem_inc();
+       });
+       dm->dm_cancel_handler_called = 1;
+       _dispatch_release(dm); // the retain is done at creation time
+}
+
+DISPATCH_NOINLINE
+void
+dispatch_mach_cancel(dispatch_mach_t dm)
+{
+       dispatch_source_cancel(dm->_as_ds);
+}
+
+static void
+_dispatch_mach_install(dispatch_mach_t dm, dispatch_wlh_t wlh,
+               dispatch_priority_t pri)
+{
+       dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
+       uint32_t disconnect_cnt;
+
+       if (dmrr->du_ident) {
+               _dispatch_source_refs_register(dm->_as_ds, wlh, pri);
+               dispatch_assert(dmrr->du_is_direct);
+       }
+
+       if (dm->dm_is_xpc) {
+               bool monitor_sigterm;
+               if (_dispatch_mach_xpc_hooks->version < 3) {
+                       monitor_sigterm = true;
+               } else if (!_dispatch_mach_xpc_hooks->dmxh_enable_sigterm_notification){
+                       monitor_sigterm = true;
+               } else {
+                       monitor_sigterm =
+                                       _dispatch_mach_xpc_hooks->dmxh_enable_sigterm_notification(
+                                       dm->dm_recv_refs->dmrr_handler_ctxt);
+               }
+               if (monitor_sigterm) {
+                       dispatch_xpc_term_refs_t _dxtr =
+                                       dux_create(&_dispatch_xpc_type_sigterm, SIGTERM, 0)._dxtr;
+                       _dxtr->du_owner_wref = _dispatch_ptr2wref(dm);
+                       dm->dm_xpc_term_refs = _dxtr;
+                       _dispatch_unote_register(dm->dm_xpc_term_refs, wlh, pri);
+               }
+       }
+       if (!dm->dq_priority) {
+		// _dispatch_mach_reply_kevent_register assumes this has been done.
+		// Unlike regular sources or queues, the DEFAULTQUEUE flag is used
+		// so that the priority of the channel doesn't act as a QoS floor
+		// for incoming messages (26761457)
+               dm->dq_priority = pri;
+       }
+       dm->ds_is_installed = true;
+       if (unlikely(!os_atomic_cmpxchgv2o(dm->dm_send_refs, dmsr_disconnect_cnt,
+                       DISPATCH_MACH_NEVER_INSTALLED, 0, &disconnect_cnt, release))) {
+               DISPATCH_INTERNAL_CRASH(disconnect_cnt, "Channel already installed");
+       }
+}
+
+void
+_dispatch_mach_finalize_activation(dispatch_mach_t dm, bool *allow_resume)
+{
+       dispatch_priority_t pri;
+       dispatch_wlh_t wlh;
+
+       // call "super"
+       _dispatch_queue_finalize_activation(dm->_as_dq, allow_resume);
+
+       if (!dm->ds_is_installed) {
+               pri = _dispatch_queue_compute_priority_and_wlh(dm->_as_dq, &wlh);
+               if (pri) _dispatch_mach_install(dm, wlh, pri);
+       }
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_mach_tryarm(dispatch_mach_t dm, dispatch_queue_flags_t *out_dqf)
+{
+       dispatch_queue_flags_t oqf, nqf;
+       bool rc = os_atomic_rmw_loop2o(dm, dq_atomic_flags, oqf, nqf, relaxed, {
+               nqf = oqf;
+               if (nqf & (DSF_ARMED | DSF_CANCELED | DSF_DEFERRED_DELETE |
+                               DSF_DELETED)) {
+			// the test lives inside the loop for convenience, but its
+			// result should not change for the duration of the rmw_loop
+                       os_atomic_rmw_loop_give_up(break);
+               }
+               nqf |= DSF_ARMED;
+       });
+       if (out_dqf) *out_dqf = nqf;
+       return rc;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_queue_wakeup_target_t
+_dispatch_mach_invoke2(dispatch_object_t dou,
+               dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
+               uint64_t *owned)
+{
+       dispatch_mach_t dm = dou._dm;
+       dispatch_queue_wakeup_target_t retq = NULL;
+       dispatch_queue_t dq = _dispatch_queue_get_current();
+       dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+       dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
+       dispatch_queue_flags_t dqf = 0;
+
+       if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN) && dmrr &&
+                       _dispatch_unote_wlh_changed(dmrr, _dispatch_get_wlh())) {
+               dqf = _dispatch_queue_atomic_flags_set_orig(dm->_as_dq,
+                               DSF_WLH_CHANGED);
+               if (!(dqf & DSF_WLH_CHANGED)) {
+                       if (dm->dm_is_xpc) {
+                               _dispatch_bug_deprecated("Changing target queue "
+                                               "hierarchy after xpc connection was activated");
+                       } else {
+                               _dispatch_bug_deprecated("Changing target queue "
+                                               "hierarchy after mach channel was activated");
+                       }
+               }
+       }
+
+       // This function performs all mach channel actions. Each action is
+       // responsible for verifying that it takes place on the appropriate queue.
+       // If the current queue is not the correct queue for this action, the
+       // correct queue will be returned and the invoke will be re-driven on that
+       // queue.
+
+       // The order of tests here in invoke and in wakeup should be consistent.
+
+       if (unlikely(!dm->ds_is_installed)) {
+               // The channel needs to be installed on the kevent queue.
+               if (unlikely(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) {
+                       return dm->do_targetq;
+               }
+		_dispatch_mach_install(dm, _dispatch_get_wlh(),
+				_dispatch_get_basepri());
+               _dispatch_perfmon_workitem_inc();
+       }
+
+       if (_dispatch_queue_class_probe(dm)) {
+               if (dq == dm->do_targetq) {
+drain:
+                       retq = _dispatch_queue_serial_drain(dm->_as_dq, dic, flags, owned);
+               } else {
+                       retq = dm->do_targetq;
+               }
+       }
+
+       if (!retq && _dispatch_unote_registered(dmrr)) {
+               if (_dispatch_mach_tryarm(dm, &dqf)) {
+                       _dispatch_unote_resume(dmrr);
+                       if (dq == dm->do_targetq && !dq->do_targetq && !dmsr->dmsr_tail &&
+                                       (dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) &&
+                                       _dispatch_wlh_should_poll_unote(dmrr)) {
+                               // try to redrive the drain from under the lock for channels
+                               // targeting an overcommit root queue to avoid parking
+                               // when the next message has already fired
+                               _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE);
+                               if (dm->dq_items_tail) goto drain;
+                       }
+               }
+       } else {
+               dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
+       }
+
+       if (dmsr->dmsr_tail) {
+               bool requires_mgr = dm->dm_needs_mgr || (dmsr->dmsr_disconnect_cnt &&
+                               _dispatch_unote_registered(dmsr));
+               if (!os_atomic_load2o(dmsr, dmsr_notification_armed, relaxed) ||
+                               (dqf & DSF_CANCELED) || dmsr->dmsr_disconnect_cnt) {
+                       // The channel has pending messages to send.
+                       if (unlikely(requires_mgr && dq != &_dispatch_mgr_q)) {
+                               return retq ? retq : &_dispatch_mgr_q;
+                       }
+                       dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE;
+                       if (dq != &_dispatch_mgr_q) {
+                               send_flags |= DM_SEND_INVOKE_CAN_RUN_BARRIER;
+                       }
+                       _dispatch_mach_send_invoke(dm, flags, send_flags);
+               }
+               if (!retq) retq = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;
+       } else if (!retq && (dqf & DSF_CANCELED)) {
+		// The channel has been canceled and needs to be uninstalled from the
+               // manager queue. After uninstallation, the cancellation handler needs
+               // to be delivered to the target queue.
+               if (!dm->dm_uninstalled) {
+                       if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) {
+                               // waiting for the delivery of a deferred delete event
+                               return retq ? retq : DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;
+                       }
+                       if (dq != &_dispatch_mgr_q) {
+                               return retq ? retq : &_dispatch_mgr_q;
+                       }
+                       _dispatch_mach_send_invoke(dm, flags, DM_SEND_INVOKE_CANCEL);
+                       if (unlikely(!dm->dm_uninstalled)) {
+                               // waiting for the delivery of a deferred delete event
+                               // or deletion didn't happen because send_invoke couldn't
+                               // acquire the send lock
+                               return retq ? retq : DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;
+                       }
+               }
+               if (!dm->dm_cancel_handler_called) {
+                       if (dq != dm->do_targetq) {
+                               return retq ? retq : dm->do_targetq;
+                       }
+                       _dispatch_mach_cancel_invoke(dm, flags);
+               }
+       }
+
+       return retq;
+}
+
+DISPATCH_NOINLINE
+void
+_dispatch_mach_invoke(dispatch_mach_t dm,
+               dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags)
+{
+       _dispatch_queue_class_invoke(dm, dic, flags,
+                       DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS, _dispatch_mach_invoke2);
+}
+
+void
+_dispatch_mach_wakeup(dispatch_mach_t dm, dispatch_qos_t qos,
+               dispatch_wakeup_flags_t flags)
+{
+       // This function determines whether the mach channel needs to be invoked.
+       // The order of tests here in probe and in invoke should be consistent.
+
+       dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+       dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE;
+       dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
+
+       if (!dm->ds_is_installed) {
+               // The channel needs to be installed on the kevent queue.
+               tq = DISPATCH_QUEUE_WAKEUP_TARGET;
+               goto done;
+       }
+
+       if (_dispatch_queue_class_probe(dm)) {
+               tq = DISPATCH_QUEUE_WAKEUP_TARGET;
+               goto done;
+       }
+
+       if (_dispatch_lock_is_locked(dmsr->dmsr_state_lock.dul_lock)) {
+               // Sending and uninstallation below require the send lock, the channel
+               // will be woken up when the lock is dropped <rdar://15132939&15203957>
+               goto done;
+       }
+
+       if (dmsr->dmsr_tail) {
+               bool requires_mgr = dm->dm_needs_mgr || (dmsr->dmsr_disconnect_cnt &&
+                               _dispatch_unote_registered(dmsr));
+               if (!os_atomic_load2o(dmsr, dmsr_notification_armed, relaxed) ||
+                               (dqf & DSF_CANCELED) || dmsr->dmsr_disconnect_cnt) {
+                       if (unlikely(requires_mgr)) {
+                               tq = DISPATCH_QUEUE_WAKEUP_MGR;
+                       } else {
+                               tq = DISPATCH_QUEUE_WAKEUP_TARGET;
+                       }
+               }
+       } else if (dqf & DSF_CANCELED) {
+               if (!dm->dm_uninstalled) {
+                       if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) {
+                               // waiting for the delivery of a deferred delete event
+                       } else {
+                               // The channel needs to be uninstalled from the manager queue
+                               tq = DISPATCH_QUEUE_WAKEUP_MGR;
+                       }
+               } else if (!dm->dm_cancel_handler_called) {
+                       // the cancellation handler needs to be delivered to the target
+                       // queue.
+                       tq = DISPATCH_QUEUE_WAKEUP_TARGET;
+               }
+       }
+
+done:
+       if ((tq == DISPATCH_QUEUE_WAKEUP_TARGET) &&
+                       dm->do_targetq == &_dispatch_mgr_q) {
+               tq = DISPATCH_QUEUE_WAKEUP_MGR;
+       }
+
+       return _dispatch_queue_class_wakeup(dm->_as_dq, qos, flags, tq);
+}
+
+static void
+_dispatch_mach_sigterm_invoke(void *ctx)
+{
+       dispatch_mach_t dm = ctx;
+       if (!(dm->dq_atomic_flags & DSF_CANCELED)) {
+               dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
+               _dispatch_client_callout4(dmrr->dmrr_handler_ctxt,
+                               DISPATCH_MACH_SIGTERM_RECEIVED, NULL, 0,
+                               dmrr->dmrr_handler_func);
+       }
+}
+
+void
+_dispatch_xpc_sigterm_merge(dispatch_unote_t du,
+               uint32_t flags DISPATCH_UNUSED, uintptr_t data DISPATCH_UNUSED,
+               uintptr_t status DISPATCH_UNUSED, pthread_priority_t pp)
+{
+       dispatch_mach_t dm = _dispatch_wref2ptr(du._du->du_owner_wref);
+       uint32_t options = 0;
+       if ((flags & EV_UDATA_SPECIFIC) && (flags & EV_ONESHOT) &&
+                       !(flags & EV_DELETE)) {
+               options = DU_UNREGISTER_IMMEDIATE_DELETE;
+       } else {
+               dispatch_assert((flags & EV_ONESHOT) && (flags & EV_DELETE));
+               options = DU_UNREGISTER_ALREADY_DELETED;
+       }
+       _dispatch_unote_unregister(du, options);
+
+       if (!(dm->dq_atomic_flags & DSF_CANCELED)) {
+               _dispatch_barrier_async_detached_f(dm->_as_dq, dm,
+                               _dispatch_mach_sigterm_invoke);
+       } else {
+               dx_wakeup(dm, _dispatch_qos_from_pp(pp), DISPATCH_WAKEUP_MAKE_DIRTY);
+       }
+}
+
+#pragma mark -
+#pragma mark dispatch_mach_msg_t
+
+dispatch_mach_msg_t
+dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size,
+               dispatch_mach_msg_destructor_t destructor, mach_msg_header_t **msg_ptr)
+{
+       if (slowpath(size < sizeof(mach_msg_header_t)) ||
+                       slowpath(destructor && !msg)) {
+               DISPATCH_CLIENT_CRASH(size, "Empty message");
+       }
+
+       dispatch_mach_msg_t dmsg;
+       size_t msg_size = sizeof(struct dispatch_mach_msg_s);
+       if (!destructor && os_add_overflow(msg_size,
+                         (size - sizeof(dmsg->dmsg_msg)), &msg_size)) {
+               DISPATCH_CLIENT_CRASH(size, "Message size too large");
+       }
+
+       dmsg = _dispatch_object_alloc(DISPATCH_VTABLE(mach_msg), msg_size);
+       if (destructor) {
+               dmsg->dmsg_msg = msg;
+       } else if (msg) {
+               memcpy(dmsg->dmsg_buf, msg, size);
+       }
+       dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
+       dmsg->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false);
+       dmsg->dmsg_destructor = destructor;
+       dmsg->dmsg_size = size;
+       if (msg_ptr) {
+               *msg_ptr = _dispatch_mach_msg_get_msg(dmsg);
+       }
+       return dmsg;
+}
+
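+/*
+ * Illustrative usage sketch (editorial). With a NULL msg and the
+ * DEFAULT destructor the buffer is allocated inline and returned via
+ * msg_ptr; with a destructor, the caller's buffer is adopted and later
+ * released in _dispatch_mach_msg_dispose():
+ *
+ *	mach_msg_header_t *hdr;
+ *	dispatch_mach_msg_t dmsg = dispatch_mach_msg_create(NULL, size,
+ *			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
+ *	// ... fill in *hdr and the body (size bytes total) ...
+ */
+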
+void
+_dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg,
+               DISPATCH_UNUSED bool *allow_free)
+{
+       if (dmsg->dmsg_voucher) {
+               _voucher_release(dmsg->dmsg_voucher);
+               dmsg->dmsg_voucher = NULL;
+       }
+       switch (dmsg->dmsg_destructor) {
+       case DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT:
+               break;
+       case DISPATCH_MACH_MSG_DESTRUCTOR_FREE:
+               free(dmsg->dmsg_msg);
+               break;
+       case DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE: {
+               mach_vm_size_t vm_size = dmsg->dmsg_size;
+               mach_vm_address_t vm_addr = (uintptr_t)dmsg->dmsg_msg;
+               (void)dispatch_assume_zero(mach_vm_deallocate(mach_task_self(),
+                               vm_addr, vm_size));
+               break;
+       }}
+}
+
+static inline mach_msg_header_t*
+_dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg)
+{
+       return dmsg->dmsg_destructor ? dmsg->dmsg_msg :
+                       (mach_msg_header_t*)dmsg->dmsg_buf;
+}
+
+mach_msg_header_t*
+dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg, size_t *size_ptr)
+{
+       if (size_ptr) {
+               *size_ptr = dmsg->dmsg_size;
+       }
+       return _dispatch_mach_msg_get_msg(dmsg);
+}
+
+size_t
+_dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz)
+{
+       size_t offset = 0;
+       offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
+                       dx_kind(dmsg), dmsg);
+       offset += _dispatch_object_debug_attr(dmsg, buf + offset, bufsiz - offset);
+       offset += dsnprintf(&buf[offset], bufsiz - offset, "opts/err = 0x%x, "
+                       "msgh[%p] = { ", dmsg->dmsg_options, dmsg->dmsg_buf);
+       mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg);
+       if (hdr->msgh_id) {
+               offset += dsnprintf(&buf[offset], bufsiz - offset, "id 0x%x, ",
+                               hdr->msgh_id);
+       }
+       if (hdr->msgh_size) {
+               offset += dsnprintf(&buf[offset], bufsiz - offset, "size %u, ",
+                               hdr->msgh_size);
+       }
+       if (hdr->msgh_bits) {
+               offset += dsnprintf(&buf[offset], bufsiz - offset, "bits <l %u, r %u",
+                               MACH_MSGH_BITS_LOCAL(hdr->msgh_bits),
+                               MACH_MSGH_BITS_REMOTE(hdr->msgh_bits));
+               if (MACH_MSGH_BITS_OTHER(hdr->msgh_bits)) {
+                       offset += dsnprintf(&buf[offset], bufsiz - offset, ", o 0x%x",
+                                       MACH_MSGH_BITS_OTHER(hdr->msgh_bits));
+               }
+               offset += dsnprintf(&buf[offset], bufsiz - offset, ">, ");
+       }
+       if (hdr->msgh_local_port && hdr->msgh_remote_port) {
+               offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x, "
+                               "remote 0x%x", hdr->msgh_local_port, hdr->msgh_remote_port);
+       } else if (hdr->msgh_local_port) {
+               offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x",
+                               hdr->msgh_local_port);
+       } else if (hdr->msgh_remote_port) {
+               offset += dsnprintf(&buf[offset], bufsiz - offset, "remote 0x%x",
+                               hdr->msgh_remote_port);
+       } else {
+               offset += dsnprintf(&buf[offset], bufsiz - offset, "no ports");
+       }
+       offset += dsnprintf(&buf[offset], bufsiz - offset, " } }");
+       return offset;
+}
+
+DISPATCH_ALWAYS_INLINE
+static dispatch_queue_t
+_dispatch_mach_msg_context_async_reply_queue(void *msg_context)
+{
+       if (DISPATCH_MACH_XPC_SUPPORTS_ASYNC_REPLIES(_dispatch_mach_xpc_hooks)) {
+               return _dispatch_mach_xpc_hooks->dmxh_msg_context_reply_queue(
+                               msg_context);
+       }
+       return NULL;
+}
+
+static dispatch_continuation_t
+_dispatch_mach_msg_async_reply_wrap(dispatch_mach_msg_t dmsg,
+               dispatch_mach_t dm)
+{
+       _dispatch_retain(dm); // Released in _dispatch_mach_msg_async_reply_invoke()
+       dispatch_continuation_t dc = _dispatch_continuation_alloc();
+       dc->do_vtable = DC_VTABLE(MACH_ASYNC_REPLY);
+       dc->dc_data = dmsg;
+       dc->dc_other = dm;
+       dc->dc_priority = DISPATCH_NO_PRIORITY;
+       dc->dc_voucher = DISPATCH_NO_VOUCHER;
+       return dc;
+}
+
+DISPATCH_NOINLINE
+void
+_dispatch_mach_msg_async_reply_invoke(dispatch_continuation_t dc,
+               DISPATCH_UNUSED dispatch_invoke_context_t dic,
+               dispatch_invoke_flags_t flags)
+{
+       // _dispatch_mach_msg_invoke_with_mach() releases the reference on dmsg
+       // taken by _dispatch_mach_msg_async_reply_wrap() after handling it.
+       dispatch_mach_msg_t dmsg = dc->dc_data;
+       dispatch_mach_t dm = dc->dc_other;
+       _dispatch_mach_msg_invoke_with_mach(dmsg,
+                       flags | DISPATCH_INVOKE_ASYNC_REPLY, dm);
+
+       // Balances _dispatch_mach_msg_async_reply_wrap
+       _dispatch_release(dc->dc_other);
+
+       _dispatch_continuation_free(dc);
+}
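
The wrap/invoke pair above is a small ownership protocol: the wrap step takes an extra reference on the channel so it survives until the reply is delivered, and the invoke step performs the delivery, drops that reference, and frees the continuation. A self-contained sketch of the same discipline — the names are hypothetical and the refcount is deliberately non-atomic, so this is an illustration, not libdispatch internals:

    #include <stdlib.h>

    typedef struct channel_s { int refcnt; } *channel_t;
    typedef struct continuation_s {
            void *payload;     // the message being delivered
            channel_t target;  // kept alive until the continuation runs
    } *continuation_t;

    static continuation_t
    reply_wrap(void *payload, channel_t ch)
    {
            ch->refcnt++;  // released in reply_invoke()
            continuation_t c = malloc(sizeof(*c));
            if (!c) abort();
            c->payload = payload;
            c->target = ch;
            return c;
    }

    static void
    reply_invoke(continuation_t c, void (*handler)(void *, channel_t))
    {
            handler(c->payload, c->target);
            c->target->refcnt--;  // balances reply_wrap()
            free(c);
    }
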
+
+#pragma mark -
+#pragma mark dispatch_mig_server
+
+mach_msg_return_t
+dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz,
+               dispatch_mig_callback_t callback)
+{
+       mach_msg_options_t options = MACH_RCV_MSG | MACH_RCV_TIMEOUT
+               | MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX)
+               | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | MACH_RCV_VOUCHER;
+       mach_msg_options_t tmp_options;
+       mig_reply_error_t *bufTemp, *bufRequest, *bufReply;
+       mach_msg_return_t kr = 0;
+       uint64_t assertion_token = 0;
+       uint32_t cnt = 1000; // do not stall out serial queues
+       boolean_t demux_success;
+       bool received = false;
+       size_t rcv_size = maxmsgsz + MAX_TRAILER_SIZE;
+       dispatch_source_refs_t dr = ds->ds_refs;
+
+       bufRequest = alloca(rcv_size);
+       bufRequest->RetCode = 0;
+       for (mach_vm_address_t p = mach_vm_trunc_page(
+                       (mach_vm_address_t)bufRequest + vm_page_size);
+                       p < (mach_vm_address_t)bufRequest + rcv_size; p += vm_page_size) {
+               *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard
+       }
+
+       bufReply = alloca(rcv_size);
+       bufReply->Head.msgh_size = 0;
+       for (mach_vm_address_t p = mach_vm_trunc_page(
+                       (mach_vm_address_t)bufReply + vm_page_size);
+                       p < (mach_vm_address_t)bufReply + rcv_size; p += vm_page_size) {
+               *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard
+       }
+
+#if DISPATCH_DEBUG
+       options |= MACH_RCV_LARGE; // rdar://problem/8422992
+#endif
+       tmp_options = options;
+       // XXX FIXME -- change this to not starve out the target queue
+       for (;;) {
+               if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || (--cnt == 0)) {
+                       options &= ~MACH_RCV_MSG;
+                       tmp_options &= ~MACH_RCV_MSG;
+
+                       if (!(tmp_options & MACH_SEND_MSG)) {
+                               goto out;
+                       }
+               }
+               kr = mach_msg(&bufReply->Head, tmp_options, bufReply->Head.msgh_size,
+                               (mach_msg_size_t)rcv_size, (mach_port_t)dr->du_ident, 0, 0);
+
+               tmp_options = options;
+
+               if (slowpath(kr)) {
+                       switch (kr) {
+                       case MACH_SEND_INVALID_DEST:
+                       case MACH_SEND_TIMED_OUT:
+                               if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) {
+                                       mach_msg_destroy(&bufReply->Head);
+                               }
+                               break;
+                       case MACH_RCV_TIMED_OUT:
+                               // Don't return an error if a message was sent this time or
+                               // a message was successfully received previously
+                               // rdar://problems/7363620&7791738
+                               if (bufReply->Head.msgh_remote_port || received) {
+                                       kr = MACH_MSG_SUCCESS;
+                               }
+                               break;
+                       case MACH_RCV_INVALID_NAME:
+                               break;
+#if DISPATCH_DEBUG
+                       case MACH_RCV_TOO_LARGE:
+                               // receive messages that are too large and log their id and size
+                               // rdar://problem/8422992
+                               tmp_options &= ~MACH_RCV_LARGE;
+                               size_t large_size = bufReply->Head.msgh_size + MAX_TRAILER_SIZE;
+                               void *large_buf = malloc(large_size);
+                               if (large_buf) {
+                                       rcv_size = large_size;
+                                       bufReply = large_buf;
+                               }
+                               if (!mach_msg(&bufReply->Head, tmp_options, 0,
+                                               (mach_msg_size_t)rcv_size,
+                                               (mach_port_t)dr->du_ident, 0, 0)) {
+                                       _dispatch_log("BUG in libdispatch client: "
+                                                       "dispatch_mig_server received message larger than "
+                                                       "requested size %zu: id = 0x%x, size = %u",
+                                                       maxmsgsz, bufReply->Head.msgh_id,
+                                                       bufReply->Head.msgh_size);
+                               }
+                               if (large_buf) {
+                                       free(large_buf);
+                               }
+                               // fall through
+#endif
+                       default:
+                               _dispatch_bug_mach_client(
+                                               "dispatch_mig_server: mach_msg() failed", kr);
+                               break;
+                       }
+                       goto out;
+               }
+
+               if (!(tmp_options & MACH_RCV_MSG)) {
+                       goto out;
+               }
+
+               if (assertion_token) {
+#if DISPATCH_USE_IMPORTANCE_ASSERTION
+                       int r = proc_importance_assertion_complete(assertion_token);
+                       (void)dispatch_assume_zero(r);
+#endif
+                       assertion_token = 0;
+               }
+               received = true;
+
+               bufTemp = bufRequest;
+               bufRequest = bufReply;
+               bufReply = bufTemp;
+
+#if DISPATCH_USE_IMPORTANCE_ASSERTION
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+               int r = proc_importance_assertion_begin_with_msg(&bufRequest->Head,
+                               NULL, &assertion_token);
+               if (r && slowpath(r != EIO)) {
+                       (void)dispatch_assume_zero(r);
+               }
+#pragma clang diagnostic pop
+#endif
+               _voucher_replace(voucher_create_with_mach_msg(&bufRequest->Head));
+               demux_success = callback(&bufRequest->Head, &bufReply->Head);
+
+               if (!demux_success) {
+                       // destroy the request - but not the reply port
+                       bufRequest->Head.msgh_remote_port = 0;
+                       mach_msg_destroy(&bufRequest->Head);
+               } else if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
+                       // if MACH_MSGH_BITS_COMPLEX is _not_ set, then bufReply->RetCode
+                       // is present
+                       if (slowpath(bufReply->RetCode)) {
+                               if (bufReply->RetCode == MIG_NO_REPLY) {
+                                       continue;
+                               }
+
+                               // destroy the request - but not the reply port
+                               bufRequest->Head.msgh_remote_port = 0;
+                               mach_msg_destroy(&bufRequest->Head);
+                       }
+               }
+
+               if (bufReply->Head.msgh_remote_port) {
+                       tmp_options |= MACH_SEND_MSG;
+                       if (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) !=
+                                       MACH_MSG_TYPE_MOVE_SEND_ONCE) {
+                               tmp_options |= MACH_SEND_TIMEOUT;
+                       }
+               }
+       }
+
+out:
+       if (assertion_token) {
+#if DISPATCH_USE_IMPORTANCE_ASSERTION
+               int r = proc_importance_assertion_complete(assertion_token);
+               (void)dispatch_assume_zero(r);
+#endif
+       }
+
+       return kr;
+}
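
dispatch_mig_server() is normally driven from a MACH_RECV dispatch source whose event handler drains the port through a MIG-generated demux routine. A hedged sketch of that wiring — the demux name my_subsystem_server and the 2048-byte request size are illustrative, and dispatch_mig_server itself is private SPI (declared in private/mach_private.h in this tree):

    #include <dispatch/dispatch.h>
    #include <mach/mach.h>

    // MIG-generated demux for the subsystem (name is hypothetical).
    extern boolean_t my_subsystem_server(mach_msg_header_t *request,
                    mach_msg_header_t *reply);

    static dispatch_source_t
    start_mig_service(mach_port_t recv_port)
    {
            dispatch_source_t ds = dispatch_source_create(
                            DISPATCH_SOURCE_TYPE_MACH_RECV, recv_port, 0,
                            dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0));
            dispatch_source_set_event_handler(ds, ^{
                    // Drains the port's pending messages via the loop above.
                    (void)dispatch_mig_server(ds, 2048, my_subsystem_server);
            });
            dispatch_resume(ds);
            return ds;
    }
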
+
+#pragma mark -
+#pragma mark dispatch_mach_debug
+
+static size_t
+_dispatch_mach_debug_attr(dispatch_mach_t dm, char *buf, size_t bufsiz)
+{
+       dispatch_queue_t target = dm->do_targetq;
+       dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+       dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
+
+       return dsnprintf(buf, bufsiz, "target = %s[%p], receive = 0x%x, "
+                       "send = 0x%x, send-possible = 0x%x%s, checkin = 0x%x%s, "
+                       "send state = %016llx, disconnected = %d, canceled = %d ",
+                       target && target->dq_label ? target->dq_label : "", target,
+                       (mach_port_t)dmrr->du_ident, dmsr->dmsr_send,
+                       (mach_port_t)dmsr->du_ident,
+                       dmsr->dmsr_notification_armed ? " (armed)" : "",
+                       dmsr->dmsr_checkin_port, dmsr->dmsr_checkin ? " (pending)" : "",
+                       dmsr->dmsr_state, dmsr->dmsr_disconnect_cnt,
+                       (bool)(dm->dq_atomic_flags & DSF_CANCELED));
+}
+
+size_t
+_dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz)
+{
+       size_t offset = 0;
+       offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
+                       dm->dq_label && !dm->dm_cancel_handler_called ? dm->dq_label :
+                       dx_kind(dm), dm);
+       offset += _dispatch_object_debug_attr(dm, &buf[offset], bufsiz - offset);
+       offset += _dispatch_mach_debug_attr(dm, &buf[offset], bufsiz - offset);
+       offset += dsnprintf(&buf[offset], bufsiz - offset, "}");
+       return offset;
+}
+
+#endif /* HAVE_MACH */
diff --git a/src/mach_internal.h b/src/mach_internal.h
new file mode 100644 (file)
index 0000000..8c8edd8
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+/*
+ * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
+ * which are subject to change in future releases of Mac OS X. Any applications
+ * relying on these interfaces WILL break.
+ */
+
+#ifndef __DISPATCH_MACH_INTERNAL__
+#define __DISPATCH_MACH_INTERNAL__
+#if HAVE_MACH
+
+#ifndef __DISPATCH_INDIRECT__
+#error "Please #include <dispatch/dispatch.h> instead of this file directly."
+#include <dispatch/base.h> // for HeaderDoc
+#endif
+
+// NOTE: dispatch_source_mach_send_flags_t and dispatch_source_mach_recv_flags_t
+//       bit values must not overlap as they share the same kevent fflags!
+
+/*!
+ * @enum dispatch_source_mach_send_flags_t
+ *
+ * @constant DISPATCH_MACH_SEND_DELETED
+ * Port-deleted notification. Disabled for source registration.
+ */
+enum {
+       DISPATCH_MACH_SEND_DELETED = 0x4,
+};
+/*!
+ * @enum dispatch_source_mach_recv_flags_t
+ *
+ * @constant DISPATCH_MACH_RECV_MESSAGE
+ * Receive right has pending messages
+ */
+enum {
+       DISPATCH_MACH_RECV_MESSAGE = 0x2,
+};
+
+
+DISPATCH_CLASS_DECL(mach);
+DISPATCH_CLASS_DECL(mach_msg);
+
+#ifndef __cplusplus
+struct dispatch_mach_s {
+       DISPATCH_SOURCE_HEADER(mach);
+       dispatch_mach_send_refs_t dm_send_refs;
+       dispatch_xpc_term_refs_t dm_xpc_term_refs;
+} DISPATCH_ATOMIC64_ALIGN;
+
+struct dispatch_mach_msg_s {
+       DISPATCH_OBJECT_HEADER(mach_msg);
+       union {
+               mach_msg_option_t dmsg_options;
+               mach_error_t dmsg_error;
+       };
+       mach_port_t dmsg_reply;
+       pthread_priority_t dmsg_priority;
+       voucher_t dmsg_voucher;
+       dispatch_mach_msg_destructor_t dmsg_destructor;
+       size_t dmsg_size;
+       union {
+               mach_msg_header_t *dmsg_msg;
+               char dmsg_buf[0];
+       };
+};
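
The trailing union lets a message object either reference a header owned elsewhere (dmsg_msg) or carry the bytes inline in storage allocated past the end of the struct (dmsg_buf). A generic sketch of that tail-allocation pattern, with hypothetical names (the zero-length array mirrors the GNU extension used above):

    #include <stdlib.h>
    #include <string.h>

    struct msg_wrapper {
            size_t size;
            union {
                    void *external;      // header owned by the caller
                    char inline_buf[0];  // or bytes stored past the struct
            };
    };

    // Allocate the wrapper and the inline payload in one block.
    static struct msg_wrapper *
    msg_wrapper_create_inline(const void *bytes, size_t size)
    {
            struct msg_wrapper *w = malloc(sizeof(*w) + size);
            if (!w) return NULL;
            w->size = size;
            memcpy(w->inline_buf, bytes, size);
            return w;
    }
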
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_mach_xref_dispose(struct dispatch_mach_s *dm)
+{
+       if (dm->dm_is_xpc) {
+               dm->dm_recv_refs->dmrr_handler_ctxt = (void *)0xbadfeed;
+       }
+}
+#endif // __cplusplus
+
+dispatch_source_t
+_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp,
+               const struct dispatch_continuation_s *dc);
+
+void _dispatch_mach_msg_async_reply_invoke(dispatch_continuation_t dc,
+               dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags);
+void _dispatch_mach_dispose(dispatch_mach_t dm, bool *allow_free);
+void _dispatch_mach_finalize_activation(dispatch_mach_t dm, bool *allow_resume);
+void _dispatch_mach_invoke(dispatch_mach_t dm, dispatch_invoke_context_t dic,
+               dispatch_invoke_flags_t flags);
+void _dispatch_mach_wakeup(dispatch_mach_t dm, dispatch_qos_t qos,
+               dispatch_wakeup_flags_t flags);
+size_t _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz);
+void _dispatch_mach_merge_notification(dispatch_unote_t du,
+               uint32_t flags, uintptr_t data, uintptr_t status,
+               pthread_priority_t pp);
+void _dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags,
+               mach_msg_header_t *msg, mach_msg_size_t msgsz);
+void _dispatch_mach_reply_merge_msg(dispatch_unote_t du, uint32_t flags,
+               mach_msg_header_t *msg, mach_msg_size_t msgsz);
+void _dispatch_xpc_sigterm_merge(dispatch_unote_t du, uint32_t flags,
+               uintptr_t data, uintptr_t status, pthread_priority_t pp);
+
+void _dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg, bool *allow_free);
+void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg,
+               dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags);
+size_t _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf,
+               size_t bufsiz);
+
+void _dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc,
+               dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags);
+void _dispatch_mach_barrier_invoke(dispatch_continuation_t dc,
+               dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags);
+
+#endif // HAVE_MACH
+#endif /* __DISPATCH_MACH_INTERNAL__ */
index 1928df53f4e964b665163e2dda4e7987f41d5d26..86d100507de645de5efcb642631e812c47a96fbe 100644 (file)
@@ -37,14 +37,28 @@ DISPATCH_NOINLINE
 _os_object_t
 _os_object_retain_internal(_os_object_t obj)
 {
-       return _os_object_retain_internal_inline(obj);
+       return _os_object_retain_internal_n_inline(obj, 1);
+}
+
+DISPATCH_NOINLINE
+_os_object_t
+_os_object_retain_internal_n(_os_object_t obj, uint16_t n)
+{
+       return _os_object_retain_internal_n_inline(obj, n);
 }
 
 DISPATCH_NOINLINE
 void
 _os_object_release_internal(_os_object_t obj)
 {
-       return _os_object_release_internal_inline(obj);
+       return _os_object_release_internal_n_inline(obj, 1);
+}
+
+DISPATCH_NOINLINE
+void
+_os_object_release_internal_n(_os_object_t obj, uint16_t n)
+{
+       return _os_object_release_internal_n_inline(obj, n);
 }
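
The _n variants let callers take or drop several internal references with a single atomic operation (for instance the two references consumed by the CONSUME_2 wakeups introduced later in this commit). The underlying idea in C11 atomics, as a sketch:

    #include <stdatomic.h>

    // One fetch_add of n replaces n separate increments.
    static inline int
    retain_n(_Atomic int *ref_cnt, int n)
    {
            return atomic_fetch_add_explicit(ref_cnt, n,
                            memory_order_relaxed) + n;
    }

    static inline int
    release_n(_Atomic int *ref_cnt, int n)
    {
            // Release order so prior writes are visible to the disposer.
            return atomic_fetch_sub_explicit(ref_cnt, n,
                            memory_order_release) - n;
    }
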
 
 DISPATCH_NOINLINE
@@ -89,21 +103,19 @@ _os_object_release(_os_object_t obj)
 bool
 _os_object_retain_weak(_os_object_t obj)
 {
-       int xref_cnt = obj->os_obj_xref_cnt;
-       if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
-               return true; // global object
-       }
-retry:
-       if (slowpath(xref_cnt == -1)) {
-               return false;
-       }
-       if (slowpath(xref_cnt < -1)) {
-               goto overrelease;
-       }
-       if (slowpath(!os_atomic_cmpxchgvw2o(obj, os_obj_xref_cnt, xref_cnt,
-                       xref_cnt + 1, &xref_cnt, relaxed))) {
-               goto retry;
-       }
+       int xref_cnt, nxref_cnt;
+       os_atomic_rmw_loop2o(obj, os_obj_xref_cnt, xref_cnt, nxref_cnt, relaxed, {
+               if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
+                       os_atomic_rmw_loop_give_up(return true); // global object
+               }
+               if (slowpath(xref_cnt == -1)) {
+                       os_atomic_rmw_loop_give_up(return false);
+               }
+               if (slowpath(xref_cnt < -1)) {
+                       os_atomic_rmw_loop_give_up(goto overrelease);
+               }
+               nxref_cnt = xref_cnt + 1;
+       });
        return true;
 overrelease:
        _OS_OBJECT_CLIENT_CRASH("Over-release of an object");
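
os_atomic_rmw_loop2o expands to the usual load/compare-exchange retry loop, with os_atomic_rmw_loop_give_up providing the early exits. Roughly the same logic in portable C11 atomics — a sketch, not the actual macro expansion:

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    #define GLOBAL_REFCNT INT_MAX  // stand-in for _OS_OBJECT_GLOBAL_REFCNT

    static bool
    retain_weak(_Atomic int *xref_cnt)
    {
            int cur = atomic_load_explicit(xref_cnt, memory_order_relaxed);
            for (;;) {
                    if (cur == GLOBAL_REFCNT) return true;  // global object
                    if (cur == -1) return false;            // already disposed
                    if (cur < -1) abort();                  // over-release
                    // On contention, cur is refreshed and the loop retries.
                    if (atomic_compare_exchange_weak_explicit(xref_cnt, &cur,
                                    cur + 1, memory_order_relaxed,
                                    memory_order_relaxed)) {
                            return true;
                    }
            }
    }
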
@@ -126,7 +138,7 @@ _os_object_allows_weak_reference(_os_object_t obj)
 #pragma mark dispatch_object_t
 
 void *
-_dispatch_alloc(const void *vtable, size_t size)
+_dispatch_object_alloc(const void *vtable, size_t size)
 {
 #if OS_OBJECT_HAVE_OBJC1
        const struct dispatch_object_vtable_s *_vtable = vtable;
@@ -139,6 +151,27 @@ _dispatch_alloc(const void *vtable, size_t size)
 #endif
 }
 
+void
+_dispatch_object_finalize(dispatch_object_t dou)
+{
+#if USE_OBJC
+       objc_destructInstance((id)dou._do);
+#else
+       (void)dou;
+#endif
+}
+
+void
+_dispatch_object_dealloc(dispatch_object_t dou)
+{
+       // so that ddt doesn't pick up bad objects when malloc reuses this memory
+       dou._os_obj->os_obj_isa = NULL;
+#if OS_OBJECT_HAVE_OBJC1
+       dou._do->do_vtable = NULL;
+#endif
+       free(dou._os_obj);
+}
+
 void
 dispatch_retain(dispatch_object_t dou)
 {
@@ -153,24 +186,6 @@ dispatch_release(dispatch_object_t dou)
        _os_object_release(dou._os_obj);
 }
 
-static void
-_dispatch_dealloc(dispatch_object_t dou)
-{
-       dispatch_queue_t tq = dou._do->do_targetq;
-       dispatch_function_t func = dou._do->do_finalizer;
-       void *ctxt = dou._do->do_ctxt;
-#if OS_OBJECT_HAVE_OBJC1
-       // so that ddt doesn't pick up bad objects when malloc reuses this memory
-       dou._do->do_vtable = NULL;
-#endif
-       _os_object_dealloc(dou._os_obj);
-
-       if (func && ctxt) {
-               dispatch_async_f(tq, ctxt, func);
-       }
-       _dispatch_release_tailcall(tq);
-}
-
 #if !USE_OBJC
 void
 _dispatch_xref_dispose(dispatch_object_t dou)
@@ -181,6 +196,10 @@ _dispatch_xref_dispose(dispatch_object_t dou)
        }
        if (dx_type(dou._do) == DISPATCH_SOURCE_KEVENT_TYPE) {
                _dispatch_source_xref_dispose(dou._ds);
+#if HAVE_MACH
+       } else if (dx_type(dou._do) == DISPATCH_MACH_CHANNEL_TYPE) {
+               _dispatch_mach_xref_dispose(dou._dm);
+#endif
        } else if (dx_type(dou._do) == DISPATCH_QUEUE_RUNLOOP_TYPE) {
                _dispatch_runloop_queue_xref_dispose(dou._dq);
        }
@@ -191,19 +210,35 @@ _dispatch_xref_dispose(dispatch_object_t dou)
 void
 _dispatch_dispose(dispatch_object_t dou)
 {
+       dispatch_queue_t tq = dou._do->do_targetq;
+       dispatch_function_t func = dou._do->do_finalizer;
+       void *ctxt = dou._do->do_ctxt;
+       bool allow_free = true;
+
        if (slowpath(dou._do->do_next != DISPATCH_OBJECT_LISTLESS)) {
                DISPATCH_INTERNAL_CRASH(dou._do->do_next, "Release while enqueued");
        }
-       dx_dispose(dou._do);
-       return _dispatch_dealloc(dou);
+
+       dx_dispose(dou._do, &allow_free);
+
+       // Past this point, the only thing left of the object is its memory
+       if (likely(allow_free)) {
+               _dispatch_object_finalize(dou);
+               _dispatch_object_dealloc(dou);
+       }
+       if (func && ctxt) {
+               dispatch_async_f(tq, ctxt, func);
+       }
+       if (tq) _dispatch_release_tailcall(tq);
 }
 
 void *
 dispatch_get_context(dispatch_object_t dou)
 {
        DISPATCH_OBJECT_TFB(_dispatch_objc_get_context, dou);
-       if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
-                       slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) {
+       if (unlikely(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT ||
+                       dx_hastypeflag(dou._do, QUEUE_ROOT) ||
+                       dx_hastypeflag(dou._do, QUEUE_BASE))) {
                return NULL;
        }
        return dou._do->do_ctxt;
@@ -213,8 +248,9 @@ void
 dispatch_set_context(dispatch_object_t dou, void *context)
 {
        DISPATCH_OBJECT_TFB(_dispatch_objc_set_context, dou, context);
-       if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
-                       slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) {
+       if (unlikely(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT ||
+                       dx_hastypeflag(dou._do, QUEUE_ROOT) ||
+                       dx_hastypeflag(dou._do, QUEUE_BASE))) {
                return;
        }
        dou._do->do_ctxt = context;
@@ -224,8 +260,9 @@ void
 dispatch_set_finalizer_f(dispatch_object_t dou, dispatch_function_t finalizer)
 {
        DISPATCH_OBJECT_TFB(_dispatch_objc_set_finalizer_f, dou, finalizer);
-       if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
-                       slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) {
+       if (unlikely(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT ||
+                       dx_hastypeflag(dou._do, QUEUE_ROOT) ||
+                       dx_hastypeflag(dou._do, QUEUE_BASE))) {
                return;
        }
        dou._do->do_finalizer = finalizer;
@@ -237,10 +274,11 @@ dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t tq)
        DISPATCH_OBJECT_TFB(_dispatch_objc_set_target_queue, dou, tq);
        if (dx_vtable(dou._do)->do_set_targetq) {
                dx_vtable(dou._do)->do_set_targetq(dou._do, tq);
-       } else if (dou._do->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT &&
-                       !slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) {
+       } else if (likely(dou._do->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT &&
+                       !dx_hastypeflag(dou._do, QUEUE_ROOT) &&
+                       !dx_hastypeflag(dou._do, QUEUE_BASE))) {
                if (slowpath(!tq)) {
-                       tq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, false);
+                       tq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false);
                }
                _dispatch_object_set_target_queue_inline(dou._do, tq);
        }
@@ -268,7 +306,9 @@ void
 dispatch_resume(dispatch_object_t dou)
 {
        DISPATCH_OBJECT_TFB(_dispatch_objc_resume, dou);
-       if (dx_vtable(dou._do)->do_resume) {
+       // the do_suspend check below is not a typo: a class that implements
+       // do_resume but not do_suspend supports activate while keeping
+       // suspend/resume as no-ops
+       if (dx_vtable(dou._do)->do_suspend) {
                dx_vtable(dou._do)->do_resume(dou._do, false);
        }
 }
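
Concretely, a vtable can provide do_resume (so activation flows through the same entry point) while leaving do_suspend NULL, which turns suspend/resume into no-ops for that class. In miniature, with hypothetical types:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct vtable_s {
            void (*do_suspend)(void *obj);
            void (*do_resume)(void *obj, bool activate);
    } vtable_t;

    static void
    resume(const vtable_t *vt, void *obj)
    {
            // Keyed off do_suspend on purpose: a class with do_resume but no
            // do_suspend supports activation yet treats resume as a no-op.
            if (vt->do_suspend) {
                    vt->do_resume(obj, false);
            }
    }
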
@@ -276,6 +316,6 @@ dispatch_resume(dispatch_object_t dou)
 size_t
 _dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz)
 {
-       return dsnprintf(buf, bufsiz, "xrefcnt = 0x%x, refcnt = 0x%x, ",
+       return dsnprintf(buf, bufsiz, "xref = %d, ref = %d, ",
                        dou._do->do_xref_cnt + 1, dou._do->do_ref_cnt + 1);
 }
index a9153ac3e512c4c7d44337f74ff1d6569915fbf7..efee829471e9827563eeb1383f7b8ff41c9ce565 100644 (file)
 #error Objective C GC isn't supported anymore
 #endif
 
+#if __has_include(<objc/objc-internal.h>)
 #include <objc/objc-internal.h>
+#else
+extern id _Nullable objc_retain(id _Nullable obj) __asm__("_objc_retain");
+extern void objc_release(id _Nullable obj) __asm__("_objc_release");
+extern void _objc_init(void);
+extern void _objc_atfork_prepare(void);
+extern void _objc_atfork_parent(void);
+extern void _objc_atfork_child(void);
+#endif // __has_include(<objc/objc-internal.h>)
 #include <objc/objc-exception.h>
 #include <Foundation/NSString.h>
 
+// NOTE: this file must not contain any atomic operations
+
 #pragma mark -
 #pragma mark _os_object_t
 
@@ -251,7 +262,7 @@ _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz)
        NSUInteger offset = 0;
        NSString *desc = [dou debugDescription];
        [desc getBytes:buf maxLength:bufsiz-1 usedLength:&offset
-                       encoding:NSUTF8StringEncoding options:0
+                       encoding:NSUTF8StringEncoding options:(NSStringEncodingConversionOptions)0
                        range:NSMakeRange(0, [desc length]) remainingRange:NULL];
        if (offset) buf[offset] = 0;
        return offset;
@@ -281,9 +292,14 @@ DISPATCH_UNAVAILABLE_INIT()
        } else {
                strlcpy(buf, dx_kind(obj), sizeof(buf));
        }
-       return [nsstring stringWithFormat:
-                       [nsstring stringWithUTF8String:"<%s: %s>"],
-                       class_getName([self class]), buf];
+       NSString *format = [nsstring stringWithUTF8String:"<%s: %s>"];
+       if (!format) return nil;
+       return [nsstring stringWithFormat:format, class_getName([self class]), buf];
+}
+
+- (void)dealloc DISPATCH_NORETURN {
+       DISPATCH_INTERNAL_CRASH(0, "Calling dealloc on a dispatch object");
+       [super dealloc]; // make clang happy
 }
 
 @end
@@ -295,9 +311,10 @@ DISPATCH_UNAVAILABLE_INIT()
 - (NSString *)description {
        Class nsstring = objc_lookUpClass("NSString");
        if (!nsstring) return nil;
-       return [nsstring stringWithFormat:
-                       [nsstring stringWithUTF8String:"<%s: %s[%p]>"],
-                       class_getName([self class]), dispatch_queue_get_label(self), self];
+       NSString *format = [nsstring stringWithUTF8String:"<%s: %s[%p]>"];
+       if (!format) return nil;
+       return [nsstring stringWithFormat:format, class_getName([self class]),
+                       dispatch_queue_get_label(self), self];
 }
 
 - (void)_xref_dispose {
@@ -325,6 +342,7 @@ DISPATCH_UNAVAILABLE_INIT()
 
 - (void)_xref_dispose {
        _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self);
+       _dispatch_mach_xref_dispose((struct dispatch_mach_s *)self);
        [super _xref_dispose];
 }
 
@@ -369,6 +387,14 @@ DISPATCH_CLASS_IMPL(disk)
 DISPATCH_UNAVAILABLE_INIT()
 DISPATCH_OBJC_LOAD()
 
+-(id)retain {
+       return (id)_voucher_retain_inline((struct voucher_s *)self);
+}
+
+-(oneway void)release {
+       return _voucher_release_inline((struct voucher_s *)self);
+}
+
 - (void)_xref_dispose {
        return _voucher_xref_dispose(self); // calls _os_object_release_internal()
 }
@@ -382,9 +408,9 @@ DISPATCH_OBJC_LOAD()
        if (!nsstring) return nil;
        char buf[2048];
        _voucher_debug(self, buf, sizeof(buf));
-       return [nsstring stringWithFormat:
-                       [nsstring stringWithUTF8String:"<%s: %s>"],
-                       class_getName([self class]), buf];
+       NSString *format = [nsstring stringWithUTF8String:"<%s: %s>"];
+       if (!format) return nil;
+       return [nsstring stringWithFormat:format, class_getName([self class]), buf];
 }
 
 @end
@@ -411,20 +437,20 @@ DISPATCH_OBJC_LOAD()
 
 #if DISPATCH_COCOA_COMPAT
 
-void *
-_dispatch_last_resort_autorelease_pool_push(void)
+void
+_dispatch_last_resort_autorelease_pool_push(dispatch_invoke_context_t dic)
 {
        if (!slowpath(_os_object_debug_missing_pools)) {
-               return _dispatch_autorelease_pool_push();
+               dic->dic_autorelease_pool = _dispatch_autorelease_pool_push();
        }
-       return NULL;
 }
 
 void
-_dispatch_last_resort_autorelease_pool_pop(void *context)
+_dispatch_last_resort_autorelease_pool_pop(dispatch_invoke_context_t dic)
 {
        if (!slowpath(_os_object_debug_missing_pools)) {
-               return _dispatch_autorelease_pool_pop(context);
+               _dispatch_autorelease_pool_pop(dic->dic_autorelease_pool);
+               dic->dic_autorelease_pool = NULL;
        }
 }
 
@@ -466,6 +492,19 @@ _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t))
 }
 
 #if HAVE_MACH
+#undef _dispatch_client_callout3
+void
+_dispatch_client_callout3(void *ctxt, dispatch_mach_reason_t reason,
+               dispatch_mach_msg_t dmsg, dispatch_mach_async_reply_callback_t f)
+{
+       @try {
+               return f(ctxt, reason, dmsg);
+       }
+       @catch (...) {
+               objc_terminate();
+       }
+}
+
 #undef _dispatch_client_callout4
 void
 _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
index 40430b6c2c33d43e7ed6a51cbb719b3ebbccb561..4504f658713fc803735a2d3f82af170ac0f0ad4c 100644 (file)
 #define DISPATCH_INVOKABLE_VTABLE_HEADER(x) \
        unsigned long const do_type; \
        const char *const do_kind; \
-       void (*const do_invoke)(struct x##_s *, dispatch_invoke_flags_t)
+       void (*const do_invoke)(struct x##_s *, dispatch_invoke_context_t, \
+                       dispatch_invoke_flags_t); \
+       void (*const do_push)(struct x##_s *, dispatch_object_t, \
+                       dispatch_qos_t)
 
 #define DISPATCH_QUEUEABLE_VTABLE_HEADER(x) \
        DISPATCH_INVOKABLE_VTABLE_HEADER(x); \
        void (*const do_wakeup)(struct x##_s *, \
-                       pthread_priority_t, dispatch_wakeup_flags_t); \
-       void (*const do_dispose)(struct x##_s *)
+                       dispatch_qos_t, dispatch_wakeup_flags_t); \
+       void (*const do_dispose)(struct x##_s *, bool *allow_free)
 
 #define DISPATCH_OBJECT_VTABLE_HEADER(x) \
        DISPATCH_QUEUEABLE_VTABLE_HEADER(x); \
        void (*const do_set_targetq)(struct x##_s *, dispatch_queue_t); \
        void (*const do_suspend)(struct x##_s *); \
        void (*const do_resume)(struct x##_s *, bool activate); \
-       void (*const do_finalize_activation)(struct x##_s *); \
+       void (*const do_finalize_activation)(struct x##_s *, bool *allow_resume); \
        size_t (*const do_debug)(struct x##_s *, char *, size_t)
 
 #define dx_vtable(x) (&(x)->do_vtable->_os_obj_vtable)
 #define dx_hastypeflag(x, f) (dx_vtable(x)->do_type & _DISPATCH_##f##_TYPEFLAG)
 #define dx_kind(x) dx_vtable(x)->do_kind
 #define dx_debug(x, y, z) dx_vtable(x)->do_debug((x), (y), (z))
-#define dx_dispose(x) dx_vtable(x)->do_dispose(x)
-#define dx_invoke(x, z) dx_vtable(x)->do_invoke(x, z)
+#define dx_dispose(x, y) dx_vtable(x)->do_dispose(x, y)
+#define dx_invoke(x, y, z) dx_vtable(x)->do_invoke(x, y, z)
+#define dx_push(x, y, z) dx_vtable(x)->do_push(x, y, z)
 #define dx_wakeup(x, y, z) dx_vtable(x)->do_wakeup(x, y, z)
 
 #define DISPATCH_OBJECT_GLOBAL_REFCNT          _OS_OBJECT_GLOBAL_REFCNT
        .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT
 #endif
 
-#ifdef __LP64__
+#if DISPATCH_SIZEOF_PTR == 8
 // the bottom nibble must not be zero, the rest of the bits should be random
 // we sign extend the 64-bit version so that a better instruction encoding is
 // generated on Intel
 #endif
 
 DISPATCH_ENUM(dispatch_wakeup_flags, uint32_t,
-       // The caller of dx_wakeup owns an internal refcount on the object being
-       // woken up
-       DISPATCH_WAKEUP_CONSUME                 = 0x00000001,
+       // The caller of dx_wakeup owns two internal refcounts on the object being
+       // woken up. Two are needed for WLH wakeups where two threads need
+       // the object to remain valid in a non-coordinated way
+       // - the thread doing the poke for the duration of the poke
+       // - drainers for the duration of their drain
+       DISPATCH_WAKEUP_CONSUME_2               = 0x00000001,
 
        // Some change to the object needs to be published to drainers.
        // If the drainer isn't the same thread, some scheme such as the dispatch
        // queue DIRTY bit must be used and a release barrier likely has to be
        // involved before dx_wakeup returns
-       DISPATCH_WAKEUP_FLUSH                                   = 0x00000002,
+       DISPATCH_WAKEUP_MAKE_DIRTY              = 0x00000002,
 
-       // A slow waiter was just enqueued
-       DISPATCH_WAKEUP_SLOW_WAITER                             = 0x00000004,
+       // This wakeup is made by a sync owner that still holds the drain lock
+       DISPATCH_WAKEUP_BARRIER_COMPLETE        = 0x00000004,
 
-       // The caller desires to apply an override on the object being woken up
-       // and has already adjusted the `oq_override` field. When this flag is
-       // passed, the priority passed to dx_wakeup() should not be 0
-       DISPATCH_WAKEUP_OVERRIDING              = 0x00000008,
+       // This wakeup is caused by a dispatch_block_wait()
+       DISPATCH_WAKEUP_BLOCK_WAIT              = 0x00000008,
+);
 
-       // At the time this queue was woken up it had an override that must be
-       // preserved (used to solve a race with _dispatch_queue_drain_try_unlock())
-       DISPATCH_WAKEUP_WAS_OVERRIDDEN          = 0x00000010,
+typedef struct dispatch_invoke_context_s {
+       struct dispatch_object_s *dic_deferred;
+#if HAVE_PTHREAD_WORKQUEUE_NARROWING
+       uint64_t dic_next_narrow_check;
+#endif
+#if DISPATCH_COCOA_COMPAT
+       void *dic_autorelease_pool;
+#endif
+} dispatch_invoke_context_s, *dispatch_invoke_context_t;
 
-       // This wakeup is caused by a handoff from a slow waiter.
-       DISPATCH_WAKEUP_WAITER_HANDOFF          = 0x00000020,
+#if HAVE_PTHREAD_WORKQUEUE_NARROWING
+#define DISPATCH_THREAD_IS_NARROWING 1
 
-#define _DISPATCH_WAKEUP_OVERRIDE_BITS \
-               ((dispatch_wakeup_flags_t)(DISPATCH_WAKEUP_OVERRIDING | \
-               DISPATCH_WAKEUP_WAS_OVERRIDDEN))
-);
+#define dispatch_with_disabled_narrowing(dic, ...) ({ \
+               uint64_t suspend_narrow_check = dic->dic_next_narrow_check; \
+               dic->dic_next_narrow_check = 0; \
+               __VA_ARGS__; \
+               dic->dic_next_narrow_check = suspend_narrow_check; \
+       })
+#else
+#define dispatch_with_disabled_narrowing(dic, ...) __VA_ARGS__
+#endif
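
dispatch_with_disabled_narrowing leans on the GCC/clang statement-expression extension to save, clear, and restore the narrowing deadline around the wrapped statements. A self-contained sketch of the same save/clear/restore macro shape, with hypothetical names:

    #include <stdint.h>

    struct check_ctx { uint64_t next_check; };

    // Same shape as above; ({ ... }) is a GNU/clang extension.
    #define with_disabled_check(c, ...) ({ \
                    uint64_t _saved = (c)->next_check; \
                    (c)->next_check = 0; /* 0 disables the periodic check */ \
                    __VA_ARGS__; \
                    (c)->next_check = _saved; \
            })

    // Usage: with_disabled_check(&ctx, do_callout());
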
 
 DISPATCH_ENUM(dispatch_invoke_flags, uint32_t,
        DISPATCH_INVOKE_NONE                                    = 0x00000000,
@@ -270,12 +287,22 @@ DISPATCH_ENUM(dispatch_invoke_flags, uint32_t,
        // This invoke is a stealer, meaning that it doesn't own the
        // enqueue lock at drain lock time.
        //
-       // @const DISPATCH_INVOKE_OVERRIDING
-       // This invoke is draining the hierarchy on another root queue and needs
-       // to fake the identity of the original one.
+       // @const DISPATCH_INVOKE_WLH
+       // This invoke is for a bottom WLH
        //
        DISPATCH_INVOKE_STEALING                                = 0x00000001,
-       DISPATCH_INVOKE_OVERRIDING                              = 0x00000002,
+       DISPATCH_INVOKE_WLH                                             = 0x00000002,
+
+       // Misc flags
+       //
+       // @const DISPATCH_INVOKE_ASYNC_REPLY
+       // An asynchronous reply to a message is being handled.
+       //
+       // @const DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS
+       // The next serial drain should not allow sync waiters.
+       //
+       DISPATCH_INVOKE_ASYNC_REPLY                             = 0x00000004,
+       DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS   = 0x00000008,
 
        // Below this point flags are propagated to recursive calls to drain(),
        // continuation pop() or dx_invoke().
@@ -328,32 +355,37 @@ enum {
        _DISPATCH_DISK_TYPE                             =    0x70000, // meta-type for io disks
 
        _DISPATCH_QUEUE_ROOT_TYPEFLAG   =     0x0100, // bit set for any root queues
+       _DISPATCH_QUEUE_BASE_TYPEFLAG   =     0x0200, // base of a hierarchy that
+                                                     // targets a root queue
 
 #define DISPATCH_CONTINUATION_TYPE(name)  \
                (_DISPATCH_CONTINUATION_TYPE | DC_##name##_TYPE)
-       DISPATCH_DATA_TYPE                              = 1 | _DISPATCH_NODE_TYPE,
-       DISPATCH_MACH_MSG_TYPE                  = 2 | _DISPATCH_NODE_TYPE,
-       DISPATCH_QUEUE_ATTR_TYPE                = 3 | _DISPATCH_NODE_TYPE,
-
-       DISPATCH_IO_TYPE                                = 0 | _DISPATCH_IO_TYPE,
-       DISPATCH_OPERATION_TYPE                 = 0 | _DISPATCH_OPERATION_TYPE,
-       DISPATCH_DISK_TYPE                              = 0 | _DISPATCH_DISK_TYPE,
-
-       DISPATCH_QUEUE_LEGACY_TYPE              = 1 | _DISPATCH_QUEUE_TYPE,
-       DISPATCH_QUEUE_SERIAL_TYPE              = 2 | _DISPATCH_QUEUE_TYPE,
-       DISPATCH_QUEUE_CONCURRENT_TYPE  = 3 | _DISPATCH_QUEUE_TYPE,
-       DISPATCH_QUEUE_GLOBAL_ROOT_TYPE = 4 | _DISPATCH_QUEUE_TYPE |
-                       _DISPATCH_QUEUE_ROOT_TYPEFLAG,
-       DISPATCH_QUEUE_RUNLOOP_TYPE             = 5 | _DISPATCH_QUEUE_TYPE |
+       DISPATCH_DATA_TYPE                                      = 1 | _DISPATCH_NODE_TYPE,
+       DISPATCH_MACH_MSG_TYPE                          = 2 | _DISPATCH_NODE_TYPE,
+       DISPATCH_QUEUE_ATTR_TYPE                        = 3 | _DISPATCH_NODE_TYPE,
+
+       DISPATCH_IO_TYPE                                        = 0 | _DISPATCH_IO_TYPE,
+       DISPATCH_OPERATION_TYPE                         = 0 | _DISPATCH_OPERATION_TYPE,
+       DISPATCH_DISK_TYPE                                      = 0 | _DISPATCH_DISK_TYPE,
+
+       DISPATCH_QUEUE_LEGACY_TYPE                      = 1 | _DISPATCH_QUEUE_TYPE,
+       DISPATCH_QUEUE_SERIAL_TYPE                      = 2 | _DISPATCH_QUEUE_TYPE,
+       DISPATCH_QUEUE_CONCURRENT_TYPE          = 3 | _DISPATCH_QUEUE_TYPE,
+       DISPATCH_QUEUE_GLOBAL_ROOT_TYPE         = 4 | _DISPATCH_QUEUE_TYPE |
                        _DISPATCH_QUEUE_ROOT_TYPEFLAG,
-       DISPATCH_QUEUE_MGR_TYPE                 = 6 | _DISPATCH_QUEUE_TYPE,
-       DISPATCH_QUEUE_SPECIFIC_TYPE    = 7 | _DISPATCH_QUEUE_TYPE,
+       DISPATCH_QUEUE_NETWORK_EVENT_TYPE       = 5 | _DISPATCH_QUEUE_TYPE |
+                       _DISPATCH_QUEUE_BASE_TYPEFLAG,
+       DISPATCH_QUEUE_RUNLOOP_TYPE                     = 6 | _DISPATCH_QUEUE_TYPE |
+                       _DISPATCH_QUEUE_BASE_TYPEFLAG,
+       DISPATCH_QUEUE_MGR_TYPE                         = 7 | _DISPATCH_QUEUE_TYPE |
+                       _DISPATCH_QUEUE_BASE_TYPEFLAG,
+       DISPATCH_QUEUE_SPECIFIC_TYPE            = 8 | _DISPATCH_QUEUE_TYPE,
 
-       DISPATCH_SEMAPHORE_TYPE                 = 1 | _DISPATCH_SEMAPHORE_TYPE,
-       DISPATCH_GROUP_TYPE                             = 2 | _DISPATCH_SEMAPHORE_TYPE,
+       DISPATCH_SEMAPHORE_TYPE                         = 1 | _DISPATCH_SEMAPHORE_TYPE,
+       DISPATCH_GROUP_TYPE                                     = 2 | _DISPATCH_SEMAPHORE_TYPE,
 
-       DISPATCH_SOURCE_KEVENT_TYPE             = 1 | _DISPATCH_SOURCE_TYPE,
-       DISPATCH_MACH_CHANNEL_TYPE              = 2 | _DISPATCH_SOURCE_TYPE,
+       DISPATCH_SOURCE_KEVENT_TYPE                     = 1 | _DISPATCH_SOURCE_TYPE,
+       DISPATCH_MACH_CHANNEL_TYPE                      = 2 | _DISPATCH_SOURCE_TYPE,
 
 };
 
@@ -413,38 +445,29 @@ struct dispatch_object_s {
 
 #if OS_OBJECT_HAVE_OBJC1
 #define _OS_MPSC_QUEUE_FIELDS(ns, __state_field__) \
-       struct dispatch_object_s *volatile ns##_items_head; \
-       unsigned long ns##_serialnum; \
-       union { \
-               uint64_t volatile __state_field__; \
-               DISPATCH_STRUCT_LITTLE_ENDIAN_2( \
+       DISPATCH_UNION_LE(uint64_t volatile __state_field__, \
                        dispatch_lock __state_field__##_lock, \
                        uint32_t __state_field__##_bits \
-               ); \
-       }; /* needs to be 64-bit aligned */ \
-       /* LP64 global queue cacheline boundary */ \
+       ) DISPATCH_ATOMIC64_ALIGN; \
+       struct dispatch_object_s *volatile ns##_items_head; \
+       unsigned long ns##_serialnum; \
        const char *ns##_label; \
-       voucher_t ns##_override_voucher; \
+       struct dispatch_object_s *volatile ns##_items_tail; \
        dispatch_priority_t ns##_priority; \
-       dispatch_priority_t volatile ns##_override; \
-       struct dispatch_object_s *volatile ns##_items_tail
+       int volatile ns##_sref_cnt
 #else
 #define _OS_MPSC_QUEUE_FIELDS(ns, __state_field__) \
        struct dispatch_object_s *volatile ns##_items_head; \
-       union { \
-               uint64_t volatile __state_field__; \
-               DISPATCH_STRUCT_LITTLE_ENDIAN_2( \
+       DISPATCH_UNION_LE(uint64_t volatile __state_field__, \
                        dispatch_lock __state_field__##_lock, \
                        uint32_t __state_field__##_bits \
-               ); \
-       }; /* needs to be 64-bit aligned */ \
+       ) DISPATCH_ATOMIC64_ALIGN; \
        /* LP64 global queue cacheline boundary */ \
        unsigned long ns##_serialnum; \
        const char *ns##_label; \
-       voucher_t ns##_override_voucher; \
+       struct dispatch_object_s *volatile ns##_items_tail; \
        dispatch_priority_t ns##_priority; \
-       dispatch_priority_t volatile ns##_override; \
-       struct dispatch_object_s *volatile ns##_items_tail
+       int volatile ns##_sref_cnt
 #endif
 
 OS_OBJECT_INTERNAL_CLASS_DECL(os_mpsc_queue, object,
@@ -462,7 +485,9 @@ struct os_mpsc_queue_s {
 
 size_t _dispatch_object_debug_attr(dispatch_object_t dou, char* buf,
                size_t bufsiz);
-void *_dispatch_alloc(const void *vtable, size_t size);
+void *_dispatch_object_alloc(const void *vtable, size_t size);
+void _dispatch_object_finalize(dispatch_object_t dou);
+void _dispatch_object_dealloc(dispatch_object_t dou);
 #if !USE_OBJC
 void _dispatch_xref_dispose(dispatch_object_t dou);
 #endif
@@ -470,17 +495,22 @@ void _dispatch_dispose(dispatch_object_t dou);
 #if DISPATCH_COCOA_COMPAT
 #if USE_OBJC
 #include <objc/runtime.h>
+#if __has_include(<objc/objc-internal.h>)
 #include <objc/objc-internal.h>
+#else
+extern void *objc_autoreleasePoolPush(void);
+extern void objc_autoreleasePoolPop(void *context);
+#endif // __has_include(<objc/objc-internal.h>)
 #define _dispatch_autorelease_pool_push() \
-       objc_autoreleasePoolPush()
+               objc_autoreleasePoolPush()
 #define _dispatch_autorelease_pool_pop(context) \
-       objc_autoreleasePoolPop(context)
+               objc_autoreleasePoolPop(context)
 #else
 void *_dispatch_autorelease_pool_push(void);
 void _dispatch_autorelease_pool_pop(void *context);
 #endif
-void *_dispatch_last_resort_autorelease_pool_push(void);
-void _dispatch_last_resort_autorelease_pool_pop(void *context);
+void _dispatch_last_resort_autorelease_pool_push(dispatch_invoke_context_t dic);
+void _dispatch_last_resort_autorelease_pool_pop(dispatch_invoke_context_t dic);
 
 #define dispatch_invoke_with_autoreleasepool(flags, ...)  ({ \
                void *pool = NULL; \
@@ -496,7 +526,6 @@ void _dispatch_last_resort_autorelease_pool_pop(void *context);
        do { (void)flags; __VA_ARGS__; } while (0)
 #endif
 
-
 #if USE_OBJC
 OS_OBJECT_OBJC_CLASS_DECL(object);
 #endif
@@ -560,20 +589,20 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz);
  *   a barrier to perform prior to tearing down an object when the refcount
  *   reached -1.
  */
-#define _os_atomic_refcnt_perform2o(o, f, op, m)   ({ \
+#define _os_atomic_refcnt_perform2o(o, f, op, n, m)   ({ \
                typeof(o) _o = (o); \
                int _ref_cnt = _o->f; \
                if (fastpath(_ref_cnt != _OS_OBJECT_GLOBAL_REFCNT)) { \
-                       _ref_cnt = os_atomic_##op##2o(_o, f, m); \
+                       _ref_cnt = os_atomic_##op##2o(_o, f, n, m); \
                } \
                _ref_cnt; \
        })
 
-#define _os_atomic_refcnt_inc2o(o, m) \
-               _os_atomic_refcnt_perform2o(o, m, inc, relaxed)
+#define _os_atomic_refcnt_add2o(o, m, n) \
+               _os_atomic_refcnt_perform2o(o, m, add, n, relaxed)
 
-#define _os_atomic_refcnt_dec2o(o, m) \
-               _os_atomic_refcnt_perform2o(o, m, dec, release)
+#define _os_atomic_refcnt_sub2o(o, m, n) \
+               _os_atomic_refcnt_perform2o(o, m, sub, n, release)
 
 #define _os_atomic_refcnt_dispose_barrier2o(o, m) \
                (void)os_atomic_load2o(o, m, acquire)
@@ -596,19 +625,19 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz);
  *
  */
 #define _os_object_xrefcnt_inc(o) \
-               _os_atomic_refcnt_inc2o(o, os_obj_xref_cnt)
+               _os_atomic_refcnt_add2o(o, os_obj_xref_cnt, 1)
 
 #define _os_object_xrefcnt_dec(o) \
-               _os_atomic_refcnt_dec2o(o, os_obj_xref_cnt)
+               _os_atomic_refcnt_sub2o(o, os_obj_xref_cnt, 1)
 
 #define _os_object_xrefcnt_dispose_barrier(o) \
                _os_atomic_refcnt_dispose_barrier2o(o, os_obj_xref_cnt)
 
-#define _os_object_refcnt_inc(o) \
-               _os_atomic_refcnt_inc2o(o, os_obj_ref_cnt)
+#define _os_object_refcnt_add(o, n) \
+               _os_atomic_refcnt_add2o(o, os_obj_ref_cnt, n)
 
-#define _os_object_refcnt_dec(o) \
-               _os_atomic_refcnt_dec2o(o, os_obj_ref_cnt)
+#define _os_object_refcnt_sub(o, n) \
+               _os_atomic_refcnt_sub2o(o, os_obj_ref_cnt, n)
 
 #define _os_object_refcnt_dispose_barrier(o) \
                _os_atomic_refcnt_dispose_barrier2o(o, os_obj_ref_cnt)
index d7d6a8e6442dc73d5630575f1027d7512fc4bb3d..c01538c9d53e77fa845891cac47cff13ff252290 100644 (file)
@@ -40,9 +40,15 @@ dispatch_once(dispatch_once_t *val, dispatch_block_t block)
 }
 #endif
 
-DISPATCH_NOINLINE
-void
-dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func)
+#if DISPATCH_ONCE_INLINE_FASTPATH
+#define DISPATCH_ONCE_SLOW_INLINE inline DISPATCH_ALWAYS_INLINE
+#else
+#define DISPATCH_ONCE_SLOW_INLINE DISPATCH_NOINLINE
+#endif // DISPATCH_ONCE_INLINE_FASTPATH
+
+DISPATCH_ONCE_SLOW_INLINE
+static void
+dispatch_once_f_slow(dispatch_once_t *val, void *ctxt, dispatch_function_t func)
 {
 #if DISPATCH_GATE_USE_FOR_DISPATCH_ONCE
        dispatch_once_gate_t l = (dispatch_once_gate_t)val;
@@ -63,61 +69,9 @@ dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func)
                dow.dow_thread = _dispatch_tid_self();
                _dispatch_client_callout(ctxt, func);
 
-               // The next barrier must be long and strong.
-               //
-               // The scenario: SMP systems with weakly ordered memory models
-               // and aggressive out-of-order instruction execution.
-               //
-               // The problem:
-               //
-               // The dispatch_once*() wrapper macro causes the callee's
-               // instruction stream to look like this (pseudo-RISC):
-               //
-               //      load r5, pred-addr
-               //      cmpi r5, -1
-               //      beq  1f
-               //      call dispatch_once*()
-               //      1f:
-               //      load r6, data-addr
-               //
-               // May be re-ordered like so:
-               //
-               //      load r6, data-addr
-               //      load r5, pred-addr
-               //      cmpi r5, -1
-               //      beq  1f
-               //      call dispatch_once*()
-               //      1f:
-               //
-               // Normally, a barrier on the read side is used to workaround
-               // the weakly ordered memory model. But barriers are expensive
-               // and we only need to synchronize once! After func(ctxt)
-               // completes, the predicate will be marked as "done" and the
-               // branch predictor will correctly skip the call to
-               // dispatch_once*().
-               //
-               // A far faster alternative solution: Defeat the speculative
-               // read-ahead of peer CPUs.
-               //
-               // Modern architectures will throw away speculative results
-               // once a branch mis-prediction occurs. Therefore, if we can
-               // ensure that the predicate is not marked as being complete
-               // until long after the last store by func(ctxt), then we have
-               // defeated the read-ahead of peer CPUs.
-               //
-               // In other words, the last "store" by func(ctxt) must complete
-               // and then N cycles must elapse before ~0l is stored to *val.
-               // The value of N is whatever is sufficient to defeat the
-               // read-ahead mechanism of peer CPUs.
-               //
-               // On some CPUs, the most fully synchronizing instruction might
-               // need to be issued.
-
-               os_atomic_maximally_synchronizing_barrier();
-               // above assumed to contain release barrier
-               next = os_atomic_xchg(vval, DISPATCH_ONCE_DONE, relaxed);
+               next = (_dispatch_once_waiter_t)_dispatch_once_xchg_done(val);
                while (next != tail) {
-                       _dispatch_wait_until(tmp = (_dispatch_once_waiter_t)next->dow_next);
+                       tmp = (_dispatch_once_waiter_t)_dispatch_wait_until(next->dow_next);
                        event = &next->dow_event;
                        next = tmp;
                        _dispatch_thread_event_signal(event);
@@ -129,7 +83,7 @@ dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func)
                        if (next == DISPATCH_ONCE_DONE) {
                                break;
                        }
-                       if (os_atomic_cmpxchgvw(vval, next, tail, &next, release)) {
+                       if (os_atomic_cmpxchgv(vval, next, tail, &next, release)) {
                                dow.dow_thread = next->dow_thread;
                                dow.dow_next = next;
                                if (dow.dow_thread) {
@@ -147,3 +101,15 @@ dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func)
        }
 #endif
 }
+
+DISPATCH_NOINLINE
+void
+dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func)
+{
+#if !DISPATCH_ONCE_INLINE_FASTPATH
+       if (likely(os_atomic_load(val, acquire) == DLOCK_ONCE_DONE)) {
+               return;
+       }
+#endif // !DISPATCH_ONCE_INLINE_FASTPATH
+       return dispatch_once_f_slow(val, ctxt, func);
+}
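
The split gives every call site a cheap inline acquire-load fast path while keeping the contended waiter logic out of line. The same shape in portable C11 atomics — a simplified sketch that spins instead of parking waiters on thread events as the real code does:

    #include <stdatomic.h>
    #include <stdint.h>

    #define ONCE_DONE ((intptr_t)~0l)  // stand-in for DLOCK_ONCE_DONE

    static void
    once_slow(_Atomic intptr_t *pred, void *ctxt, void (*fn)(void *))
    {
            intptr_t expected = 0;
            if (atomic_compare_exchange_strong_explicit(pred, &expected, 1,
                            memory_order_relaxed, memory_order_relaxed)) {
                    fn(ctxt);
                    // Publish completion; pairs with the acquire loads below.
                    atomic_store_explicit(pred, ONCE_DONE, memory_order_release);
            } else {
                    // Simplified: spin until the winner publishes ONCE_DONE.
                    while (atomic_load_explicit(pred, memory_order_acquire)
                                    != ONCE_DONE) { }
            }
    }

    static inline void
    once(_Atomic intptr_t *pred, void *ctxt, void (*fn)(void *))
    {
            // Fast path: one acquire load when initialization already ran.
            if (atomic_load_explicit(pred, memory_order_acquire) == ONCE_DONE) {
                    return;
            }
            once_slow(pred, ctxt, fn);
    }
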
index ede3c56b3bccaa68a9d48d05015dd7f7b0fe0f48..13bcf7a93fc09cfd07ed2c2d313c2b7137609a21 100644 (file)
@@ -101,3 +101,41 @@ provider dispatch {
 #pragma D attributes Private/Private/Common provider dispatch function
 #pragma D attributes Evolving/Evolving/Common provider dispatch name
 #pragma D attributes Evolving/Evolving/Common provider dispatch args
+
+typedef struct voucher_s *voucher_t;
+
+/*
+ * Probes for vouchers
+ */
+provider voucher {
+
+    /*
+     * Voucher lifetime:
+     *
+     * voucher$target:::create     A new voucher is being created
+     * voucher$target:::dispose    A voucher is being freed
+     * voucher$target:::retain     A voucher is being retained
+     * voucher$target:::release    A voucher is being released
+     */
+    probe create(voucher_t voucher, mach_port_t kv, uint64_t activity_id);
+    probe dispose(voucher_t voucher);
+    probe retain(voucher_t voucher, int resulting_refcnt);
+    probe release(voucher_t voucher, int resulting_refcnt);
+
+    /*
+     * Thread adoption
+     *
+     * voucher$target:::adopt      A voucher is being adopted by the current thread
+     * voucher$target:::orphan     A voucher is being orphaned by the current thread
+     */
+    probe adopt(voucher_t voucher);
+    probe orphan(voucher_t voucher);
+
+};
+
+#pragma D attributes Evolving/Evolving/Common provider voucher provider
+#pragma D attributes Private/Private/Common provider voucher module
+#pragma D attributes Private/Private/Common provider voucher function
+#pragma D attributes Evolving/Evolving/Common provider voucher name
+#pragma D attributes Evolving/Evolving/Common provider voucher args
+
index e87de8d86bbb5ec8ed4476935b360cb92880df5f..23eb63a7e5cd6fed0ef6f7368fa9c19250bccbbb 100644 (file)
 
 #include "internal.h"
 #if HAVE_MACH
-#include "protocol.h"
+#include "protocol.h" // _dispatch_send_wakeup_runloop_thread
 #endif
 
+#if HAVE_PTHREAD_WORKQUEUES || DISPATCH_USE_INTERNAL_WORKQUEUE
+#define DISPATCH_USE_WORKQUEUES 1
+#endif
 #if (!HAVE_PTHREAD_WORKQUEUES || DISPATCH_DEBUG) && \
                !defined(DISPATCH_ENABLE_THREAD_POOL)
 #define DISPATCH_ENABLE_THREAD_POOL 1
 #if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES || DISPATCH_ENABLE_THREAD_POOL
 #define DISPATCH_USE_PTHREAD_POOL 1
 #endif
-#if HAVE_PTHREAD_WORKQUEUES && (!HAVE_PTHREAD_WORKQUEUE_QOS || DISPATCH_DEBUG) \
-               && !defined(DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK)
-#define DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK 1
-#endif
-#if HAVE_PTHREAD_WORKQUEUES && DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK && \
-               !HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP && \
+#if HAVE_PTHREAD_WORKQUEUES && (!HAVE_PTHREAD_WORKQUEUE_QOS || \
+               DISPATCH_DEBUG) && !HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP && \
                !defined(DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK)
 #define DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK 1
 #endif
-#if HAVE_PTHREAD_WORKQUEUE_QOS && !DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
-#undef HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
-#define HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 0
+#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP && (DISPATCH_DEBUG || \
+               (!DISPATCH_USE_KEVENT_WORKQUEUE && !HAVE_PTHREAD_WORKQUEUE_QOS)) && \
+               !defined(DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP)
+#define DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 1
+#endif
+#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP || \
+               DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || \
+               DISPATCH_USE_INTERNAL_WORKQUEUE
+#if !DISPATCH_USE_INTERNAL_WORKQUEUE
+#define DISPATCH_USE_WORKQ_PRIORITY 1
+#endif
+#define DISPATCH_USE_WORKQ_OPTIONS 1
 #endif
-#if HAVE_PTHREAD_WORKQUEUES && DISPATCH_USE_PTHREAD_POOL && \
+
+#if DISPATCH_USE_WORKQUEUES && DISPATCH_USE_PTHREAD_POOL && \
                !DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK
 #define pthread_workqueue_t void*
 #endif
 
 static void _dispatch_sig_thread(void *ctxt);
 static void _dispatch_cache_cleanup(void *value);
-static void _dispatch_sync_f(dispatch_queue_t dq, void *ctxt,
-               dispatch_function_t func, pthread_priority_t pp);
 static void _dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc);
 static void _dispatch_queue_cleanup(void *ctxt);
+static void _dispatch_wlh_cleanup(void *ctxt);
 static void _dispatch_deferred_items_cleanup(void *ctxt);
 static void _dispatch_frame_cleanup(void *ctxt);
 static void _dispatch_context_cleanup(void *ctxt);
-static void _dispatch_non_barrier_complete(dispatch_queue_t dq);
-static inline void _dispatch_global_queue_poke(dispatch_queue_t dq);
+static void _dispatch_queue_barrier_complete(dispatch_queue_t dq,
+               dispatch_qos_t qos, dispatch_wakeup_flags_t flags);
+static void _dispatch_queue_non_barrier_complete(dispatch_queue_t dq);
+static void _dispatch_queue_push_sync_waiter(dispatch_queue_t dq,
+               dispatch_sync_context_t dsc, dispatch_qos_t qos);
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+static void _dispatch_root_queue_push_override_stealer(dispatch_queue_t orig_rq,
+               dispatch_queue_t dq, dispatch_qos_t qos);
+static inline void _dispatch_queue_class_wakeup_with_override(dispatch_queue_t,
+               uint64_t dq_state, dispatch_wakeup_flags_t flags);
+#endif
 #if HAVE_PTHREAD_WORKQUEUES
 static void _dispatch_worker_thread4(void *context);
 #if HAVE_PTHREAD_WORKQUEUE_QOS
 static void _dispatch_worker_thread3(pthread_priority_t priority);
 #endif
-#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
+#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
 static void _dispatch_worker_thread2(int priority, int options, void *context);
 #endif
 #endif
 #if DISPATCH_USE_PTHREAD_POOL
 static void *_dispatch_worker_thread(void *context);
-static int _dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset);
 #endif
 
 #if DISPATCH_COCOA_COMPAT
 static dispatch_once_t _dispatch_main_q_handle_pred;
 static void _dispatch_runloop_queue_poke(dispatch_queue_t dq,
-               pthread_priority_t pp, dispatch_wakeup_flags_t flags);
+               dispatch_qos_t qos, dispatch_wakeup_flags_t flags);
 static void _dispatch_runloop_queue_handle_init(void *ctxt);
 static void _dispatch_runloop_queue_handle_dispose(dispatch_queue_t dq);
 #endif
 
-static void _dispatch_root_queues_init_once(void *context);
-static dispatch_once_t _dispatch_root_queues_pred;
-
 #pragma mark -
 #pragma mark dispatch_root_queue
 
@@ -150,22 +163,29 @@ static struct dispatch_pthread_root_queue_context_s
 };
 #endif
 
-#define MAX_PTHREAD_COUNT 255
+#ifndef DISPATCH_WORKQ_MAX_PTHREAD_COUNT
+#define DISPATCH_WORKQ_MAX_PTHREAD_COUNT 255
+#endif
 
 struct dispatch_root_queue_context_s {
        union {
                struct {
-                       unsigned int volatile dgq_pending;
-#if HAVE_PTHREAD_WORKQUEUES
+                       int volatile dgq_pending;
+#if DISPATCH_USE_WORKQUEUES
                        qos_class_t dgq_qos;
-                       int dgq_wq_priority, dgq_wq_options;
+#if DISPATCH_USE_WORKQ_PRIORITY
+                       int dgq_wq_priority;
+#endif
+#if DISPATCH_USE_WORKQ_OPTIONS
+                       int dgq_wq_options;
+#endif
 #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_USE_PTHREAD_POOL
                        pthread_workqueue_t dgq_kworkqueue;
 #endif
-#endif // HAVE_PTHREAD_WORKQUEUES
+#endif // DISPATCH_USE_WORKQUEUES
 #if DISPATCH_USE_PTHREAD_POOL
                        void *dgq_ctxt;
-                       uint32_t volatile dgq_thread_pool_size;
+                       int32_t volatile dgq_thread_pool_size;
 #endif
                };
                char _dgq_pad[DISPATCH_CACHELINE_SIZE];
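
The `_dgq_pad` union arm rounds each `dispatch_root_queue_context_s` up to DISPATCH_CACHELINE_SIZE, so adjacent contexts in the array never share a cache line. A minimal C11 sketch of the same padding idiom, assuming a 64-byte line:

#include <stdio.h>

#define CACHELINE_SIZE 64 /* assumed; libdispatch takes this from its shims */

struct padded_context {               /* C11: anonymous struct in a union */
    union {
        struct {
            int volatile pending;     /* hot field, touched by many threads */
        };
        char _pad[CACHELINE_SIZE];    /* round the element up to one line */
    };
};

int main(void)
{
    /* In an array, each element owns a full cache line, so updates to one
     * root queue's counters cannot false-share with its neighbor's. */
    printf("sizeof(struct padded_context) = %zu\n",
            sizeof(struct padded_context));
    return 0;
}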
@@ -184,132 +204,180 @@ typedef struct dispatch_root_queue_context_s *dispatch_root_queue_context_t;
 DISPATCH_CACHELINE_ALIGN
 static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = {
        [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] = {{{
-#if HAVE_PTHREAD_WORKQUEUES
-               .dgq_qos = _DISPATCH_QOS_CLASS_MAINTENANCE,
+#if DISPATCH_USE_WORKQUEUES
+               .dgq_qos = QOS_CLASS_MAINTENANCE,
+#if DISPATCH_USE_WORKQ_PRIORITY
                .dgq_wq_priority = WORKQ_BG_PRIOQUEUE,
+#endif
+#if DISPATCH_USE_WORKQ_OPTIONS
                .dgq_wq_options = 0,
 #endif
+#endif
 #if DISPATCH_ENABLE_THREAD_POOL
                .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
                                DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS],
 #endif
        }}},
        [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT] = {{{
-#if HAVE_PTHREAD_WORKQUEUES
-               .dgq_qos = _DISPATCH_QOS_CLASS_MAINTENANCE,
+#if DISPATCH_USE_WORKQUEUES
+               .dgq_qos = QOS_CLASS_MAINTENANCE,
+#if DISPATCH_USE_WORKQ_PRIORITY
                .dgq_wq_priority = WORKQ_BG_PRIOQUEUE,
+#endif
+#if DISPATCH_USE_WORKQ_OPTIONS
                .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT,
 #endif
+#endif
 #if DISPATCH_ENABLE_THREAD_POOL
                .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
                                DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT],
 #endif
        }}},
        [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = {{{
-#if HAVE_PTHREAD_WORKQUEUES
-               .dgq_qos = _DISPATCH_QOS_CLASS_BACKGROUND,
+#if DISPATCH_USE_WORKQUEUES
+               .dgq_qos = QOS_CLASS_BACKGROUND,
+#if DISPATCH_USE_WORKQ_PRIORITY
                .dgq_wq_priority = WORKQ_BG_PRIOQUEUE_CONDITIONAL,
+#endif
+#if DISPATCH_USE_WORKQ_OPTIONS
                .dgq_wq_options = 0,
 #endif
+#endif
 #if DISPATCH_ENABLE_THREAD_POOL
                .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
                                DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS],
 #endif
        }}},
        [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = {{{
-#if HAVE_PTHREAD_WORKQUEUES
-               .dgq_qos = _DISPATCH_QOS_CLASS_BACKGROUND,
+#if DISPATCH_USE_WORKQUEUES
+               .dgq_qos = QOS_CLASS_BACKGROUND,
+#if DISPATCH_USE_WORKQ_PRIORITY
                .dgq_wq_priority = WORKQ_BG_PRIOQUEUE_CONDITIONAL,
+#endif
+#if DISPATCH_USE_WORKQ_OPTIONS
                .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT,
 #endif
+#endif
 #if DISPATCH_ENABLE_THREAD_POOL
                .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
                                DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT],
 #endif
        }}},
        [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS] = {{{
-#if HAVE_PTHREAD_WORKQUEUES
-               .dgq_qos = _DISPATCH_QOS_CLASS_UTILITY,
+#if DISPATCH_USE_WORKQUEUES
+               .dgq_qos = QOS_CLASS_UTILITY,
+#if DISPATCH_USE_WORKQ_PRIORITY
                .dgq_wq_priority = WORKQ_LOW_PRIOQUEUE,
+#endif
+#if DISPATCH_USE_WORKQ_OPTIONS
                .dgq_wq_options = 0,
 #endif
+#endif
 #if DISPATCH_ENABLE_THREAD_POOL
                .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
                                DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS],
 #endif
        }}},
        [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT] = {{{
-#if HAVE_PTHREAD_WORKQUEUES
-               .dgq_qos = _DISPATCH_QOS_CLASS_UTILITY,
+#if DISPATCH_USE_WORKQUEUES
+               .dgq_qos = QOS_CLASS_UTILITY,
+#if DISPATCH_USE_WORKQ_PRIORITY
                .dgq_wq_priority = WORKQ_LOW_PRIOQUEUE,
+#endif
+#if DISPATCH_USE_WORKQ_OPTIONS
                .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT,
 #endif
+#endif
 #if DISPATCH_ENABLE_THREAD_POOL
                .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
                                DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT],
 #endif
        }}},
        [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS] = {{{
-#if HAVE_PTHREAD_WORKQUEUES
-               .dgq_qos = _DISPATCH_QOS_CLASS_DEFAULT,
+#if DISPATCH_USE_WORKQUEUES
+               .dgq_qos = QOS_CLASS_DEFAULT,
+#if DISPATCH_USE_WORKQ_PRIORITY
                .dgq_wq_priority = WORKQ_DEFAULT_PRIOQUEUE,
+#endif
+#if DISPATCH_USE_WORKQ_OPTIONS
                .dgq_wq_options = 0,
 #endif
+#endif
 #if DISPATCH_ENABLE_THREAD_POOL
                .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
                                DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS],
 #endif
        }}},
        [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT] = {{{
-#if HAVE_PTHREAD_WORKQUEUES
-               .dgq_qos = _DISPATCH_QOS_CLASS_DEFAULT,
+#if DISPATCH_USE_WORKQUEUES
+               .dgq_qos = QOS_CLASS_DEFAULT,
+#if DISPATCH_USE_WORKQ_PRIORITY
                .dgq_wq_priority = WORKQ_DEFAULT_PRIOQUEUE,
+#endif
+#if DISPATCH_USE_WORKQ_OPTIONS
                .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT,
 #endif
+#endif
 #if DISPATCH_ENABLE_THREAD_POOL
                .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
                                DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT],
 #endif
        }}},
        [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS] = {{{
-#if HAVE_PTHREAD_WORKQUEUES
-               .dgq_qos = _DISPATCH_QOS_CLASS_USER_INITIATED,
+#if DISPATCH_USE_WORKQUEUES
+               .dgq_qos = QOS_CLASS_USER_INITIATED,
+#if DISPATCH_USE_WORKQ_PRIORITY
                .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE,
+#endif
+#if DISPATCH_USE_WORKQ_OPTIONS
                .dgq_wq_options = 0,
 #endif
+#endif
 #if DISPATCH_ENABLE_THREAD_POOL
                .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
                                DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS],
 #endif
        }}},
        [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT] = {{{
-#if HAVE_PTHREAD_WORKQUEUES
-               .dgq_qos = _DISPATCH_QOS_CLASS_USER_INITIATED,
+#if DISPATCH_USE_WORKQUEUES
+               .dgq_qos = QOS_CLASS_USER_INITIATED,
+#if DISPATCH_USE_WORKQ_PRIORITY
                .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE,
+#endif
+#if DISPATCH_USE_WORKQ_OPTIONS
                .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT,
 #endif
+#endif
 #if DISPATCH_ENABLE_THREAD_POOL
                .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
                                DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT],
 #endif
        }}},
        [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = {{{
-#if HAVE_PTHREAD_WORKQUEUES
-               .dgq_qos = _DISPATCH_QOS_CLASS_USER_INTERACTIVE,
+#if DISPATCH_USE_WORKQUEUES
+               .dgq_qos = QOS_CLASS_USER_INTERACTIVE,
+#if DISPATCH_USE_WORKQ_PRIORITY
                .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE_CONDITIONAL,
+#endif
+#if DISPATCH_USE_WORKQ_OPTIONS
                .dgq_wq_options = 0,
 #endif
+#endif
 #if DISPATCH_ENABLE_THREAD_POOL
                .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
                                DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS],
 #endif
        }}},
        [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = {{{
-#if HAVE_PTHREAD_WORKQUEUES
-               .dgq_qos = _DISPATCH_QOS_CLASS_USER_INTERACTIVE,
+#if DISPATCH_USE_WORKQUEUES
+               .dgq_qos = QOS_CLASS_USER_INTERACTIVE,
+#if DISPATCH_USE_WORKQ_PRIORITY
                .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE_CONDITIONAL,
+#endif
+#if DISPATCH_USE_WORKQ_OPTIONS
                .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT,
 #endif
+#endif
 #if DISPATCH_ENABLE_THREAD_POOL
                .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
                                DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT],
@@ -321,68 +389,75 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = {
 //         renaming this symbol
 DISPATCH_CACHELINE_ALIGN
 struct dispatch_queue_s _dispatch_root_queues[] = {
-#define _DISPATCH_ROOT_QUEUE_ENTRY(n, ...) \
-       [DISPATCH_ROOT_QUEUE_IDX_##n] = { \
+#define _DISPATCH_ROOT_QUEUE_IDX(n, flags) \
+       ((flags & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) ? \
+               DISPATCH_ROOT_QUEUE_IDX_##n##_QOS_OVERCOMMIT : \
+               DISPATCH_ROOT_QUEUE_IDX_##n##_QOS)
+#define _DISPATCH_ROOT_QUEUE_ENTRY(n, flags, ...) \
+       [_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \
                DISPATCH_GLOBAL_OBJECT_HEADER(queue_root), \
                .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, \
                .do_ctxt = &_dispatch_root_queue_contexts[ \
-                               DISPATCH_ROOT_QUEUE_IDX_##n], \
-               .dq_width = DISPATCH_QUEUE_WIDTH_POOL, \
-               .dq_override_voucher = DISPATCH_NO_VOUCHER, \
-               .dq_override = DISPATCH_SATURATED_OVERRIDE, \
+                               _DISPATCH_ROOT_QUEUE_IDX(n, flags)], \
+               .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), \
+               .dq_priority = _dispatch_priority_make(DISPATCH_QOS_##n, 0) | flags | \
+                               DISPATCH_PRIORITY_FLAG_ROOTQUEUE | \
+                               ((flags & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE) ? 0 : \
+                               DISPATCH_QOS_##n << DISPATCH_PRIORITY_OVERRIDE_SHIFT), \
                __VA_ARGS__ \
        }
-       _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE_QOS,
+       _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, 0,
                .dq_label = "com.apple.root.maintenance-qos",
                .dq_serialnum = 4,
        ),
-       _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE_QOS_OVERCOMMIT,
+       _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
                .dq_label = "com.apple.root.maintenance-qos.overcommit",
                .dq_serialnum = 5,
        ),
-       _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND_QOS,
+       _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, 0,
                .dq_label = "com.apple.root.background-qos",
                .dq_serialnum = 6,
        ),
-       _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND_QOS_OVERCOMMIT,
+       _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
                .dq_label = "com.apple.root.background-qos.overcommit",
                .dq_serialnum = 7,
        ),
-       _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY_QOS,
+       _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, 0,
                .dq_label = "com.apple.root.utility-qos",
                .dq_serialnum = 8,
        ),
-       _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY_QOS_OVERCOMMIT,
+       _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
                .dq_label = "com.apple.root.utility-qos.overcommit",
                .dq_serialnum = 9,
        ),
-       _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT_QOS,
+       _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE,
                .dq_label = "com.apple.root.default-qos",
                .dq_serialnum = 10,
        ),
-       _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT_QOS_OVERCOMMIT,
+       _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT,
+                       DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE | DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
                .dq_label = "com.apple.root.default-qos.overcommit",
                .dq_serialnum = 11,
        ),
-       _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED_QOS,
+       _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, 0,
                .dq_label = "com.apple.root.user-initiated-qos",
                .dq_serialnum = 12,
        ),
-       _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED_QOS_OVERCOMMIT,
+       _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
                .dq_label = "com.apple.root.user-initiated-qos.overcommit",
                .dq_serialnum = 13,
        ),
-       _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE_QOS,
+       _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, 0,
                .dq_label = "com.apple.root.user-interactive-qos",
                .dq_serialnum = 14,
        ),
-       _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE_QOS_OVERCOMMIT,
+       _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
                .dq_label = "com.apple.root.user-interactive-qos.overcommit",
                .dq_serialnum = 15,
        ),
 };
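
The rewritten `_DISPATCH_ROOT_QUEUE_ENTRY` derives the array slot from a QoS token plus an overcommit flag via token pasting, so each entry is declared once per (QoS, overcommit) pair instead of spelling out both index names. A self-contained miniature of the pattern, with illustrative names rather than libdispatch's real types:

#include <stdio.h>

enum { IDX_DEFAULT_QOS, IDX_DEFAULT_QOS_OVERCOMMIT, IDX_COUNT };

#define FLAG_OVERCOMMIT 0x1

/* One macro picks the slot from a QoS token plus an overcommit flag... */
#define QUEUE_IDX(n, flags) \
    (((flags) & FLAG_OVERCOMMIT) ? IDX_##n##_QOS_OVERCOMMIT : IDX_##n##_QOS)

/* ...the other fills in the fields shared by every entry. */
#define QUEUE_ENTRY(n, flags, ...) \
    [QUEUE_IDX(n, flags)] = { .prio_flags = (flags), __VA_ARGS__ }

struct queue { int prio_flags; const char *label; };

static const struct queue queues[IDX_COUNT] = {
    QUEUE_ENTRY(DEFAULT, 0, .label = "root.default"),
    QUEUE_ENTRY(DEFAULT, FLAG_OVERCOMMIT, .label = "root.default.overcommit"),
};

int main(void)
{
    for (int i = 0; i < IDX_COUNT; i++) {
        printf("%d: %s (flags %d)\n", i, queues[i].label,
                queues[i].prio_flags);
    }
    return 0;
}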
 
-#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
+#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
 static const dispatch_queue_t _dispatch_wq2root_queues[][2] = {
        [WORKQ_BG_PRIOQUEUE][0] = &_dispatch_root_queues[
                        DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS],
@@ -405,37 +480,7 @@ static const dispatch_queue_t _dispatch_wq2root_queues[][2] = {
                        &_dispatch_root_queues[
                        DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT],
 };
-#endif // HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
-
-#define DISPATCH_PRIORITY_COUNT 5
-
-enum {
-       // No DISPATCH_PRIORITY_IDX_MAINTENANCE define because there is no legacy
-       // maintenance priority
-       DISPATCH_PRIORITY_IDX_BACKGROUND = 0,
-       DISPATCH_PRIORITY_IDX_NON_INTERACTIVE,
-       DISPATCH_PRIORITY_IDX_LOW,
-       DISPATCH_PRIORITY_IDX_DEFAULT,
-       DISPATCH_PRIORITY_IDX_HIGH,
-};
-
-static qos_class_t _dispatch_priority2qos[] = {
-       [DISPATCH_PRIORITY_IDX_BACKGROUND] = _DISPATCH_QOS_CLASS_BACKGROUND,
-       [DISPATCH_PRIORITY_IDX_NON_INTERACTIVE] = _DISPATCH_QOS_CLASS_UTILITY,
-       [DISPATCH_PRIORITY_IDX_LOW] = _DISPATCH_QOS_CLASS_UTILITY,
-       [DISPATCH_PRIORITY_IDX_DEFAULT] = _DISPATCH_QOS_CLASS_DEFAULT,
-       [DISPATCH_PRIORITY_IDX_HIGH] = _DISPATCH_QOS_CLASS_USER_INITIATED,
-};
-
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-static const int _dispatch_priority2wq[] = {
-       [DISPATCH_PRIORITY_IDX_BACKGROUND] = WORKQ_BG_PRIOQUEUE,
-       [DISPATCH_PRIORITY_IDX_NON_INTERACTIVE] = WORKQ_NON_INTERACTIVE_PRIOQUEUE,
-       [DISPATCH_PRIORITY_IDX_LOW] = WORKQ_LOW_PRIOQUEUE,
-       [DISPATCH_PRIORITY_IDX_DEFAULT] = WORKQ_DEFAULT_PRIOQUEUE,
-       [DISPATCH_PRIORITY_IDX_HIGH] = WORKQ_HIGH_PRIOQUEUE,
-};
-#endif
+#endif // DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
 
 #if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
 static struct dispatch_queue_s _dispatch_mgr_root_queue;
@@ -449,12 +494,13 @@ static struct dispatch_queue_s _dispatch_mgr_root_queue;
 DISPATCH_CACHELINE_ALIGN
 struct dispatch_queue_s _dispatch_mgr_q = {
        DISPATCH_GLOBAL_OBJECT_HEADER(queue_mgr),
-       .dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1),
+       .dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1) |
+                       DISPATCH_QUEUE_ROLE_BASE_ANON,
        .do_targetq = &_dispatch_mgr_root_queue,
        .dq_label = "com.apple.libdispatch-manager",
-       .dq_width = 1,
-       .dq_override_voucher = DISPATCH_NO_VOUCHER,
-       .dq_override = DISPATCH_SATURATED_OVERRIDE,
+       .dq_atomic_flags = DQF_WIDTH(1),
+       .dq_priority = DISPATCH_PRIORITY_FLAG_MANAGER |
+                       DISPATCH_PRIORITY_SATURATED_OVERRIDE,
        .dq_serialnum = 2,
 };
 
@@ -464,48 +510,16 @@ dispatch_get_global_queue(long priority, unsigned long flags)
        if (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) {
                return DISPATCH_BAD_INPUT;
        }
-       dispatch_once_f(&_dispatch_root_queues_pred, NULL,
-                       _dispatch_root_queues_init_once);
-       qos_class_t qos;
-       switch (priority) {
-#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
-       case _DISPATCH_QOS_CLASS_MAINTENANCE:
-               if (!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS]
-                               .dq_priority) {
-                       // map maintenance to background on old kernel
-                       qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_BACKGROUND];
-               } else {
-                       qos = (qos_class_t)priority;
-               }
-               break;
-#endif // DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
-       case DISPATCH_QUEUE_PRIORITY_BACKGROUND:
-               qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_BACKGROUND];
-               break;
-       case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE:
-               qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_NON_INTERACTIVE];
-               break;
-       case DISPATCH_QUEUE_PRIORITY_LOW:
-               qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_LOW];
-               break;
-       case DISPATCH_QUEUE_PRIORITY_DEFAULT:
-               qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_DEFAULT];
-               break;
-       case DISPATCH_QUEUE_PRIORITY_HIGH:
-               qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_HIGH];
-               break;
-       case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
-#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
-               if (!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS]
-                               .dq_priority) {
-                       qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_HIGH];
-                       break;
-               }
+       dispatch_qos_t qos = _dispatch_qos_from_queue_priority(priority);
+#if !HAVE_PTHREAD_WORKQUEUE_QOS
+       if (qos == QOS_CLASS_MAINTENANCE) {
+               qos = DISPATCH_QOS_BACKGROUND;
+       } else if (qos == QOS_CLASS_USER_INTERACTIVE) {
+               qos = DISPATCH_QOS_USER_INITIATED;
+       }
 #endif
-               // fallthrough
-       default:
-               qos = (qos_class_t)priority;
-               break;
+       if (qos == DISPATCH_QOS_UNSPECIFIED) {
+               return DISPATCH_BAD_INPUT;
        }
        return _dispatch_get_root_queue(qos, flags & DISPATCH_QUEUE_OVERCOMMIT);
 }
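
The new body of `dispatch_get_global_queue` funnels both legacy priority constants and QoS classes through `_dispatch_qos_from_queue_priority` and rejects anything unmapped. From the caller's side the public API is unchanged; a small usage example:

#include <dispatch/dispatch.h>
#include <stdio.h>

int main(void)
{
    /* A legacy priority constant is mapped to a QoS bucket internally;
     * an unmapped value would now return NULL ("bad input"). */
    dispatch_queue_t q =
            dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_LOW, 0);

    dispatch_semaphore_t done = dispatch_semaphore_create(0);
    dispatch_async(q, ^{
        printf("running on %s\n", dispatch_queue_get_label(q));
        dispatch_semaphore_signal(done);
    });
    dispatch_semaphore_wait(done, DISPATCH_TIME_FOREVER);
    return 0;
}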
@@ -515,7 +529,7 @@ static inline dispatch_queue_t
 _dispatch_get_current_queue(void)
 {
        return _dispatch_queue_get_current() ?:
-                       _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true);
+                       _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true);
 }
 
 dispatch_queue_t
@@ -551,21 +565,20 @@ dispatch_assert_queue(dispatch_queue_t dq)
                                "dispatch_assert_queue()");
        }
        uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
-       if (unlikely(_dq_state_drain_pended(dq_state))) {
-               goto fail;
-       }
-       if (likely(_dq_state_drain_owner(dq_state) == _dispatch_tid_self())) {
+       if (likely(_dq_state_drain_locked_by_self(dq_state))) {
                return;
        }
-       if (likely(dq->dq_width > 1)) {
-               // we can look at the width: if it is changing while we read it,
-               // it means that a barrier is running on `dq` concurrently, which
-               // proves that we're not on `dq`. Hence reading a stale '1' is ok.
-               if (fastpath(_dispatch_thread_frame_find_queue(dq))) {
+       // we can look at the width: if it is changing while we read it,
+       // it means that a barrier is running on `dq` concurrently, which
+       // proves that we're not on `dq`. Hence reading a stale '1' is ok.
+       //
+       // However, if we can have thread bound queues, these mess with lock
+       // ownership and we always have to take the slowpath
+       if (likely(DISPATCH_COCOA_COMPAT || dq->dq_width > 1)) {
+               if (likely(_dispatch_thread_frame_find_queue(dq))) {
                        return;
                }
        }
-fail:
        _dispatch_assert_queue_fail(dq, true);
 }
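
After this change `dispatch_assert_queue` trusts `_dq_state_drain_locked_by_self` for the fast path and otherwise walks the thread's frame stack. A caller-side example of the assertion (the queue label is illustrative):

#include <dispatch/dispatch.h>
#include <stdio.h>

static void work(void *ctxt)
{
    dispatch_queue_t q = ctxt;
    /* Crashes with a diagnostic if this thread is not draining `q`. */
    dispatch_assert_queue(q);
    puts("running on the expected queue");
}

int main(void)
{
    dispatch_queue_t q = dispatch_queue_create("com.example.q", NULL);
    dispatch_sync_f(q, q, work);
    dispatch_release(q);
    return 0;
}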
 
@@ -578,14 +591,14 @@ dispatch_assert_queue_not(dispatch_queue_t dq)
                                "dispatch_assert_queue_not()");
        }
        uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
-       if (_dq_state_drain_pended(dq_state)) {
-               return;
-       }
-       if (likely(_dq_state_drain_owner(dq_state) != _dispatch_tid_self())) {
-               if (likely(dq->dq_width == 1)) {
-                       // we can look at the width: if it is changing while we read it,
-                       // it means that a barrier is running on `dq` concurrently, which
-                       // proves that we're not on `dq`. Hence reading a stale '1' is ok.
+       if (likely(!_dq_state_drain_locked_by_self(dq_state))) {
+               // we can look at the width: if it is changing while we read it,
+               // it means that a barrier is running on `dq` concurrently, which
+               // proves that we're not on `dq`. Hence reading a stale '1' is ok.
+               //
+               // However, if we can have thread bound queues, these mess with lock
+               // ownership and we always have to take the slowpath
+               if (likely(!DISPATCH_COCOA_COMPAT && dq->dq_width == 1)) {
                        return;
                }
                if (likely(!_dispatch_thread_frame_find_queue(dq))) {
@@ -625,48 +638,14 @@ dispatch_assert_queue_barrier(dispatch_queue_t dq)
 #pragma mark -
 #pragma mark dispatch_init
 
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-pthread_priority_t _dispatch_background_priority;
-pthread_priority_t _dispatch_user_initiated_priority;
-
-static void
-_dispatch_root_queues_init_qos(int supported)
-{
-       pthread_priority_t p;
-       qos_class_t qos;
-       unsigned int i;
-       for (i = 0; i < DISPATCH_PRIORITY_COUNT; i++) {
-               p = _pthread_qos_class_encode_workqueue(_dispatch_priority2wq[i], 0);
-               qos = _pthread_qos_class_decode(p, NULL, NULL);
-               dispatch_assert(qos != _DISPATCH_QOS_CLASS_UNSPECIFIED);
-               _dispatch_priority2qos[i] = qos;
-       }
-       for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
-               qos = _dispatch_root_queue_contexts[i].dgq_qos;
-               if (qos == _DISPATCH_QOS_CLASS_MAINTENANCE &&
-                               !(supported & WORKQ_FEATURE_MAINTENANCE)) {
-                       continue;
-               }
-               unsigned long flags = i & 1 ? _PTHREAD_PRIORITY_OVERCOMMIT_FLAG : 0;
-               flags |= _PTHREAD_PRIORITY_ROOTQUEUE_FLAG;
-               if (i == DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS ||
-                               i == DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT) {
-                       flags |= _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
-               }
-               p = _pthread_qos_class_encode(qos, 0, flags);
-               _dispatch_root_queues[i].dq_priority = (dispatch_priority_t)p;
-       }
-}
-#endif // HAVE_PTHREAD_WORKQUEUE_QOS
-
 static inline bool
 _dispatch_root_queues_init_workq(int *wq_supported)
 {
-       int r;
+       int r; (void)r;
        bool result = false;
        *wq_supported = 0;
-#if HAVE_PTHREAD_WORKQUEUES
-       bool disable_wq = false;
+#if DISPATCH_USE_WORKQUEUES
+       bool disable_wq = false; (void)disable_wq;
 #if DISPATCH_ENABLE_THREAD_POOL && DISPATCH_DEBUG
        disable_wq = slowpath(getenv("LIBDISPATCH_DISABLE_KWQ"));
 #endif
@@ -677,10 +656,11 @@ _dispatch_root_queues_init_workq(int *wq_supported)
 #endif
 #if DISPATCH_USE_KEVENT_WORKQUEUE
        bool disable_kevent_wq = false;
-#if DISPATCH_DEBUG
+#if DISPATCH_DEBUG || DISPATCH_PROFILE
        disable_kevent_wq = slowpath(getenv("LIBDISPATCH_DISABLE_KEVENT_WQ"));
 #endif
 #endif
+
        if (!disable_wq && !disable_qos) {
                *wq_supported = _pthread_workqueue_supported();
 #if DISPATCH_USE_KEVENT_WORKQUEUE
@@ -691,13 +671,10 @@ _dispatch_root_queues_init_workq(int *wq_supported)
                                        offsetof(struct dispatch_queue_s, dq_serialnum), 0);
 #if DISPATCH_USE_MGR_THREAD
                        _dispatch_kevent_workqueue_enabled = !r;
-#endif
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-                       _dispatch_evfilt_machport_direct_enabled = !r;
 #endif
                        result = !r;
                } else
-#endif
+#endif // DISPATCH_USE_KEVENT_WORKQUEUE
                if (*wq_supported & WORKQ_FEATURE_FINEPRIO) {
 #if DISPATCH_USE_MGR_THREAD
                        r = _pthread_workqueue_init(_dispatch_worker_thread3,
@@ -705,10 +682,13 @@ _dispatch_root_queues_init_workq(int *wq_supported)
                        result = !r;
 #endif
                }
-               if (result) _dispatch_root_queues_init_qos(*wq_supported);
+               if (!(*wq_supported & WORKQ_FEATURE_MAINTENANCE)) {
+                       DISPATCH_INTERNAL_CRASH(*wq_supported,
+                                       "QoS Maintenance support required");
+               }
        }
 #endif // DISPATCH_USE_KEVENT_WORKQUEUE || HAVE_PTHREAD_WORKQUEUE_QOS
-#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
+#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
        if (!result && !disable_wq) {
                pthread_workqueue_setdispatchoffset_np(
                                offsetof(struct dispatch_queue_s, dq_serialnum));
@@ -718,7 +698,7 @@ _dispatch_root_queues_init_workq(int *wq_supported)
 #endif
                result = !r;
        }
-#endif // HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
+#endif // DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
 #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_USE_PTHREAD_POOL
        if (!result) {
 #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK
@@ -728,7 +708,7 @@ _dispatch_root_queues_init_workq(int *wq_supported)
                        (void)dispatch_assume_zero(r);
                }
 #endif
-               int i;
+               size_t i;
                for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
                        pthread_workqueue_t pwq = NULL;
                        dispatch_root_queue_context_t qc;
@@ -747,7 +727,15 @@ _dispatch_root_queues_init_workq(int *wq_supported)
                                result = result || dispatch_assume(pwq);
                        }
 #endif // DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK
-                       qc->dgq_kworkqueue = pwq ? pwq : (void*)(~0ul);
+                       if (pwq) {
+                               qc->dgq_kworkqueue = pwq;
+                       } else {
+                               qc->dgq_kworkqueue = (void*)(~0ul);
+                               // because the fastpath of _dispatch_global_queue_poke didn't
+                               // know yet that we're using the internal pool implementation,
+                               // we have to undo its setting of dgq_pending
+                               qc->dgq_pending = 0;
+                       }
                }
 #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK
                if (!disable_wq) {
@@ -757,23 +745,23 @@ _dispatch_root_queues_init_workq(int *wq_supported)
 #endif
        }
 #endif // DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_ENABLE_THREAD_POOL
-#endif // HAVE_PTHREAD_WORKQUEUES
+#endif // DISPATCH_USE_WORKQUEUES
        return result;
 }
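
`_dispatch_root_queues_init_workq` probes the backends in order (kevent workqueue, QoS workqueue, setdispatch_np, then the legacy/pthread-pool fallback), with debug-only environment kill switches for testing. A sketch of that kind of env-var gate, assuming the same variable name as above:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Any non-NULL value disables the backend, matching the getenv() checks
 * in the function above. */
static bool env_disabled(const char *name)
{
    return getenv(name) != NULL;
}

int main(void)
{
    bool use_kernel_wq = !env_disabled("LIBDISPATCH_DISABLE_KWQ");
    printf("kernel workqueue backend: %s\n",
            use_kernel_wq ? "enabled" : "disabled by environment");
    return 0;
}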
 
 #if DISPATCH_USE_PTHREAD_POOL
 static inline void
 _dispatch_root_queue_init_pthread_pool(dispatch_root_queue_context_t qc,
-               uint8_t pool_size, bool overcommit)
+               int32_t pool_size, bool overcommit)
 {
        dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt;
-       uint32_t thread_pool_size = overcommit ? MAX_PTHREAD_COUNT :
-                       dispatch_hw_config(active_cpus);
+       int32_t thread_pool_size = overcommit ? DISPATCH_WORKQ_MAX_PTHREAD_COUNT :
+                       (int32_t)dispatch_hw_config(active_cpus);
        if (slowpath(pool_size) && pool_size < thread_pool_size) {
                thread_pool_size = pool_size;
        }
        qc->dgq_thread_pool_size = thread_pool_size;
-#if HAVE_PTHREAD_WORKQUEUES
+#if DISPATCH_USE_WORKQUEUES
        if (qc->dgq_qos) {
                (void)dispatch_assume_zero(pthread_attr_init(&pqc->dpq_thread_attr));
                (void)dispatch_assume_zero(pthread_attr_setdetachstate(
@@ -784,21 +772,12 @@ _dispatch_root_queue_init_pthread_pool(dispatch_root_queue_context_t qc,
 #endif
        }
 #endif // HAVE_PTHREAD_WORKQUEUES
-       _os_semaphore_t *sema = &pqc->dpq_thread_mediator.dsema_sema;
-       _os_semaphore_init(sema, _OS_SEM_POLICY_LIFO);
-       _os_semaphore_create(sema, _OS_SEM_POLICY_LIFO);
+       _dispatch_sema4_t *sema = &pqc->dpq_thread_mediator.dsema_sema;
+       _dispatch_sema4_init(sema, _DSEMA4_POLICY_LIFO);
+       _dispatch_sema4_create(sema, _DSEMA4_POLICY_LIFO);
 }
 #endif // DISPATCH_USE_PTHREAD_POOL
 
-static dispatch_once_t _dispatch_root_queues_pred;
-
-void
-_dispatch_root_queues_init(void)
-{
-       dispatch_once_f(&_dispatch_root_queues_pred, NULL,
-                       _dispatch_root_queues_init_once);
-}
-
 static void
 _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED)
 {
@@ -806,10 +785,10 @@ _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED)
        _dispatch_fork_becomes_unsafe();
        if (!_dispatch_root_queues_init_workq(&wq_supported)) {
 #if DISPATCH_ENABLE_THREAD_POOL
-               int i;
+               size_t i;
                for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
                        bool overcommit = true;
-#if TARGET_OS_EMBEDDED
+#if TARGET_OS_EMBEDDED || (DISPATCH_USE_INTERNAL_WORKQUEUE && HAVE_DISPATCH_WORKQ_MONITORING)
                        // some software hangs if the non-overcommitting queues do not
                        // overcommit when threads block. Someday, this behavior should
                        // apply to all platforms
@@ -827,12 +806,19 @@ _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED)
        }
 }
 
+void
+_dispatch_root_queues_init(void)
+{
+       static dispatch_once_t _dispatch_root_queues_pred;
+       dispatch_once_f(&_dispatch_root_queues_pred, NULL,
+                       _dispatch_root_queues_init_once);
+}
+
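
Moving the once-predicate from file scope into `_dispatch_root_queues_init` makes the function the sole entry point for root-queue initialization. The general `dispatch_once_f` accessor idiom it relies on:

#include <dispatch/dispatch.h>
#include <stdio.h>

static void init_once(void *ctxt)
{
    (void)ctxt;
    puts("ran exactly once");
}

static void ensure_initialized(void)
{
    /* A function-local predicate: this accessor is now the only way to
     * trigger (or observe) initialization. */
    static dispatch_once_t pred;
    dispatch_once_f(&pred, NULL, init_once);
}

int main(void)
{
    ensure_initialized();
    ensure_initialized(); /* second call is a cheap no-op */
    return 0;
}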
 DISPATCH_EXPORT DISPATCH_NOTHROW
 void
 libdispatch_init(void)
 {
-       dispatch_assert(DISPATCH_QUEUE_QOS_COUNT == 6);
-       dispatch_assert(DISPATCH_ROOT_QUEUE_COUNT == 12);
+       dispatch_assert(DISPATCH_ROOT_QUEUE_COUNT == 2 * DISPATCH_QOS_MAX);
 
        dispatch_assert(DISPATCH_QUEUE_PRIORITY_LOW ==
                        -DISPATCH_QUEUE_PRIORITY_HIGH);
@@ -840,13 +826,7 @@ libdispatch_init(void)
                        DISPATCH_ROOT_QUEUE_COUNT);
        dispatch_assert(countof(_dispatch_root_queue_contexts) ==
                        DISPATCH_ROOT_QUEUE_COUNT);
-       dispatch_assert(countof(_dispatch_priority2qos) ==
-                       DISPATCH_PRIORITY_COUNT);
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-       dispatch_assert(countof(_dispatch_priority2wq) ==
-                       DISPATCH_PRIORITY_COUNT);
-#endif
-#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
+#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
        dispatch_assert(sizeof(_dispatch_wq2root_queues) /
                        sizeof(_dispatch_wq2root_queues[0][0]) ==
                        WORKQ_NUM_PRIOQUEUE * 2);
@@ -868,17 +848,10 @@ libdispatch_init(void)
        dispatch_assert(sizeof(struct dispatch_root_queue_context_s) %
                        DISPATCH_CACHELINE_SIZE == 0);
 
-
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-       // 26497968 _dispatch_user_initiated_priority should be set for qos
-       //          propagation to work properly
-       pthread_priority_t p = _pthread_qos_class_encode(qos_class_main(), 0, 0);
-       _dispatch_main_q.dq_priority = (dispatch_priority_t)p;
-       _dispatch_main_q.dq_override = p & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-       p = _pthread_qos_class_encode(_DISPATCH_QOS_CLASS_USER_INITIATED, 0, 0);
-       _dispatch_user_initiated_priority = p;
-       p = _pthread_qos_class_encode(_DISPATCH_QOS_CLASS_BACKGROUND, 0, 0);
-       _dispatch_background_priority = p;
+       dispatch_qos_t qos = _dispatch_qos_from_qos_class(qos_class_main());
+       dispatch_priority_t pri = _dispatch_priority_make(qos, 0);
+       _dispatch_main_q.dq_priority = _dispatch_priority_with_override_qos(pri, qos);
 #if DISPATCH_DEBUG
        if (!slowpath(getenv("LIBDISPATCH_DISABLE_SET_QOS"))) {
                _dispatch_set_qos_class_enabled = 1;
@@ -889,25 +862,24 @@ libdispatch_init(void)
 #if DISPATCH_USE_THREAD_LOCAL_STORAGE
        _dispatch_thread_key_create(&__dispatch_tsd_key, _libdispatch_tsd_cleanup);
 #else
+       _dispatch_thread_key_create(&dispatch_priority_key, NULL);
+       _dispatch_thread_key_create(&dispatch_r2k_key, NULL);
        _dispatch_thread_key_create(&dispatch_queue_key, _dispatch_queue_cleanup);
-       _dispatch_thread_key_create(&dispatch_deferred_items_key,
-                       _dispatch_deferred_items_cleanup);
        _dispatch_thread_key_create(&dispatch_frame_key, _dispatch_frame_cleanup);
-       _dispatch_thread_key_create(&dispatch_voucher_key, _voucher_thread_cleanup);
        _dispatch_thread_key_create(&dispatch_cache_key, _dispatch_cache_cleanup);
        _dispatch_thread_key_create(&dispatch_context_key, _dispatch_context_cleanup);
-       _dispatch_thread_key_create(&dispatch_defaultpriority_key, NULL);
        _dispatch_thread_key_create(&dispatch_pthread_root_queue_observer_hooks_key,
                        NULL);
-#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION
+       _dispatch_thread_key_create(&dispatch_basepri_key, NULL);
+#if DISPATCH_INTROSPECTION
+       _dispatch_thread_key_create(&dispatch_introspection_key, NULL);
+#elif DISPATCH_PERF_MON
        _dispatch_thread_key_create(&dispatch_bcounter_key, NULL);
 #endif
-#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-       if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
-               _dispatch_thread_key_create(&dispatch_sema4_key,
-                               _dispatch_thread_semaphore_dispose);
-       }
-#endif
+       _dispatch_thread_key_create(&dispatch_wlh_key, _dispatch_wlh_cleanup);
+       _dispatch_thread_key_create(&dispatch_voucher_key, _voucher_thread_cleanup);
+       _dispatch_thread_key_create(&dispatch_deferred_items_key,
+                       _dispatch_deferred_items_cleanup);
 #endif
 
 #if DISPATCH_USE_RESOLVERS // rdar://problem/8541707
@@ -923,50 +895,18 @@ libdispatch_init(void)
                        dispatch_atfork_parent, dispatch_atfork_child));
 #endif
        _dispatch_hw_config_init();
+       _dispatch_time_init();
        _dispatch_vtable_init();
        _os_object_init();
        _voucher_init();
        _dispatch_introspection_init();
 }
 
-#if HAVE_MACH
-static dispatch_once_t _dispatch_mach_host_port_pred;
-static mach_port_t _dispatch_mach_host_port;
-
-static void
-_dispatch_mach_host_port_init(void *ctxt DISPATCH_UNUSED)
-{
-       kern_return_t kr;
-       mach_port_t mp, mhp = mach_host_self();
-       kr = host_get_host_port(mhp, &mp);
-       DISPATCH_VERIFY_MIG(kr);
-       if (fastpath(!kr)) {
-               // mach_host_self returned the HOST_PRIV port
-               kr = mach_port_deallocate(mach_task_self(), mhp);
-               DISPATCH_VERIFY_MIG(kr);
-               mhp = mp;
-       } else if (kr != KERN_INVALID_ARGUMENT) {
-               (void)dispatch_assume_zero(kr);
-       }
-       if (!fastpath(mhp)) {
-               DISPATCH_CLIENT_CRASH(kr, "Could not get unprivileged host port");
-       }
-       _dispatch_mach_host_port = mhp;
-}
-
-mach_port_t
-_dispatch_get_mach_host_port(void)
-{
-       dispatch_once_f(&_dispatch_mach_host_port_pred, NULL,
-                       _dispatch_mach_host_port_init);
-       return _dispatch_mach_host_port;
-}
-#endif
-
 #if DISPATCH_USE_THREAD_LOCAL_STORAGE
 #include <unistd.h>
 #include <sys/syscall.h>
 
+#ifndef __ANDROID__
 #ifdef SYS_gettid
 DISPATCH_ALWAYS_INLINE
 static inline pid_t
@@ -976,34 +916,54 @@ gettid(void)
 }
 #else
 #error "SYS_gettid unavailable on this system"
-#endif
+#endif /* SYS_gettid */
+#endif /* ! __ANDROID__ */
 
 #define _tsd_call_cleanup(k, f)  do { \
                if ((f) && tsd->k) ((void(*)(void*))(f))(tsd->k); \
-    } while (0)
+       } while (0)
+
+#ifdef __ANDROID__
+static void (*_dispatch_thread_detach_callback)(void);
+
+void
+_dispatch_install_thread_detach_callback(dispatch_function_t cb)
+{
+       if (os_atomic_xchg(&_dispatch_thread_detach_callback, cb, relaxed)) {
+               DISPATCH_CLIENT_CRASH(0, "Installing a thread detach callback twice");
+       }
+}
+#endif
 
 void
 _libdispatch_tsd_cleanup(void *ctx)
 {
        struct dispatch_tsd *tsd = (struct dispatch_tsd*) ctx;
 
+       _tsd_call_cleanup(dispatch_priority_key, NULL);
+       _tsd_call_cleanup(dispatch_r2k_key, NULL);
+
        _tsd_call_cleanup(dispatch_queue_key, _dispatch_queue_cleanup);
        _tsd_call_cleanup(dispatch_frame_key, _dispatch_frame_cleanup);
        _tsd_call_cleanup(dispatch_cache_key, _dispatch_cache_cleanup);
        _tsd_call_cleanup(dispatch_context_key, _dispatch_context_cleanup);
        _tsd_call_cleanup(dispatch_pthread_root_queue_observer_hooks_key,
                        NULL);
-       _tsd_call_cleanup(dispatch_defaultpriority_key, NULL);
-#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION
+       _tsd_call_cleanup(dispatch_basepri_key, NULL);
+#if DISPATCH_INTROSPECTION
+       _tsd_call_cleanup(dispatch_introspection_key, NULL);
+#elif DISPATCH_PERF_MON
        _tsd_call_cleanup(dispatch_bcounter_key, NULL);
 #endif
-#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-       _tsd_call_cleanup(dispatch_sema4_key, _dispatch_thread_semaphore_dispose);
-#endif
-       _tsd_call_cleanup(dispatch_priority_key, NULL);
+       _tsd_call_cleanup(dispatch_wlh_key, _dispatch_wlh_cleanup);
        _tsd_call_cleanup(dispatch_voucher_key, _voucher_thread_cleanup);
        _tsd_call_cleanup(dispatch_deferred_items_key,
                        _dispatch_deferred_items_cleanup);
+#ifdef __ANDROID__
+       if (_dispatch_thread_detach_callback) {
+               _dispatch_thread_detach_callback();
+       }
+#endif
        tsd->tid = 0;
 }
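
`_libdispatch_tsd_cleanup` applies `_tsd_call_cleanup` to every slot, invoking a destructor only when both the function and the stored value are non-NULL. The macro pattern in isolation, on a toy two-slot struct:

#include <stdio.h>
#include <stdlib.h>

/* Per-thread slots plus one cleanup walk; a destructor runs only when
 * both the function and the stored value are non-NULL. */
struct tsd { void *cache; void *context; };

#define tsd_call_cleanup(tsd, k, f) do { \
        if ((f) && (tsd)->k) ((void (*)(void *))(f))((tsd)->k); \
    } while (0)

static void free_slot(void *p)
{
    printf("freeing %p\n", p);
    free(p);
}

static void tsd_cleanup(struct tsd *tsd)
{
    tsd_call_cleanup(tsd, cache, free_slot);
    tsd_call_cleanup(tsd, context, NULL); /* no destructor: skipped */
}

int main(void)
{
    struct tsd t = { .cache = malloc(16), .context = NULL };
    tsd_cleanup(&t);
    return 0;
}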
 
@@ -1020,18 +980,18 @@ DISPATCH_NOTHROW
 void
 _dispatch_queue_atfork_child(void)
 {
+       dispatch_queue_t main_q = &_dispatch_main_q;
        void *crash = (void *)0x100;
        size_t i;
 
-#if HAVE_MACH
-       _dispatch_mach_host_port_pred = 0;
-       _dispatch_mach_host_port = MACH_PORT_NULL;
-#endif
+       if (_dispatch_queue_is_thread_bound(main_q)) {
+               _dispatch_queue_set_bound_thread(main_q);
+       }
 
        if (!_dispatch_is_multithreaded_inline()) return;
 
-       _dispatch_main_q.dq_items_head = crash;
-       _dispatch_main_q.dq_items_tail = crash;
+       main_q->dq_items_head = crash;
+       main_q->dq_items_tail = crash;
 
        _dispatch_mgr_q.dq_items_head = crash;
        _dispatch_mgr_q.dq_items_tail = crash;
@@ -1042,6 +1002,33 @@ _dispatch_queue_atfork_child(void)
        }
 }
 
+DISPATCH_NOINLINE
+void
+_dispatch_fork_becomes_unsafe_slow(void)
+{
+       uint8_t value = os_atomic_or(&_dispatch_unsafe_fork,
+                       _DISPATCH_UNSAFE_FORK_MULTITHREADED, relaxed);
+       if (value & _DISPATCH_UNSAFE_FORK_PROHIBIT) {
+               DISPATCH_CLIENT_CRASH(0, "Transition to multithreaded is prohibited");
+       }
+}
+
+DISPATCH_NOINLINE
+void
+_dispatch_prohibit_transition_to_multithreaded(bool prohibit)
+{
+       if (prohibit) {
+               uint8_t value = os_atomic_or(&_dispatch_unsafe_fork,
+                               _DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed);
+               if (value & _DISPATCH_UNSAFE_FORK_MULTITHREADED) {
+                       DISPATCH_CLIENT_CRASH(0, "The executable is already multithreaded");
+               }
+       } else {
+               os_atomic_and(&_dispatch_unsafe_fork,
+                               (uint8_t)~_DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed);
+       }
+}
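
`_dispatch_fork_becomes_unsafe_slow` and `_dispatch_prohibit_transition_to_multithreaded` guard two mutually exclusive bits with an atomic OR, so whichever side arrives second observes the other's bit and crashes. A portable C11 sketch of the same two-flag handshake:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define FLAG_MULTITHREADED 0x1u
#define FLAG_PROHIBIT      0x2u

static atomic_uchar unsafe_fork;

/* Each side sets its own bit with an atomic OR and aborts if the
 * opposing bit was already set, so there is exactly one winner. */
static void becomes_multithreaded(void)
{
    unsigned char prev = atomic_fetch_or_explicit(&unsafe_fork,
            FLAG_MULTITHREADED, memory_order_relaxed);
    if (prev & FLAG_PROHIBIT) abort();
}

static void prohibit_multithreading(void)
{
    unsigned char prev = atomic_fetch_or_explicit(&unsafe_fork,
            FLAG_PROHIBIT, memory_order_relaxed);
    if (prev & FLAG_MULTITHREADED) abort();
}

int main(void)
{
    prohibit_multithreading();
    puts("prohibition installed; going multithreaded would now abort");
    (void)becomes_multithreaded; /* calling this now would abort */
    return 0;
}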
+
 #pragma mark -
 #pragma mark dispatch_queue_attr_t
 
@@ -1051,13 +1038,13 @@ _dispatch_qos_class_valid(dispatch_qos_class_t qos_class, int relative_priority)
 {
        qos_class_t qos = (qos_class_t)qos_class;
        switch (qos) {
-       case _DISPATCH_QOS_CLASS_MAINTENANCE:
-       case _DISPATCH_QOS_CLASS_BACKGROUND:
-       case _DISPATCH_QOS_CLASS_UTILITY:
-       case _DISPATCH_QOS_CLASS_DEFAULT:
-       case _DISPATCH_QOS_CLASS_USER_INITIATED:
-       case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
-       case _DISPATCH_QOS_CLASS_UNSPECIFIED:
+       case QOS_CLASS_MAINTENANCE:
+       case QOS_CLASS_BACKGROUND:
+       case QOS_CLASS_UTILITY:
+       case QOS_CLASS_DEFAULT:
+       case QOS_CLASS_USER_INITIATED:
+       case QOS_CLASS_USER_INTERACTIVE:
+       case QOS_CLASS_UNSPECIFIED:
                break;
        default:
                return false;
@@ -1068,20 +1055,6 @@ _dispatch_qos_class_valid(dispatch_qos_class_t qos_class, int relative_priority)
        return true;
 }
 
-#define DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(qos) \
-               [_DISPATCH_QOS_CLASS_##qos] = DQA_INDEX_QOS_CLASS_##qos
-
-static const
-_dispatch_queue_attr_index_qos_class_t _dispatch_queue_attr_qos2idx[] = {
-       DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(UNSPECIFIED),
-       DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(MAINTENANCE),
-       DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(BACKGROUND),
-       DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(UTILITY),
-       DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(DEFAULT),
-       DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(USER_INITIATED),
-       DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(USER_INTERACTIVE),
-};
-
 #define DISPATCH_QUEUE_ATTR_OVERCOMMIT2IDX(overcommit) \
                ((overcommit) == _dispatch_queue_attr_overcommit_disabled ? \
                DQA_INDEX_NON_OVERCOMMIT : \
@@ -1099,10 +1072,10 @@ _dispatch_queue_attr_index_qos_class_t _dispatch_queue_attr_qos2idx[] = {
 
 #define DISPATCH_QUEUE_ATTR_PRIO2IDX(prio) (-(prio))
 
-#define DISPATCH_QUEUE_ATTR_QOS2IDX(qos) (_dispatch_queue_attr_qos2idx[(qos)])
+#define DISPATCH_QUEUE_ATTR_QOS2IDX(qos) (qos)
 
 static inline dispatch_queue_attr_t
-_dispatch_get_queue_attr(qos_class_t qos, int prio,
+_dispatch_get_queue_attr(dispatch_qos_t qos, int prio,
                _dispatch_queue_attr_overcommit_t overcommit,
                dispatch_autorelease_frequency_t frequency,
                bool concurrent, bool inactive)
@@ -1119,16 +1092,16 @@ _dispatch_get_queue_attr(qos_class_t qos, int prio,
 dispatch_queue_attr_t
 _dispatch_get_default_queue_attr(void)
 {
-       return _dispatch_get_queue_attr(_DISPATCH_QOS_CLASS_UNSPECIFIED, 0,
+       return _dispatch_get_queue_attr(DISPATCH_QOS_UNSPECIFIED, 0,
                                _dispatch_queue_attr_overcommit_unspecified,
                                DISPATCH_AUTORELEASE_FREQUENCY_INHERIT, false, false);
 }
 
 dispatch_queue_attr_t
 dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t dqa,
-               dispatch_qos_class_t qos_class, int relative_priority)
+               dispatch_qos_class_t qos_class, int relpri)
 {
-       if (!_dispatch_qos_class_valid(qos_class, relative_priority)) {
+       if (!_dispatch_qos_class_valid(qos_class, relpri)) {
                return DISPATCH_BAD_INPUT;
        }
        if (!slowpath(dqa)) {
@@ -1136,8 +1109,8 @@ dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t dqa,
        } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) {
                DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute");
        }
-       return _dispatch_get_queue_attr(qos_class, relative_priority,
-                       dqa->dqa_overcommit, dqa->dqa_autorelease_frequency,
+       return _dispatch_get_queue_attr(_dispatch_qos_from_qos_class(qos_class),
+                       relpri, dqa->dqa_overcommit, dqa->dqa_autorelease_frequency,
                        dqa->dqa_concurrent, dqa->dqa_inactive);
 }
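
`dispatch_queue_attr_make_with_qos_class` now converts the QoS class up front and stores a packed `dqa_qos_and_relpri`; caller-visible behavior is unchanged. For example:

#include <dispatch/dispatch.h>

int main(void)
{
    /* The relative priority must be in [QOS_MIN_RELATIVE_PRIORITY, 0];
     * values outside that range make the constructor report bad input. */
    dispatch_queue_attr_t attr = dispatch_queue_attr_make_with_qos_class(
            DISPATCH_QUEUE_SERIAL, QOS_CLASS_UTILITY, -1);
    dispatch_queue_t q = dispatch_queue_create("com.example.util", attr);
    dispatch_release(q);
    return 0;
}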
 
@@ -1149,8 +1122,9 @@ dispatch_queue_attr_make_initially_inactive(dispatch_queue_attr_t dqa)
        } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) {
                DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute");
        }
-       return _dispatch_get_queue_attr(dqa->dqa_qos_class,
-                       dqa->dqa_relative_priority, dqa->dqa_overcommit,
+       dispatch_priority_t pri = dqa->dqa_qos_and_relpri;
+       return _dispatch_get_queue_attr(_dispatch_priority_qos(pri),
+                       _dispatch_priority_relpri(pri), dqa->dqa_overcommit,
                        dqa->dqa_autorelease_frequency, dqa->dqa_concurrent, true);
 }
 
@@ -1163,8 +1137,9 @@ dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t dqa,
        } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) {
                DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute");
        }
-       return _dispatch_get_queue_attr(dqa->dqa_qos_class,
-                       dqa->dqa_relative_priority, overcommit ?
+       dispatch_priority_t pri = dqa->dqa_qos_and_relpri;
+       return _dispatch_get_queue_attr(_dispatch_priority_qos(pri),
+                       _dispatch_priority_relpri(pri), overcommit ?
                        _dispatch_queue_attr_overcommit_enabled :
                        _dispatch_queue_attr_overcommit_disabled,
                        dqa->dqa_autorelease_frequency, dqa->dqa_concurrent,
@@ -1188,32 +1163,135 @@ dispatch_queue_attr_make_with_autorelease_frequency(dispatch_queue_attr_t dqa,
        } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) {
                DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute");
        }
-       return _dispatch_get_queue_attr(dqa->dqa_qos_class,
-                       dqa->dqa_relative_priority, dqa->dqa_overcommit,
+       dispatch_priority_t pri = dqa->dqa_qos_and_relpri;
+       return _dispatch_get_queue_attr(_dispatch_priority_qos(pri),
+                       _dispatch_priority_relpri(pri), dqa->dqa_overcommit,
                        frequency, dqa->dqa_concurrent, dqa->dqa_inactive);
 }
 
 #pragma mark -
 #pragma mark dispatch_queue_t
 
-// skip zero
-// 1 - main_q
-// 2 - mgr_q
-// 3 - mgr_root_q
-// 4,5,6,7,8,9,10,11,12,13,14,15 - global queues
-// we use 'xadd' on Intel, so the initial value == next assigned
-unsigned long volatile _dispatch_queue_serial_numbers = 16;
+void
+dispatch_queue_set_label_nocopy(dispatch_queue_t dq, const char *label)
+{
+       if (dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) {
+               return;
+       }
+       dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dq);
+       if (unlikely(dqf & DQF_LABEL_NEEDS_FREE)) {
+               DISPATCH_CLIENT_CRASH(dq, "Cannot change label for this queue");
+       }
+       dq->dq_label = label;
+}
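
`dispatch_queue_set_label_nocopy` only accepts labels it will never have to free, so the caller must pass storage that outlives the queue. A usage sketch; the extern declaration stands in for the private SPI header, which is an assumption for illustration:

#include <dispatch/dispatch.h>

/* Private SPI; normally declared in the private queue header. */
extern void dispatch_queue_set_label_nocopy(dispatch_queue_t dq,
        const char *label);

int main(void)
{
    dispatch_queue_t q = dispatch_queue_create(NULL, NULL);
    /* The label is not copied, so it must outlive the queue: use a
     * string literal (static storage duration). */
    dispatch_queue_set_label_nocopy(q, "com.example.renamed");
    dispatch_release(q);
    return 0;
}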
+
+static inline bool
+_dispatch_base_queue_is_wlh(dispatch_queue_t dq, dispatch_queue_t tq)
+{
+       (void)dq; (void)tq;
+       return false;
+}
+
+static void
+_dispatch_queue_inherit_wlh_from_target(dispatch_queue_t dq,
+               dispatch_queue_t tq)
+{
+       uint64_t old_state, new_state, role;
+
+       if (!dx_hastypeflag(tq, QUEUE_ROOT)) {
+               role = DISPATCH_QUEUE_ROLE_INNER;
+       } else if (_dispatch_base_queue_is_wlh(dq, tq)) {
+               role = DISPATCH_QUEUE_ROLE_BASE_WLH;
+       } else {
+               role = DISPATCH_QUEUE_ROLE_BASE_ANON;
+       }
+
+       os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
+               new_state = old_state & ~DISPATCH_QUEUE_ROLE_MASK;
+               new_state |= role;
+               if (old_state == new_state) {
+                       os_atomic_rmw_loop_give_up(break);
+               }
+       });
+
+       dispatch_wlh_t cur_wlh = _dispatch_get_wlh();
+       if (cur_wlh == (dispatch_wlh_t)dq && !_dq_state_is_base_wlh(new_state)) {
+               _dispatch_event_loop_leave_immediate(cur_wlh, new_state);
+       }
+       if (!dx_hastypeflag(tq, QUEUE_ROOT)) {
+#if DISPATCH_ALLOW_NON_LEAF_RETARGET
+               _dispatch_queue_atomic_flags_set(tq, DQF_TARGETED);
+#else
+               _dispatch_queue_atomic_flags_set_and_clear(tq, DQF_TARGETED, DQF_LEGACY);
+#endif
+       }
+}
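
`_dispatch_queue_inherit_wlh_from_target` updates the role bits with `os_atomic_rmw_loop2o`, a compare-and-swap loop that recomputes its result from the freshly observed state and gives up early when nothing would change. An equivalent hand-rolled loop in portable C11, with illustrative mask values:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define ROLE_MASK  UINT64_C(0x3)   /* illustrative bit layout */
#define ROLE_INNER UINT64_C(0x1)

static _Atomic uint64_t dq_state;

/* Recompute the new state from the freshly observed old state until the
 * CAS succeeds, and give up early when nothing would change. */
static void set_role(uint64_t role)
{
    uint64_t old_state = atomic_load_explicit(&dq_state,
            memory_order_relaxed);
    uint64_t new_state;
    do {
        new_state = (old_state & ~ROLE_MASK) | role;
        if (new_state == old_state) return; /* rmw_loop_give_up */
    } while (!atomic_compare_exchange_weak_explicit(&dq_state, &old_state,
            new_state, memory_order_relaxed, memory_order_relaxed));
}

int main(void)
{
    set_role(ROLE_INNER);
    printf("state: %#llx\n", (unsigned long long)atomic_load(&dq_state));
    return 0;
}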
+
+unsigned long volatile _dispatch_queue_serial_numbers =
+               DISPATCH_QUEUE_SERIAL_NUMBER_INIT;
+
+dispatch_priority_t
+_dispatch_queue_compute_priority_and_wlh(dispatch_queue_t dq,
+               dispatch_wlh_t *wlh_out)
+{
+       dispatch_priority_t p = dq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK;
+       dispatch_queue_t tq = dq->do_targetq;
+       dispatch_priority_t tqp = tq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK;
+       dispatch_wlh_t wlh = DISPATCH_WLH_ANON;
+
+       if (_dq_state_is_base_wlh(dq->dq_state)) {
+               wlh = (dispatch_wlh_t)dq;
+       }
+
+       while (unlikely(!dx_hastypeflag(tq, QUEUE_ROOT))) {
+               if (unlikely(tq == &_dispatch_mgr_q)) {
+                       if (wlh_out) *wlh_out = DISPATCH_WLH_ANON;
+                       return DISPATCH_PRIORITY_FLAG_MANAGER;
+               }
+               if (unlikely(_dispatch_queue_is_thread_bound(tq))) {
+                       // thread-bound hierarchies are weird; we need to install
+                       // from the context of the thread this hierarchy is bound to
+                       if (wlh_out) *wlh_out = NULL;
+                       return 0;
+               }
+               if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(tq))) {
+                       // this queue may not be activated yet, so the queue graph may not
+                       // have stabilized yet
+                       _dispatch_ktrace1(DISPATCH_PERF_delayed_registration, dq);
+                       if (wlh_out) *wlh_out = NULL;
+                       return 0;
+               }
+
+               if (_dq_state_is_base_wlh(tq->dq_state)) {
+                       wlh = (dispatch_wlh_t)tq;
+               } else if (unlikely(_dispatch_queue_is_legacy(tq))) {
+                       // we're not allowed to dereference tq->do_targetq
+                       _dispatch_ktrace1(DISPATCH_PERF_delayed_registration, dq);
+                       if (wlh_out) *wlh_out = NULL;
+                       return 0;
+               }
+
+               if (!(tq->dq_priority & DISPATCH_PRIORITY_FLAG_INHERIT)) {
+                       if (p < tqp) p = tqp;
+               }
+               tq = tq->do_targetq;
+               tqp = tq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK;
+       }
+
+       if (unlikely(!tqp)) {
+               // pthread root queues opt out of QoS
+               if (wlh_out) *wlh_out = DISPATCH_WLH_ANON;
+               return DISPATCH_PRIORITY_FLAG_MANAGER;
+       }
+       if (wlh_out) *wlh_out = wlh;
+       return _dispatch_priority_inherit_from_root_queue(p, tq);
+}
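
`_dispatch_queue_compute_priority_and_wlh` walks the target chain, taking the maximum requested priority of every queue that does not carry the INHERIT flag, and bails out to the slow path for manager, thread-bound, suspended, or legacy queues. The core max-walk, reduced to a toy structure whose fields are assumptions rather than the real `dispatch_queue_s` layout:

#include <stdio.h>

struct node {
    int requested;       /* requested priority, 0 == unspecified */
    int inherits;        /* INHERIT flag: skip this node's priority */
    struct node *target;
};

/* Walk toward the root, keeping the largest non-inherited priority. */
static int compute_priority(const struct node *n)
{
    int p = n->requested;
    for (n = n->target; n != NULL; n = n->target) {
        if (!n->inherits && n->requested > p) p = n->requested;
    }
    return p;
}

int main(void)
{
    struct node root = { .requested = 3 };
    struct node mid  = { .requested = 5, .target = &root };
    struct node leaf = { .requested = 2, .target = &mid };
    printf("effective priority: %d\n", compute_priority(&leaf)); /* 5 */
    return 0;
}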
 
 DISPATCH_NOINLINE
 static dispatch_queue_t
 _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa,
                dispatch_queue_t tq, bool legacy)
 {
-#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
-       // Be sure the root queue priorities are set
-       dispatch_once_f(&_dispatch_root_queues_pred, NULL,
-                       _dispatch_root_queues_init_once);
-#endif
        if (!slowpath(dqa)) {
                dqa = _dispatch_get_default_queue_attr();
        } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) {
@@ -1224,25 +1302,15 @@ _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa,
        // Step 1: Normalize arguments (qos, overcommit, tq)
        //
 
-       qos_class_t qos = dqa->dqa_qos_class;
-#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
-       if (qos == _DISPATCH_QOS_CLASS_USER_INTERACTIVE &&
-                       !_dispatch_root_queues[
-                       DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS].dq_priority) {
-               qos = _DISPATCH_QOS_CLASS_USER_INITIATED;
+       dispatch_qos_t qos = _dispatch_priority_qos(dqa->dqa_qos_and_relpri);
+#if !HAVE_PTHREAD_WORKQUEUE_QOS
+       if (qos == DISPATCH_QOS_USER_INTERACTIVE) {
+               qos = DISPATCH_QOS_USER_INITIATED;
        }
-#endif
-       bool maintenance_fallback = false;
-#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
-       maintenance_fallback = true;
-#endif // DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
-       if (maintenance_fallback) {
-               if (qos == _DISPATCH_QOS_CLASS_MAINTENANCE &&
-                               !_dispatch_root_queues[
-                               DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS].dq_priority) {
-                       qos = _DISPATCH_QOS_CLASS_BACKGROUND;
-               }
+       if (qos == DISPATCH_QOS_MAINTENANCE) {
+               qos = DISPATCH_QOS_BACKGROUND;
        }
+#endif // !HAVE_PTHREAD_WORKQUEUE_QOS
 
        _dispatch_queue_attr_overcommit_t overcommit = dqa->dqa_overcommit;
        if (overcommit != _dispatch_queue_attr_overcommit_unspecified && tq) {
@@ -1256,14 +1324,15 @@ _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa,
                        tq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) {
                // Handle discrepancies between attr and target queue, attributes win
                if (overcommit == _dispatch_queue_attr_overcommit_unspecified) {
-                       if (tq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) {
+                       if (tq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) {
                                overcommit = _dispatch_queue_attr_overcommit_enabled;
                        } else {
                                overcommit = _dispatch_queue_attr_overcommit_disabled;
                        }
                }
-               if (qos == _DISPATCH_QOS_CLASS_UNSPECIFIED) {
-                       tq = _dispatch_get_root_queue_with_overcommit(tq,
+               if (qos == DISPATCH_QOS_UNSPECIFIED) {
+                       dispatch_qos_t tq_qos = _dispatch_priority_qos(tq->dq_priority);
+                       tq = _dispatch_get_root_queue(tq_qos,
                                        overcommit == _dispatch_queue_attr_overcommit_enabled);
                } else {
                        tq = NULL;
@@ -1275,7 +1344,7 @@ _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa,
                        DISPATCH_CLIENT_CRASH(tq, "Cannot specify an overcommit attribute "
                                        "and use this kind of target queue");
                }
-               if (qos != _DISPATCH_QOS_CLASS_UNSPECIFIED) {
+               if (qos != DISPATCH_QOS_UNSPECIFIED) {
                        DISPATCH_CLIENT_CRASH(tq, "Cannot specify a QoS attribute "
                                        "and use this kind of target queue");
                }
@@ -1288,10 +1357,9 @@ _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa,
                }
        }
        if (!tq) {
-               qos_class_t tq_qos = qos == _DISPATCH_QOS_CLASS_UNSPECIFIED ?
-                               _DISPATCH_QOS_CLASS_DEFAULT : qos;
-               tq = _dispatch_get_root_queue(tq_qos, overcommit ==
-                               _dispatch_queue_attr_overcommit_enabled);
+               tq = _dispatch_get_root_queue(
+                               qos == DISPATCH_QOS_UNSPECIFIED ? DISPATCH_QOS_DEFAULT : qos,
+                               overcommit == _dispatch_queue_attr_overcommit_enabled);
                if (slowpath(!tq)) {
                        DISPATCH_CLIENT_CRASH(qos, "Invalid queue attribute");
                }
@@ -1325,6 +1393,9 @@ _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa,
                dqf |= DQF_AUTORELEASE_ALWAYS;
                break;
        }
+       if (legacy) {
+               dqf |= DQF_LEGACY;
+       }
        if (label) {
                const char *tmp = _dispatch_strdup_if_mutable(label);
                if (tmp != label) {
@@ -1333,26 +1404,26 @@ _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa,
                }
        }
 
-       dispatch_queue_t dq = _dispatch_alloc(vtable,
+       dispatch_queue_t dq = _dispatch_object_alloc(vtable,
                        sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD);
        _dispatch_queue_init(dq, dqf, dqa->dqa_concurrent ?
-                       DISPATCH_QUEUE_WIDTH_MAX : 1, dqa->dqa_inactive);
+                       DISPATCH_QUEUE_WIDTH_MAX : 1, DISPATCH_QUEUE_ROLE_INNER |
+                       (dqa->dqa_inactive ? DISPATCH_QUEUE_INACTIVE : 0));
 
        dq->dq_label = label;
-
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-       dq->dq_priority = (dispatch_priority_t)_pthread_qos_class_encode(qos,
-                       dqa->dqa_relative_priority,
-                       overcommit == _dispatch_queue_attr_overcommit_enabled ?
-                       _PTHREAD_PRIORITY_OVERCOMMIT_FLAG : 0);
+       dq->dq_priority = dqa->dqa_qos_and_relpri;
+       if (overcommit == _dispatch_queue_attr_overcommit_enabled) {
+               dq->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
+       }
 #endif
        _dispatch_retain(tq);
-       if (qos == _DISPATCH_QOS_CLASS_UNSPECIFIED) {
+       if (qos == DISPATCH_QOS_UNSPECIFIED) {
                // legacy way of inheriting the QoS from the target
                _dispatch_queue_priority_inherit_from_target(dq, tq);
        }
        if (!dqa->dqa_inactive) {
-               _dispatch_queue_atomic_flags_set(tq, DQF_TARGETED);
+               _dispatch_queue_inherit_wlh_from_target(dq, tq);
        }
        dq->do_targetq = tq;
        _dispatch_object_debug(dq, "%s", __func__);
@@ -1377,30 +1448,26 @@ dispatch_queue_t
 dispatch_queue_create_with_accounting_override_voucher(const char *label,
                dispatch_queue_attr_t attr, voucher_t voucher)
 {
-       dispatch_queue_t dq = dispatch_queue_create_with_target(label, attr,
-                       DISPATCH_TARGET_QUEUE_DEFAULT);
-       dq->dq_override_voucher = _voucher_create_accounting_voucher(voucher);
-       return dq;
+       (void)label; (void)attr; (void)voucher;
+       DISPATCH_CLIENT_CRASH(0, "Unsupported interface");
 }
 
 void
-_dispatch_queue_destroy(dispatch_queue_t dq)
+_dispatch_queue_destroy(dispatch_queue_t dq, bool *allow_free)
 {
        uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
        uint64_t initial_state = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width);
 
-       if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) {
+       if (dx_hastypeflag(dq, QUEUE_ROOT)) {
                initial_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE;
        }
-       if (dx_type(dq) == DISPATCH_SOURCE_KEVENT_TYPE) {
-               // dispatch_cancel_and_wait may apply overrides in a racy way with
-               // the source cancellation finishing. This race is expensive and not
-               // really worthwhile to resolve since the source becomes dead anyway.
-               dq_state &= ~DISPATCH_QUEUE_HAS_OVERRIDE;
-       }
+       dq_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
+       dq_state &= ~DISPATCH_QUEUE_DIRTY;
+       dq_state &= ~DISPATCH_QUEUE_ROLE_MASK;
        if (slowpath(dq_state != initial_state)) {
                if (_dq_state_drain_locked(dq_state)) {
-                       DISPATCH_CLIENT_CRASH(dq, "Release of a locked queue");
+                       DISPATCH_CLIENT_CRASH((uintptr_t)dq_state,
+                                       "Release of a locked queue");
                }
 #ifndef __LP64__
                dq_state >>= 32;
@@ -1408,9 +1475,6 @@ _dispatch_queue_destroy(dispatch_queue_t dq)
                DISPATCH_CLIENT_CRASH((uintptr_t)dq_state,
                                "Release of a queue with corrupt state");
        }
-       if (slowpath(dq == _dispatch_queue_get_current())) {
-               DISPATCH_CLIENT_CRASH(dq, "Release of a queue by itself");
-       }
        if (slowpath(dq->dq_items_tail)) {
                DISPATCH_CLIENT_CRASH(dq->dq_items_tail,
                                "Release of a queue while items are enqueued");
@@ -1419,31 +1483,61 @@ _dispatch_queue_destroy(dispatch_queue_t dq)
        // trash the queue so that use after free will crash
        dq->dq_items_head = (void *)0x200;
        dq->dq_items_tail = (void *)0x200;
-       // poison the state with something that is suspended and is easy to spot
-       dq->dq_state = 0xdead000000000000;
 
        dispatch_queue_t dqsq = os_atomic_xchg2o(dq, dq_specific_q,
                        (void *)0x200, relaxed);
        if (dqsq) {
                _dispatch_release(dqsq);
        }
-       if (dq->dq_override_voucher != DISPATCH_NO_VOUCHER) {
-               if (dq->dq_override_voucher) _voucher_release(dq->dq_override_voucher);
-               dq->dq_override_voucher = DISPATCH_NO_VOUCHER;
+
+       // fastpath for queues that never got their storage retained
+       if (likely(os_atomic_load2o(dq, dq_sref_cnt, relaxed) == 0)) {
+               // poison the state with something that is suspended and is easy to spot
+               dq->dq_state = 0xdead000000000000;
+               return;
        }
+
+       // Take over freeing the memory from _dispatch_object_dealloc()
+       //
+       // As soon as we call _dispatch_queue_release_storage(), the caller of
+       // dx_dispose() can no longer finalize the object, so that
+       // responsibility becomes ours.
+       _dispatch_object_finalize(dq);
+       *allow_free = false;
+       dq->dq_label = "<released queue, pending free>";
+       dq->do_targetq = NULL;
+       dq->do_finalizer = NULL;
+       dq->do_ctxt = NULL;
+       return _dispatch_queue_release_storage(dq);
 }
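
/*
 * Sketch of the ownership handoff assumed above (hypothetical caller shape,
 * not part of this diff):
 *
 *     bool allow_free = true;
 *     dx_dispose(dq, &allow_free);
 *     if (allow_free) {
 *         // normal path: _dispatch_object_dealloc() finalizes and frees
 *         _dispatch_object_dealloc(dq);
 *     }
 *     // otherwise _dispatch_queue_release_storage() frees the memory once
 *     // the last storage reference (dq_sref_cnt) is dropped
 */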
 
 // 6618342 Contact the team that owns the Instrument DTrace probe before
 //         renaming this symbol
 void
-_dispatch_queue_dispose(dispatch_queue_t dq)
+_dispatch_queue_dispose(dispatch_queue_t dq, bool *allow_free)
 {
        _dispatch_object_debug(dq, "%s", __func__);
        _dispatch_introspection_queue_dispose(dq);
        if (dq->dq_label && _dispatch_queue_label_needs_free(dq)) {
                free((void*)dq->dq_label);
        }
-       _dispatch_queue_destroy(dq);
+       _dispatch_queue_destroy(dq, allow_free);
+}
+
+void
+_dispatch_queue_xref_dispose(dispatch_queue_t dq)
+{
+       uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
+       if (unlikely(_dq_state_is_suspended(dq_state))) {
+               long state = (long)dq_state;
+               if (sizeof(long) < sizeof(uint64_t)) state = (long)(dq_state >> 32);
+               if (unlikely(_dq_state_is_inactive(dq_state))) {
+                       // Arguments for and against this assert are within 6705399
+                       DISPATCH_CLIENT_CRASH(state, "Release of an inactive object");
+               }
+               DISPATCH_CLIENT_CRASH(dq_state, "Release of a suspended object");
+       }
+       os_atomic_or2o(dq, dq_atomic_flags, DQF_RELEASED, relaxed);
 }
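
/*
 * Refcount sketch assumed here: dispatch objects carry an external (xref)
 * count for client references and an internal count for library-held ones.
 * This hook runs when the last client reference is dropped; crashing on
 * suspended or inactive objects catches releases that could never be
 * balanced by a matching dispatch_resume()/dispatch_activate().
 */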
 
 DISPATCH_NOINLINE
@@ -1468,11 +1562,11 @@ _dispatch_queue_suspend_slow(dispatch_queue_t dq)
                // threads could have touched this value while we were trying to acquire
                // the lock, or because another thread raced us to do the same operation
                // and got to the lock first.
-               if (slowpath(os_sub_overflow(dq_state, delta, &value))) {
+               if (unlikely(os_sub_overflow(dq_state, delta, &value))) {
                        os_atomic_rmw_loop_give_up(goto retry);
                }
        });
-       if (slowpath(os_add_overflow(dq->dq_side_suspend_cnt,
+       if (unlikely(os_add_overflow(dq->dq_side_suspend_cnt,
                        DISPATCH_QUEUE_SUSPEND_HALF, &dq->dq_side_suspend_cnt))) {
                DISPATCH_CLIENT_CRASH(0, "Too many nested calls to dispatch_suspend()");
        }
@@ -1492,7 +1586,7 @@ _dispatch_queue_suspend(dispatch_queue_t dq)
 
        os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
                value = DISPATCH_QUEUE_SUSPEND_INTERVAL;
-               if (slowpath(os_add_overflow(dq_state, value, &value))) {
+               if (unlikely(os_add_overflow(dq_state, value, &value))) {
                        os_atomic_rmw_loop_give_up({
                                return _dispatch_queue_suspend_slow(dq);
                        });
@@ -1502,7 +1596,7 @@ _dispatch_queue_suspend(dispatch_queue_t dq)
        if (!_dq_state_is_suspended(dq_state)) {
                // rdar://8181908 we need to extend the queue life for the duration
                // of the call to wakeup at _dispatch_queue_resume() time.
-               _dispatch_retain(dq);
+               _dispatch_retain_2(dq);
        }
 }
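
/*
 * Usage sketch (assumed client code): a suspension now pins the queue with
 * a +2 reference (_dispatch_retain_2) that the matching resume consumes.
 *
 *     dispatch_queue_t q = dispatch_queue_create("com.example.q", NULL);
 *     dispatch_suspend(q);                        // takes the +2 reference
 *     dispatch_async(q, ^{ puts("deferred"); });  // queued, not yet run
 *     dispatch_resume(q);                         // consumed via dx_wakeup
 */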
 
@@ -1531,7 +1625,7 @@ _dispatch_queue_resume_slow(dispatch_queue_t dq)
                // threads could have touched this value while we were trying to acquire
                // the lock, or because another thread raced us to do the same operation
                // and got to the lock first.
-               if (slowpath(os_add_overflow(dq_state, delta, &value))) {
+               if (unlikely(os_add_overflow(dq_state, delta, &value))) {
                        os_atomic_rmw_loop_give_up(goto retry);
                }
        });
@@ -1547,12 +1641,15 @@ DISPATCH_NOINLINE
 static void
 _dispatch_queue_resume_finalize_activation(dispatch_queue_t dq)
 {
+       bool allow_resume = true;
        // Step 2: run the activation finalizer
        if (dx_vtable(dq)->do_finalize_activation) {
-               dx_vtable(dq)->do_finalize_activation(dq);
+               dx_vtable(dq)->do_finalize_activation(dq, &allow_resume);
        }
        // Step 3: consume the suspend count
-       return dx_vtable(dq)->do_resume(dq, false);
+       if (allow_resume) {
+               return dx_vtable(dq)->do_resume(dq, false);
+       }
 }
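
/*
 * Activation protocol sketch assumed here: dispatch_activate() first clears
 * DISPATCH_QUEUE_NEEDS_ACTIVATION (step 1, in _dispatch_queue_resume below),
 * then the class finalizer runs (step 2), and only then is the synthetic
 * suspend count consumed (step 3). A finalizer may set *allow_resume to
 * false to take over step 3 itself once its own setup has completed.
 */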
 
 void
@@ -1560,9 +1657,15 @@ _dispatch_queue_resume(dispatch_queue_t dq, bool activate)
 {
        // covers all suspend and inactive bits, including side suspend bit
        const uint64_t suspend_bits = DISPATCH_QUEUE_SUSPEND_BITS_MASK;
+       uint64_t pending_barrier_width =
+                       (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL;
+       uint64_t set_owner_and_set_full_width_and_in_barrier =
+                       _dispatch_lock_value_for_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT |
+                       DISPATCH_QUEUE_IN_BARRIER;
+
        // backward compatibility: only dispatch sources can abuse
        // dispatch_resume() to really mean dispatch_activate()
-       bool resume_can_activate = (dx_type(dq) == DISPATCH_SOURCE_KEVENT_TYPE);
+       bool is_source = (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE);
        uint64_t dq_state, value;
 
        dispatch_assert(dq->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT);
@@ -1612,42 +1715,48 @@ _dispatch_queue_resume(dispatch_queue_t dq, bool activate)
                                        + DISPATCH_QUEUE_NEEDS_ACTIVATION) {
                                // { sc:1 i:0 na:1 } -> { sc:1 i:0 na:0 }
                                value = dq_state - DISPATCH_QUEUE_NEEDS_ACTIVATION;
-                       } else if (resume_can_activate && (dq_state & suspend_bits) ==
+                       } else if (is_source && (dq_state & suspend_bits) ==
                                        DISPATCH_QUEUE_NEEDS_ACTIVATION + DISPATCH_QUEUE_INACTIVE) {
                                // { sc:0 i:1 na:1 } -> { sc:1 i:0 na:0 }
                                value = dq_state - DISPATCH_QUEUE_INACTIVE
                                                - DISPATCH_QUEUE_NEEDS_ACTIVATION
                                                + DISPATCH_QUEUE_SUSPEND_INTERVAL;
-                       } else {
-                               value = DISPATCH_QUEUE_SUSPEND_INTERVAL;
-                               if (slowpath(os_sub_overflow(dq_state, value, &value))) {
-                                       // underflow means over-resume or a suspend count transfer
-                                       // to the side count is needed
-                                       os_atomic_rmw_loop_give_up({
-                                               if (!(dq_state & DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT)) {
-                                                       goto over_resume;
-                                               }
-                                               return _dispatch_queue_resume_slow(dq);
-                                       });
-                               }
-                               if (_dq_state_is_runnable(value) &&
-                                               !_dq_state_drain_locked(value)) {
-                                       uint64_t full_width = value;
-                                       if (_dq_state_has_pending_barrier(value)) {
-                                               full_width -= DISPATCH_QUEUE_PENDING_BARRIER;
-                                               full_width += DISPATCH_QUEUE_WIDTH_INTERVAL;
-                                               full_width += DISPATCH_QUEUE_IN_BARRIER;
-                                       } else {
-                                               full_width += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
-                                               full_width += DISPATCH_QUEUE_IN_BARRIER;
-                                       }
-                                       if ((full_width & DISPATCH_QUEUE_WIDTH_MASK) ==
-                                                       DISPATCH_QUEUE_WIDTH_FULL_BIT) {
-                                               value = full_width;
-                                               value &= ~DISPATCH_QUEUE_DIRTY;
-                                               value |= _dispatch_tid_self();
+                       } else if (unlikely(os_sub_overflow(dq_state,
+                                       DISPATCH_QUEUE_SUSPEND_INTERVAL, &value))) {
+                               // underflow means over-resume or a suspend count transfer
+                               // to the side count is needed
+                               os_atomic_rmw_loop_give_up({
+                                       if (!(dq_state & DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT)) {
+                                               goto over_resume;
                                        }
-                               }
+                                       return _dispatch_queue_resume_slow(dq);
+                               });
+               //
+               // below this, value = dq_state - DISPATCH_QUEUE_SUSPEND_INTERVAL
+               //
+                       } else if (!_dq_state_is_runnable(value)) {
+                               // Out of width or still suspended.
+                               // For the former, force _dispatch_queue_non_barrier_complete
+                               // to reconsider whether it has work to do
+                               value |= DISPATCH_QUEUE_DIRTY;
+                       } else if (_dq_state_drain_locked(value)) {
+                               // still locked by someone else, make drain_try_unlock() fail
+                               // and reconsider whether it has work to do
+                               value |= DISPATCH_QUEUE_DIRTY;
+                       } else if (!is_source && (_dq_state_has_pending_barrier(value) ||
+                                       value + pending_barrier_width <
+                                       DISPATCH_QUEUE_WIDTH_FULL_BIT)) {
+                               // if we can, acquire the full width drain lock
+                               // and then perform a lock transfer
+                               //
+                       // However, this is never useful for a source with no sync
+                       // waiters, so never take the lock and do a plain wakeup instead
+                               value &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
+                               value |= set_owner_and_set_full_width_and_in_barrier;
+                       } else {
+                               // clear overrides and force a wakeup
+                               value &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
+                               value &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
                        }
                });
        }
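
/*
 * Notation sketch for the transitions above: sc = suspend count, i = the
 * DISPATCH_QUEUE_INACTIVE bit, na = DISPATCH_QUEUE_NEEDS_ACTIVATION, all
 * packed into dq_state. Activating an inactive source, for instance, moves
 * { sc:0 i:1 na:1 } to { sc:1 i:0 na:0 }: both activation bits are cleared
 * and one DISPATCH_QUEUE_SUSPEND_INTERVAL is added, so that the final
 * resume issued after the activation finalizer performs the real wakeup.
 */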
@@ -1660,7 +1769,7 @@ _dispatch_queue_resume(dispatch_queue_t dq, bool activate)
        if (activate) {
                // if we're still in an activate codepath here we should have
                // { sc:>0 na:1 }, if not we've got a corrupt state
-               if (!fastpath(_dq_state_is_suspended(value))) {
+               if (unlikely(!_dq_state_is_suspended(value))) {
                        DISPATCH_CLIENT_CRASH(dq, "Invalid suspension state");
                }
                return;
@@ -1670,23 +1779,29 @@ _dispatch_queue_resume(dispatch_queue_t dq, bool activate)
                return;
        }
 
-       if ((dq_state ^ value) & DISPATCH_QUEUE_IN_BARRIER) {
-               _dispatch_try_lock_transfer_or_wakeup(dq);
-       } else if (_dq_state_should_wakeup(value)) {
+       if (_dq_state_is_dirty(dq_state)) {
                // <rdar://problem/14637483>
-               // seq_cst wrt state changes that were flushed and not acted upon
-               os_atomic_thread_fence(acquire);
-               pthread_priority_t pp = _dispatch_queue_reset_override_priority(dq,
-                               _dispatch_queue_is_thread_bound(dq));
-               // Balancing the retain() done in suspend() for rdar://8181908
-               return dx_wakeup(dq, pp, DISPATCH_WAKEUP_CONSUME);
+               // dependency ordering for dq state changes that were flushed
+               // and not acted upon
+               os_atomic_thread_fence(dependency);
+               dq = os_atomic_force_dependency_on(dq, dq_state);
        }
-
-       // Balancing the retain() done in suspend() for rdar://8181908
-       return _dispatch_release_tailcall(dq);
+       // Balancing the retain_2 done in suspend() for rdar://8181908
+       dispatch_wakeup_flags_t flags = DISPATCH_WAKEUP_CONSUME_2;
+       if ((dq_state ^ value) & DISPATCH_QUEUE_IN_BARRIER) {
+               flags |= DISPATCH_WAKEUP_BARRIER_COMPLETE;
+       } else if (!_dq_state_is_runnable(value)) {
+               if (_dq_state_is_base_wlh(dq_state)) {
+                       _dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq);
+               }
+               return _dispatch_release_2(dq);
+       }
+       dispatch_assert(!_dq_state_received_sync_wait(dq_state));
+       dispatch_assert(!_dq_state_in_sync_transfer(dq_state));
+       return dx_wakeup(dq, _dq_state_max_qos(dq_state), flags);
 
 over_resume:
-       if (slowpath(_dq_state_is_inactive(dq_state))) {
+       if (unlikely(_dq_state_is_inactive(dq_state))) {
                DISPATCH_CLIENT_CRASH(dq, "Over-resume of an inactive object");
        }
        DISPATCH_CLIENT_CRASH(dq, "Over-resume of an object");
@@ -1702,19 +1817,13 @@ dispatch_queue_get_label(dispatch_queue_t dq)
 }
 
 qos_class_t
-dispatch_queue_get_qos_class(dispatch_queue_t dq, int *relative_priority_ptr)
+dispatch_queue_get_qos_class(dispatch_queue_t dq, int *relpri_ptr)
 {
-       qos_class_t qos = _DISPATCH_QOS_CLASS_UNSPECIFIED;
-       int relative_priority = 0;
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-       pthread_priority_t dqp = dq->dq_priority;
-       if (dqp & _PTHREAD_PRIORITY_INHERIT_FLAG) dqp = 0;
-       qos = _pthread_qos_class_decode(dqp, &relative_priority, NULL);
-#else
-       (void)dq;
-#endif
-       if (relative_priority_ptr) *relative_priority_ptr = relative_priority;
-       return qos;
+       dispatch_qos_class_t qos = _dispatch_priority_qos(dq->dq_priority);
+       if (relpri_ptr) {
+               *relpri_ptr = qos ? _dispatch_priority_relpri(dq->dq_priority) : 0;
+       }
+       return _dispatch_qos_to_qos_class(qos);
 }
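
/*
 * Usage sketch for the accessor above (assumed client code):
 *
 *     int relpri = 0;
 *     dispatch_queue_attr_t attr = dispatch_queue_attr_make_with_qos_class(
 *                     DISPATCH_QUEUE_SERIAL, QOS_CLASS_UTILITY, -4);
 *     dispatch_queue_t q = dispatch_queue_create("com.example.util", attr);
 *     qos_class_t qc = dispatch_queue_get_qos_class(q, &relpri);
 *     // here qc == QOS_CLASS_UTILITY and relpri == -4
 */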
 
 static void
@@ -1724,23 +1833,24 @@ _dispatch_queue_set_width2(void *ctxt)
        uint32_t tmp;
        dispatch_queue_t dq = _dispatch_queue_get_current();
 
-       if (w > 0) {
-               tmp = (unsigned int)w;
-       } else switch (w) {
-       case 0:
-               tmp = 1;
-               break;
-       case DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS:
-               tmp = dispatch_hw_config(physical_cpus);
-               break;
-       case DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS:
-               tmp = dispatch_hw_config(active_cpus);
-               break;
-       default:
-               // fall through
-       case DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS:
-               tmp = dispatch_hw_config(logical_cpus);
-               break;
+       if (w >= 0) {
+               tmp = w ? (unsigned int)w : 1;
+       } else {
+               dispatch_qos_t qos = _dispatch_qos_from_pp(_dispatch_get_priority());
+               switch (w) {
+               case DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS:
+                       tmp = _dispatch_qos_max_parallelism(qos,
+                                       DISPATCH_MAX_PARALLELISM_PHYSICAL);
+                       break;
+               case DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS:
+                       tmp = _dispatch_qos_max_parallelism(qos,
+                                       DISPATCH_MAX_PARALLELISM_ACTIVE);
+                       break;
+               case DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS:
+               default:
+                       tmp = _dispatch_qos_max_parallelism(qos, 0);
+                       break;
+               }
        }
        if (tmp > DISPATCH_QUEUE_WIDTH_MAX) {
                tmp = DISPATCH_QUEUE_WIDTH_MAX;
@@ -1748,17 +1858,18 @@ _dispatch_queue_set_width2(void *ctxt)
 
        dispatch_queue_flags_t old_dqf, new_dqf;
        os_atomic_rmw_loop2o(dq, dq_atomic_flags, old_dqf, new_dqf, relaxed, {
-               new_dqf = old_dqf & ~DQF_WIDTH_MASK;
-               new_dqf |= (tmp << DQF_WIDTH_SHIFT);
+               new_dqf = (old_dqf & DQF_FLAGS_MASK) | DQF_WIDTH(tmp);
        });
+       _dispatch_queue_inherit_wlh_from_target(dq, dq->do_targetq);
        _dispatch_object_debug(dq, "%s", __func__);
 }
 
 void
 dispatch_queue_set_width(dispatch_queue_t dq, long width)
 {
-       if (slowpath(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
-                       slowpath(dx_hastypeflag(dq, QUEUE_ROOT))) {
+       if (unlikely(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT ||
+                       dx_hastypeflag(dq, QUEUE_ROOT) ||
+                       dx_hastypeflag(dq, QUEUE_BASE))) {
                return;
        }
 
@@ -1773,8 +1884,15 @@ dispatch_queue_set_width(dispatch_queue_t dq, long width)
                DISPATCH_CLIENT_CRASH(type, "Unexpected dispatch object type");
        }
 
-       _dispatch_barrier_trysync_or_async_f(dq, (void*)(intptr_t)width,
-                       _dispatch_queue_set_width2);
+       if (likely((int)width >= 0)) {
+               _dispatch_barrier_trysync_or_async_f(dq, (void*)(intptr_t)width,
+                               _dispatch_queue_set_width2, DISPATCH_BARRIER_TRYSYNC_SUSPEND);
+       } else {
+               // The negative width constants need to execute on the queue to
+               // query the queue QoS
+               _dispatch_barrier_async_detached_f(dq, (void*)(intptr_t)width,
+                               _dispatch_queue_set_width2);
+       }
 }
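
/*
 * Usage sketch (dispatch_queue_set_width is SPI from queue_private.h): the
 * negative width constants depend on the QoS observed on the queue itself,
 * hence the detached async path above.
 *
 *     dispatch_queue_set_width(q, 4);                                // trysync
 *     dispatch_queue_set_width(q, DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS); // async
 */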
 
 static void
@@ -1785,13 +1903,18 @@ _dispatch_queue_legacy_set_target_queue(void *ctxt)
        dispatch_queue_t otq = dq->do_targetq;
 
        if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) {
+#if DISPATCH_ALLOW_NON_LEAF_RETARGET
                _dispatch_ktrace3(DISPATCH_PERF_non_leaf_retarget, dq, otq, tq);
                _dispatch_bug_deprecated("Changing the target of a queue "
                                "already targeted by other dispatch objects");
+#else
+               DISPATCH_CLIENT_CRASH(0, "Cannot change the target of a queue "
+                               "already targeted by other dispatch objects");
+#endif
        }
 
        _dispatch_queue_priority_inherit_from_target(dq, tq);
-       _dispatch_queue_atomic_flags_set(tq, DQF_TARGETED);
+       _dispatch_queue_inherit_wlh_from_target(dq, tq);
 #if HAVE_PTHREAD_WORKQUEUE_QOS
        // see _dispatch_queue_class_wakeup()
        _dispatch_queue_sidelock_lock(dq);
@@ -1813,10 +1936,9 @@ _dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq)
        dispatch_assert(dq->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT &&
                        dq->do_targetq);
 
-       if (slowpath(!tq)) {
+       if (unlikely(!tq)) {
                bool is_concurrent_q = (dq->dq_width > 1);
-               tq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,
-                               !is_concurrent_q);
+               tq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, !is_concurrent_q);
        }
 
        if (_dispatch_queue_try_inactive_suspend(dq)) {
@@ -1824,19 +1946,33 @@ _dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq)
                return dx_vtable(dq)->do_resume(dq, false);
        }
 
-       if (dq->dq_override_voucher != DISPATCH_NO_VOUCHER) {
-               DISPATCH_CLIENT_CRASH(dq, "Cannot change the target of a queue or "
-                               "source with an accounting override voucher "
+#if !DISPATCH_ALLOW_NON_LEAF_RETARGET
+       if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) {
+               DISPATCH_CLIENT_CRASH(0, "Cannot change the target of a queue "
+                               "already targeted by other dispatch objects");
+       }
+#endif
+
+       if (unlikely(!_dispatch_queue_is_legacy(dq))) {
+#if DISPATCH_ALLOW_NON_LEAF_RETARGET
+               if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) {
+                       DISPATCH_CLIENT_CRASH(0, "Cannot change the target of a queue "
+                                       "already targeted by other dispatch objects");
+               }
+#endif
+               DISPATCH_CLIENT_CRASH(0, "Cannot change the target of this object "
                                "after it has been activated");
        }
 
        unsigned long type = dx_type(dq);
        switch (type) {
        case DISPATCH_QUEUE_LEGACY_TYPE:
+#if DISPATCH_ALLOW_NON_LEAF_RETARGET
                if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) {
                        _dispatch_bug_deprecated("Changing the target of a queue "
                                        "already targeted by other dispatch objects");
                }
+#endif
                break;
        case DISPATCH_SOURCE_KEVENT_TYPE:
        case DISPATCH_MACH_CHANNEL_TYPE:
@@ -1844,18 +1980,14 @@ _dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq)
                _dispatch_bug_deprecated("Changing the target of a source "
                                "after it has been activated");
                break;
-
-       case DISPATCH_QUEUE_SERIAL_TYPE:
-       case DISPATCH_QUEUE_CONCURRENT_TYPE:
-               DISPATCH_CLIENT_CRASH(type, "Cannot change the target of this queue "
-                               "after it has been activated");
        default:
                DISPATCH_CLIENT_CRASH(type, "Unexpected dispatch object type");
        }
 
        _dispatch_retain(tq);
        return _dispatch_barrier_trysync_or_async_f(dq, tq,
-                       _dispatch_queue_legacy_set_target_queue);
+                       _dispatch_queue_legacy_set_target_queue,
+                       DISPATCH_BARRIER_TRYSYNC_SUSPEND);
 }
 
 #pragma mark -
@@ -1866,7 +1998,7 @@ static struct dispatch_pthread_root_queue_context_s
                _dispatch_mgr_root_queue_pthread_context;
 static struct dispatch_root_queue_context_s
                _dispatch_mgr_root_queue_context = {{{
-#if HAVE_PTHREAD_WORKQUEUES
+#if DISPATCH_USE_WORKQUEUES
        .dgq_kworkqueue = (void*)(~0ul),
 #endif
        .dgq_ctxt = &_dispatch_mgr_root_queue_pthread_context,
@@ -1878,9 +2010,9 @@ static struct dispatch_queue_s _dispatch_mgr_root_queue = {
        .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE,
        .do_ctxt = &_dispatch_mgr_root_queue_context,
        .dq_label = "com.apple.root.libdispatch-manager",
-       .dq_width = DISPATCH_QUEUE_WIDTH_POOL,
-       .dq_override = DISPATCH_SATURATED_OVERRIDE,
-       .dq_override_voucher = DISPATCH_NO_VOUCHER,
+       .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL),
+       .dq_priority = DISPATCH_PRIORITY_FLAG_MANAGER |
+                       DISPATCH_PRIORITY_SATURATED_OVERRIDE,
        .dq_serialnum = 3,
 };
 #endif // DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
@@ -1896,17 +2028,16 @@ static struct {
 
 static dispatch_once_t _dispatch_mgr_sched_pred;
 
-// TODO: switch to "event-reflector thread" property <rdar://problem/18126138>
-
 #if HAVE_PTHREAD_WORKQUEUE_QOS
+// TODO: switch to "event-reflector thread" property <rdar://problem/18126138>
 // Must be kept in sync with list of qos classes in sys/qos.h
 static const int _dispatch_mgr_sched_qos2prio[] = {
-       [_DISPATCH_QOS_CLASS_MAINTENANCE] = 4,
-       [_DISPATCH_QOS_CLASS_BACKGROUND] = 4,
-       [_DISPATCH_QOS_CLASS_UTILITY] = 20,
-       [_DISPATCH_QOS_CLASS_DEFAULT] = 31,
-       [_DISPATCH_QOS_CLASS_USER_INITIATED] = 37,
-       [_DISPATCH_QOS_CLASS_USER_INTERACTIVE] = 47,
+       [QOS_CLASS_MAINTENANCE] = 4,
+       [QOS_CLASS_BACKGROUND] = 4,
+       [QOS_CLASS_UTILITY] = 20,
+       [QOS_CLASS_DEFAULT] = 31,
+       [QOS_CLASS_USER_INITIATED] = 37,
+       [QOS_CLASS_USER_INTERACTIVE] = 47,
 };
 #endif // HAVE_PTHREAD_WORKQUEUE_QOS
 
@@ -1926,8 +2057,8 @@ _dispatch_mgr_sched_init(void *ctxt DISPATCH_UNUSED)
        (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, &param));
 #if HAVE_PTHREAD_WORKQUEUE_QOS
        qos_class_t qos = qos_class_main();
-       if (qos == _DISPATCH_QOS_CLASS_DEFAULT) {
-               qos = _DISPATCH_QOS_CLASS_USER_INITIATED; // rdar://problem/17279292
+       if (qos == QOS_CLASS_DEFAULT) {
+               qos = QOS_CLASS_USER_INITIATED; // rdar://problem/17279292
        }
        if (qos) {
                _dispatch_mgr_sched.qos = qos;
@@ -1960,8 +2091,6 @@ _dispatch_mgr_root_queue_init(void)
                        (void)dispatch_assume_zero(pthread_attr_set_qos_class_np(attr,
                                        qos, 0));
                }
-               _dispatch_mgr_q.dq_priority =
-                               (dispatch_priority_t)_pthread_qos_class_encode(qos, 0, 0);
        }
 #endif
        param.sched_priority = _dispatch_mgr_sched.prio;
@@ -2033,8 +2162,7 @@ _dispatch_mgr_priority_raise(const pthread_attr_t *attr)
                if (p >= prio) os_atomic_rmw_loop_give_up(return);
        });
 #if DISPATCH_USE_KEVENT_WORKQUEUE
-       dispatch_once_f(&_dispatch_root_queues_pred, NULL,
-                       _dispatch_root_queues_init_once);
+       _dispatch_root_queues_init();
        if (_dispatch_kevent_workqueue_enabled) {
                pthread_priority_t pp = 0;
                if (prio > _dispatch_mgr_sched.default_prio) {
@@ -2068,8 +2196,7 @@ void
 _dispatch_kevent_workqueue_init(void)
 {
        // Initialize kevent workqueue support
-       dispatch_once_f(&_dispatch_root_queues_pred, NULL,
-                       _dispatch_root_queues_init_once);
+       _dispatch_root_queues_init();
        if (!_dispatch_kevent_workqueue_enabled) return;
        dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init);
        qos_class_t qos = _dispatch_mgr_sched.qos;
@@ -2077,7 +2204,6 @@ _dispatch_kevent_workqueue_init(void)
        pthread_priority_t pp = 0;
        if (qos) {
                pp = _pthread_qos_class_encode(qos, 0, 0);
-               _dispatch_mgr_q.dq_priority = (dispatch_priority_t)pp;
        }
        if (prio > _dispatch_mgr_sched.default_prio) {
                pp = (pthread_priority_t)prio | _PTHREAD_PRIORITY_SCHED_PRI_FLAG;
@@ -2087,7 +2213,7 @@ _dispatch_kevent_workqueue_init(void)
                (void)dispatch_assume_zero(r);
        }
 }
-#endif
+#endif // DISPATCH_USE_KEVENT_WORKQUEUE
 
 #pragma mark -
 #pragma mark dispatch_pthread_root_queue
@@ -2103,12 +2229,12 @@ _dispatch_pthread_root_queue_create(const char *label, unsigned long flags,
        dispatch_pthread_root_queue_context_t pqc;
        dispatch_queue_flags_t dqf = 0;
        size_t dqs;
-       uint8_t pool_size = flags & _DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE ?
-                       (uint8_t)(flags & ~_DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE) : 0;
+       int32_t pool_size = flags & _DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE ?
+                       (int8_t)(flags & ~_DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE) : 0;
 
        dqs = sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD;
        dqs = roundup(dqs, _Alignof(struct dispatch_root_queue_context_s));
-       dq = _dispatch_alloc(DISPATCH_VTABLE(queue_root), dqs +
+       dq = _dispatch_object_alloc(DISPATCH_VTABLE(queue_root), dqs +
                        sizeof(struct dispatch_root_queue_context_s) +
                        sizeof(struct dispatch_pthread_root_queue_context_s));
        qc = (void*)dq + dqs;
@@ -2123,16 +2249,15 @@ _dispatch_pthread_root_queue_create(const char *label, unsigned long flags,
                }
        }
 
-       _dispatch_queue_init(dq, dqf, DISPATCH_QUEUE_WIDTH_POOL, false);
+       _dispatch_queue_init(dq, dqf, DISPATCH_QUEUE_WIDTH_POOL, 0);
        dq->dq_label = label;
-       dq->dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE,
-       dq->dq_override = DISPATCH_SATURATED_OVERRIDE;
+       dq->dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE;
        dq->do_ctxt = qc;
-       dq->do_targetq = NULL;
+       dq->dq_priority = DISPATCH_PRIORITY_SATURATED_OVERRIDE;
 
        pqc->dpq_thread_mediator.do_vtable = DISPATCH_VTABLE(semaphore);
        qc->dgq_ctxt = pqc;
-#if HAVE_PTHREAD_WORKQUEUES
+#if DISPATCH_USE_WORKQUEUES
        qc->dgq_kworkqueue = (void*)(~0ul);
 #endif
        _dispatch_root_queue_init_pthread_pool(qc, pool_size, true);
@@ -2184,7 +2309,7 @@ dispatch_pthread_root_queue_copy_current(void)
 {
        dispatch_queue_t dq = _dispatch_queue_get_current();
        if (!dq) return NULL;
-       while (slowpath(dq->do_targetq)) {
+       while (unlikely(dq->do_targetq)) {
                dq = dq->do_targetq;
        }
        if (dx_type(dq) != DISPATCH_QUEUE_GLOBAL_ROOT_TYPE ||
@@ -2197,7 +2322,7 @@ dispatch_pthread_root_queue_copy_current(void)
 #endif // DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
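
/*
 * Usage sketch for the pthread root queue SPI above (queue_private.h,
 * assumed caller): such a queue drains on a private thread pool rather
 * than the global workqueue.
 *
 *     dispatch_queue_t rq = dispatch_pthread_root_queue_create(
 *                     "com.example.pool", 0, NULL, NULL);
 *     dispatch_async(rq, ^{ puts("runs on the private pool"); });
 */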
 
 void
-_dispatch_pthread_root_queue_dispose(dispatch_queue_t dq)
+_dispatch_pthread_root_queue_dispose(dispatch_queue_t dq, bool *allow_free)
 {
        if (slowpath(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) {
                DISPATCH_INTERNAL_CRASH(dq, "Global root queue disposed");
@@ -2209,17 +2334,16 @@ _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq)
        dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt;
 
        pthread_attr_destroy(&pqc->dpq_thread_attr);
-       _dispatch_semaphore_dispose(&pqc->dpq_thread_mediator);
+       _dispatch_semaphore_dispose(&pqc->dpq_thread_mediator, NULL);
        if (pqc->dpq_thread_configure) {
                Block_release(pqc->dpq_thread_configure);
        }
-       dq->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,
-                       false);
+       dq->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false);
 #endif
        if (dq->dq_label && _dispatch_queue_label_needs_free(dq)) {
                free((void*)dq->dq_label);
        }
-       _dispatch_queue_destroy(dq);
+       _dispatch_queue_destroy(dq, allow_free);
 }
 
 #pragma mark -
@@ -2229,7 +2353,7 @@ struct dispatch_queue_specific_queue_s {
        DISPATCH_QUEUE_HEADER(queue_specific_queue);
        TAILQ_HEAD(dispatch_queue_specific_head_s,
                        dispatch_queue_specific_s) dqsq_contexts;
-} DISPATCH_QUEUE_ALIGN;
+} DISPATCH_ATOMIC64_ALIGN;
 
 struct dispatch_queue_specific_s {
        const void *dqs_key;
@@ -2240,19 +2364,19 @@ struct dispatch_queue_specific_s {
 DISPATCH_DECL(dispatch_queue_specific);
 
 void
-_dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq)
+_dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq,
+               bool *allow_free)
 {
        dispatch_queue_specific_t dqs, tmp;
+       dispatch_queue_t rq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false);
 
        TAILQ_FOREACH_SAFE(dqs, &dqsq->dqsq_contexts, dqs_list, tmp) {
                if (dqs->dqs_destructor) {
-                       dispatch_async_f(_dispatch_get_root_queue(
-                                       _DISPATCH_QOS_CLASS_DEFAULT, false), dqs->dqs_ctxt,
-                                       dqs->dqs_destructor);
+                       dispatch_async_f(rq, dqs->dqs_ctxt, dqs->dqs_destructor);
                }
                free(dqs);
        }
-       _dispatch_queue_destroy(dqsq->_as_dq);
+       _dispatch_queue_destroy(dqsq->_as_dq, allow_free);
 }
 
 static void
@@ -2260,13 +2384,13 @@ _dispatch_queue_init_specific(dispatch_queue_t dq)
 {
        dispatch_queue_specific_queue_t dqsq;
 
-       dqsq = _dispatch_alloc(DISPATCH_VTABLE(queue_specific_queue),
+       dqsq = _dispatch_object_alloc(DISPATCH_VTABLE(queue_specific_queue),
                        sizeof(struct dispatch_queue_specific_queue_s));
-       _dispatch_queue_init(dqsq->_as_dq, DQF_NONE,
-                       DISPATCH_QUEUE_WIDTH_MAX, false);
+       _dispatch_queue_init(dqsq->_as_dq, DQF_NONE, DISPATCH_QUEUE_WIDTH_MAX,
+                       DISPATCH_QUEUE_ROLE_BASE_ANON);
        dqsq->do_xref_cnt = -1;
        dqsq->do_targetq = _dispatch_get_root_queue(
-                       _DISPATCH_QOS_CLASS_USER_INITIATED, true);
+                       DISPATCH_QOS_USER_INITIATED, true);
        dqsq->dq_label = "queue-specific";
        TAILQ_INIT(&dqsq->dqsq_contexts);
        if (slowpath(!os_atomic_cmpxchg2o(dq, dq_specific_q, NULL,
@@ -2287,7 +2411,7 @@ _dispatch_queue_set_specific(void *ctxt)
                        // Destroy previous context for existing key
                        if (dqs->dqs_destructor) {
                                dispatch_async_f(_dispatch_get_root_queue(
-                                               _DISPATCH_QOS_CLASS_DEFAULT, false), dqs->dqs_ctxt,
+                                               DISPATCH_QOS_DEFAULT, false), dqs->dqs_ctxt,
                                                dqs->dqs_destructor);
                        }
                        if (dqsn->dqs_ctxt) {
@@ -2324,7 +2448,7 @@ dispatch_queue_set_specific(dispatch_queue_t dq, const void *key,
                _dispatch_queue_init_specific(dq);
        }
        _dispatch_barrier_trysync_or_async_f(dq->dq_specific_q, dqs,
-                       _dispatch_queue_set_specific);
+                       _dispatch_queue_set_specific, 0);
 }
 
 static void
@@ -2345,6 +2469,18 @@ _dispatch_queue_get_specific(void *ctxt)
        *ctxtp = NULL;
 }
 
+DISPATCH_ALWAYS_INLINE
+static inline void *
+_dispatch_queue_get_specific_inline(dispatch_queue_t dq, const void *key)
+{
+       void *ctxt = NULL;
+       if (fastpath(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE && dq->dq_specific_q)){
+               ctxt = (void *)key;
+               dispatch_sync_f(dq->dq_specific_q, &ctxt, _dispatch_queue_get_specific);
+       }
+       return ctxt;
+}
+
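/*
 * Usage sketch for the lookup helper above and the public entry points
 * below (assumed client code; ctxt and destructor are hypothetical):
 *
 *     static char example_key;   // the key is an address, not its contents
 *     dispatch_queue_set_specific(q, &example_key, ctxt, destructor);
 *     void *v = dispatch_queue_get_specific(q, &example_key); // this queue
 *     void *w = dispatch_get_specific(&example_key);  // walks target chain
 */
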
 DISPATCH_NOINLINE
 void *
 dispatch_queue_get_specific(dispatch_queue_t dq, const void *key)
@@ -2352,13 +2488,7 @@ dispatch_queue_get_specific(dispatch_queue_t dq, const void *key)
        if (slowpath(!key)) {
                return NULL;
        }
-       void *ctxt = NULL;
-
-       if (fastpath(dq->dq_specific_q)) {
-               ctxt = (void *)key;
-               dispatch_sync_f(dq->dq_specific_q, &ctxt, _dispatch_queue_get_specific);
-       }
-       return ctxt;
+       return _dispatch_queue_get_specific_inline(dq, key);
 }
 
 DISPATCH_NOINLINE
@@ -2372,12 +2502,8 @@ dispatch_get_specific(const void *key)
        dispatch_queue_t dq = _dispatch_queue_get_current();
 
        while (slowpath(dq)) {
-               if (slowpath(dq->dq_specific_q)) {
-                       ctxt = (void *)key;
-                       dispatch_sync_f(dq->dq_specific_q, &ctxt,
-                                       _dispatch_queue_get_specific);
-                       if (ctxt) break;
-               }
+               ctxt = _dispatch_queue_get_specific_inline(dq, key);
+               if (ctxt) break;
                dq = dq->do_targetq;
        }
        return ctxt;
@@ -2392,7 +2518,7 @@ _dispatch_queue_is_exclusively_owned_by_current_thread_4IOHID(
                DISPATCH_CLIENT_CRASH(dq->dq_width, "Invalid queue type");
        }
        uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
-       return _dq_state_drain_locked_by(dq_state, _dispatch_tid_self());
+       return _dq_state_drain_locked_by_self(dq_state);
 }
 #endif
 
@@ -2404,12 +2530,13 @@ _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz)
 {
        size_t offset = 0;
        dispatch_queue_t target = dq->do_targetq;
+       const char *tlabel = target && target->dq_label ? target->dq_label : "";
        uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
 
-       offset += dsnprintf(&buf[offset], bufsiz - offset,
+       offset += dsnprintf(&buf[offset], bufsiz - offset, "sref = %d, "
                        "target = %s[%p], width = 0x%x, state = 0x%016llx",
-                       target && target->dq_label ? target->dq_label : "", target,
-                       dq->dq_width, (unsigned long long)dq_state);
+                       dq->dq_sref_cnt + 1, tlabel, target, dq->dq_width,
+                       (unsigned long long)dq_state);
        if (_dq_state_is_suspended(dq_state)) {
                offset += dsnprintf(&buf[offset], bufsiz - offset, ", suspended = %d",
                        _dq_state_suspend_cnt(dq_state));
@@ -2425,8 +2552,9 @@ _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz)
        if (_dq_state_is_dirty(dq_state)) {
                offset += dsnprintf(&buf[offset], bufsiz - offset, ", dirty");
        }
-       if (_dq_state_has_override(dq_state)) {
-               offset += dsnprintf(&buf[offset], bufsiz - offset, ", async-override");
+       dispatch_qos_t qos = _dq_state_max_qos(dq_state);
+       if (qos) {
+               offset += dsnprintf(&buf[offset], bufsiz - offset, ", max qos %d", qos);
        }
        mach_port_t owner = _dq_state_drain_owner(dq_state);
        if (!_dispatch_queue_is_thread_bound(dq) && owner) {
@@ -2472,34 +2600,40 @@ dispatch_debug_queue(dispatch_queue_t dq, const char* str) {
 }
 #endif
 
-#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION
-static OSSpinLock _dispatch_stats_lock;
+#if DISPATCH_PERF_MON
+
+#define DISPATCH_PERF_MON_BUCKETS 8
+
 static struct {
-       uint64_t time_total;
-       uint64_t count_total;
-       uint64_t thread_total;
-} _dispatch_stats[65]; // ffs*/fls*() returns zero when no bits are set
+       uint64_t volatile time_total;
+       uint64_t volatile count_total;
+       uint64_t volatile thread_total;
+} _dispatch_stats[DISPATCH_PERF_MON_BUCKETS];
+DISPATCH_USED static size_t _dispatch_stat_buckets = DISPATCH_PERF_MON_BUCKETS;
 
-static void
-_dispatch_queue_merge_stats(uint64_t start)
+void
+_dispatch_queue_merge_stats(uint64_t start, bool trace, perfmon_thread_type type)
 {
        uint64_t delta = _dispatch_absolute_time() - start;
        unsigned long count;
-
+       int bucket = 0;
        count = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key);
        _dispatch_thread_setspecific(dispatch_bcounter_key, NULL);
-
-       int bucket = flsl((long)count);
-
-       // 64-bit counters on 32-bit require a lock or a queue
-       OSSpinLockLock(&_dispatch_stats_lock);
-
-       _dispatch_stats[bucket].time_total += delta;
-       _dispatch_stats[bucket].count_total += count;
-       _dispatch_stats[bucket].thread_total++;
-
-       OSSpinLockUnlock(&_dispatch_stats_lock);
+       if (count == 0) {
+               bucket = 0;
+               if (trace) _dispatch_ktrace1(DISPATCH_PERF_MON_worker_useless, type);
+       } else {
+               bucket = MIN(DISPATCH_PERF_MON_BUCKETS - 1,
+                                        (int)sizeof(count) * CHAR_BIT - __builtin_clzl(count));
+               os_atomic_add(&_dispatch_stats[bucket].count_total, count, relaxed);
+       }
+       os_atomic_add(&_dispatch_stats[bucket].time_total, delta, relaxed);
+       os_atomic_inc(&_dispatch_stats[bucket].thread_total, relaxed);
+       if (trace) {
+               _dispatch_ktrace3(DISPATCH_PERF_MON_worker_thread_end, count, delta, type);
+       }
 }
+
 #endif
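
/*
 * Bucket math sketch for _dispatch_queue_merge_stats() above: bucket 0 is
 * reserved for useless wakeups (count == 0); otherwise the bucket is the
 * index of the highest set bit of count, clamped to
 * DISPATCH_PERF_MON_BUCKETS - 1. On LP64, count == 5 gives
 * 64 - __builtin_clzl(5) == 64 - 61 == 3, i.e. bucket 3, and any
 * count >= 64 is clamped into bucket 7.
 */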
 
 #pragma mark -
@@ -2519,8 +2653,8 @@ _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp,
                                pflags |= _PTHREAD_SET_SELF_WQ_KEVENT_UNBIND;
                                // when we unbind, overcommitness can flip, so we need to learn
                                // it from the defaultpri, see _dispatch_priority_compute_update
-                               pp |= (_dispatch_get_defaultpriority() &
-                                               _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
+                               pp |= (_dispatch_get_basepri() &
+                                               DISPATCH_PRIORITY_FLAG_OVERCOMMIT);
                        } else {
                                // else we need to keep the one that is set in the current pri
                                pp |= (old_pri & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
@@ -2528,8 +2662,9 @@ _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp,
                        if (likely(old_pri & ~_PTHREAD_PRIORITY_FLAGS_MASK)) {
                                pflags |= _PTHREAD_SET_SELF_QOS_FLAG;
                        }
-                       if (unlikely(DISPATCH_QUEUE_DRAIN_OWNER(&_dispatch_mgr_q) ==
-                                       _dispatch_tid_self())) {
+                       uint64_t mgr_dq_state =
+                                       os_atomic_load2o(&_dispatch_mgr_q, dq_state, relaxed);
+                       if (unlikely(_dq_state_drain_locked_by_self(mgr_dq_state))) {
                                DISPATCH_INTERNAL_CRASH(pp,
                                                "Changing the QoS while on the manager queue");
                        }
@@ -2558,7 +2693,7 @@ _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp,
 DISPATCH_NOINLINE
 voucher_t
 _dispatch_set_priority_and_voucher_slow(pthread_priority_t priority,
-               voucher_t v, _dispatch_thread_set_self_t flags)
+               voucher_t v, dispatch_thread_set_self_t flags)
 {
        voucher_t ov = DISPATCH_NO_VOUCHER;
        mach_voucher_t kv = VOUCHER_NO_MACH_VOUCHER;
@@ -2573,9 +2708,6 @@ _dispatch_set_priority_and_voucher_slow(pthread_priority_t priority,
                        kv = _voucher_swap_and_get_mach_voucher(ov, v);
                }
        }
-#if !PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK
-       flags &= ~(_dispatch_thread_set_self_t)DISPATCH_THREAD_PARK;
-#endif
        if (!(flags & DISPATCH_THREAD_PARK)) {
                _dispatch_set_priority_and_mach_voucher_slow(priority, kv);
        }
@@ -2589,6 +2721,34 @@ _dispatch_set_priority_and_voucher_slow(pthread_priority_t priority,
 #pragma mark -
 #pragma mark dispatch_continuation_t
 
+const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = {
+       DC_VTABLE_ENTRY(ASYNC_REDIRECT,
+               .do_kind = "dc-redirect",
+               .do_invoke = _dispatch_async_redirect_invoke),
+#if HAVE_MACH
+       DC_VTABLE_ENTRY(MACH_SEND_BARRRIER_DRAIN,
+               .do_kind = "dc-mach-send-drain",
+               .do_invoke = _dispatch_mach_send_barrier_drain_invoke),
+       DC_VTABLE_ENTRY(MACH_SEND_BARRIER,
+               .do_kind = "dc-mach-send-barrier",
+               .do_invoke = _dispatch_mach_barrier_invoke),
+       DC_VTABLE_ENTRY(MACH_RECV_BARRIER,
+               .do_kind = "dc-mach-recv-barrier",
+               .do_invoke = _dispatch_mach_barrier_invoke),
+       DC_VTABLE_ENTRY(MACH_ASYNC_REPLY,
+               .do_kind = "dc-mach-async-reply",
+               .do_invoke = _dispatch_mach_msg_async_reply_invoke),
+#endif
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+       DC_VTABLE_ENTRY(OVERRIDE_STEALING,
+               .do_kind = "dc-override-stealing",
+               .do_invoke = _dispatch_queue_override_invoke),
+       DC_VTABLE_ENTRY(OVERRIDE_OWNING,
+               .do_kind = "dc-override-owning",
+               .do_invoke = _dispatch_queue_override_invoke),
+#endif
+};
+
 static void
 _dispatch_force_cache_cleanup(void)
 {
@@ -2622,7 +2782,7 @@ _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t dc)
        dc = _dispatch_thread_getspecific(dispatch_cache_key);
        int cnt;
        if (!dc || (cnt = dc->dc_cache_cnt -
-                       _dispatch_continuation_cache_limit) <= 0){
+                       _dispatch_continuation_cache_limit) <= 0) {
                return;
        }
        do {
@@ -2633,38 +2793,11 @@ _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t dc)
 }
 #endif
 
-DISPATCH_ALWAYS_INLINE_NDEBUG
-static inline void
-_dispatch_continuation_slow_item_signal(dispatch_queue_t dq,
-               dispatch_object_t dou)
-{
-       dispatch_continuation_t dc = dou._dc;
-       pthread_priority_t pp = dq->dq_override;
-
-       _dispatch_trace_continuation_pop(dq, dc);
-       if (pp > (dc->dc_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
-               _dispatch_wqthread_override_start((mach_port_t)dc->dc_data, pp);
-       }
-       _dispatch_thread_event_signal((dispatch_thread_event_t)dc->dc_other);
-       _dispatch_introspection_queue_item_complete(dc);
-}
-
 DISPATCH_NOINLINE
 static void
 _dispatch_continuation_push(dispatch_queue_t dq, dispatch_continuation_t dc)
 {
-       _dispatch_queue_push(dq, dc,
-                       _dispatch_continuation_get_override_priority(dq, dc));
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_continuation_push_sync_slow(dispatch_queue_t dq,
-               dispatch_continuation_t dc)
-{
-       _dispatch_queue_push_inline(dq, dc,
-                       _dispatch_continuation_get_override_priority(dq, dc),
-                       DISPATCH_WAKEUP_SLOW_WAITER);
+       dx_push(dq, dc, _dispatch_continuation_override_qos(dq, dc));
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -2821,20 +2954,16 @@ _dispatch_block_invoke_direct(const struct dispatch_block_private_data_s *dbcpd)
        }
        if (atomic_flags & DBF_CANCELED) goto out;
 
-       pthread_priority_t op = DISPATCH_NO_PRIORITY, p = DISPATCH_NO_PRIORITY;
-       _dispatch_thread_set_self_t adopt_flags = 0;
-       if (flags & DISPATCH_BLOCK_HAS_PRIORITY) {
-               op = _dispatch_get_priority();
+       pthread_priority_t op = 0, p = 0;
+       op = _dispatch_block_invoke_should_set_priority(flags, dbpd->dbpd_priority);
+       if (op) {
                p = dbpd->dbpd_priority;
-               if (_dispatch_block_sync_should_enforce_qos_class(flags)) {
-                       adopt_flags |= DISPATCH_PRIORITY_ENFORCE;
-               }
        }
        voucher_t ov, v = DISPATCH_NO_VOUCHER;
        if (flags & DISPATCH_BLOCK_HAS_VOUCHER) {
                v = dbpd->dbpd_voucher;
        }
-       ov = _dispatch_adopt_priority_and_set_voucher(p, v, adopt_flags);
+       ov = _dispatch_set_priority_and_voucher(p, v, 0);
        dbpd->dbpd_thread = _dispatch_tid_self();
        _dispatch_client_callout(dbpd->dbpd_block,
                        _dispatch_Block_invoke(dbpd->dbpd_block));
@@ -2854,28 +2983,18 @@ _dispatch_block_sync_invoke(void *block)
        dispatch_block_private_data_t dbpd = _dispatch_block_get_data(b);
        dispatch_block_flags_t flags = dbpd->dbpd_flags;
        unsigned int atomic_flags = dbpd->dbpd_atomic_flags;
-       if (slowpath(atomic_flags & DBF_WAITED)) {
+       if (unlikely(atomic_flags & DBF_WAITED)) {
                DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both "
                                "run more than once and waited for");
        }
        if (atomic_flags & DBF_CANCELED) goto out;
 
-       pthread_priority_t op = DISPATCH_NO_PRIORITY, p = DISPATCH_NO_PRIORITY;
-       _dispatch_thread_set_self_t adopt_flags = 0;
-       if (flags & DISPATCH_BLOCK_HAS_PRIORITY) {
-               op = _dispatch_get_priority();
-               p = dbpd->dbpd_priority;
-               if (_dispatch_block_sync_should_enforce_qos_class(flags)) {
-                       adopt_flags |= DISPATCH_PRIORITY_ENFORCE;
-               }
-       }
-       voucher_t ov, v = DISPATCH_NO_VOUCHER;
+       voucher_t ov = DISPATCH_NO_VOUCHER;
        if (flags & DISPATCH_BLOCK_HAS_VOUCHER) {
-               v = dbpd->dbpd_voucher;
+               ov = _dispatch_adopt_priority_and_set_voucher(0, dbpd->dbpd_voucher, 0);
        }
-       ov = _dispatch_adopt_priority_and_set_voucher(p, v, adopt_flags);
        dbpd->dbpd_block();
-       _dispatch_reset_priority_and_voucher(op, ov);
+       _dispatch_reset_voucher(ov, 0);
 out:
        if ((atomic_flags & DBF_PERFORM) == 0) {
                if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) {
@@ -2887,13 +3006,57 @@ out:
        oq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed);
        if (oq) {
                // balances dispatch_{,barrier_,}sync
-               _os_object_release_internal(oq->_as_os_obj);
+               _os_object_release_internal_n(oq->_as_os_obj, 2);
        }
 }
 
-DISPATCH_ALWAYS_INLINE
 static void
-_dispatch_block_async_invoke2(dispatch_block_t b, bool release)
+_dispatch_block_async_invoke_reset_max_qos(dispatch_queue_t dq,
+               dispatch_qos_t qos)
+{
+       uint64_t old_state, new_state, qos_bits = _dq_state_from_qos(qos);
+
+       // Only dispatch queues can reach this point (as opposed to sources or more
+       // complex objects) which allows us to handle the DIRTY bit protocol by only
+       // looking at the tail
+       dispatch_assert(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE);
+
+again:
+       os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
+               dispatch_assert(_dq_state_is_base_wlh(old_state));
+               if ((old_state & DISPATCH_QUEUE_MAX_QOS_MASK) <= qos_bits) {
+                       // Nothing to do if the QoS isn't going down
+                       os_atomic_rmw_loop_give_up(return);
+               }
+               if (_dq_state_is_dirty(old_state)) {
+                       os_atomic_rmw_loop_give_up({
+                               // just renew the drain lock with an acquire barrier, to see
+                               // what the enqueuer that set DIRTY has done.
+                               // the xor generates better assembly as DISPATCH_QUEUE_DIRTY
+                               // is already in a register
+                               os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire);
+                               if (!dq->dq_items_tail) {
+                                       goto again;
+                               }
+                               return;
+                       });
+               }
+
+               new_state  = old_state;
+               new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
+               new_state |= qos_bits;
+       });
+
+       _dispatch_deferred_items_get()->ddi_wlh_needs_update = true;
+       _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE);
+}
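
/*
 * DIRTY-bit protocol sketch for the loop above: an enqueuer publishes new
 * work by writing dq_items_tail first and setting DISPATCH_QUEUE_DIRTY
 * second (with release semantics). If this loop observes DIRTY it must not
 * lower the max QoS yet: it clears the bit with an acquire barrier (the
 * xor) so the enqueuer's tail write becomes visible, and retries only if
 * the queue turns out to still be empty.
 */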
+
+#define DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE           0x1
+#define DISPATCH_BLOCK_ASYNC_INVOKE_NO_OVERRIDE_RESET 0x2
+
+DISPATCH_NOINLINE
+static void
+_dispatch_block_async_invoke2(dispatch_block_t b, unsigned long invoke_flags)
 {
        dispatch_block_private_data_t dbpd = _dispatch_block_get_data(b);
        unsigned int atomic_flags = dbpd->dbpd_atomic_flags;
@@ -2901,6 +3064,17 @@ _dispatch_block_async_invoke2(dispatch_block_t b, bool release)
                DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both "
                                "run more than once and waited for");
        }
+
+       if (unlikely((dbpd->dbpd_flags &
+                       DISPATCH_BLOCK_IF_LAST_RESET_QUEUE_QOS_OVERRIDE) &&
+                       !(invoke_flags & DISPATCH_BLOCK_ASYNC_INVOKE_NO_OVERRIDE_RESET))) {
+               dispatch_queue_t dq = _dispatch_get_current_queue();
+               dispatch_qos_t qos = _dispatch_qos_from_pp(_dispatch_get_priority());
+               if ((dispatch_wlh_t)dq == _dispatch_get_wlh() && !dq->dq_items_tail) {
+                       _dispatch_block_async_invoke_reset_max_qos(dq, qos);
+               }
+       }
+
        if (!slowpath(atomic_flags & DBF_CANCELED)) {
                dbpd->dbpd_block();
        }
@@ -2909,13 +3083,14 @@ _dispatch_block_async_invoke2(dispatch_block_t b, bool release)
                        dispatch_group_leave(_dbpd_group(dbpd));
                }
        }
-       os_mpsc_queue_t oq;
-       oq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed);
+
+       os_mpsc_queue_t oq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed);
        if (oq) {
                // balances dispatch_{,barrier_,group_}async
-               _os_object_release_internal_inline(oq->_as_os_obj);
+               _os_object_release_internal_n_inline(oq->_as_os_obj, 2);
        }
-       if (release) {
+
+       if (invoke_flags & DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE) {
                Block_release(b);
        }
 }
@@ -2923,20 +3098,35 @@ _dispatch_block_async_invoke2(dispatch_block_t b, bool release)
 static void
 _dispatch_block_async_invoke(void *block)
 {
-       _dispatch_block_async_invoke2(block, false);
+       _dispatch_block_async_invoke2(block, 0);
 }
 
 static void
 _dispatch_block_async_invoke_and_release(void *block)
 {
-       _dispatch_block_async_invoke2(block, true);
+       _dispatch_block_async_invoke2(block, DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE);
+}
+
+static void
+_dispatch_block_async_invoke_and_release_mach_barrier(void *block)
+{
+       _dispatch_block_async_invoke2(block, DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE |
+                       DISPATCH_BLOCK_ASYNC_INVOKE_NO_OVERRIDE_RESET);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_block_supports_wait_and_cancel(dispatch_block_private_data_t dbpd)
+{
+       return dbpd && !(dbpd->dbpd_flags &
+                       DISPATCH_BLOCK_IF_LAST_RESET_QUEUE_QOS_OVERRIDE);
 }
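
For context: the wait/cancel entry points below only accept block objects
that carry private data, i.e. blocks that went through dispatch_block_create()
(and, with this change, that were not created with the private
DISPATCH_BLOCK_IF_LAST_RESET_QUEUE_QOS_OVERRIDE flag). A typical public-API
use, sketched for non-ARC C (queue and timeout are illustrative):

    #include <dispatch/dispatch.h>
    #include <Block.h>

    static void
    wait_or_cancel_sketch(dispatch_queue_t q)
    {
        dispatch_block_t b = dispatch_block_create(0, ^{
            /* work */
        });
        dispatch_async(q, b);
        // dispatch_block_wait() returns 0 on success, non-zero on timeout;
        // it would crash if `b` were a plain block literal.
        if (dispatch_block_wait(b, dispatch_time(DISPATCH_TIME_NOW,
                (int64_t)NSEC_PER_SEC)) != 0) {
            dispatch_block_cancel(b); // best effort: skips the body if unstarted
        }
        Block_release(b);
    }
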
 
 void
 dispatch_block_cancel(dispatch_block_t db)
 {
        dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
-       if (!dbpd) {
+       if (unlikely(!_dispatch_block_supports_wait_and_cancel(dbpd))) {
                DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to "
                                "dispatch_block_cancel()");
        }
@@ -2947,7 +3137,7 @@ long
 dispatch_block_testcancel(dispatch_block_t db)
 {
        dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
-       if (!dbpd) {
+       if (unlikely(!_dispatch_block_supports_wait_and_cancel(dbpd))) {
                DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to "
                                "dispatch_block_testcancel()");
        }
@@ -2958,7 +3148,7 @@ long
 dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout)
 {
        dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
-       if (!dbpd) {
+       if (unlikely(!_dispatch_block_supports_wait_and_cancel(dbpd))) {
                DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to "
                                "dispatch_block_wait()");
        }
@@ -2985,8 +3175,8 @@ dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout)
                // neither of us would ever release. Side effect: After a _wait
                // that times out, subsequent waits will not boost the qos of the
                // still-running block.
-               dx_wakeup(boost_oq, pp, DISPATCH_WAKEUP_OVERRIDING |
-                               DISPATCH_WAKEUP_CONSUME);
+               dx_wakeup(boost_oq, _dispatch_qos_from_pp(pp),
+                               DISPATCH_WAKEUP_BLOCK_WAIT | DISPATCH_WAKEUP_CONSUME_2);
        }
 
        mach_port_t boost_th = dbpd->dbpd_thread;
@@ -3050,10 +3240,13 @@ _dispatch_continuation_init_slow(dispatch_continuation_t dc,
 
        // balanced in d_block_async_invoke_and_release or d_block_wait
        if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, oq, relaxed)) {
-               _os_object_retain_internal_inline(oq->_as_os_obj);
+               _os_object_retain_internal_n_inline(oq->_as_os_obj, 2);
        }
 
-       if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) {
+       if (dc_flags & DISPATCH_OBJ_MACH_BARRIER) {
+               dispatch_assert(dc_flags & DISPATCH_OBJ_CONSUME_BIT);
+               dc->dc_func = _dispatch_block_async_invoke_and_release_mach_barrier;
+       } else if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) {
                dc->dc_func = _dispatch_block_async_invoke_and_release;
        } else {
                dc->dc_func = _dispatch_block_async_invoke;
@@ -3081,28 +3274,7 @@ _dispatch_continuation_init_slow(dispatch_continuation_t dc,
        dc->dc_flags = dc_flags;
 }
 
-void
-_dispatch_continuation_update_bits(dispatch_continuation_t dc,
-               uintptr_t dc_flags)
-{
-       dc->dc_flags = dc_flags;
-       if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) {
-               if (dc_flags & DISPATCH_OBJ_BLOCK_PRIVATE_DATA_BIT) {
-                       dc->dc_func = _dispatch_block_async_invoke_and_release;
-               } else if (dc_flags & DISPATCH_OBJ_BLOCK_BIT) {
-                       dc->dc_func = _dispatch_call_block_and_release;
-               }
-       } else {
-               if (dc_flags & DISPATCH_OBJ_BLOCK_PRIVATE_DATA_BIT) {
-                       dc->dc_func = _dispatch_block_async_invoke;
-               } else if (dc_flags & DISPATCH_OBJ_BLOCK_BIT) {
-                       dc->dc_func = _dispatch_Block_invoke(dc->dc_ctxt);
-               }
-       }
-}
-
 #endif // __BLOCKS__
-
 #pragma mark -
 #pragma mark dispatch_barrier_async
 
@@ -3153,12 +3325,12 @@ _dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt,
        dc->dc_ctxt = ctxt;
        dc->dc_voucher = DISPATCH_NO_VOUCHER;
        dc->dc_priority = DISPATCH_NO_PRIORITY;
-       _dispatch_queue_push(dq, dc, 0);
+       dx_push(dq, dc, 0);
 }
 
 #ifdef __BLOCKS__
 void
-dispatch_barrier_async(dispatch_queue_t dq, void (^work)(void))
+dispatch_barrier_async(dispatch_queue_t dq, dispatch_block_t work)
 {
        dispatch_continuation_t dc = _dispatch_continuation_alloc();
        uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_BARRIER_BIT;
@@ -3173,7 +3345,7 @@ dispatch_barrier_async(dispatch_queue_t dq, void (^work)(void))
 
 void
 _dispatch_async_redirect_invoke(dispatch_continuation_t dc,
-               dispatch_invoke_flags_t flags)
+               dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags)
 {
        dispatch_thread_frame_s dtf;
        struct dispatch_continuation_s *other_dc = dc->dc_other;
@@ -3182,9 +3354,7 @@ _dispatch_async_redirect_invoke(dispatch_continuation_t dc,
        // the "right" root queue was stuffed into dc_func
        dispatch_queue_t assumed_rq = (dispatch_queue_t)dc->dc_func;
        dispatch_queue_t dq = dc->dc_data, rq, old_dq;
-       struct _dispatch_identity_s di;
-
-       pthread_priority_t op, dp, old_dp;
+       dispatch_priority_t old_dbp;
 
        if (ctxt_flags) {
                flags &= ~_DISPATCH_INVOKE_AUTORELEASE_MASK;
@@ -3192,44 +3362,29 @@ _dispatch_async_redirect_invoke(dispatch_continuation_t dc,
        }
        old_dq = _dispatch_get_current_queue();
        if (assumed_rq) {
-               _dispatch_queue_set_current(assumed_rq);
-               _dispatch_root_queue_identity_assume(&di, 0);
-       }
-
-       old_dp = _dispatch_set_defaultpriority(dq->dq_priority, &dp);
-       op = dq->dq_override;
-       if (op > (dp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
-               _dispatch_wqthread_override_start(_dispatch_tid_self(), op);
-               // Ensure that the root queue sees that this thread was overridden.
-               _dispatch_set_defaultpriority_override();
+               old_dbp = _dispatch_root_queue_identity_assume(assumed_rq);
+               _dispatch_set_basepri(dq->dq_priority);
+       } else {
+               old_dbp = _dispatch_set_basepri(dq->dq_priority);
        }
 
        _dispatch_thread_frame_push(&dtf, dq);
        _dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER,
                        DISPATCH_OBJ_CONSUME_BIT, {
-               _dispatch_continuation_pop(other_dc, dq, flags);
+               _dispatch_continuation_pop(other_dc, dic, flags, dq);
        });
        _dispatch_thread_frame_pop(&dtf);
-       if (assumed_rq) {
-               _dispatch_root_queue_identity_restore(&di);
-               _dispatch_queue_set_current(old_dq);
-       }
-       _dispatch_reset_defaultpriority(old_dp);
+       if (assumed_rq) _dispatch_queue_set_current(old_dq);
+       _dispatch_reset_basepri(old_dbp);
 
        rq = dq->do_targetq;
        while (slowpath(rq->do_targetq) && rq != old_dq) {
-               _dispatch_non_barrier_complete(rq);
+               _dispatch_queue_non_barrier_complete(rq);
                rq = rq->do_targetq;
        }
 
-       _dispatch_non_barrier_complete(dq);
-
-       if (dtf.dtf_deferred) {
-               struct dispatch_object_s *dou = dtf.dtf_deferred;
-               return _dispatch_queue_drain_deferred_invoke(dq, flags, 0, dou);
-       }
-
-       _dispatch_release_tailcall(dq);
+       _dispatch_queue_non_barrier_complete(dq);
+       _dispatch_release_tailcall(dq); // pairs with _dispatch_async_redirect_wrap
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -3246,14 +3401,14 @@ _dispatch_async_redirect_wrap(dispatch_queue_t dq, dispatch_object_t dou)
        dc->dc_other = dou._do;
        dc->dc_voucher = DISPATCH_NO_VOUCHER;
        dc->dc_priority = DISPATCH_NO_PRIORITY;
-       _dispatch_retain(dq);
+       _dispatch_retain(dq); // released in _dispatch_async_redirect_invoke
        return dc;
 }
 
 DISPATCH_NOINLINE
 static void
 _dispatch_async_f_redirect(dispatch_queue_t dq,
-               dispatch_object_t dou, pthread_priority_t pp)
+               dispatch_object_t dou, dispatch_qos_t qos)
 {
        if (!slowpath(_dispatch_object_is_redirection(dou))) {
                dou._dc = _dispatch_async_redirect_wrap(dq, dou);
@@ -3275,7 +3430,7 @@ _dispatch_async_f_redirect(dispatch_queue_t dq,
                dq = dq->do_targetq;
        }
 
-       _dispatch_queue_push(dq, dou, pp);
+       dx_push(dq, dou, qos);
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -3288,7 +3443,8 @@ _dispatch_continuation_redirect(dispatch_queue_t dq,
        // by _dispatch_async_f2.
        // However we want to end up on the root queue matching `dc` qos, so pick up
        // the current override of `dq` which includes dc's override (and maybe more)
-       _dispatch_async_f_redirect(dq, dc, dq->dq_override);
+       uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
+       _dispatch_async_f_redirect(dq, dc, _dq_state_max_qos(dq_state));
        _dispatch_introspection_queue_item_complete(dc);
 }
 
@@ -3309,7 +3465,7 @@ _dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc)
        }
 
        return _dispatch_async_f_redirect(dq, dc,
-                       _dispatch_continuation_get_override_priority(dq, dc));
+                       _dispatch_continuation_override_qos(dq, dc));
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -3345,7 +3501,7 @@ dispatch_async_enforce_qos_class_f(dispatch_queue_t dq, void *ctxt,
 
 #ifdef __BLOCKS__
 void
-dispatch_async(dispatch_queue_t dq, void (^work)(void))
+dispatch_async(dispatch_queue_t dq, dispatch_block_t work)
 {
        dispatch_continuation_t dc = _dispatch_continuation_alloc();
        uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT;
@@ -3394,31 +3550,66 @@ dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
 #endif
 
 #pragma mark -
-#pragma mark dispatch_sync / dispatch_barrier_sync recurse and invoke
+#pragma mark _dispatch_sync_invoke / _dispatch_sync_complete
 
 DISPATCH_NOINLINE
 static void
-_dispatch_sync_function_invoke_slow(dispatch_queue_t dq, void *ctxt,
-               dispatch_function_t func)
+_dispatch_queue_non_barrier_complete(dispatch_queue_t dq)
 {
-       voucher_t ov;
-       dispatch_thread_frame_s dtf;
-       _dispatch_thread_frame_push(&dtf, dq);
-       ov = _dispatch_set_priority_and_voucher(0, dq->dq_override_voucher, 0);
-       _dispatch_client_callout(ctxt, func);
-       _dispatch_perfmon_workitem_inc();
-       _dispatch_reset_voucher(ov, 0);
-       _dispatch_thread_frame_pop(&dtf);
+       uint64_t old_state, new_state, owner_self = _dispatch_lock_value_for_self();
+
+       // see _dispatch_queue_resume()
+       os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
+               new_state = old_state - DISPATCH_QUEUE_WIDTH_INTERVAL;
+               if (unlikely(_dq_state_drain_locked(old_state))) {
+                       // make drain_try_unlock() fail and reconsider whether there's
+                       // enough width now for a new item
+                       new_state |= DISPATCH_QUEUE_DIRTY;
+               } else if (likely(_dq_state_is_runnable(new_state))) {
+                       uint64_t full_width = new_state;
+                       if (_dq_state_has_pending_barrier(old_state)) {
+                               full_width -= DISPATCH_QUEUE_PENDING_BARRIER;
+                               full_width += DISPATCH_QUEUE_WIDTH_INTERVAL;
+                               full_width += DISPATCH_QUEUE_IN_BARRIER;
+                       } else {
+                               full_width += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
+                               full_width += DISPATCH_QUEUE_IN_BARRIER;
+                       }
+                       if ((full_width & DISPATCH_QUEUE_WIDTH_MASK) ==
+                                       DISPATCH_QUEUE_WIDTH_FULL_BIT) {
+                               new_state = full_width;
+                               new_state &= ~DISPATCH_QUEUE_DIRTY;
+                               new_state |= owner_self;
+                       } else if (_dq_state_is_dirty(old_state)) {
+                               new_state |= DISPATCH_QUEUE_ENQUEUED;
+                       }
+               }
+       });
+
+       if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) {
+               if (_dq_state_is_dirty(old_state)) {
+                       // <rdar://problem/14637483>
+                       // dependency ordering for dq state changes that were flushed
+                       // and not acted upon
+                       os_atomic_thread_fence(dependency);
+                       dq = os_atomic_force_dependency_on(dq, old_state);
+               }
+               return _dispatch_queue_barrier_complete(dq, 0, 0);
+       }
+
+       if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) {
+               _dispatch_retain_2(dq);
+               dispatch_assert(!_dq_state_is_base_wlh(new_state));
+               return dx_push(dq->do_targetq, dq, _dq_state_max_qos(new_state));
+       }
 }
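
The full_width computation above asks: after this completion, would claiming
the entire width plus the barrier bit land the width field exactly on "full"?
If yes, no other reader remains and the completing thread converts its slot
into barrier ownership. A toy model of that hand-off, with a made-up state
layout (the real one uses DISPATCH_QUEUE_WIDTH_INTERVAL-scaled bits):

    #include <stdint.h>

    #define WIDTH_MASK 0xffffull      // hypothetical: in-flight item count
    #define IN_BARRIER (1ull << 16)   // hypothetical: barrier ownership bit

    // A completing reader returns its width slot; the last one out may
    // convert the now-empty width into the barrier lock.
    static uint64_t
    non_barrier_complete_sketch(uint64_t state)
    {
        uint64_t new_state = state - 1;       // this reader's slot goes back
        if ((new_state & WIDTH_MASK) == 0) {  // it was the last one out
            new_state |= IN_BARRIER;          // width becomes the lock
        }
        return new_state;
    }
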
 
+
 DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_sync_function_invoke_inline(dispatch_queue_t dq, void *ctxt,
                dispatch_function_t func)
 {
-       if (slowpath(dq->dq_override_voucher != DISPATCH_NO_VOUCHER)) {
-               return _dispatch_sync_function_invoke_slow(dq, ctxt, func);
-       }
        dispatch_thread_frame_s dtf;
        _dispatch_thread_frame_push(&dtf, dq);
        _dispatch_client_callout(ctxt, func);
@@ -3434,638 +3625,737 @@ _dispatch_sync_function_invoke(dispatch_queue_t dq, void *ctxt,
        _dispatch_sync_function_invoke_inline(dq, ctxt, func);
 }
 
-void
-_dispatch_sync_recurse_invoke(void *ctxt)
+DISPATCH_NOINLINE
+static void
+_dispatch_sync_complete_recurse(dispatch_queue_t dq, dispatch_queue_t stop_dq,
+               uintptr_t dc_flags)
 {
-       dispatch_continuation_t dc = ctxt;
-       _dispatch_sync_function_invoke(dc->dc_data, dc->dc_ctxt, dc->dc_func);
+       bool barrier = (dc_flags & DISPATCH_OBJ_BARRIER_BIT);
+       do {
+               if (dq == stop_dq) return;
+               if (barrier) {
+                       _dispatch_queue_barrier_complete(dq, 0, 0);
+               } else {
+                       _dispatch_queue_non_barrier_complete(dq);
+               }
+               dq = dq->do_targetq;
+               barrier = (dq->dq_width == 1);
+       } while (unlikely(dq->do_targetq));
 }
 
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_sync_function_recurse(dispatch_queue_t dq, void *ctxt,
-               dispatch_function_t func, pthread_priority_t pp)
+DISPATCH_NOINLINE
+static void
+_dispatch_sync_invoke_and_complete_recurse(dispatch_queue_t dq, void *ctxt,
+               dispatch_function_t func, uintptr_t dc_flags)
 {
-       struct dispatch_continuation_s dc = {
-               .dc_data = dq,
-               .dc_func = func,
-               .dc_ctxt = ctxt,
-       };
-       _dispatch_sync_f(dq->do_targetq, &dc, _dispatch_sync_recurse_invoke, pp);
+       _dispatch_sync_function_invoke_inline(dq, ctxt, func);
+       _dispatch_sync_complete_recurse(dq, NULL, dc_flags);
 }
 
 DISPATCH_NOINLINE
 static void
-_dispatch_non_barrier_sync_f_invoke(dispatch_queue_t dq, void *ctxt,
+_dispatch_sync_invoke_and_complete(dispatch_queue_t dq, void *ctxt,
                dispatch_function_t func)
 {
        _dispatch_sync_function_invoke_inline(dq, ctxt, func);
-       _dispatch_non_barrier_complete(dq);
+       _dispatch_queue_non_barrier_complete(dq);
 }
 
+/*
+ * For queues we can cheat and inline the unlock code, which is invalid
+ * for objects with a more complex state machine (sources or mach channels)
+ */
 DISPATCH_NOINLINE
 static void
-_dispatch_non_barrier_sync_f_recurse(dispatch_queue_t dq, void *ctxt,
-               dispatch_function_t func, pthread_priority_t pp)
+_dispatch_queue_barrier_sync_invoke_and_complete(dispatch_queue_t dq,
+               void *ctxt, dispatch_function_t func)
 {
-       _dispatch_sync_function_recurse(dq, ctxt, func, pp);
-       _dispatch_non_barrier_complete(dq);
-}
+       _dispatch_sync_function_invoke_inline(dq, ctxt, func);
+       if (unlikely(dq->dq_items_tail || dq->dq_width > 1)) {
+               return _dispatch_queue_barrier_complete(dq, 0, 0);
+       }
 
-DISPATCH_ALWAYS_INLINE
-static void
-_dispatch_non_barrier_sync_f_invoke_inline(dispatch_queue_t dq, void *ctxt,
-               dispatch_function_t func, pthread_priority_t pp)
-{
-       _dispatch_introspection_non_barrier_sync_begin(dq, func);
-       if (slowpath(dq->do_targetq->do_targetq)) {
-               return _dispatch_non_barrier_sync_f_recurse(dq, ctxt, func, pp);
+       // Presence of any of these bits requires more work that only
+       // _dispatch_queue_barrier_complete() handles properly
+       //
+       // Note: testing for RECEIVED_OVERRIDE or RECEIVED_SYNC_WAIT without
+       // checking the role is sloppy, but is a super fast check, and neither of
+       // these bits should be set if the lock was never contended/discovered.
+       const uint64_t fail_unlock_mask = DISPATCH_QUEUE_SUSPEND_BITS_MASK |
+                       DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_DIRTY |
+                       DISPATCH_QUEUE_RECEIVED_OVERRIDE | DISPATCH_QUEUE_SYNC_TRANSFER |
+                       DISPATCH_QUEUE_RECEIVED_SYNC_WAIT;
+       uint64_t old_state, new_state;
+
+       // similar to _dispatch_queue_drain_try_unlock
+       os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
+               new_state  = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED;
+               new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
+               new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
+               if (unlikely(old_state & fail_unlock_mask)) {
+                       os_atomic_rmw_loop_give_up({
+                               return _dispatch_queue_barrier_complete(dq, 0, 0);
+                       });
+               }
+       });
+       if (_dq_state_is_base_wlh(old_state)) {
+               _dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq);
        }
-       _dispatch_non_barrier_sync_f_invoke(dq, ctxt, func);
 }
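
The fast unlock above hinges on folding every "needs the slow path" condition
into a single mask so that the common case costs one test-and-branch. The
idiom in isolation, with invented bit names:

    #include <stdbool.h>
    #include <stdint.h>

    // Hypothetical state bits, not libdispatch's actual layout.
    #define ST_SUSPENDED (1ull << 0)
    #define ST_ENQUEUED  (1ull << 1)
    #define ST_DIRTY     (1ull << 2)
    #define ST_OVERRIDE  (1ull << 3)

    static bool
    can_unlock_inline_sketch(uint64_t old_state)
    {
        // The mask is composed at compile time; one branch covers all four
        // conditions that force the full barrier-complete path.
        const uint64_t fail_unlock_mask =
                ST_SUSPENDED | ST_ENQUEUED | ST_DIRTY | ST_OVERRIDE;
        return (old_state & fail_unlock_mask) == 0;
    }
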
 
 #pragma mark -
-#pragma mark dispatch_barrier_sync
+#pragma mark _dispatch_sync_wait / _dispatch_sync_waiter_wake
+
+#define DISPATCH_SYNC_WAITER_NO_UNLOCK (~0ull)
 
 DISPATCH_NOINLINE
 static void
-_dispatch_barrier_complete(dispatch_queue_t dq)
+_dispatch_sync_waiter_wake(dispatch_sync_context_t dsc,
+               dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state)
 {
-       uint64_t owned = DISPATCH_QUEUE_IN_BARRIER +
-                       dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
+       dispatch_wlh_t waiter_wlh = dsc->dc_data;
 
-       if (slowpath(dq->dq_items_tail)) {
-               return _dispatch_try_lock_transfer_or_wakeup(dq);
+       if (_dq_state_in_sync_transfer(old_state) ||
+                       _dq_state_in_sync_transfer(new_state) ||
+                       (waiter_wlh != DISPATCH_WLH_ANON)) {
+               _dispatch_event_loop_wake_owner(dsc, wlh, old_state, new_state);
        }
-
-       if (!fastpath(_dispatch_queue_drain_try_unlock(dq, owned))) {
-               // someone enqueued a slow item at the head
-               // looping may be its last chance
-               return _dispatch_try_lock_transfer_or_wakeup(dq);
+       if (waiter_wlh == DISPATCH_WLH_ANON) {
+               if (dsc->dsc_override_qos > dsc->dsc_override_qos_floor) {
+                       _dispatch_wqthread_override_start(dsc->dsc_waiter,
+                                       dsc->dsc_override_qos);
+               }
+               _dispatch_thread_event_signal(&dsc->dsc_event);
        }
+       _dispatch_introspection_queue_item_complete(dsc->_as_dc);
 }
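
An anonymous waiter (DISPATCH_WLH_ANON) parks on a per-waiter thread event
and is woken by a plain signal, while a workloop waiter is woken through the
kernel event loop. The thread-event half behaves like a one-shot binary
event; a sketch with POSIX primitives standing in for
_dispatch_thread_event_wait/_signal (libdispatch actually uses ulocks or
semaphores where available):

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool signaled;
    } thread_event_sketch_s;

    static void
    event_wait_sketch(thread_event_sketch_s *e) // waiter side (acquire)
    {
        pthread_mutex_lock(&e->lock);
        while (!e->signaled) pthread_cond_wait(&e->cond, &e->lock);
        pthread_mutex_unlock(&e->lock);
    }

    static void
    event_signal_sketch(thread_event_sketch_s *e) // waker side (release)
    {
        pthread_mutex_lock(&e->lock);
        e->signaled = true;
        pthread_cond_signal(&e->cond);
        pthread_mutex_unlock(&e->lock);
    }
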
 
 DISPATCH_NOINLINE
 static void
-_dispatch_barrier_sync_f_recurse(dispatch_queue_t dq, void *ctxt,
-               dispatch_function_t func, pthread_priority_t pp)
+_dispatch_sync_waiter_redirect_or_wake(dispatch_queue_t dq, uint64_t owned,
+               dispatch_object_t dou)
 {
-       _dispatch_sync_function_recurse(dq, ctxt, func, pp);
-       _dispatch_barrier_complete(dq);
-}
+       dispatch_sync_context_t dsc = (dispatch_sync_context_t)dou._dc;
+       uint64_t next_owner = 0, old_state, new_state;
+       dispatch_wlh_t wlh = NULL;
 
-DISPATCH_NOINLINE
-static void
-_dispatch_barrier_sync_f_invoke(dispatch_queue_t dq, void *ctxt,
-               dispatch_function_t func)
-{
-       _dispatch_sync_function_invoke_inline(dq, ctxt, func);
-       _dispatch_barrier_complete(dq);
-}
+       _dispatch_trace_continuation_pop(dq, dsc->_as_dc);
 
-DISPATCH_ALWAYS_INLINE
-static void
-_dispatch_barrier_sync_f_invoke_inline(dispatch_queue_t dq, void *ctxt,
-               dispatch_function_t func, pthread_priority_t pp)
-{
-       _dispatch_introspection_barrier_sync_begin(dq, func);
-       if (slowpath(dq->do_targetq->do_targetq)) {
-               return _dispatch_barrier_sync_f_recurse(dq, ctxt, func, pp);
+       if (owned == DISPATCH_SYNC_WAITER_NO_UNLOCK) {
+               dispatch_assert(!(dsc->dc_flags & DISPATCH_OBJ_BARRIER_BIT));
+               new_state = old_state = os_atomic_load2o(dq, dq_state, relaxed);
+       } else {
+               if (dsc->dc_flags & DISPATCH_OBJ_BARRIER_BIT) {
+                       next_owner = _dispatch_lock_value_from_tid(dsc->dsc_waiter);
+               }
+               os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
+                       new_state  = old_state - owned;
+                       new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
+                       new_state &= ~DISPATCH_QUEUE_DIRTY;
+                       new_state |= next_owner;
+                       if (_dq_state_is_base_wlh(old_state)) {
+                               new_state |= DISPATCH_QUEUE_SYNC_TRANSFER;
+                       }
+               });
+               if (_dq_state_is_base_wlh(old_state)) {
+                       wlh = (dispatch_wlh_t)dq;
+               } else if (_dq_state_received_override(old_state)) {
+                       // Ensure that the root queue sees that this thread was overridden.
+                       _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state));
+               }
+       }
+
+       if (dsc->dc_data == DISPATCH_WLH_ANON) {
+               if (dsc->dsc_override_qos < _dq_state_max_qos(old_state)) {
+                       dsc->dsc_override_qos = _dq_state_max_qos(old_state);
+               }
+       }
+
+       if (unlikely(_dq_state_is_inner_queue(old_state))) {
+               dispatch_queue_t tq = dq->do_targetq;
+               if (likely(tq->dq_width == 1)) {
+                       dsc->dc_flags = DISPATCH_OBJ_BARRIER_BIT |
+                                       DISPATCH_OBJ_SYNC_WAITER_BIT;
+               } else {
+                       dsc->dc_flags = DISPATCH_OBJ_SYNC_WAITER_BIT;
+               }
+               _dispatch_introspection_queue_item_complete(dsc->_as_dc);
+               return _dispatch_queue_push_sync_waiter(tq, dsc, 0);
        }
-       _dispatch_barrier_sync_f_invoke(dq, ctxt, func);
-}
 
-typedef struct dispatch_barrier_sync_context_s {
-       struct dispatch_continuation_s dbsc_dc;
-       dispatch_thread_frame_s dbsc_dtf;
-} *dispatch_barrier_sync_context_t;
+       return _dispatch_sync_waiter_wake(dsc, wlh, old_state, new_state);
+}
 
+DISPATCH_NOINLINE
 static void
-_dispatch_barrier_sync_f_slow_invoke(void *ctxt)
+_dispatch_queue_class_barrier_complete(dispatch_queue_t dq, dispatch_qos_t qos,
+               dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target,
+               uint64_t owned)
 {
-       dispatch_barrier_sync_context_t dbsc = ctxt;
-       dispatch_continuation_t dc = &dbsc->dbsc_dc;
-       dispatch_queue_t dq = dc->dc_data;
-       dispatch_thread_event_t event = (dispatch_thread_event_t)dc->dc_other;
+       uint64_t old_state, new_state, enqueue;
+       dispatch_queue_t tq;
 
-       dispatch_assert(dq == _dispatch_queue_get_current());
-#if DISPATCH_COCOA_COMPAT
-       if (slowpath(_dispatch_queue_is_thread_bound(dq))) {
-               dispatch_assert(_dispatch_thread_frame_get_current() == NULL);
+       if (target == DISPATCH_QUEUE_WAKEUP_MGR) {
+               tq = &_dispatch_mgr_q;
+               enqueue = DISPATCH_QUEUE_ENQUEUED_ON_MGR;
+       } else if (target) {
+               tq = (target == DISPATCH_QUEUE_WAKEUP_TARGET) ? dq->do_targetq : target;
+               enqueue = DISPATCH_QUEUE_ENQUEUED;
+       } else {
+               tq = NULL;
+               enqueue = 0;
+       }
 
-               // the block runs on the thread the queue is bound to and not
-               // on the calling thread, but we mean to see the calling thread
-               // dispatch thread frames, so we fake the link, and then undo it
-               _dispatch_thread_frame_set_current(&dbsc->dbsc_dtf);
-               // The queue is bound to a non-dispatch thread (e.g. main thread)
-               _dispatch_continuation_voucher_adopt(dc, DISPATCH_NO_VOUCHER,
-                               DISPATCH_OBJ_CONSUME_BIT);
-               _dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
-               os_atomic_store2o(dc, dc_func, NULL, release);
-               _dispatch_thread_frame_set_current(NULL);
+       os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
+               new_state  = _dq_state_merge_qos(old_state - owned, qos);
+               new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
+               if (unlikely(_dq_state_is_suspended(old_state))) {
+                       if (likely(_dq_state_is_base_wlh(old_state))) {
+                               new_state &= ~DISPATCH_QUEUE_ENQUEUED;
+                       }
+               } else if (enqueue) {
+                       if (!_dq_state_is_enqueued(old_state)) {
+                               new_state |= enqueue;
+                       }
+               } else if (unlikely(_dq_state_is_dirty(old_state))) {
+                       os_atomic_rmw_loop_give_up({
+                               // just renew the drain lock with an acquire barrier, to see
+                               // what the enqueuer that set DIRTY has done.
+                               // the xor generates better assembly as DISPATCH_QUEUE_DIRTY
+                               // is already in a register
+                               os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire);
+                               flags |= DISPATCH_WAKEUP_BARRIER_COMPLETE;
+                               return dx_wakeup(dq, qos, flags);
+                       });
+               } else if (likely(_dq_state_is_base_wlh(old_state))) {
+                       new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
+                       new_state &= ~DISPATCH_QUEUE_ENQUEUED;
+               } else {
+                       new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
+               }
+       });
+       old_state -= owned;
+       dispatch_assert(_dq_state_drain_locked_by_self(old_state));
+       dispatch_assert(!_dq_state_is_enqueued_on_manager(old_state));
+
+       if (_dq_state_received_override(old_state)) {
+               // Ensure that the root queue sees that this thread was overridden.
+               _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state));
        }
+
+       if (tq) {
+               if (likely((old_state ^ new_state) & enqueue)) {
+                       dispatch_assert(_dq_state_is_enqueued(new_state));
+                       dispatch_assert(flags & DISPATCH_WAKEUP_CONSUME_2);
+                       return _dispatch_queue_push_queue(tq, dq, new_state);
+               }
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+               // <rdar://problem/27694093> when doing sync to async handoff
+               // if the queue received an override, we have to forcefully redrive
+               // the same override so that a new stealer is enqueued because
+               // the previous one may be gone already
+               if (_dq_state_should_override(new_state)) {
+                       return _dispatch_queue_class_wakeup_with_override(dq, new_state,
+                                       flags);
+               }
 #endif
-       _dispatch_thread_event_signal(event); // release
+       }
+       if (flags & DISPATCH_WAKEUP_CONSUME_2) {
+               return _dispatch_release_2_tailcall(dq);
+       }
 }
 
 DISPATCH_NOINLINE
 static void
-_dispatch_barrier_sync_f_slow(dispatch_queue_t dq, void *ctxt,
-               dispatch_function_t func, pthread_priority_t pp)
+_dispatch_queue_barrier_complete(dispatch_queue_t dq, dispatch_qos_t qos,
+               dispatch_wakeup_flags_t flags)
 {
-       if (slowpath(!dq->do_targetq)) {
-               // see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
-               return _dispatch_sync_function_invoke(dq, ctxt, func);
-       }
+       dispatch_continuation_t dc_tmp, dc_start = NULL, dc_end = NULL;
+       dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE;
+       struct dispatch_object_s *dc = NULL;
+       uint64_t owned = DISPATCH_QUEUE_IN_BARRIER +
+                       dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
+       size_t count = 0;
 
-       if (!pp) {
-               pp = _dispatch_get_priority();
-               pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
-               pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG;
-       }
-       dispatch_thread_event_s event;
-       _dispatch_thread_event_init(&event);
-       struct dispatch_barrier_sync_context_s dbsc = {
-               .dbsc_dc = {
-                       .dc_data = dq,
-#if DISPATCH_COCOA_COMPAT
-                       .dc_func = func,
-                       .dc_ctxt = ctxt,
-#endif
-                       .dc_other = &event,
+       dispatch_assert(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE);
+
+       if (dq->dq_items_tail && !DISPATCH_QUEUE_IS_SUSPENDED(dq)) {
+               dc = _dispatch_queue_head(dq);
+               if (!_dispatch_object_is_sync_waiter(dc)) {
+                       // not a slow item, needs to wake up
+               } else if (likely(dq->dq_width == 1) ||
+                               _dispatch_object_is_barrier(dc)) {
+                       // rdar://problem/8290662 "barrier/writer lock transfer"
+                       dc_start = dc_end = (dispatch_continuation_t)dc;
+                       owned = 0;
+                       count = 1;
+                       dc = _dispatch_queue_next(dq, dc);
+               } else {
+                       // <rdar://problem/10164594> "reader lock transfer"
+                       // we must not wake waiters immediately because our right
+                       // for dequeuing is granted through holding the full "barrier" width
+                       // which a signaled work item could relinquish out from under our feet
+                       dc_start = (dispatch_continuation_t)dc;
+                       do {
+                               // no check on width here because concurrent queues
+                               // do not respect width for blocked readers, the thread
+                               // is already spent anyway
+                               dc_end = (dispatch_continuation_t)dc;
+                               owned -= DISPATCH_QUEUE_WIDTH_INTERVAL;
+                               count++;
+                               dc = _dispatch_queue_next(dq, dc);
+                       } while (dc && _dispatch_object_is_sync_waiter_non_barrier(dc));
                }
-       };
-#if DISPATCH_COCOA_COMPAT
-       // It's preferred to execute synchronous blocks on the current thread
-       // due to thread-local side effects, etc. However, blocks submitted
-       // to the main thread MUST be run on the main thread
-       if (slowpath(_dispatch_queue_is_thread_bound(dq))) {
-               // consumed by _dispatch_barrier_sync_f_slow_invoke
-               // or in the DISPATCH_COCOA_COMPAT hunk below
-               _dispatch_continuation_voucher_set(&dbsc.dbsc_dc, dq, 0);
-               // save frame linkage for _dispatch_barrier_sync_f_slow_invoke
-               _dispatch_thread_frame_save_state(&dbsc.dbsc_dtf);
-               // thread bound queues cannot mutate their target queue hierarchy
-               // so it's fine to look now
-               _dispatch_introspection_barrier_sync_begin(dq, func);
-       }
-#endif
-       uint32_t th_self = _dispatch_tid_self();
-       struct dispatch_continuation_s dbss = {
-               .dc_flags = DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT,
-               .dc_func = _dispatch_barrier_sync_f_slow_invoke,
-               .dc_ctxt = &dbsc,
-               .dc_data = (void*)(uintptr_t)th_self,
-               .dc_priority = pp,
-               .dc_other = &event,
-               .dc_voucher = DISPATCH_NO_VOUCHER,
-       };
 
-       uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
-       if (unlikely(_dq_state_drain_locked_by(dq_state, th_self))) {
-               DISPATCH_CLIENT_CRASH(dq, "dispatch_barrier_sync called on queue "
-                               "already owned by current thread");
+               if (count) {
+                       do {
+                               dc_tmp = dc_start;
+                               dc_start = dc_start->do_next;
+                               _dispatch_sync_waiter_redirect_or_wake(dq, owned, dc_tmp);
+                               owned = DISPATCH_SYNC_WAITER_NO_UNLOCK;
+                       } while (dc_tmp != dc_end);
+                       if (flags & DISPATCH_WAKEUP_CONSUME_2) {
+                               return _dispatch_release_2_tailcall(dq);
+                       }
+                       return;
+               }
+               if (!(flags & DISPATCH_WAKEUP_CONSUME_2)) {
+                       _dispatch_retain_2(dq);
+                       flags |= DISPATCH_WAKEUP_CONSUME_2;
+               }
+               target = DISPATCH_QUEUE_WAKEUP_TARGET;
        }
 
-       _dispatch_continuation_push_sync_slow(dq, &dbss);
-       _dispatch_thread_event_wait(&event); // acquire
-       _dispatch_thread_event_destroy(&event);
-       if (_dispatch_queue_received_override(dq, pp)) {
-               // Ensure that the root queue sees that this thread was overridden.
-               // pairs with the _dispatch_wqthread_override_start in
-               // _dispatch_continuation_slow_item_signal
-               _dispatch_set_defaultpriority_override();
-       }
+       return _dispatch_queue_class_barrier_complete(dq, qos, flags, target, owned);
+}
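
The "reader lock transfer" above peels the maximal run of consecutive
non-barrier sync waiters off the head of the queue and wakes them as one
batch; only the first wake hands over the drain-lock state (owned), the
rest merely receive their width slice (DISPATCH_SYNC_WAITER_NO_UNLOCK).
The traversal pattern, reduced to an intrusive singly-linked list (types
and callbacks hypothetical):

    #include <stdbool.h>

    struct waiter_sketch {
        struct waiter_sketch *do_next; // intrusive link, like continuations
        bool is_barrier;
    };

    static void
    wake_reader_run_sketch(struct waiter_sketch *head,
            void (*wake)(struct waiter_sketch *, bool first))
    {
        // Find the end of the run of non-barrier waiters at the head.
        struct waiter_sketch *end = head;
        while (end->do_next && !end->do_next->is_barrier) {
            end = end->do_next;
        }
        // Wake [head, end] in order; only the first transfers the lock.
        bool first = true;
        for (struct waiter_sketch *w = head;; w = w->do_next) {
            wake(w, first);
            first = false;
            if (w == end) break;
        }
    }
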
 
 #if DISPATCH_COCOA_COMPAT
-       // Queue bound to a non-dispatch thread
-       if (dbsc.dbsc_dc.dc_func == NULL) {
-               return;
-       } else if (dbsc.dbsc_dc.dc_voucher) {
-               // this almost never happens, unless a dispatch_sync() onto a thread
-               // bound queue went to the slow path at the same time dispatch_main()
-               // is called, or the queue is detached from the runloop.
-               _voucher_release(dbsc.dbsc_dc.dc_voucher);
-       }
-#endif
+static void
+_dispatch_sync_thread_bound_invoke(void *ctxt)
+{
+       dispatch_sync_context_t dsc = ctxt;
+       dispatch_queue_t cq = _dispatch_queue_get_current();
+       dispatch_queue_t orig_dq = dsc->dc_other;
+       dispatch_thread_frame_s dtf;
+       dispatch_assert(_dispatch_queue_is_thread_bound(cq));
+
+       // the block runs on the thread the queue is bound to and not
+       // on the calling thread, but we mean to see the calling thread
+       // dispatch thread frames, so we fake the link, and then undo it
+       _dispatch_thread_frame_push_and_rebase(&dtf, orig_dq, &dsc->dsc_dtf);
+       _dispatch_client_callout(dsc->dsc_ctxt, dsc->dsc_func);
+       _dispatch_thread_frame_pop(&dtf);
 
-       _dispatch_barrier_sync_f_invoke_inline(dq, ctxt, func, pp);
+       // communicate back to _dispatch_sync_wait who the thread bound queue
+       // was so that we skip it during _dispatch_sync_complete_recurse
+       dsc->dc_other = cq;
+       dsc->dsc_func = NULL;
+       _dispatch_thread_event_signal(&dsc->dsc_event); // release
 }
+#endif
 
 DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_barrier_sync_f2(dispatch_queue_t dq, void *ctxt,
-               dispatch_function_t func, pthread_priority_t pp)
+static inline uint64_t
+_dispatch_sync_wait_prepare(dispatch_queue_t dq)
 {
-       if (slowpath(!_dispatch_queue_try_acquire_barrier_sync(dq))) {
-               // global concurrent queues and queues bound to non-dispatch threads
-               // always fall into the slow case
-               return _dispatch_barrier_sync_f_slow(dq, ctxt, func, pp);
-       }
-       //
-       // TODO: the more correct thing to do would be to set dq_override to the qos
-       // of the thread that just acquired the barrier lock here. Unwinding that
-       // would slow down the uncontended fastpath however.
-       //
-       // The chosen tradeoff is that if an enqueue on a lower priority thread
-       // contends with this fastpath, this thread may receive a useless override.
-       // Improving this requires the override level to be part of the atomic
-       // dq_state
-       //
-       _dispatch_barrier_sync_f_invoke_inline(dq, ctxt, func, pp);
+       uint64_t old_state, new_state;
+
+       os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
+               if (_dq_state_is_suspended(old_state) ||
+                               !_dq_state_is_base_wlh(old_state)) {
+                       os_atomic_rmw_loop_give_up(return old_state);
+               }
+               if (!_dq_state_drain_locked(old_state) ||
+                               _dq_state_in_sync_transfer(old_state)) {
+                       os_atomic_rmw_loop_give_up(return old_state);
+               }
+               new_state = old_state | DISPATCH_QUEUE_RECEIVED_SYNC_WAIT;
+       });
+       return new_state;
 }
 
-DISPATCH_NOINLINE
 static void
-_dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt,
-               dispatch_function_t func, pthread_priority_t pp)
+_dispatch_sync_waiter_compute_wlh(dispatch_queue_t dq,
+               dispatch_sync_context_t dsc)
 {
-       _dispatch_barrier_sync_f2(dq, ctxt, func, pp);
-}
+       bool needs_locking = _dispatch_queue_is_legacy(dq);
 
-DISPATCH_NOINLINE
-void
-dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt,
-               dispatch_function_t func)
-{
-       _dispatch_barrier_sync_f2(dq, ctxt, func, 0);
+       if (needs_locking) {
+               dsc->dsc_release_storage = true;
+               _dispatch_queue_sidelock_lock(dq);
+       }
+
+       dispatch_queue_t tq = dq->do_targetq;
+       uint64_t dq_state = _dispatch_sync_wait_prepare(tq);
+
+       if (_dq_state_is_suspended(dq_state) ||
+                       _dq_state_is_base_anon(dq_state)) {
+               dsc->dsc_release_storage = false;
+               dsc->dc_data = DISPATCH_WLH_ANON;
+       } else if (_dq_state_is_base_wlh(dq_state)) {
+               if (dsc->dsc_release_storage) {
+                       _dispatch_queue_retain_storage(tq);
+               }
+               dsc->dc_data = (dispatch_wlh_t)tq;
+       } else {
+               _dispatch_sync_waiter_compute_wlh(tq, dsc);
+       }
+       if (needs_locking) _dispatch_queue_sidelock_unlock(dq);
 }
 
-#ifdef __BLOCKS__
 DISPATCH_NOINLINE
 static void
-_dispatch_sync_block_with_private_data(dispatch_queue_t dq,
-               void (^work)(void), dispatch_block_flags_t flags)
+_dispatch_sync_wait(dispatch_queue_t top_dq, void *ctxt,
+               dispatch_function_t func, uintptr_t top_dc_flags,
+               dispatch_queue_t dq, uintptr_t dc_flags)
 {
-       pthread_priority_t pp = _dispatch_block_get_priority(work);
+       pthread_priority_t pp = _dispatch_get_priority();
+       dispatch_tid tid = _dispatch_tid_self();
+       dispatch_qos_t qos;
+       uint64_t dq_state;
 
-       flags |= _dispatch_block_get_flags(work);
-       if (flags & DISPATCH_BLOCK_HAS_PRIORITY) {
-               pthread_priority_t tp = _dispatch_get_priority();
-               tp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
-               if (pp < tp) {
-                       pp = tp | _PTHREAD_PRIORITY_ENFORCE_FLAG;
-               } else if (_dispatch_block_sync_should_enforce_qos_class(flags)) {
-                       pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG;
-               }
+       dq_state = _dispatch_sync_wait_prepare(dq);
+       if (unlikely(_dq_state_drain_locked_by(dq_state, tid))) {
+               DISPATCH_CLIENT_CRASH((uintptr_t)dq_state,
+                               "dispatch_sync called on queue "
+                               "already owned by current thread");
        }
-       // balanced in d_block_sync_invoke or d_block_wait
-       if (os_atomic_cmpxchg2o(_dispatch_block_get_data(work),
-                       dbpd_queue, NULL, dq->_as_oq, relaxed)) {
-               _dispatch_retain(dq);
+
+       struct dispatch_sync_context_s dsc = {
+               .dc_flags    = dc_flags | DISPATCH_OBJ_SYNC_WAITER_BIT,
+               .dc_other    = top_dq,
+               .dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG,
+               .dc_voucher  = DISPATCH_NO_VOUCHER,
+               .dsc_func    = func,
+               .dsc_ctxt    = ctxt,
+               .dsc_waiter  = tid,
+       };
+       if (_dq_state_is_suspended(dq_state) ||
+                       _dq_state_is_base_anon(dq_state)) {
+               dsc.dc_data = DISPATCH_WLH_ANON;
+       } else if (_dq_state_is_base_wlh(dq_state)) {
+               dsc.dc_data = (dispatch_wlh_t)dq;
+       } else {
+               _dispatch_sync_waiter_compute_wlh(dq, &dsc);
        }
-       if (flags & DISPATCH_BLOCK_BARRIER) {
-               _dispatch_barrier_sync_f(dq, work, _dispatch_block_sync_invoke, pp);
+#if DISPATCH_COCOA_COMPAT
+       // It's preferred to execute synchronous blocks on the current thread
+       // due to thread-local side effects, etc. However, blocks submitted
+       // to the main thread MUST be run on the main thread
+       //
+       // Since we don't know whether that will happen, save the frame linkage
+       // for the sake of _dispatch_sync_thread_bound_invoke
+       _dispatch_thread_frame_save_state(&dsc.dsc_dtf);
+
+       // Since the continuation doesn't have the CONSUME bit, the voucher will
+       // be retained on adoption by the thread-bound queue, if that happens,
+       // so we can borrow this thread's reference
+       dsc.dc_voucher = _voucher_get();
+       dsc.dc_func = _dispatch_sync_thread_bound_invoke;
+       dsc.dc_ctxt = &dsc;
+#endif
+
+       if (dsc.dc_data == DISPATCH_WLH_ANON) {
+               dsc.dsc_override_qos_floor = dsc.dsc_override_qos =
+                               _dispatch_get_basepri_override_qos_floor();
+               qos = _dispatch_qos_from_pp(pp);
+               _dispatch_thread_event_init(&dsc.dsc_event);
        } else {
-               _dispatch_sync_f(dq, work, _dispatch_block_sync_invoke, pp);
+               qos = 0;
+       }
+       _dispatch_queue_push_sync_waiter(dq, &dsc, qos);
+       if (dsc.dc_data == DISPATCH_WLH_ANON) {
+               _dispatch_thread_event_wait(&dsc.dsc_event); // acquire
+               _dispatch_thread_event_destroy(&dsc.dsc_event);
+               // If _dispatch_sync_waiter_wake() gave this thread an override,
+               // ensure that the root queue sees it.
+               if (dsc.dsc_override_qos > dsc.dsc_override_qos_floor) {
+                       _dispatch_set_basepri_override_qos(dsc.dsc_override_qos);
+               }
+       } else {
+               _dispatch_event_loop_wait_for_ownership(&dsc);
        }
-}
-
-void
-dispatch_barrier_sync(dispatch_queue_t dq, void (^work)(void))
-{
-       if (slowpath(_dispatch_block_has_private_data(work))) {
-               dispatch_block_flags_t flags = DISPATCH_BLOCK_BARRIER;
-               return _dispatch_sync_block_with_private_data(dq, work, flags);
+       _dispatch_introspection_sync_begin(top_dq);
+#if DISPATCH_COCOA_COMPAT
+       if (unlikely(dsc.dsc_func == NULL)) {
+               // Queue bound to a non-dispatch thread, the continuation already ran
+               // so just unlock all the things, except for the thread bound queue
+               dispatch_queue_t bound_dq = dsc.dc_other;
+               return _dispatch_sync_complete_recurse(top_dq, bound_dq, top_dc_flags);
        }
-       dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work));
-}
 #endif
+       _dispatch_sync_invoke_and_complete_recurse(top_dq, ctxt, func, top_dc_flags);
+}
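
The DISPATCH_COCOA_COMPAT path exists because work submitted synchronously
to a thread-bound queue (the main queue being the usual case) must run on
the bound thread, not the caller. Seen from the public API, the contract is
simply (pthread_main_np() is the macOS/BSD-only check used for illustration):

    #include <dispatch/dispatch.h>
    #include <pthread.h>
    #include <assert.h>

    static void
    sync_to_main_sketch(void)
    {
        // When called from a background queue, the body executes on the
        // main thread while the caller blocks in the sync-wait machinery.
        dispatch_sync(dispatch_get_main_queue(), ^{
            assert(pthread_main_np() != 0);
        });
    }
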
 
 DISPATCH_NOINLINE
-void
-_dispatch_barrier_trysync_or_async_f(dispatch_queue_t dq, void *ctxt,
-               dispatch_function_t func)
+static void
+_dispatch_sync_f_slow(dispatch_queue_t dq, void *ctxt,
+               dispatch_function_t func, uintptr_t dc_flags)
 {
-       // Use for mutation of queue-/source-internal state only, ignores target
-       // queue hierarchy!
-       if (!fastpath(_dispatch_queue_try_acquire_barrier_sync(dq))) {
-               return _dispatch_barrier_async_detached_f(dq, ctxt, func);
+       if (unlikely(!dq->do_targetq)) {
+               return _dispatch_sync_function_invoke(dq, ctxt, func);
        }
-       // skip the recursion because it's about the queue state only
-       _dispatch_barrier_sync_f_invoke(dq, ctxt, func);
+       _dispatch_sync_wait(dq, ctxt, func, dc_flags, dq, dc_flags);
 }
 
 #pragma mark -
-#pragma mark dispatch_sync
+#pragma mark dispatch_sync / dispatch_barrier_sync
 
 DISPATCH_NOINLINE
 static void
-_dispatch_non_barrier_complete(dispatch_queue_t dq)
+_dispatch_sync_recurse(dispatch_queue_t dq, void *ctxt,
+               dispatch_function_t func, uintptr_t dc_flags)
 {
-       uint64_t old_state, new_state;
+       dispatch_tid tid = _dispatch_tid_self();
+       dispatch_queue_t tq = dq->do_targetq;
 
-       os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
-               new_state = old_state - DISPATCH_QUEUE_WIDTH_INTERVAL;
-               if (_dq_state_is_runnable(new_state)) {
-                       if (!_dq_state_is_runnable(old_state)) {
-                               // we're making a FULL -> non FULL transition
-                               new_state |= DISPATCH_QUEUE_DIRTY;
+       do {
+               if (likely(tq->dq_width == 1)) {
+                       if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(tq, tid))) {
+                               return _dispatch_sync_wait(dq, ctxt, func, dc_flags, tq,
+                                               DISPATCH_OBJ_BARRIER_BIT);
                        }
-                       if (!_dq_state_drain_locked(new_state)) {
-                               uint64_t full_width = new_state;
-                               if (_dq_state_has_pending_barrier(new_state)) {
-                                       full_width -= DISPATCH_QUEUE_PENDING_BARRIER;
-                                       full_width += DISPATCH_QUEUE_WIDTH_INTERVAL;
-                                       full_width += DISPATCH_QUEUE_IN_BARRIER;
-                               } else {
-                                       full_width += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
-                                       full_width += DISPATCH_QUEUE_IN_BARRIER;
-                               }
-                               if ((full_width & DISPATCH_QUEUE_WIDTH_MASK) ==
-                                               DISPATCH_QUEUE_WIDTH_FULL_BIT) {
-                                       new_state = full_width;
-                                       new_state &= ~DISPATCH_QUEUE_DIRTY;
-                                       new_state |= _dispatch_tid_self();
-                               }
+               } else {
+                       if (unlikely(!_dispatch_queue_try_reserve_sync_width(tq))) {
+                               return _dispatch_sync_wait(dq, ctxt, func, dc_flags, tq, 0);
                        }
                }
-       });
+               tq = tq->do_targetq;
+       } while (unlikely(tq->do_targetq));
 
-       if (_dq_state_is_in_barrier(new_state)) {
-               return _dispatch_try_lock_transfer_or_wakeup(dq);
-       }
-       if (!_dq_state_is_runnable(old_state)) {
-               _dispatch_queue_try_wakeup(dq, new_state,
-                               DISPATCH_WAKEUP_WAITER_HANDOFF);
-       }
+       return _dispatch_sync_invoke_and_complete_recurse(dq, ctxt, func, dc_flags);
 }
 
 DISPATCH_NOINLINE
-static void
-_dispatch_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func,
-               pthread_priority_t pp)
-{
-       dispatch_assert(dq->do_targetq);
-       if (!pp) {
-               pp = _dispatch_get_priority();
-               pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
-               pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG;
-       }
-       dispatch_thread_event_s event;
-       _dispatch_thread_event_init(&event);
-       uint32_t th_self = _dispatch_tid_self();
-       struct dispatch_continuation_s dc = {
-               .dc_flags = DISPATCH_OBJ_SYNC_SLOW_BIT,
-#if DISPATCH_INTROSPECTION
-               .dc_func = func,
-               .dc_ctxt = ctxt,
-#endif
-               .dc_data = (void*)(uintptr_t)th_self,
-               .dc_other = &event,
-               .dc_priority = pp,
-               .dc_voucher = DISPATCH_NO_VOUCHER,
-       };
+void
+dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt,
+               dispatch_function_t func)
+{
+       dispatch_tid tid = _dispatch_tid_self();
 
-       uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
-       if (unlikely(_dq_state_drain_locked_by(dq_state, th_self))) {
-               DISPATCH_CLIENT_CRASH(dq, "dispatch_sync called on queue "
-                               "already owned by current thread");
+       // The more correct thing to do would be to merge the qos of the thread
+       // that just acquired the barrier lock into the queue state.
+       //
+       // However this is too expensive for the fastpath, so skip doing it.
+       // The chosen tradeoff is that if an enqueue on a lower priority thread
+       // contends with this fastpath, this thread may receive a useless override.
+       //
+       // Global concurrent queues and queues bound to non-dispatch threads
+       // always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
+       if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dq, tid))) {
+               return _dispatch_sync_f_slow(dq, ctxt, func, DISPATCH_OBJ_BARRIER_BIT);
        }
 
-       _dispatch_continuation_push_sync_slow(dq, &dc);
-       _dispatch_thread_event_wait(&event); // acquire
-       _dispatch_thread_event_destroy(&event);
-       if (_dispatch_queue_received_override(dq, pp)) {
-               // Ensure that the root queue sees that this thread was overridden.
-               // pairs with the _dispatch_wqthread_override_start in
-               // _dispatch_continuation_slow_item_signal
-               _dispatch_set_defaultpriority_override();
+       _dispatch_introspection_sync_begin(dq);
+       if (unlikely(dq->do_targetq->do_targetq)) {
+               return _dispatch_sync_recurse(dq, ctxt, func, DISPATCH_OBJ_BARRIER_BIT);
        }
-       _dispatch_non_barrier_sync_f_invoke_inline(dq, ctxt, func, pp);
+       _dispatch_queue_barrier_sync_invoke_and_complete(dq, ctxt, func);
 }
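
Together with dispatch_sync_f() below, this entry point yields the classic
reader-writer pattern on a concurrent queue. A minimal public-API sketch
(queue label and values are illustrative):

    #include <dispatch/dispatch.h>

    static int shared_value;

    static void writer(void *ctxt) { shared_value = *(int *)ctxt; }
    static void reader(void *ctxt) { *(int *)ctxt = shared_value; }

    static int
    rw_sketch(void)
    {
        dispatch_queue_t q = dispatch_queue_create("com.example.rw",
                DISPATCH_QUEUE_CONCURRENT);
        int in = 42, out = 0;
        dispatch_barrier_sync_f(q, &in, writer); // excludes every reader
        dispatch_sync_f(q, &out, reader);        // may run beside readers
        dispatch_release(q);
        return out;
    }
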
 
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_sync_f2(dispatch_queue_t dq, void *ctxt, dispatch_function_t func,
-               pthread_priority_t pp)
+DISPATCH_NOINLINE
+void
+dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func)
 {
-       // <rdar://problem/24738102&24743140> reserving non barrier width
-       // doesn't fail if only the ENQUEUED bit is set (unlike its barrier width
-       // equivalent), so we have to check that this thread hasn't enqueued
-       // anything ahead of this call or we can break ordering
-       if (slowpath(dq->dq_items_tail)) {
-               return _dispatch_sync_f_slow(dq, ctxt, func, pp);
+       if (likely(dq->dq_width == 1)) {
+               return dispatch_barrier_sync_f(dq, ctxt, func);
        }
-       // concurrent queues do not respect width on sync
-       if (slowpath(!_dispatch_queue_try_reserve_sync_width(dq))) {
-               return _dispatch_sync_f_slow(dq, ctxt, func, pp);
+
+       // Global concurrent queues and queues bound to non-dispatch threads
+       // always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
+       if (unlikely(!_dispatch_queue_try_reserve_sync_width(dq))) {
+               return _dispatch_sync_f_slow(dq, ctxt, func, 0);
+       }
+
+       _dispatch_introspection_sync_begin(dq);
+       if (unlikely(dq->do_targetq->do_targetq)) {
+               return _dispatch_sync_recurse(dq, ctxt, func, 0);
        }
-       _dispatch_non_barrier_sync_f_invoke_inline(dq, ctxt, func, pp);
+       _dispatch_sync_invoke_and_complete(dq, ctxt, func);
 }
 
+#ifdef __BLOCKS__
 DISPATCH_NOINLINE
 static void
-_dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func,
-               pthread_priority_t pp)
+_dispatch_sync_block_with_private_data(dispatch_queue_t dq,
+               dispatch_block_t work, dispatch_block_flags_t flags)
 {
-       if (DISPATCH_QUEUE_USES_REDIRECTION(dq->dq_width)) {
-               return _dispatch_sync_f2(dq, ctxt, func, pp);
+       dispatch_block_private_data_t dbpd = _dispatch_block_get_data(work);
+       pthread_priority_t op = 0, p = 0;
+
+       flags |= dbpd->dbpd_flags;
+       op = _dispatch_block_invoke_should_set_priority(flags, dbpd->dbpd_priority);
+       if (op) {
+               p = dbpd->dbpd_priority;
+       }
+       voucher_t ov, v = DISPATCH_NO_VOUCHER;
+       if (flags & DISPATCH_BLOCK_HAS_VOUCHER) {
+               v = dbpd->dbpd_voucher;
+       }
+       ov = _dispatch_set_priority_and_voucher(p, v, 0);
+
+       // balanced in d_block_sync_invoke or d_block_wait
+       if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq->_as_oq, relaxed)) {
+               _dispatch_retain_2(dq);
        }
-       return _dispatch_barrier_sync_f(dq, ctxt, func, pp);
+       if (flags & DISPATCH_BLOCK_BARRIER) {
+               dispatch_barrier_sync_f(dq, work, _dispatch_block_sync_invoke);
+       } else {
+               dispatch_sync_f(dq, work, _dispatch_block_sync_invoke);
+       }
+       _dispatch_reset_priority_and_voucher(op, ov);
 }
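
A block carries private data (priority, voucher, group linkage) when it was
wrapped by dispatch_block_create() or a variant, which is what routes
dispatch_sync/dispatch_barrier_sync through the function above. For example,
with a QoS-carrying block (public API; the QoS class chosen here is
illustrative, non-ARC):

    #include <dispatch/dispatch.h>
    #include <Block.h>

    static void
    qos_sync_sketch(dispatch_queue_t q)
    {
        // ENFORCE_QOS_CLASS makes the block's own QoS win over the queue's;
        // the sync path applies it around the invoke as shown above.
        dispatch_block_t b = dispatch_block_create_with_qos_class(
                DISPATCH_BLOCK_ENFORCE_QOS_CLASS, QOS_CLASS_UTILITY, 0, ^{
            /* runs at utility QoS */
        });
        dispatch_sync(q, b);
        Block_release(b);
    }
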
 
-DISPATCH_NOINLINE
 void
-dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func)
+dispatch_barrier_sync(dispatch_queue_t dq, dispatch_block_t work)
 {
-       if (DISPATCH_QUEUE_USES_REDIRECTION(dq->dq_width)) {
-               return _dispatch_sync_f2(dq, ctxt, func, 0);
+       if (unlikely(_dispatch_block_has_private_data(work))) {
+               dispatch_block_flags_t flags = DISPATCH_BLOCK_BARRIER;
+               return _dispatch_sync_block_with_private_data(dq, work, flags);
        }
-       return dispatch_barrier_sync_f(dq, ctxt, func);
+       dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work));
 }
 
-#ifdef __BLOCKS__
 void
-dispatch_sync(dispatch_queue_t dq, void (^work)(void))
+dispatch_sync(dispatch_queue_t dq, dispatch_block_t work)
 {
-       if (slowpath(_dispatch_block_has_private_data(work))) {
+       if (unlikely(_dispatch_block_has_private_data(work))) {
                return _dispatch_sync_block_with_private_data(dq, work, 0);
        }
        dispatch_sync_f(dq, work, _dispatch_Block_invoke(work));
 }
-#endif
+#endif // __BLOCKS__
 
 #pragma mark -
 #pragma mark dispatch_trysync
 
-struct trysync_context {
-       dispatch_queue_t tc_dq;
-       void *tc_ctxt;
-       dispatch_function_t tc_func;
-};
-
 DISPATCH_NOINLINE
-static int
-_dispatch_trysync_recurse(dispatch_queue_t dq,
-               struct trysync_context *tc, bool barrier)
+static void
+_dispatch_barrier_trysync_or_async_f_complete(dispatch_queue_t dq,
+               void *ctxt, dispatch_function_t func, uint32_t flags)
 {
-       dispatch_queue_t tq = dq->do_targetq;
+       dispatch_wakeup_flags_t wflags = DISPATCH_WAKEUP_BARRIER_COMPLETE;
 
-       if (barrier) {
-               if (slowpath(!_dispatch_queue_try_acquire_barrier_sync(dq))) {
-                       return EWOULDBLOCK;
-               }
-       } else {
-               // <rdar://problem/24743140> check nothing was queued by the current
-               // thread ahead of this call. _dispatch_queue_try_reserve_sync_width
-               // ignores the ENQUEUED bit which could cause it to miss a barrier_async
-               // made by the same thread just before.
-               if (slowpath(dq->dq_items_tail)) {
-                       return EWOULDBLOCK;
-               }
-               // concurrent queues do not respect width on sync
-               if (slowpath(!_dispatch_queue_try_reserve_sync_width(dq))) {
-                       return EWOULDBLOCK;
+       _dispatch_sync_function_invoke_inline(dq, ctxt, func);
+       if (flags & DISPATCH_BARRIER_TRYSYNC_SUSPEND) {
+               uint64_t dq_state = os_atomic_sub2o(dq, dq_state,
+                               DISPATCH_QUEUE_SUSPEND_INTERVAL, relaxed);
+               if (!_dq_state_is_suspended(dq_state)) {
+                       wflags |= DISPATCH_WAKEUP_CONSUME_2;
                }
        }
-
-       int rc = 0;
-       if (_dispatch_queue_cannot_trysync(tq)) {
-               _dispatch_queue_atomic_flags_set(dq, DQF_CANNOT_TRYSYNC);
-               rc = ENOTSUP;
-       } else if (tq->do_targetq) {
-               rc = _dispatch_trysync_recurse(tq, tc, tq->dq_width == 1);
-               if (rc == ENOTSUP) {
-                       _dispatch_queue_atomic_flags_set(dq, DQF_CANNOT_TRYSYNC);
-               }
-       } else {
-               dispatch_thread_frame_s dtf;
-               _dispatch_thread_frame_push(&dtf, tq);
-               _dispatch_sync_function_invoke(tc->tc_dq, tc->tc_ctxt, tc->tc_func);
-               _dispatch_thread_frame_pop(&dtf);
-       }
-       if (barrier) {
-               _dispatch_barrier_complete(dq);
-       } else {
-               _dispatch_non_barrier_complete(dq);
-       }
-       return rc;
+       dx_wakeup(dq, 0, wflags);
 }
 
+// Use for mutation of queue-/source-internal state only;
+// this ignores the target queue hierarchy!
 DISPATCH_NOINLINE
-bool
-_dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt,
-               dispatch_function_t f)
+void
+_dispatch_barrier_trysync_or_async_f(dispatch_queue_t dq, void *ctxt,
+               dispatch_function_t func, uint32_t flags)
 {
-       if (slowpath(!dq->do_targetq)) {
-               _dispatch_sync_function_invoke(dq, ctxt, f);
-               return true;
+       dispatch_tid tid = _dispatch_tid_self();
+       uint64_t suspend_count = (flags & DISPATCH_BARRIER_TRYSYNC_SUSPEND) ? 1 : 0;
+       if (unlikely(!_dispatch_queue_try_acquire_barrier_sync_and_suspend(dq, tid,
+                       suspend_count))) {
+               return _dispatch_barrier_async_detached_f(dq, ctxt, func);
        }
-       if (slowpath(_dispatch_queue_cannot_trysync(dq))) {
-               return false;
+       if (flags & DISPATCH_BARRIER_TRYSYNC_SUSPEND) {
+               _dispatch_retain_2(dq); // see _dispatch_queue_suspend
        }
-       struct trysync_context tc = {
-               .tc_dq = dq,
-               .tc_func = f,
-               .tc_ctxt = ctxt,
-       };
-       return _dispatch_trysync_recurse(dq, &tc, true) == 0;
+       _dispatch_barrier_trysync_or_async_f_complete(dq, ctxt, func, flags);
 }
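 // Editorial summary: this is a "lock or defer" pattern --
 //     if the barrier lock is acquired, func runs inline and the queue is
 //     woken with BARRIER_COMPLETE; otherwise func is enqueued as a detached
 //     barrier. Either way func runs exactly once; only the thread differs.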
 
 DISPATCH_NOINLINE
-bool
-_dispatch_trysync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t f)
-{
-       if (slowpath(!dq->do_targetq)) {
-               _dispatch_sync_function_invoke(dq, ctxt, f);
-               return true;
-       }
-       if (slowpath(_dispatch_queue_cannot_trysync(dq))) {
-               return false;
-       }
-       struct trysync_context tc = {
-               .tc_dq = dq,
-               .tc_func = f,
-               .tc_ctxt = ctxt,
-       };
-       return _dispatch_trysync_recurse(dq, &tc, dq->dq_width == 1) == 0;
-}
-
-#pragma mark -
-#pragma mark dispatch_after
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
-               void *ctxt, void *handler, bool block)
+static long
+_dispatch_trysync_recurse(dispatch_queue_t dq, void *ctxt,
+               dispatch_function_t f, uintptr_t dc_flags)
 {
-       dispatch_source_t ds;
-       uint64_t leeway, delta;
+       dispatch_tid tid = _dispatch_tid_self();
+       dispatch_queue_t q, tq = dq->do_targetq;
 
-       if (when == DISPATCH_TIME_FOREVER) {
-#if DISPATCH_DEBUG
-               DISPATCH_CLIENT_CRASH(0, "dispatch_after called with 'when' == infinity");
-#endif
-               return;
-       }
-
-       delta = _dispatch_timeout(when);
-       if (delta == 0) {
-               if (block) {
-                       return dispatch_async(queue, handler);
+       for (;;) {
+               if (likely(tq->do_targetq == NULL)) {
+                       _dispatch_sync_invoke_and_complete_recurse(dq, ctxt, f, dc_flags);
+                       return true;
+               }
+               if (unlikely(_dispatch_queue_cannot_trysync(tq))) {
+                       for (q = dq; q != tq; q = q->do_targetq) {
+                               _dispatch_queue_atomic_flags_set(q, DQF_CANNOT_TRYSYNC);
+                       }
+                       break;
+               }
+               if (likely(tq->dq_width == 1)) {
+                       if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(tq, tid))) {
+                               break;
+                       }
+               } else {
+                       if (unlikely(!_dispatch_queue_try_reserve_sync_width(tq))) {
+                               break;
+                       }
                }
-               return dispatch_async_f(queue, ctxt, handler);
+               tq = tq->do_targetq;
        }
-       leeway = delta / 10; // <rdar://problem/13447496>
 
-       if (leeway < NSEC_PER_MSEC) leeway = NSEC_PER_MSEC;
-       if (leeway > 60 * NSEC_PER_SEC) leeway = 60 * NSEC_PER_SEC;
-
-       // this function can and should be optimized to not use a dispatch source
-       ds = dispatch_source_create(&_dispatch_source_type_after, 0, 0, queue);
-       dispatch_assert(ds);
-
-       dispatch_continuation_t dc = _dispatch_continuation_alloc();
-       if (block) {
-               _dispatch_continuation_init(dc, ds, handler, 0, 0, 0);
-       } else {
-               _dispatch_continuation_init_f(dc, ds, ctxt, handler, 0, 0, 0);
-       }
-       // reference `ds` so that it doesn't show up as a leak
-       dc->dc_data = ds;
-       _dispatch_source_set_event_handler_continuation(ds, dc);
-       dispatch_source_set_timer(ds, when, DISPATCH_TIME_FOREVER, leeway);
-       dispatch_activate(ds);
+       _dispatch_sync_complete_recurse(dq, tq, dc_flags);
+       return false;
 }
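 // Worked example (hypothetical hierarchy): for dq -> tq1 (serial) -> root,
 // the loop above acquires tq1's barrier, then sees the root queue (no
 // target queue) and invokes inline. If any level is contended it breaks
 // out, _dispatch_sync_complete_recurse() unwinds the widths/barriers taken
 // on [dq, tq), and false is returned without running f.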
 
 DISPATCH_NOINLINE
-void
-dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt,
-               dispatch_function_t func)
+long
+_dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt,
+               dispatch_function_t f)
 {
-       _dispatch_after(when, queue, ctxt, func, false);
+       dispatch_tid tid = _dispatch_tid_self();
+       if (unlikely(!dq->do_targetq)) {
+               DISPATCH_CLIENT_CRASH(dq, "_dispatch_trsync called on a root queue");
+       }
+       if (unlikely(_dispatch_queue_cannot_trysync(dq))) {
+               return false;
+       }
+       if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dq, tid))) {
+               return false;
+       }
+       return _dispatch_trysync_recurse(dq, ctxt, f, DISPATCH_OBJ_BARRIER_BIT);
 }
-
-#ifdef __BLOCKS__
-void
-dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
-               dispatch_block_t work)
+
+DISPATCH_NOINLINE
+long
+_dispatch_trysync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t f)
 {
-       _dispatch_after(when, queue, NULL, work, true);
+       if (likely(dq->dq_width == 1)) {
+               return _dispatch_barrier_trysync_f(dq, ctxt, f);
+       }
+       if (unlikely(!dq->do_targetq)) {
+               DISPATCH_CLIENT_CRASH(dq, "_dispatch_trsync called on a root queue");
+       }
+       if (unlikely(_dispatch_queue_cannot_trysync(dq))) {
+               return false;
+       }
+       if (unlikely(!_dispatch_queue_try_reserve_sync_width(dq))) {
+               return false;
+       }
+       return _dispatch_trysync_recurse(dq, ctxt, f, 0);
 }
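 // Editorial note: internal callers treat trysync as best-effort, along the
 // lines of (hypothetical):
 //
 //     if (!_dispatch_trysync_f(dq, ctxt, update_fn)) {
 //             dispatch_async_f(dq, ctxt, update_fn); // deferred, not dropped
 //     }
 //
 // false only ever means "could not run synchronously right now".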
-#endif
 
 #pragma mark -
 #pragma mark dispatch_queue_wakeup
 
 DISPATCH_NOINLINE
 void
-_dispatch_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
+_dispatch_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
                dispatch_wakeup_flags_t flags)
 {
        dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE;
 
+       if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) {
+               return _dispatch_queue_barrier_complete(dq, qos, flags);
+       }
        if (_dispatch_queue_class_probe(dq)) {
                target = DISPATCH_QUEUE_WAKEUP_TARGET;
        }
-       if (target) {
-               return _dispatch_queue_class_wakeup(dq, pp, flags, target);
-       } else if (pp) {
-               return _dispatch_queue_class_override_drainer(dq, pp, flags);
-       } else if (flags & DISPATCH_WAKEUP_CONSUME) {
-               return _dispatch_release_tailcall(dq);
-       }
+       return _dispatch_queue_class_wakeup(dq, qos, flags, target);
 }
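 // (Editorial note: even a DISPATCH_QUEUE_WAKEUP_NONE target goes through
 // _dispatch_queue_class_wakeup() so that QoS overrides and CONSUME_2
 // reference handoffs are applied in one place.)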
 
 #if DISPATCH_COCOA_COMPAT
@@ -4111,59 +4401,60 @@ _dispatch_runloop_queue_set_handle(dispatch_queue_t dq, dispatch_runloop_handle_
 }
 #endif // DISPATCH_COCOA_COMPAT
 
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_qos_t
+_dispatch_runloop_queue_reset_max_qos(dispatch_queue_class_t dqu)
+{
+       uint64_t old_state, clear_bits = DISPATCH_QUEUE_MAX_QOS_MASK |
+                       DISPATCH_QUEUE_RECEIVED_OVERRIDE;
+       old_state = os_atomic_and_orig2o(dqu._dq, dq_state, ~clear_bits, relaxed);
+       return _dq_state_max_qos(old_state);
+}
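+// (Note for readers: os_atomic_and_orig2o returns the state from *before*
+// the AND, so the max-QoS bits being cleared can still be decoded and
+// handed back to the caller in a single atomic step.)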
+
 void
-_dispatch_runloop_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
+_dispatch_runloop_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
                dispatch_wakeup_flags_t flags)
 {
 #if DISPATCH_COCOA_COMPAT
        if (slowpath(_dispatch_queue_atomic_flags(dq) & DQF_RELEASED)) {
                // <rdar://problem/14026816>
-               return _dispatch_queue_wakeup(dq, pp, flags);
+               return _dispatch_queue_wakeup(dq, qos, flags);
        }
 
+       if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) {
+               os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release);
+       }
        if (_dispatch_queue_class_probe(dq)) {
-               return _dispatch_runloop_queue_poke(dq, pp, flags);
+               return _dispatch_runloop_queue_poke(dq, qos, flags);
        }
 
-       pp = _dispatch_queue_reset_override_priority(dq, true);
-       if (pp) {
+       qos = _dispatch_runloop_queue_reset_max_qos(dq);
+       if (qos) {
                mach_port_t owner = DISPATCH_QUEUE_DRAIN_OWNER(dq);
                if (_dispatch_queue_class_probe(dq)) {
-                       _dispatch_runloop_queue_poke(dq, pp, flags);
+                       _dispatch_runloop_queue_poke(dq, qos, flags);
                }
                _dispatch_thread_override_end(owner, dq);
                return;
        }
-       if (flags & DISPATCH_WAKEUP_CONSUME) {
-               return _dispatch_release_tailcall(dq);
+       if (flags & DISPATCH_WAKEUP_CONSUME_2) {
+               return _dispatch_release_2_tailcall(dq);
        }
 #else
-       return _dispatch_queue_wakeup(dq, pp, flags);
+       return _dispatch_queue_wakeup(dq, qos, flags);
 #endif
 }
 
 void
-_dispatch_main_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
+_dispatch_main_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
                dispatch_wakeup_flags_t flags)
 {
 #if DISPATCH_COCOA_COMPAT
        if (_dispatch_queue_is_thread_bound(dq)) {
-               return _dispatch_runloop_queue_wakeup(dq, pp, flags);
+               return _dispatch_runloop_queue_wakeup(dq, qos, flags);
        }
 #endif
-       return _dispatch_queue_wakeup(dq, pp, flags);
-}
-
-void
-_dispatch_root_queue_wakeup(dispatch_queue_t dq,
-               pthread_priority_t pp DISPATCH_UNUSED,
-               dispatch_wakeup_flags_t flags)
-{
-       if (flags & DISPATCH_WAKEUP_CONSUME) {
-               // see _dispatch_queue_push_set_head
-               dispatch_assert(flags & DISPATCH_WAKEUP_FLUSH);
-       }
-       _dispatch_global_queue_poke(dq);
+       return _dispatch_queue_wakeup(dq, qos, flags);
 }
 
 #pragma mark -
@@ -4178,7 +4469,7 @@ _dispatch_runloop_queue_class_poke(dispatch_queue_t dq)
                return;
        }
 
-#if TARGET_OS_MAC
+#if HAVE_MACH
        mach_port_t mp = handle;
        kern_return_t kr = _dispatch_send_wakeup_runloop_thread(mp, 0);
        switch (kr) {
@@ -4203,47 +4494,56 @@ _dispatch_runloop_queue_class_poke(dispatch_queue_t dq)
 
 DISPATCH_NOINLINE
 static void
-_dispatch_runloop_queue_poke(dispatch_queue_t dq,
-               pthread_priority_t pp, dispatch_wakeup_flags_t flags)
+_dispatch_runloop_queue_poke(dispatch_queue_t dq, dispatch_qos_t qos,
+               dispatch_wakeup_flags_t flags)
 {
-       // it's not useful to handle WAKEUP_FLUSH because mach_msg() will have
-       // a release barrier and that when runloop queues stop being thread bound
+       // it's not useful to handle WAKEUP_MAKE_DIRTY because mach_msg() will have
+       // a release barrier, and when runloop queues stop being thread-bound
        // they get a non-optional wake-up to start being a "normal" queue
        // either in _dispatch_runloop_queue_xref_dispose,
        // or in _dispatch_queue_cleanup2() for the main thread.
+       uint64_t old_state, new_state;
 
        if (dq == &_dispatch_main_q) {
                dispatch_once_f(&_dispatch_main_q_handle_pred, dq,
                                _dispatch_runloop_queue_handle_init);
        }
-       _dispatch_queue_override_priority(dq, /* inout */ &pp, /* inout */ &flags);
-       if (flags & DISPATCH_WAKEUP_OVERRIDING) {
-               mach_port_t owner = DISPATCH_QUEUE_DRAIN_OWNER(dq);
+
+       os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
+               new_state = _dq_state_merge_qos(old_state, qos);
+               if (old_state == new_state) {
+                       os_atomic_rmw_loop_give_up(goto no_change);
+               }
+       });
+
+       dispatch_qos_t dq_qos = _dispatch_priority_qos(dq->dq_priority);
+       if (qos > dq_qos) {
+               mach_port_t owner = _dq_state_drain_owner(new_state);
+               pthread_priority_t pp = _dispatch_qos_to_pp(qos);
                _dispatch_thread_override_start(owner, pp, dq);
-               if (flags & DISPATCH_WAKEUP_WAS_OVERRIDDEN) {
+               if (_dq_state_max_qos(old_state) > dq_qos) {
                        _dispatch_thread_override_end(owner, dq);
                }
        }
+no_change:
        _dispatch_runloop_queue_class_poke(dq);
-       if (flags & DISPATCH_WAKEUP_CONSUME) {
-               return _dispatch_release_tailcall(dq);
+       if (flags & DISPATCH_WAKEUP_CONSUME_2) {
+               return _dispatch_release_2_tailcall(dq);
        }
 }
 #endif
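 // Worked example for the poke above (illustrative numbers): if the runloop
 // queue's own QoS is UTILITY and a USER_INITIATED waiter pokes it, the
 // drain owner gets a kernel override at USER_INITIATED; and if an earlier
 // poke had already overridden it above UTILITY, that older override is
 // ended, leaving at most one outstanding override per owner.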
 
 DISPATCH_NOINLINE
 static void
-_dispatch_global_queue_poke_slow(dispatch_queue_t dq, unsigned int n)
+_dispatch_global_queue_poke_slow(dispatch_queue_t dq, int n, int floor)
 {
        dispatch_root_queue_context_t qc = dq->do_ctxt;
-       uint32_t i = n;
-       int r;
-
-       dispatch_once_f(&_dispatch_root_queues_pred, NULL,
-                       _dispatch_root_queues_init_once);
+       int remaining = n;
+       int r = ENOSYS;
 
+       _dispatch_root_queues_init();
        _dispatch_debug_root_queue(dq, __func__);
-#if HAVE_PTHREAD_WORKQUEUES
+#if DISPATCH_USE_WORKQUEUES
 #if DISPATCH_USE_PTHREAD_POOL
        if (qc->dgq_kworkqueue != (void*)(~0ul))
 #endif
@@ -4258,46 +4558,62 @@ _dispatch_global_queue_poke_slow(dispatch_queue_t dq, unsigned int n)
                                r = pthread_workqueue_additem_np(qc->dgq_kworkqueue,
                                                _dispatch_worker_thread4, dq, &wh, &gen_cnt);
                                (void)dispatch_assume_zero(r);
-                       } while (--i);
+                       } while (--remaining);
                        return;
                }
 #endif // DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK
-#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
-               if (!dq->dq_priority) {
-                       r = pthread_workqueue_addthreads_np(qc->dgq_wq_priority,
-                                       qc->dgq_wq_options, (int)i);
-                       (void)dispatch_assume_zero(r);
-                       return;
-               }
-#endif
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-               r = _pthread_workqueue_addthreads((int)i, dq->dq_priority);
-               (void)dispatch_assume_zero(r);
+               r = _pthread_workqueue_addthreads(remaining,
+                               _dispatch_priority_to_pp(dq->dq_priority));
+#elif DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
+               r = pthread_workqueue_addthreads_np(qc->dgq_wq_priority,
+                               qc->dgq_wq_options, remaining);
 #endif
+               (void)dispatch_assume_zero(r);
                return;
        }
-#endif // HAVE_PTHREAD_WORKQUEUES
+#endif // DISPATCH_USE_WORKQUEUES
 #if DISPATCH_USE_PTHREAD_POOL
        dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt;
        if (fastpath(pqc->dpq_thread_mediator.do_vtable)) {
                while (dispatch_semaphore_signal(&pqc->dpq_thread_mediator)) {
-                       if (!--i) {
+                       _dispatch_root_queue_debug("signaled sleeping worker for "
+                                       "global queue: %p", dq);
+                       if (!--remaining) {
                                return;
                        }
                }
        }
-       uint32_t j, t_count;
+
+       bool overcommit = dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
+       if (overcommit) {
+               os_atomic_add2o(qc, dgq_pending, remaining, relaxed);
+       } else {
+               if (!os_atomic_cmpxchg2o(qc, dgq_pending, 0, remaining, relaxed)) {
+                       _dispatch_root_queue_debug("worker thread request still pending for "
+                                       "global queue: %p", dq);
+                       return;
+               }
+       }
+
+       int32_t can_request, t_count;
        // seq_cst with atomic store to tail <rdar://problem/16932833>
        t_count = os_atomic_load2o(qc, dgq_thread_pool_size, ordered);
        do {
-               if (!t_count) {
+               can_request = t_count < floor ? 0 : t_count - floor;
+               if (remaining > can_request) {
+                       _dispatch_root_queue_debug("pthread pool reducing request from %d to %d",
+                                       remaining, can_request);
+                       os_atomic_sub2o(qc, dgq_pending, remaining - can_request, relaxed);
+                       remaining = can_request;
+               }
+               if (remaining == 0) {
                        _dispatch_root_queue_debug("pthread pool is full for root queue: "
                                        "%p", dq);
                        return;
                }
-               j = i > t_count ? t_count : i;
        } while (!os_atomic_cmpxchgvw2o(qc, dgq_thread_pool_size, t_count,
-                       t_count - j, &t_count, acquire));
+                       t_count - remaining, &t_count, acquire));
 
        pthread_attr_t *attr = &pqc->dpq_thread_attr;
        pthread_t tid, *pthr = &tid;
@@ -4307,24 +4623,25 @@ _dispatch_global_queue_poke_slow(dispatch_queue_t dq, unsigned int n)
        }
 #endif
        do {
-               _dispatch_retain(dq);
+               _dispatch_retain(dq); // released in _dispatch_worker_thread
                while ((r = pthread_create(pthr, attr, _dispatch_worker_thread, dq))) {
                        if (r != EAGAIN) {
                                (void)dispatch_assume_zero(r);
                        }
                        _dispatch_temporary_resource_shortage();
                }
-       } while (--j);
+       } while (--remaining);
 #endif // DISPATCH_USE_PTHREAD_POOL
 }
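 // Worked example (illustrative numbers): with dgq_thread_pool_size == 4 and
 // floor == 1, can_request == 3, so a poke for remaining == 6 threads is
 // trimmed to 3 and dgq_pending is debited by the 3 that will never be
 // created; the floor's one slot stays in reserve.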
 
-static inline void
-_dispatch_global_queue_poke_n(dispatch_queue_t dq, unsigned int n)
+DISPATCH_NOINLINE
+void
+_dispatch_global_queue_poke(dispatch_queue_t dq, int n, int floor)
 {
        if (!_dispatch_queue_class_probe(dq)) {
                return;
        }
-#if HAVE_PTHREAD_WORKQUEUES
+#if DISPATCH_USE_WORKQUEUES
        dispatch_root_queue_context_t qc = dq->do_ctxt;
        if (
 #if DISPATCH_USE_PTHREAD_POOL
@@ -4335,39 +4652,129 @@ _dispatch_global_queue_poke_n(dispatch_queue_t dq, unsigned int n)
                                "global queue: %p", dq);
                return;
        }
-#endif // HAVE_PTHREAD_WORKQUEUES
-       return  _dispatch_global_queue_poke_slow(dq, n);
+#endif // DISPATCH_USE_WORKQUEUES
+       return _dispatch_global_queue_poke_slow(dq, n, floor);
 }
 
-static inline void
-_dispatch_global_queue_poke(dispatch_queue_t dq)
+#pragma mark -
+#pragma mark dispatch_queue_drain
+
+void
+_dispatch_continuation_pop(dispatch_object_t dou, dispatch_invoke_context_t dic,
+               dispatch_invoke_flags_t flags, dispatch_queue_t dq)
 {
-       return _dispatch_global_queue_poke_n(dq, 1);
+       _dispatch_continuation_pop_inline(dou, dic, flags, dq);
 }
 
-DISPATCH_NOINLINE
 void
-_dispatch_queue_push_list_slow(dispatch_queue_t dq, unsigned int n)
+_dispatch_continuation_invoke(dispatch_object_t dou, voucher_t ov,
+               dispatch_invoke_flags_t flags)
 {
-       return _dispatch_global_queue_poke_n(dq, n);
+       _dispatch_continuation_invoke_inline(dou, ov, flags);
 }
 
-#pragma mark -
-#pragma mark dispatch_queue_drain
+DISPATCH_NOINLINE
+static void
+_dispatch_return_to_kernel(void)
+{
+       if (unlikely(_dispatch_get_wlh() == DISPATCH_WLH_ANON)) {
+               _dispatch_clear_return_to_kernel();
+       } else {
+               _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE);
+       }
+}
 
 void
-_dispatch_continuation_pop(dispatch_object_t dou, dispatch_queue_t dq,
-               dispatch_invoke_flags_t flags)
+_dispatch_poll_for_events_4launchd(void)
+{
+#if DISPATCH_USE_KEVENT_WORKQUEUE
+       if (_dispatch_get_wlh()) {
+               dispatch_assert(_dispatch_deferred_items_get()->ddi_wlh_servicing);
+               _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE);
+       }
+#endif
+}
+
+#if HAVE_PTHREAD_WORKQUEUE_NARROWING
+static os_atomic(uint64_t) _dispatch_narrowing_deadlines[DISPATCH_QOS_MAX];
+#if !DISPATCH_TIME_UNIT_USES_NANOSECONDS
+static uint64_t _dispatch_narrow_check_interval_cache;
+#endif
+
+DISPATCH_ALWAYS_INLINE
+static inline uint64_t
+_dispatch_narrow_check_interval(void)
 {
-       _dispatch_continuation_pop_inline(dou, dq, flags);
+#if DISPATCH_TIME_UNIT_USES_NANOSECONDS
+       return 50 * NSEC_PER_MSEC;
+#else
+       if (_dispatch_narrow_check_interval_cache == 0) {
+               _dispatch_narrow_check_interval_cache =
+                               _dispatch_time_nano2mach(50 * NSEC_PER_MSEC);
+       }
+       return _dispatch_narrow_check_interval_cache;
+#endif
 }
 
-void
-_dispatch_continuation_invoke(dispatch_object_t dou, voucher_t override_voucher,
-               dispatch_invoke_flags_t flags)
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_queue_drain_init_narrowing_check_deadline(dispatch_invoke_context_t dic,
+               dispatch_priority_t pri)
+{
+       if (_dispatch_priority_qos(pri) &&
+                       !(pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT)) {
+               dic->dic_next_narrow_check = _dispatch_approximate_time() +
+                               _dispatch_narrow_check_interval();
+       }
+}
+
+DISPATCH_NOINLINE
+static bool
+_dispatch_queue_drain_should_narrow_slow(uint64_t now,
+               dispatch_invoke_context_t dic)
+{
+       if (dic->dic_next_narrow_check != DISPATCH_THREAD_IS_NARROWING) {
+               pthread_priority_t pp = _dispatch_get_priority();
+               dispatch_qos_t qos = _dispatch_qos_from_pp(pp);
+               if (unlikely(!qos || qos > countof(_dispatch_narrowing_deadlines))) {
+                       DISPATCH_CLIENT_CRASH(pp, "Thread QoS corruption");
+               }
+               size_t idx = qos - 1; // no entry needed for DISPATCH_QOS_UNSPECIFIED
+               os_atomic(uint64_t) *deadline = &_dispatch_narrowing_deadlines[idx];
+               uint64_t oldval, newval = now + _dispatch_narrow_check_interval();
+
+               dic->dic_next_narrow_check = newval;
+               os_atomic_rmw_loop(deadline, oldval, newval, relaxed, {
+                       if (now < oldval) {
+                               os_atomic_rmw_loop_give_up(return false);
+                       }
+               });
+
+               if (!_pthread_workqueue_should_narrow(pp)) {
+                       return false;
+               }
+               dic->dic_next_narrow_check = DISPATCH_THREAD_IS_NARROWING;
+       }
+       return true;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_queue_drain_should_narrow(dispatch_invoke_context_t dic)
 {
-       _dispatch_continuation_invoke_inline(dou, override_voucher, flags);
+       uint64_t next_check = dic->dic_next_narrow_check;
+       if (unlikely(next_check)) {
+               uint64_t now = _dispatch_approximate_time();
+               if (unlikely(next_check < now)) {
+                       return _dispatch_queue_drain_should_narrow_slow(now, dic);
+               }
+       }
+       return false;
 }
+#else
+#define _dispatch_queue_drain_init_narrowing_check_deadline(dic, pri) ((void)0)
+#define _dispatch_queue_drain_should_narrow(dic)  false
+#endif
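+// Editorial summary of the narrowing protocol: each drainer caches a
+// per-QoS "next check" deadline, the rmw loop lets at most one thread per
+// QoS level consult _pthread_workqueue_should_narrow() per 50ms window,
+// and a thread told to narrow tags itself DISPATCH_THREAD_IS_NARROWING and
+// stops draining at the next work item boundary.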
 
 /*
  * Drain comes in 2 flavours (serial/concurrent) and 2 modes
@@ -4397,86 +4804,109 @@ _dispatch_continuation_invoke(dispatch_object_t dou, voucher_t override_voucher,
  * queue drain moves to the more efficient serial mode.
  */
 DISPATCH_ALWAYS_INLINE
-static dispatch_queue_t
-_dispatch_queue_drain(dispatch_queue_t dq, dispatch_invoke_flags_t flags,
-               uint64_t *owned_ptr, struct dispatch_object_s **dc_out,
-               bool serial_drain)
+static dispatch_queue_wakeup_target_t
+_dispatch_queue_drain(dispatch_queue_t dq, dispatch_invoke_context_t dic,
+               dispatch_invoke_flags_t flags, uint64_t *owned_ptr, bool serial_drain)
 {
        dispatch_queue_t orig_tq = dq->do_targetq;
        dispatch_thread_frame_s dtf;
        struct dispatch_object_s *dc = NULL, *next_dc;
-       uint64_t owned = *owned_ptr;
+       uint64_t dq_state, owned = *owned_ptr;
+
+       if (unlikely(!dq->dq_items_tail)) return NULL;
 
        _dispatch_thread_frame_push(&dtf, dq);
-       if (_dq_state_is_in_barrier(owned)) {
+       if (serial_drain || _dq_state_is_in_barrier(owned)) {
                // we really own `IN_BARRIER + dq->dq_width * WIDTH_INTERVAL`
                // but width can change while draining barrier work items, so we only
                // convert to `dq->dq_width * WIDTH_INTERVAL` when we drop `IN_BARRIER`
                owned = DISPATCH_QUEUE_IN_BARRIER;
+       } else {
+               owned &= DISPATCH_QUEUE_WIDTH_MASK;
        }
 
-       while (dq->dq_items_tail) {
-               dc = _dispatch_queue_head(dq);
-               do {
-                       if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(dq))) {
-                               goto out;
+       dc = _dispatch_queue_head(dq);
+       goto first_iteration;
+
+       for (;;) {
+               dc = next_dc;
+               if (unlikely(dic->dic_deferred)) {
+                       goto out_with_deferred_compute_owned;
+               }
+               if (unlikely(_dispatch_needs_to_return_to_kernel())) {
+                       _dispatch_return_to_kernel();
+               }
+               if (unlikely(!dc)) {
+                       if (!dq->dq_items_tail) {
+                               break;
                        }
-                       if (unlikely(orig_tq != dq->do_targetq)) {
-                               goto out;
+                       dc = _dispatch_queue_head(dq);
+               }
+               if (unlikely(serial_drain != (dq->dq_width == 1))) {
+                       break;
+               }
+               if (unlikely(_dispatch_queue_drain_should_narrow(dic))) {
+                       break;
+               }
+
+first_iteration:
+               dq_state = os_atomic_load(&dq->dq_state, relaxed);
+               if (unlikely(_dq_state_is_suspended(dq_state))) {
+                       break;
+               }
+               if (unlikely(orig_tq != dq->do_targetq)) {
+                       break;
+               }
+
+               if (serial_drain || _dispatch_object_is_barrier(dc)) {
+                       if (!serial_drain && owned != DISPATCH_QUEUE_IN_BARRIER) {
+                               if (!_dispatch_queue_try_upgrade_full_width(dq, owned)) {
+                                       goto out_with_no_width;
+                               }
+                               owned = DISPATCH_QUEUE_IN_BARRIER;
                        }
-                       if (unlikely(serial_drain != (dq->dq_width == 1))) {
-                               goto out;
+                       next_dc = _dispatch_queue_next(dq, dc);
+                       if (_dispatch_object_is_sync_waiter(dc)) {
+                               owned = 0;
+                               dic->dic_deferred = dc;
+                               goto out_with_deferred;
                        }
-                       if (serial_drain || _dispatch_object_is_barrier(dc)) {
-                               if (!serial_drain && owned != DISPATCH_QUEUE_IN_BARRIER) {
-                                       goto out;
-                               }
-                               next_dc = _dispatch_queue_next(dq, dc);
-                               if (_dispatch_object_is_slow_item(dc)) {
-                                       owned = 0;
-                                       goto out_with_deferred;
-                               }
-                       } else {
-                               if (owned == DISPATCH_QUEUE_IN_BARRIER) {
-                                       // we just ran barrier work items, we have to make their
-                                       // effect visible to other sync work items on other threads
-                                       // that may start coming in after this point, hence the
-                                       // release barrier
-                                       os_atomic_and2o(dq, dq_state, ~owned, release);
-                                       owned = dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
-                               } else if (unlikely(owned == 0)) {
-                                       if (_dispatch_object_is_slow_item(dc)) {
-                                               // sync "readers" don't observe the limit
-                                               _dispatch_queue_reserve_sync_width(dq);
-                                       } else if (!_dispatch_queue_try_acquire_async(dq)) {
-                                               goto out_with_no_width;
-                                       }
-                                       owned = DISPATCH_QUEUE_WIDTH_INTERVAL;
-                               }
-
-                               next_dc = _dispatch_queue_next(dq, dc);
-                               if (_dispatch_object_is_slow_item(dc)) {
-                                       owned -= DISPATCH_QUEUE_WIDTH_INTERVAL;
-                                       _dispatch_continuation_slow_item_signal(dq, dc);
-                                       continue;
+               } else {
+                       if (owned == DISPATCH_QUEUE_IN_BARRIER) {
+                               // we just ran barrier work items, we have to make their
+                               // effect visible to other sync work items on other threads
+                               // that may start coming in after this point, hence the
+                               // release barrier
+                               os_atomic_xor2o(dq, dq_state, owned, release);
+                               owned = dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
+                       } else if (unlikely(owned == 0)) {
+                               if (_dispatch_object_is_sync_waiter(dc)) {
+                                       // sync "readers" don't observe the limit
+                                       _dispatch_queue_reserve_sync_width(dq);
+                               } else if (!_dispatch_queue_try_acquire_async(dq)) {
+                                       goto out_with_no_width;
                                }
+                               owned = DISPATCH_QUEUE_WIDTH_INTERVAL;
+                       }
 
-                               if (flags & DISPATCH_INVOKE_REDIRECTING_DRAIN) {
-                                       owned -= DISPATCH_QUEUE_WIDTH_INTERVAL;
-                                       _dispatch_continuation_redirect(dq, dc);
-                                       continue;
-                               }
+                       next_dc = _dispatch_queue_next(dq, dc);
+                       if (_dispatch_object_is_sync_waiter(dc)) {
+                               owned -= DISPATCH_QUEUE_WIDTH_INTERVAL;
+                               _dispatch_sync_waiter_redirect_or_wake(dq,
+                                               DISPATCH_SYNC_WAITER_NO_UNLOCK, dc);
+                               continue;
                        }
 
-                       _dispatch_continuation_pop_inline(dc, dq, flags);
-                       _dispatch_perfmon_workitem_inc();
-                       if (unlikely(dtf.dtf_deferred)) {
-                               goto out_with_deferred_compute_owned;
+                       if (flags & DISPATCH_INVOKE_REDIRECTING_DRAIN) {
+                               owned -= DISPATCH_QUEUE_WIDTH_INTERVAL;
+                               _dispatch_continuation_redirect(dq, dc);
+                               continue;
                        }
-               } while ((dc = next_dc));
+               }
+
+               _dispatch_continuation_pop_inline(dc, dic, flags, dq);
        }
 
-out:
        if (owned == DISPATCH_QUEUE_IN_BARRIER) {
                // if we're IN_BARRIER we really own the full width too
                owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
@@ -4484,14 +4914,15 @@ out:
        if (dc) {
                owned = _dispatch_queue_adjust_owned(dq, owned, dc);
        }
-       *owned_ptr = owned;
+       *owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR;
+       *owned_ptr |= owned;
        _dispatch_thread_frame_pop(&dtf);
        return dc ? dq->do_targetq : NULL;
 
 out_with_no_width:
-       *owned_ptr = 0;
+       *owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR;
        _dispatch_thread_frame_pop(&dtf);
-       return NULL;
+       return DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;
 
 out_with_deferred_compute_owned:
        if (serial_drain) {
@@ -4501,41 +4932,75 @@ out_with_deferred_compute_owned:
                        // if we're IN_BARRIER we really own the full width too
                        owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
                }
-               if (next_dc) {
-                       owned = _dispatch_queue_adjust_owned(dq, owned, next_dc);
+               if (dc) {
+                       owned = _dispatch_queue_adjust_owned(dq, owned, dc);
                }
        }
 out_with_deferred:
-       *owned_ptr = owned;
-       if (unlikely(!dc_out)) {
+       *owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR;
+       *owned_ptr |= owned;
+       if (unlikely(flags & DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS)) {
                DISPATCH_INTERNAL_CRASH(dc,
                                "Deferred continuation on source, mach channel or mgr");
        }
-       *dc_out = dc;
        _dispatch_thread_frame_pop(&dtf);
        return dq->do_targetq;
 }
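 // Editorial note on the *owned_ptr bookkeeping above: the drain hands back
 // only width/IN_BARRIER bits and deliberately preserves the caller's
 // ENQUEUED/ENQUEUED_ON_MGR bits, since enqueue ownership travels with the
 // thread request rather than with the drain itself.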
 
 DISPATCH_NOINLINE
-static dispatch_queue_t
+static dispatch_queue_wakeup_target_t
 _dispatch_queue_concurrent_drain(dispatch_queue_t dq,
-               dispatch_invoke_flags_t flags, uint64_t *owned,
-               struct dispatch_object_s **dc_ptr)
+               dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
+               uint64_t *owned)
 {
-       return _dispatch_queue_drain(dq, flags, owned, dc_ptr, false);
+       return _dispatch_queue_drain(dq, dic, flags, owned, false);
 }
 
 DISPATCH_NOINLINE
-dispatch_queue_t
-_dispatch_queue_serial_drain(dispatch_queue_t dq,
-               dispatch_invoke_flags_t flags, uint64_t *owned,
-               struct dispatch_object_s **dc_ptr)
+dispatch_queue_wakeup_target_t
+_dispatch_queue_serial_drain(dispatch_queue_t dq, dispatch_invoke_context_t dic,
+               dispatch_invoke_flags_t flags, uint64_t *owned)
 {
        flags &= ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN;
-       return _dispatch_queue_drain(dq, flags, owned, dc_ptr, true);
+       return _dispatch_queue_drain(dq, dic, flags, owned, true);
 }
 
 #if DISPATCH_COCOA_COMPAT
+DISPATCH_NOINLINE
+static void
+_dispatch_main_queue_update_priority_from_thread(void)
+{
+       dispatch_queue_t dq = &_dispatch_main_q;
+       uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
+       mach_port_t owner = _dq_state_drain_owner(dq_state);
+
+       dispatch_priority_t main_pri =
+                       _dispatch_priority_from_pp_strip_flags(_dispatch_get_priority());
+       dispatch_qos_t main_qos = _dispatch_priority_qos(main_pri);
+       dispatch_qos_t max_qos = _dq_state_max_qos(dq_state);
+       dispatch_qos_t old_qos = _dispatch_priority_qos(dq->dq_priority);
+
+       // the main thread QoS was adjusted by someone else, learn the new QoS
+       // and reinitialize _dispatch_main_q.dq_priority
+       dq->dq_priority = _dispatch_priority_with_override_qos(main_pri, main_qos);
+
+       if (old_qos < max_qos && main_qos == DISPATCH_QOS_UNSPECIFIED) {
+               // main thread is opted out of QoS and we had an override
+               return _dispatch_thread_override_end(owner, dq);
+       }
+
+       if (old_qos < max_qos && max_qos <= main_qos) {
+               // main QoS was raised, and we had an override which is now useless
+               return _dispatch_thread_override_end(owner, dq);
+       }
+
+       if (main_qos < max_qos && max_qos <= old_qos) {
+               // main thread QoS was lowered, and we actually need an override
+               pthread_priority_t pp = _dispatch_qos_to_pp(max_qos);
+               return _dispatch_thread_override_start(owner, pp, dq);
+       }
+}
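+// Worked example (illustrative): say the queue believed the main thread ran
+// at DEFAULT (old_qos), an override to USER_INITIATED was outstanding
+// (max_qos), and the thread was manually raised to USER_INTERACTIVE
+// (main_qos). Then old_qos < max_qos <= main_qos: the override is now
+// useless and is ended. Conversely, if the thread was lowered below
+// max_qos, an override is started so pending work still runs promptly.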
+
 static void
 _dispatch_main_queue_drain(void)
 {
@@ -4546,45 +5011,54 @@ _dispatch_main_queue_drain(void)
                return;
        }
 
+       _dispatch_perfmon_start_notrace();
        if (!fastpath(_dispatch_queue_is_thread_bound(dq))) {
                DISPATCH_CLIENT_CRASH(0, "_dispatch_main_queue_callback_4CF called"
                                " after dispatch_main()");
        }
-       mach_port_t owner = DISPATCH_QUEUE_DRAIN_OWNER(dq);
-       if (slowpath(owner != _dispatch_tid_self())) {
-               DISPATCH_CLIENT_CRASH(owner, "_dispatch_main_queue_callback_4CF called"
+       uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
+       if (unlikely(!_dq_state_drain_locked_by_self(dq_state))) {
+               DISPATCH_CLIENT_CRASH((uintptr_t)dq_state,
+                               "_dispatch_main_queue_callback_4CF called"
                                " from the wrong thread");
        }
 
        dispatch_once_f(&_dispatch_main_q_handle_pred, dq,
                        _dispatch_runloop_queue_handle_init);
 
-       _dispatch_perfmon_start();
        // <rdar://problem/23256682> hide the frame chaining when CFRunLoop
        // drains the main runloop, as this should not be observable that way
+       _dispatch_adopt_wlh_anon();
        _dispatch_thread_frame_push_and_rebase(&dtf, dq, NULL);
 
-       pthread_priority_t old_pri = _dispatch_get_priority();
-       pthread_priority_t old_dp = _dispatch_set_defaultpriority(old_pri, NULL);
+       pthread_priority_t pp = _dispatch_get_priority();
+       dispatch_priority_t pri = _dispatch_priority_from_pp(pp);
+       dispatch_qos_t qos = _dispatch_priority_qos(pri);
        voucher_t voucher = _voucher_copy();
 
+       if (unlikely(qos != _dispatch_priority_qos(dq->dq_priority))) {
+               _dispatch_main_queue_update_priority_from_thread();
+       }
+       dispatch_priority_t old_dbp = _dispatch_set_basepri(pri);
+       _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED);
+
+       dispatch_invoke_context_s dic = { };
        struct dispatch_object_s *dc, *next_dc, *tail;
        dc = os_mpsc_capture_snapshot(dq, dq_items, &tail);
        do {
                next_dc = os_mpsc_pop_snapshot_head(dc, tail, do_next);
-               _dispatch_continuation_pop_inline(dc, dq, DISPATCH_INVOKE_NONE);
-               _dispatch_perfmon_workitem_inc();
+               _dispatch_continuation_pop_inline(dc, &dic, DISPATCH_INVOKE_NONE, dq);
        } while ((dc = next_dc));
 
-       // runloop based queues use their port for the queue PUBLISH pattern
-       // so this raw call to dx_wakeup(0) is valid
        dx_wakeup(dq, 0, 0);
        _dispatch_voucher_debug("main queue restore", voucher);
-       _dispatch_reset_defaultpriority(old_dp);
-       _dispatch_reset_priority_and_voucher(old_pri, voucher);
+       _dispatch_reset_basepri(old_dbp);
+       _dispatch_reset_basepri_override();
+       _dispatch_reset_priority_and_voucher(pp, voucher);
        _dispatch_thread_frame_pop(&dtf);
-       _dispatch_perfmon_end();
+       _dispatch_reset_wlh();
        _dispatch_force_cache_cleanup();
+       _dispatch_perfmon_end_notrace();
 }
 
 static bool
@@ -4593,230 +5067,107 @@ _dispatch_runloop_queue_drain_one(dispatch_queue_t dq)
        if (!dq->dq_items_tail) {
                return false;
        }
+       _dispatch_perfmon_start_notrace();
        dispatch_thread_frame_s dtf;
-       _dispatch_perfmon_start();
+       bool should_reset_wlh = _dispatch_adopt_wlh_anon_recurse();
        _dispatch_thread_frame_push(&dtf, dq);
-       pthread_priority_t old_pri = _dispatch_get_priority();
-       pthread_priority_t old_dp = _dispatch_set_defaultpriority(old_pri, NULL);
+       pthread_priority_t pp = _dispatch_get_priority();
+       dispatch_priority_t pri = _dispatch_priority_from_pp(pp);
        voucher_t voucher = _voucher_copy();
+       dispatch_priority_t old_dbp = _dispatch_set_basepri(pri);
+       _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED);
 
+       dispatch_invoke_context_s dic = { };
        struct dispatch_object_s *dc, *next_dc;
        dc = _dispatch_queue_head(dq);
        next_dc = _dispatch_queue_next(dq, dc);
-       _dispatch_continuation_pop_inline(dc, dq, DISPATCH_INVOKE_NONE);
-       _dispatch_perfmon_workitem_inc();
+       _dispatch_continuation_pop_inline(dc, &dic, DISPATCH_INVOKE_NONE, dq);
 
        if (!next_dc) {
-               // runloop based queues use their port for the queue PUBLISH pattern
-               // so this raw call to dx_wakeup(0) is valid
                dx_wakeup(dq, 0, 0);
        }
 
        _dispatch_voucher_debug("runloop queue restore", voucher);
-       _dispatch_reset_defaultpriority(old_dp);
-       _dispatch_reset_priority_and_voucher(old_pri, voucher);
+       _dispatch_reset_basepri(old_dbp);
+       _dispatch_reset_basepri_override();
+       _dispatch_reset_priority_and_voucher(pp, voucher);
        _dispatch_thread_frame_pop(&dtf);
-       _dispatch_perfmon_end();
+       if (should_reset_wlh) _dispatch_reset_wlh();
        _dispatch_force_cache_cleanup();
+       _dispatch_perfmon_end_notrace();
        return next_dc;
 }
 #endif
 
-DISPATCH_NOINLINE
-void
-_dispatch_try_lock_transfer_or_wakeup(dispatch_queue_t dq)
-{
-       dispatch_continuation_t dc_tmp, dc_start, dc_end;
-       struct dispatch_object_s *dc = NULL;
-       uint64_t dq_state, owned;
-       size_t count = 0;
-
-       owned  = DISPATCH_QUEUE_IN_BARRIER;
-       owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
-attempt_running_slow_head:
-       if (slowpath(dq->dq_items_tail) && !DISPATCH_QUEUE_IS_SUSPENDED(dq)) {
-               dc = _dispatch_queue_head(dq);
-               if (!_dispatch_object_is_slow_item(dc)) {
-                       // not a slow item, needs to wake up
-               } else if (fastpath(dq->dq_width == 1) ||
-                               _dispatch_object_is_barrier(dc)) {
-                       // rdar://problem/8290662 "barrier/writer lock transfer"
-                       dc_start = dc_end = (dispatch_continuation_t)dc;
-                       owned = 0;
-                       count = 1;
-                       dc = _dispatch_queue_next(dq, dc);
-               } else {
-                       // <rdar://problem/10164594> "reader lock transfer"
-                       // we must not signal semaphores immediately because our right
-                       // for dequeuing is granted through holding the full "barrier" width
-                       // which a signaled work item could relinquish out from our feet
-                       dc_start = (dispatch_continuation_t)dc;
-                       do {
-                               // no check on width here because concurrent queues
-                               // do not respect width for blocked readers, the thread
-                               // is already spent anyway
-                               dc_end = (dispatch_continuation_t)dc;
-                               owned -= DISPATCH_QUEUE_WIDTH_INTERVAL;
-                               count++;
-                               dc = _dispatch_queue_next(dq, dc);
-                       } while (dc && _dispatch_object_is_slow_non_barrier(dc));
-               }
-
-               if (count) {
-                       _dispatch_queue_drain_transfer_lock(dq, owned, dc_start);
-                       do {
-                               // signaled job will release the continuation
-                               dc_tmp = dc_start;
-                               dc_start = dc_start->do_next;
-                               _dispatch_continuation_slow_item_signal(dq, dc_tmp);
-                       } while (dc_tmp != dc_end);
-                       return;
-               }
-       }
-
-       if (dc || dx_metatype(dq) != _DISPATCH_QUEUE_TYPE) {
-               // <rdar://problem/23336992> the following wakeup is needed for sources
-               // or mach channels: when ds_pending_data is set at the same time
-               // as a trysync_f happens, lock transfer code above doesn't know about
-               // ds_pending_data or the wakeup logic, but lock transfer is useless
-               // for sources and mach channels in the first place.
-               owned = _dispatch_queue_adjust_owned(dq, owned, dc);
-               dq_state = _dispatch_queue_drain_unlock(dq, owned, NULL);
-               return _dispatch_queue_try_wakeup(dq, dq_state,
-                               DISPATCH_WAKEUP_WAITER_HANDOFF);
-       } else if (!fastpath(_dispatch_queue_drain_try_unlock(dq, owned))) {
-               // someone enqueued a slow item at the head
-               // looping may be its last chance
-               goto attempt_running_slow_head;
-       }
-}
-
 void
 _dispatch_mgr_queue_drain(void)
 {
        const dispatch_invoke_flags_t flags = DISPATCH_INVOKE_MANAGER_DRAIN;
+       dispatch_invoke_context_s dic = { };
        dispatch_queue_t dq = &_dispatch_mgr_q;
        uint64_t owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED;
 
-       if (dq->dq_items_tail) {
-               _dispatch_perfmon_start();
-               if (slowpath(_dispatch_queue_serial_drain(dq, flags, &owned, NULL))) {
-                       DISPATCH_INTERNAL_CRASH(0, "Interrupted drain on manager queue");
-               }
-               _dispatch_voucher_debug("mgr queue clear", NULL);
-               _voucher_clear();
-               _dispatch_reset_defaultpriority_override();
-               _dispatch_perfmon_end();
-       }
-
-#if DISPATCH_USE_KEVENT_WORKQUEUE
-       if (!_dispatch_kevent_workqueue_enabled)
-#endif
-       {
-               _dispatch_force_cache_cleanup();
-       }
-}
-
-#pragma mark -
-#pragma mark dispatch_queue_invoke
-
-void
-_dispatch_queue_drain_deferred_invoke(dispatch_queue_t dq,
-               dispatch_invoke_flags_t flags, uint64_t to_unlock,
-               struct dispatch_object_s *dc)
-{
-       if (_dispatch_object_is_slow_item(dc)) {
-               dispatch_assert(to_unlock == 0);
-               _dispatch_queue_drain_transfer_lock(dq, to_unlock, dc);
-               _dispatch_continuation_slow_item_signal(dq, dc);
-               return _dispatch_release_tailcall(dq);
-       }
-
-       bool should_defer_again = false, should_pend_queue = true;
-       uint64_t old_state, new_state;
-
-       if (_dispatch_get_current_queue()->do_targetq) {
-               _dispatch_thread_frame_get_current()->dtf_deferred = dc;
-               should_defer_again = true;
-               should_pend_queue = false;
-       }
-
-       if (dq->dq_width > 1) {
-               should_pend_queue = false;
-       } else if (should_pend_queue) {
-               dispatch_assert(to_unlock ==
-                               DISPATCH_QUEUE_WIDTH_INTERVAL + DISPATCH_QUEUE_IN_BARRIER);
-               os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release,{
-                       new_state = old_state;
-                       if (_dq_state_has_waiters(old_state) ||
-                                       _dq_state_is_enqueued(old_state)) {
-                               os_atomic_rmw_loop_give_up(break);
-                       }
-                       new_state += DISPATCH_QUEUE_DRAIN_PENDED;
-                       new_state -= DISPATCH_QUEUE_IN_BARRIER;
-                       new_state -= DISPATCH_QUEUE_WIDTH_INTERVAL;
-               });
-               should_pend_queue = (new_state & DISPATCH_QUEUE_DRAIN_PENDED);
-       }
-
-       if (!should_pend_queue) {
-               if (to_unlock & DISPATCH_QUEUE_IN_BARRIER) {
-                       _dispatch_try_lock_transfer_or_wakeup(dq);
-                       _dispatch_release(dq);
-               } else if (to_unlock) {
-                       uint64_t dq_state = _dispatch_queue_drain_unlock(dq, to_unlock, NULL);
-                       _dispatch_queue_try_wakeup(dq, dq_state, DISPATCH_WAKEUP_CONSUME);
-               } else {
-                       _dispatch_release(dq);
+       if (dq->dq_items_tail) {
+               _dispatch_perfmon_start();
+               _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED);
+               if (slowpath(_dispatch_queue_serial_drain(dq, &dic, flags, &owned))) {
+                       DISPATCH_INTERNAL_CRASH(0, "Interrupted drain on manager queue");
                }
-               dq = NULL;
+               _dispatch_voucher_debug("mgr queue clear", NULL);
+               _voucher_clear();
+               _dispatch_reset_basepri_override();
+               _dispatch_perfmon_end(perfmon_thread_manager);
        }
 
-       if (!should_defer_again) {
-               dx_invoke(dc, flags & _DISPATCH_INVOKE_PROPAGATE_MASK);
+#if DISPATCH_USE_KEVENT_WORKQUEUE
+       if (!_dispatch_kevent_workqueue_enabled)
+#endif
+       {
+               _dispatch_force_cache_cleanup();
        }
+}
 
-       if (dq) {
-               uint32_t self = _dispatch_tid_self();
-               os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release,{
-                       new_state = old_state;
-                       if (!_dq_state_drain_pended(old_state) ||
-                                       _dq_state_drain_owner(old_state) != self) {
-                               os_atomic_rmw_loop_give_up({
-                                       // We may have been overridden, so inform the root queue
-                                       _dispatch_set_defaultpriority_override();
-                                       return _dispatch_release_tailcall(dq);
-                               });
-                       }
-                       new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state);
-               });
-               if (_dq_state_has_override(old_state)) {
-                       // Ensure that the root queue sees that this thread was overridden.
-                       _dispatch_set_defaultpriority_override();
-               }
-               return dx_invoke(dq, flags | DISPATCH_INVOKE_STEALING);
+#pragma mark -
+#pragma mark dispatch_queue_invoke
+
+void
+_dispatch_queue_drain_sync_waiter(dispatch_queue_t dq,
+               dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
+               uint64_t owned)
+{
+       struct dispatch_object_s *dc = dic->dic_deferred;
+       dispatch_assert(_dispatch_object_is_sync_waiter(dc));
+       dic->dic_deferred = NULL;
+       if (flags & DISPATCH_INVOKE_WLH) {
+               // Leave the enqueued bit in place, completion of the last sync waiter
+               // in the handoff chain is responsible for dequeuing
+               //
+               // We currently have a +2 to consume, but we need to keep a +1
+               // for the thread request
+               dispatch_assert(_dq_state_is_enqueued_on_target(owned));
+               dispatch_assert(!_dq_state_is_enqueued_on_manager(owned));
+               owned &= ~DISPATCH_QUEUE_ENQUEUED;
+               _dispatch_release_no_dispose(dq);
+       } else {
+               // The sync waiter must own a reference
+               _dispatch_release_2_no_dispose(dq);
        }
+       return _dispatch_sync_waiter_redirect_or_wake(dq, owned, dc);
 }
 
 void
-_dispatch_queue_finalize_activation(dispatch_queue_t dq)
+_dispatch_queue_finalize_activation(dispatch_queue_t dq,
+               DISPATCH_UNUSED bool *allow_resume)
 {
        dispatch_queue_t tq = dq->do_targetq;
        _dispatch_queue_priority_inherit_from_target(dq, tq);
-       _dispatch_queue_atomic_flags_set(tq, DQF_TARGETED);
-       if (dq->dq_override_voucher == DISPATCH_NO_VOUCHER) {
-               voucher_t v = tq->dq_override_voucher;
-               if (v != DISPATCH_NO_VOUCHER) {
-                       if (v) _voucher_retain(v);
-                       dq->dq_override_voucher = v;
-               }
-       }
+       _dispatch_queue_inherit_wlh_from_target(dq, tq);
 }
 
 DISPATCH_ALWAYS_INLINE
-static inline dispatch_queue_t
-dispatch_queue_invoke2(dispatch_queue_t dq, dispatch_invoke_flags_t flags,
-               uint64_t *owned, struct dispatch_object_s **dc_ptr)
+static inline dispatch_queue_wakeup_target_t
+dispatch_queue_invoke2(dispatch_queue_t dq, dispatch_invoke_context_t dic,
+               dispatch_invoke_flags_t flags, uint64_t *owned)
 {
        dispatch_queue_t otq = dq->do_targetq;
        dispatch_queue_t cq = _dispatch_queue_get_current();
@@ -4825,18 +5176,19 @@ dispatch_queue_invoke2(dispatch_queue_t dq, dispatch_invoke_flags_t flags,
                return otq;
        }
        if (dq->dq_width == 1) {
-               return _dispatch_queue_serial_drain(dq, flags, owned, dc_ptr);
+               return _dispatch_queue_serial_drain(dq, dic, flags, owned);
        }
-       return _dispatch_queue_concurrent_drain(dq, flags, owned, dc_ptr);
+       return _dispatch_queue_concurrent_drain(dq, dic, flags, owned);
 }
 
 // 6618342 Contact the team that owns the Instrument DTrace probe before
 //         renaming this symbol
 DISPATCH_NOINLINE
 void
-_dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_flags_t flags)
+_dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_context_t dic,
+               dispatch_invoke_flags_t flags)
 {
-       _dispatch_queue_class_invoke(dq, flags, dispatch_queue_invoke2);
+       _dispatch_queue_class_invoke(dq, dic, flags, 0, dispatch_queue_invoke2);
 }
 
 #pragma mark -
@@ -4845,16 +5197,16 @@ _dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_flags_t flags)
 #if HAVE_PTHREAD_WORKQUEUE_QOS
 void
 _dispatch_queue_override_invoke(dispatch_continuation_t dc,
-               dispatch_invoke_flags_t flags)
+               dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags)
 {
        dispatch_queue_t old_rq = _dispatch_queue_get_current();
        dispatch_queue_t assumed_rq = dc->dc_other;
+       dispatch_priority_t old_dp;
        voucher_t ov = DISPATCH_NO_VOUCHER;
        dispatch_object_t dou;
 
        dou._do = dc->dc_data;
-       _dispatch_queue_set_current(assumed_rq);
-       flags |= DISPATCH_INVOKE_OVERRIDING;
+       old_dp = _dispatch_root_queue_identity_assume(assumed_rq);
        if (dc_type(dc) == DISPATCH_CONTINUATION_TYPE(OVERRIDE_STEALING)) {
                flags |= DISPATCH_INVOKE_STEALING;
        } else {
@@ -4864,55 +5216,44 @@ _dispatch_queue_override_invoke(dispatch_continuation_t dc,
        }
        _dispatch_continuation_pop_forwarded(dc, ov, DISPATCH_OBJ_CONSUME_BIT, {
                if (_dispatch_object_has_vtable(dou._do)) {
-                       dx_invoke(dou._do, flags);
+                       dx_invoke(dou._do, dic, flags);
                } else {
                        _dispatch_continuation_invoke_inline(dou, ov, flags);
                }
        });
+       _dispatch_reset_basepri(old_dp);
        _dispatch_queue_set_current(old_rq);
 }
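// [Editor's sketch] The save/restore bracket used above, reduced to its
// shape (all names hypothetical): adopt the stolen root queue's identity
// for the duration of the pop, then restore base priority and current
// queue in the reverse order of adoption.
typedef unsigned model_pri_t;
static _Thread_local model_pri_t model_basepri;
static _Thread_local void *model_current_q;

static model_pri_t
model_identity_assume(void *assumed_rq, model_pri_t rq_pri)
{
	model_pri_t old_dp = model_basepri;
	model_current_q = assumed_rq; // _dispatch_queue_set_current(assumed_rq)
	model_basepri = rq_pri;       // adopt the root queue's base priority
	return old_dp;
}

static void
model_identity_restore(model_pri_t old_dp, void *old_rq)
{
	model_basepri = old_dp;   // _dispatch_reset_basepri(old_dp)
	model_current_q = old_rq; // _dispatch_queue_set_current(old_rq)
}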
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dispatch_need_global_root_queue_override(dispatch_queue_t rq,
-               pthread_priority_t pp)
+_dispatch_root_queue_push_needs_override(dispatch_queue_t rq,
+               dispatch_qos_t qos)
 {
-       pthread_priority_t rqp = rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-       bool defaultqueue = rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
+       dispatch_qos_t rqos = _dispatch_priority_qos(rq->dq_priority);
+       bool defaultqueue = rq->dq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE;
 
-       if (unlikely(!rqp)) return false;
+       if (unlikely(!rqos)) return false;
 
-       pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-       return defaultqueue ? pp && pp != rqp : pp > rqp;
+       return defaultqueue ? qos && qos != rqos : qos > rqos;
 }
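// [Editor's sketch] The predicate above in isolation, with hypothetical
// numeric QoS ranks (0 = unspecified, 5 = user-interactive). For a
// DEFAULTQUEUE root, any tagged QoS that differs triggers an override
// push; for an ordinary root, only a strictly higher QoS does.
#include <stdbool.h>
#include <stdio.h>

static bool
model_needs_override(unsigned rqos, bool defaultqueue, unsigned qos)
{
	if (!rqos) return false; // untagged root queue: never override
	return defaultqueue ? (qos && qos != rqos) : (qos > rqos);
}

int main(void) {
	printf("%d\n", model_needs_override(3, false, 4)); // 1: higher QoS
	printf("%d\n", model_needs_override(3, false, 2)); // 0: lower QoS
	printf("%d\n", model_needs_override(3, true, 2));  // 1: differs from default
	printf("%d\n", model_needs_override(0, false, 5)); // 0: untagged root
	return 0;
}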
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dispatch_need_global_root_queue_override_stealer(dispatch_queue_t rq,
-               pthread_priority_t pp, dispatch_wakeup_flags_t wflags)
+_dispatch_root_queue_push_queue_override_needed(dispatch_queue_t rq,
+               dispatch_qos_t qos)
 {
-       pthread_priority_t rqp = rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-       bool defaultqueue = rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
-
-       if (unlikely(!rqp)) return false;
-
-       if (wflags & DISPATCH_WAKEUP_WAITER_HANDOFF) {
-               if (!(wflags & _DISPATCH_WAKEUP_OVERRIDE_BITS)) {
-                       return false;
-               }
-       }
-
-       pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-       return defaultqueue || pp > rqp;
+       // for root queues, the override is the guaranteed minimum override level
+       return qos > _dispatch_priority_override_qos(rq->dq_priority);
 }
 
 DISPATCH_NOINLINE
 static void
 _dispatch_root_queue_push_override(dispatch_queue_t orig_rq,
-               dispatch_object_t dou, pthread_priority_t pp)
+               dispatch_object_t dou, dispatch_qos_t qos)
 {
-       bool overcommit = orig_rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
-       dispatch_queue_t rq = _dispatch_get_root_queue_for_priority(pp, overcommit);
+       bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
+       dispatch_queue_t rq = _dispatch_get_root_queue(qos, overcommit);
        dispatch_continuation_t dc = dou._dc;
 
        if (_dispatch_object_is_redirection(dc)) {
@@ -4930,69 +5271,57 @@ _dispatch_root_queue_push_override(dispatch_queue_t orig_rq,
                dc->dc_priority = DISPATCH_NO_PRIORITY;
                dc->dc_voucher = DISPATCH_NO_VOUCHER;
        }
-
-       DISPATCH_COMPILER_CAN_ASSUME(dx_type(rq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE);
-       _dispatch_queue_push_inline(rq, dc, 0, 0);
+       _dispatch_root_queue_push_inline(rq, dc, dc, 1);
 }
 
 DISPATCH_NOINLINE
 static void
 _dispatch_root_queue_push_override_stealer(dispatch_queue_t orig_rq,
-               dispatch_queue_t dq, pthread_priority_t pp)
+               dispatch_queue_t dq, dispatch_qos_t qos)
 {
-       bool overcommit = orig_rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
-       dispatch_queue_t rq = _dispatch_get_root_queue_for_priority(pp, overcommit);
+       bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
+       dispatch_queue_t rq = _dispatch_get_root_queue(qos, overcommit);
        dispatch_continuation_t dc = _dispatch_continuation_alloc();
 
        dc->do_vtable = DC_VTABLE(OVERRIDE_STEALING);
-       _dispatch_retain(dq);
+       _dispatch_retain_2(dq);
        dc->dc_func = NULL;
        dc->dc_ctxt = dc;
        dc->dc_other = orig_rq;
        dc->dc_data = dq;
        dc->dc_priority = DISPATCH_NO_PRIORITY;
        dc->dc_voucher = DISPATCH_NO_VOUCHER;
-
-       DISPATCH_COMPILER_CAN_ASSUME(dx_type(rq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE);
-       _dispatch_queue_push_inline(rq, dc, 0, 0);
+       _dispatch_root_queue_push_inline(rq, dc, dc, 1);
 }
 
 DISPATCH_NOINLINE
 static void
-_dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq,
-               pthread_priority_t pp, dispatch_wakeup_flags_t flags, uint64_t dq_state)
+_dispatch_queue_class_wakeup_with_override_slow(dispatch_queue_t dq,
+               uint64_t dq_state, dispatch_wakeup_flags_t flags)
 {
-       mach_port_t owner = _dq_state_drain_owner(dq_state);
-       pthread_priority_t pp2;
+       dispatch_qos_t oqos, qos = _dq_state_max_qos(dq_state);
        dispatch_queue_t tq;
        bool locked;
 
-       if (owner) {
-               int rc = _dispatch_wqthread_override_start_check_owner(owner, pp,
+       if (_dq_state_is_base_anon(dq_state)) {
+               mach_port_t owner = _dq_state_drain_owner(dq_state);
+               if (owner) {
+                       (void)_dispatch_wqthread_override_start_check_owner(owner, qos,
                                &dq->dq_state_lock);
-               // EPERM means the target of the override is not a work queue thread
-               // and could be a thread bound queue such as the main queue.
-               // When that happens we must get to that queue and wake it up if we
-               // want the override to be appplied and take effect.
-               if (rc != EPERM) {
                        goto out;
                }
        }
 
-       if (_dq_state_is_suspended(dq_state)) {
-               goto out;
-       }
-
        tq = dq->do_targetq;
 
-       if (_dispatch_queue_has_immutable_target(dq)) {
+       if (likely(!_dispatch_queue_is_legacy(dq))) {
                locked = false;
        } else if (_dispatch_is_in_root_queues_array(tq)) {
                // avoid locking when we recognize the target queue as a global root
                // queue it is gross, but is a very common case. The locking isn't
                // needed because these target queues cannot go away.
                locked = false;
-       } else if (_dispatch_queue_sidelock_trylock(dq, pp)) {
+       } else if (_dispatch_queue_sidelock_trylock(dq, qos)) {
                // <rdar://problem/17735825> to traverse the tq chain safely we must
                // lock it to ensure it cannot change
                locked = true;
@@ -5002,10 +5331,9 @@ _dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq,
                //
                // Leading to being there, the current thread has:
                // 1. enqueued an object on `dq`
-               // 2. raised the dq_override value of `dq`
-               // 3. set the HAS_OVERRIDE bit and not seen an owner
-               // 4. tried and failed to acquire the side lock
-               //
+               // 2. raised the max_qos value, set RECEIVED_OVERRIDE on `dq`
+               //    and didn't see an owner
+               // 3. tried and failed to acquire the side lock
                //
                // The side lock owner can only be one of three things:
                //
@@ -5015,20 +5343,19 @@ _dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq,
                //   the eventual dispatch_resume().
                //
                // - A dispatch_set_target_queue() call. The fact that we saw no `owner`
-               //   means that the trysync it does wasn't being drained when (3)
+               //   means that the trysync it does wasn't being drained when (2)
                //   happened which can only be explained by one of these interleavings:
                //
                //    o `dq` became idle between when the object queued in (1) ran and
                //      the set_target_queue call and we were unlucky enough that our
-               //      step (3) happened while this queue was idle. There is no reason
+               //      step (2) happened while this queue was idle. There is no reason
                //      to override anything anymore, the queue drained to completion
                //      while we were preempted, our job is done.
                //
-               //    o `dq` is queued but not draining during (1-3), then when we try
-               //      to lock at (4) the queue is now draining a set_target_queue.
-               //      Since we set HAS_OVERRIDE with a release barrier, the effect of
-               //      (2) was visible to the drainer when he acquired the drain lock,
-               //      and that guy has applied our override. Our job is done.
+               //    o `dq` is queued but not draining during (1-2), then when we try
+               //      to lock at (3) the queue is now draining a set_target_queue.
+               //      This drainer must have seen the effects of (2) and has
+               //      already applied our override. Our job is done.
                //
                // - Another instance of _dispatch_queue_class_wakeup_with_override(),
                //   which is fine because trylock leaves a hint that we failed our
@@ -5040,14 +5367,12 @@ _dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq,
        }
 
 apply_again:
-       if (dx_type(tq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) {
-               if (_dispatch_need_global_root_queue_override_stealer(tq, pp, flags)) {
-                       _dispatch_root_queue_push_override_stealer(tq, dq, pp);
+       if (dx_hastypeflag(tq, QUEUE_ROOT)) {
+               if (_dispatch_root_queue_push_queue_override_needed(tq, qos)) {
+                       _dispatch_root_queue_push_override_stealer(tq, dq, qos);
                }
-       } else if (flags & DISPATCH_WAKEUP_WAITER_HANDOFF) {
-               dx_wakeup(tq, pp, flags);
-       } else if (_dispatch_queue_need_override(tq, pp)) {
-               dx_wakeup(tq, pp, DISPATCH_WAKEUP_OVERRIDING);
+       } else if (_dispatch_queue_need_override(tq, qos)) {
+               dx_wakeup(tq, qos, 0);
        }
        while (unlikely(locked && !_dispatch_queue_sidelock_tryunlock(dq))) {
                // rdar://problem/24081326
@@ -5056,9 +5381,9 @@ apply_again:
                // tried to acquire the side lock while we were running, and could have
                // had a better override than ours to apply.
                //
-               pp2 = dq->dq_override;
-               if (pp2 > pp) {
-                       pp = pp2;
+               oqos = _dq_state_max_qos(os_atomic_load2o(dq, dq_state, relaxed));
+               if (oqos > qos) {
+                       qos = oqos;
                        // The other instance had a better priority than ours, override
                        // our thread, and apply the override that wasn't applied to `dq`
                        // because of us.
@@ -5067,264 +5392,262 @@ apply_again:
        }
 
 out:
-       if (flags & DISPATCH_WAKEUP_CONSUME) {
-               return _dispatch_release_tailcall(dq);
+       if (flags & DISPATCH_WAKEUP_CONSUME_2) {
+               return _dispatch_release_2_tailcall(dq);
        }
 }
+
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq,
+               uint64_t dq_state, dispatch_wakeup_flags_t flags)
+{
+       dispatch_assert(_dq_state_should_override(dq_state));
+
+       return _dispatch_queue_class_wakeup_with_override_slow(dq, dq_state, flags);
+}
 #endif // HAVE_PTHREAD_WORKQUEUE_QOS
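// [Editor's sketch] os_atomic_rmw_loop2o(), used throughout the wakeup
// paths above, is in essence a compare-and-swap loop over dq_state. A
// simplified C11 model of the pattern (the real macro lives in
// src/shims/atomic.h and lets the body break out without storing via
// os_atomic_rmw_loop_give_up()):
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool
model_merge_qos(_Atomic uint64_t *state, uint64_t qos_bits)
{
	uint64_t old_state = atomic_load_explicit(state, memory_order_relaxed);
	uint64_t new_state;
	do {
		new_state = old_state | qos_bits; // stand-in for _dq_state_merge_qos()
		if (new_state == old_state) {
			return false; // the "give up" path: nothing to publish
		}
	} while (!atomic_compare_exchange_weak_explicit(state, &old_state,
			new_state, memory_order_release, memory_order_relaxed));
	return true;
}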
 
 DISPATCH_NOINLINE
 void
-_dispatch_queue_class_override_drainer(dispatch_queue_t dq,
-               pthread_priority_t pp, dispatch_wakeup_flags_t flags)
+_dispatch_root_queue_push(dispatch_queue_t rq, dispatch_object_t dou,
+               dispatch_qos_t qos)
 {
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-       uint64_t dq_state, value;
-
-       //
-       // Someone is trying to override the last work item of the queue.
-       // Do not remember this override on the queue because we know the precise
-       // duration the override is required for: until the current drain unlocks.
-       //
-       // That is why this function only tries to set HAS_OVERRIDE if we can
-       // still observe a drainer, and doesn't need to set the DIRTY bit
-       // because oq_override wasn't touched and there is no race to resolve
-       //
-       os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
-               if (!_dq_state_drain_locked(dq_state)) {
-                       os_atomic_rmw_loop_give_up(break);
+#if DISPATCH_USE_KEVENT_WORKQUEUE
+       dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
+       if (unlikely(ddi && ddi->ddi_can_stash)) {
+               dispatch_object_t old_dou = ddi->ddi_stashed_dou;
+               dispatch_priority_t rq_overcommit;
+               rq_overcommit = rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
+
+               if (likely(!old_dou._do || rq_overcommit)) {
+                       dispatch_queue_t old_rq = ddi->ddi_stashed_rq;
+                       dispatch_qos_t old_qos = ddi->ddi_stashed_qos;
+                       ddi->ddi_stashed_rq = rq;
+                       ddi->ddi_stashed_dou = dou;
+                       ddi->ddi_stashed_qos = qos;
+                       _dispatch_debug("deferring item %p, rq %p, qos %d",
+                                       dou._do, rq, qos);
+                       if (rq_overcommit) {
+                               ddi->ddi_can_stash = false;
+                       }
+                       if (likely(!old_dou._do)) {
+                               return;
+                       }
+                       // push the previously stashed item
+                       qos = old_qos;
+                       rq = old_rq;
+                       dou = old_dou;
                }
-               value = dq_state | DISPATCH_QUEUE_HAS_OVERRIDE;
-       });
-       if (_dq_state_drain_locked(dq_state)) {
-               return _dispatch_queue_class_wakeup_with_override(dq, pp,
-                               flags, dq_state);
        }
-#else
-       (void)pp;
-#endif // HAVE_PTHREAD_WORKQUEUE_QOS
-       if (flags & DISPATCH_WAKEUP_CONSUME) {
-               return _dispatch_release_tailcall(dq);
+#endif
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+       if (_dispatch_root_queue_push_needs_override(rq, qos)) {
+               return _dispatch_root_queue_push_override(rq, dou, qos);
        }
+#else
+       (void)qos;
+#endif
+       _dispatch_root_queue_push_inline(rq, dou, dou, 1);
 }
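// [Editor's sketch] A self-contained model of the stashing above (all
// names hypothetical): the deferred-items slot holds at most one work
// item per event-loop pass; an overcommit push claims the slot and then
// closes it, and whatever was displaced takes the regular push path.
#include <stdbool.h>
#include <stddef.h>

typedef struct {
	void *stashed_item;
	bool can_stash;
} model_ddi_s;

// Returns the item that still must be pushed for real, or NULL.
static void *
model_try_stash(model_ddi_s *ddi, void *item, bool rq_overcommit)
{
	if (!ddi->can_stash) return item;
	void *old = ddi->stashed_item;
	if (old == NULL || rq_overcommit) {
		ddi->stashed_item = item;
		if (rq_overcommit) ddi->can_stash = false;
		return old; // NULL on first stash, else the displaced item
	}
	return item; // slot is taken and we may not displace it
}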
 
-#if DISPATCH_USE_KEVENT_WORKQUEUE
-DISPATCH_NOINLINE
-static void
-_dispatch_trystash_to_deferred_items(dispatch_queue_t dq, dispatch_object_t dou,
-               pthread_priority_t pp, dispatch_deferred_items_t ddi)
-{
-       dispatch_priority_t old_pp = ddi->ddi_stashed_pp;
-       dispatch_queue_t old_dq = ddi->ddi_stashed_dq;
-       struct dispatch_object_s *old_dou = ddi->ddi_stashed_dou;
-       dispatch_priority_t rq_overcommit;
-
-       rq_overcommit = dq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
-       if (likely(!old_pp || rq_overcommit)) {
-               ddi->ddi_stashed_dq = dq;
-               ddi->ddi_stashed_dou = dou._do;
-               ddi->ddi_stashed_pp = (dispatch_priority_t)pp | rq_overcommit |
-                               _PTHREAD_PRIORITY_PRIORITY_MASK;
-               if (likely(!old_pp)) {
-                       return;
-               }
-               // push the previously stashed item
-               pp = old_pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-               dq = old_dq;
-               dou._do = old_dou;
+void
+_dispatch_root_queue_wakeup(dispatch_queue_t dq,
+               DISPATCH_UNUSED dispatch_qos_t qos, dispatch_wakeup_flags_t flags)
+{
+       if (!(flags & DISPATCH_WAKEUP_BLOCK_WAIT)) {
+               DISPATCH_INTERNAL_CRASH(dq->dq_priority,
+                               "Don't try to wake up or override a root queue");
        }
-       if (_dispatch_need_global_root_queue_override(dq, pp)) {
-               return _dispatch_root_queue_push_override(dq, dou, pp);
+       if (flags & DISPATCH_WAKEUP_CONSUME_2) {
+               return _dispatch_release_2_tailcall(dq);
        }
-       // bit of cheating: we should really pass `pp` but we know that we are
-       // pushing onto a global queue at this point, and we just checked that
-       // `pp` doesn't matter.
-       DISPATCH_COMPILER_CAN_ASSUME(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE);
-       _dispatch_queue_push_inline(dq, dou, 0, 0);
 }
-#endif
 
 DISPATCH_NOINLINE
-static void
-_dispatch_queue_push_slow(dispatch_queue_t dq, dispatch_object_t dou,
-               pthread_priority_t pp)
+void
+_dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t dou,
+               dispatch_qos_t qos)
 {
-       dispatch_once_f(&_dispatch_root_queues_pred, NULL,
-                       _dispatch_root_queues_init_once);
-       _dispatch_queue_push(dq, dou, pp);
+       _dispatch_queue_push_inline(dq, dou, qos);
 }
 
 DISPATCH_NOINLINE
 void
-_dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t dou,
-               pthread_priority_t pp)
+_dispatch_queue_class_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
+               dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target)
 {
-       _dispatch_assert_is_valid_qos_override(pp);
-       if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) {
-#if DISPATCH_USE_KEVENT_WORKQUEUE
-               dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
-               if (unlikely(ddi && !(ddi->ddi_stashed_pp &
-                               (dispatch_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK))) {
-                       dispatch_assert(_dispatch_root_queues_pred == DLOCK_ONCE_DONE);
-                       return _dispatch_trystash_to_deferred_items(dq, dou, pp, ddi);
+       dispatch_assert(target != DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT);
+
+       if (target && !(flags & DISPATCH_WAKEUP_CONSUME_2)) {
+               _dispatch_retain_2(dq);
+               flags |= DISPATCH_WAKEUP_CONSUME_2;
+       }
+
+       if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) {
+               //
+               // _dispatch_queue_class_barrier_complete() is about what both regular
+               // queues and sources need to evaluate, but the former can have sync
+               // handoffs to perform, which _dispatch_queue_class_barrier_complete()
+               // doesn't handle; only _dispatch_queue_barrier_complete() does.
+               //
+               // _dispatch_queue_wakeup() is the one for plain queues that calls
+               // _dispatch_queue_barrier_complete(), and this path is only taken
+               // for non-queue types.
+               //
+               dispatch_assert(dx_metatype(dq) != _DISPATCH_QUEUE_TYPE);
+               qos = _dispatch_queue_override_qos(dq, qos);
+               return _dispatch_queue_class_barrier_complete(dq, qos, flags, target,
+                               DISPATCH_QUEUE_SERIAL_DRAIN_OWNED);
+       }
+
+       if (target) {
+               uint64_t old_state, new_state, enqueue = DISPATCH_QUEUE_ENQUEUED;
+               if (target == DISPATCH_QUEUE_WAKEUP_MGR) {
+                       enqueue = DISPATCH_QUEUE_ENQUEUED_ON_MGR;
+               }
+               qos = _dispatch_queue_override_qos(dq, qos);
+               os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
+                       new_state = _dq_state_merge_qos(old_state, qos);
+                       if (likely(!_dq_state_is_suspended(old_state) &&
+                                       !_dq_state_is_enqueued(old_state) &&
+                                       (!_dq_state_drain_locked(old_state) ||
+                                       (enqueue != DISPATCH_QUEUE_ENQUEUED_ON_MGR &&
+                                       _dq_state_is_base_wlh(old_state))))) {
+                               new_state |= enqueue;
+                       }
+                       if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) {
+                               new_state |= DISPATCH_QUEUE_DIRTY;
+                       } else if (new_state == old_state) {
+                               os_atomic_rmw_loop_give_up(goto done);
+                       }
+               });
+
+               if (likely((old_state ^ new_state) & enqueue)) {
+                       dispatch_queue_t tq;
+                       if (target == DISPATCH_QUEUE_WAKEUP_TARGET) {
+                               // the rmw_loop above has no acquire barrier, as the last block
+                               // of a queue asyncing to that queue is not an uncommon pattern
+                               // and in that case the acquire would be completely useless
+                               //
+                               // so instead use dependency ordering to read
+                               // the targetq pointer.
+                               os_atomic_thread_fence(dependency);
+                               tq = os_atomic_load_with_dependency_on2o(dq, do_targetq,
+                                               (long)new_state);
+                       } else {
+                               tq = target;
+                       }
+                       dispatch_assert(_dq_state_is_enqueued(new_state));
+                       return _dispatch_queue_push_queue(tq, dq, new_state);
                }
-#endif
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-               // can't use dispatch_once_f() as it would create a frame
-               if (unlikely(_dispatch_root_queues_pred != DLOCK_ONCE_DONE)) {
-                       return _dispatch_queue_push_slow(dq, dou, pp);
+               if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) {
+                       if (_dq_state_should_override(new_state)) {
+                               return _dispatch_queue_class_wakeup_with_override(dq, new_state,
+                                               flags);
+                       }
                }
-               if (_dispatch_need_global_root_queue_override(dq, pp)) {
-                       return _dispatch_root_queue_push_override(dq, dou, pp);
+       } else if (qos) {
+               //
+               // Someone is trying to override the last work item of the queue.
+               //
+               uint64_t old_state, new_state;
+               os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
+                       if (!_dq_state_drain_locked(old_state) ||
+                                       !_dq_state_is_enqueued(old_state)) {
+                               os_atomic_rmw_loop_give_up(goto done);
+                       }
+                       new_state = _dq_state_merge_qos(old_state, qos);
+                       if (new_state == old_state) {
+                               os_atomic_rmw_loop_give_up(goto done);
+                       }
+               });
+               if (_dq_state_should_override(new_state)) {
+                       return _dispatch_queue_class_wakeup_with_override(dq, new_state,
+                                       flags);
                }
-#endif
+#endif // HAVE_PTHREAD_WORKQUEUE_QOS
+       }
+done:
+       if (likely(flags & DISPATCH_WAKEUP_CONSUME_2)) {
+               return _dispatch_release_2_tailcall(dq);
        }
-       _dispatch_queue_push_inline(dq, dou, pp, 0);
 }
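// [Editor's sketch] The dependency-ordering trick from the comment above,
// modeled in portable C11. Portable C cannot express the artificial
// address dependency that os_atomic_load_with_dependency_on2o() builds
// from the loaded dq_state value (memory_order_consume exists for this,
// but compilers promote it to acquire), so an acquire fence stands in:
#include <stdatomic.h>
#include <stdint.h>

struct model_q { _Atomic uint64_t state; void *_Atomic targetq; };

static void *
model_load_targetq(struct model_q *q)
{
	uint64_t s = atomic_load_explicit(&q->state, memory_order_relaxed);
	// conservative stand-in for carrying `s` into the next load's address
	atomic_thread_fence(memory_order_acquire);
	(void)s;
	return atomic_load_explicit(&q->targetq, memory_order_relaxed);
}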
 
 DISPATCH_NOINLINE
 static void
-_dispatch_queue_class_wakeup_enqueue(dispatch_queue_t dq, pthread_priority_t pp,
-               dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target)
+_dispatch_queue_push_sync_waiter(dispatch_queue_t dq,
+               dispatch_sync_context_t dsc, dispatch_qos_t qos)
 {
-       dispatch_queue_t tq;
+       uint64_t old_state, new_state;
 
-       if (flags & (DISPATCH_WAKEUP_OVERRIDING | DISPATCH_WAKEUP_WAS_OVERRIDDEN)) {
-               // _dispatch_queue_drain_try_unlock may have reset the override while
-               // we were becoming the enqueuer
-               _dispatch_queue_reinstate_override_priority(dq, (dispatch_priority_t)pp);
-       }
-       if (!(flags & DISPATCH_WAKEUP_CONSUME)) {
-               _dispatch_retain(dq);
+       if (unlikely(dx_type(dq) == DISPATCH_QUEUE_NETWORK_EVENT_TYPE)) {
+               DISPATCH_CLIENT_CRASH(0,
+                               "dispatch_sync onto a network event queue");
        }
-       if (target == DISPATCH_QUEUE_WAKEUP_TARGET) {
-               // try_become_enqueuer has no acquire barrier, as the last block
-               // of a queue asyncing to that queue is not an uncommon pattern
-               // and in that case the acquire is completely useless
-               //
-               // so instead use a thread fence here when we will read the targetq
-               // pointer because that is the only thing that really requires
-               // that barrier.
-               os_atomic_thread_fence(acquire);
-               tq = dq->do_targetq;
-       } else {
-               dispatch_assert(target == DISPATCH_QUEUE_WAKEUP_MGR);
-               tq = &_dispatch_mgr_q;
-       }
-       return _dispatch_queue_push(tq, dq, pp);
-}
-
-DISPATCH_NOINLINE
-void
-_dispatch_queue_class_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
-               dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target)
-{
-       uint64_t old_state, new_state, bits = 0;
 
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-       _dispatch_queue_override_priority(dq, /* inout */ &pp, /* inout */ &flags);
-#endif
+       _dispatch_trace_continuation_push(dq, dsc->_as_dc);
 
-       if (flags & DISPATCH_WAKEUP_FLUSH) {
-               bits = DISPATCH_QUEUE_DIRTY;
-       }
-       if (flags & DISPATCH_WAKEUP_OVERRIDING) {
-               //
-               // Setting the dirty bit here is about forcing callers of
-               // _dispatch_queue_drain_try_unlock() to loop again when an override
-               // has just been set to close the following race:
-               //
-               // Drainer (in drain_try_unlokc():
-               //    override_reset();
-               //    preempted....
-               //
-               // Enqueuer:
-               //    atomic_or(oq_override, override, relaxed);
-               //    atomic_or(dq_state, HAS_OVERRIDE, release);
-               //
-               // Drainer:
-               //    ... resumes
-               //    successful drain_unlock() and leaks `oq_override`
-               //
-               bits = DISPATCH_QUEUE_DIRTY | DISPATCH_QUEUE_HAS_OVERRIDE;
-       }
+       if (unlikely(_dispatch_queue_push_update_tail(dq, dsc->_as_do))) {
+               // for slow waiters, we borrow the reference of the caller
+               // so we don't need to protect the wakeup with a temporary retain
+               _dispatch_queue_push_update_head(dq, dsc->_as_do);
+               if (unlikely(_dispatch_queue_is_thread_bound(dq))) {
+                       return dx_wakeup(dq, qos, DISPATCH_WAKEUP_MAKE_DIRTY);
+               }
 
-       if (flags & DISPATCH_WAKEUP_SLOW_WAITER) {
                uint64_t pending_barrier_width =
                                (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL;
-               uint64_t xor_owner_and_set_full_width_and_in_barrier =
-                               _dispatch_tid_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT |
-                               DISPATCH_QUEUE_IN_BARRIER;
-
-#ifdef DLOCK_NOWAITERS_BIT
-               bits  |= DLOCK_NOWAITERS_BIT;
-#else
-               bits  |= DLOCK_WAITERS_BIT;
-#endif
-               flags ^= DISPATCH_WAKEUP_SLOW_WAITER;
-               dispatch_assert(!(flags & DISPATCH_WAKEUP_CONSUME));
-
+               uint64_t set_owner_and_set_full_width_and_in_barrier =
+                               _dispatch_lock_value_for_self() |
+                               DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER;
+               // similar to _dispatch_queue_drain_try_unlock()
                os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
-                       new_state = old_state | bits;
-                       if (_dq_state_drain_pended(old_state)) {
-                               // same as DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT
-                               // but we want to be more efficient wrt the WAITERS_BIT
-                               new_state &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK;
-                               new_state &= ~DISPATCH_QUEUE_DRAIN_PENDED;
-                       }
-                       if (unlikely(_dq_state_drain_locked(new_state))) {
-#ifdef DLOCK_NOWAITERS_BIT
-                               new_state &= ~(uint64_t)DLOCK_NOWAITERS_BIT;
-#endif
-                       } else if (unlikely(!_dq_state_is_runnable(new_state) ||
-                                       !(flags & DISPATCH_WAKEUP_FLUSH))) {
-                               // either not runnable, or was not for the first item (26700358)
-                               // so we should not try to lock and handle overrides instead
+                       new_state  = _dq_state_merge_qos(old_state, qos);
+                       new_state |= DISPATCH_QUEUE_DIRTY;
+                       if (unlikely(_dq_state_drain_locked(old_state) ||
+                                       !_dq_state_is_runnable(old_state))) {
+                               // not runnable, so we should just handle overrides
+                       } else if (_dq_state_is_base_wlh(old_state) &&
+                                       _dq_state_is_enqueued(old_state)) {
+                               // 32123779 let the event thread redrive since it's out already
                        } else if (_dq_state_has_pending_barrier(old_state) ||
                                        new_state + pending_barrier_width <
                                        DISPATCH_QUEUE_WIDTH_FULL_BIT) {
                                // see _dispatch_queue_drain_try_lock
                                new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
-                               new_state ^= xor_owner_and_set_full_width_and_in_barrier;
-                       } else {
-                               new_state |= DISPATCH_QUEUE_ENQUEUED;
+                               new_state |= set_owner_and_set_full_width_and_in_barrier;
                        }
                });
+
+               if (_dq_state_is_base_wlh(old_state) &&
+                               (dsc->dsc_waiter == _dispatch_tid_self())) {
+                       dsc->dsc_wlh_was_first = true;
+               }
+
                if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) {
-                       return _dispatch_try_lock_transfer_or_wakeup(dq);
+                       return _dispatch_queue_barrier_complete(dq, qos, 0);
                }
-       } else if (bits) {
-               os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release,{
-                       new_state = old_state | bits;
-                       if (likely(_dq_state_should_wakeup(old_state))) {
-                               new_state |= DISPATCH_QUEUE_ENQUEUED;
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+               if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) {
+                       if (_dq_state_should_override(new_state)) {
+                               return _dispatch_queue_class_wakeup_with_override(dq,
+                                               new_state, 0);
                        }
-               });
-       } else {
-               os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed,{
-                       new_state = old_state;
-                       if (likely(_dq_state_should_wakeup(old_state))) {
-                               new_state |= DISPATCH_QUEUE_ENQUEUED;
-                       } else {
-                               os_atomic_rmw_loop_give_up(break);
+               }
+       } else if (unlikely(qos)) {
+               os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
+                       new_state = _dq_state_merge_qos(old_state, qos);
+                       if (old_state == new_state) {
+                               os_atomic_rmw_loop_give_up(return);
                        }
                });
-       }
-
-       if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) {
-               return _dispatch_queue_class_wakeup_enqueue(dq, pp, flags, target);
-       }
-
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-       if ((flags & (DISPATCH_WAKEUP_OVERRIDING | DISPATCH_WAKEUP_WAITER_HANDOFF))
-                       && target == DISPATCH_QUEUE_WAKEUP_TARGET) {
-               return _dispatch_queue_class_wakeup_with_override(dq, pp,
-                               flags, new_state);
-       }
-#endif
-
-       if (flags & DISPATCH_WAKEUP_CONSUME) {
-               return _dispatch_release_tailcall(dq);
+               if (_dq_state_should_override(new_state)) {
+                       return _dispatch_queue_class_wakeup_with_override(dq, new_state, 0);
+               }
+#endif // HAVE_PTHREAD_WORKQUEUE_QOS
        }
 }
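// [Editor's sketch] The width accounting used above, with illustrative
// constants (the real bit layout is defined in queue_internal.h):
// dq_state tracks in-flight work in units of WIDTH_INTERVAL, and the
// waiter may seat itself as a full-width barrier only if reserving the
// remaining (dq_width - 1) slots still stays below the FULL bit.
#include <stdbool.h>
#include <stdint.h>

#define MODEL_WIDTH_INTERVAL 0x20ull                      // hypothetical value
#define MODEL_WIDTH_FULL_BIT (MODEL_WIDTH_INTERVAL << 8)  // hypothetical value

static bool
model_can_take_barrier(uint64_t new_state, uint32_t dq_width)
{
	uint64_t pending_barrier_width =
			(dq_width - 1) * MODEL_WIDTH_INTERVAL;
	return new_state + pending_barrier_width < MODEL_WIDTH_FULL_BIT;
}
// e.g. dq_width 8 with 3 slots in flight: 0x60 + 0xe0 = 0x140 < 0x2000,
// so the path that takes IN_BARRIER ownership is still allowed.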
 
@@ -5369,7 +5692,7 @@ out:
                (void)os_atomic_dec2o(qc, dgq_pending, relaxed);
        }
        if (!available) {
-               _dispatch_global_queue_poke(dq);
+               _dispatch_global_queue_poke(dq, 1, 0);
        }
        return available;
 }
@@ -5432,50 +5755,110 @@ start:
                        goto out;
                }
                // There must be a next item now.
-               _dispatch_wait_until(next = head->do_next);
+               next = os_mpsc_get_next(head, do_next);
        }
 
        os_atomic_store2o(dq, dq_items_head, next, relaxed);
-       _dispatch_global_queue_poke(dq);
+       _dispatch_global_queue_poke(dq, 1, 0);
 out:
        return head;
 }
 
+#if DISPATCH_USE_KEVENT_WORKQUEUE
 void
-_dispatch_root_queue_drain_deferred_item(dispatch_queue_t dq,
-               struct dispatch_object_s *dou, pthread_priority_t pp)
-{
-       struct _dispatch_identity_s di;
+_dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi
+               DISPATCH_PERF_MON_ARGS_PROTO)
+{
+       dispatch_queue_t rq = ddi->ddi_stashed_rq;
+       dispatch_queue_t dq = ddi->ddi_stashed_dou._dq;
+       _dispatch_queue_set_current(rq);
+       dispatch_priority_t old_pri = _dispatch_set_basepri_wlh(rq->dq_priority);
+       dispatch_invoke_context_s dic = { };
+       dispatch_invoke_flags_t flags = DISPATCH_INVOKE_WORKER_DRAIN |
+                       DISPATCH_INVOKE_REDIRECTING_DRAIN | DISPATCH_INVOKE_WLH;
+       _dispatch_queue_drain_init_narrowing_check_deadline(&dic, rq->dq_priority);
+       uint64_t dq_state;
+
+       ddi->ddi_wlh_servicing = true;
+       if (unlikely(_dispatch_needs_to_return_to_kernel())) {
+               _dispatch_return_to_kernel();
+       }
+retry:
+       dispatch_assert(ddi->ddi_wlh_needs_delete);
+       _dispatch_trace_continuation_pop(rq, dq);
+
+       if (_dispatch_queue_drain_try_lock_wlh(dq, &dq_state)) {
+               dx_invoke(dq, &dic, flags);
+               if (!ddi->ddi_wlh_needs_delete) {
+                       goto park;
+               }
+               dq_state = os_atomic_load2o(dq, dq_state, relaxed);
+               if (unlikely(!_dq_state_is_base_wlh(dq_state))) { // rdar://32671286
+                       goto park;
+               }
+               if (unlikely(_dq_state_is_enqueued_on_target(dq_state))) {
+                       _dispatch_retain(dq);
+                       _dispatch_trace_continuation_push(dq->do_targetq, dq);
+                       goto retry;
+               }
+       } else {
+               if (_dq_state_is_suspended(dq_state)) {
+                       dispatch_assert(!_dq_state_is_enqueued(dq_state));
+                       _dispatch_release_2_no_dispose(dq);
+               } else {
+                       dispatch_assert(_dq_state_is_enqueued(dq_state));
+                       dispatch_assert(_dq_state_drain_locked(dq_state));
+                       _dispatch_release_no_dispose(dq);
+               }
+       }
 
-       // fake that we queued `dou` on `dq` for introspection purposes
-       _dispatch_trace_continuation_push(dq, dou);
+       _dispatch_event_loop_leave_deferred((dispatch_wlh_t)dq, dq_state);
 
-       pp = _dispatch_priority_inherit_from_root_queue(pp, dq);
-       _dispatch_queue_set_current(dq);
-       _dispatch_root_queue_identity_assume(&di, pp);
+park:
+       // event thread that could steal
+       _dispatch_perfmon_end(perfmon_thread_event_steal);
+       _dispatch_reset_basepri(old_pri);
+       _dispatch_reset_basepri_override();
+       _dispatch_queue_set_current(NULL);
+
+       _dispatch_voucher_debug("root queue clear", NULL);
+       _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK);
+}
+
+void
+_dispatch_root_queue_drain_deferred_item(dispatch_deferred_items_t ddi
+               DISPATCH_PERF_MON_ARGS_PROTO)
+{
+       dispatch_queue_t rq = ddi->ddi_stashed_rq;
+       _dispatch_queue_set_current(rq);
+       dispatch_priority_t old_pri = _dispatch_set_basepri(rq->dq_priority);
+
+       dispatch_invoke_context_s dic = { };
+       dispatch_invoke_flags_t flags = DISPATCH_INVOKE_WORKER_DRAIN |
+                       DISPATCH_INVOKE_REDIRECTING_DRAIN;
 #if DISPATCH_COCOA_COMPAT
-       void *pool = _dispatch_last_resort_autorelease_pool_push();
+       _dispatch_last_resort_autorelease_pool_push(&dic);
 #endif // DISPATCH_COCOA_COMPAT
+       _dispatch_queue_drain_init_narrowing_check_deadline(&dic, rq->dq_priority);
+       _dispatch_continuation_pop_inline(ddi->ddi_stashed_dou, &dic, flags, rq);
 
-       _dispatch_perfmon_start();
-       _dispatch_continuation_pop_inline(dou, dq,
-                       DISPATCH_INVOKE_WORKER_DRAIN | DISPATCH_INVOKE_REDIRECTING_DRAIN);
-       _dispatch_perfmon_workitem_inc();
-       _dispatch_perfmon_end();
-
+       // event thread that could steal
+       _dispatch_perfmon_end(perfmon_thread_event_steal);
 #if DISPATCH_COCOA_COMPAT
-       _dispatch_last_resort_autorelease_pool_pop(pool);
+       _dispatch_last_resort_autorelease_pool_pop(&dic);
 #endif // DISPATCH_COCOA_COMPAT
-       _dispatch_reset_defaultpriority(di.old_pp);
+       _dispatch_reset_basepri(old_pri);
+       _dispatch_reset_basepri_override();
        _dispatch_queue_set_current(NULL);
 
        _dispatch_voucher_debug("root queue clear", NULL);
        _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK);
 }
+#endif
 
 DISPATCH_NOT_TAIL_CALLED // prevent tailcall (for Instrument DTrace probe)
 static void
-_dispatch_root_queue_drain(dispatch_queue_t dq, pthread_priority_t pri)
+_dispatch_root_queue_drain(dispatch_queue_t dq, pthread_priority_t pp)
 {
 #if DISPATCH_DEBUG
        dispatch_queue_t cq;
@@ -5484,28 +5867,43 @@ _dispatch_root_queue_drain(dispatch_queue_t dq, pthread_priority_t pri)
        }
 #endif
        _dispatch_queue_set_current(dq);
-       if (dq->dq_priority) pri = dq->dq_priority;
-       pthread_priority_t old_dp = _dispatch_set_defaultpriority(pri, NULL);
-#if DISPATCH_COCOA_COMPAT
-       void *pool = _dispatch_last_resort_autorelease_pool_push();
-#endif // DISPATCH_COCOA_COMPAT
+       dispatch_priority_t pri = dq->dq_priority;
+       if (!pri) pri = _dispatch_priority_from_pp(pp);
+       dispatch_priority_t old_dbp = _dispatch_set_basepri(pri);
+       _dispatch_adopt_wlh_anon();
 
-       _dispatch_perfmon_start();
        struct dispatch_object_s *item;
        bool reset = false;
+       dispatch_invoke_context_s dic = { };
+#if DISPATCH_COCOA_COMPAT
+       _dispatch_last_resort_autorelease_pool_push(&dic);
+#endif // DISPATCH_COCOA_COMPAT
+       dispatch_invoke_flags_t flags = DISPATCH_INVOKE_WORKER_DRAIN |
+                       DISPATCH_INVOKE_REDIRECTING_DRAIN;
+       _dispatch_queue_drain_init_narrowing_check_deadline(&dic, pri);
+       _dispatch_perfmon_start();
        while ((item = fastpath(_dispatch_root_queue_drain_one(dq)))) {
                if (reset) _dispatch_wqthread_override_reset();
-               _dispatch_continuation_pop_inline(item, dq,
-                               DISPATCH_INVOKE_WORKER_DRAIN|DISPATCH_INVOKE_REDIRECTING_DRAIN);
-               _dispatch_perfmon_workitem_inc();
-               reset = _dispatch_reset_defaultpriority_override();
+               _dispatch_continuation_pop_inline(item, &dic, flags, dq);
+               reset = _dispatch_reset_basepri_override();
+               if (unlikely(_dispatch_queue_drain_should_narrow(&dic))) {
+                       break;
+               }
+       }
+
+       // worker thread: pick the perfmon bucket, overcommit or not
+       if (pri & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) {
+               _dispatch_perfmon_end(perfmon_thread_worker_oc);
+       } else {
+               _dispatch_perfmon_end(perfmon_thread_worker_non_oc);
        }
-       _dispatch_perfmon_end();
 
 #if DISPATCH_COCOA_COMPAT
-       _dispatch_last_resort_autorelease_pool_pop(pool);
+       _dispatch_last_resort_autorelease_pool_pop(&dic);
 #endif // DISPATCH_COCOA_COMPAT
-       _dispatch_reset_defaultpriority(old_dp);
+       _dispatch_reset_wlh();
+       _dispatch_reset_basepri(old_dbp);
+       _dispatch_reset_basepri_override();
        _dispatch_queue_set_current(NULL);
 }
 
@@ -5520,7 +5918,7 @@ _dispatch_worker_thread4(void *context)
        dispatch_root_queue_context_t qc = dq->do_ctxt;
 
        _dispatch_introspection_thread_add();
-       int pending = (int)os_atomic_dec2o(qc, dgq_pending, relaxed);
+       int pending = os_atomic_dec2o(qc, dgq_pending, relaxed);
        dispatch_assert(pending >= 0);
        _dispatch_root_queue_drain(dq, _dispatch_get_priority());
        _dispatch_voucher_debug("root queue clear", NULL);
@@ -5535,12 +5933,12 @@ _dispatch_worker_thread3(pthread_priority_t pp)
        dispatch_queue_t dq;
        pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK;
        _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp);
-       dq = _dispatch_get_root_queue_for_priority(pp, overcommit);
+       dq = _dispatch_get_root_queue(_dispatch_qos_from_pp(pp), overcommit);
        return _dispatch_worker_thread4(dq);
 }
 #endif // HAVE_PTHREAD_WORKQUEUE_QOS
 
-#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
+#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
 // 6618342 Contact the team that owns the Instrument DTrace probe before
 //         renaming this symbol
 static void
@@ -5553,7 +5951,7 @@ _dispatch_worker_thread2(int priority, int options,
 
        return _dispatch_worker_thread4(dq);
 }
-#endif // HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
+#endif // DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
 #endif // HAVE_PTHREAD_WORKQUEUES
 
 #if DISPATCH_USE_PTHREAD_POOL
@@ -5566,6 +5964,11 @@ _dispatch_worker_thread(void *context)
        dispatch_root_queue_context_t qc = dq->do_ctxt;
        dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt;
 
+       int pending = os_atomic_dec2o(qc, dgq_pending, relaxed);
+       if (unlikely(pending < 0)) {
+               DISPATCH_INTERNAL_CRASH(pending, "Pending thread request underflow");
+       }
+
        if (pqc->dpq_observer_hooks.queue_will_execute) {
                _dispatch_set_pthread_root_queue_observer_hooks(
                                &pqc->dpq_observer_hooks);
@@ -5574,15 +5977,19 @@ _dispatch_worker_thread(void *context)
                pqc->dpq_thread_configure();
        }
 
-       sigset_t mask;
-       int r;
        // workaround tweaks the kernel workqueue does for us
-       r = sigfillset(&mask);
-       (void)dispatch_assume_zero(r);
-       r = _dispatch_pthread_sigmask(SIG_BLOCK, &mask, NULL);
-       (void)dispatch_assume_zero(r);
+       _dispatch_sigmask();
        _dispatch_introspection_thread_add();
 
+#if DISPATCH_USE_INTERNAL_WORKQUEUE
+       bool overcommit = (qc->dgq_wq_options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT);
+       bool manager = (dq == &_dispatch_mgr_root_queue);
+       bool monitored = !(overcommit || manager);
+       if (monitored) {
+               _dispatch_workq_worker_register(dq, qc->dgq_qos);
+       }
+#endif
+
        const int64_t timeout = 5ull * NSEC_PER_SEC;
        pthread_priority_t old_pri = _dispatch_get_priority();
        do {
@@ -5591,43 +5998,31 @@ _dispatch_worker_thread(void *context)
        } while (dispatch_semaphore_wait(&pqc->dpq_thread_mediator,
                        dispatch_time(0, timeout)) == 0);
 
+#if DISPATCH_USE_INTERNAL_WORKQUEUE
+       if (monitored) {
+               _dispatch_workq_worker_unregister(dq, qc->dgq_qos);
+       }
+#endif
        (void)os_atomic_inc2o(qc, dgq_thread_pool_size, release);
-       _dispatch_global_queue_poke(dq);
-       _dispatch_release(dq);
-
+       _dispatch_global_queue_poke(dq, 1, 0);
+       _dispatch_release(dq); // retained in _dispatch_global_queue_poke_slow
        return NULL;
 }
+#endif // DISPATCH_USE_PTHREAD_POOL
 
-int
-_dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset)
-{
-       int r;
-
-       /* Workaround: 6269619 Not all signals can be delivered on any thread */
-
-       r = sigdelset(set, SIGILL);
-       (void)dispatch_assume_zero(r);
-       r = sigdelset(set, SIGTRAP);
-       (void)dispatch_assume_zero(r);
-#if HAVE_DECL_SIGEMT
-       r = sigdelset(set, SIGEMT);
-       (void)dispatch_assume_zero(r);
-#endif
-       r = sigdelset(set, SIGFPE);
-       (void)dispatch_assume_zero(r);
-       r = sigdelset(set, SIGBUS);
-       (void)dispatch_assume_zero(r);
-       r = sigdelset(set, SIGSEGV);
-       (void)dispatch_assume_zero(r);
-       r = sigdelset(set, SIGSYS);
-       (void)dispatch_assume_zero(r);
-       r = sigdelset(set, SIGPIPE);
-       (void)dispatch_assume_zero(r);
+#pragma mark -
+#pragma mark dispatch_network_root_queue
+#if TARGET_OS_MAC
 
-       return pthread_sigmask(how, set, oset);
+dispatch_queue_t
+_dispatch_network_root_queue_create_4NW(const char *label,
+               const pthread_attr_t *attrs, dispatch_block_t configure)
+{
+       unsigned long flags = dispatch_pthread_root_queue_flags_pool_size(1);
+       return dispatch_pthread_root_queue_create(label, flags, attrs, configure);
 }
-#endif // DISPATCH_USE_PTHREAD_POOL
 
+#endif // TARGET_OS_MAC
 #pragma mark -
 #pragma mark dispatch_runloop_queue
 
@@ -5645,9 +6040,10 @@ _dispatch_runloop_root_queue_create_4CF(const char *label, unsigned long flags)
                return DISPATCH_BAD_INPUT;
        }
        dqs = sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD;
-       dq = _dispatch_alloc(DISPATCH_VTABLE(queue_runloop), dqs);
-       _dispatch_queue_init(dq, DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC, 1, false);
-       dq->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,true);
+       dq = _dispatch_object_alloc(DISPATCH_VTABLE(queue_runloop), dqs);
+       _dispatch_queue_init(dq, DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC, 1,
+                       DISPATCH_QUEUE_ROLE_BASE_ANON);
+       dq->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true);
        dq->dq_label = label ? label : "runloop-queue"; // no-copy contract
        _dispatch_runloop_queue_handle_init(dq);
        _dispatch_queue_set_bound_thread(dq);
@@ -5660,19 +6056,19 @@ _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq)
 {
        _dispatch_object_debug(dq, "%s", __func__);
 
-       pthread_priority_t pp = _dispatch_queue_reset_override_priority(dq, true);
+       dispatch_qos_t qos = _dispatch_runloop_queue_reset_max_qos(dq);
        _dispatch_queue_clear_bound_thread(dq);
-       dx_wakeup(dq, pp, DISPATCH_WAKEUP_FLUSH);
-       if (pp) _dispatch_thread_override_end(DISPATCH_QUEUE_DRAIN_OWNER(dq), dq);
+       dx_wakeup(dq, qos, DISPATCH_WAKEUP_MAKE_DIRTY);
+       if (qos) _dispatch_thread_override_end(DISPATCH_QUEUE_DRAIN_OWNER(dq), dq);
 }
 
 void
-_dispatch_runloop_queue_dispose(dispatch_queue_t dq)
+_dispatch_runloop_queue_dispose(dispatch_queue_t dq, bool *allow_free)
 {
        _dispatch_object_debug(dq, "%s", __func__);
        _dispatch_introspection_queue_dispose(dq);
        _dispatch_runloop_queue_handle_dispose(dq);
-       _dispatch_queue_destroy(dq);
+       _dispatch_queue_destroy(dq, allow_free);
 }
 
 bool
@@ -5696,6 +6092,7 @@ _dispatch_runloop_root_queue_wakeup_4CF(dispatch_queue_t dq)
        _dispatch_runloop_queue_wakeup(dq, 0, false);
 }
 
+#if TARGET_OS_MAC
 dispatch_runloop_handle_t
 _dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t dq)
 {
@@ -5704,6 +6101,7 @@ _dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t dq)
        }
        return _dispatch_runloop_queue_get_handle(dq);
 }
+#endif
 
 static void
 _dispatch_runloop_queue_handle_init(void *ctxt)
@@ -5837,9 +6235,7 @@ _dispatch_main_queue_callback_4CF(
 void
 dispatch_main(void)
 {
-       dispatch_once_f(&_dispatch_root_queues_pred, NULL,
-               _dispatch_root_queues_init_once);
-
+       _dispatch_root_queues_init();
 #if HAVE_PTHREAD_MAIN_NP
        if (pthread_main_np()) {
 #endif
@@ -5856,6 +6252,7 @@ dispatch_main(void)
                pthread_key_t dispatch_main_key;
                pthread_key_create(&dispatch_main_key, _dispatch_sig_thread);
                pthread_setspecific(dispatch_main_key, &dispatch_main_key);
+               _dispatch_sigmask();
 #endif
                pthread_exit(NULL);
                DISPATCH_INTERNAL_CRASH(errno, "pthread_exit() returned");
@@ -5890,52 +6287,25 @@ static void
 _dispatch_queue_cleanup2(void)
 {
        dispatch_queue_t dq = &_dispatch_main_q;
-       _dispatch_queue_clear_bound_thread(dq);
+       uint64_t old_state, new_state;
 
-       // <rdar://problem/22623242>
-       // Here is what happens when both this cleanup happens because of
-       // dispatch_main() being called, and a concurrent enqueuer makes the queue
-       // non empty.
-       //
-       // _dispatch_queue_cleanup2:
-       //     atomic_and(dq_is_thread_bound, ~DQF_THREAD_BOUND, relaxed);
-       //     maximal_barrier();
-       //     if (load(dq_items_tail, seq_cst)) {
-       //         // do the wake up the normal serial queue way
-       //     } else {
-       //         // do no wake up  <----
-       //     }
-       //
-       // enqueuer:
-       //     store(dq_items_tail, new_tail, release);
-       //     if (load(dq_is_thread_bound, relaxed)) {
-       //         // do the wake up the runloop way <----
-       //     } else {
-       //         // do the wake up the normal serial way
-       //     }
+       // Turning the main queue from a runloop queue into an ordinary serial queue
+       // is a three-step operation:
+       // 1. finish taking the main queue lock the usual way
+       // 2. clear the THREAD_BOUND flag
+       // 3. do a handoff
        //
-       // what would be bad is to take both paths marked <---- because the queue
-       // wouldn't be woken up until the next time it's used (which may never
-       // happen)
-       //
-       // An enqueuer that speculates the load of the old value of thread_bound
-       // and then does the store may wake up the main queue the runloop way.
-       // But then, the cleanup thread will see that store because the load
-       // of dq_items_tail is sequentially consistent, and we have just thrown away
-       // our pipeline.
-       //
-       // By the time cleanup2() is out of the maximally synchronizing barrier,
-       // no other thread can speculate the wrong load anymore, and both cleanup2()
-       // and a concurrent enqueuer would treat the queue in the standard non
-       // thread bound way
-
-       _dispatch_queue_atomic_flags_clear(dq,
-                       DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC);
-       os_atomic_maximally_synchronizing_barrier();
-       // no need to drop the override, the thread will die anyway
-       // the barrier above includes an acquire, so it's ok to do this raw
-       // call to dx_wakeup(0)
-       dx_wakeup(dq, 0, 0);
+       // If an enqueuer executes concurrently, it may do the wakeup the runloop
+       // way, because it still believes the queue to be thread-bound, but the
+       // dirty bit will force this codepath to notice the enqueue, and the usual
+       // lock transfer will do the proper wakeup.
+       os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
+               new_state = old_state & ~DISPATCH_QUEUE_DIRTY;
+               new_state += DISPATCH_QUEUE_WIDTH_INTERVAL;
+               new_state += DISPATCH_QUEUE_IN_BARRIER;
+       });
+       _dispatch_queue_atomic_flags_clear(dq, DQF_THREAD_BOUND|DQF_CANNOT_TRYSYNC);
+       _dispatch_queue_barrier_complete(dq, 0, 0);
 
        // overload the "probably" variable to mean that dispatch_main() or
        // similar non-POSIX API was called
@@ -5944,7 +6314,7 @@ _dispatch_queue_cleanup2(void)
 #ifndef __linux__
        if (_dispatch_program_is_probably_callback_driven) {
                _dispatch_barrier_async_detached_f(_dispatch_get_root_queue(
-                               _DISPATCH_QOS_CLASS_DEFAULT, true), NULL, _dispatch_sig_thread);
+                               DISPATCH_QOS_DEFAULT, true), NULL, _dispatch_sig_thread);
                sleep(1); // workaround 6778970
        }
 #endif
@@ -5967,6 +6337,16 @@ _dispatch_queue_cleanup(void *ctxt)
                        "Premature thread exit while a dispatch queue is running");
 }
 
+static void
+_dispatch_wlh_cleanup(void *ctxt)
+{
+       // POSIX defines that destructors are only called if 'ctxt' is non-null
+       dispatch_queue_t wlh;
+       wlh = (dispatch_queue_t)((uintptr_t)ctxt & ~DISPATCH_WLH_STORAGE_REF);
+       _dispatch_queue_release_storage(wlh);
+}
+
+DISPATCH_NORETURN
 static void
 _dispatch_deferred_items_cleanup(void *ctxt)
 {
@@ -5975,6 +6355,7 @@ _dispatch_deferred_items_cleanup(void *ctxt)
                        "Premature thread exit with unhandled deferred items");
 }
 
+DISPATCH_NORETURN
 static void
 _dispatch_frame_cleanup(void *ctxt)
 {
@@ -5983,6 +6364,7 @@ _dispatch_frame_cleanup(void *ctxt)
                        "Premature thread exit while a dispatch frame is active");
 }
 
+DISPATCH_NORETURN
 static void
 _dispatch_context_cleanup(void *ctxt)
 {
index 2435b13e12a76d4bc4a402bec3048f167329d119..f70356a2c975759f4d639497b7e775a0cb2948ac 100644 (file)
 #define DISPATCH_CACHELINE_ALIGN \
                __attribute__((__aligned__(DISPATCH_CACHELINE_SIZE)))
 
+#define DISPATCH_CACHELINE_PAD_SIZE(type) \
+               (roundup(sizeof(type), DISPATCH_CACHELINE_SIZE) - sizeof(type))
+
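// [Editor's sketch] What DISPATCH_CACHELINE_PAD_SIZE() computes, checked
// at compile time with a hand-rolled roundup() and a hypothetical 64-byte
// cacheline:
#define MODEL_CACHELINE 64
#define MODEL_PAD_SIZE(type) \
		((((sizeof(type) + MODEL_CACHELINE - 1) / MODEL_CACHELINE) * \
		MODEL_CACHELINE) - sizeof(type))

struct model_padded_example { char bytes[200]; };
_Static_assert(MODEL_PAD_SIZE(struct model_padded_example) == 56,
		"200 bytes round up to 256, leaving 56 bytes of pad");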
 
 #pragma mark -
 #pragma mark dispatch_queue_t
 
 DISPATCH_ENUM(dispatch_queue_flags, uint32_t,
-       DQF_NONE                                = 0x0000,
-       DQF_AUTORELEASE_ALWAYS  = 0x0001,
-       DQF_AUTORELEASE_NEVER   = 0x0002,
-#define _DQF_AUTORELEASE_MASK 0x0003
-       DQF_THREAD_BOUND                = 0x0004, // queue is bound to a thread
-       DQF_BARRIER_BIT                 = 0x0008, // queue is a barrier on its target
-       DQF_TARGETED                    = 0x0010, // queue is targeted by another object
-       DQF_LABEL_NEEDS_FREE    = 0x0020, // queue label was strduped; need to free it
-       DQF_CANNOT_TRYSYNC              = 0x0040,
-       DQF_RELEASED                    = 0x0080, // xref_cnt == -1
+       DQF_NONE                = 0x00000000,
+       DQF_AUTORELEASE_ALWAYS  = 0x00010000,
+       DQF_AUTORELEASE_NEVER   = 0x00020000,
+#define _DQF_AUTORELEASE_MASK 0x00030000
+       DQF_THREAD_BOUND        = 0x00040000, // queue is bound to a thread
+       DQF_BARRIER_BIT         = 0x00080000, // queue is a barrier on its target
+       DQF_TARGETED            = 0x00100000, // queue is targeted by another object
+       DQF_LABEL_NEEDS_FREE    = 0x00200000, // queue label was strduped; need to free it
+       DQF_CANNOT_TRYSYNC      = 0x00400000,
+       DQF_RELEASED            = 0x00800000, // xref_cnt == -1
+       DQF_LEGACY              = 0x01000000,
 
        // only applies to sources
        //
@@ -77,81 +81,71 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t,
        //    will be -p-.
        //
        // -pd
-       //    Received EV_DELETE (from ap-), needs to free `ds_dkev`, the knote is
-       //    gone from the kernel, but ds_dkev lives. Next state will be --d.
+       //    Received EV_DELETE (from ap-), needs to unregister ds_refs, the muxnote
+       //    is gone from the kernel. Next state will be --d.
        //
        // -p-
        //    Received an EV_ONESHOT event (from a--), or an event was delivered
        //    that caused the cancellation to fail with EINPROGRESS
-       //    (from ap-). The knote still lives, next state will be --d.
+       //    (from ap-). The muxnote still lives, next state will be --d.
        //
        // --d
-       //    Final state of the source, the knote is gone from the kernel and
-       //    ds_dkev is freed. The source can safely be released.
+       //    Final state of the source, the muxnote is gone from the kernel and
+       //    ds_refs is unregistered. The source can safely be released.
        //
        // a-d (INVALID)
        // apd (INVALID)
        //    Setting DSF_DELETED should also always atomically clear DSF_ARMED. If
-       //    the knote is gone from the kernel, it makes no sense whatsoever to
+       //    the muxnote is gone from the kernel, it makes no sense whatsoever to
        //    have it armed. And generally speaking, once `d` or `p` has been set,
        //    `a` cannot do a cleared -> set transition anymore
        //    (see _dispatch_source_try_set_armed).
        //
-       DSF_CANCEL_WAITER               = 0x0800, // synchronous waiters for cancel
-       DSF_CANCELED                    = 0x1000, // cancellation has been requested
-       DSF_ARMED                               = 0x2000, // source is armed
-       DSF_DEFERRED_DELETE             = 0x4000, // source is pending delete
-       DSF_DELETED                             = 0x8000, // source knote is deleted
+       DSF_WLH_CHANGED         = 0x04000000,
+       DSF_CANCEL_WAITER       = 0x08000000, // synchronous waiters for cancel
+       DSF_CANCELED            = 0x10000000, // cancellation has been requested
+       DSF_ARMED               = 0x20000000, // source is armed
+       DSF_DEFERRED_DELETE     = 0x40000000, // source is pending delete
+       DSF_DELETED             = 0x80000000, // source muxnote is deleted
 #define DSF_STATE_MASK (DSF_ARMED | DSF_DEFERRED_DELETE | DSF_DELETED)
 
-       DQF_WIDTH_MASK                  = 0xffff0000,
-#define DQF_WIDTH_SHIFT                16
+#define DQF_FLAGS_MASK        ((dispatch_queue_flags_t)0xffff0000)
+#define DQF_WIDTH_MASK        ((dispatch_queue_flags_t)0x0000ffff)
+#define DQF_WIDTH(n)          ((dispatch_queue_flags_t)(uint16_t)(n))
 );
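
After this relayout the 32-bit dq_atomic_flags word is split cleanly: the queue width occupies the low 16 bits (DQF_WIDTH_MASK) and every flag the high 16 (DQF_FLAGS_MASK), replacing the old scheme that kept the width in the top half. Reading both back is a pair of masks (a sketch; the local names are illustrative):

	uint32_t dqf = os_atomic_load2o(dq, dq_atomic_flags, relaxed);
	uint16_t width = (uint16_t)(dqf & DQF_WIDTH_MASK);   // low half
	bool thread_bound = (dqf & DQF_THREAD_BOUND) != 0;   // high half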
 
 #define _DISPATCH_QUEUE_HEADER(x) \
        struct os_mpsc_queue_s _as_oq[0]; \
        DISPATCH_OBJECT_HEADER(x); \
        _OS_MPSC_QUEUE_FIELDS(dq, dq_state); \
-       dispatch_queue_t dq_specific_q; \
-       union { \
-               uint32_t volatile dq_atomic_flags; \
-               DISPATCH_STRUCT_LITTLE_ENDIAN_2( \
-                       uint16_t dq_atomic_bits, \
-                       uint16_t dq_width \
-               ); \
-       }; \
        uint32_t dq_side_suspend_cnt; \
-       DISPATCH_INTROSPECTION_QUEUE_HEADER; \
-       dispatch_unfair_lock_s dq_sidelock
-       /* LP64: 32bit hole on LP64 */
+       dispatch_unfair_lock_s dq_sidelock; \
+       union { \
+               dispatch_queue_t dq_specific_q; \
+               struct dispatch_source_refs_s *ds_refs; \
+               struct dispatch_timer_source_refs_s *ds_timer_refs; \
+               struct dispatch_mach_recv_refs_s *dm_recv_refs; \
+       }; \
+       DISPATCH_UNION_LE(uint32_t volatile dq_atomic_flags, \
+               const uint16_t dq_width, \
+               const uint16_t __dq_opaque \
+       ); \
+       DISPATCH_INTROSPECTION_QUEUE_HEADER
+       /* LP64: 32bit hole */
 
 #define DISPATCH_QUEUE_HEADER(x) \
        struct dispatch_queue_s _as_dq[0]; \
        _DISPATCH_QUEUE_HEADER(x)
 
-#define DISPATCH_QUEUE_ALIGN  __attribute__((aligned(8)))
+struct _dispatch_unpadded_queue_s {
+       _DISPATCH_QUEUE_HEADER(dummy);
+};
 
-#define DISPATCH_QUEUE_WIDTH_POOL 0x7fff
-#define DISPATCH_QUEUE_WIDTH_MAX  0x7ffe
-#define DISPATCH_QUEUE_USES_REDIRECTION(width) \
-               ({ uint16_t _width = (width); \
-               _width > 1 && _width < DISPATCH_QUEUE_WIDTH_POOL; })
+#define DISPATCH_QUEUE_CACHELINE_PAD \
+               DISPATCH_CACHELINE_PAD_SIZE(struct _dispatch_unpadded_queue_s)
 
 #define DISPATCH_QUEUE_CACHELINE_PADDING \
                char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD]
-#ifdef __LP64__
-#define DISPATCH_QUEUE_CACHELINE_PAD (( \
-               (sizeof(uint32_t) - DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE) \
-               + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
-#elif OS_OBJECT_HAVE_OBJC1
-#define DISPATCH_QUEUE_CACHELINE_PAD (( \
-               (11*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE) \
-               + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
-#else
-#define DISPATCH_QUEUE_CACHELINE_PAD (( \
-               (12*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE) \
-               + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
-#endif
 
 /*
  * dispatch queues `dq_state` demystified
@@ -161,27 +155,27 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t,
  * Most Significant 32 bit Word
  * ----------------------------
  *
- * sc: suspend count (bits 63 - 57)
+ * sc: suspend count (bits 63 - 58)
  *    The suspend count unsurprisingly holds the suspend count of the queue
 *    Only 6 bits are stored inline. Extra counts are transferred in a side
  *    suspend count and when that has happened, the ssc: bit is set.
  */
-#define DISPATCH_QUEUE_SUSPEND_INTERVAL                0x0200000000000000ull
-#define DISPATCH_QUEUE_SUSPEND_HALF                    0x40u
+#define DISPATCH_QUEUE_SUSPEND_INTERVAL                0x0400000000000000ull
+#define DISPATCH_QUEUE_SUSPEND_HALF                    0x20u
 /*
- * ssc: side suspend count (bit 56)
+ * ssc: side suspend count (bit 57)
  *    This bit means that the total suspend count didn't fit in the inline
  *    suspend count, and that there are additional suspend counts stored in the
  *    `dq_side_suspend_cnt` field.
  */
-#define DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT    0x0100000000000000ull
+#define DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT    0x0200000000000000ull
 /*
- * i: inactive bit (bit 55)
+ * i: inactive bit (bit 56)
  *    This bit means that the object is inactive (see dispatch_activate)
  */
-#define DISPATCH_QUEUE_INACTIVE                                0x0080000000000000ull
+#define DISPATCH_QUEUE_INACTIVE                                0x0100000000000000ull
 /*
- * na: needs activation (bit 54)
+ * na: needs activation (bit 55)
  *    This bit is set if the object is created inactive. It tells
  *    dispatch_queue_wakeup to perform various tasks at first wakeup.
  *
@@ -189,27 +183,32 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t,
  *    the object from being woken up (because _dq_state_should_wakeup will say
  *    no), except in the dispatch_activate/dispatch_resume codepath.
  */
-#define DISPATCH_QUEUE_NEEDS_ACTIVATION                0x0040000000000000ull
+#define DISPATCH_QUEUE_NEEDS_ACTIVATION                0x0080000000000000ull
 /*
  * This mask covers the suspend count (sc), side suspend count bit (ssc),
  * inactive (i) and needs activation (na) bits
  */
-#define DISPATCH_QUEUE_SUSPEND_BITS_MASK       0xffc0000000000000ull
+#define DISPATCH_QUEUE_SUSPEND_BITS_MASK       0xff80000000000000ull
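
Because the suspend count occupies the topmost bits, plain arithmetic on dq_state manipulates it: suspending adds one DISPATCH_QUEUE_SUSPEND_INTERVAL, and the current count falls out of a division (equivalently, a shift by 58). A sketch under the definitions above:

	uint64_t state = os_atomic_add2o(dq, dq_state,
			DISPATCH_QUEUE_SUSPEND_INTERVAL, relaxed);  // suspend once
	uint32_t sc = (uint32_t)(state / DISPATCH_QUEUE_SUSPEND_INTERVAL);
	// sc is the 6-bit inline count; overflow beyond it is parked in
	// dq_side_suspend_cnt with the ssc bit set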
 /*
- * ib: in barrier (bit 53)
+ * ib: in barrier (bit 54)
  *    This bit is set when the queue is currently executing a barrier
  */
-#define DISPATCH_QUEUE_IN_BARRIER                      0x0020000000000000ull
+#define DISPATCH_QUEUE_IN_BARRIER                      0x0040000000000000ull
 /*
- * qf: queue full (bit 52)
+ * qf: queue full (bit 53)
 *    This bit is a subtle hack that allows checking, for any queue width,
 *    whether the full width of the queue is used or reserved (depending on
 *    the context). In other words, whether the queue has reached or
 *    overflowed its capacity.
  */
-#define DISPATCH_QUEUE_WIDTH_FULL_BIT                  0x0010000000000000ull
-#define DISPATCH_QUEUE_WIDTH_FULL                              0x8000ull
+#define DISPATCH_QUEUE_WIDTH_FULL_BIT          0x0020000000000000ull
+#define DISPATCH_QUEUE_WIDTH_FULL                      0x1000ull
+#define DISPATCH_QUEUE_WIDTH_POOL (DISPATCH_QUEUE_WIDTH_FULL - 1)
+#define DISPATCH_QUEUE_WIDTH_MAX  (DISPATCH_QUEUE_WIDTH_FULL - 2)
+#define DISPATCH_QUEUE_USES_REDIRECTION(width) \
+               ({ uint16_t _width = (width); \
+               _width > 1 && _width < DISPATCH_QUEUE_WIDTH_POOL; })
 /*
- * w:  width (bits 51 - 37)
+ * w:  width (bits 52 - 41)
  *    This encodes how many work items are in flight. Barriers hold `dq_width`
 *    of them while they run. This is encoded as a signed offset with respect
 *    to full use, where the negative values represent how many available slots
@@ -218,19 +217,19 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t,
  *
  *    When this value is positive, then `wo` is always set to 1.
  */
-#define DISPATCH_QUEUE_WIDTH_INTERVAL          0x0000002000000000ull
-#define DISPATCH_QUEUE_WIDTH_MASK                      0x001fffe000000000ull
-#define DISPATCH_QUEUE_WIDTH_SHIFT                     37
+#define DISPATCH_QUEUE_WIDTH_INTERVAL          0x0000020000000000ull
+#define DISPATCH_QUEUE_WIDTH_MASK                      0x003ffe0000000000ull
+#define DISPATCH_QUEUE_WIDTH_SHIFT                     41
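
A payoff of this biased encoding: DISPATCH_QUEUE_WIDTH_FULL_BIT is placed so that a single bit test answers "is the full width used or reserved" for any configured dq_width, with no comparison needed. The two common reads, sketched:

	uint16_t width_bits = (uint16_t)((state & DISPATCH_QUEUE_WIDTH_MASK)
			>> DISPATCH_QUEUE_WIDTH_SHIFT);  // the biased field itself
	bool full = (state & DISPATCH_QUEUE_WIDTH_FULL_BIT) != 0;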
 /*
- * pb: pending barrier (bit 36)
+ * pb: pending barrier (bit 40)
  *    Drainers set this bit when they couldn't run the next work item and it is
  *    a barrier. When this bit is set, `dq_width - 1` work item slots are
  *    reserved so that no wakeup happens until the last work item in flight
  *    completes.
  */
-#define DISPATCH_QUEUE_PENDING_BARRIER         0x0000001000000000ull
+#define DISPATCH_QUEUE_PENDING_BARRIER         0x0000010000000000ull
 /*
- * d: dirty bit (bit 35)
+ * d: dirty bit (bit 39)
  *    This bit is set when a queue transitions from empty to not empty.
  *    This bit is set before dq_items_head is set, with appropriate barriers.
  *    Any thread looking at a queue head is responsible for unblocking any
@@ -342,68 +341,70 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t,
  *
  *    So on the async "acquire" side, there is no subtlety at all.
  */
-#define DISPATCH_QUEUE_DIRTY                           0x0000000800000000ull
+#define DISPATCH_QUEUE_DIRTY                           0x0000008000000000ull
 /*
- * qo: (bit 34)
- *    Set when a queue has a useful override set.
- *    This bit is only cleared when the final drain_try_unlock() succeeds.
- *
- *    When the queue dq_override is touched (overrides or-ed in), usually with
- *    _dispatch_queue_override_priority(), then the HAS_OVERRIDE bit is set
- *    with a release barrier and one of these three things happen next:
- *
- *    - the queue is enqueued, which will cause it to be drained, and the
- *      override to be handled by _dispatch_queue_drain_try_unlock().
- *      In rare cases it could cause the queue to be queued while empty though.
+ * md: enqueued/draining on manager (bit 38)
+ *    Set when enqueued and draining on the manager hierarchy.
  *
- *    - the DIRTY bit is also set with a release barrier, which pairs with
- *      the handling of these bits by _dispatch_queue_drain_try_unlock(),
- *      so that dq_override is reset properly.
+ *    Unlike the ENQUEUED bit, it is kept until the queue is unlocked from its
+ *    invoke call on the manager. This is used to prevent stealing, and to
+ *    allow overrides to be applied down the target queue chain.
+ */
+#define DISPATCH_QUEUE_ENQUEUED_ON_MGR         0x0000004000000000ull
+/*
+ * r: queue graph role (bits 37 - 36)
+ *    Queue role in the target queue graph
  *
- *    - the queue was suspended, and _dispatch_queue_resume() will handle the
- *      override as part of its wakeup sequence.
+ *    11: unused
+ *    10: WLH base
+ *    01: non wlh base
+ *    00: inner queue
  */
-#define DISPATCH_QUEUE_HAS_OVERRIDE                    0x0000000400000000ull
+#define DISPATCH_QUEUE_ROLE_MASK                       0x0000003000000000ull
+#define DISPATCH_QUEUE_ROLE_BASE_WLH           0x0000002000000000ull
+#define DISPATCH_QUEUE_ROLE_BASE_ANON          0x0000001000000000ull
+#define DISPATCH_QUEUE_ROLE_INNER                      0x0000000000000000ull
 /*
- * p: pended bit (bit 33)
- *    Set when a drain lock has been pended. When this bit is set,
- *    the drain lock is taken and ENQUEUED is never set.
+ * o: has override (bit 35, if role is DISPATCH_QUEUE_ROLE_BASE_ANON)
+ *    Set when a queue has received a QOS override and needs to reset it.
+ *    This bit is only cleared when the final drain_try_unlock() succeeds.
  *
- *    This bit marks a queue that needs further processing but was kept pended
- *    by an async drainer (not reenqueued) in the hope of being able to drain
- *    it further later.
+ * sw: has received sync wait (bit 35, if role is DISPATCH_QUEUE_ROLE_BASE_WLH)
+ *    Set when a queue owner has been exposed to the kernel because of
+ *    dispatch_sync() contention.
  */
-#define DISPATCH_QUEUE_DRAIN_PENDED                    0x0000000200000000ull
+#define DISPATCH_QUEUE_RECEIVED_OVERRIDE       0x0000000800000000ull
+#define DISPATCH_QUEUE_RECEIVED_SYNC_WAIT      0x0000000800000000ull
 /*
- * e: enqueued bit (bit 32)
- *    Set when a queue is enqueued on its target queue
+ * max_qos: max qos (bits 34 - 32)
+ *   This is the maximum qos that has been enqueued on the queue
  */
-#define DISPATCH_QUEUE_ENQUEUED                                0x0000000100000000ull
+#define DISPATCH_QUEUE_MAX_QOS_MASK                    0x0000000700000000ull
+#define DISPATCH_QUEUE_MAX_QOS_SHIFT           32
 /*
  * dl: drain lock (bits 31-0)
 *    This is used by the normal drain to drain exclusively relative to other
  *    drain stealers (like the QoS Override codepath). It holds the identity
  *    (thread port) of the current drainer.
+ *
+ * st: sync transfer (bit 1 or 30)
+ *    Set when ownership of the drain lock has been transferred to a
+ *    dispatch_sync() waiter.
+ *
+ * e: enqueued bit (bit 0 or 31)
+ *    Set when a queue is enqueued on its target queue
  */
-#define DISPATCH_QUEUE_DRAIN_UNLOCK_MASK       0x00000002ffffffffull
-#ifdef DLOCK_NOWAITERS_BIT
-#define DISPATCH_QUEUE_DRAIN_OWNER_MASK \
-               ((uint64_t)(DLOCK_OWNER_MASK | DLOCK_NOFAILED_TRYLOCK_BIT))
-#define DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(v) \
-               (((v) & ~(DISPATCH_QUEUE_DRAIN_PENDED|DISPATCH_QUEUE_DRAIN_OWNER_MASK))\
-                               ^ DLOCK_NOWAITERS_BIT)
-#define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \
-               (DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_HAS_OVERRIDE | \
-                               DLOCK_NOWAITERS_BIT)
-#else
-#define DISPATCH_QUEUE_DRAIN_OWNER_MASK \
-               ((uint64_t)(DLOCK_OWNER_MASK | DLOCK_FAILED_TRYLOCK_BIT))
-#define DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(v) \
-               ((v) & ~(DISPATCH_QUEUE_DRAIN_PENDED|DISPATCH_QUEUE_DRAIN_OWNER_MASK))
+#define DISPATCH_QUEUE_DRAIN_OWNER_MASK                ((uint64_t)DLOCK_OWNER_MASK)
+#define DISPATCH_QUEUE_SYNC_TRANSFER           ((uint64_t)DLOCK_FAILED_TRYLOCK_BIT)
+#define DISPATCH_QUEUE_ENQUEUED                                ((uint64_t)DLOCK_WAITERS_BIT)
+
 #define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \
-               (DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_HAS_OVERRIDE | \
-                               DLOCK_WAITERS_BIT)
-#endif
+               (DISPATCH_QUEUE_ENQUEUED_ON_MGR | DISPATCH_QUEUE_ENQUEUED | \
+               DISPATCH_QUEUE_ROLE_MASK | DISPATCH_QUEUE_MAX_QOS_MASK)
+
+#define DISPATCH_QUEUE_DRAIN_UNLOCK_MASK \
+               (DISPATCH_QUEUE_DRAIN_OWNER_MASK | DISPATCH_QUEUE_RECEIVED_OVERRIDE | \
+               DISPATCH_QUEUE_RECEIVED_SYNC_WAIT | DISPATCH_QUEUE_SYNC_TRANSFER)
+
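Decoding the reorganized low half of dq_state is then mechanical (a sketch; the local names are illustrative):

	dispatch_qos_t max_qos = (dispatch_qos_t)((state &
			DISPATCH_QUEUE_MAX_QOS_MASK) >> DISPATCH_QUEUE_MAX_QOS_SHIFT);
	uint32_t owner = (uint32_t)(state & DISPATCH_QUEUE_DRAIN_OWNER_MASK);
	bool enqueued = (state & DISPATCH_QUEUE_ENQUEUED) != 0;
	bool on_mgr = (state & DISPATCH_QUEUE_ENQUEUED_ON_MGR) != 0;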
 /*
  *******************************************************************************
  *
@@ -425,8 +426,6 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t,
  * that right. To do so, prior to taking any decision, they also try to own
  * the full "barrier" width on the given queue.
  *
- * see _dispatch_try_lock_transfer_or_wakeup
- *
  *******************************************************************************
  *
  * Enqueuing and wakeup rules
@@ -497,12 +496,17 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t,
                (DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_INTERVAL)
 
 DISPATCH_CLASS_DECL(queue);
-#if !(defined(__cplusplus) && DISPATCH_INTROSPECTION)
+
+#if !defined(__cplusplus) || !DISPATCH_INTROSPECTION
 struct dispatch_queue_s {
        _DISPATCH_QUEUE_HEADER(queue);
        DISPATCH_QUEUE_CACHELINE_PADDING; // for static queues only
-} DISPATCH_QUEUE_ALIGN;
-#endif // !(defined(__cplusplus) && DISPATCH_INTROSPECTION)
+} DISPATCH_ATOMIC64_ALIGN;
+
+#if __has_feature(c_static_assert) && !DISPATCH_INTROSPECTION
+_Static_assert(sizeof(struct dispatch_queue_s) <= 128, "dispatch queue size");
+#endif
+#endif // !defined(__cplusplus) || !DISPATCH_INTROSPECTION
 
 DISPATCH_INTERNAL_SUBCLASS_DECL(queue_serial, queue);
 DISPATCH_INTERNAL_SUBCLASS_DECL(queue_concurrent, queue);
@@ -520,16 +524,14 @@ typedef union {
        struct dispatch_source_s *_ds;
        struct dispatch_mach_s *_dm;
        struct dispatch_queue_specific_queue_s *_dqsq;
-       struct dispatch_timer_aggregate_s *_dta;
 #if USE_OBJC
        os_mpsc_queue_t _ojbc_oq;
        dispatch_queue_t _objc_dq;
        dispatch_source_t _objc_ds;
        dispatch_mach_t _objc_dm;
        dispatch_queue_specific_queue_t _objc_dqsq;
-       dispatch_timer_aggregate_t _objc_dta;
 #endif
-} dispatch_queue_class_t __attribute__((__transparent_union__));
+} dispatch_queue_class_t DISPATCH_TRANSPARENT_UNION;
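
DISPATCH_TRANSPARENT_UNION wraps __attribute__((__transparent_union__)), which is what lets internal functions accept any queue-flavored pointer without casts: the caller passes a member type and the callee receives the union. A sketch with a hypothetical helper:

	void _example_poke(dispatch_queue_class_t dqu);  // hypothetical

	void caller(dispatch_queue_t dq, struct dispatch_source_s *ds)
	{
		_example_poke(dq);  // no cast needed: matches the _dq member
		_example_poke(ds);  // likewise for the _ds member
	}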
 
 typedef struct dispatch_thread_context_s *dispatch_thread_context_t;
 typedef struct dispatch_thread_context_s {
@@ -546,52 +548,59 @@ typedef struct dispatch_thread_frame_s {
        // must be in the same order as our TSD keys!
        dispatch_queue_t dtf_queue;
        dispatch_thread_frame_t dtf_prev;
-       struct dispatch_object_s *dtf_deferred;
 } dispatch_thread_frame_s;
 
-DISPATCH_ENUM(dispatch_queue_wakeup_target, long,
-       DISPATCH_QUEUE_WAKEUP_NONE = 0,
-       DISPATCH_QUEUE_WAKEUP_TARGET,
-       DISPATCH_QUEUE_WAKEUP_MGR,
-);
+typedef dispatch_queue_t dispatch_queue_wakeup_target_t;
+#define DISPATCH_QUEUE_WAKEUP_NONE           ((dispatch_queue_wakeup_target_t)0)
+#define DISPATCH_QUEUE_WAKEUP_TARGET         ((dispatch_queue_wakeup_target_t)1)
+#define DISPATCH_QUEUE_WAKEUP_MGR            (&_dispatch_mgr_q)
+#define DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT ((dispatch_queue_wakeup_target_t)-1)
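
The former enum is gone: a wakeup target is now a real queue pointer, with 0, 1 and -1 reserved as sentinels that cannot collide with valid addresses, and the manager case simply is the manager queue. Consumers can both compare and use the value directly (a sketch):

	void handle_target(dispatch_queue_wakeup_target_t tq)
	{
		if (tq == DISPATCH_QUEUE_WAKEUP_NONE) {
			return;  // nothing to wake
		}
		if (tq == DISPATCH_QUEUE_WAKEUP_TARGET) {
			// sentinel standing in for the object's own target queue
		} else if (tq != DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT) {
			// a real queue, e.g. DISPATCH_QUEUE_WAKEUP_MGR, which is
			// literally &_dispatch_mgr_q
		}
	}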
 
-void _dispatch_queue_class_override_drainer(dispatch_queue_t dqu,
-               pthread_priority_t pp, dispatch_wakeup_flags_t flags);
-void _dispatch_queue_class_wakeup(dispatch_queue_t dqu, pthread_priority_t pp,
+void _dispatch_queue_class_wakeup(dispatch_queue_t dqu, dispatch_qos_t qos,
                dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target);
-
-void _dispatch_queue_destroy(dispatch_queue_t dq);
-void _dispatch_queue_dispose(dispatch_queue_t dq);
+dispatch_priority_t _dispatch_queue_compute_priority_and_wlh(
+               dispatch_queue_t dq, dispatch_wlh_t *wlh_out);
+void _dispatch_queue_destroy(dispatch_queue_t dq, bool *allow_free);
+void _dispatch_queue_dispose(dispatch_queue_t dq, bool *allow_free);
+void _dispatch_queue_xref_dispose(struct dispatch_queue_s *dq);
 void _dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq);
 void _dispatch_queue_suspend(dispatch_queue_t dq);
 void _dispatch_queue_resume(dispatch_queue_t dq, bool activate);
-void _dispatch_queue_finalize_activation(dispatch_queue_t dq);
-void _dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_flags_t flags);
-void _dispatch_queue_push_list_slow(dispatch_queue_t dq, unsigned int n);
+void _dispatch_queue_finalize_activation(dispatch_queue_t dq,
+               bool *allow_resume);
+void _dispatch_queue_invoke(dispatch_queue_t dq,
+               dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags);
+void _dispatch_global_queue_poke(dispatch_queue_t dq, int n, int floor);
 void _dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t dou,
-               pthread_priority_t pp);
-void _dispatch_try_lock_transfer_or_wakeup(dispatch_queue_t dq);
-void _dispatch_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
+               dispatch_qos_t qos);
+void _dispatch_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
                dispatch_wakeup_flags_t flags);
-dispatch_queue_t _dispatch_queue_serial_drain(dispatch_queue_t dq,
-               dispatch_invoke_flags_t flags, uint64_t *owned,
-               struct dispatch_object_s **dc_ptr);
-void _dispatch_queue_drain_deferred_invoke(dispatch_queue_t dq,
-               dispatch_invoke_flags_t flags, uint64_t to_unlock,
-               struct dispatch_object_s *dc);
-void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t
-               dqsq);
-void _dispatch_root_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
+dispatch_queue_wakeup_target_t _dispatch_queue_serial_drain(dispatch_queue_t dq,
+               dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
+               uint64_t *owned);
+void _dispatch_queue_drain_sync_waiter(dispatch_queue_t dq,
+               dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
+               uint64_t owned);
+void _dispatch_queue_specific_queue_dispose(
+               dispatch_queue_specific_queue_t dqsq, bool *allow_free);
+void _dispatch_root_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
                dispatch_wakeup_flags_t flags);
-void _dispatch_root_queue_drain_deferred_item(dispatch_queue_t dq,
-               struct dispatch_object_s *dou, pthread_priority_t pp);
-void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq);
-void _dispatch_main_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
+void _dispatch_root_queue_push(dispatch_queue_t dq, dispatch_object_t dou,
+               dispatch_qos_t qos);
+#if DISPATCH_USE_KEVENT_WORKQUEUE
+void _dispatch_root_queue_drain_deferred_item(dispatch_deferred_items_t ddi
+               DISPATCH_PERF_MON_ARGS_PROTO);
+void _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi
+               DISPATCH_PERF_MON_ARGS_PROTO);
+#endif
+void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq,
+               bool *allow_free);
+void _dispatch_main_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
                dispatch_wakeup_flags_t flags);
-void _dispatch_runloop_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
+void _dispatch_runloop_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
                dispatch_wakeup_flags_t flags);
 void _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq);
-void _dispatch_runloop_queue_dispose(dispatch_queue_t dq);
+void _dispatch_runloop_queue_dispose(dispatch_queue_t dq, bool *allow_free);
 void _dispatch_mgr_queue_drain(void);
 #if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
 void _dispatch_mgr_priority_init(void);
@@ -603,13 +612,13 @@ void _dispatch_kevent_workqueue_init(void);
 #else
 static inline void _dispatch_kevent_workqueue_init(void) {}
 #endif
-void _dispatch_sync_recurse_invoke(void *ctxt);
 void _dispatch_apply_invoke(void *ctxt);
 void _dispatch_apply_redirect_invoke(void *ctxt);
 void _dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt,
                dispatch_function_t func);
+#define DISPATCH_BARRIER_TRYSYNC_SUSPEND 0x1
 void _dispatch_barrier_trysync_or_async_f(dispatch_queue_t dq, void *ctxt,
-               dispatch_function_t func);
+               dispatch_function_t func, uint32_t flags);
 void _dispatch_queue_atfork_child(void);
 
 #if DISPATCH_DEBUG
@@ -623,10 +632,9 @@ size_t dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz);
 size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf,
                size_t bufsiz);
 
-#define DISPATCH_QUEUE_QOS_COUNT 6
-#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QUEUE_QOS_COUNT * 2)
+#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QOS_MAX * 2)
 
-// must be in lowest to highest qos order (as encoded in pthread_priority_t)
+// must be in lowest to highest qos order (as encoded in dispatch_qos_t)
 // overcommit qos index values need bit 1 set
 enum {
        DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS = 0,
@@ -644,18 +652,25 @@ enum {
        _DISPATCH_ROOT_QUEUE_IDX_COUNT,
 };
 
+// skip zero
+// 1 - main_q
+// 2 - mgr_q
+// 3 - mgr_root_q
+// 4,5,6,7,8,9,10,11,12,13,14,15 - global queues
+// we use 'xadd' on Intel, so the initial value == next assigned
+#define DISPATCH_QUEUE_SERIAL_NUMBER_INIT 16
 extern unsigned long volatile _dispatch_queue_serial_numbers;
 extern struct dispatch_queue_s _dispatch_root_queues[];
 extern struct dispatch_queue_s _dispatch_mgr_q;
 void _dispatch_root_queues_init(void);
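
The "xadd" remark above is about lock-free allocation: each new queue takes its serial number with an atomic fetch-and-increment, which x86 compiles to a single lock xadd, and starting at 16 keeps the reserved numbers 1-15 out of its range. The allocation, sketched in the usual form:

	unsigned long serialnum = os_atomic_inc_orig(
			&_dispatch_queue_serial_numbers, relaxed);
	// inc_orig returns the pre-increment value, so the first caller
	// gets DISPATCH_QUEUE_SERIAL_NUMBER_INIT == 16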
 
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-extern pthread_priority_t _dispatch_background_priority;
-extern pthread_priority_t _dispatch_user_initiated_priority;
+#if DISPATCH_DEBUG
+#define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \
+       dispatch_assert_queue(&_dispatch_mgr_q)
+#else
+#define DISPATCH_ASSERT_ON_MANAGER_QUEUE()
 #endif
 
-typedef uint8_t _dispatch_qos_class_t;
-
 #pragma mark -
 #pragma mark dispatch_queue_attr_t
 
@@ -668,8 +683,7 @@ typedef enum {
 DISPATCH_CLASS_DECL(queue_attr);
 struct dispatch_queue_attr_s {
        OS_OBJECT_STRUCT_HEADER(dispatch_queue_attr);
-       _dispatch_qos_class_t dqa_qos_class;
-       int8_t   dqa_relative_priority;
+       dispatch_priority_requested_t dqa_qos_and_relpri;
        uint16_t dqa_overcommit:2;
        uint16_t dqa_autorelease_frequency:2;
        uint16_t dqa_concurrent:1;
@@ -753,7 +767,6 @@ dispatch_queue_attr_t _dispatch_get_default_queue_attr(void);
        void *dc_ctxt; \
        void *dc_data; \
        void *dc_other
-#define _DISPATCH_SIZEOF_PTR 8
 #elif OS_OBJECT_HAVE_OBJC1
 #define DISPATCH_CONTINUATION_HEADER(x) \
        dispatch_function_t dc_func; \
@@ -771,7 +784,6 @@ dispatch_queue_attr_t _dispatch_get_default_queue_attr(void);
        void *dc_ctxt; \
        void *dc_data; \
        void *dc_other
-#define _DISPATCH_SIZEOF_PTR 4
 #else
 #define DISPATCH_CONTINUATION_HEADER(x) \
        union { \
@@ -789,24 +801,23 @@ dispatch_queue_attr_t _dispatch_get_default_queue_attr(void);
        void *dc_ctxt; \
        void *dc_data; \
        void *dc_other
-#define _DISPATCH_SIZEOF_PTR 4
 #endif
 #define _DISPATCH_CONTINUATION_PTRS 8
 #if DISPATCH_HW_CONFIG_UP
 // UP devices don't contend on continuations so we don't need to force them to
 // occupy a whole cacheline (which is intended to avoid contention)
 #define DISPATCH_CONTINUATION_SIZE \
-               (_DISPATCH_CONTINUATION_PTRS * _DISPATCH_SIZEOF_PTR)
+               (_DISPATCH_CONTINUATION_PTRS * DISPATCH_SIZEOF_PTR)
 #else
 #define DISPATCH_CONTINUATION_SIZE  ROUND_UP_TO_CACHELINE_SIZE( \
-               (_DISPATCH_CONTINUATION_PTRS * _DISPATCH_SIZEOF_PTR))
+               (_DISPATCH_CONTINUATION_PTRS * DISPATCH_SIZEOF_PTR))
 #endif
 #define ROUND_UP_TO_CONTINUATION_SIZE(x) \
                (((x) + (DISPATCH_CONTINUATION_SIZE - 1u)) & \
                ~(DISPATCH_CONTINUATION_SIZE - 1u))
 
 // continuation is a dispatch_sync or dispatch_barrier_sync
-#define DISPATCH_OBJ_SYNC_SLOW_BIT                     0x001ul
+#define DISPATCH_OBJ_SYNC_WAITER_BIT           0x001ul
 // continuation acts as a barrier
 #define DISPATCH_OBJ_BARRIER_BIT                       0x002ul
 // continuation resources are freed on run
@@ -822,17 +833,35 @@ dispatch_queue_attr_t _dispatch_get_default_queue_attr(void);
 #define DISPATCH_OBJ_CTXT_FETCH_BIT                    0x040ul
 // use the voucher from the continuation even if the queue has voucher set
 #define DISPATCH_OBJ_ENFORCE_VOUCHER           0x080ul
+// never set on continuations, used by mach.c only
+#define DISPATCH_OBJ_MACH_BARRIER              0x1000000ul
 
-struct dispatch_continuation_s {
+typedef struct dispatch_continuation_s {
        struct dispatch_object_s _as_do[0];
        DISPATCH_CONTINUATION_HEADER(continuation);
-};
-typedef struct dispatch_continuation_s *dispatch_continuation_t;
+} *dispatch_continuation_t;
+
+typedef struct dispatch_sync_context_s {
+       struct dispatch_object_s _as_do[0];
+       struct dispatch_continuation_s _as_dc[0];
+       DISPATCH_CONTINUATION_HEADER(continuation);
+       dispatch_function_t dsc_func;
+       void *dsc_ctxt;
+#if DISPATCH_COCOA_COMPAT
+       dispatch_thread_frame_s dsc_dtf;
+#endif
+       dispatch_thread_event_s dsc_event;
+       dispatch_tid dsc_waiter;
+       dispatch_qos_t dsc_override_qos_floor;
+       dispatch_qos_t dsc_override_qos;
+       bool dsc_wlh_was_first;
+       bool dsc_release_storage;
+} *dispatch_sync_context_t;
 
 typedef struct dispatch_continuation_vtable_s {
        _OS_OBJECT_CLASS_HEADER();
        DISPATCH_INVOKABLE_VTABLE_HEADER(dispatch_continuation);
-} *dispatch_continuation_vtable_t;
+} const *dispatch_continuation_vtable_t;
 
 #ifndef DISPATCH_CONTINUATION_CACHE_LIMIT
 #if TARGET_OS_EMBEDDED
@@ -848,8 +877,9 @@ dispatch_continuation_t _dispatch_continuation_alloc_from_heap(void);
 void _dispatch_continuation_free_to_heap(dispatch_continuation_t c);
 void _dispatch_continuation_async(dispatch_queue_t dq,
        dispatch_continuation_t dc);
-void _dispatch_continuation_pop(dispatch_object_t dou, dispatch_queue_t dq,
-               dispatch_invoke_flags_t flags);
+void _dispatch_continuation_pop(dispatch_object_t dou,
+               dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
+               dispatch_queue_t dq);
 void _dispatch_continuation_invoke(dispatch_object_t dou,
                voucher_t override_voucher, dispatch_invoke_flags_t flags);
 
@@ -871,6 +901,7 @@ enum {
        DC_MACH_SEND_BARRRIER_DRAIN_TYPE,
        DC_MACH_SEND_BARRIER_TYPE,
        DC_MACH_RECV_BARRIER_TYPE,
+       DC_MACH_ASYNC_REPLY_TYPE,
 #if HAVE_PTHREAD_WORKQUEUE_QOS
        DC_OVERRIDE_STEALING_TYPE,
        DC_OVERRIDE_OWNING_TYPE,
@@ -897,12 +928,12 @@ extern const struct dispatch_continuation_vtable_s
 
 void
 _dispatch_async_redirect_invoke(dispatch_continuation_t dc,
-               dispatch_invoke_flags_t flags);
+               dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags);
 
 #if HAVE_PTHREAD_WORKQUEUE_QOS
 void
 _dispatch_queue_override_invoke(dispatch_continuation_t dc,
-               dispatch_invoke_flags_t flags);
+               dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags);
 #endif
 
 #define DC_VTABLE(name)  (&_dispatch_continuation_vtables[DC_##name##_TYPE])
@@ -920,8 +951,14 @@ _dispatch_queue_override_invoke(dispatch_continuation_t dc,
 void _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pri,
                mach_voucher_t kv);
 voucher_t _dispatch_set_priority_and_voucher_slow(pthread_priority_t pri,
-               voucher_t voucher, _dispatch_thread_set_self_t flags);
-
+               voucher_t voucher, dispatch_thread_set_self_t flags);
+#else
+static inline void
+_dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pri,
+               mach_voucher_t kv)
+{
+       (void)pri; (void)kv;
+}
 #endif
 #pragma mark -
 #pragma mark dispatch_apply_t
@@ -932,7 +969,7 @@ struct dispatch_apply_s {
        dispatch_continuation_t da_dc;
        dispatch_thread_event_s da_event;
        dispatch_invoke_flags_t da_flags;
-       uint32_t da_thr_cnt;
+       int32_t da_thr_cnt;
 };
 typedef struct dispatch_apply_s *dispatch_apply_t;
 
@@ -941,7 +978,7 @@ typedef struct dispatch_apply_s *dispatch_apply_t;
 
 #ifdef __BLOCKS__
 
-#define DISPATCH_BLOCK_API_MASK (0x80u - 1)
+#define DISPATCH_BLOCK_API_MASK (0x100u - 1)
 #define DISPATCH_BLOCK_HAS_VOUCHER (1u << 31)
 #define DISPATCH_BLOCK_HAS_PRIORITY (1u << 30)
 
@@ -988,15 +1025,13 @@ void _dispatch_block_sync_invoke(void *block);
 
 void _dispatch_continuation_init_slow(dispatch_continuation_t dc,
                dispatch_queue_class_t dqu, dispatch_block_flags_t flags);
-void _dispatch_continuation_update_bits(dispatch_continuation_t dc,
-               uintptr_t dc_flags);
 
-bool _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt,
+long _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt,
                dispatch_function_t func);
 
 /* exported for tests in dispatch_trysync.c */
 DISPATCH_EXPORT DISPATCH_NOTHROW
-bool _dispatch_trysync_f(dispatch_queue_t dq, void *ctxt,
+long _dispatch_trysync_f(dispatch_queue_t dq, void *ctxt,
                dispatch_function_t f);
 
 #endif /* __BLOCKS__ */
index dc97ba90b79a5d1725c1ef426a4d3194f1bb2437..3fe94c6e3efda16a3e7bc70b74cc3bd8ac35d308 100644 (file)
@@ -32,10 +32,9 @@ _dispatch_semaphore_class_init(long value, dispatch_semaphore_class_t dsemau)
        struct dispatch_semaphore_header_s *dsema = dsemau._dsema_hdr;
 
        dsema->do_next = DISPATCH_OBJECT_LISTLESS;
-       dsema->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,
-                       false);
+       dsema->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false);
        dsema->dsema_value = value;
-       _os_semaphore_init(&dsema->dsema_sema, _OS_SEM_POLICY_FIFO);
+       _dispatch_sema4_init(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO);
 }
 
 #pragma mark -
@@ -53,15 +52,16 @@ dispatch_semaphore_create(long value)
                return DISPATCH_BAD_INPUT;
        }
 
-       dsema = (dispatch_semaphore_t)_dispatch_alloc(DISPATCH_VTABLE(semaphore),
-                       sizeof(struct dispatch_semaphore_s));
+       dsema = (dispatch_semaphore_t)_dispatch_object_alloc(
+                       DISPATCH_VTABLE(semaphore), sizeof(struct dispatch_semaphore_s));
        _dispatch_semaphore_class_init(value, dsema);
        dsema->dsema_orig = value;
        return dsema;
 }
 
 void
-_dispatch_semaphore_dispose(dispatch_object_t dou)
+_dispatch_semaphore_dispose(dispatch_object_t dou,
+               DISPATCH_UNUSED bool *allow_free)
 {
        dispatch_semaphore_t dsema = dou._dsema;
 
@@ -70,7 +70,7 @@ _dispatch_semaphore_dispose(dispatch_object_t dou)
                                "Semaphore object deallocated while in use");
        }
 
-       _os_semaphore_dispose(&dsema->dsema_sema);
+       _dispatch_sema4_dispose(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO);
 }
 
 size_t
@@ -95,8 +95,8 @@ DISPATCH_NOINLINE
 long
 _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema)
 {
-       _os_semaphore_create(&dsema->dsema_sema, _OS_SEM_POLICY_FIFO);
-       _os_semaphore_signal(&dsema->dsema_sema, 1);
+       _dispatch_sema4_create(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO);
+       _dispatch_sema4_signal(&dsema->dsema_sema, 1);
        return 1;
 }
 
@@ -121,10 +121,10 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema,
 {
        long orig;
 
-       _os_semaphore_create(&dsema->dsema_sema, _OS_SEM_POLICY_FIFO);
+       _dispatch_sema4_create(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO);
        switch (timeout) {
        default:
-               if (!_os_semaphore_timedwait(&dsema->dsema_sema, timeout)) {
+               if (!_dispatch_sema4_timedwait(&dsema->dsema_sema, timeout)) {
                        break;
                }
                // Fall through and try to undo what the fast path did to
@@ -134,13 +134,13 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema,
                while (orig < 0) {
                        if (os_atomic_cmpxchgvw2o(dsema, dsema_value, orig, orig + 1,
                                        &orig, relaxed)) {
-                               return _OS_SEM_TIMEOUT();
+                               return _DSEMA4_TIMEOUT();
                        }
                }
                // Another thread called semaphore_signal().
                // Fall through and drain the wakeup.
        case DISPATCH_TIME_FOREVER:
-               _os_semaphore_wait(&dsema->dsema_sema);
+               _dispatch_sema4_wait(&dsema->dsema_sema);
                break;
        }
        return 0;
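
The undo loop above pairs with a fast path that is not part of this diff: dispatch_semaphore_wait() optimistically decrements dsema_value and only enters this slow path when the result went negative, which is why a timeout must cmpxchg the count back up unless a racing signal already consumed the decrement. That fast path, sketched under the usual assumptions:

	long
	dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout)
	{
		long value = os_atomic_dec2o(dsema, dsema_value, acquire);
		if (likely(value >= 0)) {
			return 0;  // a token was available; no kernel involvement
		}
		return _dispatch_semaphore_wait_slow(dsema, timeout);
	}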
@@ -163,7 +163,7 @@ DISPATCH_ALWAYS_INLINE
 static inline dispatch_group_t
 _dispatch_group_create_with_count(long count)
 {
-       dispatch_group_t dg = (dispatch_group_t)_dispatch_alloc(
+       dispatch_group_t dg = (dispatch_group_t)_dispatch_object_alloc(
                        DISPATCH_VTABLE(group), sizeof(struct dispatch_group_s));
        _dispatch_semaphore_class_init(count, dg);
        if (count) {
@@ -214,9 +214,10 @@ _dispatch_group_wake(dispatch_group_t dg, bool needs_release)
        rval = (long)os_atomic_xchg2o(dg, dg_waiters, 0, relaxed);
        if (rval) {
                // wake group waiters
-               _os_semaphore_create(&dg->dg_sema, _OS_SEM_POLICY_FIFO);
-               _os_semaphore_signal(&dg->dg_sema, rval);
+               _dispatch_sema4_create(&dg->dg_sema, _DSEMA4_POLICY_FIFO);
+               _dispatch_sema4_signal(&dg->dg_sema, rval);
        }
+       uint16_t refs = needs_release ? 1 : 0; // <rdar://problem/22318411>
        if (head) {
                // async group notify blocks
                do {
@@ -225,11 +226,9 @@ _dispatch_group_wake(dispatch_group_t dg, bool needs_release)
                        _dispatch_continuation_async(dsn_queue, head);
                        _dispatch_release(dsn_queue);
                } while ((head = next));
-               _dispatch_release(dg);
-       }
-       if (needs_release) {
-               _dispatch_release(dg); // <rdar://problem/22318411>
+               refs++;
        }
+       if (refs) _dispatch_release_n(dg, refs);
        return 0;
 }
 
@@ -247,7 +246,7 @@ dispatch_group_leave(dispatch_group_t dg)
 }
 
 void
-_dispatch_group_dispose(dispatch_object_t dou)
+_dispatch_group_dispose(dispatch_object_t dou, DISPATCH_UNUSED bool *allow_free)
 {
        dispatch_group_t dg = dou._dg;
 
@@ -256,7 +255,7 @@ _dispatch_group_dispose(dispatch_object_t dou)
                                "Group object deallocated while in use");
        }
 
-       _os_semaphore_dispose(&dg->dg_sema);
+       _dispatch_sema4_dispose(&dg->dg_sema, _DSEMA4_POLICY_FIFO);
 }
 
 size_t
@@ -301,10 +300,10 @@ _dispatch_group_wait_slow(dispatch_group_t dg, dispatch_time_t timeout)
                timeout = DISPATCH_TIME_FOREVER;
        }
 
-       _os_semaphore_create(&dg->dg_sema, _OS_SEM_POLICY_FIFO);
+       _dispatch_sema4_create(&dg->dg_sema, _DSEMA4_POLICY_FIFO);
        switch (timeout) {
        default:
-               if (!_os_semaphore_timedwait(&dg->dg_sema, timeout)) {
+               if (!_dispatch_sema4_timedwait(&dg->dg_sema, timeout)) {
                        break;
                }
                // Fall through and try to undo the earlier change to
@@ -314,13 +313,13 @@ _dispatch_group_wait_slow(dispatch_group_t dg, dispatch_time_t timeout)
                while (orig_waiters) {
                        if (os_atomic_cmpxchgvw2o(dg, dg_waiters, orig_waiters,
                                        orig_waiters - 1, &orig_waiters, relaxed)) {
-                               return _OS_SEM_TIMEOUT();
+                               return _DSEMA4_TIMEOUT();
                        }
                }
                // Another thread is running _dispatch_group_wake()
                // Fall through and drain the wakeup.
        case DISPATCH_TIME_FOREVER:
-               _os_semaphore_wait(&dg->dg_sema);
+               _dispatch_sema4_wait(&dg->dg_sema);
                break;
        }
        return 0;
@@ -333,7 +332,7 @@ dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout)
                return 0;
        }
        if (timeout == 0) {
-               return _OS_SEM_TIMEOUT();
+               return _DSEMA4_TIMEOUT();
        }
        return _dispatch_group_wait_slow(dg, timeout);
 }
index f16152de2339048d00a3547bede4ce1ccf78f81e..f9d0983aa0bed14b8eae6b99ee1dd2a8d540c3b6 100644 (file)
@@ -32,7 +32,7 @@ struct dispatch_queue_s;
 #define DISPATCH_SEMAPHORE_HEADER(cls, ns) \
        DISPATCH_OBJECT_HEADER(cls); \
        long volatile ns##_value; \
-       _os_semaphore_t ns##_sema
+       _dispatch_sema4_t ns##_sema
 
 struct dispatch_semaphore_header_s {
        DISPATCH_SEMAPHORE_HEADER(semaphore, dsema);
@@ -60,14 +60,14 @@ typedef union {
        dispatch_semaphore_t _objc_dsema;
        dispatch_group_t _objc_dg;
 #endif
-} dispatch_semaphore_class_t __attribute__((__transparent_union__));
+} dispatch_semaphore_class_t DISPATCH_TRANSPARENT_UNION;
 
 dispatch_group_t _dispatch_group_create_and_enter(void);
-void _dispatch_group_dispose(dispatch_object_t dou);
+void _dispatch_group_dispose(dispatch_object_t dou, bool *allow_free);
 size_t _dispatch_group_debug(dispatch_object_t dou, char *buf,
                size_t bufsiz);
 
-void _dispatch_semaphore_dispose(dispatch_object_t dou);
+void _dispatch_semaphore_dispose(dispatch_object_t dou, bool *allow_free);
 size_t _dispatch_semaphore_debug(dispatch_object_t dou, char *buf,
                size_t bufsiz);
 
index db288225e189ec2eea8dfef55716fa8cce1f0960..28e1c53a964c2840c790cfae03d65629bdddbc86 100644 (file)
 #define __DISPATCH_OS_SHIMS__
 
 #include <pthread.h>
-#if HAVE_PTHREAD_QOS_H && __has_include(<pthread/qos.h>)
-#include <pthread/qos.h>
-#if __has_include(<pthread/qos_private.h>)
-#include <pthread/qos_private.h>
-#define _DISPATCH_QOS_CLASS_USER_INTERACTIVE QOS_CLASS_USER_INTERACTIVE
-#define _DISPATCH_QOS_CLASS_USER_INITIATED QOS_CLASS_USER_INITIATED
-#define _DISPATCH_QOS_CLASS_DEFAULT QOS_CLASS_DEFAULT
-#define _DISPATCH_QOS_CLASS_UTILITY QOS_CLASS_UTILITY
-#define _DISPATCH_QOS_CLASS_BACKGROUND QOS_CLASS_BACKGROUND
-#define _DISPATCH_QOS_CLASS_UNSPECIFIED QOS_CLASS_UNSPECIFIED
-#else // pthread/qos_private.h
-typedef unsigned long pthread_priority_t;
-#endif // pthread/qos_private.h
-#if __has_include(<sys/qos_private.h>)
-#include <sys/qos_private.h>
-#define _DISPATCH_QOS_CLASS_MAINTENANCE QOS_CLASS_MAINTENANCE
-#else // sys/qos_private.h
-#define _DISPATCH_QOS_CLASS_MAINTENANCE        0x05
-#endif // sys/qos_private.h
-#ifndef _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
-#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000
-#endif
-#ifndef _PTHREAD_PRIORITY_INHERIT_FLAG
-#define _PTHREAD_PRIORITY_INHERIT_FLAG 0x40000000
-#endif
-#ifndef _PTHREAD_PRIORITY_ROOTQUEUE_FLAG
-#define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000
-#endif
-#ifndef _PTHREAD_PRIORITY_SCHED_PRI_FLAG
-#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000
-#endif
-#ifndef _PTHREAD_PRIORITY_ENFORCE_FLAG
-#define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000
-#endif
-#ifndef _PTHREAD_PRIORITY_OVERRIDE_FLAG
-#define _PTHREAD_PRIORITY_OVERRIDE_FLAG 0x08000000
-#endif
-#ifndef _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG
-#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000
-#endif
-#ifndef _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG
-#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000
-#endif
-#ifndef _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG
-#define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000
-#endif
-
-#else // HAVE_PTHREAD_QOS_H
-typedef unsigned int qos_class_t;
-typedef unsigned long pthread_priority_t;
-#define QOS_MIN_RELATIVE_PRIORITY (-15)
-#define _PTHREAD_PRIORITY_FLAGS_MASK (~0xffffff)
-#define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x00ffff00
-#define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT (8ull)
-#define _PTHREAD_PRIORITY_PRIORITY_MASK 0x000000ff
-#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000
-#define _PTHREAD_PRIORITY_INHERIT_FLAG 0x40000000
-#define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000
-#define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000
-#define _PTHREAD_PRIORITY_OVERRIDE_FLAG 0x08000000
-#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000
-#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000
-#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000
-#endif // HAVE_PTHREAD_QOS_H
-
 #ifdef __linux__
 #include "shims/linux_stubs.h"
 #endif
 
-typedef uint32_t dispatch_priority_t;
-#define DISPATCH_SATURATED_OVERRIDE ((dispatch_priority_t)UINT32_MAX)
-
-#ifndef _DISPATCH_QOS_CLASS_USER_INTERACTIVE
-enum {
-       _DISPATCH_QOS_CLASS_USER_INTERACTIVE = 0x21,
-       _DISPATCH_QOS_CLASS_USER_INITIATED = 0x19,
-       _DISPATCH_QOS_CLASS_DEFAULT = 0x15,
-       _DISPATCH_QOS_CLASS_UTILITY = 0x11,
-       _DISPATCH_QOS_CLASS_BACKGROUND = 0x09,
-       _DISPATCH_QOS_CLASS_MAINTENANCE = 0x05,
-       _DISPATCH_QOS_CLASS_UNSPECIFIED = 0x00,
-};
-#endif // _DISPATCH_QOS_CLASS_USER_INTERACTIVE
+#ifdef __ANDROID__
+#include "shims/android_stubs.h"
+#endif
+
+#include "shims/hw_config.h"
+#include "shims/priority.h"
+
 #if HAVE_PTHREAD_WORKQUEUES
 #if __has_include(<pthread/workqueue_private.h>)
 #include <pthread/workqueue_private.h>
@@ -122,6 +50,10 @@ enum {
 #endif
 #endif // HAVE_PTHREAD_WORKQUEUES
 
+#if DISPATCH_USE_INTERNAL_WORKQUEUE
+#include "event/workqueue_internal.h"
+#endif
+
 #if HAVE_PTHREAD_NP_H
 #include <pthread_np.h>
 #endif
@@ -207,6 +139,60 @@ _pthread_qos_override_end_direct(mach_port_t thread, void *resource)
 #define _PTHREAD_SET_SELF_WQ_KEVENT_UNBIND 0
 #endif
 
+#if PTHREAD_WORKQUEUE_SPI_VERSION < 20160427
+static inline bool
+_pthread_workqueue_should_narrow(pthread_priority_t priority)
+{
+       (void)priority;
+       return false;
+}
+#endif
+
+#if HAVE_PTHREAD_QOS_H && __has_include(<pthread/qos_private.h>) && \
+               defined(PTHREAD_MAX_PARALLELISM_PHYSICAL) && \
+               DISPATCH_HAVE_HW_CONFIG_COMMPAGE && \
+               DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(109900)
+#define DISPATCH_USE_PTHREAD_QOS_MAX_PARALLELISM 1
+#define DISPATCH_MAX_PARALLELISM_PHYSICAL PTHREAD_MAX_PARALLELISM_PHYSICAL
+#else
+#define DISPATCH_MAX_PARALLELISM_PHYSICAL 0x1
+#endif
+#define DISPATCH_MAX_PARALLELISM_ACTIVE 0x2
+_Static_assert(!(DISPATCH_MAX_PARALLELISM_PHYSICAL &
+               DISPATCH_MAX_PARALLELISM_ACTIVE), "Overlapping parallelism flags");
+
+DISPATCH_ALWAYS_INLINE
+static inline uint32_t
+_dispatch_qos_max_parallelism(dispatch_qos_t qos, unsigned long flags)
+{
+       uint32_t p;
+       int r = 0;
+
+       if (qos) {
+#if DISPATCH_USE_PTHREAD_QOS_MAX_PARALLELISM
+               r = pthread_qos_max_parallelism(_dispatch_qos_to_qos_class(qos),
+                               flags & PTHREAD_MAX_PARALLELISM_PHYSICAL);
+#endif
+       }
+       if (likely(r > 0)) {
+               p = (uint32_t)r;
+       } else {
+               p = (flags & DISPATCH_MAX_PARALLELISM_PHYSICAL) ?
+                               dispatch_hw_config(physical_cpus) :
+                               dispatch_hw_config(logical_cpus);
+       }
+       if (flags & DISPATCH_MAX_PARALLELISM_ACTIVE) {
+               uint32_t active_cpus = dispatch_hw_config(active_cpus);
+               if ((flags & DISPATCH_MAX_PARALLELISM_PHYSICAL) &&
+                               active_cpus < dispatch_hw_config(logical_cpus)) {
+                       active_cpus /= dispatch_hw_config(logical_cpus) /
+                                       dispatch_hw_config(physical_cpus);
+               }
+               if (active_cpus < p) p = active_cpus;
+       }
+       return p;
+}
+
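A typical use of the new helper is sizing a fan-out: ask how much parallelism is useful at a given QoS, restricted to physical, currently active cores (a sketch; the QoS value is illustrative):

	uint32_t n = _dispatch_qos_max_parallelism(DISPATCH_QOS_DEFAULT,
			DISPATCH_MAX_PARALLELISM_PHYSICAL |
			DISPATCH_MAX_PARALLELISM_ACTIVE);
	// a dispatch_apply-style loop could cap its worker count at n
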
 #if !HAVE_NORETURN_BUILTIN_TRAP
 /*
  * XXXRW: Work-around for possible clang bug in which __builtin_trap() is not
@@ -227,12 +213,13 @@ void __builtin_trap(void);
 #ifndef __OS_INTERNAL_ATOMIC__
 #include "shims/atomic.h"
 #endif
+#define DISPATCH_ATOMIC64_ALIGN  __attribute__((aligned(8)))
+
 #include "shims/atomic_sfb.h"
 #include "shims/tsd.h"
 #include "shims/yield.h"
 #include "shims/lock.h"
 
-#include "shims/hw_config.h"
 #include "shims/perfmon.h"
 
 #include "shims/getprogname.h"
@@ -286,7 +273,8 @@ _dispatch_mempcpy(void *ptr, const void *data, size_t len)
 #define _dispatch_clear_stack(s) do { \
                void *a[(s)/sizeof(void*) ? (s)/sizeof(void*) : 1]; \
                a[0] = pthread_get_stackaddr_np(pthread_self()); \
-               bzero((void*)&a[1], (size_t)(a[0] - (void*)&a[1])); \
+               void* volatile const p = (void*)&a[1]; /* <rdar://32604885> */ \
+               bzero((void*)p, (size_t)(a[0] - (void*)&a[1])); \
        } while (0)
 #else
 #define _dispatch_clear_stack(s)
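
The volatile local added above is presumably a dead-store guard (per the cited radar): without it, the optimizer can prove the scrubbed stack array is never read again and delete the bzero() outright. The idiom reduced to its essentials (a sketch; how reliably it defeats dead-store elimination is compiler-dependent):

	char buf[64];
	// ... buf held something sensitive ...
	void *volatile p = buf;          // the volatile read makes p opaque
	memset((void *)p, 0, sizeof(buf));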
diff --git a/src/shims/android_stubs.h b/src/shims/android_stubs.h
new file mode 100644 (file)
index 0000000..c8032a3
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * This source file is part of the Swift.org open source project
+ *
+ * Copyright (c) 2015 Apple Inc. and the Swift project authors
+ *
+ * Licensed under Apache License v2.0 with Runtime Library Exception
+ *
+ * See http://swift.org/LICENSE.txt for license information
+ * See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+ *
+ */
+
+// forward declarations for functions we are stubbing out
+// in the intial android port.
+
+#ifndef __DISPATCH__ANDROID__STUBS__INTERNAL
+#define __DISPATCH__ANDROID__STUBS__INTERNAL
+
+#if !__has_feature(c_static_assert)
+#define _Static_assert(...)
+#endif
+
+#endif /* __DISPATCH__ANDROID__STUBS__INTERNAL */
index 8a1ab18bd795e2b0f40423c9359fb358808a5855..64af8b2725879b386d84628a4fb60e476a24ed14 100644 (file)
 #ifndef __DISPATCH_SHIMS_ATOMIC__
 #define __DISPATCH_SHIMS_ATOMIC__
 
-#if !__has_extension(c_atomic) || \
-               !__has_extension(c_generic_selections) || \
-               !__has_include(<stdatomic.h>)
-#error libdispatch requires C11 with <stdatomic.h> and generic selections
+#if !__has_extension(c_atomic) || !__has_include(<stdatomic.h>)
+#error libdispatch requires C11 with <stdatomic.h>
 #endif
 
 #include <stdatomic.h>
 #define memory_order_ordered    memory_order_seq_cst
 #define memory_order_dependency memory_order_acquire
 
-#if __has_extension(c_generic_selections) && __has_extension(c_atomic)
-#define os_atomic(type) _Atomic(type)
-#else
-#define os_atomic(type) type volatile
-#endif
-
-#define _os_atomic_type_cases(type, expr) \
-               type *: expr, \
-               type volatile *: expr, \
-               _Atomic(type) *: expr, \
-               _Atomic(type) volatile *: expr
-
-#define _os_atomic_basetypeof(p) \
-               typeof(*_Generic((p), \
-               _os_atomic_type_cases(char, (char *)(p)), \
-               _os_atomic_type_cases(signed char, (signed char *)(p)), \
-               _os_atomic_type_cases(unsigned char, (unsigned char *)(p)), \
-               _os_atomic_type_cases(short, (short *)(p)), \
-               _os_atomic_type_cases(unsigned short, (unsigned short *)(p)), \
-               _os_atomic_type_cases(int, (int *)(p)), \
-               _os_atomic_type_cases(unsigned int, (unsigned int *)(p)), \
-               _os_atomic_type_cases(long, (long *)(p)), \
-               _os_atomic_type_cases(unsigned long, (unsigned long *)(p)), \
-               _os_atomic_type_cases(long long, (long long *)(p)), \
-               _os_atomic_type_cases(unsigned long long, (unsigned long long *)(p)), \
-               _os_atomic_type_cases(void *, (void **)(p)), \
-               _os_atomic_type_cases(const void *, (const void **)(p)), \
-               default: (void**)(p)))
+#define os_atomic(type) type _Atomic
 
 #define _os_atomic_c11_atomic(p) \
-               _Generic((p), \
-               _os_atomic_type_cases(char, (_Atomic(char)*)(p)), \
-               _os_atomic_type_cases(signed char, (_Atomic(signed char)*)(p)), \
-               _os_atomic_type_cases(unsigned char, (_Atomic(unsigned char)*)(p)), \
-               _os_atomic_type_cases(short, (_Atomic(short)*)(p)), \
-               _os_atomic_type_cases(unsigned short, (_Atomic(unsigned short)*)(p)), \
-               _os_atomic_type_cases(int, (_Atomic(int)*)(p)), \
-               _os_atomic_type_cases(unsigned int, (_Atomic(unsigned int)*)(p)), \
-               _os_atomic_type_cases(long, (_Atomic(long)*)(p)), \
-               _os_atomic_type_cases(unsigned long, (_Atomic(unsigned long)*)(p)), \
-               _os_atomic_type_cases(long long, (_Atomic(long long)*)(p)), \
-               _os_atomic_type_cases(unsigned long long, (_Atomic(unsigned long long)*)(p)), \
-               _os_atomic_type_cases(void *, (_Atomic(void*)*)(p)), \
-               _os_atomic_type_cases(const void *, (_Atomic(const void*)*)(p)), \
-               default: (_Atomic(void*)*)(p))
+               ((typeof(*(p)) _Atomic *)(p))
 
-#define os_atomic_thread_fence(m)  atomic_thread_fence(memory_order_##m)
-// see comment in dispatch_once.c
-#define os_atomic_maximally_synchronizing_barrier() \
-               atomic_thread_fence(memory_order_seq_cst)
+// This removes the _Atomic and volatile qualifiers on the type of *p
+#define _os_atomic_basetypeof(p) \
+               typeof(atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_relaxed))
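
The rewritten _os_atomic_basetypeof() exploits a C11 detail: atomic_load_explicit() on a `T _Atomic *` yields a plain `T`, so typeof() of that (unevaluated) load strips both the _Atomic and volatile qualifiers without the old _Generic type table. In use:

	os_atomic(long) counter;                   // i.e. `long _Atomic`
	_os_atomic_basetypeof(&counter) snapshot;  // declares a plain `long`
	snapshot = os_atomic_load(&counter, relaxed);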
 
 #define os_atomic_load(p, m) \
-               ({ _os_atomic_basetypeof(p) _r = \
-               atomic_load_explicit(_os_atomic_c11_atomic(p), \
-               memory_order_##m); (typeof(*(p)))_r; })
+               atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_##m)
 #define os_atomic_store(p, v, m) \
-               ({ _os_atomic_basetypeof(p) _v = (v); \
-               atomic_store_explicit(_os_atomic_c11_atomic(p), _v, \
-               memory_order_##m); })
+               atomic_store_explicit(_os_atomic_c11_atomic(p), v, memory_order_##m)
 #define os_atomic_xchg(p, v, m) \
-               ({ _os_atomic_basetypeof(p) _v = (v), _r = \
-               atomic_exchange_explicit(_os_atomic_c11_atomic(p), _v, \
-               memory_order_##m); (typeof(*(p)))_r; })
+               atomic_exchange_explicit(_os_atomic_c11_atomic(p), v, memory_order_##m)
 #define os_atomic_cmpxchg(p, e, v, m) \
-               ({ _os_atomic_basetypeof(p) _v = (v), _r = (e); \
+               ({ _os_atomic_basetypeof(p) _r = (e); \
                atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \
-               &_r, _v, memory_order_##m, \
-               memory_order_relaxed); })
+               &_r, v, memory_order_##m, memory_order_relaxed); })
 #define os_atomic_cmpxchgv(p, e, v, g, m) \
-               ({ _os_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \
+               ({ _os_atomic_basetypeof(p) _r = (e); _Bool _b = \
                atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \
-               &_r, _v, memory_order_##m, \
-               memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; })
+               &_r, v, memory_order_##m, memory_order_relaxed); *(g) = _r; _b; })
 #define os_atomic_cmpxchgvw(p, e, v, g, m) \
-               ({ _os_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \
+               ({ _os_atomic_basetypeof(p) _r = (e); _Bool _b = \
                atomic_compare_exchange_weak_explicit(_os_atomic_c11_atomic(p), \
-               &_r, _v, memory_order_##m, \
-               memory_order_relaxed); *(g) = (typeof(*(p)))_r;  _b; })
+               &_r, v, memory_order_##m, memory_order_relaxed); *(g) = _r;  _b; })
 
 #define _os_atomic_c11_op(p, v, m, o, op) \
                ({ _os_atomic_basetypeof(p) _v = (v), _r = \
                atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), _v, \
                memory_order_##m); (typeof(*(p)))(_r op _v); })
 #define _os_atomic_c11_op_orig(p, v, m, o, op) \
-               ({ _os_atomic_basetypeof(p) _v = (v), _r = \
-               atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), _v, \
-               memory_order_##m); (typeof(*(p)))_r; })
+               atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), v, \
+               memory_order_##m)
 #define os_atomic_add(p, v, m) \
                _os_atomic_c11_op((p), (v), m, add, +)
 #define os_atomic_add_orig(p, v, m) \
                _os_atomic_c11_op_orig((p), (v), m, add, +)
 #define os_atomic_load_with_dependency_on2o(p, f, e) \
                os_atomic_load_with_dependency_on(&(p)->f, e)
 
-#define os_atomic_rmw_loop(p, ov, nv, m, ...)  ({ \
-               bool _result = false; \
-               typeof(p) _p = (p); \
-               ov = os_atomic_load(_p, relaxed); \
-               do { \
-                       __VA_ARGS__; \
-                       _result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \
-               } while (os_unlikely(!_result)); \
-               _result; \
-       })
-#define os_atomic_rmw_loop2o(p, f, ov, nv, m, ...) \
-               os_atomic_rmw_loop(&(p)->f, ov, nv, m, __VA_ARGS__)
-#define os_atomic_rmw_loop_give_up_with_fence(m, expr) \
-               ({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); })
-#define os_atomic_rmw_loop_give_up(expr) \
-               os_atomic_rmw_loop_give_up_with_fence(relaxed, expr)
+#define os_atomic_thread_fence(m)  atomic_thread_fence(memory_order_##m)
 
 #define os_atomic_load2o(p, f, m) \
                os_atomic_load(&(p)->f, m)
 #define os_atomic_dec_orig2o(p, f, m) \
                os_atomic_sub_orig2o(p, f, 1, m)
 
-#if defined(__x86_64__) || defined(__i386__)
-#undef os_atomic_maximally_synchronizing_barrier
-#ifdef __LP64__
-#define os_atomic_maximally_synchronizing_barrier() \
-               ({ unsigned long _clbr; __asm__ __volatile__( \
-               "cpuid" \
-               : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory"); })
-#else
-#ifdef __llvm__
-#define os_atomic_maximally_synchronizing_barrier() \
-               ({ unsigned long _clbr; __asm__ __volatile__( \
-               "cpuid" \
-               : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory"); })
-#else // gcc does not allow inline i386 asm to clobber ebx
-#define os_atomic_maximally_synchronizing_barrier() \
-               ({ unsigned long _clbr; __asm__ __volatile__( \
-               "pushl  %%ebx\n\t" \
-               "cpuid\n\t" \
-               "popl   %%ebx" \
-               : "=a" (_clbr) : "0" (0) : "ecx", "edx", "cc", "memory"); })
-#endif
-#endif
-#endif // defined(__x86_64__) || defined(__i386__)
+#define os_atomic_rmw_loop(p, ov, nv, m, ...)  ({ \
+               bool _result = false; \
+               typeof(p) _p = (p); \
+               ov = os_atomic_load(_p, relaxed); \
+               do { \
+                       __VA_ARGS__; \
+                       _result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \
+               } while (os_unlikely(!_result)); \
+               _result; \
+       })
+#define os_atomic_rmw_loop2o(p, f, ov, nv, m, ...) \
+               os_atomic_rmw_loop(&(p)->f, ov, nv, m, __VA_ARGS__)
+#define os_atomic_rmw_loop_give_up_with_fence(m, expr) \
+               ({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); })
+#define os_atomic_rmw_loop_give_up(expr) \
+               os_atomic_rmw_loop_give_up_with_fence(relaxed, expr)
 
 #endif // __DISPATCH_SHIMS_ATOMIC__
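
The simplified C11 path above routes every read-modify-write that derives its new value from the old one through os_atomic_rmw_loop: a relaxed load, the caller's body, then a weak compare-and-exchange retried until it sticks. A minimal standalone sketch of the same pattern in plain C11 follows; the function and variable names are illustrative, not part of the shim:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    // Atomically set the lowest clear bit of *p, the way a caller of
    // os_atomic_rmw_loop would: derive nv from ov, retry on contention,
    // and bail out early ("give up") when no update is possible.
    static bool
    rmw_set_lowest_clear_bit(_Atomic(uint32_t) *p, uint32_t *out_old)
    {
        uint32_t ov = atomic_load_explicit(p, memory_order_relaxed);
        uint32_t nv;
        do {
            if (ov == UINT32_MAX) return false; // all bits set: give up
            nv = ov | (ov + 1);                 // sets the lowest clear bit
        } while (!atomic_compare_exchange_weak_explicit(p, &ov, nv,
                memory_order_acquire, memory_order_relaxed));
        *out_old = ov;
        return true;
    }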
index 5f972b4feef818efb519e7711f2cd32b3933ac37..de074a444e0dcdf18ab2bb646565f81fd4d3aafa 100644 (file)
 #ifndef __DISPATCH_SHIMS_ATOMIC_SFB__
 #define __DISPATCH_SHIMS_ATOMIC_SFB__
 
-#if __clang__ && __clang_major__ < 5 // <rdar://problem/13833871>
-#define __builtin_ffs(x) __builtin_ffs((unsigned int)(x))
-#endif
-
-// Returns UINT_MAX if all the bits in p were already set.
-#define os_atomic_set_first_bit(p,m) _os_atomic_set_first_bit(p,m)
-
-DISPATCH_ALWAYS_INLINE
-static inline unsigned int
-_os_atomic_set_first_bit(volatile unsigned long *p,
-               unsigned int max_index)
-{
-       unsigned int index;
-       unsigned long b, mask, b_masked;
-
-       for (;;) {
-               b = *p;
-               // ffs returns 1 + index, or 0 if none set.
-               index = (unsigned int)__builtin_ffsl((long)~b);
-               if (slowpath(index == 0)) {
-                       return UINT_MAX;
-               }
-               index--;
-               if (slowpath(index > max_index)) {
-                       return UINT_MAX;
-               }
-               mask = ((typeof(b))1) << index;
-               b_masked = b | mask;
-               if (__sync_bool_compare_and_swap(p, b, b_masked)) {
-                       return index;
-               }
-       }
-}
-
 #if defined(__x86_64__) || defined(__i386__)
 
-#undef os_atomic_set_first_bit
+// Returns UINT_MAX if all the bits in p were already set.
 DISPATCH_ALWAYS_INLINE
 static inline unsigned int
 os_atomic_set_first_bit(volatile unsigned long *p, unsigned int max)
@@ -108,7 +74,35 @@ os_atomic_set_first_bit(volatile unsigned long *p, unsigned int max)
        return (unsigned int)bit;
 }
 
+#else
+
+#if __clang__ && __clang_major__ < 5 // <rdar://problem/13833871>
+#define __builtin_ffs(x) __builtin_ffs((unsigned int)(x))
 #endif
 
+DISPATCH_ALWAYS_INLINE
+static inline unsigned int
+os_atomic_set_first_bit(volatile unsigned long *p, unsigned int max_index)
+{
+       unsigned int index;
+       unsigned long b, b_masked;
+
+       os_atomic_rmw_loop(p, b, b_masked, relaxed, {
+               // ffs returns 1 + index, or 0 if none set
+               index = (unsigned int)__builtin_ffsl((long)~b);
+               if (slowpath(index == 0)) {
+                       os_atomic_rmw_loop_give_up(return UINT_MAX);
+               }
+               index--;
+               if (slowpath(index > max_index)) {
+                       os_atomic_rmw_loop_give_up(return UINT_MAX);
+               }
+               b_masked = b | (1UL << index);
+       });
+
+       return index;
+}
+
+#endif
 
 #endif // __DISPATCH_SHIMS_ATOMIC_SFB__
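
With the generic fallback now expressed through os_atomic_rmw_loop, os_atomic_set_first_bit behaves the same on every architecture: it returns the index of the bit it set, or UINT_MAX when no bit at or below max_index was free. A hypothetical caller, assuming this header and <limits.h> are in scope:

    #include <limits.h>

    static volatile unsigned long slot_bitmap; // one bit per slot

    // Claim the first free slot, or return -1 when the bitmap is full.
    static int
    claim_slot(void)
    {
        unsigned int max_index =
                (unsigned int)(sizeof(slot_bitmap) * CHAR_BIT - 1);
        unsigned int idx = os_atomic_set_first_bit(&slot_bitmap, max_index);
        return (idx == UINT_MAX) ? -1 : (int)idx;
    }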
index 74aba1318b502479a4787a3e626d557adf31cb40..7eb19787e624ddb6d4115772046b5567aced58ff 100644 (file)
 #define __DISPATCH_SHIMS_GETPROGNAME__
 
 #if !HAVE_GETPROGNAME
+
+#ifdef __ANDROID__
+extern const char *__progname;
+#endif /* __ANDROID__ */
+
 static inline char *
 getprogname(void)
 {
 # if HAVE_DECL_PROGRAM_INVOCATION_SHORT_NAME
        return program_invocation_short_name;
+# elif defined(__ANDROID__)
+       return (char *)__progname;
 # else
 #   error getprogname(3) is not available on this platform
 # endif
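
The shim keeps BSD-style call sites portable: code can call getprogname(3) on glibc (via program_invocation_short_name) and on Android (via __progname) without ifdefs. A trivial usage sketch:

    #include <stdio.h>

    static void
    log_startup(void)
    {
        // With the shim in scope this works on macOS, glibc, and Android.
        fprintf(stderr, "[%s] starting up\n", getprogname());
    }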
index cad211d21d57def3c2ac5b0f88baa03acf54a1e7..485dad663d0ef90a49bfc10a98ef8c5cf5e5db00 100644 (file)
 #ifndef __DISPATCH_SHIMS_HW_CONFIG__
 #define __DISPATCH_SHIMS_HW_CONFIG__
 
+#ifdef __SIZEOF_POINTER__
+#define DISPATCH_SIZEOF_PTR __SIZEOF_POINTER__
+#elif defined(_WIN64)
+#define DISPATCH_SIZEOF_PTR 8
+#elif defined(_WIN32)
+#define DISPATCH_SIZEOF_PTR 4
+#elif defined(_MSC_VER)
+#error "could not determine pointer size as a constant int for MSVC"
+#elif defined(__LP64__) || defined(__LLP64__)
+#define DISPATCH_SIZEOF_PTR 8
+#elif defined(__ILP32__)
+#define DISPATCH_SIZEOF_PTR 4
+#else
+#error "could not determine pointer size as a constant int"
+#endif // __SIZEOF_POINTER__
+
 #if !TARGET_OS_WIN32
 
 typedef enum {
@@ -85,9 +101,19 @@ _dispatch_hw_get_config(_dispatch_hw_config_t c)
        switch (c) {
        case _dispatch_hw_config_logical_cpus:
        case _dispatch_hw_config_physical_cpus:
-               return sysconf(_SC_NPROCESSORS_CONF);
+               return (uint32_t)sysconf(_SC_NPROCESSORS_CONF);
        case _dispatch_hw_config_active_cpus:
-               return sysconf(_SC_NPROCESSORS_ONLN);
+               {
+#ifdef __USE_GNU
+                       // Prefer pthread_getaffinity_np because it considers
+                       // scheduler cpu affinity. This matters if the program
+                       // is restricted to a subset of the online cpus (e.g. via numactl).
+                       cpu_set_t cpuset;
+                       if (pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset) == 0)
+                               return (uint32_t)CPU_COUNT(&cpuset);
+#endif
+                       return (uint32_t)sysconf(_SC_NPROCESSORS_ONLN);
+               }
        }
 #else
        const char *name = NULL;
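
The active-cpu query now honors the scheduler affinity mask before falling back to the online-cpu count. A standalone sketch of the same logic, assuming a glibc-style system (_GNU_SOURCE must be defined before any include so pthread_getaffinity_np and CPU_COUNT are visible):

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <sched.h>
    #include <stdint.h>
    #include <unistd.h>

    static uint32_t
    active_cpu_count(void)
    {
        cpu_set_t cpuset;
        // Honor any affinity restriction (taskset, numactl, cpusets) first.
        if (pthread_getaffinity_np(pthread_self(), sizeof(cpuset),
                &cpuset) == 0) {
            return (uint32_t)CPU_COUNT(&cpuset);
        }
        // Otherwise count every cpu that is currently online.
        return (uint32_t)sysconf(_SC_NPROCESSORS_ONLN);
    }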
index 07ee8bc060e6fc6809b749a3d5fd10262e79db1e..4923eb0ca47c5f10d2cee357ecba605028df1aa1 100644 (file)
  */
 
 #include <stdint.h>
+#ifdef __ANDROID__
+#include <sys/syscall.h>
+#else
 #include <syscall.h>
+#endif /* __ANDROID__ */
 
 #if __has_include(<config/config_ac.h>)
 #include <config/config_ac.h>
index 0c12e827251e3cd33efd598a92970146928ae370..ec684170d0aa393a5397f74d2a9cf2bb88c072a0 100644 (file)
 #ifndef __DISPATCH__STUBS__INTERNAL
 #define __DISPATCH__STUBS__INTERNAL
 
+#ifndef TAILQ_FOREACH_SAFE
+#define TAILQ_FOREACH_SAFE(var, head, field, temp)                         \
+       for ((var) = TAILQ_FIRST((head));                                      \
+               (var) && ((temp) = TAILQ_NEXT((var), field), 1); (var) = (temp))
+#endif
+
+#if DISPATCH_DEBUG
+#ifndef TRASHIT
+#define TRASHIT(x) do { (x) = (void *)-1; } while (0)
+#endif
+#endif
+
 /*
  * Stub out defines for some mach types and related macros
  */
@@ -27,73 +39,28 @@ typedef uint32_t mach_port_t;
 
 typedef uint32_t mach_error_t;
 
-typedef uint32_t mach_vm_size_t;
-
 typedef uint32_t mach_msg_return_t;
 
 typedef uint32_t mach_msg_bits_t;
 
-typedef uintptr_t mach_vm_address_t;
-
-typedef uint32_t dispatch_mach_msg_t;
-
-typedef uint32_t dispatch_mach_t;
-
-typedef uint32_t dispatch_mach_reason_t;
-
-typedef uint32_t voucher_activity_mode_t;
-
-typedef uint32_t voucher_activity_trace_id_t;
-
-typedef uint32_t voucher_activity_id_t;
+typedef void *dispatch_mach_msg_t;
 
-typedef uint32_t voucher_activity_flag_t;
+typedef uint64_t firehose_activity_id_t;
 
-typedef struct { } mach_msg_header_t;
-
-
-typedef void (*dispatch_mach_handler_function_t)(void*, dispatch_mach_reason_t,
-                                                dispatch_mach_msg_t, mach_error_t);
-
-typedef void (*dispatch_mach_msg_destructor_t)(void*);
+typedef void *mach_msg_header_t;
 
 // Print a warning when an unported code path executes.
-#define LINUX_PORT_ERROR()  do { printf("LINUX_PORT_ERROR_CALLED %s:%d: %s\n",__FILE__,__LINE__,__FUNCTION__); } while (0)
+#define LINUX_PORT_ERROR()  do { \
+               printf("LINUX_PORT_ERROR_CALLED %s:%d: %s\n",\
+               __FILE__,__LINE__,__FUNCTION__); } while (0)
 
 /*
  * Stub out defines for other missing types
  */
 
-#if __linux__
-// we fall back to use kevent
-#define kevent64_s kevent
-#define kevent64(kq,cl,nc,el,ne,f,to)  kevent(kq,cl,nc,el,ne,to)
-#endif
-
 // SIZE_T_MAX should not be hardcoded like this here.
-#define SIZE_T_MAX (0x7fffffff)
-
-// Define to 0 the NOTE_ values that are not present on Linux.
-// Revisit this...would it be better to ifdef out the uses instead??
-
-// The following values are passed as part of the EVFILT_TIMER requests
-
-#define IGNORE_KEVENT64_EXT   /* will force the kevent64_s.ext[] to not be used -> leeway ignored */
-
-#ifndef NOTE_SECONDS
-#define NOTE_SECONDS   0x01
-#define NOTE_USECONDS  0x02
-#define NOTE_NSECONDS  0x04
-#define NOTE_ABSOLUTE  0x08
-#define KEVENT_NSEC_NOT_SUPPORTED
+#ifndef SIZE_T_MAX
+#define SIZE_T_MAX (~(size_t)0)
 #endif
-#define NOTE_CRITICAL  0x10
-#define NOTE_BACKGROUND        0x20
-#define NOTE_LEEWAY    0x40
-
-// need to catch the following usage if it happens ..
-// we simply return '0' as a value probably not correct
-
-#define NOTE_VM_PRESSURE ({LINUX_PORT_ERROR(); 0;})
 
 #endif
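
TAILQ_FOREACH_SAFE, stubbed in above for platforms whose <sys/queue.h> lacks it, is the removal-safe variant of TAILQ_FOREACH: it caches the next element in a temporary before running the body, so the current element may be unlinked and freed mid-iteration. A self-contained usage sketch with hypothetical types:

    #include <sys/queue.h>
    #include <stdlib.h>

    struct item {
        int value;
        TAILQ_ENTRY(item) entries;
    };
    TAILQ_HEAD(item_list, item);

    // Remove and free every negative item; safe because "tmp" is read
    // before the current element is unlinked.
    static void
    drop_negative(struct item_list *head)
    {
        struct item *it, *tmp;
        TAILQ_FOREACH_SAFE(it, head, entries, tmp) {
            if (it->value < 0) {
                TAILQ_REMOVE(head, it, entries);
                free(it);
            }
        }
    }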
index a64e9c278b92e1b85e1471fa289f0eab5de51ba6..24af953c3178f2f94442d49b2559153a7b480ae1 100644 (file)
@@ -34,6 +34,7 @@
 _Static_assert(DLOCK_LOCK_DATA_CONTENTION == ULF_WAIT_WORKQ_DATA_CONTENTION,
                "values should be the same");
 
+#if !HAVE_UL_UNFAIR_LOCK
 DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_thread_switch(dispatch_lock value, dispatch_lock_options_t flags,
@@ -47,24 +48,33 @@ _dispatch_thread_switch(dispatch_lock value, dispatch_lock_options_t flags,
        }
        thread_switch(_dispatch_lock_owner(value), option, timeout);
 }
+#endif // HAVE_UL_UNFAIR_LOCK
 #endif
 
 #pragma mark - semaphores
 
 #if USE_MACH_SEM
+#if __has_include(<os/semaphore_private.h>)
+#include <os/semaphore_private.h>
+#define DISPATCH_USE_OS_SEMAPHORE_CACHE 1
+#else
+#define DISPATCH_USE_OS_SEMAPHORE_CACHE 0
+#endif
+
 #define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \
+               DISPATCH_VERIFY_MIG(x); \
                if (unlikely((x) == KERN_INVALID_NAME)) { \
-                       DISPATCH_CLIENT_CRASH((x), "Use-after-free of dispatch_semaphore_t"); \
+                       DISPATCH_CLIENT_CRASH((x), \
+                               "Use-after-free of dispatch_semaphore_t or dispatch_group_t"); \
                } else if (unlikely(x)) { \
                        DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \
                } \
        } while (0)
 
 void
-_os_semaphore_create_slow(_os_semaphore_t *s4, int policy)
+_dispatch_sema4_create_slow(_dispatch_sema4_t *s4, int policy)
 {
-       kern_return_t kr;
-       semaphore_t tmp;
+       semaphore_t tmp = MACH_PORT_NULL;
 
        _dispatch_fork_becomes_unsafe();
 
@@ -73,33 +83,42 @@ _os_semaphore_create_slow(_os_semaphore_t *s4, int policy)
        // Someday:
        // 1) Switch to a doubly-linked FIFO in user-space.
        // 2) User-space timers for the timeout.
-       // 3) Use the per-thread semaphore port.
 
-       while ((kr = semaphore_create(mach_task_self(), &tmp, policy, 0))) {
-               DISPATCH_VERIFY_MIG(kr);
-               _dispatch_temporary_resource_shortage();
+#if DISPATCH_USE_OS_SEMAPHORE_CACHE
+       if (policy == _DSEMA4_POLICY_FIFO) {
+               tmp = (_dispatch_sema4_t)os_get_cached_semaphore();
+               if (!os_atomic_cmpxchg(s4, MACH_PORT_NULL, tmp, relaxed)) {
+                       os_put_cached_semaphore((os_semaphore_t)tmp);
+               }
+               return;
        }
+#endif
 
-       if (!os_atomic_cmpxchg(s4, 0, tmp, relaxed)) {
+       kern_return_t kr = semaphore_create(mach_task_self(), &tmp, policy, 0);
+       DISPATCH_SEMAPHORE_VERIFY_KR(kr);
+
+       if (!os_atomic_cmpxchg(s4, MACH_PORT_NULL, tmp, relaxed)) {
                kr = semaphore_destroy(mach_task_self(), tmp);
-               DISPATCH_VERIFY_MIG(kr);
                DISPATCH_SEMAPHORE_VERIFY_KR(kr);
        }
 }
 
 void
-_os_semaphore_dispose_slow(_os_semaphore_t *sema)
+_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy)
 {
-       kern_return_t kr;
        semaphore_t sema_port = *sema;
-       kr = semaphore_destroy(mach_task_self(), sema_port);
-       DISPATCH_VERIFY_MIG(kr);
-       DISPATCH_SEMAPHORE_VERIFY_KR(kr);
        *sema = MACH_PORT_DEAD;
+#if DISPATCH_USE_OS_SEMAPHORE_CACHE
+       if (policy == _DSEMA4_POLICY_FIFO) {
+               return os_put_cached_semaphore((os_semaphore_t)sema_port);
+       }
+#endif
+       kern_return_t kr = semaphore_destroy(mach_task_self(), sema_port);
+       DISPATCH_SEMAPHORE_VERIFY_KR(kr);
 }
 
 void
-_os_semaphore_signal(_os_semaphore_t *sema, long count)
+_dispatch_sema4_signal(_dispatch_sema4_t *sema, long count)
 {
        do {
                kern_return_t kr = semaphore_signal(*sema);
@@ -108,7 +127,7 @@ _os_semaphore_signal(_os_semaphore_t *sema, long count)
 }
 
 void
-_os_semaphore_wait(_os_semaphore_t *sema)
+_dispatch_sema4_wait(_dispatch_sema4_t *sema)
 {
        kern_return_t kr;
        do {
@@ -118,7 +137,7 @@ _os_semaphore_wait(_os_semaphore_t *sema)
 }
 
 bool
-_os_semaphore_timedwait(_os_semaphore_t *sema, dispatch_time_t timeout)
+_dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout)
 {
        mach_timespec_t _timeout;
        kern_return_t kr;
@@ -144,21 +163,21 @@ _os_semaphore_timedwait(_os_semaphore_t *sema, dispatch_time_t timeout)
        } while (0)
 
 void
-_os_semaphore_init(_os_semaphore_t *sema, int policy DISPATCH_UNUSED)
+_dispatch_sema4_init(_dispatch_sema4_t *sema, int policy DISPATCH_UNUSED)
 {
        int rc = sem_init(sema, 0, 0);
        DISPATCH_SEMAPHORE_VERIFY_RET(rc);
 }
 
 void
-_os_semaphore_dispose_slow(_os_semaphore_t *sema)
+_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy DISPATCH_UNUSED)
 {
        int rc = sem_destroy(sema);
        DISPATCH_SEMAPHORE_VERIFY_RET(rc);
 }
 
 void
-_os_semaphore_signal(_os_semaphore_t *sema, long count)
+_dispatch_sema4_signal(_dispatch_sema4_t *sema, long count)
 {
        do {
                int ret = sem_post(sema);
@@ -167,14 +186,14 @@ _os_semaphore_signal(_os_semaphore_t *sema, long count)
 }
 
 void
-_os_semaphore_wait(_os_semaphore_t *sema)
+_dispatch_sema4_wait(_dispatch_sema4_t *sema)
 {
        int ret = sem_wait(sema);
        DISPATCH_SEMAPHORE_VERIFY_RET(ret);
 }
 
 bool
-_os_semaphore_timedwait(_os_semaphore_t *sema, dispatch_time_t timeout)
+_dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout)
 {
        struct timespec _timeout;
        int ret;
@@ -236,7 +255,7 @@ _pop_timer_resolution(DWORD ms)
 }
 
 void
-_os_semaphore_create_slow(_os_semaphore_t *s4, int policy DISPATCH_UNUSED)
+_dispatch_sema4_create_slow(_dispatch_sema4_t *s4, int policy DISPATCH_UNUSED)
 {
        HANDLE tmp;
 
@@ -252,7 +271,7 @@ _os_semaphore_create_slow(_os_semaphore_t *s4, int policy DISPATCH_UNUSED)
 }
 
 void
-_os_semaphore_dispose_slow(_os_semaphore_t *sema)
+_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy DISPATCH_UNUSED)
 {
        HANDLE sema_handle = *sema;
        CloseHandle(sema_handle);
@@ -260,20 +279,20 @@ _os_semaphore_dispose_slow(_os_semaphore_t *sema)
 }
 
 void
-_os_semaphore_signal(_os_semaphore_t *sema, long count)
+_dispatch_sema4_signal(_dispatch_sema4_t *sema, long count)
 {
        int ret = ReleaseSemaphore(*sema, count, NULL);
        dispatch_assume(ret);
 }
 
 void
-_os_semaphore_wait(_os_semaphore_t *sema)
+_dispatch_sema4_wait(_dispatch_sema4_t *sema)
 {
        WaitForSingleObject(*sema, INFINITE);
 }
 
 bool
-_os_semaphore_timedwait(_os_semaphore_t *sema, dispatch_time_t timeout)
+_dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout)
 {
        uint64_t nsec;
        DWORD msec;
@@ -288,31 +307,7 @@ _os_semaphore_timedwait(_os_semaphore_t *sema, dispatch_time_t timeout)
        return wait_result == WAIT_TIMEOUT;
 }
 #else
-#error "port has to implement _os_semaphore_t"
-#endif
-
-#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-semaphore_t
-_dispatch_thread_semaphore_create(void)
-{
-       semaphore_t s4;
-       kern_return_t kr;
-       while (unlikely(kr = semaphore_create(mach_task_self(), &s4,
-                       SYNC_POLICY_FIFO, 0))) {
-               DISPATCH_VERIFY_MIG(kr);
-               _dispatch_temporary_resource_shortage();
-       }
-       return s4;
-}
-
-void
-_dispatch_thread_semaphore_dispose(void *ctxt)
-{
-       semaphore_t s4 = (semaphore_t)(uintptr_t)ctxt;
-       kern_return_t kr = semaphore_destroy(mach_task_self(), s4);
-       DISPATCH_VERIFY_MIG(kr);
-       DISPATCH_SEMAPHORE_VERIFY_KR(kr);
-}
+#error "port has to implement _dispatch_sema4_t"
 #endif
 
 #pragma mark - ulock wrappers
@@ -322,12 +317,13 @@ static int
 _dispatch_ulock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout,
                uint32_t flags)
 {
-       dispatch_assert(!DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK);
        int rc;
        _dlock_syscall_switch(err,
                rc = __ulock_wait(UL_COMPARE_AND_WAIT | flags, uaddr, val, timeout),
                case 0: return rc > 0 ? ENOTEMPTY : 0;
                case ETIMEDOUT: case EFAULT: return err;
+               case EOWNERDEAD: DISPATCH_CLIENT_CRASH(*uaddr,
+                               "corruption of lock owner");
                default: DISPATCH_INTERNAL_CRASH(err, "ulock_wait() failed");
        );
 }
@@ -335,7 +331,6 @@ _dispatch_ulock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout,
 static void
 _dispatch_ulock_wake(uint32_t *uaddr, uint32_t flags)
 {
-       dispatch_assert(!DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK);
        _dlock_syscall_switch(err,
                __ulock_wake(UL_COMPARE_AND_WAIT | flags, uaddr, 0),
                case 0: case ENOENT: break;
@@ -351,17 +346,13 @@ static int
 _dispatch_unfair_lock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout,
                dispatch_lock_options_t flags)
 {
-       if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
-               // <rdar://problem/25075359>
-               timeout =  timeout < 1000 ? 1 : timeout / 1000;
-               _dispatch_thread_switch(val, flags, timeout);
-               return 0;
-       }
        int rc;
        _dlock_syscall_switch(err,
                rc = __ulock_wait(UL_UNFAIR_LOCK | flags, uaddr, val, timeout),
                case 0: return rc > 0 ? ENOTEMPTY : 0;
                case ETIMEDOUT: case EFAULT: return err;
+               case EOWNERDEAD: DISPATCH_CLIENT_CRASH(*uaddr,
+                               "corruption of lock owner");
                default: DISPATCH_INTERNAL_CRASH(err, "ulock_wait() failed");
        );
 }
@@ -369,10 +360,6 @@ _dispatch_unfair_lock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout,
 static void
 _dispatch_unfair_lock_wake(uint32_t *uaddr, uint32_t flags)
 {
-       if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
-               // <rdar://problem/25075359>
-               return;
-       }
        _dlock_syscall_switch(err, __ulock_wake(UL_UNFAIR_LOCK | flags, uaddr, 0),
                case 0: case ENOENT: break;
                default: DISPATCH_INTERNAL_CRASH(err, "ulock_wake() failed");
@@ -383,7 +370,11 @@ _dispatch_unfair_lock_wake(uint32_t *uaddr, uint32_t flags)
 #pragma mark - futex wrappers
 #if HAVE_FUTEX
 #include <sys/time.h>
+#ifdef __ANDROID__
+#include <sys/syscall.h>
+#else
 #include <syscall.h>
+#endif /* __ANDROID__ */
 
 DISPATCH_ALWAYS_INLINE
 static inline int
@@ -391,7 +382,7 @@ _dispatch_futex(uint32_t *uaddr, int op, uint32_t val,
                const struct timespec *timeout, uint32_t *uaddr2, uint32_t val3,
                int opflags)
 {
-       return syscall(SYS_futex, uaddr, op | opflags, val, timeout, uaddr2, val3);
+       return (int)syscall(SYS_futex, uaddr, op | opflags, val, timeout, uaddr2, val3);
 }
 
 static int
@@ -410,7 +401,7 @@ _dispatch_futex_wake(uint32_t *uaddr, int wake, int opflags)
 {
        int rc;
        _dlock_syscall_switch(err,
-               rc = _dispatch_futex(uaddr, FUTEX_WAKE, wake, NULL, NULL, 0, opflags),
+               rc = _dispatch_futex(uaddr, FUTEX_WAKE, (uint32_t)wake, NULL, NULL, 0, opflags),
                case 0: return;
                default: DISPATCH_CLIENT_CRASH(err, "futex_wake() failed");
        );
@@ -421,7 +412,7 @@ _dispatch_futex_lock_pi(uint32_t *uaddr, struct timespec *timeout, int detect,
              int opflags)
 {
        _dlock_syscall_switch(err,
-               _dispatch_futex(uaddr, FUTEX_LOCK_PI, detect, timeout,
+               _dispatch_futex(uaddr, FUTEX_LOCK_PI, (uint32_t)detect, timeout,
                                NULL, 0, opflags),
                case 0: return;
                default: DISPATCH_CLIENT_CRASH(errno, "futex_lock_pi() failed");
@@ -475,35 +466,18 @@ _dispatch_wake_by_address(uint32_t volatile *address)
 void
 _dispatch_thread_event_signal_slow(dispatch_thread_event_t dte)
 {
-#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-       if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
-               kern_return_t kr = semaphore_signal(dte->dte_sema);
-               DISPATCH_SEMAPHORE_VERIFY_KR(kr);
-               return;
-       }
-#endif
 #if HAVE_UL_COMPARE_AND_WAIT
        _dispatch_ulock_wake(&dte->dte_value, 0);
 #elif HAVE_FUTEX
        _dispatch_futex_wake(&dte->dte_value, 1, FUTEX_PRIVATE_FLAG);
 #else
-       _os_semaphore_signal(&dte->dte_sema, 1);
+       _dispatch_sema4_signal(&dte->dte_sema, 1);
 #endif
 }
 
 void
 _dispatch_thread_event_wait_slow(dispatch_thread_event_t dte)
 {
-#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-       if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
-               kern_return_t kr;
-               do {
-                       kr = semaphore_wait(dte->dte_sema);
-               } while (unlikely(kr == KERN_ABORTED));
-               DISPATCH_SEMAPHORE_VERIFY_KR(kr);
-               return;
-       }
-#endif
 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
        for (;;) {
                uint32_t value = os_atomic_load(&dte->dte_value, acquire);
@@ -520,7 +494,7 @@ _dispatch_thread_event_wait_slow(dispatch_thread_event_t dte)
 #endif
        }
 #else
-       _os_semaphore_wait(&dte->dte_sema);
+       _dispatch_sema4_wait(&dte->dte_sema);
 #endif
 }
 
@@ -531,30 +505,30 @@ void
 _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul,
                dispatch_lock_options_t flags)
 {
-       dispatch_lock tid_self = _dispatch_tid_self(), next = tid_self;
-       dispatch_lock tid_old, tid_new;
+       dispatch_lock value_self = _dispatch_lock_value_for_self();
+       dispatch_lock old_value, new_value, next = value_self;
        int rc;
 
        for (;;) {
-               os_atomic_rmw_loop(&dul->dul_lock, tid_old, tid_new, acquire, {
-                       if (likely(!_dispatch_lock_is_locked(tid_old))) {
-                               tid_new = next;
+               os_atomic_rmw_loop(&dul->dul_lock, old_value, new_value, acquire, {
+                       if (likely(!_dispatch_lock_is_locked(old_value))) {
+                               new_value = next;
                        } else {
-                               tid_new = tid_old & ~DLOCK_NOWAITERS_BIT;
-                               if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break);
+                               new_value = old_value | DLOCK_WAITERS_BIT;
+                               if (new_value == old_value) os_atomic_rmw_loop_give_up(break);
                        }
                });
-               if (unlikely(_dispatch_lock_is_locked_by(tid_old, tid_self))) {
+               if (unlikely(_dispatch_lock_is_locked_by(old_value, value_self))) {
                        DISPATCH_CLIENT_CRASH(0, "trying to lock recursively");
                }
-               if (tid_new == next) {
+               if (new_value == next) {
                        return;
                }
-               rc = _dispatch_unfair_lock_wait(&dul->dul_lock, tid_new, 0, flags);
+               rc = _dispatch_unfair_lock_wait(&dul->dul_lock, new_value, 0, flags);
                if (rc == ENOTEMPTY) {
-                       next = tid_self & ~DLOCK_NOWAITERS_BIT;
+                       next = value_self | DLOCK_WAITERS_BIT;
                } else {
-                       next = tid_self;
+                       next = value_self;
                }
        }
 }
@@ -571,30 +545,28 @@ void
 _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul,
                dispatch_lock_options_t flags)
 {
-       dispatch_lock tid_cur, tid_self = _dispatch_tid_self();
+       dispatch_lock cur, value_self = _dispatch_lock_value_for_self();
        uint32_t timeout = 1;
 
        while (unlikely(!os_atomic_cmpxchgv(&dul->dul_lock,
-                       DLOCK_OWNER_NULL, tid_self, &tid_cur, acquire))) {
-               if (unlikely(_dispatch_lock_is_locked_by(tid_cur, tid_self))) {
+                       DLOCK_OWNER_NULL, value_self, &cur, acquire))) {
+               if (unlikely(_dispatch_lock_is_locked_by(cur, value_self))) {
                        DISPATCH_CLIENT_CRASH(0, "trying to lock recursively");
                }
-               _dispatch_thread_switch(tid_cur, flags, timeout++);
+               _dispatch_thread_switch(cur, flags, timeout++);
        }
 }
 #endif
 
 void
-_dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t dul,
-               dispatch_lock tid_cur)
+_dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t dul, dispatch_lock cur)
 {
-       dispatch_lock_owner tid_self = _dispatch_tid_self();
-       if (unlikely(!_dispatch_lock_is_locked_by(tid_cur, tid_self))) {
-               DISPATCH_CLIENT_CRASH(tid_cur, "lock not owned by current thread");
+       if (unlikely(!_dispatch_lock_is_locked_by_self(cur))) {
+               DISPATCH_CLIENT_CRASH(cur, "lock not owned by current thread");
        }
 
 #if HAVE_UL_UNFAIR_LOCK
-       if (!(tid_cur & DLOCK_NOWAITERS_BIT)) {
+       if (_dispatch_lock_has_waiters(cur)) {
                _dispatch_unfair_lock_wake(&dul->dul_lock, 0);
        }
 #elif HAVE_FUTEX
@@ -611,41 +583,38 @@ void
 _dispatch_gate_wait_slow(dispatch_gate_t dgl, dispatch_lock value,
                dispatch_lock_options_t flags)
 {
-       dispatch_lock tid_self = _dispatch_tid_self(), tid_old, tid_new;
+       dispatch_lock self = _dispatch_lock_value_for_self();
+       dispatch_lock old_value, new_value;
        uint32_t timeout = 1;
 
        for (;;) {
-               os_atomic_rmw_loop(&dgl->dgl_lock, tid_old, tid_new, acquire, {
-                       if (likely(tid_old == value)) {
+               os_atomic_rmw_loop(&dgl->dgl_lock, old_value, new_value, acquire, {
+                       if (likely(old_value == value)) {
                                os_atomic_rmw_loop_give_up_with_fence(acquire, return);
                        }
-#ifdef DLOCK_NOWAITERS_BIT
-                       tid_new = tid_old & ~DLOCK_NOWAITERS_BIT;
-#else
-                       tid_new = tid_old | DLOCK_WAITERS_BIT;
-#endif
-                       if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break);
+                       new_value = old_value | DLOCK_WAITERS_BIT;
+                       if (new_value == old_value) os_atomic_rmw_loop_give_up(break);
                });
-               if (unlikely(_dispatch_lock_is_locked_by(tid_old, tid_self))) {
+               if (unlikely(_dispatch_lock_is_locked_by(old_value, self))) {
                        DISPATCH_CLIENT_CRASH(0, "trying to lock recursively");
                }
 #if HAVE_UL_UNFAIR_LOCK
-               _dispatch_unfair_lock_wait(&dgl->dgl_lock, tid_new, 0, flags);
+               _dispatch_unfair_lock_wait(&dgl->dgl_lock, new_value, 0, flags);
 #elif HAVE_FUTEX
-               _dispatch_futex_wait(&dgl->dgl_lock, tid_new, NULL, FUTEX_PRIVATE_FLAG);
+               _dispatch_futex_wait(&dgl->dgl_lock, new_value, NULL, FUTEX_PRIVATE_FLAG);
 #else
-               _dispatch_thread_switch(tid_new, flags, timeout++);
+               _dispatch_thread_switch(new_value, flags, timeout++);
 #endif
                (void)timeout;
+               (void)flags;
        }
 }
 
 void
-_dispatch_gate_broadcast_slow(dispatch_gate_t dgl, dispatch_lock tid_cur)
+_dispatch_gate_broadcast_slow(dispatch_gate_t dgl, dispatch_lock cur)
 {
-       dispatch_lock_owner tid_self = _dispatch_tid_self();
-       if (unlikely(!_dispatch_lock_is_locked_by(tid_cur, tid_self))) {
-               DISPATCH_CLIENT_CRASH(tid_cur, "lock not owned by current thread");
+       if (unlikely(!_dispatch_lock_is_locked_by_self(cur))) {
+               DISPATCH_CLIENT_CRASH(cur, "lock not owned by current thread");
        }
 
 #if HAVE_UL_UNFAIR_LOCK
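
All three backends (Mach, POSIX, Win32) share one lazy-creation idiom in the _dispatch_sema4_create_slow paths above: every racing thread creates a semaphore, a single relaxed compare-and-swap elects the winner, and losers dispose of (or recycle) the handle they just made. A sketch of that idiom in plain C11; the handle type and the two os_* functions are illustrative stand-ins, not real APIs:

    #include <stdatomic.h>

    typedef int sema_handle_t;          // stand-in for semaphore_t / HANDLE
    #define SEMA_NULL 0

    extern sema_handle_t sema_os_create(void);   // hypothetical
    extern void sema_os_destroy(sema_handle_t);  // hypothetical

    static void
    sema_create_slow(_Atomic(sema_handle_t) *slot)
    {
        sema_handle_t tmp = sema_os_create();
        sema_handle_t expected = SEMA_NULL;
        if (!atomic_compare_exchange_strong_explicit(slot, &expected, tmp,
                memory_order_relaxed, memory_order_relaxed)) {
            // Lost the race: another thread already installed its handle.
            sema_os_destroy(tmp);
        }
    }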
index 50dfaabf0794e1c95afbd83f1be9d04e3e18fce5..37a3ecfc89cd1c5d6286dc2e9b077153c2d1d333 100644 (file)
 #pragma mark - platform macros
 
 DISPATCH_ENUM(dispatch_lock_options, uint32_t,
-               DLOCK_LOCK_NONE                         = 0x00000000,
-               DLOCK_LOCK_DATA_CONTENTION  = 0x00010000,
+       DLOCK_LOCK_NONE                         = 0x00000000,
+       DLOCK_LOCK_DATA_CONTENTION  = 0x00010000,
 );
 
 #if TARGET_OS_MAC
 
-typedef mach_port_t dispatch_lock_owner;
+typedef mach_port_t dispatch_tid;
 typedef uint32_t dispatch_lock;
 
-#define DLOCK_OWNER_NULL                       ((dispatch_lock_owner)MACH_PORT_NULL)
 #define DLOCK_OWNER_MASK                       ((dispatch_lock)0xfffffffc)
-#define DLOCK_NOWAITERS_BIT                    ((dispatch_lock)0x00000001)
-#define DLOCK_NOFAILED_TRYLOCK_BIT     ((dispatch_lock)0x00000002)
-#define _dispatch_tid_self()           ((dispatch_lock_owner)_dispatch_thread_port())
+#define DLOCK_WAITERS_BIT                      ((dispatch_lock)0x00000001)
+#define DLOCK_FAILED_TRYLOCK_BIT       ((dispatch_lock)0x00000002)
 
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_lock_is_locked(dispatch_lock lock_value)
-{
-       return (lock_value & DLOCK_OWNER_MASK) != 0;
-}
+#define DLOCK_OWNER_NULL                       ((dispatch_tid)MACH_PORT_NULL)
+#define _dispatch_tid_self()           ((dispatch_tid)_dispatch_thread_port())
 
 DISPATCH_ALWAYS_INLINE
-static inline dispatch_lock_owner
+static inline dispatch_tid
 _dispatch_lock_owner(dispatch_lock lock_value)
 {
-       lock_value &= DLOCK_OWNER_MASK;
-       if (lock_value) {
-               lock_value |= DLOCK_NOWAITERS_BIT | DLOCK_NOFAILED_TRYLOCK_BIT;
+       if (lock_value & DLOCK_OWNER_MASK) {
+               return lock_value | DLOCK_WAITERS_BIT | DLOCK_FAILED_TRYLOCK_BIT;
        }
-       return lock_value;
+       return DLOCK_OWNER_NULL;
 }
 
+#elif defined(__linux__)
+
+#include <linux/futex.h>
+#include <unistd.h>
+#include <sys/syscall.h>   /* For SYS_xxx definitions */
+
+typedef uint32_t dispatch_tid;
+typedef uint32_t dispatch_lock;
+
+#define DLOCK_OWNER_MASK                       ((dispatch_lock)FUTEX_TID_MASK)
+#define DLOCK_WAITERS_BIT                      ((dispatch_lock)FUTEX_WAITERS)
+#define DLOCK_FAILED_TRYLOCK_BIT       ((dispatch_lock)FUTEX_OWNER_DIED)
+
+#define DLOCK_OWNER_NULL                       ((dispatch_tid)0)
+#define _dispatch_tid_self()        ((dispatch_tid)(_dispatch_get_tsd_base()->tid))
+
 DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_lock_owner tid)
+static inline dispatch_tid
+_dispatch_lock_owner(dispatch_lock lock_value)
 {
-       // equivalent to _dispatch_lock_owner(lock_value) == tid
-       return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0;
+       return lock_value & DLOCK_OWNER_MASK;
 }
 
+#else
+#  error define _dispatch_lock encoding scheme for your platform here
+#endif
+
 DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_lock_has_waiters(dispatch_lock lock_value)
+static inline dispatch_lock
+_dispatch_lock_value_from_tid(dispatch_tid tid)
 {
-       bool nowaiters_bit = (lock_value & DLOCK_NOWAITERS_BIT);
-       return _dispatch_lock_is_locked(lock_value) != nowaiters_bit;
+       return tid & DLOCK_OWNER_MASK;
 }
 
 DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_lock_has_failed_trylock(dispatch_lock lock_value)
+static inline dispatch_lock
+_dispatch_lock_value_for_self(void)
 {
-       return !(lock_value & DLOCK_NOFAILED_TRYLOCK_BIT);
+       return _dispatch_lock_value_from_tid(_dispatch_tid_self());
 }
 
-#elif defined(__linux__)
-#include <linux/futex.h>
-#include <unistd.h>
-#include <sys/syscall.h>   /* For SYS_xxx definitions */
-
-typedef uint32_t dispatch_lock;
-typedef pid_t dispatch_lock_owner;
-
-#define DLOCK_OWNER_NULL                       ((dispatch_lock_owner)0)
-#define DLOCK_OWNER_MASK                       ((dispatch_lock)FUTEX_TID_MASK)
-#define DLOCK_WAITERS_BIT                      ((dispatch_lock)FUTEX_WAITERS)
-#define DLOCK_FAILED_TRYLOCK_BIT       ((dispatch_lock)FUTEX_OWNER_DIED)
-#define _dispatch_tid_self() \
-               ((dispatch_lock_owner)(_dispatch_get_tsd_base()->tid))
-
 DISPATCH_ALWAYS_INLINE
 static inline bool
 _dispatch_lock_is_locked(dispatch_lock lock_value)
 {
+       // equivalent to _dispatch_lock_owner(lock_value) != DLOCK_OWNER_NULL
        return (lock_value & DLOCK_OWNER_MASK) != 0;
 }
 
 DISPATCH_ALWAYS_INLINE
-static inline dispatch_lock_owner
-_dispatch_lock_owner(dispatch_lock lock_value)
+static inline bool
+_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_tid tid)
 {
-       return (lock_value & DLOCK_OWNER_MASK);
+       // equivalent to _dispatch_lock_owner(lock_value) == tid
+       return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0;
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_lock_owner tid)
+_dispatch_lock_is_locked_by_self(dispatch_lock lock_value)
 {
-       return _dispatch_lock_owner(lock_value) == tid;
+       // equivalent to _dispatch_lock_owner(lock_value) == _dispatch_tid_self()
+       return ((lock_value ^ _dispatch_tid_self()) & DLOCK_OWNER_MASK) == 0;
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -133,32 +132,18 @@ DISPATCH_ALWAYS_INLINE
 static inline bool
 _dispatch_lock_has_failed_trylock(dispatch_lock lock_value)
 {
-       return !(lock_value & DLOCK_FAILED_TRYLOCK_BIT);
+       return (lock_value & DLOCK_FAILED_TRYLOCK_BIT);
 }
 
-#else
-#  error define _dispatch_lock encoding scheme for your platform here
-#endif
-
 #if __has_include(<sys/ulock.h>)
 #include <sys/ulock.h>
+#ifdef UL_COMPARE_AND_WAIT
+#define HAVE_UL_COMPARE_AND_WAIT 1
 #endif
-
-#ifndef HAVE_UL_COMPARE_AND_WAIT
-#if defined(UL_COMPARE_AND_WAIT) && DISPATCH_HOST_SUPPORTS_OSX(101200)
-#  define HAVE_UL_COMPARE_AND_WAIT 1
-#else
-#  define HAVE_UL_COMPARE_AND_WAIT 0
+#ifdef UL_UNFAIR_LOCK
+#define HAVE_UL_UNFAIR_LOCK 1
 #endif
-#endif // HAVE_UL_COMPARE_AND_WAIT
-
-#ifndef HAVE_UL_UNFAIR_LOCK
-#if defined(UL_UNFAIR_LOCK) && DISPATCH_HOST_SUPPORTS_OSX(101200)
-#  define HAVE_UL_UNFAIR_LOCK 1
-#else
-#  define HAVE_UL_UNFAIR_LOCK 0
 #endif
-#endif // HAVE_UL_UNFAIR_LOCK
 
 #ifndef HAVE_FUTEX
 #ifdef __linux__
@@ -170,105 +155,66 @@ _dispatch_lock_has_failed_trylock(dispatch_lock lock_value)
 
 #pragma mark - semaphores
 
-#ifndef DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-#if TARGET_OS_MAC
-#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK (!HAVE_UL_COMPARE_AND_WAIT)
-#else
-#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK 0
-#endif
-#endif
-
 #if USE_MACH_SEM
 
-typedef semaphore_t _os_semaphore_t;
-#define _OS_SEM_POLICY_FIFO  SYNC_POLICY_FIFO
-#define _OS_SEM_POLICY_LIFO  SYNC_POLICY_LIFO
-#define _OS_SEM_TIMEOUT() KERN_OPERATION_TIMED_OUT
+typedef semaphore_t _dispatch_sema4_t;
+#define _DSEMA4_POLICY_FIFO  SYNC_POLICY_FIFO
+#define _DSEMA4_POLICY_LIFO  SYNC_POLICY_LIFO
+#define _DSEMA4_TIMEOUT() KERN_OPERATION_TIMED_OUT
 
-#define _os_semaphore_init(sema, policy) (void)(*(sema) = MACH_PORT_NULL)
-#define _os_semaphore_is_created(sema)   (*(sema) != MACH_PORT_NULL)
-void _os_semaphore_create_slow(_os_semaphore_t *sema, int policy);
+#define _dispatch_sema4_init(sema, policy) (void)(*(sema) = MACH_PORT_NULL)
+#define _dispatch_sema4_is_created(sema)   (*(sema) != MACH_PORT_NULL)
+void _dispatch_sema4_create_slow(_dispatch_sema4_t *sema, int policy);
 
 #elif USE_POSIX_SEM
 
-typedef sem_t _os_semaphore_t;
-#define _OS_SEM_POLICY_FIFO 0
-#define _OS_SEM_POLICY_LIFO 0
-#define _OS_SEM_TIMEOUT() ((errno) = ETIMEDOUT, -1)
+typedef sem_t _dispatch_sema4_t;
+#define _DSEMA4_POLICY_FIFO 0
+#define _DSEMA4_POLICY_LIFO 0
+#define _DSEMA4_TIMEOUT() ((errno) = ETIMEDOUT, -1)
 
-void _os_semaphore_init(_os_semaphore_t *sema, int policy);
-#define _os_semaphore_is_created(sema) 1
-#define _os_semaphore_create_slow(sema, policy) ((void)0)
+void _dispatch_sema4_init(_dispatch_sema4_t *sema, int policy);
+#define _dispatch_sema4_is_created(sema) ((void)sema, 1)
+#define _dispatch_sema4_create_slow(sema, policy) ((void)sema, (void)policy)
 
 #elif USE_WIN32_SEM
 
-typedef HANDLE _os_semaphore_t;
-#define _OS_SEM_POLICY_FIFO 0
-#define _OS_SEM_POLICY_LIFO 0
-#define _OS_SEM_TIMEOUT() ((errno) = ETIMEDOUT, -1)
+typedef HANDLE _dispatch_sema4_t;
+#define _DSEMA4_POLICY_FIFO 0
+#define _DSEMA4_POLICY_LIFO 0
+#define _DSEMA4_TIMEOUT() ((errno) = ETIMEDOUT, -1)
 
-#define _os_semaphore_init(sema, policy) (void)(*(sema) = 0)
-#define _os_semaphore_is_created(sema)   (*(sema) != 0)
-void _os_semaphore_create_slow(_os_semaphore_t *sema, int policy);
+#define _dispatch_sema4_init(sema, policy) (void)(*(sema) = 0)
+#define _dispatch_sema4_is_created(sema)   (*(sema) != 0)
+void _dispatch_sema4_create_slow(_dispatch_sema4_t *sema, int policy);
 
 #else
-#error "port has to implement _os_semaphore_t"
+#error "port has to implement _dispatch_sema4_t"
 #endif
 
-void _os_semaphore_dispose_slow(_os_semaphore_t *sema);
-void _os_semaphore_signal(_os_semaphore_t *sema, long count);
-void _os_semaphore_wait(_os_semaphore_t *sema);
-bool _os_semaphore_timedwait(_os_semaphore_t *sema, dispatch_time_t timeout);
+void _dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy);
+void _dispatch_sema4_signal(_dispatch_sema4_t *sema, long count);
+void _dispatch_sema4_wait(_dispatch_sema4_t *sema);
+bool _dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout);
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_os_semaphore_create(_os_semaphore_t *sema, int policy)
+_dispatch_sema4_create(_dispatch_sema4_t *sema, int policy)
 {
-       if (!_os_semaphore_is_created(sema)) {
-               _os_semaphore_create_slow(sema, policy);
+       if (!_dispatch_sema4_is_created(sema)) {
+               _dispatch_sema4_create_slow(sema, policy);
        }
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_os_semaphore_dispose(_os_semaphore_t *sema)
+_dispatch_sema4_dispose(_dispatch_sema4_t *sema, int policy)
 {
-       if (_os_semaphore_is_created(sema)) {
-               _os_semaphore_dispose_slow(sema);
+       if (_dispatch_sema4_is_created(sema)) {
+               _dispatch_sema4_dispose_slow(sema, policy);
        }
 }
 
-#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-semaphore_t _dispatch_thread_semaphore_create(void);
-void _dispatch_thread_semaphore_dispose(void *);
-
-DISPATCH_ALWAYS_INLINE
-static inline semaphore_t
-_dispatch_get_thread_semaphore(void)
-{
-       semaphore_t sema = (semaphore_t)(uintptr_t)
-                       _dispatch_thread_getspecific(dispatch_sema4_key);
-       if (unlikely(!sema)) {
-               return _dispatch_thread_semaphore_create();
-       }
-       _dispatch_thread_setspecific(dispatch_sema4_key, NULL);
-       return sema;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_put_thread_semaphore(semaphore_t sema)
-{
-       semaphore_t old_sema = (semaphore_t)(uintptr_t)
-                       _dispatch_thread_getspecific(dispatch_sema4_key);
-       _dispatch_thread_setspecific(dispatch_sema4_key, (void*)(uintptr_t)sema);
-       if (unlikely(old_sema)) {
-               return _dispatch_thread_semaphore_dispose((void *)(uintptr_t)old_sema);
-       }
-}
-#endif
-
-
 #pragma mark - compare and wait
 
 DISPATCH_NOT_TAIL_CALLED
@@ -296,18 +242,13 @@ void _dispatch_wake_by_address(uint32_t volatile *address);
  * This locking primitive has no notion of ownership
  */
 typedef struct dispatch_thread_event_s {
-#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-       union {
-               _os_semaphore_t dte_sema;
-               uint32_t dte_value;
-       };
-#elif HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
+#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
        // 1 means signalled but not waited on yet
        // UINT32_MAX means waited on, but not signalled yet
        // 0 is the initial and final state
        uint32_t dte_value;
 #else
-       _os_semaphore_t dte_sema;
+       _dispatch_sema4_t dte_sema;
 #endif
 } dispatch_thread_event_s, *dispatch_thread_event_t;
 
@@ -319,16 +260,10 @@ DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_thread_event_init(dispatch_thread_event_t dte)
 {
-#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-       if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
-               dte->dte_sema = _dispatch_get_thread_semaphore();
-               return;
-       }
-#endif
 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
        dte->dte_value = 0;
 #else
-       _os_semaphore_init(&dte->dte_sema, _OS_SEM_POLICY_FIFO);
+       _dispatch_sema4_init(&dte->dte_sema, _DSEMA4_POLICY_FIFO);
 #endif
 }
 
@@ -336,12 +271,6 @@ DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_thread_event_signal(dispatch_thread_event_t dte)
 {
-#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-       if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
-               _dispatch_thread_event_signal_slow(dte);
-               return;
-       }
-#endif
 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
        if (os_atomic_inc_orig(&dte->dte_value, release) == 0) {
                // 0 -> 1 transition doesn't need a signal
@@ -360,12 +289,6 @@ DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_thread_event_wait(dispatch_thread_event_t dte)
 {
-#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-       if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
-               _dispatch_thread_event_wait_slow(dte);
-               return;
-       }
-#endif
 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
        if (os_atomic_dec(&dte->dte_value, acquire) == 0) {
                // 1 -> 0 is always a valid transition, so we can return
@@ -382,17 +305,11 @@ DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_thread_event_destroy(dispatch_thread_event_t dte)
 {
-#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-       if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
-               _dispatch_put_thread_semaphore(dte->dte_sema);
-               return;
-       }
-#endif
 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
        // nothing to do
        dispatch_assert(dte->dte_value == 0);
 #else
-       _os_semaphore_dispose(&dte->dte_sema);
+       _dispatch_sema4_dispose(&dte->dte_sema, _DSEMA4_POLICY_FIFO);
 #endif
 }
 
@@ -412,9 +329,9 @@ DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_unfair_lock_lock(dispatch_unfair_lock_t l)
 {
-       dispatch_lock tid_self = _dispatch_tid_self();
+       dispatch_lock value_self = _dispatch_lock_value_for_self();
        if (likely(os_atomic_cmpxchg(&l->dul_lock,
-                       DLOCK_OWNER_NULL, tid_self, acquire))) {
+                       DLOCK_OWNER_NULL, value_self, acquire))) {
                return;
        }
        return _dispatch_unfair_lock_lock_slow(l, DLOCK_LOCK_NONE);
@@ -422,54 +339,42 @@ _dispatch_unfair_lock_lock(dispatch_unfair_lock_t l)
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dispatch_unfair_lock_trylock(dispatch_unfair_lock_t l,
-               dispatch_lock_owner *owner)
+_dispatch_unfair_lock_trylock(dispatch_unfair_lock_t l, dispatch_tid *owner)
 {
-       dispatch_lock tid_old, tid_new, tid_self = _dispatch_tid_self();
+       dispatch_lock value_self = _dispatch_lock_value_for_self();
+       dispatch_lock old_value, new_value;
 
-       os_atomic_rmw_loop(&l->dul_lock, tid_old, tid_new, acquire, {
-               if (likely(!_dispatch_lock_is_locked(tid_old))) {
-                       tid_new = tid_self;
+       os_atomic_rmw_loop(&l->dul_lock, old_value, new_value, acquire, {
+               if (likely(!_dispatch_lock_is_locked(old_value))) {
+                       new_value = value_self;
                } else {
-#ifdef DLOCK_NOFAILED_TRYLOCK_BIT
-                       tid_new = tid_old & ~DLOCK_NOFAILED_TRYLOCK_BIT;
-#else
-                       tid_new = tid_old | DLOCK_FAILED_TRYLOCK_BIT;
-#endif
+                       new_value = old_value | DLOCK_FAILED_TRYLOCK_BIT;
                }
        });
-       if (owner) *owner = _dispatch_lock_owner(tid_new);
-       return !_dispatch_lock_is_locked(tid_old);
+       if (owner) *owner = _dispatch_lock_owner(new_value);
+       return !_dispatch_lock_is_locked(old_value);
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
 _dispatch_unfair_lock_tryunlock(dispatch_unfair_lock_t l)
 {
-       dispatch_lock tid_old, tid_new;
+       dispatch_lock old_value, new_value;
 
-       os_atomic_rmw_loop(&l->dul_lock, tid_old, tid_new, release, {
-#ifdef DLOCK_NOFAILED_TRYLOCK_BIT
-               if (likely(tid_old & DLOCK_NOFAILED_TRYLOCK_BIT)) {
-                       tid_new = DLOCK_OWNER_NULL;
-               } else {
-                       tid_new = tid_old | DLOCK_NOFAILED_TRYLOCK_BIT;
-               }
-#else
-               if (likely(!(tid_old & DLOCK_FAILED_TRYLOCK_BIT))) {
-                       tid_new = DLOCK_OWNER_NULL;
+       os_atomic_rmw_loop(&l->dul_lock, old_value, new_value, release, {
+               if (unlikely(old_value & DLOCK_FAILED_TRYLOCK_BIT)) {
+                       new_value = old_value ^ DLOCK_FAILED_TRYLOCK_BIT;
                } else {
-                       tid_new = tid_old & ~DLOCK_FAILED_TRYLOCK_BIT;
+                       new_value = DLOCK_OWNER_NULL;
                }
-#endif
        });
-       if (unlikely(tid_new)) {
+       if (unlikely(new_value)) {
                // unlock failed, renew the lock, which needs an acquire barrier
                os_atomic_thread_fence(acquire);
                return false;
        }
-       if (unlikely(_dispatch_lock_has_waiters(tid_old))) {
-               _dispatch_unfair_lock_unlock_slow(l, tid_old);
+       if (unlikely(_dispatch_lock_has_waiters(old_value))) {
+               _dispatch_unfair_lock_unlock_slow(l, old_value);
        }
        return true;
 }
@@ -478,18 +383,18 @@ DISPATCH_ALWAYS_INLINE
 static inline bool
 _dispatch_unfair_lock_unlock_had_failed_trylock(dispatch_unfair_lock_t l)
 {
-       dispatch_lock tid_cur, tid_self = _dispatch_tid_self();
+       dispatch_lock cur, value_self = _dispatch_lock_value_for_self();
 #if HAVE_FUTEX
        if (likely(os_atomic_cmpxchgv(&l->dul_lock,
-                       tid_self, DLOCK_OWNER_NULL, &tid_cur, release))) {
+                       value_self, DLOCK_OWNER_NULL, &cur, release))) {
                return false;
        }
 #else
-       tid_cur = os_atomic_xchg(&l->dul_lock, DLOCK_OWNER_NULL, release);
-       if (likely(tid_cur == tid_self)) return false;
+       cur = os_atomic_xchg(&l->dul_lock, DLOCK_OWNER_NULL, release);
+       if (likely(cur == value_self)) return false;
 #endif
-       _dispatch_unfair_lock_unlock_slow(l, tid_cur);
-       return _dispatch_lock_has_failed_trylock(tid_cur);
+       _dispatch_unfair_lock_unlock_slow(l, cur);
+       return _dispatch_lock_has_failed_trylock(cur);
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -532,9 +437,8 @@ DISPATCH_ALWAYS_INLINE
 static inline bool
 _dispatch_gate_tryenter(dispatch_gate_t l)
 {
-       dispatch_lock tid_self = _dispatch_tid_self();
-       return likely(os_atomic_cmpxchg(&l->dgl_lock,
-                       DLOCK_GATE_UNLOCKED, tid_self, acquire));
+       return os_atomic_cmpxchg(&l->dgl_lock, DLOCK_GATE_UNLOCKED,
+                       _dispatch_lock_value_for_self(), acquire);
 }
 
 #define _dispatch_gate_wait(l, flags) \
@@ -544,36 +448,39 @@ DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_gate_broadcast(dispatch_gate_t l)
 {
-       dispatch_lock tid_cur, tid_self = _dispatch_tid_self();
-       tid_cur = os_atomic_xchg(&l->dgl_lock, DLOCK_GATE_UNLOCKED, release);
-       if (likely(tid_cur == tid_self)) return;
-       _dispatch_gate_broadcast_slow(l, tid_cur);
+       dispatch_lock cur, value_self = _dispatch_lock_value_for_self();
+       cur = os_atomic_xchg(&l->dgl_lock, DLOCK_GATE_UNLOCKED, release);
+       if (likely(cur == value_self)) return;
+       _dispatch_gate_broadcast_slow(l, cur);
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
 _dispatch_once_gate_tryenter(dispatch_once_gate_t l)
 {
-       dispatch_once_t tid_self = (dispatch_once_t)_dispatch_tid_self();
-       return likely(os_atomic_cmpxchg(&l->dgo_once,
-                       DLOCK_ONCE_UNLOCKED, tid_self, acquire));
+       return os_atomic_cmpxchg(&l->dgo_once, DLOCK_ONCE_UNLOCKED,
+                       (dispatch_once_t)_dispatch_lock_value_for_self(), acquire);
 }
 
 #define _dispatch_once_gate_wait(l) \
        _dispatch_gate_wait_slow(&(l)->dgo_gate, (dispatch_lock)DLOCK_ONCE_DONE, \
                        DLOCK_LOCK_NONE)
 
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_once_t
+_dispatch_once_xchg_done(dispatch_once_t *pred)
+{
+       return os_atomic_xchg(pred, DLOCK_ONCE_DONE, release);
+}
+
 DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_once_gate_broadcast(dispatch_once_gate_t l)
 {
-       dispatch_once_t tid_cur, tid_self = (dispatch_once_t)_dispatch_tid_self();
-       // see once.c for explanation about this trick
-       os_atomic_maximally_synchronizing_barrier();
-       // above assumed to contain release barrier
-       tid_cur = os_atomic_xchg(&l->dgo_once, DLOCK_ONCE_DONE, relaxed);
-       if (likely(tid_cur == tid_self)) return;
-       _dispatch_gate_broadcast_slow(&l->dgo_gate, (dispatch_lock)tid_cur);
+       dispatch_lock value_self = _dispatch_lock_value_for_self();
+       dispatch_once_t cur = _dispatch_once_xchg_done(&l->dgo_once);
+       if (likely(cur == (dispatch_once_t)value_self)) return;
+       _dispatch_gate_broadcast_slow(&l->dgo_gate, (dispatch_lock)cur);
 }
 
 #endif // __DISPATCH_SHIMS_LOCK__
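
The unified encoding stores the owner's tid in DLOCK_OWNER_MASK and keeps state in the low bits (FUTEX_WAITERS/FUTEX_OWNER_DIED on Linux, the two low bits of the mach port name on Darwin), so an ownership test is one xor-and-mask. A small demonstration with illustrative constants:

    #include <assert.h>
    #include <stdint.h>

    #define OWNER_MASK  ((uint32_t)0xfffffffc)
    #define WAITERS_BIT ((uint32_t)0x00000001)

    static int
    is_locked_by(uint32_t lock_value, uint32_t tid)
    {
        // Equal owners xor to zero; the state bits are masked away.
        return ((lock_value ^ tid) & OWNER_MASK) == 0;
    }

    static void
    demo(void)
    {
        uint32_t tid = 0x1234u;                       // low two bits zero
        assert(is_locked_by(tid, tid));
        assert(is_locked_by(tid | WAITERS_BIT, tid)); // waiters ignored
        assert(!is_locked_by(0, tid));                // unlocked
    }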
index 8af33ead9e4cf7bc8efdbae378ee64c662efa601..be9327baf0fbef31769a3d0f29af0ce4d23583f5 100644 (file)
 #ifndef __DISPATCH_SHIMS_PERFMON__
 #define __DISPATCH_SHIMS_PERFMON__
 
-#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION
-
-#if defined (USE_APPLE_TSD_OPTIMIZATIONS) && defined(SIMULATE_5491082) && \
-               (defined(__i386__) || defined(__x86_64__))
-#ifdef __LP64__
-#define _dispatch_perfmon_workitem_inc() asm("incq %%gs:%0" : "+m" \
-               (*(void **)(dispatch_bcounter_key * sizeof(void *) + \
-               _PTHREAD_TSD_OFFSET)) :: "cc")
-#define _dispatch_perfmon_workitem_dec() asm("decq %%gs:%0" : "+m" \
-               (*(void **)(dispatch_bcounter_key * sizeof(void *) + \
-               _PTHREAD_TSD_OFFSET)) :: "cc")
-#else
-#define _dispatch_perfmon_workitem_inc() asm("incl %%gs:%0" : "+m" \
-               (*(void **)(dispatch_bcounter_key * sizeof(void *) + \
-               _PTHREAD_TSD_OFFSET)) :: "cc")
-#define _dispatch_perfmon_workitem_dec() asm("decl %%gs:%0" : "+m" \
-               (*(void **)(dispatch_bcounter_key * sizeof(void *) + \
-               _PTHREAD_TSD_OFFSET)) :: "cc")
+#if DISPATCH_PERF_MON
+#if DISPATCH_INTROSPECTION
+#error invalid configuration
 #endif
-#else /* !USE_APPLE_TSD_OPTIMIZATIONS */
+
+typedef enum {
+       perfmon_thread_no_trace = 0,
+       perfmon_thread_event_no_steal,  // 1) Event threads that couldn't steal
+       perfmon_thread_event_steal,             // 2) Event threads failing to steal very late
+       perfmon_thread_worker_non_oc,   // 3) Non overcommit threads finding
+                                                                       //              nothing on the root queues
+       perfmon_thread_worker_oc,               // 4) Overcommit thread finding nothing to do
+       perfmon_thread_manager,
+} perfmon_thread_type;
+
+DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_perfmon_workitem_inc(void)
 {
@@ -54,6 +50,8 @@ _dispatch_perfmon_workitem_inc(void)
        cnt = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key);
        _dispatch_thread_setspecific(dispatch_bcounter_key, (void *)++cnt);
 }
+
+DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_perfmon_workitem_dec(void)
 {
@@ -61,18 +59,40 @@ _dispatch_perfmon_workitem_dec(void)
        cnt = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key);
        _dispatch_thread_setspecific(dispatch_bcounter_key, (void *)--cnt);
 }
-#endif /* USE_APPLE_TSD_OPTIMIZATIONS */
 
+#define DISPATCH_PERF_MON_ARGS_PROTO  , uint64_t perfmon_start
+#define DISPATCH_PERF_MON_ARGS        , perfmon_start
+#define DISPATCH_PERF_MON_VAR         uint64_t perfmon_start;
+#define DISPATCH_PERF_MON_VAR_INIT    uint64_t perfmon_start = 0;
+
+#define _dispatch_perfmon_start_impl(trace) ({ \
+               if (trace) _dispatch_ktrace0(DISPATCH_PERF_MON_worker_thread_start); \
+               perfmon_start = _dispatch_absolute_time(); \
+       })
 #define _dispatch_perfmon_start() \
-               uint64_t start = _dispatch_absolute_time()
-#define _dispatch_perfmon_end() \
-               _dispatch_queue_merge_stats(start)
+               DISPATCH_PERF_MON_VAR _dispatch_perfmon_start_impl(true)
+#define _dispatch_perfmon_start_notrace() \
+               DISPATCH_PERF_MON_VAR _dispatch_perfmon_start_impl(false)
+#define _dispatch_perfmon_end(thread_type) \
+               _dispatch_queue_merge_stats(perfmon_start, true, thread_type)
+#define _dispatch_perfmon_end_notrace() \
+               _dispatch_queue_merge_stats(perfmon_start, false, perfmon_thread_no_trace)
+
+void _dispatch_queue_merge_stats(uint64_t start, bool trace, perfmon_thread_type type);
+
 #else
 
+#define DISPATCH_PERF_MON_ARGS_PROTO
+#define DISPATCH_PERF_MON_ARGS
+#define DISPATCH_PERF_MON_VAR
+#define DISPATCH_PERF_MON_VAR_INIT
 #define _dispatch_perfmon_workitem_inc()
 #define _dispatch_perfmon_workitem_dec()
+#define _dispatch_perfmon_start_impl(trace)
 #define _dispatch_perfmon_start()
-#define _dispatch_perfmon_end()
+#define _dispatch_perfmon_end(thread_type)
+#define _dispatch_perfmon_start_notrace()
+#define _dispatch_perfmon_end_notrace()
 
 #endif // DISPATCH_PERF_MON
 
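For illustration, a self-contained sketch of the per-thread work-item counter that the portable inline fallbacks above keep in TSD; C11 _Thread_local stands in for _dispatch_thread_getspecific()/_dispatch_thread_setspecific(), and all names below are invented for the demo:

#include <stdio.h>

static _Thread_local unsigned long workitem_count; // stand-in for dispatch_bcounter_key

static inline void workitem_inc(void) { workitem_count++; }
static inline void workitem_dec(void) { workitem_count--; }

int main(void)
{
	workitem_inc();
	workitem_inc();
	workitem_dec();
	printf("outstanding work items: %lu\n", workitem_count); // prints 1
	return 0;
}
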
diff --git a/src/shims/priority.h b/src/shims/priority.h
new file mode 100644 (file)
index 0000000..3e85ff5
--- /dev/null
+++ b/src/shims/priority.h
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+/*
+ * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
+ * which are subject to change in future releases of Mac OS X. Any applications
+ * relying on these interfaces WILL break.
+ */
+
+#ifndef __DISPATCH_SHIMS_PRIORITY__
+#define __DISPATCH_SHIMS_PRIORITY__
+
+#if HAVE_PTHREAD_QOS_H && __has_include(<pthread/qos_private.h>)
+#include <pthread/qos.h>
+#include <pthread/qos_private.h>
+#ifndef _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
+#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000
+#endif
+#ifndef _PTHREAD_PRIORITY_SCHED_PRI_FLAG
+#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000
+#endif
+#ifndef _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG
+#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000
+#endif
+#ifndef _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG
+#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000
+#endif
+#ifndef _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG
+#define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000
+#endif
+#else // HAVE_PTHREAD_QOS_H
+OS_ENUM(qos_class, unsigned int,
+       QOS_CLASS_USER_INTERACTIVE = 0x21,
+       QOS_CLASS_USER_INITIATED = 0x19,
+       QOS_CLASS_DEFAULT = 0x15,
+       QOS_CLASS_UTILITY = 0x11,
+       QOS_CLASS_BACKGROUND = 0x09,
+       QOS_CLASS_MAINTENANCE = 0x05,
+       QOS_CLASS_UNSPECIFIED = 0x00,
+);
+typedef unsigned long pthread_priority_t;
+#define QOS_MIN_RELATIVE_PRIORITY (-15)
+#define _PTHREAD_PRIORITY_FLAGS_MASK (~0xffffff)
+#define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x00ffff00
+#define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT (8ull)
+#define _PTHREAD_PRIORITY_PRIORITY_MASK 0x000000ff
+#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000
+#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000
+#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000
+#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000
+#define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000
+#define _PTHREAD_PRIORITY_ENFORCE_FLAG  0x10000000
+
+#endif // HAVE_PTHREAD_QOS_H
+
+typedef uint32_t dispatch_qos_t;
+typedef uint32_t dispatch_priority_t;
+typedef uint16_t dispatch_priority_requested_t;
+
+#define DISPATCH_QOS_UNSPECIFIED            ((dispatch_qos_t)0)
+#define DISPATCH_QOS_MAINTENANCE            ((dispatch_qos_t)1)
+#define DISPATCH_QOS_BACKGROUND             ((dispatch_qos_t)2)
+#define DISPATCH_QOS_UTILITY                ((dispatch_qos_t)3)
+#define DISPATCH_QOS_DEFAULT                ((dispatch_qos_t)4)
+#define DISPATCH_QOS_USER_INITIATED         ((dispatch_qos_t)5)
+#define DISPATCH_QOS_USER_INTERACTIVE       ((dispatch_qos_t)6)
+#define DISPATCH_QOS_MAX                    DISPATCH_QOS_USER_INTERACTIVE
+#define DISPATCH_QOS_SATURATED              ((dispatch_qos_t)15)
+
+#define DISPATCH_PRIORITY_RELPRI_MASK        ((dispatch_priority_t)0x000000ff)
+#define DISPATCH_PRIORITY_RELPRI_SHIFT       0
+#define DISPATCH_PRIORITY_QOS_MASK           ((dispatch_priority_t)0x0000ff00)
+#define DISPATCH_PRIORITY_QOS_SHIFT          8
+#define DISPATCH_PRIORITY_REQUESTED_MASK     ((dispatch_priority_t)0x0000ffff)
+#define DISPATCH_PRIORITY_OVERRIDE_MASK      ((dispatch_priority_t)0x00ff0000)
+#define DISPATCH_PRIORITY_OVERRIDE_SHIFT     16
+#define DISPATCH_PRIORITY_FLAGS_MASK         ((dispatch_priority_t)0xff000000)
+
+#define DISPATCH_PRIORITY_SATURATED_OVERRIDE ((dispatch_priority_t)0x000f0000)
+
+#define DISPATCH_PRIORITY_FLAG_OVERCOMMIT    ((dispatch_priority_t)0x80000000) // _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
+#define DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE  ((dispatch_priority_t)0x04000000) // _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG
+#define DISPATCH_PRIORITY_FLAG_MANAGER       ((dispatch_priority_t)0x02000000) // _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG
+#define DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK \
+               (DISPATCH_PRIORITY_FLAG_OVERCOMMIT | DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE | \
+               DISPATCH_PRIORITY_FLAG_MANAGER)
+
+// not passed to pthread
+#define DISPATCH_PRIORITY_FLAG_INHERIT       ((dispatch_priority_t)0x40000000) // _PTHREAD_PRIORITY_INHERIT_FLAG
+#define DISPATCH_PRIORITY_FLAG_ENFORCE       ((dispatch_priority_t)0x10000000) // _PTHREAD_PRIORITY_ENFORCE_FLAG
+#define DISPATCH_PRIORITY_FLAG_ROOTQUEUE     ((dispatch_priority_t)0x20000000) // _PTHREAD_PRIORITY_ROOTQUEUE_FLAG
+
+#pragma mark dispatch_qos
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_qos_t
+_dispatch_qos_from_qos_class(qos_class_t cls)
+{
+       switch ((unsigned int)cls) {
+       case QOS_CLASS_USER_INTERACTIVE: return DISPATCH_QOS_USER_INTERACTIVE;
+       case QOS_CLASS_USER_INITIATED:   return DISPATCH_QOS_USER_INITIATED;
+       case QOS_CLASS_DEFAULT:          return DISPATCH_QOS_DEFAULT;
+       case QOS_CLASS_UTILITY:          return DISPATCH_QOS_UTILITY;
+       case QOS_CLASS_BACKGROUND:       return DISPATCH_QOS_BACKGROUND;
+       case QOS_CLASS_MAINTENANCE:      return DISPATCH_QOS_MAINTENANCE;
+       default: return DISPATCH_QOS_UNSPECIFIED;
+       }
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline qos_class_t
+_dispatch_qos_to_qos_class(dispatch_qos_t qos)
+{
+       switch (qos) {
+       case DISPATCH_QOS_USER_INTERACTIVE: return QOS_CLASS_USER_INTERACTIVE;
+       case DISPATCH_QOS_USER_INITIATED:   return QOS_CLASS_USER_INITIATED;
+       case DISPATCH_QOS_DEFAULT:          return QOS_CLASS_DEFAULT;
+       case DISPATCH_QOS_UTILITY:          return QOS_CLASS_UTILITY;
+       case DISPATCH_QOS_BACKGROUND:       return QOS_CLASS_BACKGROUND;
+       case DISPATCH_QOS_MAINTENANCE:      return (qos_class_t)QOS_CLASS_MAINTENANCE;
+       default: return QOS_CLASS_UNSPECIFIED;
+       }
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_qos_t
+_dispatch_qos_from_queue_priority(long priority)
+{
+       switch (priority) {
+       case DISPATCH_QUEUE_PRIORITY_BACKGROUND:      return DISPATCH_QOS_BACKGROUND;
+       case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE: return DISPATCH_QOS_UTILITY;
+       case DISPATCH_QUEUE_PRIORITY_LOW:             return DISPATCH_QOS_UTILITY;
+       case DISPATCH_QUEUE_PRIORITY_DEFAULT:         return DISPATCH_QOS_DEFAULT;
+       case DISPATCH_QUEUE_PRIORITY_HIGH:            return DISPATCH_QOS_USER_INITIATED;
+       default: return _dispatch_qos_from_qos_class((qos_class_t)priority);
+       }
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_qos_t
+_dispatch_qos_from_pp(pthread_priority_t pp)
+{
+       pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
+       pp >>= _PTHREAD_PRIORITY_QOS_CLASS_SHIFT;
+       return (dispatch_qos_t)__builtin_ffs((int)pp);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline pthread_priority_t
+_dispatch_qos_to_pp(dispatch_qos_t qos)
+{
+       pthread_priority_t pp;
+       pp = 1ul << ((qos - 1) + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT);
+       return pp | _PTHREAD_PRIORITY_PRIORITY_MASK;
+}
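
For illustration, a standalone round-trip of the QoS bucket encoding used by _dispatch_qos_from_pp()/_dispatch_qos_to_pp() above: bucket i is stored as bit (i - 1) of the 16-bit QoS-class field, so __builtin_ffs() recovers the index. The mask constants are copied from this header; the real _dispatch_qos_to_pp() additionally ORs in _PTHREAD_PRIORITY_PRIORITY_MASK, which is omitted here, and the demo names are invented:

#include <stdint.h>
#include <stdio.h>

#define DEMO_QOS_CLASS_MASK  0x00ffff00ul
#define DEMO_QOS_CLASS_SHIFT 8

static uint32_t demo_qos_from_pp(unsigned long pp)
{
	pp &= DEMO_QOS_CLASS_MASK;
	pp >>= DEMO_QOS_CLASS_SHIFT;
	return (uint32_t)__builtin_ffs((int)pp); // 0 if no bucket bit is set
}

static unsigned long demo_pp_from_qos(uint32_t qos)
{
	return 1ul << ((qos - 1) + DEMO_QOS_CLASS_SHIFT);
}

int main(void)
{
	for (uint32_t qos = 1; qos <= 6; qos++) { // MAINTENANCE..USER_INTERACTIVE
		unsigned long pp = demo_pp_from_qos(qos);
		printf("qos %u <-> pp 0x%08lx <-> qos %u\n",
				qos, pp, demo_qos_from_pp(pp));
	}
	return 0;
}
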
+
+// including maintenance
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_qos_is_background(dispatch_qos_t qos)
+{
+       return qos && qos <= DISPATCH_QOS_BACKGROUND;
+}
+
+#pragma mark dispatch_priority
+
+#define _dispatch_priority_make(qos, relpri) \
+       (qos ? ((((qos) << DISPATCH_PRIORITY_QOS_SHIFT) & DISPATCH_PRIORITY_QOS_MASK) | \
+        ((dispatch_priority_t)(relpri - 1) & DISPATCH_PRIORITY_RELPRI_MASK)) : 0)
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_priority_t
+_dispatch_priority_with_override_qos(dispatch_priority_t pri,
+               dispatch_qos_t oqos)
+{
+       pri &= ~DISPATCH_PRIORITY_OVERRIDE_MASK;
+       pri |= oqos << DISPATCH_PRIORITY_OVERRIDE_SHIFT;
+       return pri;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline int
+_dispatch_priority_relpri(dispatch_priority_t dbp)
+{
+       if (dbp & DISPATCH_PRIORITY_QOS_MASK) {
+               return (int8_t)(dbp & DISPATCH_PRIORITY_RELPRI_MASK) + 1;
+       }
+       return 0;
+}
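
For illustration, a standalone check of the relative-priority byte encoding used by _dispatch_priority_make()/_dispatch_priority_relpri() above: relpri lies in [QOS_MIN_RELATIVE_PRIORITY, 0], is stored biased as (relpri - 1) in the low byte, and is recovered as (int8_t)byte + 1 (assuming, as the header does, that a QoS is also set). A minimal sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	for (int relpri = -15; relpri <= 0; relpri++) {
		uint32_t stored = (uint32_t)(relpri - 1) & 0xff; // encode (bias by -1)
		int decoded = (int8_t)stored + 1;                // decode (undo the bias)
		if (decoded != relpri) {
			printf("mismatch at %d\n", relpri);
			return 1;
		}
	}
	printf("relpri round-trips for [-15, 0]\n");
	return 0;
}
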
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_qos_t
+_dispatch_priority_qos(dispatch_priority_t dbp)
+{
+       dbp &= DISPATCH_PRIORITY_QOS_MASK;
+       return dbp >> DISPATCH_PRIORITY_QOS_SHIFT;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_qos_t
+_dispatch_priority_override_qos(dispatch_priority_t dbp)
+{
+       dbp &= DISPATCH_PRIORITY_OVERRIDE_MASK;
+       return dbp >> DISPATCH_PRIORITY_OVERRIDE_SHIFT;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_priority_t
+_dispatch_priority_from_pp_impl(pthread_priority_t pp, bool keep_flags)
+{
+       dispatch_assert(!(pp & _PTHREAD_PRIORITY_SCHED_PRI_FLAG));
+
+       dispatch_priority_t dbp;
+       if (keep_flags) {
+               dbp = pp & (DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK |
+                               DISPATCH_PRIORITY_RELPRI_MASK);
+       } else {
+               dbp = pp & DISPATCH_PRIORITY_RELPRI_MASK;
+       }
+
+       dbp |= _dispatch_qos_from_pp(pp) << DISPATCH_PRIORITY_QOS_SHIFT;
+       return dbp;
+}
+#define _dispatch_priority_from_pp(pp) \
+               _dispatch_priority_from_pp_impl(pp, true)
+#define _dispatch_priority_from_pp_strip_flags(pp) \
+               _dispatch_priority_from_pp_impl(pp, false)
+
+DISPATCH_ALWAYS_INLINE
+static inline pthread_priority_t
+_dispatch_priority_to_pp_impl(dispatch_priority_t dbp, bool keep_flags)
+{
+       pthread_priority_t pp;
+       if (keep_flags) {
+               pp = dbp & (DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK |
+                               DISPATCH_PRIORITY_RELPRI_MASK);
+       } else {
+               pp = dbp & DISPATCH_PRIORITY_RELPRI_MASK;
+       }
+       dispatch_qos_t qos = _dispatch_priority_qos(dbp);
+       if (qos) {
+               pp |= (1ul << ((qos - 1) + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT));
+       }
+       return pp;
+}
+#define _dispatch_priority_to_pp(pp) \
+               _dispatch_priority_to_pp_impl(pp, true)
+#define _dispatch_priority_to_pp_strip_flags(pp) \
+               _dispatch_priority_to_pp_impl(pp, false)
+
+#endif // __DISPATCH_SHIMS_PRIORITY__
diff --git a/src/shims/time.h b/src/shims/time.h
index 13fe4f86b87ec289968ad95c17e42050ffa5e42f..0b8e926175abae70ac855269cf5a3435643cbf2d 100644 (file)
--- a/src/shims/time.h
+++ b/src/shims/time.h
@@ -46,7 +46,15 @@ typedef enum {
 #define DISPATCH_CLOCK_COUNT  (DISPATCH_CLOCK_MACH + 1)
 } dispatch_clock_t;
 
+void _dispatch_time_init(void);
+
 #if defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME
+#define DISPATCH_TIME_UNIT_USES_NANOSECONDS 1
+#else
+#define DISPATCH_TIME_UNIT_USES_NANOSECONDS 0
+#endif
+
+#if DISPATCH_TIME_UNIT_USES_NANOSECONDS
 // x86 currently implements mach time in nanoseconds
 // this is NOT likely to change
 DISPATCH_ALWAYS_INLINE
@@ -63,52 +71,21 @@ _dispatch_time_nano2mach(uint64_t nsec)
        return nsec;
 }
 #else
-typedef struct _dispatch_host_time_data_s {
-       dispatch_once_t pred;
-       long double frac;
-       bool ratio_1_to_1;
-} _dispatch_host_time_data_s;
-extern _dispatch_host_time_data_s _dispatch_host_time_data;
-void _dispatch_get_host_time_init(void *context);
-
+#define DISPATCH_USE_HOST_TIME 1
+extern uint64_t (*_dispatch_host_time_mach2nano)(uint64_t machtime);
+extern uint64_t (*_dispatch_host_time_nano2mach)(uint64_t nsec);
 static inline uint64_t
 _dispatch_time_mach2nano(uint64_t machtime)
 {
-       _dispatch_host_time_data_s *const data = &_dispatch_host_time_data;
-       dispatch_once_f(&data->pred, NULL, _dispatch_get_host_time_init);
-
-       if (!machtime || slowpath(data->ratio_1_to_1)) {
-               return machtime;
-       }
-       if (machtime >= INT64_MAX) {
-               return INT64_MAX;
-       }
-       long double big_tmp = ((long double)machtime * data->frac) + .5;
-       if (slowpath(big_tmp >= INT64_MAX)) {
-               return INT64_MAX;
-       }
-       return (uint64_t)big_tmp;
+       return _dispatch_host_time_mach2nano(machtime);
 }
 
 static inline uint64_t
 _dispatch_time_nano2mach(uint64_t nsec)
 {
-       _dispatch_host_time_data_s *const data = &_dispatch_host_time_data;
-       dispatch_once_f(&data->pred, NULL, _dispatch_get_host_time_init);
-
-       if (!nsec || slowpath(data->ratio_1_to_1)) {
-               return nsec;
-       }
-       if (nsec >= INT64_MAX) {
-               return INT64_MAX;
-       }
-       long double big_tmp = ((long double)nsec / data->frac) + .5;
-       if (slowpath(big_tmp >= INT64_MAX)) {
-               return INT64_MAX;
-       }
-       return (uint64_t)big_tmp;
+       return _dispatch_host_time_nano2mach(nsec);
 }
-#endif
+#endif // DISPATCH_USE_HOST_TIME
 
 /* XXXRW: Some kind of overflow detection needed? */
 #define _dispatch_timespec_to_nano(ts) \
@@ -123,7 +100,7 @@ _dispatch_get_nanoseconds(void)
        dispatch_static_assert(sizeof(NSEC_PER_SEC) == 8);
        dispatch_static_assert(sizeof(USEC_PER_SEC) == 8);
 
-#if TARGET_OS_MAC && DISPATCH_HOST_SUPPORTS_OSX(101200)
+#if TARGET_OS_MAC
        return clock_gettime_nsec_np(CLOCK_REALTIME);
 #elif HAVE_DECL_CLOCK_REALTIME
        struct timespec ts;
@@ -169,7 +146,19 @@ DISPATCH_ALWAYS_INLINE
 static inline uint64_t
 _dispatch_approximate_time(void)
 {
+#if HAVE_MACH_APPROXIMATE_TIME
+       return mach_approximate_time();
+#elif HAVE_DECL_CLOCK_UPTIME_FAST && !defined(__linux__)
+       struct timespec ts;
+       dispatch_assume_zero(clock_gettime(CLOCK_UPTIME_FAST, &ts));
+       return _dispatch_timespec_to_nano(ts);
+#elif defined(__linux__)
+       struct timespec ts;
+       dispatch_assume_zero(clock_gettime(CLOCK_REALTIME_COARSE, &ts));
+       return _dispatch_timespec_to_nano(ts);
+#else
        return _dispatch_absolute_time();
+#endif
 }
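
For illustration, a minimal standalone version of the Linux fallback taken above for approximate time: CLOCK_REALTIME_COARSE trades timestamp precision for a much cheaper read than CLOCK_REALTIME. Names other than the clock id are invented, and the sketch is Linux-only:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define DEMO_NSEC_PER_SEC 1000000000ull

static uint64_t demo_timespec_to_nano(struct timespec ts)
{
	return (uint64_t)ts.tv_sec * DEMO_NSEC_PER_SEC + (uint64_t)ts.tv_nsec;
}

int main(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_REALTIME_COARSE, &ts); // coarse, low-overhead clock
	printf("approximate now: %llu ns\n",
			(unsigned long long)demo_timespec_to_nano(ts));
	return 0;
}
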
 
 DISPATCH_ALWAYS_INLINE
diff --git a/src/shims/tsd.h b/src/shims/tsd.h
index 2e3ece8b025db25270bec41b7c16d6f93f10f16e..c119e4f014399e9566c091af010766869e3fbe81 100644 (file)
--- a/src/shims/tsd.h
+++ b/src/shims/tsd.h
@@ -59,6 +59,19 @@ typedef struct { void *a; void *b; } dispatch_tsd_pair_t;
 #endif
 
 #if DISPATCH_USE_DIRECT_TSD
+#ifndef __TSD_THREAD_QOS_CLASS
+#define __TSD_THREAD_QOS_CLASS 4
+#endif
+#ifndef __TSD_RETURN_TO_KERNEL
+#define __TSD_RETURN_TO_KERNEL 5
+#endif
+#ifndef __TSD_MACH_SPECIAL_REPLY
+#define __TSD_MACH_SPECIAL_REPLY 8
+#endif
+
+static const unsigned long dispatch_priority_key       = __TSD_THREAD_QOS_CLASS;
+static const unsigned long dispatch_r2k_key                    = __TSD_RETURN_TO_KERNEL;
+
 // dispatch_queue_key & dispatch_frame_key need to be contiguous
 // in that order, and queue_key to be an even number
 static const unsigned long dispatch_queue_key          = __PTK_LIBDISPATCH_KEY0;
@@ -67,21 +80,13 @@ static const unsigned long dispatch_cache_key               = __PTK_LIBDISPATCH_KEY2;
 static const unsigned long dispatch_context_key                = __PTK_LIBDISPATCH_KEY3;
 static const unsigned long dispatch_pthread_root_queue_observer_hooks_key =
                __PTK_LIBDISPATCH_KEY4;
-static const unsigned long dispatch_defaultpriority_key =__PTK_LIBDISPATCH_KEY5;
+static const unsigned long dispatch_basepri_key     = __PTK_LIBDISPATCH_KEY5;
 #if DISPATCH_INTROSPECTION
 static const unsigned long dispatch_introspection_key = __PTK_LIBDISPATCH_KEY6;
 #elif DISPATCH_PERF_MON
 static const unsigned long dispatch_bcounter_key       = __PTK_LIBDISPATCH_KEY6;
 #endif
-static const unsigned long dispatch_sema4_key          = __PTK_LIBDISPATCH_KEY7;
-
-#ifndef __TSD_THREAD_QOS_CLASS
-#define __TSD_THREAD_QOS_CLASS 4
-#endif
-#ifndef __TSD_THREAD_VOUCHER
-#define __TSD_THREAD_VOUCHER 6
-#endif
-static const unsigned long dispatch_priority_key       = __TSD_THREAD_QOS_CLASS;
+static const unsigned long dispatch_wlh_key                    = __PTK_LIBDISPATCH_KEY7;
 static const unsigned long dispatch_voucher_key                = __PTK_LIBDISPATCH_KEY8;
 static const unsigned long dispatch_deferred_items_key = __PTK_LIBDISPATCH_KEY9;
 
@@ -108,16 +113,15 @@ struct dispatch_tsd {
        void *dispatch_cache_key;
        void *dispatch_context_key;
        void *dispatch_pthread_root_queue_observer_hooks_key;
-       void *dispatch_defaultpriority_key;
+       void *dispatch_basepri_key;
 #if DISPATCH_INTROSPECTION
        void *dispatch_introspection_key;
 #elif DISPATCH_PERF_MON
        void *dispatch_bcounter_key;
-#endif
-#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-       void *dispatch_sema4_key;
 #endif
        void *dispatch_priority_key;
+       void *dispatch_r2k_key;
+       void *dispatch_wlh_key;
        void *dispatch_voucher_key;
        void *dispatch_deferred_items_key;
 };
@@ -160,19 +164,20 @@ _dispatch_get_tsd_base(void)
          _dispatch_thread_setspecific(k2,(p)[1]) )
 
 #else
+extern pthread_key_t dispatch_priority_key;
+extern pthread_key_t dispatch_r2k_key;
 extern pthread_key_t dispatch_queue_key;
 extern pthread_key_t dispatch_frame_key;
 extern pthread_key_t dispatch_cache_key;
 extern pthread_key_t dispatch_context_key;
 extern pthread_key_t dispatch_pthread_root_queue_observer_hooks_key;
-extern pthread_key_t dispatch_defaultpriority_key;
+extern pthread_key_t dispatch_basepri_key;
 #if DISPATCH_INTROSPECTION
 extern pthread_key_t dispatch_introspection_key;
 #elif DISPATCH_PERF_MON
 extern pthread_key_t dispatch_bcounter_key;
 #endif
-extern pthread_key_t dispatch_sema4_key;
-extern pthread_key_t dispatch_priority_key;
+extern pthread_key_t dispatch_wlh_key;
 extern pthread_key_t dispatch_voucher_key;
 extern pthread_key_t dispatch_deferred_items_key;
 
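For illustration, a self-contained sketch of the portable fallback path above, where each dispatch_*_key is an ordinary pthread key rather than a direct TSD slot; the key name is invented for the demo:

#include <pthread.h>
#include <stdio.h>

static pthread_key_t demo_voucher_key; // stand-in for dispatch_voucher_key

int main(void)
{
	pthread_key_create(&demo_voucher_key, NULL); // no destructor for the demo
	pthread_setspecific(demo_voucher_key, "per-thread value");
	printf("%s\n", (const char *)pthread_getspecific(demo_voucher_key));
	return 0;
}
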
@@ -308,6 +313,11 @@ _dispatch_thread_setspecific_packed_pair(pthread_key_t k1, pthread_key_t k2,
 #define _dispatch_set_thread_mig_reply_port(p) ( \
                _dispatch_thread_setspecific(_PTHREAD_TSD_SLOT_MIG_REPLY, \
                (void*)(uintptr_t)(p)))
+#define _dispatch_get_thread_special_reply_port() ((mach_port_t)(uintptr_t) \
+               _dispatch_thread_getspecific(__TSD_MACH_SPECIAL_REPLY))
+#define _dispatch_set_thread_special_reply_port(p) ( \
+               _dispatch_thread_setspecific(__TSD_MACH_SPECIAL_REPLY, \
+               (void*)(uintptr_t)(p)))
 #endif
 
 DISPATCH_TSD_INLINE DISPATCH_CONST
diff --git a/src/shims/yield.h b/src/shims/yield.h
index 1850aeeed1e80ba65b9367f8f0a79ba2eb892935..67f8679ac5467b3f7a6b23f6663c74e9a4f9bd50 100644 (file)
--- a/src/shims/yield.h
+++ b/src/shims/yield.h
 #pragma mark _dispatch_wait_until
 
 #if DISPATCH_HW_CONFIG_UP
-#define _dispatch_wait_until(c) do { \
+#define _dispatch_wait_until(c) ({ \
+               typeof(c) _c; \
                int _spins = 0; \
-               while (!fastpath(c)) { \
+               for (;;) { \
+                       if (likely(_c = (c))) break; \
                        _spins++; \
                        _dispatch_preemption_yield(_spins); \
-               } } while (0)
+               } \
+               _c; })
 #elif TARGET_OS_EMBEDDED
 // <rdar://problem/15440575>
 #ifndef DISPATCH_WAIT_SPINS
 #define DISPATCH_WAIT_SPINS 1024
 #endif
-#define _dispatch_wait_until(c) do { \
+#define _dispatch_wait_until(c) ({ \
+               typeof(c) _c; \
                int _spins = -(DISPATCH_WAIT_SPINS); \
-               while (!fastpath(c)) { \
+               for (;;) { \
+                       if (likely(_c = (c))) break; \
                        if (slowpath(_spins++ >= 0)) { \
                                _dispatch_preemption_yield(_spins); \
                        } else { \
                                dispatch_hardware_pause(); \
                        } \
-               } } while (0)
+               } \
+               _c; })
 #else
-#define _dispatch_wait_until(c) do { \
-               while (!fastpath(c)) { \
+#define _dispatch_wait_until(c) ({ \
+               typeof(c) _c; \
+               for (;;) { \
+                       if (likely(_c = (c))) break; \
                        dispatch_hardware_pause(); \
-               } } while (0)
+               } \
+               _c; })
 #endif
 
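For illustration, the statement-expression pattern _dispatch_wait_until() adopts above, reduced to a standalone GCC/Clang example: the macro now evaluates to the (non-zero) observed condition value instead of discarding it. All names are invented:

#include <stdio.h>

#define demo_wait_until(c) ({ \
		__typeof__(c) _c; \
		for (;;) { \
			if ((_c = (c))) break; \
		} \
		_c; })

int main(void)
{
	int ready = 42; // already "signaled", so the loop exits immediately
	int v = demo_wait_until(ready);
	printf("observed %d\n", v); // the macro yields the value it saw
	return 0;
}
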
 #pragma mark -
diff --git a/src/source.c b/src/source.c
index 4e1d80644bac83964e74311531a1badc56449cb4..6f504787d0c7b58c450e8594353d0d3d6d0890f9 100644 (file)
--- a/src/source.c
+++ b/src/source.c
  */
 
 #include "internal.h"
-#if HAVE_MACH
-#include "protocol.h"
-#include "protocolServer.h"
-#endif
 
-#define DKEV_DISPOSE_IMMEDIATE_DELETE 0x1
-#define DKEV_UNREGISTER_DISCONNECTED 0x2
-#define DKEV_UNREGISTER_REPLY_REMOVE 0x4
-#define DKEV_UNREGISTER_WAKEUP 0x8
-
-static pthread_priority_t
-_dispatch_source_compute_kevent_priority(dispatch_source_t ds);
 static void _dispatch_source_handler_free(dispatch_source_t ds, long kind);
-static void _dispatch_source_merge_kevent(dispatch_source_t ds,
-               const _dispatch_kevent_qos_s *ke);
-static bool _dispatch_kevent_register(dispatch_kevent_t *dkp,
-               pthread_priority_t pp, uint32_t *flgp);
-static long _dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg,
-               unsigned int options);
-static long _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags,
-               uint32_t del_flags);
-static void _dispatch_kevent_drain(_dispatch_kevent_qos_s *ke);
-static void _dispatch_kevent_merge(_dispatch_kevent_qos_s *ke);
-static void _dispatch_timers_kevent(_dispatch_kevent_qos_s *ke);
-static void _dispatch_timers_unregister(dispatch_source_t ds,
-               dispatch_kevent_t dk);
-static void _dispatch_timers_update(dispatch_source_t ds);
-static void _dispatch_timer_aggregates_check(void);
-static void _dispatch_timer_aggregates_register(dispatch_source_t ds);
-static void _dispatch_timer_aggregates_update(dispatch_source_t ds,
-               unsigned int tidx);
-static void _dispatch_timer_aggregates_unregister(dispatch_source_t ds,
-               unsigned int tidx);
+static void _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval);
+
+#define DISPATCH_TIMERS_UNREGISTER 0x1
+#define DISPATCH_TIMERS_RETAIN_2 0x2
+static void _dispatch_timers_update(dispatch_unote_t du, uint32_t flags);
+static void _dispatch_timers_unregister(dispatch_timer_source_refs_t dt);
+
+static void _dispatch_source_timer_configure(dispatch_source_t ds);
 static inline unsigned long _dispatch_source_timer_data(
-               dispatch_source_refs_t dr, unsigned long prev);
-static void _dispatch_kq_deferred_update(const _dispatch_kevent_qos_s *ke);
-static long _dispatch_kq_immediate_update(_dispatch_kevent_qos_s *ke);
-static void _dispatch_memorypressure_init(void);
-#if HAVE_MACH
-static void _dispatch_mach_host_calendar_change_register(void);
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-static void _dispatch_mach_recv_msg_buf_init(void);
-static kern_return_t _dispatch_kevent_machport_resume(dispatch_kevent_t dk,
-               uint32_t new_flags, uint32_t del_flags);
-#endif
-static kern_return_t _dispatch_kevent_mach_notify_resume(dispatch_kevent_t dk,
-               uint32_t new_flags, uint32_t del_flags);
-static void _dispatch_mach_kevent_merge(_dispatch_kevent_qos_s *ke);
-static mach_msg_size_t _dispatch_kevent_mach_msg_size(
-               _dispatch_kevent_qos_s *ke);
-#else
-static inline void _dispatch_mach_host_calendar_change_register(void) {}
-static inline void _dispatch_mach_recv_msg_buf_init(void) {}
-#endif
-static const char * _evfiltstr(short filt);
-#if DISPATCH_DEBUG
-static void dispatch_kevent_debug(const char *verb,
-               const _dispatch_kevent_qos_s *kev, int i, int n,
-               const char *function, unsigned int line);
-#define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \
-       dispatch_assert(_dispatch_queue_get_current() == &_dispatch_mgr_q)
-#else
-static inline void
-dispatch_kevent_debug(const char *verb, const _dispatch_kevent_qos_s *kev,
-               int i, int n, const char *function, unsigned int line)
-{
-       (void)verb; (void)kev; (void)i; (void)n; (void)function; (void)line;
-}
-#define DISPATCH_ASSERT_ON_MANAGER_QUEUE()
-#endif
-#define _dispatch_kevent_debug(verb, _kev) \
-               dispatch_kevent_debug(verb, _kev, 0, 1, __FUNCTION__, __LINE__)
-#define _dispatch_kevent_debug_n(verb, _kev, i, n) \
-               dispatch_kevent_debug(verb, _kev, i, n, __FUNCTION__, __LINE__)
-#ifndef DISPATCH_MGR_QUEUE_DEBUG
-#define DISPATCH_MGR_QUEUE_DEBUG 0
-#endif
-#if DISPATCH_MGR_QUEUE_DEBUG
-#define _dispatch_kevent_mgr_debug _dispatch_kevent_debug
-#else
-static inline void
-_dispatch_kevent_mgr_debug(_dispatch_kevent_qos_s* kev DISPATCH_UNUSED) {}
-#endif
+               dispatch_source_t ds, dispatch_unote_t du);
 
 #pragma mark -
 #pragma mark dispatch_source_t
 
 dispatch_source_t
-dispatch_source_create(dispatch_source_type_t type, uintptr_t handle,
+dispatch_source_create(dispatch_source_type_t dst, uintptr_t handle,
                unsigned long mask, dispatch_queue_t dq)
 {
-       // ensure _dispatch_evfilt_machport_direct_enabled is initialized
-       _dispatch_root_queues_init();
-       const _dispatch_kevent_qos_s *proto_kev = &type->ke;
+       dispatch_source_refs_t dr;
        dispatch_source_t ds;
-       dispatch_kevent_t dk;
 
-       // input validation
-       if (type == NULL || (mask & ~type->mask)) {
+       dr = dux_create(dst, handle, mask)._dr;
+       if (unlikely(!dr)) {
                return DISPATCH_BAD_INPUT;
        }
-       if (type->mask && !mask) {
-               // expect a non-zero mask when the type declares one ... except
-               switch (type->ke.filter) {
-               case DISPATCH_EVFILT_TIMER:
-                       break; // timers don't need masks
-               case DISPATCH_EVFILT_MACH_NOTIFICATION:
-                       break; // type->init handles zero mask as a legacy case
-               default:
-                       // otherwise reject as invalid input
-                       return DISPATCH_BAD_INPUT;
-               }
-       }
-
-       switch (type->ke.filter) {
-       case EVFILT_SIGNAL:
-               if (handle >= NSIG) {
-                       return DISPATCH_BAD_INPUT;
-               }
-               break;
-       case EVFILT_FS:
-#if DISPATCH_USE_MEMORYSTATUS
-       case EVFILT_MEMORYSTATUS:
-#endif
-       case DISPATCH_EVFILT_CUSTOM_ADD:
-       case DISPATCH_EVFILT_CUSTOM_OR:
-               if (handle) {
-                       return DISPATCH_BAD_INPUT;
-               }
-               break;
-       case DISPATCH_EVFILT_TIMER:
-               if ((handle == 0) != (type->ke.ident == 0)) {
-                       return DISPATCH_BAD_INPUT;
-               }
-               break;
-       default:
-               break;
-       }
 
-       ds = _dispatch_alloc(DISPATCH_VTABLE(source),
+       ds = _dispatch_object_alloc(DISPATCH_VTABLE(source),
                        sizeof(struct dispatch_source_s));
        // Initialize as a queue first, then override some settings below.
-       _dispatch_queue_init(ds->_as_dq, DQF_NONE, 1, true);
+       _dispatch_queue_init(ds->_as_dq, DQF_LEGACY, 1,
+                       DISPATCH_QUEUE_INACTIVE | DISPATCH_QUEUE_ROLE_INNER);
        ds->dq_label = "source";
        ds->do_ref_cnt++; // the reference the manager queue holds
-
-       switch (type->ke.filter) {
-       case DISPATCH_EVFILT_CUSTOM_OR:
-               dk = DISPATCH_KEV_CUSTOM_OR;
-               break;
-       case DISPATCH_EVFILT_CUSTOM_ADD:
-               dk = DISPATCH_KEV_CUSTOM_ADD;
-               break;
-       default:
-               dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s));
-               dk->dk_kevent = *proto_kev;
-               dk->dk_kevent.ident = handle;
-               dk->dk_kevent.flags |= EV_ADD|EV_ENABLE;
-               dk->dk_kevent.fflags |= (uint32_t)mask;
-               dk->dk_kevent.udata = (_dispatch_kevent_qos_udata_t)dk;
-               TAILQ_INIT(&dk->dk_sources);
-
-               ds->ds_pending_data_mask = dk->dk_kevent.fflags;
-               ds->ds_ident_hack = (uintptr_t)dk->dk_kevent.ident;
-               if (EV_UDATA_SPECIFIC & proto_kev->flags) {
-                       dk->dk_kevent.flags |= EV_DISPATCH;
-                       ds->ds_is_direct_kevent = true;
-                       ds->ds_needs_rearm = true;
-               }
-               break;
-       }
-       ds->ds_dkev = dk;
-
-       if ((EV_DISPATCH|EV_ONESHOT) & proto_kev->flags) {
-               ds->ds_needs_rearm = true;
-       } else if (!(EV_CLEAR & proto_kev->flags)) {
-               // we cheat and use EV_CLEAR to mean a "flag thingy"
-               ds->ds_is_adder = true;
-       }
-       // Some sources require special processing
-       if (type->init != NULL) {
-               type->init(ds, type, handle, mask);
-       }
-       dispatch_assert(!(ds->ds_is_level && ds->ds_is_adder));
-       if (!ds->ds_is_custom_source && (dk->dk_kevent.flags & EV_VANISHED)) {
-               // see _dispatch_source_merge_kevent
-               dispatch_assert(!(dk->dk_kevent.flags & EV_ONESHOT));
-               dispatch_assert(dk->dk_kevent.flags & EV_DISPATCH);
-               dispatch_assert(dk->dk_kevent.flags & EV_UDATA_SPECIFIC);
-       }
-
-       if (fastpath(!ds->ds_refs)) {
-               ds->ds_refs = _dispatch_calloc(1ul,
-                               sizeof(struct dispatch_source_refs_s));
-       }
-       ds->ds_refs->dr_source_wref = _dispatch_ptr2wref(ds);
+       ds->ds_refs = dr;
+       dr->du_owner_wref = _dispatch_ptr2wref(ds);
 
        if (slowpath(!dq)) {
-               dq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true);
+               dq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true);
        } else {
-               _dispatch_retain(dq);
+               _dispatch_retain((dispatch_queue_t _Nonnull)dq);
        }
        ds->do_targetq = dq;
+       if (dr->du_is_timer && (dr->du_fflags & DISPATCH_TIMER_INTERVAL)) {
+               _dispatch_source_set_interval(ds, handle);
+       }
        _dispatch_object_debug(ds, "%s", __func__);
        return ds;
 }
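
For illustration, a usage sketch of the public API whose construction path changes above: a DISPATCH_SOURCE_TYPE_DATA_ADD source coalesces merged values between handler invocations. Compile with blocks support and link against libdispatch; the queue label is invented:

#include <dispatch/dispatch.h>
#include <stdio.h>

int main(void)
{
	dispatch_queue_t q = dispatch_queue_create("demo.queue", NULL);
	dispatch_source_t s = dispatch_source_create(
			DISPATCH_SOURCE_TYPE_DATA_ADD, 0, 0, q);
	dispatch_source_set_event_handler(s, ^{
		// sum of all values merged since the previous callout
		printf("data: %lu\n", dispatch_source_get_data(s));
	});
	dispatch_resume(s);
	dispatch_source_merge_data(s, 1);
	dispatch_source_merge_data(s, 2); // may coalesce with the merge above
	dispatch_main();
}
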
 
 void
-_dispatch_source_dispose(dispatch_source_t ds)
+_dispatch_source_dispose(dispatch_source_t ds, bool *allow_free)
 {
        _dispatch_object_debug(ds, "%s", __func__);
        _dispatch_source_handler_free(ds, DS_REGISTN_HANDLER);
        _dispatch_source_handler_free(ds, DS_EVENT_HANDLER);
        _dispatch_source_handler_free(ds, DS_CANCEL_HANDLER);
-       free(ds->ds_refs);
-       _dispatch_queue_destroy(ds->_as_dq);
+       _dispatch_unote_dispose(ds->ds_refs);
+       ds->ds_refs = NULL;
+       _dispatch_queue_destroy(ds->_as_dq, allow_free);
 }
 
 void
 _dispatch_source_xref_dispose(dispatch_source_t ds)
 {
-       dx_wakeup(ds, 0, DISPATCH_WAKEUP_FLUSH);
+       dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
+       if (unlikely(!(dqf & (DQF_LEGACY|DSF_CANCELED)))) {
+               DISPATCH_CLIENT_CRASH(ds, "Release of a source that has not been "
+                               "cancelled, but has a mandatory cancel handler");
+       }
+       dx_wakeup(ds, 0, DISPATCH_WAKEUP_MAKE_DIRTY);
 }
 
 long
@@ -252,78 +102,121 @@ dispatch_source_testcancel(dispatch_source_t ds)
 unsigned long
 dispatch_source_get_mask(dispatch_source_t ds)
 {
-       unsigned long mask = ds->ds_pending_data_mask;
-       if (ds->ds_vmpressure_override) {
-               mask = NOTE_VM_PRESSURE;
+       dispatch_source_refs_t dr = ds->ds_refs;
+       if (ds->dq_atomic_flags & DSF_CANCELED) {
+               return 0;
+       }
+#if DISPATCH_USE_MEMORYSTATUS
+       if (dr->du_vmpressure_override) {
+               return NOTE_VM_PRESSURE;
        }
 #if TARGET_IPHONE_SIMULATOR
-       else if (ds->ds_memorypressure_override) {
-               mask = NOTE_MEMORYSTATUS_PRESSURE_WARN;
+       if (dr->du_memorypressure_override) {
+               return NOTE_MEMORYSTATUS_PRESSURE_WARN;
        }
 #endif
-       return mask;
+#endif // DISPATCH_USE_MEMORYSTATUS
+       return dr->du_fflags;
 }
 
 uintptr_t
 dispatch_source_get_handle(dispatch_source_t ds)
 {
-       unsigned int handle = (unsigned int)ds->ds_ident_hack;
+       dispatch_source_refs_t dr = ds->ds_refs;
 #if TARGET_IPHONE_SIMULATOR
-       if (ds->ds_memorypressure_override) {
-               handle = 0;
+       if (dr->du_memorypressure_override) {
+               return 0;
        }
 #endif
-       return handle;
+       return dr->du_ident;
 }
 
 unsigned long
 dispatch_source_get_data(dispatch_source_t ds)
 {
-       unsigned long data = ds->ds_data;
-       if (ds->ds_vmpressure_override) {
-               data = NOTE_VM_PRESSURE;
+#if DISPATCH_USE_MEMORYSTATUS
+       dispatch_source_refs_t dr = ds->ds_refs;
+       if (dr->du_vmpressure_override) {
+               return NOTE_VM_PRESSURE;
        }
 #if TARGET_IPHONE_SIMULATOR
-       else if (ds->ds_memorypressure_override) {
-               data = NOTE_MEMORYSTATUS_PRESSURE_WARN;
+       if (dr->du_memorypressure_override) {
+               return NOTE_MEMORYSTATUS_PRESSURE_WARN;
        }
 #endif
-       return data;
+#endif // DISPATCH_USE_MEMORYSTATUS
+       uint64_t value = os_atomic_load2o(ds, ds_data, relaxed);
+       return (unsigned long)(
+               ds->ds_refs->du_data_action == DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET
+               ? DISPATCH_SOURCE_GET_DATA(value) : value);
 }
 
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_source_merge_data2(dispatch_source_t ds,
-               pthread_priority_t pp, unsigned long val)
-{
-       _dispatch_kevent_qos_s kev = {
-               .fflags = (typeof(kev.fflags))val,
-               .data = (typeof(kev.data))val,
-#if DISPATCH_USE_KEVENT_QOS
-               .qos = (_dispatch_kevent_priority_t)pp,
-#endif
-       };
-#if !DISPATCH_USE_KEVENT_QOS
-       (void)pp;
-#endif
-
-       dispatch_assert(ds->ds_dkev == DISPATCH_KEV_CUSTOM_OR ||
-                       ds->ds_dkev == DISPATCH_KEV_CUSTOM_ADD);
-       _dispatch_kevent_debug("synthetic data", &kev);
-       _dispatch_source_merge_kevent(ds, &kev);
+size_t
+dispatch_source_get_extended_data(dispatch_source_t ds,
+               dispatch_source_extended_data_t edata, size_t size)
+{
+       size_t target_size = MIN(size,
+               sizeof(struct dispatch_source_extended_data_s));
+       if (size > 0) {
+               unsigned long data, status = 0;
+               if (ds->ds_refs->du_data_action
+                               == DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET) {
+                       uint64_t combined = os_atomic_load(&ds->ds_data, relaxed);
+                       data = DISPATCH_SOURCE_GET_DATA(combined);
+                       status = DISPATCH_SOURCE_GET_STATUS(combined);
+               } else {
+                       data = dispatch_source_get_data(ds);
+               }
+               if (size >= offsetof(struct dispatch_source_extended_data_s, data)
+                               + sizeof(edata->data)) {
+                       edata->data = data;
+               }
+               if (size >= offsetof(struct dispatch_source_extended_data_s, status)
+                               + sizeof(edata->status)) {
+                       edata->status = status;
+               }
+               if (size > sizeof(struct dispatch_source_extended_data_s)) {
+                       memset(
+                               (char *)edata + sizeof(struct dispatch_source_extended_data_s),
+                               0, size - sizeof(struct dispatch_source_extended_data_s));
+               }
+       }
+       return target_size;
 }
 
+DISPATCH_NOINLINE
 void
-dispatch_source_merge_data(dispatch_source_t ds, unsigned long val)
+_dispatch_source_merge_data(dispatch_source_t ds, pthread_priority_t pp,
+               unsigned long val)
 {
-       _dispatch_source_merge_data2(ds, 0, val);
+       dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
+       int filter = ds->ds_refs->du_filter;
+
+       if (unlikely(dqf & (DSF_CANCELED | DSF_DELETED))) {
+               return;
+       }
+
+       switch (filter) {
+       case DISPATCH_EVFILT_CUSTOM_ADD:
+               os_atomic_add2o(ds, ds_pending_data, val, relaxed);
+               break;
+       case DISPATCH_EVFILT_CUSTOM_OR:
+               os_atomic_or2o(ds, ds_pending_data, val, relaxed);
+               break;
+       case DISPATCH_EVFILT_CUSTOM_REPLACE:
+               os_atomic_store2o(ds, ds_pending_data, val, relaxed);
+               break;
+       default:
+               DISPATCH_CLIENT_CRASH(filter, "Invalid source type");
+       }
+
+       dx_wakeup(ds, _dispatch_qos_from_pp(pp), DISPATCH_WAKEUP_MAKE_DIRTY);
 }
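
For illustration, the three custom-source coalescing strategies in the switch above, sketched with C11 atomics standing in for the os_atomic_*2o() macros operating on ds_pending_data:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	_Atomic unsigned long pending = 0;
	atomic_fetch_add_explicit(&pending, 2, memory_order_relaxed); // CUSTOM_ADD
	atomic_fetch_or_explicit(&pending, 5, memory_order_relaxed);  // CUSTOM_OR
	atomic_store_explicit(&pending, 9, memory_order_relaxed);     // CUSTOM_REPLACE
	printf("pending = %lu\n", atomic_load(&pending)); // 9: replace wins
	return 0;
}
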
 
 void
-_dispatch_source_merge_data(dispatch_source_t ds, pthread_priority_t pp,
-               unsigned long val)
+dispatch_source_merge_data(dispatch_source_t ds, unsigned long val)
 {
-       _dispatch_source_merge_data2(ds, pp, val);
+       _dispatch_source_merge_data(ds, 0, val);
 }
 
 #pragma mark -
@@ -443,6 +336,10 @@ _dispatch_source_set_handler(dispatch_source_t ds, long kind,
                _dispatch_source_handler_replace(ds, kind, dc);
                return dx_vtable(ds)->do_resume(ds, false);
        }
+       if (unlikely(!_dispatch_queue_is_legacy(ds->_as_dq))) {
+               DISPATCH_CLIENT_CRASH(kind, "Cannot change a handler of this source "
+                               "after it has been activated");
+       }
        _dispatch_ktrace1(DISPATCH_PERF_post_activate_mutation, ds);
        if (kind == DS_REGISTN_HANDLER) {
                _dispatch_bug_deprecated("Setting registration handler after "
@@ -450,7 +347,7 @@ _dispatch_source_set_handler(dispatch_source_t ds, long kind,
        }
        dc->dc_data = (void *)kind;
        _dispatch_barrier_trysync_or_async_f(ds->_as_dq, dc,
-                       _dispatch_source_set_handler_slow);
+                       _dispatch_source_set_handler_slow, 0);
 }
 
 #ifdef __BLOCKS__
@@ -473,27 +370,40 @@ dispatch_source_set_event_handler_f(dispatch_source_t ds,
        _dispatch_source_set_handler(ds, DS_EVENT_HANDLER, dc);
 }
 
-void
-_dispatch_source_set_event_handler_continuation(dispatch_source_t ds,
-               dispatch_continuation_t dc)
+#ifdef __BLOCKS__
+DISPATCH_NOINLINE
+static void
+_dispatch_source_set_cancel_handler(dispatch_source_t ds,
+               dispatch_block_t handler)
 {
-       _dispatch_trace_continuation_push(ds->_as_dq, dc);
-       _dispatch_source_set_handler(ds, DS_EVENT_HANDLER, dc);
+       dispatch_continuation_t dc;
+       dc = _dispatch_source_handler_alloc(ds, handler, DS_CANCEL_HANDLER, true);
+       _dispatch_source_set_handler(ds, DS_CANCEL_HANDLER, dc);
 }
 
-#ifdef __BLOCKS__
 void
 dispatch_source_set_cancel_handler(dispatch_source_t ds,
                dispatch_block_t handler)
 {
-       dispatch_continuation_t dc;
-       dc = _dispatch_source_handler_alloc(ds, handler, DS_CANCEL_HANDLER, true);
-       _dispatch_source_set_handler(ds, DS_CANCEL_HANDLER, dc);
+       if (unlikely(!_dispatch_queue_is_legacy(ds->_as_dq))) {
+               DISPATCH_CLIENT_CRASH(0, "Cannot set a non mandatory handler on "
+                               "this source");
+       }
+       return _dispatch_source_set_cancel_handler(ds, handler);
 }
-#endif /* __BLOCKS__ */
 
 void
-dispatch_source_set_cancel_handler_f(dispatch_source_t ds,
+dispatch_source_set_mandatory_cancel_handler(dispatch_source_t ds,
+               dispatch_block_t handler)
+{
+       _dispatch_queue_atomic_flags_clear(ds->_as_dq, DQF_LEGACY);
+       return _dispatch_source_set_cancel_handler(ds, handler);
+}
+#endif /* __BLOCKS__ */
+
+DISPATCH_NOINLINE
+static void
+_dispatch_source_set_cancel_handler_f(dispatch_source_t ds,
                dispatch_function_t handler)
 {
        dispatch_continuation_t dc;
@@ -501,6 +411,25 @@ dispatch_source_set_cancel_handler_f(dispatch_source_t ds,
        _dispatch_source_set_handler(ds, DS_CANCEL_HANDLER, dc);
 }
 
+void
+dispatch_source_set_cancel_handler_f(dispatch_source_t ds,
+               dispatch_function_t handler)
+{
+       if (unlikely(!_dispatch_queue_is_legacy(ds->_as_dq))) {
+               DISPATCH_CLIENT_CRASH(0, "Cannot set a non mandatory handler on "
+                               "this source");
+       }
+       return _dispatch_source_set_cancel_handler_f(ds, handler);
+}
+
+void
+dispatch_source_set_mandatory_cancel_handler_f(dispatch_source_t ds,
+               dispatch_function_t handler)
+{
+       _dispatch_queue_atomic_flags_clear(ds->_as_dq, DQF_LEGACY);
+       return _dispatch_source_set_cancel_handler_f(ds, handler);
+}
+
 #ifdef __BLOCKS__
 void
 dispatch_source_set_registration_handler(dispatch_source_t ds,
@@ -538,7 +467,7 @@ _dispatch_source_registration_callout(dispatch_source_t ds, dispatch_queue_t cq,
        if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) {
                dc->dc_ctxt = ds->do_ctxt;
        }
-       _dispatch_continuation_pop(dc, cq, flags);
+       _dispatch_continuation_pop(dc, NULL, flags, cq);
 }
 
 static void
@@ -548,7 +477,6 @@ _dispatch_source_cancel_callout(dispatch_source_t ds, dispatch_queue_t cq,
        dispatch_continuation_t dc;
 
        dc = _dispatch_source_handler_take(ds, DS_CANCEL_HANDLER);
-       ds->ds_pending_data_mask = 0;
        ds->ds_pending_data = 0;
        ds->ds_data = 0;
        _dispatch_source_handler_free(ds, DS_EVENT_HANDLER);
@@ -562,104 +490,87 @@ _dispatch_source_cancel_callout(dispatch_source_t ds, dispatch_queue_t cq,
        if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) {
                dc->dc_ctxt = ds->do_ctxt;
        }
-       _dispatch_continuation_pop(dc, cq, flags);
+       _dispatch_continuation_pop(dc, NULL, flags, cq);
 }
 
 static void
 _dispatch_source_latch_and_call(dispatch_source_t ds, dispatch_queue_t cq,
                dispatch_invoke_flags_t flags)
 {
-       unsigned long prev;
-
        dispatch_source_refs_t dr = ds->ds_refs;
        dispatch_continuation_t dc = _dispatch_source_get_handler(dr, DS_EVENT_HANDLER);
-       prev = os_atomic_xchg2o(ds, ds_pending_data, 0, relaxed);
-       if (ds->ds_is_level) {
+       uint64_t prev;
+
+       if (dr->du_is_timer && !(dr->du_fflags & DISPATCH_TIMER_AFTER)) {
+               prev = _dispatch_source_timer_data(ds, dr);
+       } else {
+               prev = os_atomic_xchg2o(ds, ds_pending_data, 0, relaxed);
+       }
+       if (dr->du_data_action == DISPATCH_UNOTE_ACTION_DATA_SET) {
                ds->ds_data = ~prev;
-       } else if (ds->ds_is_timer && ds_timer(dr).target && prev) {
-               ds->ds_data = _dispatch_source_timer_data(dr, prev);
        } else {
                ds->ds_data = prev;
        }
-       if (!dispatch_assume(prev) || !dc) {
+       if (!dispatch_assume(prev != 0) || !dc) {
                return;
        }
-       _dispatch_continuation_pop(dc, cq, flags);
-       if (ds->ds_is_timer && (ds_timer(dr).flags & DISPATCH_TIMER_AFTER)) {
+       _dispatch_continuation_pop(dc, NULL, flags, cq);
+       if (dr->du_is_timer && (dr->du_fflags & DISPATCH_TIMER_AFTER)) {
                _dispatch_source_handler_free(ds, DS_EVENT_HANDLER);
                dispatch_release(ds); // dispatch_after sources are one-shot
        }
 }
 
+DISPATCH_NOINLINE
 static void
-_dispatch_source_kevent_unregister(dispatch_source_t ds)
+_dispatch_source_refs_finalize_unregistration(dispatch_source_t ds)
+{
+       dispatch_queue_flags_t dqf;
+       dispatch_source_refs_t dr = ds->ds_refs;
+
+       dqf = _dispatch_queue_atomic_flags_set_and_clear_orig(ds->_as_dq,
+                       DSF_DELETED, DSF_ARMED | DSF_DEFERRED_DELETE | DSF_CANCEL_WAITER);
+       if (dqf & DSF_CANCEL_WAITER) {
+               _dispatch_wake_by_address(&ds->dq_atomic_flags);
+       }
+       _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, dr);
+       _dispatch_release_tailcall(ds); // the retain is done at creation time
+}
+
+void
+_dispatch_source_refs_unregister(dispatch_source_t ds, uint32_t options)
 {
        _dispatch_object_debug(ds, "%s", __func__);
-       uint32_t flags = (uint32_t)ds->ds_pending_data_mask;
-       dispatch_kevent_t dk = ds->ds_dkev;
        dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
-       if (ds->ds_is_custom_source) {
-               ds->ds_dkev = NULL;
-               goto done;
-       }
+       dispatch_source_refs_t dr = ds->ds_refs;
 
-       if (ds->ds_is_direct_kevent &&
-                       ((dqf & DSF_DELETED) || !(ds->ds_is_installed))) {
-               dk->dk_kevent.flags |= EV_DELETE; // already deleted
-               dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE|EV_VANISHED);
-       }
-       if (dk->dk_kevent.filter == DISPATCH_EVFILT_TIMER) {
-               ds->ds_dkev = NULL;
-               if (ds->ds_is_installed) {
-                       _dispatch_timers_unregister(ds, dk);
+       if (dr->du_is_timer) {
+               // Because of the optimization to unregister fired oneshot timers
+               // from the target queue, we can't trust _dispatch_unote_registered()
+               // to tell the truth, it may not have happened yet
+               if (dqf & DSF_ARMED) {
+                       _dispatch_timers_unregister(ds->ds_timer_refs);
+                       _dispatch_release_2(ds);
                }
-       } else if (!ds->ds_is_direct_kevent) {
-               ds->ds_dkev = NULL;
-               dispatch_assert((bool)ds->ds_is_installed);
-               TAILQ_REMOVE(&dk->dk_sources, ds->ds_refs, dr_list);
-               _dispatch_kevent_unregister(dk, flags, 0);
+               dr->du_ident = DISPATCH_TIMER_IDENT_CANCELED;
        } else {
-               unsigned int dkev_dispose_options = 0;
-               if (ds->ds_needs_rearm && !(dqf & DSF_ARMED)) {
-                       dkev_dispose_options |= DKEV_DISPOSE_IMMEDIATE_DELETE;
-               } else if (dx_type(ds) == DISPATCH_MACH_CHANNEL_TYPE) {
-                       if (!ds->ds_is_direct_kevent) {
-                               dkev_dispose_options |= DKEV_DISPOSE_IMMEDIATE_DELETE;
-                       }
+               if (_dispatch_unote_needs_rearm(dr) && !(dqf & DSF_ARMED)) {
+                       options |= DU_UNREGISTER_IMMEDIATE_DELETE;
                }
-               long r = _dispatch_kevent_unregister(dk, flags, dkev_dispose_options);
-               if (r == EINPROGRESS) {
+               if (!_dispatch_unote_unregister(dr, options)) {
                        _dispatch_debug("kevent-source[%p]: deferred delete kevent[%p]",
-                                       ds, dk);
+                                       ds, dr);
                        _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_DEFERRED_DELETE);
                        return; // deferred unregistration
-#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS
-               } else if (r == ENOENT) {
-                       _dispatch_debug("kevent-source[%p]: ENOENT delete kevent[%p]",
-                                       ds, dk);
-                       _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_DEFERRED_DELETE);
-                       return; // potential concurrent EV_DELETE delivery rdar://22047283
-#endif
-               } else {
-                       dispatch_assume_zero(r);
                }
-               ds->ds_dkev = NULL;
-               _TAILQ_TRASH_ENTRY(ds->ds_refs, dr_list);
-       }
-done:
-       dqf = _dispatch_queue_atomic_flags_set_and_clear_orig(ds->_as_dq,
-                       DSF_DELETED, DSF_ARMED | DSF_DEFERRED_DELETE | DSF_CANCEL_WAITER);
-       if (dqf & DSF_CANCEL_WAITER) {
-               _dispatch_wake_by_address(&ds->dq_atomic_flags);
        }
+
        ds->ds_is_installed = true;
-       ds->ds_needs_rearm = false; // re-arm is pointless and bad now
-       _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, dk);
-       _dispatch_release(ds); // the retain is done at creation time
+       _dispatch_source_refs_finalize_unregistration(ds);
 }
 
 DISPATCH_ALWAYS_INLINE
-static bool
+static inline bool
 _dispatch_source_tryarm(dispatch_source_t ds)
 {
        dispatch_queue_flags_t oqf, nqf;
@@ -673,64 +584,56 @@ _dispatch_source_tryarm(dispatch_source_t ds)
        });
 }
 
-static bool
-_dispatch_source_kevent_resume(dispatch_source_t ds, uint32_t new_flags)
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_source_refs_resume(dispatch_source_t ds)
 {
-       switch (ds->ds_dkev->dk_kevent.filter) {
-       case DISPATCH_EVFILT_TIMER:
-               _dispatch_timers_update(ds);
-               _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED);
-               _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds,
-                               ds->ds_dkev);
+       dispatch_source_refs_t dr = ds->ds_refs;
+       if (dr->du_is_timer) {
+               _dispatch_timers_update(dr, 0);
                return true;
-#if HAVE_MACH
-       case EVFILT_MACHPORT:
-               if ((ds->ds_pending_data_mask & DISPATCH_MACH_RECV_MESSAGE) &&
-                               !ds->ds_is_direct_kevent) {
-                       new_flags |= DISPATCH_MACH_RECV_MESSAGE; // emulate EV_DISPATCH
-               }
-               break;
-#endif
        }
        if (unlikely(!_dispatch_source_tryarm(ds))) {
                return false;
        }
-       if (unlikely(_dispatch_kevent_resume(ds->ds_dkev, new_flags, 0))) {
-               _dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq, DSF_DELETED,
-                               DSF_ARMED);
-               return false;
-       }
-       _dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, ds->ds_dkev);
+       _dispatch_unote_resume(dr);
+       _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, dr);
        return true;
 }
 
-static void
-_dispatch_source_kevent_register(dispatch_source_t ds, pthread_priority_t pp)
+void
+_dispatch_source_refs_register(dispatch_source_t ds, dispatch_wlh_t wlh,
+               dispatch_priority_t pri)
 {
-       dispatch_assert_zero((bool)ds->ds_is_installed);
-       switch (ds->ds_dkev->dk_kevent.filter) {
-       case DISPATCH_EVFILT_TIMER:
+       dispatch_source_refs_t dr = ds->ds_refs;
+       dispatch_priority_t kbp;
+
+       dispatch_assert(!ds->ds_is_installed);
+
+       if (dr->du_is_timer) {
+               dispatch_queue_t dq = ds->_as_dq;
+               kbp = _dispatch_queue_compute_priority_and_wlh(dq, NULL);
                // aggressively coalesce background/maintenance QoS timers
                // <rdar://problem/12200216&27342536>
-               pp = _dispatch_source_compute_kevent_priority(ds);
-               if (_dispatch_is_background_priority(pp)) {
-                       ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_BACKGROUND;
+               if (_dispatch_qos_is_background(_dispatch_priority_qos(kbp))) {
+                       if (dr->du_fflags & DISPATCH_TIMER_STRICT) {
+                               _dispatch_ktrace1(DISPATCH_PERF_strict_bg_timer, ds);
+                       } else {
+                               dr->du_fflags |= DISPATCH_TIMER_BACKGROUND;
+                               dr->du_ident = _dispatch_source_timer_idx(dr);
+                       }
                }
-               _dispatch_timers_update(ds);
-               _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED);
-               _dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, ds->ds_dkev);
+               _dispatch_timers_update(dr, 0);
                return;
        }
-       uint32_t flags;
-       bool do_resume = _dispatch_kevent_register(&ds->ds_dkev, pp, &flags);
-       TAILQ_INSERT_TAIL(&ds->ds_dkev->dk_sources, ds->ds_refs, dr_list);
-       ds->ds_is_installed = true;
-       if (do_resume || ds->ds_needs_rearm) {
-               if (unlikely(!_dispatch_source_kevent_resume(ds, flags))) {
-                       _dispatch_source_kevent_unregister(ds);
-               }
+
+       if (unlikely(!_dispatch_source_tryarm(ds) ||
+                       !_dispatch_unote_register(dr, wlh, pri))) {
+               // Do the parts of dispatch_source_refs_unregister() that
+               // are required after this partial initialization.
+               _dispatch_source_refs_finalize_unregistration(ds);
        } else {
-               _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED);
+               _dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, dr);
        }
        _dispatch_object_debug(ds, "%s", __func__);
 }
@@ -746,65 +649,34 @@ _dispatch_source_set_event_handler_context(void *ctxt)
        }
 }
 
-static pthread_priority_t
-_dispatch_source_compute_kevent_priority(dispatch_source_t ds)
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_source_install(dispatch_source_t ds, dispatch_wlh_t wlh,
+               dispatch_priority_t pri)
 {
-       pthread_priority_t p = ds->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK;
-       dispatch_queue_t tq = ds->do_targetq;
-       pthread_priority_t tqp = tq->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK;
-
-       while (unlikely(tq->do_targetq)) {
-               if (unlikely(tq == &_dispatch_mgr_q)) {
-                       return _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
-               }
-               if (unlikely(_dispatch_queue_is_thread_bound(tq))) {
-                       // thread bound hierarchies are weird, we need to install
-                       // from the context of the thread this hierarchy is bound to
-                       return 0;
-               }
-               if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(tq))) {
-                       // this queue may not be activated yet, so the queue graph may not
-                       // have stabilized yet
-                       _dispatch_ktrace1(DISPATCH_PERF_delayed_registration, ds);
-                       return 0;
-               }
-               if (unlikely(!_dispatch_queue_has_immutable_target(tq))) {
-                       if (!_dispatch_is_in_root_queues_array(tq->do_targetq)) {
-                               // we're not allowed to dereference tq->do_targetq
-                               _dispatch_ktrace1(DISPATCH_PERF_delayed_registration, ds);
-                               return 0;
-                       }
-               }
-               if (!(tq->dq_priority & _PTHREAD_PRIORITY_INHERIT_FLAG)) {
-                       if (p < tqp) p = tqp;
-               }
-               tq = tq->do_targetq;
-               tqp = tq->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK;
-       }
-
-       if (unlikely(!tqp)) {
-               // pthread root queues opt out of QoS
-               return 0;
-       }
-       return _dispatch_priority_inherit_from_root_queue(p, tq);
+       _dispatch_source_refs_register(ds, wlh, pri);
+       ds->ds_is_installed = true;
 }
 
 void
-_dispatch_source_finalize_activation(dispatch_source_t ds)
+_dispatch_source_finalize_activation(dispatch_source_t ds, bool *allow_resume)
 {
        dispatch_continuation_t dc;
+       dispatch_source_refs_t dr = ds->ds_refs;
+       dispatch_priority_t pri;
+       dispatch_wlh_t wlh;
 
-       if (unlikely(ds->ds_is_direct_kevent &&
+       if (unlikely(dr->du_is_direct &&
                        (_dispatch_queue_atomic_flags(ds->_as_dq) & DSF_CANCELED))) {
-               return _dispatch_source_kevent_unregister(ds);
+               return _dispatch_source_refs_unregister(ds, 0);
        }
 
-       dc = _dispatch_source_get_event_handler(ds->ds_refs);
+       dc = _dispatch_source_get_event_handler(dr);
        if (dc) {
                if (_dispatch_object_is_barrier(dc)) {
                        _dispatch_queue_atomic_flags_set(ds->_as_dq, DQF_BARRIER_BIT);
                }
-               ds->dq_priority = dc->dc_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK;
+               ds->dq_priority = _dispatch_priority_from_pp_strip_flags(dc->dc_priority);
                if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) {
                        _dispatch_barrier_async_detached_f(ds->_as_dq, ds,
                                        _dispatch_source_set_event_handler_context);
@@ -812,28 +684,43 @@ _dispatch_source_finalize_activation(dispatch_source_t ds)
        }
 
        // call "super"
-       _dispatch_queue_finalize_activation(ds->_as_dq);
+       _dispatch_queue_finalize_activation(ds->_as_dq, allow_resume);
 
-       if (ds->ds_is_direct_kevent && !ds->ds_is_installed) {
-               pthread_priority_t pp = _dispatch_source_compute_kevent_priority(ds);
-               if (pp) _dispatch_source_kevent_register(ds, pp);
+       if (dr->du_is_direct && !ds->ds_is_installed) {
+               dispatch_queue_t dq = ds->_as_dq;
+               pri = _dispatch_queue_compute_priority_and_wlh(dq, &wlh);
+               if (pri) _dispatch_source_install(ds, wlh, pri);
        }
 }
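
A note on the ordering implied by the activation hook above: for direct sources the registration priority and workloop are derived from the queue hierarchy at activation time, so all configuration has to precede dispatch_activate()/dispatch_resume(). A minimal sketch of that client-side ordering with the public API (the queue label and the choice of SIGHUP are illustrative):

    #include <dispatch/dispatch.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        // Configure the source fully, then activate: kernel registration
        // happens at activation (or lazily on first invoke), once the
        // target-queue hierarchy and priority are known.
        dispatch_queue_t q = dispatch_queue_create("com.example.sigq", NULL);
        dispatch_source_t src = dispatch_source_create(
                DISPATCH_SOURCE_TYPE_SIGNAL, SIGHUP, 0, q);
        dispatch_source_set_event_handler(src, ^{
            printf("SIGHUP delivered %lu time(s)\n",
                    dispatch_source_get_data(src));
        });
        signal(SIGHUP, SIG_IGN); // let the source observe the signal
        dispatch_activate(src);
        pause();
        return 0;
    }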
 
 DISPATCH_ALWAYS_INLINE
-static inline dispatch_queue_t
-_dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags,
-               uint64_t *owned, struct dispatch_object_s **dc_ptr DISPATCH_UNUSED)
+static inline dispatch_queue_wakeup_target_t
+_dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_context_t dic,
+               dispatch_invoke_flags_t flags, uint64_t *owned)
 {
        dispatch_source_t ds = dou._ds;
-       dispatch_queue_t retq = NULL;
+       dispatch_queue_wakeup_target_t retq = DISPATCH_QUEUE_WAKEUP_NONE;
        dispatch_queue_t dq = _dispatch_queue_get_current();
+       dispatch_source_refs_t dr = ds->ds_refs;
+       dispatch_queue_flags_t dqf;
+
+       if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN) &&
+                       _dispatch_unote_wlh_changed(dr, _dispatch_get_wlh())) {
+               dqf = _dispatch_queue_atomic_flags_set_orig(ds->_as_dq,
+                               DSF_WLH_CHANGED);
+               if (!(dqf & DSF_WLH_CHANGED)) {
+                       _dispatch_bug_deprecated("Changing target queue "
+                                       "hierarchy after source was activated");
+               }
+       }
 
        if (_dispatch_queue_class_probe(ds)) {
                // Intentionally always drain even when on the manager queue
                // and not the source's regular target queue: we need to be able
                // to drain timer setting and the like there.
-               retq = _dispatch_queue_serial_drain(ds->_as_dq, flags, owned, NULL);
+               dispatch_with_disabled_narrowing(dic, {
+                       retq = _dispatch_queue_serial_drain(ds->_as_dq, dic, flags, owned);
+               });
        }
 
        // This function performs all source actions. Each action is responsible
@@ -843,19 +730,32 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags,
 
        // The order of tests here in invoke and in wakeup should be consistent.
 
-       dispatch_source_refs_t dr = ds->ds_refs;
        dispatch_queue_t dkq = &_dispatch_mgr_q;
+       bool prevent_starvation = false;
 
-       if (ds->ds_is_direct_kevent) {
+       if (dr->du_is_direct) {
                dkq = ds->do_targetq;
        }
 
+       if (dr->du_is_timer &&
+                       os_atomic_load2o(ds, ds_timer_refs->dt_pending_config, relaxed)) {
+               dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
+               if (!(dqf & (DSF_CANCELED | DQF_RELEASED))) {
+                       // timer has to be configured on the kevent queue
+                       if (dq != dkq) {
+                               return dkq;
+                       }
+                       _dispatch_source_timer_configure(ds);
+               }
+       }
+
        if (!ds->ds_is_installed) {
                // The source needs to be installed on the kevent queue.
                if (dq != dkq) {
                        return dkq;
                }
-               _dispatch_source_kevent_register(ds, _dispatch_get_defaultpriority());
+               _dispatch_source_install(ds, _dispatch_get_wlh(),
+                               _dispatch_get_basepri());
        }
 
        if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(ds))) {
@@ -873,22 +773,20 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags,
                _dispatch_source_registration_callout(ds, dq, flags);
        }
 
-       dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
-       bool prevent_starvation = false;
-
-       if ((dqf & DSF_DEFERRED_DELETE) &&
-                       ((dqf & DSF_DELETED) || !(dqf & DSF_ARMED))) {
+       dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
+       if ((dqf & DSF_DEFERRED_DELETE) && !(dqf & DSF_ARMED)) {
 unregister_event:
                // DSF_DELETED: Pending source kevent unregistration has been completed
                // !DSF_ARMED: event was delivered and can safely be unregistered
                if (dq != dkq) {
                        return dkq;
                }
-               _dispatch_source_kevent_unregister(ds);
+               _dispatch_source_refs_unregister(ds, DU_UNREGISTER_IMMEDIATE_DELETE);
                dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
        }
 
-       if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && ds->ds_pending_data) {
+       if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) &&
+                       os_atomic_load2o(ds, ds_pending_data, relaxed)) {
                // The source has pending data to deliver via the event handler callback
                // on the target queue. Some sources need to be rearmed on the kevent
                // queue after event delivery.
@@ -900,12 +798,13 @@ unregister_event:
                        // re-queue to give other things already queued on the target queue
                        // a chance to run.
                        //
-                       // however, if the source is directly targetting an overcommit root
+                       // however, if the source is directly targeting an overcommit root
                        // queue, this would requeue the source and ask for a new overcommit
                        // thread right away.
                        prevent_starvation = dq->do_targetq ||
-                                       !(dq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
-                       if (prevent_starvation && ds->ds_pending_data) {
+                                       !(dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT);
+                       if (prevent_starvation &&
+                                       os_atomic_load2o(ds, ds_pending_data, relaxed)) {
                                retq = ds->do_targetq;
                        }
                } else {
@@ -920,17 +819,21 @@ unregister_event:
                // kevent queue. After uninstallation, the cancellation handler needs
                // to be delivered to the target queue.
                if (!(dqf & DSF_DELETED)) {
-                       if (dq != dkq) {
+                       if (dr->du_is_timer && !(dqf & DSF_ARMED)) {
+                               // timers can cheat if not armed because there's nothing left
+                               // to do on the manager queue and unregistration can happen
+                               // on the regular target queue
+                       } else if (dq != dkq) {
                                return dkq;
                        }
-                       _dispatch_source_kevent_unregister(ds);
+                       _dispatch_source_refs_unregister(ds, 0);
                        dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
                        if (unlikely(dqf & DSF_DEFERRED_DELETE)) {
                                if (!(dqf & DSF_ARMED)) {
                                        goto unregister_event;
                                }
                                // we need to wait for the EV_DELETE
-                               return retq;
+                               return retq ? retq : DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;
                        }
                }
                if (dq != ds->do_targetq && (_dispatch_source_get_event_handler(dr) ||
@@ -944,7 +847,8 @@ unregister_event:
                prevent_starvation = false;
        }
 
-       if (ds->ds_needs_rearm && !(dqf & DSF_ARMED)) {
+       if (_dispatch_unote_needs_rearm(dr) &&
+                       !(dqf & (DSF_ARMED|DSF_DELETED|DSF_CANCELED|DQF_RELEASED))) {
                // The source needs to be rearmed on the kevent queue.
                if (dq != dkq) {
                        return dkq;
@@ -953,20 +857,29 @@ unregister_event:
                        // no need for resume when we can directly unregister the kevent
                        goto unregister_event;
                }
-               if (prevent_starvation) {
+               if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(ds))) {
+                       // do not try to rearm the kevent if the source is suspended
+                       // from the source handler
+                       return ds->do_targetq;
+               }
+               if (prevent_starvation && dr->du_wlh == DISPATCH_WLH_ANON) {
                        // keep the old behavior to force re-enqueue to our target queue
-                       // for the rearm. It is inefficient though and we should
-                       // improve this <rdar://problem/24635615>.
+                       // for the rearm.
                        //
                        // if the handler didn't run, or this is a pending delete
                        // or our target queue is a global queue, then starvation is
                        // not a concern and we can rearm right away.
                        return ds->do_targetq;
                }
-               if (unlikely(!_dispatch_source_kevent_resume(ds, 0))) {
-                       dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
+               if (unlikely(!_dispatch_source_refs_resume(ds))) {
                        goto unregister_event;
                }
+               if (!prevent_starvation && _dispatch_wlh_should_poll_unote(dr)) {
+                       // try to redrive the drain from under the lock for sources
+                       // targeting an overcommit root queue to avoid parking
+                       // when the next event has already fired
+                       _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE);
+               }
        }
 
        return retq;
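
Throughout the function above, a step that belongs on the kevent/manager queue or on the target queue never hops queues itself; it returns that queue as the wakeup target and the generic invoke machinery re-enqueues the source there. A stripped-down sketch of that convention (not libdispatch code; all names hypothetical):

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct queue queue_t;

    typedef struct {
        queue_t *kevent_q;     // where (un)registration and rearming happen
        queue_t *target_q;     // where user handlers are delivered
        bool installed;
        bool has_pending_data;
    } source_t;

    // Returns the queue the caller must re-enqueue on, or NULL when drained.
    static queue_t *
    source_invoke_step(source_t *s, queue_t *current_q)
    {
        if (!s->installed) {
            if (current_q != s->kevent_q) return s->kevent_q;
            s->installed = true;          // register with the kernel here
        }
        if (s->has_pending_data) {
            if (current_q != s->target_q) return s->target_q;
            s->has_pending_data = false;  // deliver the event handler here
        }
        return NULL;
    }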
@@ -974,13 +887,15 @@ unregister_event:
 
 DISPATCH_NOINLINE
 void
-_dispatch_source_invoke(dispatch_source_t ds, dispatch_invoke_flags_t flags)
+_dispatch_source_invoke(dispatch_source_t ds, dispatch_invoke_context_t dic,
+               dispatch_invoke_flags_t flags)
 {
-       _dispatch_queue_class_invoke(ds->_as_dq, flags, _dispatch_source_invoke2);
+       _dispatch_queue_class_invoke(ds, dic, flags,
+                       DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS, _dispatch_source_invoke2);
 }
 
 void
-_dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp,
+_dispatch_source_wakeup(dispatch_source_t ds, dispatch_qos_t qos,
                dispatch_wakeup_flags_t flags)
 {
        // This function determines whether the source needs to be invoked.
@@ -992,21 +907,26 @@ _dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp,
        dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
        bool deferred_delete = (dqf & DSF_DEFERRED_DELETE);
 
-       if (ds->ds_is_direct_kevent) {
+       if (dr->du_is_direct) {
                dkq = DISPATCH_QUEUE_WAKEUP_TARGET;
        }
 
-       if (!ds->ds_is_installed) {
+       if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && dr->du_is_timer &&
+                       os_atomic_load2o(ds, ds_timer_refs->dt_pending_config, relaxed)) {
+               // timer has to be configured on the kevent queue
+               tq = dkq;
+       } else if (!ds->ds_is_installed) {
                // The source needs to be installed on the kevent queue.
                tq = dkq;
        } else if (_dispatch_source_get_registration_handler(dr)) {
                // The registration handler needs to be delivered to the target queue.
                tq = DISPATCH_QUEUE_WAKEUP_TARGET;
-       } else if (deferred_delete && ((dqf & DSF_DELETED) || !(dqf & DSF_ARMED))) {
+       } else if (deferred_delete && !(dqf & DSF_ARMED)) {
                // Pending source kevent unregistration has been completed
                // or EV_ONESHOT event can be acknowledged
                tq = dkq;
-       } else if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && ds->ds_pending_data) {
+       } else if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) &&
+                       os_atomic_load2o(ds, ds_pending_data, relaxed)) {
                // The source has pending data to deliver to the target queue.
                tq = DISPATCH_QUEUE_WAKEUP_TARGET;
        } else if ((dqf & (DSF_CANCELED | DQF_RELEASED)) && !deferred_delete) {
@@ -1014,13 +934,21 @@ _dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp,
                // cancellation handler needs to be delivered to the target queue.
                // Note: cancellation assumes installation.
                if (!(dqf & DSF_DELETED)) {
-                       tq = dkq;
+                       if (dr->du_is_timer && !(dqf & DSF_ARMED)) {
+                               // timers can cheat if not armed because there's nothing left
+                               // to do on the manager queue and unregistration can happen
+                               // on the regular target queue
+                               tq = DISPATCH_QUEUE_WAKEUP_TARGET;
+                       } else {
+                               tq = dkq;
+                       }
                } else if (_dispatch_source_get_event_handler(dr) ||
                                _dispatch_source_get_cancel_handler(dr) ||
                                _dispatch_source_get_registration_handler(dr)) {
                        tq = DISPATCH_QUEUE_WAKEUP_TARGET;
                }
-       } else if (ds->ds_needs_rearm && !(dqf & DSF_ARMED)) {
+       } else if (_dispatch_unote_needs_rearm(dr) &&
+                       !(dqf & (DSF_ARMED|DSF_DELETED|DSF_CANCELED|DQF_RELEASED))) {
                // The source needs to be rearmed on the kevent queue.
                tq = dkq;
        }
@@ -1028,13 +956,12 @@ _dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp,
                tq = DISPATCH_QUEUE_WAKEUP_TARGET;
        }
 
-       if (tq) {
-               return _dispatch_queue_class_wakeup(ds->_as_dq, pp, flags, tq);
-       } else if (pp) {
-               return _dispatch_queue_class_override_drainer(ds->_as_dq, pp, flags);
-       } else if (flags & DISPATCH_WAKEUP_CONSUME) {
-               return _dispatch_release_tailcall(ds);
+       if ((tq == DISPATCH_QUEUE_WAKEUP_TARGET) &&
+                       ds->do_targetq == &_dispatch_mgr_q) {
+               tq = DISPATCH_QUEUE_WAKEUP_MGR;
        }
+
+       return _dispatch_queue_class_wakeup(ds->_as_dq, qos, flags, tq);
 }
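
As the invoke path notes, the tests here run in the same order as in _dispatch_source_invoke2(): wakeup performs no source work of its own, it only picks where the next invoke must happen, then redirects manager-targeted sources to the manager queue. A condensed sketch of that shape (hypothetical names; the cancel and rearm cases are deliberately omitted):

    #include <stdbool.h>

    typedef enum { WAKEUP_NONE, WAKEUP_TARGET, WAKEUP_MGR } wakeup_target_t;

    static wakeup_target_t
    source_wakeup_target(bool needs_timer_config, bool installed,
            bool has_pending_data, bool targets_manager)
    {
        wakeup_target_t tq = WAKEUP_NONE;
        if (needs_timer_config || !installed || has_pending_data) {
            tq = WAKEUP_TARGET;
        }
        if (tq == WAKEUP_TARGET && targets_manager) {
            tq = WAKEUP_MGR; // mirrors the final DISPATCH_QUEUE_WAKEUP_MGR hop
        }
        return tq;
    }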
 
 void
@@ -1045,13 +972,13 @@ dispatch_source_cancel(dispatch_source_t ds)
        // could potentially invoke the source, do the cancellation,
        // unregister the source, and deallocate it. We would
        // need to therefore retain/release before setting the bit
-       _dispatch_retain(ds);
+       _dispatch_retain_2(ds);
 
        dispatch_queue_t q = ds->_as_dq;
        if (_dispatch_queue_atomic_flags_set_orig(q, DSF_CANCELED) & DSF_CANCELED) {
-               _dispatch_release_tailcall(ds);
+               _dispatch_release_2_tailcall(ds);
        } else {
-               dx_wakeup(ds, 0, DISPATCH_WAKEUP_FLUSH | DISPATCH_WAKEUP_CONSUME);
+               dx_wakeup(ds, 0, DISPATCH_WAKEUP_MAKE_DIRTY | DISPATCH_WAKEUP_CONSUME_2);
        }
 }
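
dispatch_source_cancel() above only sets DSF_CANCELED and issues a dirty wakeup; unregistration and the cancel handler are delivered later on the appropriate queues. The usual client-side pairing, sketched with the public API (fd and q are assumed to come from elsewhere):

    #include <dispatch/dispatch.h>
    #include <unistd.h>

    // Hypothetical helper: the cancel handler is the last callout delivered,
    // so the descriptor is closed only after the kevent is unregistered.
    static dispatch_source_t
    watch_fd(int fd, dispatch_queue_t q)
    {
        dispatch_source_t src = dispatch_source_create(
                DISPATCH_SOURCE_TYPE_READ, (uintptr_t)fd, 0, q);
        dispatch_source_set_event_handler(src, ^{
            char buf[256];
            (void)read(fd, buf, sizeof(buf));
        });
        dispatch_source_set_cancel_handler(src, ^{
            close(fd); // safe: no event handler can fire after this point
        });
        dispatch_resume(src);
        return src;    // later: dispatch_source_cancel(src) from any thread
    }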
 
@@ -1059,9 +986,9 @@ void
 dispatch_source_cancel_and_wait(dispatch_source_t ds)
 {
        dispatch_queue_flags_t old_dqf, dqf, new_dqf;
-       pthread_priority_t pp;
+       dispatch_source_refs_t dr = ds->ds_refs;
 
-       if (unlikely(_dispatch_source_get_cancel_handler(ds->ds_refs))) {
+       if (unlikely(_dispatch_source_get_cancel_handler(dr))) {
                DISPATCH_CLIENT_CRASH(ds, "Source has a cancel handler");
        }
 
@@ -1073,7 +1000,7 @@ dispatch_source_cancel_and_wait(dispatch_source_t ds)
                }
                if ((old_dqf & DSF_STATE_MASK) == DSF_DELETED) {
                        // just add DSF_CANCELED
-               } else if ((old_dqf & DSF_DEFERRED_DELETE) || !ds->ds_is_direct_kevent){
+               } else if ((old_dqf & DSF_DEFERRED_DELETE) || !dr->du_is_direct) {
                        new_dqf |= DSF_CANCEL_WAITER;
                }
        });
@@ -1086,13 +1013,12 @@ dispatch_source_cancel_and_wait(dispatch_source_t ds)
                return;
        }
        if (dqf & DSF_CANCEL_WAITER) {
-               goto override;
+               goto wakeup;
        }
 
        // simplified version of _dispatch_queue_drain_try_lock
        // that also sets the DIRTY bit on failure to lock
-       dispatch_lock_owner tid_self = _dispatch_tid_self();
-       uint64_t xor_owner_and_set_full_width = tid_self |
+       uint64_t set_owner_and_set_full_width = _dispatch_lock_value_for_self() |
                        DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER;
        uint64_t old_state, new_state;
 
@@ -1101,7 +1027,7 @@ dispatch_source_cancel_and_wait(dispatch_source_t ds)
                if (likely(_dq_state_is_runnable(old_state) &&
                                !_dq_state_drain_locked(old_state))) {
                        new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
-                       new_state ^= xor_owner_and_set_full_width;
+                       new_state |= set_owner_and_set_full_width;
                } else if (old_dqf & DSF_CANCELED) {
                        os_atomic_rmw_loop_give_up(break);
                } else {
@@ -1125,27 +1051,28 @@ dispatch_source_cancel_and_wait(dispatch_source_t ds)
                // same thing _dispatch_source_invoke2() does when handling cancellation
                dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
                if (!(dqf & (DSF_DEFERRED_DELETE | DSF_DELETED))) {
-                       _dispatch_source_kevent_unregister(ds);
+                       _dispatch_source_refs_unregister(ds, 0);
                        dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
                        if (likely((dqf & DSF_STATE_MASK) == DSF_DELETED)) {
                                _dispatch_source_cancel_callout(ds, NULL, DISPATCH_INVOKE_NONE);
                        }
                }
-               _dispatch_try_lock_transfer_or_wakeup(ds->_as_dq);
-       } else if (unlikely(_dq_state_drain_locked_by(old_state, tid_self))) {
+               dx_wakeup(ds, 0, DISPATCH_WAKEUP_BARRIER_COMPLETE);
+       } else if (unlikely(_dq_state_drain_locked_by_self(old_state))) {
                DISPATCH_CLIENT_CRASH(ds, "dispatch_source_cancel_and_wait "
                                "called from a source handler");
        } else {
-override:
-               pp = _dispatch_get_priority() & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-               if (pp) dx_wakeup(ds, pp, DISPATCH_WAKEUP_OVERRIDING);
+               dispatch_qos_t qos;
+wakeup:
+               qos = _dispatch_qos_from_pp(_dispatch_get_priority());
+               dx_wakeup(ds, qos, DISPATCH_WAKEUP_MAKE_DIRTY);
                dispatch_activate(ds);
        }
 
        dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
        while (unlikely((dqf & DSF_STATE_MASK) != DSF_DELETED)) {
                if (unlikely(!(dqf & DSF_CANCEL_WAITER))) {
-                       if (!os_atomic_cmpxchgvw2o(ds, dq_atomic_flags,
+                       if (!os_atomic_cmpxchgv2o(ds, dq_atomic_flags,
                                        dqf, dqf | DSF_CANCEL_WAITER, &dqf, relaxed)) {
                                continue;
                        }
@@ -1156,46 +1083,44 @@ override:
        }
 }
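
The two DISPATCH_CLIENT_CRASH cases spell out the contract of this synchronous variant: the source must not have a cancel handler, and the call must not come from one of the source's own handlers. A sketch of intended use under exactly those assumptions:

    #include <dispatch/dispatch.h>

    // Hypothetical teardown for a source created *without* a cancel handler;
    // returns only once the event/registration handlers have quiesced.
    static void
    stop_source_sync(dispatch_source_t src)
    {
        dispatch_source_cancel_and_wait(src);
        dispatch_release(src); // handler-owned state is now unreachable
    }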
 
-static void
-_dispatch_source_merge_kevent(dispatch_source_t ds,
-               const _dispatch_kevent_qos_s *ke)
+void
+_dispatch_source_merge_evt(dispatch_unote_t du, uint32_t flags, uintptr_t data,
+               uintptr_t status, pthread_priority_t pp)
 {
-       _dispatch_object_debug(ds, "%s", __func__);
-       dispatch_wakeup_flags_t flags = 0;
+       dispatch_source_refs_t dr = du._dr;
+       dispatch_source_t ds = _dispatch_source_from_refs(dr);
+       dispatch_wakeup_flags_t wflags = 0;
        dispatch_queue_flags_t dqf;
-       pthread_priority_t pp = 0;
 
-       if (ds->ds_needs_rearm || (ke->flags & (EV_DELETE | EV_ONESHOT))) {
+       if (_dispatch_unote_needs_rearm(dr) || (flags & (EV_DELETE | EV_ONESHOT))) {
                // once we modify the queue atomic flags below, it will allow concurrent
                // threads running _dispatch_source_invoke2 to dispose of the source,
-               // so we can't safely borrow the reference we get from the knote udata
+               // so we can't safely borrow the reference we get from the muxnote udata
                // anymore, and need our own
-               flags = DISPATCH_WAKEUP_CONSUME;
-               _dispatch_retain(ds); // rdar://20382435
+               wflags = DISPATCH_WAKEUP_CONSUME_2;
+               _dispatch_retain_2(ds); // rdar://20382435
        }
 
-       if ((ke->flags & EV_UDATA_SPECIFIC) && (ke->flags & EV_ONESHOT) &&
-                       !(ke->flags & EV_DELETE)) {
+       if ((flags & EV_UDATA_SPECIFIC) && (flags & EV_ONESHOT) &&
+                       !(flags & EV_DELETE)) {
                dqf = _dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq,
                                DSF_DEFERRED_DELETE, DSF_ARMED);
-               if (ke->flags & EV_VANISHED) {
-                       _dispatch_bug_kevent_client("kevent", _evfiltstr(ke->filter),
+               if (flags & EV_VANISHED) {
+                       _dispatch_bug_kevent_client("kevent", dr->du_type->dst_kind,
                                        "monitored resource vanished before the source "
                                        "cancel handler was invoked", 0);
                }
                _dispatch_debug("kevent-source[%p]: %s kevent[%p]", ds,
-                               (ke->flags & EV_VANISHED) ? "vanished" :
-                               "deferred delete oneshot", (void*)ke->udata);
-       } else if ((ke->flags & EV_DELETE) || (ke->flags & EV_ONESHOT)) {
-               dqf = _dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq,
-                               DSF_DELETED, DSF_ARMED);
-               _dispatch_debug("kevent-source[%p]: delete kevent[%p]",
-                               ds, (void*)ke->udata);
-               if (ke->flags & EV_DELETE) goto done;
-       } else if (ds->ds_needs_rearm) {
+                               (flags & EV_VANISHED) ? "vanished" :
+                               "deferred delete oneshot", dr);
+       } else if (flags & (EV_DELETE | EV_ONESHOT)) {
+               _dispatch_source_refs_unregister(ds, DU_UNREGISTER_ALREADY_DELETED);
+               _dispatch_debug("kevent-source[%p]: deleted kevent[%p]", ds, dr);
+               if (flags & EV_DELETE) goto done;
+               dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
+       } else if (_dispatch_unote_needs_rearm(dr)) {
                dqf = _dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED);
-               _dispatch_debug("kevent-source[%p]: disarmed kevent[%p] ",
-                               ds, (void*)ke->udata);
+               _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, dr);
        } else {
                dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
        }
@@ -1203,16 +1128,10 @@ _dispatch_source_merge_kevent(dispatch_source_t ds,
        if (dqf & (DSF_CANCELED | DQF_RELEASED)) {
                goto done; // rdar://20204025
        }
-#if HAVE_MACH
-       if (ke->filter == EVFILT_MACHPORT &&
-                       dx_type(ds) == DISPATCH_MACH_CHANNEL_TYPE) {
-               DISPATCH_INTERNAL_CRASH(ke->flags,"Unexpected kevent for mach channel");
-       }
-#endif
 
-       unsigned long data;
-       if ((ke->flags & EV_UDATA_SPECIFIC) && (ke->flags & EV_ONESHOT) &&
-                       (ke->flags & EV_VANISHED)) {
+       dispatch_unote_action_t action = dr->du_data_action;
+       if ((flags & EV_UDATA_SPECIFIC) && (flags & EV_ONESHOT) &&
+                       (flags & EV_VANISHED)) {
                // if the resource behind the ident vanished, the event handler can't
                // do anything useful anymore, so do not try to call it at all
                //
@@ -1222,401 +1141,63 @@ _dispatch_source_merge_kevent(dispatch_source_t ds,
                // if we get both bits it was a real EV_VANISHED delivery
                os_atomic_store2o(ds, ds_pending_data, 0, relaxed);
 #if HAVE_MACH
-       } else if (ke->filter == EVFILT_MACHPORT) {
-               data = DISPATCH_MACH_RECV_MESSAGE;
+       } else if (dr->du_filter == EVFILT_MACHPORT) {
                os_atomic_store2o(ds, ds_pending_data, data, relaxed);
 #endif
-       } else if (ds->ds_is_level) {
-               // ke->data is signed and "negative available data" makes no sense
-               // zero bytes happens when EV_EOF is set
-               dispatch_assert(ke->data >= 0l);
-               data = ~(unsigned long)ke->data;
+       } else if (action == DISPATCH_UNOTE_ACTION_DATA_SET) {
                os_atomic_store2o(ds, ds_pending_data, data, relaxed);
-       } else if (ds->ds_is_adder) {
-               data = (unsigned long)ke->data;
+       } else if (action == DISPATCH_UNOTE_ACTION_DATA_ADD) {
                os_atomic_add2o(ds, ds_pending_data, data, relaxed);
-       } else if (ke->fflags & ds->ds_pending_data_mask) {
-               data = ke->fflags & ds->ds_pending_data_mask;
+       } else if (data && action == DISPATCH_UNOTE_ACTION_DATA_OR) {
                os_atomic_or2o(ds, ds_pending_data, data, relaxed);
+       } else if (data && action == DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET) {
+               // We combine the data and status into a single 64-bit value.
+               uint64_t odata, ndata;
+               uint64_t value = DISPATCH_SOURCE_COMBINE_DATA_AND_STATUS(data, status);
+               os_atomic_rmw_loop2o(ds, ds_pending_data, odata, ndata, relaxed, {
+                       ndata = DISPATCH_SOURCE_GET_DATA(odata) | value;
+               });
+       } else if (data) {
+               DISPATCH_INTERNAL_CRASH(action, "Unexpected source action value");
        }
+       _dispatch_debug("kevent-source[%p]: merged kevent[%p]", ds, dr);
 
 done:
-#if DISPATCH_USE_KEVENT_QOS
-       pp = ((pthread_priority_t)ke->qos) & ~_PTHREAD_PRIORITY_FLAGS_MASK;
-#endif
-       dx_wakeup(ds, pp, flags | DISPATCH_WAKEUP_FLUSH);
+       _dispatch_object_debug(ds, "%s", __func__);
+       dx_wakeup(ds, _dispatch_qos_from_pp(pp), wflags | DISPATCH_WAKEUP_MAKE_DIRTY);
 }
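
The DISPATCH_UNOTE_ACTION_DATA_ADD and DATA_OR branches above are the entire behavioral difference between the two custom source types; through the public API it shows up only in how merges coalesce before the handler runs. A small sketch (q is assumed to exist):

    #include <dispatch/dispatch.h>

    static void
    make_custom_sources(dispatch_queue_t q)
    {
        // DATA_ADD: pending data accumulates by addition between callouts.
        dispatch_source_t adder = dispatch_source_create(
                DISPATCH_SOURCE_TYPE_DATA_ADD, 0, 0, q);
        // DATA_OR: pending data is the union of the merged bits.
        dispatch_source_t orer = dispatch_source_create(
                DISPATCH_SOURCE_TYPE_DATA_OR, 0, 0, q);
        dispatch_source_set_event_handler(adder, ^{
            (void)dispatch_source_get_data(adder); // sum since last callout
        });
        dispatch_source_set_event_handler(orer, ^{
            (void)dispatch_source_get_data(orer);  // OR since last callout
        });
        dispatch_activate(adder);
        dispatch_activate(orer);
        dispatch_source_merge_data(adder, 1);
        dispatch_source_merge_data(orer, 0x4);
    }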
 
 #pragma mark -
-#pragma mark dispatch_kevent_t
-
-#define DSL_HASH(x) ((x) & (DSL_HASH_SIZE - 1))
-
-DISPATCH_CACHELINE_ALIGN
-static TAILQ_HEAD(, dispatch_kevent_s) _dispatch_sources[DSL_HASH_SIZE];
-
-static void
-_dispatch_kevent_init()
-{
-       unsigned int i;
-       for (i = 0; i < DSL_HASH_SIZE; i++) {
-               TAILQ_INIT(&_dispatch_sources[i]);
-       }
-}
-
-static inline uintptr_t
-_dispatch_kevent_hash(uint64_t ident, short filter)
-{
-       uint64_t value;
-#if HAVE_MACH
-       value = (filter == EVFILT_MACHPORT ||
-                       filter == DISPATCH_EVFILT_MACH_NOTIFICATION ?
-                       MACH_PORT_INDEX(ident) : ident);
-#else
-       value = ident;
-       (void)filter;
-#endif
-       return DSL_HASH((uintptr_t)value);
-}
-
-static dispatch_kevent_t
-_dispatch_kevent_find(uint64_t ident, short filter)
-{
-       uintptr_t hash = _dispatch_kevent_hash(ident, filter);
-       dispatch_kevent_t dki;
-
-       TAILQ_FOREACH(dki, &_dispatch_sources[hash], dk_list) {
-               if (dki->dk_kevent.ident == ident && dki->dk_kevent.filter == filter) {
-                       break;
-               }
-       }
-       return dki;
-}
-
-static void
-_dispatch_kevent_insert(dispatch_kevent_t dk)
-{
-       if (dk->dk_kevent.flags & EV_UDATA_SPECIFIC) return;
-       uintptr_t hash = _dispatch_kevent_hash(dk->dk_kevent.ident,
-                       dk->dk_kevent.filter);
-       TAILQ_INSERT_TAIL(&_dispatch_sources[hash], dk, dk_list);
-}
-
-// Find existing kevents, and merge any new flags if necessary
-static bool
-_dispatch_kevent_register(dispatch_kevent_t *dkp, pthread_priority_t pp,
-               uint32_t *flgp)
-{
-       dispatch_kevent_t dk = NULL, ds_dkev = *dkp;
-       uint32_t new_flags;
-       bool do_resume = false;
-
-       if (!(ds_dkev->dk_kevent.flags & EV_UDATA_SPECIFIC)) {
-               dk = _dispatch_kevent_find(ds_dkev->dk_kevent.ident,
-                               ds_dkev->dk_kevent.filter);
-       }
-       if (dk) {
-               // If an existing dispatch kevent is found, check to see if new flags
-               // need to be added to the existing kevent
-               new_flags = ~dk->dk_kevent.fflags & ds_dkev->dk_kevent.fflags;
-               dk->dk_kevent.fflags |= ds_dkev->dk_kevent.fflags;
-               free(ds_dkev);
-               *dkp = dk;
-               do_resume = new_flags;
-       } else {
-               dk = ds_dkev;
-#if DISPATCH_USE_KEVENT_WORKQUEUE
-               if (!_dispatch_kevent_workqueue_enabled) {
-                       // do nothing
-               } else if (!(dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) {
-                       dk->dk_kevent.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
-               } else {
-                       pp &= (~_PTHREAD_PRIORITY_FLAGS_MASK |
-                                       _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
-                       if (!pp) pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
-                       _dispatch_assert_is_valid_qos_class(pp);
-                       dk->dk_kevent.qos = (_dispatch_kevent_priority_t)pp;
-               }
-#else
-               (void)pp;
-#endif
-               _dispatch_kevent_insert(dk);
-               new_flags = dk->dk_kevent.fflags;
-               do_resume = true;
-       }
-       // Re-register the kevent with the kernel if new flags were added
-       // by the dispatch kevent
-       if (do_resume) {
-               dk->dk_kevent.flags |= EV_ADD;
-       }
-       *flgp = new_flags;
-       return do_resume;
-}
-
-static long
-_dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags,
-               uint32_t del_flags)
-{
-       long r;
-       bool oneshot;
-       if (dk->dk_kevent.flags & EV_DELETE) {
-               return 0;
-       }
-       switch (dk->dk_kevent.filter) {
-       case DISPATCH_EVFILT_TIMER:
-       case DISPATCH_EVFILT_CUSTOM_ADD:
-       case DISPATCH_EVFILT_CUSTOM_OR:
-               // these types not registered with kevent
-               return 0;
-#if HAVE_MACH
-       case DISPATCH_EVFILT_MACH_NOTIFICATION:
-               return _dispatch_kevent_mach_notify_resume(dk, new_flags, del_flags);
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-       case EVFILT_MACHPORT:
-               if (!(dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) {
-                       return _dispatch_kevent_machport_resume(dk, new_flags, del_flags);
-               }
-               // fall through
-#endif
-#endif // HAVE_MACH
-       default:
-               // oneshot dk may be freed by the time we return from
-               // _dispatch_kq_immediate_update if the event was delivered (and then
-               // unregistered) concurrently.
-               oneshot = (dk->dk_kevent.flags & EV_ONESHOT);
-               r = _dispatch_kq_immediate_update(&dk->dk_kevent);
-               if (r && (dk->dk_kevent.flags & EV_ADD) &&
-                               (dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) {
-                       dk->dk_kevent.flags |= EV_DELETE;
-                       dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE|EV_VANISHED);
-               } else if (!oneshot && (dk->dk_kevent.flags & EV_DISPATCH)) {
-                       // we can safely skip doing this for ONESHOT events because
-                       // the next kq update we will do is _dispatch_kevent_dispose()
-                       // which also clears EV_ADD.
-                       dk->dk_kevent.flags &= ~(EV_ADD|EV_VANISHED);
-               }
-               return r;
-       }
-       (void)new_flags; (void)del_flags;
-}
-
-static long
-_dispatch_kevent_dispose(dispatch_kevent_t dk, unsigned int options)
-{
-       long r = 0;
-       switch (dk->dk_kevent.filter) {
-       case DISPATCH_EVFILT_TIMER:
-       case DISPATCH_EVFILT_CUSTOM_ADD:
-       case DISPATCH_EVFILT_CUSTOM_OR:
-               if (dk->dk_kevent.flags & EV_UDATA_SPECIFIC) {
-                       free(dk);
-               } else {
-                       // these sources live on statically allocated lists
-               }
-               return r;
-       }
-       if (!(dk->dk_kevent.flags & EV_DELETE)) {
-               dk->dk_kevent.flags |= EV_DELETE;
-               dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE|EV_VANISHED);
-               if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) {
-                       dk->dk_kevent.flags |= EV_ENABLE;
-               }
-               switch (dk->dk_kevent.filter) {
-#if HAVE_MACH
-               case DISPATCH_EVFILT_MACH_NOTIFICATION:
-                       r = _dispatch_kevent_mach_notify_resume(dk, 0,dk->dk_kevent.fflags);
-                       break;
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-               case EVFILT_MACHPORT:
-                       if (!(dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) {
-                               r = _dispatch_kevent_machport_resume(dk,0,dk->dk_kevent.fflags);
-                               break;
-                       }
-                       // fall through
-#endif
-#endif
-               default:
-                       if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) {
-                               _dispatch_kq_deferred_update(&dk->dk_kevent);
-                       } else {
-                               r = _dispatch_kq_immediate_update(&dk->dk_kevent);
-                       }
-                       break;
-               }
-               if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) {
-                       dk->dk_kevent.flags &= ~EV_ENABLE;
-               }
-       }
-       if (dk->dk_kevent.flags & EV_UDATA_SPECIFIC) {
-               bool deferred_delete = (r == EINPROGRESS);
-#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS
-               if (r == ENOENT) deferred_delete = true;
-#endif
-               if (deferred_delete) {
-                       // deferred EV_DELETE or concurrent EV_DELETE delivery
-                       dk->dk_kevent.flags &= ~EV_DELETE;
-                       dk->dk_kevent.flags |= EV_ENABLE;
-                       return r;
-               }
-       } else {
-               uintptr_t hash = _dispatch_kevent_hash(dk->dk_kevent.ident,
-                               dk->dk_kevent.filter);
-               TAILQ_REMOVE(&_dispatch_sources[hash], dk, dk_list);
-       }
-       free(dk);
-       return r;
-}
-
-static long
-_dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg,
-               unsigned int options)
-{
-       dispatch_source_refs_t dri;
-       uint32_t del_flags, fflags = 0;
-       long r = 0;
-
-       if (TAILQ_EMPTY(&dk->dk_sources) ||
-                       (dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) {
-               r = _dispatch_kevent_dispose(dk, options);
-       } else {
-               TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) {
-                       dispatch_source_t dsi = _dispatch_source_from_refs(dri);
-                       uint32_t mask = (uint32_t)dsi->ds_pending_data_mask;
-                       fflags |= mask;
-               }
-               del_flags = flg & ~fflags;
-               if (del_flags) {
-                       dk->dk_kevent.flags |= EV_ADD;
-                       dk->dk_kevent.fflags &= ~del_flags;
-                       r = _dispatch_kevent_resume(dk, 0, del_flags);
-               }
-       }
-       return r;
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_kevent_proc_exit(_dispatch_kevent_qos_s *ke)
-{
-       // EVFILT_PROC may fail with ESRCH when the process exists but is a zombie
-       // <rdar://problem/5067725>. As a workaround, we simulate an exit event for
-       // any EVFILT_PROC with an invalid pid <rdar://problem/6626350>.
-       _dispatch_kevent_qos_s fake;
-       fake = *ke;
-       fake.flags &= ~EV_ERROR;
-       fake.flags |= EV_ONESHOT;
-       fake.fflags = NOTE_EXIT;
-       fake.data = 0;
-       _dispatch_kevent_debug("synthetic NOTE_EXIT", ke);
-       _dispatch_kevent_merge(&fake);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_kevent_error(_dispatch_kevent_qos_s *ke)
-{
-       _dispatch_kevent_qos_s *kev = NULL;
-
-       if (ke->flags & EV_DELETE) {
-               if (ke->flags & EV_UDATA_SPECIFIC) {
-                       if (ke->data == EINPROGRESS) {
-                               // deferred EV_DELETE
-                               return;
-                       }
-#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS
-                       if (ke->data == ENOENT) {
-                               // deferred EV_DELETE
-                               return;
-                       }
-#endif
-               }
-               // for EV_DELETE if the update was deferred we may have reclaimed
-               // our dispatch_kevent_t, and it is unsafe to dereference it now.
-       } else if (ke->udata) {
-               kev = &((dispatch_kevent_t)ke->udata)->dk_kevent;
-               ke->flags |= kev->flags;
-       }
-
-#if HAVE_MACH
-       if (ke->filter == EVFILT_MACHPORT && ke->data == ENOTSUP &&
-                       (ke->flags & EV_ADD) && _dispatch_evfilt_machport_direct_enabled &&
-                       kev && (kev->fflags & MACH_RCV_MSG)) {
-               DISPATCH_INTERNAL_CRASH(ke->ident,
-                               "Missing EVFILT_MACHPORT support for ports");
-       }
-#endif
-
-       if (ke->data) {
-               // log the unexpected error
-               _dispatch_bug_kevent_client("kevent", _evfiltstr(ke->filter),
-                               !ke->udata ? NULL :
-                               ke->flags & EV_DELETE ? "delete" :
-                               ke->flags & EV_ADD ? "add" :
-                               ke->flags & EV_ENABLE ? "enable" : "monitor",
-                               (int)ke->data);
-       }
-}
-
-static void
-_dispatch_kevent_drain(_dispatch_kevent_qos_s *ke)
-{
-       if (ke->filter == EVFILT_USER) {
-               _dispatch_kevent_mgr_debug(ke);
-               return;
-       }
-       if (slowpath(ke->flags & EV_ERROR)) {
-               if (ke->filter == EVFILT_PROC && ke->data == ESRCH) {
-                       _dispatch_debug("kevent[0x%llx]: ESRCH from EVFILT_PROC: "
-                                       "generating fake NOTE_EXIT", (unsigned long long)ke->udata);
-                       return _dispatch_kevent_proc_exit(ke);
-               }
-               _dispatch_debug("kevent[0x%llx]: handling error",
-                               (unsigned long long)ke->udata);
-               return _dispatch_kevent_error(ke);
-       }
-       if (ke->filter == EVFILT_TIMER) {
-               _dispatch_debug("kevent[0x%llx]: handling timer",
-                               (unsigned long long)ke->udata);
-               return _dispatch_timers_kevent(ke);
-       }
-#if HAVE_MACH
-       if (ke->filter == EVFILT_MACHPORT) {
-               _dispatch_debug("kevent[0x%llx]: handling mach port",
-                               (unsigned long long)ke->udata);
-               return _dispatch_mach_kevent_merge(ke);
-       }
-#endif
-       return _dispatch_kevent_merge(ke);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_kevent_merge(_dispatch_kevent_qos_s *ke)
-{
-       dispatch_kevent_t dk = (void*)ke->udata;
-       dispatch_source_refs_t dri, dr_next;
-
-       TAILQ_FOREACH_SAFE(dri, &dk->dk_sources, dr_list, dr_next) {
-               _dispatch_source_merge_kevent(_dispatch_source_from_refs(dri), ke);
-       }
-}
-
-#pragma mark -
-#pragma mark dispatch_source_timer
+#pragma mark dispatch_source_timer
 
 #if DISPATCH_USE_DTRACE
-static dispatch_source_refs_t
+static dispatch_timer_source_refs_t
                _dispatch_trace_next_timer[DISPATCH_TIMER_QOS_COUNT];
 #define _dispatch_trace_next_timer_set(x, q) \
                _dispatch_trace_next_timer[(q)] = (x)
 #define _dispatch_trace_next_timer_program(d, q) \
                _dispatch_trace_timer_program(_dispatch_trace_next_timer[(q)], (d))
-#define _dispatch_trace_next_timer_wake(q) \
-               _dispatch_trace_timer_wake(_dispatch_trace_next_timer[(q)])
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_mgr_trace_timers_wakes(void)
+{
+       uint32_t qos;
+
+       if (_dispatch_timers_will_wake) {
+               if (slowpath(DISPATCH_TIMER_WAKE_ENABLED())) {
+                       for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) {
+                               if (_dispatch_timers_will_wake & (1 << qos)) {
+                                       _dispatch_trace_timer_wake(_dispatch_trace_next_timer[qos]);
+                               }
+                       }
+               }
+               _dispatch_timers_will_wake = 0;
+       }
+}
 #else
 #define _dispatch_trace_next_timer_set(x, q)
 #define _dispatch_trace_next_timer_program(d, q)
-#define _dispatch_trace_next_timer_wake(q)
+#define _dispatch_mgr_trace_timers_wakes()
 #endif
 
 #define _dispatch_source_timer_telemetry_enabled() false
@@ -1643,82 +1224,40 @@ _dispatch_source_timer_telemetry(dispatch_source_t ds, dispatch_clock_t clock,
        }
 }
 
-static inline unsigned long
-_dispatch_source_timer_data(dispatch_source_refs_t dr, unsigned long prev)
-{
-       // calculate the number of intervals since last fire
-       unsigned long data, missed;
-       uint64_t now;
-       now = _dispatch_time_now(DISPATCH_TIMER_CLOCK(_dispatch_source_timer_idx(dr)));
-       missed = (unsigned long)((now - ds_timer(dr).last_fire) /
-                       ds_timer(dr).interval);
-       // correct for missed intervals already delivered last time
-       data = prev - ds_timer(dr).missed + missed;
-       ds_timer(dr).missed = missed;
-       return data;
-}
-
-struct dispatch_set_timer_params {
-       dispatch_source_t ds;
-       struct dispatch_timer_source_s values;
-       dispatch_clock_t clock;
-};
-
+DISPATCH_NOINLINE
 static void
-_dispatch_source_set_timer3(void *context)
+_dispatch_source_timer_configure(dispatch_source_t ds)
 {
-       // Called on the _dispatch_mgr_q
-       struct dispatch_set_timer_params *params = context;
-       dispatch_source_t ds = params->ds;
-       dispatch_timer_source_refs_t dt = (dispatch_timer_source_refs_t)ds->ds_refs;
-
-       params->values.flags = ds_timer(dt).flags;
-       if (params->clock == DISPATCH_CLOCK_WALL) {
-               params->values.flags |= DISPATCH_TIMER_WALL_CLOCK;
-#if HAVE_MACH
-               _dispatch_mach_host_calendar_change_register();
-#endif
+       dispatch_timer_source_refs_t dt = ds->ds_timer_refs;
+       dispatch_timer_config_t dtc;
+
+       dtc = os_atomic_xchg2o(dt, dt_pending_config, NULL, dependency);
+       if (dtc->dtc_clock == DISPATCH_CLOCK_MACH) {
+               dt->du_fflags |= DISPATCH_TIMER_CLOCK_MACH;
        } else {
-               params->values.flags &= ~(unsigned long)DISPATCH_TIMER_WALL_CLOCK;
+               dt->du_fflags &= ~(uint32_t)DISPATCH_TIMER_CLOCK_MACH;
        }
-       ds_timer(dt) = params->values;
-       ds->ds_ident_hack = _dispatch_source_timer_idx(ds->ds_refs);
-       // Clear any pending data that might have accumulated on
-       // older timer params <rdar://problem/8574886>
-       ds->ds_pending_data = 0;
-
-       dispatch_resume(ds);
-       if (_dispatch_source_tryarm(ds)) {
-               // Re-arm in case we got disarmed because of pending set_timer suspension
-               _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, dt);
-               // Must happen after resume to avoid getting disarmed due to suspension
-               _dispatch_timers_update(ds);
+       dt->dt_timer = dtc->dtc_timer;
+       free(dtc);
+       if (ds->ds_is_installed) {
+               // Clear any pending data that might have accumulated on
+               // older timer params <rdar://problem/8574886>
+               os_atomic_store2o(ds, ds_pending_data, 0, relaxed);
+               _dispatch_timers_update(dt, 0);
        }
-       dispatch_release(ds);
-       free(params);
 }
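
The pending-config handoff above is a small lock-free publish/consume: setters exchange in a heap-allocated config, the kevent queue exchanges it back out (with dependency ordering in the code above), and a config that loses a publishing race is simply freed. The same shape in plain C11 atomics, with acquire standing in for the consume side (all names hypothetical):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef struct timer_config {
        uint64_t target, interval, leeway;
    } timer_config_t;

    static _Atomic(timer_config_t *) pending_config;

    // Any thread: publish a new config; an overwritten one is just freed.
    static void
    publish_config(timer_config_t *cfg)
    {
        timer_config_t *old = atomic_exchange_explicit(&pending_config, cfg,
                memory_order_release);
        free(old);
    }

    // Kevent/manager queue only: consume the latest config exactly once.
    static timer_config_t *
    take_config(void)
    {
        return atomic_exchange_explicit(&pending_config, NULL,
                memory_order_acquire);
    }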
 
-static void
-_dispatch_source_set_timer2(void *context)
-{
-       // Called on the source queue
-       struct dispatch_set_timer_params *params = context;
-       dispatch_suspend(params->ds);
-       _dispatch_barrier_async_detached_f(&_dispatch_mgr_q, params,
-                       _dispatch_source_set_timer3);
-}
-
-DISPATCH_NOINLINE
-static struct dispatch_set_timer_params *
-_dispatch_source_timer_params(dispatch_source_t ds, dispatch_time_t start,
+static dispatch_timer_config_t
+_dispatch_source_timer_config_create(dispatch_time_t start,
                uint64_t interval, uint64_t leeway)
 {
-       struct dispatch_set_timer_params *params;
-       params = _dispatch_calloc(1ul, sizeof(struct dispatch_set_timer_params));
-       params->ds = ds;
-
-       if (interval == 0) {
-               // we use zero internally to mean disabled
+       dispatch_timer_config_t dtc;
+       dtc = _dispatch_calloc(1ul, sizeof(struct dispatch_timer_config_s));
+       if (unlikely(interval == 0)) {
+               if (start != DISPATCH_TIME_FOREVER) {
+                       _dispatch_bug_deprecated("Setting timer interval to 0 requests "
+                                       "a 1ns timer, did you mean FOREVER (a one-shot timer)?");
+               }
                interval = 1;
        } else if ((int64_t)interval < 0) {
                // 6866347 - make sure nanoseconds won't overflow
@@ -1736,7 +1275,7 @@ _dispatch_source_timer_params(dispatch_source_t ds, dispatch_time_t start,
        if ((int64_t)start < 0) {
                // wall clock
                start = (dispatch_time_t)-((int64_t)start);
-               params->clock = DISPATCH_CLOCK_WALL;
+               dtc->dtc_clock = DISPATCH_CLOCK_WALL;
        } else {
                // absolute clock
                interval = _dispatch_time_nano2mach(interval);
@@ -1748,66 +1287,50 @@ _dispatch_source_timer_params(dispatch_source_t ds, dispatch_time_t start,
                        interval = 1;
                }
                leeway = _dispatch_time_nano2mach(leeway);
-               params->clock = DISPATCH_CLOCK_MACH;
-       }
-       params->values.target = start;
-       params->values.deadline = (start < UINT64_MAX - leeway) ?
-                       start + leeway : UINT64_MAX;
-       params->values.interval = interval;
-       params->values.leeway = (interval == INT64_MAX || leeway < interval / 2) ?
-                       leeway : interval / 2;
-       return params;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start,
-               uint64_t interval, uint64_t leeway, bool source_sync)
-{
-       if (slowpath(!ds->ds_is_timer) ||
-                       slowpath(ds_timer(ds->ds_refs).flags & DISPATCH_TIMER_INTERVAL)) {
-               DISPATCH_CLIENT_CRASH(ds, "Attempt to set timer on a non-timer source");
+               dtc->dtc_clock = DISPATCH_CLOCK_MACH;
+       }
+       if (interval < INT64_MAX && leeway > interval / 2) {
+               leeway = interval / 2;
        }
 
-       struct dispatch_set_timer_params *params;
-       params = _dispatch_source_timer_params(ds, start, interval, leeway);
-
-       _dispatch_source_timer_telemetry(ds, params->clock, &params->values);
-       // Suspend the source so that it doesn't fire with pending changes
-       // The use of suspend/resume requires the external retain/release
-       dispatch_retain(ds);
-       if (source_sync) {
-               return _dispatch_barrier_trysync_or_async_f(ds->_as_dq, params,
-                               _dispatch_source_set_timer2);
+       dtc->dtc_timer.target = start;
+       dtc->dtc_timer.interval = interval;
+       if (start + leeway < INT64_MAX) {
+               dtc->dtc_timer.deadline = start + leeway;
        } else {
-               return _dispatch_source_set_timer2(params);
+               dtc->dtc_timer.deadline = INT64_MAX;
        }
+       return dtc;
 }
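
In isolation, the math above clamps any requested leeway to half the interval and saturates the deadline sum. A small self-checking sketch with hypothetical nanosecond values:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t
    timer_deadline(uint64_t start, uint64_t interval, uint64_t leeway)
    {
        if (interval < INT64_MAX && leeway > interval / 2) {
            leeway = interval / 2;  // never defer past mid-interval
        }
        return (start + leeway < INT64_MAX) ? start + leeway : INT64_MAX;
    }

    int main(void)
    {
        // 1s interval with an oversized 10s leeway request -> clamped to 500ms
        assert(timer_deadline(5000000000ull, 1000000000ull, 10000000000ull)
                == 5500000000ull);
        return 0;
    }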
 
+DISPATCH_NOINLINE
 void
 dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start,
                uint64_t interval, uint64_t leeway)
 {
-       _dispatch_source_set_timer(ds, start, interval, leeway, true);
-}
+       dispatch_timer_source_refs_t dt = ds->ds_timer_refs;
+       dispatch_timer_config_t dtc;
 
-void
-_dispatch_source_set_runloop_timer_4CF(dispatch_source_t ds,
-               dispatch_time_t start, uint64_t interval, uint64_t leeway)
-{
-       // Don't serialize through the source queue for CF timers <rdar://13833190>
-       _dispatch_source_set_timer(ds, start, interval, leeway, false);
+       if (unlikely(!dt->du_is_timer || (dt->du_fflags&DISPATCH_TIMER_INTERVAL))) {
+               DISPATCH_CLIENT_CRASH(ds, "Attempt to set timer on a non-timer source");
+       }
+
+       dtc = _dispatch_source_timer_config_create(start, interval, leeway);
+       _dispatch_source_timer_telemetry(ds, dtc->dtc_clock, &dtc->dtc_timer);
+       dtc = os_atomic_xchg2o(dt, dt_pending_config, dtc, release);
+       if (dtc) free(dtc);
+       dx_wakeup(ds, 0, DISPATCH_WAKEUP_MAKE_DIRTY);
 }
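
Because the setter now only publishes a pending config and dirties the source, it no longer round-trips through suspend/resume and the manager queue, and stays safe to call from any thread, even repeatedly. Standard usage for reference (a repeating one-second timer with 100ms of leeway):

    #include <dispatch/dispatch.h>
    #include <stdio.h>

    int main(void)
    {
        dispatch_source_t timer = dispatch_source_create(
                DISPATCH_SOURCE_TYPE_TIMER, 0, 0,
                dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0));
        dispatch_source_set_event_handler(timer, ^{
            printf("tick\n");
        });
        // first fire in 1s, repeat every 1s, allow 100ms of coalescing
        dispatch_source_set_timer(timer,
                dispatch_time(DISPATCH_TIME_NOW, (int64_t)NSEC_PER_SEC),
                NSEC_PER_SEC, 100 * NSEC_PER_MSEC);
        dispatch_resume(timer);
        dispatch_main();
    }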
 
-void
+static void
 _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval)
 {
 #define NSEC_PER_FRAME (NSEC_PER_SEC/60)
 // approx 1 year (60s * 60m * 24h * 365d)
 #define FOREVER_NSEC 31536000000000000ull
 
-       dispatch_source_refs_t dr = ds->ds_refs;
-       const bool animation = ds_timer(dr).flags & DISPATCH_INTERVAL_UI_ANIMATION;
+       dispatch_timer_source_refs_t dr = ds->ds_timer_refs;
+       const bool animation = dr->du_fflags & DISPATCH_INTERVAL_UI_ANIMATION;
        if (fastpath(interval <= (animation ? FOREVER_NSEC/NSEC_PER_FRAME :
                        FOREVER_NSEC/NSEC_PER_MSEC))) {
                interval *= animation ? NSEC_PER_FRAME : NSEC_PER_MSEC;
@@ -1816,1081 +1339,994 @@ _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval)
        }
        interval = _dispatch_time_nano2mach(interval);
        uint64_t target = _dispatch_absolute_time() + interval;
-       target = (target / interval) * interval;
+       target -= (target % interval);
        const uint64_t leeway = animation ?
                        _dispatch_time_nano2mach(NSEC_PER_FRAME) : interval / 2;
-       ds_timer(dr).target = target;
-       ds_timer(dr).deadline = target + leeway;
-       ds_timer(dr).interval = interval;
-       ds_timer(dr).leeway = leeway;
-       dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(ds->ds_ident_hack);
-       _dispatch_source_timer_telemetry(ds, clock, &ds_timer(dr));
+       dr->dt_timer.target = target;
+       dr->dt_timer.deadline = target + leeway;
+       dr->dt_timer.interval = interval;
+       _dispatch_source_timer_telemetry(ds, DISPATCH_CLOCK_MACH, &dr->dt_timer);
 }
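
The first-fire computation changed from (target / interval) * interval to target -= (target % interval); for the unsigned values involved these are the same rounding, snapping the fire time down to an interval boundary. A throwaway check:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t
    align_down(uint64_t t, uint64_t interval)
    {
        return t - (t % interval); // == (t / interval) * interval
    }

    int main(void)
    {
        assert(align_down(1234, 100) == 1200);
        assert(align_down(1234, 100) == (1234 / 100) * 100);
        return 0;
    }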
 
 #pragma mark -
-#pragma mark dispatch_timers
+#pragma mark dispatch_after
 
-#define DISPATCH_TIMER_STRUCT(refs) \
-       uint64_t target, deadline; \
-       TAILQ_HEAD(, refs) dt_sources
-
-typedef struct dispatch_timer_s {
-       DISPATCH_TIMER_STRUCT(dispatch_timer_source_refs_s);
-} *dispatch_timer_t;
-
-#define DISPATCH_TIMER_INITIALIZER(tidx) \
-       [tidx] = { \
-               .target = UINT64_MAX, \
-               .deadline = UINT64_MAX, \
-               .dt_sources = TAILQ_HEAD_INITIALIZER( \
-                               _dispatch_timer[tidx].dt_sources), \
-       }
-#define DISPATCH_TIMER_INIT(kind, qos) \
-               DISPATCH_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX( \
-               DISPATCH_CLOCK_##kind, DISPATCH_TIMER_QOS_##qos))
-
-struct dispatch_timer_s _dispatch_timer[] =  {
-       DISPATCH_TIMER_INIT(WALL, NORMAL),
-       DISPATCH_TIMER_INIT(WALL, CRITICAL),
-       DISPATCH_TIMER_INIT(WALL, BACKGROUND),
-       DISPATCH_TIMER_INIT(MACH, NORMAL),
-       DISPATCH_TIMER_INIT(MACH, CRITICAL),
-       DISPATCH_TIMER_INIT(MACH, BACKGROUND),
-};
-#define DISPATCH_TIMER_COUNT \
-               ((sizeof(_dispatch_timer) / sizeof(_dispatch_timer[0])))
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
+               void *ctxt, void *handler, bool block)
+{
+       dispatch_timer_source_refs_t dt;
+       dispatch_source_t ds;
+       uint64_t leeway, delta;
 
-#if __linux__
-#define DISPATCH_KEVENT_TIMER_UDATA(tidx) \
-               (void*)&_dispatch_kevent_timer[tidx]
-#else
-#define DISPATCH_KEVENT_TIMER_UDATA(tidx) \
-               (uintptr_t)&_dispatch_kevent_timer[tidx]
+       if (when == DISPATCH_TIME_FOREVER) {
+#if DISPATCH_DEBUG
+               DISPATCH_CLIENT_CRASH(0, "dispatch_after called with 'when' == infinity");
 #endif
-#ifdef __LP64__
-#define DISPATCH_KEVENT_TIMER_UDATA_INITIALIZER(tidx) \
-               .udata = DISPATCH_KEVENT_TIMER_UDATA(tidx)
-#else // __LP64__
-// dynamic initialization in _dispatch_timers_init()
-#define DISPATCH_KEVENT_TIMER_UDATA_INITIALIZER(tidx) \
-               .udata = 0
-#endif // __LP64__
-#define DISPATCH_KEVENT_TIMER_INITIALIZER(tidx) \
-       [tidx] = { \
-               .dk_kevent = { \
-                       .ident = tidx, \
-                       .filter = DISPATCH_EVFILT_TIMER, \
-                       DISPATCH_KEVENT_TIMER_UDATA_INITIALIZER(tidx), \
-               }, \
-               .dk_sources = TAILQ_HEAD_INITIALIZER( \
-                               _dispatch_kevent_timer[tidx].dk_sources), \
-       }
-#define DISPATCH_KEVENT_TIMER_INIT(kind, qos) \
-               DISPATCH_KEVENT_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX( \
-               DISPATCH_CLOCK_##kind, DISPATCH_TIMER_QOS_##qos))
-
-struct dispatch_kevent_s _dispatch_kevent_timer[] = {
-       DISPATCH_KEVENT_TIMER_INIT(WALL, NORMAL),
-       DISPATCH_KEVENT_TIMER_INIT(WALL, CRITICAL),
-       DISPATCH_KEVENT_TIMER_INIT(WALL, BACKGROUND),
-       DISPATCH_KEVENT_TIMER_INIT(MACH, NORMAL),
-       DISPATCH_KEVENT_TIMER_INIT(MACH, CRITICAL),
-       DISPATCH_KEVENT_TIMER_INIT(MACH, BACKGROUND),
-       DISPATCH_KEVENT_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX_DISARM),
-};
-#define DISPATCH_KEVENT_TIMER_COUNT \
-               ((sizeof(_dispatch_kevent_timer) / sizeof(_dispatch_kevent_timer[0])))
-
-#define DISPATCH_KEVENT_TIMEOUT_IDENT_MASK (~0ull << 8)
-#define DISPATCH_KEVENT_TIMEOUT_INITIALIZER(tidx, note) \
-       [tidx] = { \
-               .ident = DISPATCH_KEVENT_TIMEOUT_IDENT_MASK|(tidx), \
-               .filter = EVFILT_TIMER, \
-               .flags = EV_ONESHOT, \
-               .fflags = NOTE_ABSOLUTE|NOTE_NSECONDS|NOTE_LEEWAY|(note), \
-       }
-#define DISPATCH_KEVENT_TIMEOUT_INIT(kind, qos, note) \
-               DISPATCH_KEVENT_TIMEOUT_INITIALIZER(DISPATCH_TIMER_INDEX( \
-               DISPATCH_CLOCK_##kind, DISPATCH_TIMER_QOS_##qos), note)
-
-_dispatch_kevent_qos_s _dispatch_kevent_timeout[] = {
-       DISPATCH_KEVENT_TIMEOUT_INIT(WALL, NORMAL, NOTE_MACH_CONTINUOUS_TIME),
-       DISPATCH_KEVENT_TIMEOUT_INIT(WALL, CRITICAL, NOTE_MACH_CONTINUOUS_TIME | NOTE_CRITICAL),
-       DISPATCH_KEVENT_TIMEOUT_INIT(WALL, BACKGROUND, NOTE_MACH_CONTINUOUS_TIME | NOTE_BACKGROUND),
-       DISPATCH_KEVENT_TIMEOUT_INIT(MACH, NORMAL, 0),
-       DISPATCH_KEVENT_TIMEOUT_INIT(MACH, CRITICAL, NOTE_CRITICAL),
-       DISPATCH_KEVENT_TIMEOUT_INIT(MACH, BACKGROUND, NOTE_BACKGROUND),
-};
-#define DISPATCH_KEVENT_TIMEOUT_COUNT \
-               ((sizeof(_dispatch_kevent_timeout) / sizeof(_dispatch_kevent_timeout[0])))
-static_assert(DISPATCH_KEVENT_TIMEOUT_COUNT == DISPATCH_TIMER_INDEX_COUNT - 1,
-               "should have a kevent for everything but disarm (ddt assumes this)");
+               return;
+       }
 
-#define DISPATCH_KEVENT_COALESCING_WINDOW_INIT(qos, ms) \
-               [DISPATCH_TIMER_QOS_##qos] = 2ull * (ms) * NSEC_PER_MSEC
+       delta = _dispatch_timeout(when);
+       if (delta == 0) {
+               if (block) {
+                       return dispatch_async(queue, handler);
+               }
+               return dispatch_async_f(queue, ctxt, handler);
+       }
+       leeway = delta / 10; // <rdar://problem/13447496>
 
-static const uint64_t _dispatch_kevent_coalescing_window[] = {
-       DISPATCH_KEVENT_COALESCING_WINDOW_INIT(NORMAL, 75),
-       DISPATCH_KEVENT_COALESCING_WINDOW_INIT(CRITICAL, 1),
-       DISPATCH_KEVENT_COALESCING_WINDOW_INIT(BACKGROUND, 100),
-};
+       if (leeway < NSEC_PER_MSEC) leeway = NSEC_PER_MSEC;
+       if (leeway > 60 * NSEC_PER_SEC) leeway = 60 * NSEC_PER_SEC;
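// A minimal sketch of the leeway rule applied above: 10% of the remaining
// delay, clamped to [1ms, 60s]. (Standalone illustration; the SKETCH_NSEC_*
// constants stand in for the usual NSEC_PER_* definitions.)
#include <stdint.h>

#define SKETCH_NSEC_PER_MSEC 1000000ull
#define SKETCH_NSEC_PER_SEC  1000000000ull

static uint64_t
sketch_after_leeway(uint64_t delta_ns)
{
        uint64_t leeway = delta_ns / 10; // 10% of the delay
        if (leeway < SKETCH_NSEC_PER_MSEC) leeway = SKETCH_NSEC_PER_MSEC;
        if (leeway > 60 * SKETCH_NSEC_PER_SEC) leeway = 60 * SKETCH_NSEC_PER_SEC;
        return leeway; // e.g. a 2s delay yields 200ms of leeway
}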
 
-#define _dispatch_timers_insert(tidx, dra, dr, dr_list, dta, dt, dt_list) ({ \
-       typeof(dr) dri = NULL; typeof(dt) dti; \
-       if (tidx != DISPATCH_TIMER_INDEX_DISARM) { \
-               TAILQ_FOREACH(dri, &dra[tidx].dk_sources, dr_list) { \
-                       if (ds_timer(dr).target < ds_timer(dri).target) { \
-                               break; \
-                       } \
-               } \
-               TAILQ_FOREACH(dti, &dta[tidx].dt_sources, dt_list) { \
-                       if (ds_timer(dt).deadline < ds_timer(dti).deadline) { \
-                               break; \
-                       } \
-               } \
-               if (dti) { \
-                       TAILQ_INSERT_BEFORE(dti, dt, dt_list); \
-               } else { \
-                       TAILQ_INSERT_TAIL(&dta[tidx].dt_sources, dt, dt_list); \
-               } \
-       } \
-       if (dri) { \
-               TAILQ_INSERT_BEFORE(dri, dr, dr_list); \
-       } else { \
-               TAILQ_INSERT_TAIL(&dra[tidx].dk_sources, dr, dr_list); \
-       } \
-       })
-
-#define _dispatch_timers_remove(tidx, dk, dra, dr, dr_list, dta, dt, dt_list) \
-       ({ \
-       if (tidx != DISPATCH_TIMER_INDEX_DISARM) { \
-               TAILQ_REMOVE(&dta[tidx].dt_sources, dt, dt_list); \
-       } \
-       TAILQ_REMOVE(dk ? &(*(dk)).dk_sources : &dra[tidx].dk_sources, dr, \
-                       dr_list); })
-
-#define _dispatch_timers_check(dra, dta) ({ \
-       unsigned int timerm = _dispatch_timers_mask; \
-       bool update = false; \
-       unsigned int tidx; \
-       for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { \
-               if (!(timerm & (1 << tidx))){ \
-                       continue; \
-               } \
-               dispatch_timer_source_refs_t dr = (dispatch_timer_source_refs_t) \
-                               TAILQ_FIRST(&dra[tidx].dk_sources); \
-               dispatch_timer_source_refs_t dt = (dispatch_timer_source_refs_t) \
-                               TAILQ_FIRST(&dta[tidx].dt_sources); \
-               uint64_t target = dr ? ds_timer(dr).target : UINT64_MAX; \
-               uint64_t deadline = dt ? ds_timer(dt).deadline : UINT64_MAX; \
-               if (target != dta[tidx].target) { \
-                       dta[tidx].target = target; \
-                       update = true; \
-               } \
-               if (deadline != dta[tidx].deadline) { \
-                       dta[tidx].deadline = deadline; \
-                       update = true; \
-               } \
-       } \
-       update; })
-
-static bool _dispatch_timers_reconfigure, _dispatch_timer_expired;
-static unsigned int _dispatch_timers_mask;
-static bool _dispatch_timers_force_max_leeway;
+       // this function can and should be optimized to not use a dispatch source
+       ds = dispatch_source_create(&_dispatch_source_type_after, 0, 0, queue);
+       dt = ds->ds_timer_refs;
 
-static void
-_dispatch_timers_init(void)
-{
-#ifndef __LP64__
-       unsigned int tidx;
-       for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) {
-               _dispatch_kevent_timer[tidx].dk_kevent.udata =
-                               DISPATCH_KEVENT_TIMER_UDATA(tidx);
+       dispatch_continuation_t dc = _dispatch_continuation_alloc();
+       if (block) {
+               _dispatch_continuation_init(dc, ds, handler, 0, 0, 0);
+       } else {
+               _dispatch_continuation_init_f(dc, ds, ctxt, handler, 0, 0, 0);
        }
-#endif // __LP64__
-       if (slowpath(getenv("LIBDISPATCH_TIMERS_FORCE_MAX_LEEWAY"))) {
-               _dispatch_timers_force_max_leeway = true;
+       // reference `ds` so that it doesn't show up as a leak
+       dc->dc_data = ds;
+       _dispatch_trace_continuation_push(ds->_as_dq, dc);
+       os_atomic_store2o(dt, ds_handler[DS_EVENT_HANDLER], dc, relaxed);
+
+       if ((int64_t)when < 0) {
+               // wall clock
+               when = (dispatch_time_t)-((int64_t)when);
+       } else {
+               // absolute clock
+               dt->du_fflags |= DISPATCH_TIMER_CLOCK_MACH;
+               leeway = _dispatch_time_nano2mach(leeway);
        }
+       dt->dt_timer.target = when;
+       dt->dt_timer.interval = UINT64_MAX;
+       dt->dt_timer.deadline = when + leeway;
+       dispatch_activate(ds);
 }
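// A small sketch of the dispatch_time_t convention the branch above relies
// on: values that read as negative int64_t carry a negated wall-clock time,
// everything else is mach absolute time.
#include <assert.h>
#include <stdint.h>

static void
sketch_clock_encoding_check(void)
{
        uint64_t wall_ns = 1500000000ull;               // hypothetical wall time
        uint64_t encoded = (uint64_t)-(int64_t)wall_ns; // stored negated
        assert((int64_t)encoded < 0);                   // reads as wall clock
        assert((uint64_t)-((int64_t)encoded) == wall_ns); // decodes back
}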
 
-static inline void
-_dispatch_timers_unregister(dispatch_source_t ds, dispatch_kevent_t dk)
+DISPATCH_NOINLINE
+void
+dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt,
+               dispatch_function_t func)
 {
-       dispatch_source_refs_t dr = ds->ds_refs;
-       unsigned int tidx = (unsigned int)dk->dk_kevent.ident;
+       _dispatch_after(when, queue, ctxt, func, false);
+}
 
-       if (slowpath(ds_timer_aggregate(ds))) {
-               _dispatch_timer_aggregates_unregister(ds, tidx);
-       }
-       _dispatch_timers_remove(tidx, dk, _dispatch_kevent_timer, dr, dr_list,
-                       _dispatch_timer, (dispatch_timer_source_refs_t)dr, dt_list);
-       if (tidx != DISPATCH_TIMER_INDEX_DISARM) {
-               _dispatch_timers_reconfigure = true;
-               _dispatch_timers_mask |= 1 << tidx;
-       }
+#ifdef __BLOCKS__
+void
+dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
+               dispatch_block_t work)
+{
+       _dispatch_after(when, queue, NULL, work, true);
 }
+#endif
 
-// Updates the ordered list of timers based on next fire date for changes to ds.
-// Should only be called from the context of _dispatch_mgr_q.
-static void
-_dispatch_timers_update(dispatch_source_t ds)
+#pragma mark -
+#pragma mark dispatch_timers
+
+/*
+ * The dispatch_timer_heap_t structure is a double min-heap of timers,
+ * interleaving the by-target min-heap in the even slots, and the by-deadline
+ * in the odd ones.
+ *
+ * The min element of these is held inline in the dispatch_timer_heap_t
+ * structure, and further entries are held in segments.
+ *
+ * dth_segments is the number of allocated segments.
+ *
+ * Segment 0 has a size of `DISPATCH_HEAP_INIT_SEGMENT_CAPACITY` pointers.
+ * Segment k has a size of (DISPATCH_HEAP_INIT_SEGMENT_CAPACITY << (k - 1))
+ * pointers.
+ *
+ * Segment n (dth_segments - 1) is the last segment and points its final n
+ * entries to previous segments. Its address is held in the `dth_heap` field.
+ *
+ * segment n   [ regular timer pointers | n-1 | k | 0 ]
+ *                                         |    |   |
+ * segment n-1 <---------------------------'    |   |
+ * segment k   <--------------------------------'   |
+ * segment 0   <------------------------------------'
+ */
+#define DISPATCH_HEAP_INIT_SEGMENT_CAPACITY 8u
+
+/*
+ * There are two min-heaps stored interleaved in a single array,
+ * even indices are for the by-target min-heap, and odd indices for
+ * the by-deadline one.
+ */
+#define DTH_HEAP_ID_MASK (DTH_ID_COUNT - 1)
+#define DTH_HEAP_ID(idx) ((idx) & DTH_HEAP_ID_MASK)
+#define DTH_IDX_FOR_HEAP_ID(idx, heap_id) \
+               (((idx) & ~DTH_HEAP_ID_MASK) | (heap_id))
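// A minimal sketch of the interleaving, assuming DTH_ID_COUNT == 2: even
// indices live in the by-target heap (heap id 0), odd ones in the
// by-deadline heap, and the two views of a slot differ only in the low bit.
#include <assert.h>

static void
sketch_heap_id_check(void)
{
        enum { ID_MASK = 2 - 1 }; // DTH_HEAP_ID_MASK with DTH_ID_COUNT == 2
        assert((6 & ID_MASK) == 0);        // DTH_HEAP_ID(6): by-target heap
        assert((7 & ID_MASK) == 1);        // DTH_HEAP_ID(7): by-deadline heap
        assert(((6 & ~ID_MASK) | 1) == 7); // DTH_IDX_FOR_HEAP_ID(6, 1)
        assert(((7 & ~ID_MASK) | 0) == 6); // DTH_IDX_FOR_HEAP_ID(7, 0)
}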
+
+DISPATCH_ALWAYS_INLINE
+static inline uint32_t
+_dispatch_timer_heap_capacity(uint32_t segments)
 {
-       dispatch_kevent_t dk = ds->ds_dkev;
-       dispatch_source_refs_t dr = ds->ds_refs;
-       unsigned int tidx;
+       if (segments == 0) return 2;
+       uint32_t seg_no = segments - 1;
+       // for C = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY,
+       // 2 + C + SUM(C << (i-1), i = 1..seg_no) - seg_no
+       return 2 + (DISPATCH_HEAP_INIT_SEGMENT_CAPACITY << seg_no) - seg_no;
+}
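// A worked check of the capacity formula above, assuming
// DISPATCH_HEAP_INIT_SEGMENT_CAPACITY == 8: every added segment doubles the
// raw storage but donates seg_no slots to back-pointers.
#include <assert.h>

static void
sketch_heap_capacity_check(void)
{
        // segments : capacity
        //        0 : 2               (inline min slots only)
        //        1 : 2 + 8      = 10
        //        2 : 2 + 16 - 1 = 17
        //        3 : 2 + 32 - 2 = 32
        assert(2 + (8u << 0) - 0 == 10);
        assert(2 + (8u << 1) - 1 == 17);
        assert(2 + (8u << 2) - 2 == 32);
}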
 
-       DISPATCH_ASSERT_ON_MANAGER_QUEUE();
+DISPATCH_NOINLINE
+static void
+_dispatch_timer_heap_grow(dispatch_timer_heap_t dth)
+{
+       uint32_t seg_capacity = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY;
+       uint32_t seg_no = dth->dth_segments++;
+       void **heap, **heap_prev = dth->dth_heap;
 
-       // Do not reschedule timers unregistered with _dispatch_kevent_unregister()
-       if (slowpath(!dk)) {
-               return;
+       if (seg_no > 0) {
+               seg_capacity <<= (seg_no - 1);
        }
-       // Move timers that are disabled, suspended or have missed intervals to the
-       // disarmed list; resume or source invoke will rearm and re-enable them
-       if (!ds_timer(dr).target || DISPATCH_QUEUE_IS_SUSPENDED(ds) ||
-                       ds->ds_pending_data) {
-               tidx = DISPATCH_TIMER_INDEX_DISARM;
-               _dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED);
-               _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds,
-                               ds->ds_dkev);
-       } else {
-               tidx = _dispatch_source_timer_idx(dr);
+       heap = _dispatch_calloc(seg_capacity, sizeof(void *));
+       if (seg_no > 1) {
+               uint32_t prev_seg_no = seg_no - 1;
+               uint32_t prev_seg_capacity = seg_capacity >> 1;
+               memcpy(&heap[seg_capacity - prev_seg_no],
+                               &heap_prev[prev_seg_capacity - prev_seg_no],
+                               prev_seg_no * sizeof(void *));
        }
-       if (slowpath(ds_timer_aggregate(ds))) {
-               _dispatch_timer_aggregates_register(ds);
+       if (seg_no > 0) {
+               heap[seg_capacity - seg_no] = heap_prev;
        }
-       if (slowpath(!ds->ds_is_installed)) {
-               ds->ds_is_installed = true;
-               if (tidx != DISPATCH_TIMER_INDEX_DISARM) {
-                       _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED);
-                       _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds,
-                                       ds->ds_dkev);
-               }
-               _dispatch_object_debug(ds, "%s", __func__);
-               ds->ds_dkev = NULL;
-               free(dk);
-       } else {
-               _dispatch_timers_unregister(ds, dk);
+       dth->dth_heap = heap;
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_timer_heap_shrink(dispatch_timer_heap_t dth)
+{
+       uint32_t seg_capacity = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY;
+       uint32_t seg_no = --dth->dth_segments;
+       void **heap = dth->dth_heap, **heap_prev = NULL;
+
+       if (seg_no > 0) {
+               seg_capacity <<= (seg_no - 1);
+               heap_prev = heap[seg_capacity - seg_no];
        }
-       if (tidx != DISPATCH_TIMER_INDEX_DISARM) {
-               _dispatch_timers_reconfigure = true;
-               _dispatch_timers_mask |= 1 << tidx;
+       if (seg_no > 1) {
+               uint32_t prev_seg_no = seg_no - 1;
+               uint32_t prev_seg_capacity = seg_capacity >> 1;
+               memcpy(&heap_prev[prev_seg_capacity - prev_seg_no],
+                               &heap[seg_capacity - prev_seg_no],
+                               prev_seg_no * sizeof(void *));
        }
-       if (dk != &_dispatch_kevent_timer[tidx]){
-               ds->ds_dkev = &_dispatch_kevent_timer[tidx];
+       dth->dth_heap = heap_prev;
+       free(heap);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_timer_source_refs_t *
+_dispatch_timer_heap_get_slot(dispatch_timer_heap_t dth, uint32_t idx)
+{
+       uint32_t seg_no, segments = dth->dth_segments;
+       void **segment;
+
+       if (idx < DTH_ID_COUNT) {
+               return &dth->dth_min[idx];
+       }
+       idx -= DTH_ID_COUNT;
+
+       // Derive the segment number from the index. Naming
+       // DISPATCH_HEAP_INIT_SEGMENT_CAPACITY `C`, the segments index ranges are:
+       // 0: 0 .. (C - 1)
+       // 1: C .. 2 * C - 1
+       // k: 2^(k-1) * C .. 2^k * C - 1
+       // so `k` can be derived from the first bit set in `idx`
+       seg_no = (uint32_t)(__builtin_clz(DISPATCH_HEAP_INIT_SEGMENT_CAPACITY - 1) -
+                       __builtin_clz(idx | (DISPATCH_HEAP_INIT_SEGMENT_CAPACITY - 1)));
+       if (seg_no + 1 == segments) {
+               segment = dth->dth_heap;
+       } else {
+               uint32_t seg_capacity = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY;
+               seg_capacity <<= (segments - 2);
+               segment = dth->dth_heap[seg_capacity - seg_no - 1];
        }
-       _dispatch_timers_insert(tidx, _dispatch_kevent_timer, dr, dr_list,
-                       _dispatch_timer, (dispatch_timer_source_refs_t)dr, dt_list);
-       if (slowpath(ds_timer_aggregate(ds))) {
-               _dispatch_timer_aggregates_update(ds, tidx);
+       if (seg_no) {
+               idx -= DISPATCH_HEAP_INIT_SEGMENT_CAPACITY << (seg_no - 1);
        }
+       return (dispatch_timer_source_refs_t *)(segment + idx);
 }
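// A small worked check of the clz trick above, assuming
// C = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY = 8: the segment number is the
// bit-length difference between `idx | (C - 1)` and `C - 1`.
#include <assert.h>
#include <stdint.h>

static uint32_t
sketch_segment_for_index(uint32_t idx) // idx already offset past dth_min
{
        return (uint32_t)(__builtin_clz(8u - 1) - __builtin_clz(idx | (8u - 1)));
}

static void
sketch_segment_for_index_check(void)
{
        assert(sketch_segment_for_index(5) == 0);   // 0..7   -> segment 0
        assert(sketch_segment_for_index(8) == 1);   // 8..15  -> segment 1
        assert(sketch_segment_for_index(20) == 2);  // 16..31 -> segment 2
        assert(sketch_segment_for_index(32) == 3);  // 32..63 -> segment 3
}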
 
+DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_timers_run2(dispatch_clock_now_cache_t nows, unsigned int tidx)
+_dispatch_timer_heap_set(dispatch_timer_source_refs_t *slot,
+               dispatch_timer_source_refs_t dt, uint32_t idx)
 {
-       dispatch_source_refs_t dr;
-       dispatch_source_t ds;
-       uint64_t now, missed;
-
-       now = _dispatch_time_now_cached(DISPATCH_TIMER_CLOCK(tidx), nows);
-       while ((dr = TAILQ_FIRST(&_dispatch_kevent_timer[tidx].dk_sources))) {
-               ds = _dispatch_source_from_refs(dr);
-               // We may find timers on the wrong list due to a pending update from
-               // dispatch_source_set_timer. Force an update of the list in that case.
-               if (tidx != ds->ds_ident_hack) {
-                       _dispatch_timers_update(ds);
-                       continue;
-               }
-               if (!ds_timer(dr).target) {
-                       // No configured timers on the list
-                       break;
-               }
-               if (ds_timer(dr).target > now) {
-                       // Done running timers for now.
-                       break;
-               }
-               // Remove timers that are suspended or have missed intervals from the
-               // list, rearm after resume resp. source invoke will reenable them
-               if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || ds->ds_pending_data) {
-                       _dispatch_timers_update(ds);
-                       continue;
-               }
-               // Calculate number of missed intervals.
-               missed = (now - ds_timer(dr).target) / ds_timer(dr).interval;
-               if (++missed > INT_MAX) {
-                       missed = INT_MAX;
-               }
-               if (ds_timer(dr).interval < INT64_MAX) {
-                       ds_timer(dr).target += missed * ds_timer(dr).interval;
-                       ds_timer(dr).deadline = ds_timer(dr).target + ds_timer(dr).leeway;
-               } else {
-                       ds_timer(dr).target = UINT64_MAX;
-                       ds_timer(dr).deadline = UINT64_MAX;
-               }
-               _dispatch_timers_update(ds);
-               ds_timer(dr).last_fire = now;
-
-               unsigned long data;
-               data = os_atomic_add2o(ds, ds_pending_data,
-                               (unsigned long)missed, relaxed);
-               _dispatch_trace_timer_fire(dr, data, (unsigned long)missed);
-               dx_wakeup(ds, 0, DISPATCH_WAKEUP_FLUSH);
-               if (ds_timer(dr).flags & DISPATCH_TIMER_AFTER) {
-                       _dispatch_source_kevent_unregister(ds);
-               }
-       }
+       *slot = dt;
+       dt->dt_heap_entry[DTH_HEAP_ID(idx)] = idx;
 }
 
-DISPATCH_NOINLINE
-static void
-_dispatch_timers_run(dispatch_clock_now_cache_t nows)
+DISPATCH_ALWAYS_INLINE
+static inline uint32_t
+_dispatch_timer_heap_parent(uint32_t idx)
 {
-       unsigned int tidx;
-       for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) {
-               if (!TAILQ_EMPTY(&_dispatch_kevent_timer[tidx].dk_sources)) {
-                       _dispatch_timers_run2(nows, tidx);
-               }
-       }
+       uint32_t heap_id = DTH_HEAP_ID(idx);
+       idx = (idx - DTH_ID_COUNT) / 2; // go to the parent
+       return DTH_IDX_FOR_HEAP_ID(idx, heap_id);
 }
 
-#define DISPATCH_TIMERS_GET_DELAY_ALL (~0u)
-
-static inline unsigned int
-_dispatch_timers_get_delay(dispatch_clock_now_cache_t nows,
-               struct dispatch_timer_s timer[],
-               uint64_t *delay, uint64_t *leeway, unsigned int query)
+DISPATCH_ALWAYS_INLINE
+static inline uint32_t
+_dispatch_timer_heap_left_child(uint32_t idx)
 {
-       unsigned int tidx, ridx = DISPATCH_TIMER_COUNT, minidx, maxidx;
-       uint64_t tmp, delta = INT64_MAX, dldelta = INT64_MAX;
-
-       if (query == DISPATCH_TIMERS_GET_DELAY_ALL) {
-               minidx = 0;
-               maxidx = DISPATCH_TIMER_COUNT - 1;
-       } else {
-               minidx = maxidx = query;
-       }
-
-       for (tidx = minidx; tidx <= maxidx; tidx++) {
-               dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(tidx);
-               uint64_t target = timer[tidx].target;
-               if (target >= INT64_MAX) {
-                       continue;
-               }
-               uint64_t deadline = timer[tidx].deadline;
-               if (query != DISPATCH_TIMERS_GET_DELAY_ALL) {
-                       // Timer pre-coalescing <rdar://problem/13222034>
-                       unsigned int qos = DISPATCH_TIMER_QOS(tidx);
-                       uint64_t window = _dispatch_kevent_coalescing_window[qos];
-                       uint64_t latest = deadline > window ? deadline - window : 0;
-                       dispatch_source_refs_t dri;
-                       TAILQ_FOREACH(dri, &_dispatch_kevent_timer[tidx].dk_sources,
-                                       dr_list) {
-                               tmp = ds_timer(dri).target;
-                               if (tmp > latest) break;
-                               target = tmp;
-                       }
-               }
-               uint64_t now = _dispatch_time_now_cached(clock, nows);
-               if (target <= now) {
-                       delta = 0;
-                       break;
-               }
-               tmp = target - now;
-               if (clock != DISPATCH_CLOCK_WALL) {
-                       tmp = _dispatch_time_mach2nano(tmp);
-               }
-               if (tmp < INT64_MAX && tmp < delta) {
-                       ridx = tidx;
-                       delta = tmp;
-               }
-               dispatch_assert(target <= deadline);
-               tmp = deadline - now;
-               if (clock != DISPATCH_CLOCK_WALL) {
-                       tmp = _dispatch_time_mach2nano(tmp);
-               }
-               if (tmp < INT64_MAX && tmp < dldelta) {
-                       dldelta = tmp;
-               }
-       }
-       *delay = delta;
-       *leeway = delta && delta < INT64_MAX ? dldelta - delta : INT64_MAX;
-       return ridx;
+       uint32_t heap_id = DTH_HEAP_ID(idx);
+       // 2 * (idx - heap_id) + DTH_ID_COUNT + heap_id
+       return 2 * idx + DTH_ID_COUNT - heap_id;
 }
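// A minimal sketch of the interleaved parent/child arithmetic, assuming
// DTH_ID_COUNT == 2: each element stays inside its own min-heap because the
// low bit (the heap id) is preserved in both directions.
#include <assert.h>
#include <stdint.h>

static uint32_t
sketch_heap_parent(uint32_t idx)
{
        uint32_t heap_id = idx & 1;
        idx = (idx - 2) / 2;
        return (idx & ~1u) | heap_id;
}

static uint32_t
sketch_heap_left_child(uint32_t idx)
{
        return 2 * idx + 2 - (idx & 1);
}

static void
sketch_heap_index_check(void)
{
        assert(sketch_heap_left_child(0) == 2);  // by-target root -> left child
        assert(sketch_heap_left_child(1) == 3);  // by-deadline root -> left child
        assert(sketch_heap_parent(2) == 0 && sketch_heap_parent(4) == 0);
        assert(sketch_heap_parent(3) == 1 && sketch_heap_parent(5) == 1);
}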
 
-
-#ifdef __linux__
-// On Linux we map _dispatch_kevent_qos_s to struct kevent instead of
-// struct kevent64. We lose the kevent.ext[] members, and the timeout is
-// based on relative msec time vs. absolute nsec time.
-// For now we make the adjustments right here, until we either extend
-// libkqueue with a proper kevent64 API or remove kevent altogether and
-// move to a lower-level API (e.g. epoll or a kernel module).
-// Also, leeway is ignored.
-
-static void
-_dispatch_kevent_timer_set_delay(_dispatch_kevent_qos_s *ke, uint64_t delay,
-               uint64_t leeway, dispatch_clock_now_cache_t nows)
+#if DISPATCH_HAVE_TIMER_COALESCING
+DISPATCH_ALWAYS_INLINE
+static inline uint32_t
+_dispatch_timer_heap_walk_skip(uint32_t idx, uint32_t count)
 {
-       // call to update nows[]
-       _dispatch_time_now_cached(DISPATCH_CLOCK_WALL, nows);
-#ifdef KEVENT_NSEC_NOT_SUPPORTED
-       // adjust nsec based delay to msec based and ignore leeway
-       delay /= 1000000L;
-       if ((int64_t)(delay) <= 0) {
-               delay = 1; // if value <= 0 the dispatch will stop
-       }
-#else
-       ke->fflags |= NOTE_NSECONDS;
-#endif
-       ke->data = (int64_t)delay;
+       uint32_t heap_id = DTH_HEAP_ID(idx);
+
+       idx -= heap_id;
+       if (unlikely(idx + DTH_ID_COUNT == count)) {
+               // reaching `count` doesn't mean we're done, but there is a weird
+               // corner case if the last item of the heap is a left child:
+               //
+               //     /\
+               //    /  \
+               //   /  __\
+               //  /__/
+               //     ^
+               //
+               // The formula below would return the sibling of `idx` which is
+               // out of bounds. Fortunately, the correct answer is the same
+               // as for idx's parent
+               idx = _dispatch_timer_heap_parent(idx);
+       }
+
+       //
+       // When considering the index in a non-interleaved, 1-based array
+       // representation of a heap, hence looking at (idx / DTH_ID_COUNT + 1)
+       // for a given idx in our dual-heaps, that index is in one of two forms:
+       //
+       //     (a) 1xxxx011111    or    (b) 111111111
+       //         d    i    0              d       0
+       //
+       // The first bit set is the row of the binary tree node (0-based).
+       // The following digits from most to least significant represent the path
+       // to that node, where `0` is a left turn and `1` a right turn.
+       //
+       // For example 0b0101 (5) is a node on row 2 accessed going left then right:
+       //
+       // row 0          1
+       //              /   .
+       // row 1      2       3
+       //           . \     . .
+       // row 2    4   5   6   7
+       //         : : : : : : : :
+       //
+       // Skipping a sub-tree in walk order means going to the sibling of the last
+       // node reached after we turned left. If the node was of the form (a),
+       // this node is 1xxxx1, which for the above example is 0b0011 (3).
+       // If the node was of the form (b) then we never took a left, meaning
+       // we reached the last element in traversal order.
+       //
+
+       //
+       // we want to find
+       // - the least significant bit set to 0 in (idx / DTH_ID_COUNT + 1)
+       // - which is offset by log_2(DTH_ID_COUNT) from the position of the least
+       //   significant 0 in (idx + DTH_ID_COUNT + DTH_ID_COUNT - 1)
+       //   since idx is a multiple of DTH_ID_COUNT and DTH_ID_COUNT a power of 2.
+       // - which in turn is the same as the position of the least significant 1 in
+       //   ~(idx + DTH_ID_COUNT + DTH_ID_COUNT - 1)
+       //
+       dispatch_static_assert(powerof2(DTH_ID_COUNT));
+       idx += DTH_ID_COUNT + DTH_ID_COUNT - 1;
+       idx >>= __builtin_ctz(~idx);
+
+       //
+       // `idx` is now either:
+       // - 0 if it was the (b) case above, in which case the walk is done
+       // - 1xxxx0 as the position in a 0-based array representation of a
+       //   non-interleaved heap, so we just have to compute the interleaved index.
+       //
+       return likely(idx) ? DTH_ID_COUNT * idx + heap_id : UINT32_MAX;
 }
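// A standalone sketch of the subtree-skip bit trick above, assuming
// DTH_ID_COUNT == 2. Node numbers in the comments use the 1-based
// non-interleaved numbering from the explanation.
#include <assert.h>
#include <stdint.h>

static uint32_t
sketch_walk_skip(uint32_t idx, uint32_t count)
{
        uint32_t heap_id = idx & 1;
        idx -= heap_id;
        if (idx + 2 == count) {
                // the last element of the heap is a dangling left child:
                // fall back to its parent before computing the sibling
                idx = ((idx - 2) / 2) & ~1u;
        }
        idx += 2 + 2 - 1;
        idx >>= __builtin_ctz(~idx);
        return idx ? 2 * idx + heap_id : UINT32_MAX;
}

static void
sketch_walk_skip_check(void)
{
        // 7 nodes per heap (count == 14): skipping node 2 or node 5 lands on
        // node 3 (idx 4); skipping node 3 itself ends the walk.
        assert(sketch_walk_skip(2, 14) == 4);
        assert(sketch_walk_skip(8, 14) == 4);
        assert(sketch_walk_skip(4, 14) == UINT32_MAX);
        // 4 nodes per heap (count == 8): node 4 is a dangling left child and
        // skipping it must land on node 3, not past the end.
        assert(sketch_walk_skip(6, 8) == 4);
}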
 
-#else
-static void
-_dispatch_kevent_timer_set_delay(_dispatch_kevent_qos_s *ke, uint64_t delay,
-               uint64_t leeway, dispatch_clock_now_cache_t nows)
+DISPATCH_ALWAYS_INLINE
+static inline uint32_t
+_dispatch_timer_heap_walk_next(uint32_t idx, uint32_t count)
 {
-       delay += _dispatch_time_now_cached(DISPATCH_CLOCK_WALL, nows);
-       if (slowpath(_dispatch_timers_force_max_leeway)) {
-               ke->data = (int64_t)(delay + leeway);
-               ke->ext[1] = 0;
-       } else {
-               ke->data = (int64_t)delay;
-               ke->ext[1] = leeway;
+       //
+       // Goes to the next element in heap walk order, which is the prefix ordered
+       // walk of the tree.
+       //
+       // From a given node, the next item to return is the left child if it
+       // exists, else the first right sibling we find by walking our parent chain,
+       // which is exactly what _dispatch_timer_heap_walk_skip() returns.
+       //
+       uint32_t lchild = _dispatch_timer_heap_left_child(idx);
+       if (lchild < count) {
+               return lchild;
        }
+       return _dispatch_timer_heap_walk_skip(idx, count);
 }
-#endif // __linux__
 
-static bool
-_dispatch_timers_program2(dispatch_clock_now_cache_t nows,
-               _dispatch_kevent_qos_s *ke, unsigned int tidx)
-{
-       bool poll;
-       uint64_t delay, leeway;
-
-       _dispatch_timers_get_delay(nows, _dispatch_timer, &delay, &leeway, tidx);
-       poll = (delay == 0);
-       if (poll || delay == UINT64_MAX) {
-               _dispatch_trace_next_timer_set(NULL, DISPATCH_TIMER_QOS(tidx));
-               if (!ke->data) {
-                       return poll;
-               }
-               ke->data = 0;
-               ke->flags |= EV_DELETE;
-               ke->flags &= ~(EV_ADD|EV_ENABLE);
-       } else {
-               _dispatch_trace_next_timer_set(
-                               TAILQ_FIRST(&_dispatch_kevent_timer[tidx].dk_sources), DISPATCH_TIMER_QOS(tidx));
-               _dispatch_trace_next_timer_program(delay, DISPATCH_TIMER_QOS(tidx));
-               _dispatch_kevent_timer_set_delay(ke, delay, leeway, nows);
-               ke->flags |= EV_ADD|EV_ENABLE;
-               ke->flags &= ~EV_DELETE;
-#if DISPATCH_USE_KEVENT_WORKQUEUE
-               if (_dispatch_kevent_workqueue_enabled) {
-                       ke->qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
+DISPATCH_NOINLINE
+static uint64_t
+_dispatch_timer_heap_max_target_before(dispatch_timer_heap_t dth, uint64_t limit)
+{
+       dispatch_timer_source_refs_t dri;
+       uint32_t idx = _dispatch_timer_heap_left_child(DTH_TARGET_ID);
+       uint32_t count = dth->dth_count;
+       uint64_t tmp, target = dth->dth_min[DTH_TARGET_ID]->dt_timer.target;
+
+       while (idx < count) {
+               dri = *_dispatch_timer_heap_get_slot(dth, idx);
+               tmp = dri->dt_timer.target;
+               if (tmp > limit) {
+                       // skip subtree since none of the targets below can be before limit
+                       idx = _dispatch_timer_heap_walk_skip(idx, count);
+               } else {
+                       target = tmp;
+                       idx = _dispatch_timer_heap_walk_next(idx, count);
                }
-#endif
        }
-       _dispatch_kq_deferred_update(ke);
-       return poll;
+       return target;
 }
+#endif // DISPATCH_HAVE_TIMER_COALESCING
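// A simplified sketch of the pruned pre-order walk above, on a plain 0-based
// array min-heap: find the largest key <= limit, skipping every subtree whose
// root already exceeds it (min-heap order guarantees all of its descendants
// do too). Note the function above keeps the last qualifying value in walk
// order; this sketch tracks the maximum explicitly.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static size_t
sketch_preorder_skip(size_t i, size_t count) // next node after i's subtree
{
        for (;;) {
                if (i == 0) return SIZE_MAX;                // past the root: done
                if ((i & 1) && i + 1 < count) return i + 1; // right sibling
                i = (i - 1) / 2;                            // climb to the parent
        }
}

static uint64_t
sketch_max_key_before(const uint64_t *heap, size_t count, uint64_t limit)
{
        uint64_t best = heap[0]; // caller guarantees heap[0] <= limit
        size_t idx = 1, left;
        while (idx != SIZE_MAX && idx < count) {
                if (heap[idx] > limit) {
                        idx = sketch_preorder_skip(idx, count);
                } else {
                        if (heap[idx] > best) best = heap[idx];
                        left = 2 * idx + 1;
                        idx = left < count ? left : sketch_preorder_skip(idx, count);
                }
        }
        return best;
}

static void
sketch_max_key_before_check(void)
{
        //          1
        //        /   \
        //       5     3
        //      / \   / \
        //     9   6 4   8
        const uint64_t heap[] = { 1, 5, 3, 9, 6, 4, 8 };
        assert(sketch_max_key_before(heap, 7, 6) == 6);
        assert(sketch_max_key_before(heap, 7, 4) == 4);
        assert(sketch_max_key_before(heap, 7, 2) == 1);
}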
 
 DISPATCH_NOINLINE
-static bool
-_dispatch_timers_program(dispatch_clock_now_cache_t nows)
-{
-       bool poll = false;
-       unsigned int tidx, timerm = _dispatch_timers_mask;
-       for (tidx = 0; tidx < DISPATCH_KEVENT_TIMEOUT_COUNT; tidx++) {
-               if (!(timerm & 1 << tidx)){
-                       continue;
+static void
+_dispatch_timer_heap_resift(dispatch_timer_heap_t dth,
+               dispatch_timer_source_refs_t dt, uint32_t idx)
+{
+       dispatch_static_assert(offsetof(struct dispatch_timer_source_s, target) ==
+                       offsetof(struct dispatch_timer_source_s, heap_key[DTH_TARGET_ID]));
+       dispatch_static_assert(offsetof(struct dispatch_timer_source_s, deadline) ==
+                       offsetof(struct dispatch_timer_source_s, heap_key[DTH_DEADLINE_ID]));
+#define dth_cmp(hid, dt1, op, dt2) \
+               (((dt1)->dt_timer.heap_key)[hid] op ((dt2)->dt_timer.heap_key)[hid])
+
+       dispatch_timer_source_refs_t *pslot, pdt;
+       dispatch_timer_source_refs_t *cslot, cdt;
+       dispatch_timer_source_refs_t *rslot, rdt;
+       uint32_t cidx, dth_count = dth->dth_count;
+       dispatch_timer_source_refs_t *slot;
+       int heap_id = DTH_HEAP_ID(idx);
+       bool sifted_up = false;
+
+       // try to sift up
+
+       slot = _dispatch_timer_heap_get_slot(dth, idx);
+       while (idx >= DTH_ID_COUNT) {
+               uint32_t pidx = _dispatch_timer_heap_parent(idx);
+               pslot = _dispatch_timer_heap_get_slot(dth, pidx);
+               pdt = *pslot;
+               if (dth_cmp(heap_id, pdt, <=, dt)) {
+                       break;
                }
-               poll |= _dispatch_timers_program2(nows, &_dispatch_kevent_timeout[tidx],
-                               tidx);
+               _dispatch_timer_heap_set(slot, pdt, idx);
+               slot = pslot;
+               idx = pidx;
+               sifted_up = true;
+       }
+       if (sifted_up) {
+               goto done;
        }
-       return poll;
-}
 
-DISPATCH_NOINLINE
-static bool
-_dispatch_timers_configure(void)
-{
-       _dispatch_timer_aggregates_check();
-       // Find out if there is a new target/deadline on the timer lists
-       return _dispatch_timers_check(_dispatch_kevent_timer, _dispatch_timer);
+       // try to sift down
+
+       while ((cidx = _dispatch_timer_heap_left_child(idx)) < dth_count) {
+               uint32_t ridx = cidx + DTH_ID_COUNT;
+               cslot = _dispatch_timer_heap_get_slot(dth, cidx);
+               cdt = *cslot;
+               if (ridx < dth_count) {
+                       rslot = _dispatch_timer_heap_get_slot(dth, ridx);
+                       rdt = *rslot;
+                       if (dth_cmp(heap_id, cdt, >, rdt)) {
+                               cidx = ridx;
+                               cdt = rdt;
+                               cslot = rslot;
+                       }
+               }
+               if (dth_cmp(heap_id, dt, <=, cdt)) {
+                       break;
+               }
+               _dispatch_timer_heap_set(slot, cdt, idx);
+               slot = cslot;
+               idx = cidx;
+       }
+
+done:
+       _dispatch_timer_heap_set(slot, dt, idx);
+#undef dth_cmp
 }
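// A compact sketch of the same "sift up, else sift down" shape on a plain
// 0-based array min-heap; the function above performs it once per interleaved
// heap, comparing on the appropriate heap key.
#include <stddef.h>
#include <stdint.h>

static void
sketch_heap_resift(uint64_t *h, size_t count, size_t idx)
{
        uint64_t v = h[idx];
        while (idx && v < h[(idx - 1) / 2]) {      // sift up toward the root
                h[idx] = h[(idx - 1) / 2];
                idx = (idx - 1) / 2;
        }
        for (;;) {                                 // then sift down if needed
                size_t c = 2 * idx + 1;
                if (c >= count) break;
                if (c + 1 < count && h[c + 1] < h[c]) c++; // pick smaller child
                if (v <= h[c]) break;
                h[idx] = h[c];
                idx = c;
        }
        h[idx] = v;
}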
 
-#if HAVE_MACH
+DISPATCH_ALWAYS_INLINE
 static void
-_dispatch_timers_calendar_change(void)
+_dispatch_timer_heap_insert(dispatch_timer_heap_t dth,
+               dispatch_timer_source_refs_t dt)
 {
-       unsigned int qos;
+       uint32_t idx = (dth->dth_count += DTH_ID_COUNT) - DTH_ID_COUNT;
+
+       DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_TARGET_ID], ==,
+                       DTH_INVALID_ID, "target idx");
+       DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_DEADLINE_ID], ==,
+                       DTH_INVALID_ID, "deadline idx");
+
+       if (idx == 0) {
+               dt->dt_heap_entry[DTH_TARGET_ID] = DTH_TARGET_ID;
+               dt->dt_heap_entry[DTH_DEADLINE_ID] = DTH_DEADLINE_ID;
+               dth->dth_min[DTH_TARGET_ID] = dth->dth_min[DTH_DEADLINE_ID] = dt;
+               return;
+       }
 
-       // calendar change may have gone past the wallclock deadline
-       _dispatch_timer_expired = true;
-       for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) {
-               _dispatch_timers_mask |=
-                               1 << DISPATCH_TIMER_INDEX(DISPATCH_CLOCK_WALL, qos);
+       if (unlikely(idx + DTH_ID_COUNT >
+                       _dispatch_timer_heap_capacity(dth->dth_segments))) {
+               _dispatch_timer_heap_grow(dth);
        }
+       _dispatch_timer_heap_resift(dth, dt, idx + DTH_TARGET_ID);
+       _dispatch_timer_heap_resift(dth, dt, idx + DTH_DEADLINE_ID);
 }
-#endif
 
+DISPATCH_NOINLINE
 static void
-_dispatch_timers_kevent(_dispatch_kevent_qos_s *ke)
+_dispatch_timer_heap_remove(dispatch_timer_heap_t dth,
+               dispatch_timer_source_refs_t dt)
 {
-       dispatch_assert(ke->data > 0);
-       dispatch_assert((ke->ident & DISPATCH_KEVENT_TIMEOUT_IDENT_MASK) ==
-                       DISPATCH_KEVENT_TIMEOUT_IDENT_MASK);
-       unsigned int tidx = ke->ident & ~DISPATCH_KEVENT_TIMEOUT_IDENT_MASK;
-       dispatch_assert(tidx < DISPATCH_KEVENT_TIMEOUT_COUNT);
-       dispatch_assert(_dispatch_kevent_timeout[tidx].data != 0);
-       _dispatch_kevent_timeout[tidx].data = 0; // kevent deleted via EV_ONESHOT
-       _dispatch_timer_expired = true;
-       _dispatch_timers_mask |= 1 << tidx;
-       _dispatch_trace_next_timer_wake(DISPATCH_TIMER_QOS(tidx));
-}
+       uint32_t idx = (dth->dth_count -= DTH_ID_COUNT);
 
-static inline bool
-_dispatch_mgr_timers(void)
-{
-       dispatch_clock_now_cache_s nows = { };
-       bool expired = slowpath(_dispatch_timer_expired);
-       if (expired) {
-               _dispatch_timers_run(&nows);
+       DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_TARGET_ID], !=,
+                       DTH_INVALID_ID, "target idx");
+       DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_DEADLINE_ID], !=,
+                       DTH_INVALID_ID, "deadline idx");
+
+       if (idx == 0) {
+               DISPATCH_TIMER_ASSERT(dth->dth_min[DTH_TARGET_ID], ==, dt,
+                               "target slot");
+               DISPATCH_TIMER_ASSERT(dth->dth_min[DTH_DEADLINE_ID], ==, dt,
+                               "deadline slot");
+               dth->dth_min[DTH_TARGET_ID] = dth->dth_min[DTH_DEADLINE_ID] = NULL;
+               goto clear_heap_entry;
        }
-       bool reconfigure = slowpath(_dispatch_timers_reconfigure);
-       if (reconfigure || expired) {
-               if (reconfigure) {
-                       reconfigure = _dispatch_timers_configure();
-                       _dispatch_timers_reconfigure = false;
-               }
-               if (reconfigure || expired) {
-                       expired = _dispatch_timer_expired = _dispatch_timers_program(&nows);
-                       expired = expired || _dispatch_mgr_q.dq_items_tail;
+
+       for (uint32_t heap_id = 0; heap_id < DTH_ID_COUNT; heap_id++) {
+               dispatch_timer_source_refs_t *slot, last_dt;
+               slot = _dispatch_timer_heap_get_slot(dth, idx + heap_id);
+               last_dt = *slot; *slot = NULL;
+               if (last_dt != dt) {
+                       uint32_t removed_idx = dt->dt_heap_entry[heap_id];
+                       _dispatch_timer_heap_resift(dth, last_dt, removed_idx);
                }
-               _dispatch_timers_mask = 0;
        }
-       return expired;
-}
-
-#pragma mark -
-#pragma mark dispatch_timer_aggregate
-
-typedef struct {
-       TAILQ_HEAD(, dispatch_timer_source_aggregate_refs_s) dk_sources;
-} dispatch_timer_aggregate_refs_s;
-
-typedef struct dispatch_timer_aggregate_s {
-       DISPATCH_QUEUE_HEADER(queue);
-       TAILQ_ENTRY(dispatch_timer_aggregate_s) dta_list;
-       dispatch_timer_aggregate_refs_s
-                       dta_kevent_timer[DISPATCH_KEVENT_TIMER_COUNT];
-       struct {
-               DISPATCH_TIMER_STRUCT(dispatch_timer_source_aggregate_refs_s);
-       } dta_timer[DISPATCH_TIMER_COUNT];
-       struct dispatch_timer_s dta_timer_data[DISPATCH_TIMER_COUNT];
-       unsigned int dta_refcount;
-} DISPATCH_QUEUE_ALIGN dispatch_timer_aggregate_s;
-
-typedef TAILQ_HEAD(, dispatch_timer_aggregate_s) dispatch_timer_aggregates_s;
-static dispatch_timer_aggregates_s _dispatch_timer_aggregates =
-               TAILQ_HEAD_INITIALIZER(_dispatch_timer_aggregates);
-
-dispatch_timer_aggregate_t
-dispatch_timer_aggregate_create(void)
-{
-       unsigned int tidx;
-       dispatch_timer_aggregate_t dta = _dispatch_alloc(DISPATCH_VTABLE(queue),
-                       sizeof(struct dispatch_timer_aggregate_s));
-       _dispatch_queue_init(dta->_as_dq, DQF_NONE,
-                       DISPATCH_QUEUE_WIDTH_MAX, false);
-       dta->do_targetq = _dispatch_get_root_queue(
-                       _DISPATCH_QOS_CLASS_USER_INITIATED, true);
-       //FIXME: aggregates need custom vtable
-       //dta->dq_label = "timer-aggregate";
-       for (tidx = 0; tidx < DISPATCH_KEVENT_TIMER_COUNT; tidx++) {
-               TAILQ_INIT(&dta->dta_kevent_timer[tidx].dk_sources);
+       if (unlikely(idx <= _dispatch_timer_heap_capacity(dth->dth_segments - 1))) {
+               _dispatch_timer_heap_shrink(dth);
        }
-       for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) {
-               TAILQ_INIT(&dta->dta_timer[tidx].dt_sources);
-               dta->dta_timer[tidx].target = UINT64_MAX;
-               dta->dta_timer[tidx].deadline = UINT64_MAX;
-               dta->dta_timer_data[tidx].target = UINT64_MAX;
-               dta->dta_timer_data[tidx].deadline = UINT64_MAX;
-       }
-       return (dispatch_timer_aggregate_t)_dispatch_introspection_queue_create(
-                       dta->_as_dq);
-}
 
-typedef struct dispatch_timer_delay_s {
-       dispatch_timer_t timer;
-       uint64_t delay, leeway;
-} *dispatch_timer_delay_t;
-
-static void
-_dispatch_timer_aggregate_get_delay(void *ctxt)
-{
-       dispatch_timer_delay_t dtd = ctxt;
-       dispatch_clock_now_cache_s nows = { };
-       _dispatch_timers_get_delay(&nows, dtd->timer, &dtd->delay, &dtd->leeway,
-                       DISPATCH_TIMERS_GET_DELAY_ALL);
+clear_heap_entry:
+       dt->dt_heap_entry[DTH_TARGET_ID] = DTH_INVALID_ID;
+       dt->dt_heap_entry[DTH_DEADLINE_ID] = DTH_INVALID_ID;
 }
 
-uint64_t
-dispatch_timer_aggregate_get_delay(dispatch_timer_aggregate_t dta,
-               uint64_t *leeway_ptr)
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_timer_heap_update(dispatch_timer_heap_t dth,
+               dispatch_timer_source_refs_t dt)
 {
-       struct dispatch_timer_delay_s dtd = {
-               .timer = dta->dta_timer_data,
-       };
-       dispatch_sync_f(dta->_as_dq, &dtd, _dispatch_timer_aggregate_get_delay);
-       if (leeway_ptr) {
-               *leeway_ptr = dtd.leeway;
-       }
-       return dtd.delay;
-}
+       DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_TARGET_ID], !=,
+                       DTH_INVALID_ID, "target idx");
+       DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_DEADLINE_ID], !=,
+                       DTH_INVALID_ID, "deadline idx");
 
-static void
-_dispatch_timer_aggregate_update(void *ctxt)
-{
-       dispatch_timer_aggregate_t dta = (void*)_dispatch_queue_get_current();
-       dispatch_timer_t dtau = ctxt;
-       unsigned int tidx;
-       for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) {
-               dta->dta_timer_data[tidx].target = dtau[tidx].target;
-               dta->dta_timer_data[tidx].deadline = dtau[tidx].deadline;
-       }
-       free(dtau);
+
+       _dispatch_timer_heap_resift(dth, dt, dt->dt_heap_entry[DTH_DEADLINE_ID]);
 }
 
-DISPATCH_NOINLINE
-static void
-_dispatch_timer_aggregates_configure(void)
+DISPATCH_ALWAYS_INLINE
+static bool
+_dispatch_timer_heap_has_new_min(dispatch_timer_heap_t dth,
+               uint32_t count, uint32_t mask)
 {
-       dispatch_timer_aggregate_t dta;
-       dispatch_timer_t dtau;
-       TAILQ_FOREACH(dta, &_dispatch_timer_aggregates, dta_list) {
-               if (!_dispatch_timers_check(dta->dta_kevent_timer, dta->dta_timer)) {
+       dispatch_timer_source_refs_t dt;
+       bool changed = false;
+       uint64_t tmp;
+       uint32_t tidx;
+
+       for (tidx = 0; tidx < count; tidx++) {
+               if (!(mask & (1u << tidx))) {
                        continue;
                }
-               dtau = _dispatch_calloc(DISPATCH_TIMER_COUNT, sizeof(*dtau));
-               memcpy(dtau, dta->dta_timer, sizeof(dta->dta_timer));
-               _dispatch_barrier_async_detached_f(dta->_as_dq, dtau,
-                               _dispatch_timer_aggregate_update);
+
+               dt = dth[tidx].dth_min[DTH_TARGET_ID];
+               tmp = dt ? dt->dt_timer.target : UINT64_MAX;
+               if (dth[tidx].dth_target != tmp) {
+                       dth[tidx].dth_target = tmp;
+                       changed = true;
+               }
+               dt = dth[tidx].dth_min[DTH_DEADLINE_ID];
+               tmp = dt ? dt->dt_timer.deadline : UINT64_MAX;
+               if (dth[tidx].dth_deadline != tmp) {
+                       dth[tidx].dth_deadline = tmp;
+                       changed = true;
+               }
        }
+       return changed;
 }
 
 static inline void
-_dispatch_timer_aggregates_check(void)
+_dispatch_timers_unregister(dispatch_timer_source_refs_t dt)
 {
-       if (fastpath(TAILQ_EMPTY(&_dispatch_timer_aggregates))) {
-               return;
-       }
-       _dispatch_timer_aggregates_configure();
+       uint32_t tidx = dt->du_ident;
+       dispatch_timer_heap_t heap = &_dispatch_timers_heap[tidx];
+
+       _dispatch_timer_heap_remove(heap, dt);
+       _dispatch_timers_reconfigure = true;
+       _dispatch_timers_processing_mask |= 1 << tidx;
+       dispatch_assert(dt->du_wlh == NULL || dt->du_wlh == DISPATCH_WLH_ANON);
+       dt->du_wlh = NULL;
 }
 
-static void
-_dispatch_timer_aggregates_register(dispatch_source_t ds)
+static inline void
+_dispatch_timers_register(dispatch_timer_source_refs_t dt, uint32_t tidx)
 {
-       dispatch_timer_aggregate_t dta = ds_timer_aggregate(ds);
-       if (!dta->dta_refcount++) {
-               TAILQ_INSERT_TAIL(&_dispatch_timer_aggregates, dta, dta_list);
+       dispatch_timer_heap_t heap = &_dispatch_timers_heap[tidx];
+       if (_dispatch_unote_registered(dt)) {
+               DISPATCH_TIMER_ASSERT(dt->du_ident, ==, tidx, "tidx");
+               _dispatch_timer_heap_update(heap, dt);
+       } else {
+               dt->du_ident = tidx;
+               _dispatch_timer_heap_insert(heap, dt);
        }
+       _dispatch_timers_reconfigure = true;
+       _dispatch_timers_processing_mask |= 1 << tidx;
+       dispatch_assert(dt->du_wlh == NULL || dt->du_wlh == DISPATCH_WLH_ANON);
+       dt->du_wlh = DISPATCH_WLH_ANON;
 }
 
-DISPATCH_NOINLINE
-static void
-_dispatch_timer_aggregates_update(dispatch_source_t ds, unsigned int tidx)
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_source_timer_tryarm(dispatch_source_t ds)
 {
-       dispatch_timer_aggregate_t dta = ds_timer_aggregate(ds);
-       dispatch_timer_source_aggregate_refs_t dr;
-       dr = (dispatch_timer_source_aggregate_refs_t)ds->ds_refs;
-       _dispatch_timers_insert(tidx, dta->dta_kevent_timer, dr, dra_list,
-                       dta->dta_timer, dr, dta_list);
+       dispatch_queue_flags_t oqf, nqf;
+       return os_atomic_rmw_loop2o(ds, dq_atomic_flags, oqf, nqf, relaxed, {
+               if (oqf & (DSF_CANCELED | DQF_RELEASED)) {
+                       // do not install a cancelled timer
+                       os_atomic_rmw_loop_give_up(break);
+               }
+               nqf = oqf | DSF_ARMED;
+       });
 }
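// A sketch of the try-arm CAS loop using C11 atomics in place of the
// os_atomic_rmw_loop2o() macro; the flag bit values here are illustrative
// placeholders, not libdispatch's real definitions.
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_DSF_CANCELED 0x1u
#define SKETCH_DQF_RELEASED 0x2u
#define SKETCH_DSF_ARMED    0x4u

static bool
sketch_timer_tryarm(_Atomic uint32_t *flags)
{
        uint32_t oqf = atomic_load_explicit(flags, memory_order_relaxed);
        do {
                if (oqf & (SKETCH_DSF_CANCELED | SKETCH_DQF_RELEASED)) {
                        return false; // do not install a cancelled timer
                }
        } while (!atomic_compare_exchange_weak_explicit(flags, &oqf,
                        oqf | SKETCH_DSF_ARMED,
                        memory_order_relaxed, memory_order_relaxed));
        return true;
}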
 
-DISPATCH_NOINLINE
+// Updates the timer heaps based on the next fire date for changes to ds.
+// Should only be called from the context of _dispatch_mgr_q.
 static void
-_dispatch_timer_aggregates_unregister(dispatch_source_t ds, unsigned int tidx)
+_dispatch_timers_update(dispatch_unote_t du, uint32_t flags)
 {
-       dispatch_timer_aggregate_t dta = ds_timer_aggregate(ds);
-       dispatch_timer_source_aggregate_refs_t dr;
-       dr = (dispatch_timer_source_aggregate_refs_t)ds->ds_refs;
-       _dispatch_timers_remove(tidx, (dispatch_timer_aggregate_refs_s*)NULL,
-                       dta->dta_kevent_timer, dr, dra_list, dta->dta_timer, dr, dta_list);
-       if (!--dta->dta_refcount) {
-               TAILQ_REMOVE(&_dispatch_timer_aggregates, dta, dta_list);
-       }
-}
-
-#pragma mark -
-#pragma mark dispatch_kqueue
-
-static int _dispatch_kq;
-
-#if DISPATCH_DEBUG_QOS && DISPATCH_USE_KEVENT_WORKQUEUE
-#define _dispatch_kevent_assert_valid_qos(ke)  ({ \
-               if (_dispatch_kevent_workqueue_enabled) { \
-                       const _dispatch_kevent_qos_s *_ke = (ke); \
-                       if (_ke->flags & (EV_ADD|EV_ENABLE)) { \
-                               _dispatch_assert_is_valid_qos_class(\
-                                               (pthread_priority_t)_ke->qos); \
-                               dispatch_assert(_ke->qos); \
-                       } \
-               } \
-       })
-#else
-#define _dispatch_kevent_assert_valid_qos(ke)  ((void)ke)
-#endif
+       dispatch_timer_source_refs_t dr = du._dt;
+       dispatch_source_t ds = _dispatch_source_from_refs(dr);
+       const char *verb = "updated";
+       bool will_register, disarm = false;
 
+       DISPATCH_ASSERT_ON_MANAGER_QUEUE();
 
-static void
-_dispatch_kq_init(void *context DISPATCH_UNUSED)
-{
-       _dispatch_fork_becomes_unsafe();
-#if DISPATCH_USE_KEVENT_WORKQUEUE
-       _dispatch_kevent_workqueue_init();
-       if (_dispatch_kevent_workqueue_enabled) {
-               int r;
-               const _dispatch_kevent_qos_s kev[] = {
-                       [0] = {
-                               .ident = 1,
-                               .filter = EVFILT_USER,
-                               .flags = EV_ADD|EV_CLEAR,
-                               .qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG,
-                       },
-                       [1] = {
-                               .ident = 1,
-                               .filter = EVFILT_USER,
-                               .fflags = NOTE_TRIGGER,
-                       },
-               };
-               _dispatch_kq = -1;
-retry:
-               r = kevent_qos(-1, kev, 2, NULL, 0, NULL, NULL,
-                               KEVENT_FLAG_WORKQ|KEVENT_FLAG_IMMEDIATE);
-               if (slowpath(r == -1)) {
-                       int err = errno;
-                       switch (err) {
-                       case EINTR:
-                               goto retry;
-                       default:
-                               DISPATCH_CLIENT_CRASH(err,
-                                               "Failed to initialize workqueue kevent");
-                               break;
-                       }
-               }
+       if (unlikely(dr->du_ident == DISPATCH_TIMER_IDENT_CANCELED)) {
+               dispatch_assert((flags & DISPATCH_TIMERS_RETAIN_2) == 0);
                return;
        }
-#endif // DISPATCH_USE_KEVENT_WORKQUEUE
-#if DISPATCH_USE_MGR_THREAD
-       static const _dispatch_kevent_qos_s kev = {
-               .ident = 1,
-               .filter = EVFILT_USER,
-               .flags = EV_ADD|EV_CLEAR,
-       };
 
-       _dispatch_fork_becomes_unsafe();
-#if DISPATCH_USE_GUARDED_FD
-       guardid_t guard = (uintptr_t)&kev;
-       _dispatch_kq = guarded_kqueue_np(&guard, GUARD_CLOSE | GUARD_DUP);
-#else
-       _dispatch_kq = kqueue();
-#endif
-       if (_dispatch_kq == -1) {
-               int err = errno;
-               switch (err) {
-               case EMFILE:
-                       DISPATCH_CLIENT_CRASH(err, "kqueue() failure: "
-                                       "process is out of file descriptors");
-                       break;
-               case ENFILE:
-                       DISPATCH_CLIENT_CRASH(err, "kqueue() failure: "
-                                       "system is out of file descriptors");
-                       break;
-               case ENOMEM:
-                       DISPATCH_CLIENT_CRASH(err, "kqueue() failure: "
-                                       "kernel is out of memory");
-                       break;
-               default:
-                       DISPATCH_INTERNAL_CRASH(err, "kqueue() failure");
-                       break;
+       // Unregister timers that are unconfigured, disabled, suspended or have
+       // missed intervals; dispatch_source_set_timer(), resume or source
+       // invoke will rearm and re-enable them
+       will_register = !(flags & DISPATCH_TIMERS_UNREGISTER) &&
+                       dr->dt_timer.target < INT64_MAX &&
+                       !os_atomic_load2o(ds, ds_pending_data, relaxed) &&
+                       !DISPATCH_QUEUE_IS_SUSPENDED(ds) &&
+                       !os_atomic_load2o(dr, dt_pending_config, relaxed);
+       if (likely(!_dispatch_unote_registered(dr))) {
+               dispatch_assert((flags & DISPATCH_TIMERS_RETAIN_2) == 0);
+               if (unlikely(!will_register || !_dispatch_source_timer_tryarm(ds))) {
+                       return;
                }
+               verb = "armed";
+       } else if (unlikely(!will_register)) {
+               disarm = true;
+               verb = "disarmed";
        }
-       (void)dispatch_assume_zero(kevent_qos(_dispatch_kq, &kev, 1, NULL, 0, NULL,
-                       NULL, 0));
-       _dispatch_queue_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0);
-#endif // DISPATCH_USE_MGR_THREAD
-}
 
-DISPATCH_NOINLINE
-static long
-_dispatch_kq_update(const _dispatch_kevent_qos_s *ke, int n)
-{
-       int i, r;
-       _dispatch_kevent_qos_s kev_error[n];
-       static dispatch_once_t pred;
-       dispatch_once_f(&pred, NULL, _dispatch_kq_init);
-
-       for (i = 0; i < n; i++) {
-               if (ke[i].filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) {
-                       _dispatch_kevent_debug_n("updating", ke + i, i, n);
-               }
+       // The heap owns a +2 on dispatch sources it references
+       //
+       // _dispatch_timers_run2() also sometimes passes DISPATCH_TIMERS_RETAIN_2
+       // when it wants to take over this +2 at the same time we are unregistering
+       // the timer from the heap.
+       //
+       // Compute our refcount balance according to these rules; if the balance
+       // would become negative we retain the source upfront, and if it is
+       // positive we drop the extraneous refcounts once we're done touching
+       // the source.
+       int refs = will_register ? -2 : 0;
+       if (_dispatch_unote_registered(dr) && !(flags & DISPATCH_TIMERS_RETAIN_2)) {
+               refs += 2;
+       }
+       if (refs < 0) {
+               dispatch_assert(refs == -2);
+               _dispatch_retain_2(ds);
        }
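        // Spelled out, the balance above covers these cases (a summary derived
        // from the code; the unregistered, not-will_register case returned early):
        //
        //   registered | RETAIN_2 | will_register | refs | action
        //   -----------+----------+---------------+------+-----------------------
        //   no         |    --    |      yes      |  -2  | retain before insert
        //   yes        |    no    |      yes      |   0  | heap keeps its +2
        //   yes        |   yes    |      yes      |  -2  | caller took the +2;
        //              |          |               |      | retain a fresh one
        //   yes        |    no    |      no       |  +2  | release after removal
        //   yes        |   yes    |      no       |   0  | caller took over the +2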
 
-       unsigned int flags = KEVENT_FLAG_ERROR_EVENTS;
-#if DISPATCH_USE_KEVENT_WORKQUEUE
-       if (_dispatch_kevent_workqueue_enabled) {
-               flags |= KEVENT_FLAG_WORKQ;
+       uint32_t tidx = _dispatch_source_timer_idx(dr);
+       if (unlikely(_dispatch_unote_registered(dr) &&
+                       (!will_register || dr->du_ident != tidx))) {
+               _dispatch_timers_unregister(dr);
+       }
+       if (likely(will_register)) {
+               _dispatch_timers_register(dr, tidx);
        }
-#endif
 
-retry:
-       r = kevent_qos(_dispatch_kq, ke, n, kev_error, n, NULL, NULL, flags);
-       if (slowpath(r == -1)) {
-               int err = errno;
-               switch (err) {
-               case EINTR:
-                       goto retry;
-               case EBADF:
-                       DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors");
-                       break;
-               default:
-                       (void)dispatch_assume_zero(err);
-                       break;
-               }
-               return err;
+       if (disarm) {
+               _dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED);
        }
-       for (i = 0, n = r; i < n; i++) {
-               if (kev_error[i].flags & EV_ERROR) {
-                       _dispatch_kevent_debug("returned error", &kev_error[i]);
-                       _dispatch_kevent_drain(&kev_error[i]);
-                       r = (int)kev_error[i].data;
-               } else {
-                       _dispatch_kevent_mgr_debug(&kev_error[i]);
-                       r = 0;
-               }
+       _dispatch_debug("kevent-source[%p]: %s timer[%p]", ds, verb, dr);
+       _dispatch_object_debug(ds, "%s", __func__);
+       if (refs > 0) {
+               dispatch_assert(refs == 2);
+               _dispatch_release_2_tailcall(ds);
        }
-       return r;
 }
 
-DISPATCH_ALWAYS_INLINE
-static void
-_dispatch_kq_update_all(const _dispatch_kevent_qos_s *kev, int n)
-{
-       (void)_dispatch_kq_update(kev, n);
-}
+#define DISPATCH_TIMER_MISSED_MARKER  1ul
 
 DISPATCH_ALWAYS_INLINE
-static long
-_dispatch_kq_update_one(const _dispatch_kevent_qos_s *kev)
-{
-       return _dispatch_kq_update(kev, 1);
-}
-
-static inline bool
-_dispatch_kevent_maps_to_same_knote(const _dispatch_kevent_qos_s *e1,
-               const _dispatch_kevent_qos_s *e2)
+static inline unsigned long
+_dispatch_source_timer_compute_missed(dispatch_timer_source_refs_t dt,
+               uint64_t now, unsigned long prev)
 {
-       return e1->filter == e2->filter &&
-                       e1->ident == e2->ident &&
-                       e1->udata == e2->udata;
+       uint64_t missed = (now - dt->dt_timer.target) / dt->dt_timer.interval;
+       if (++missed + prev > LONG_MAX) {
+               missed = LONG_MAX - prev;
+       }
+       if (dt->dt_timer.interval < INT64_MAX) {
+               uint64_t push_by = missed * dt->dt_timer.interval;
+               dt->dt_timer.target += push_by;
+               dt->dt_timer.deadline += push_by;
+       } else {
+               dt->dt_timer.target = UINT64_MAX;
+               dt->dt_timer.deadline = UINT64_MAX;
+       }
+       prev += missed;
+       return prev;
 }
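// A worked sketch of the missed-interval arithmetic above (omitting the
// LONG_MAX clamp and the deadline bump): push the target a whole number of
// intervals past `now` and count the skipped fires.
#include <assert.h>
#include <stdint.h>

static uint64_t
sketch_compute_missed(uint64_t *target, uint64_t interval, uint64_t now)
{
        uint64_t missed = (now - *target) / interval + 1;
        *target += missed * interval; // first fire strictly after `now`
        return missed;
}

static void
sketch_compute_missed_check(void)
{
        uint64_t target = 10;
        // now == 21 with a period of 4 covers the fires at 10, 14 and 18:
        // 3 missed intervals, and the next target lands at 22.
        assert(sketch_compute_missed(&target, 4, 21) == 3);
        assert(target == 22);
}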
 
-static inline int
-_dispatch_deferred_event_find_slot(dispatch_deferred_items_t ddi,
-               const _dispatch_kevent_qos_s *ke)
+DISPATCH_ALWAYS_INLINE
+static inline unsigned long
+_dispatch_source_timer_data(dispatch_source_t ds, dispatch_unote_t du)
 {
-       _dispatch_kevent_qos_s *events = ddi->ddi_eventlist;
-       int i;
+       dispatch_timer_source_refs_t dr = du._dt;
+       unsigned long data, prev, clear_prev = 0;
 
-       for (i = 0; i < ddi->ddi_nevents; i++) {
-               if (_dispatch_kevent_maps_to_same_knote(&events[i], ke)) {
-                       break;
+       os_atomic_rmw_loop2o(ds, ds_pending_data, prev, clear_prev, relaxed, {
+               data = prev >> 1;
+               if (unlikely(prev & DISPATCH_TIMER_MISSED_MARKER)) {
+                       os_atomic_rmw_loop_give_up(goto handle_missed_intervals);
                }
-       }
-       return i;
+       });
+       return data;
+
+handle_missed_intervals:
+       // The timer may be in _dispatch_source_invoke2() already for other
+       // reasons such as running the registration handler when ds_pending_data
+       // is changed by _dispatch_timers_run2() without holding the drain lock.
+       //
+       // We hence need dependency ordering to pair with the release barrier
+       // done by _dispatch_timers_run2() when setting the MISSED_MARKER bit.
+       os_atomic_thread_fence(dependency);
+       dr = os_atomic_force_dependency_on(dr, data);
+
+       uint64_t now = _dispatch_time_now(DISPATCH_TIMER_CLOCK(dr->du_ident));
+       if (now >= dr->dt_timer.target) {
+               OS_COMPILER_CAN_ASSUME(dr->dt_timer.interval < INT64_MAX);
+               data = _dispatch_source_timer_compute_missed(dr, now, data);
+       }
+
+       // When we see the MISSED_MARKER the manager has given up on this timer
+       // and expects the handler to call "resume".
+       //
+       // However, it may not have reflected this into the atomic flags yet
+       // so make sure _dispatch_source_invoke2() sees the timer is disarmed
+       //
+       // The subsequent _dispatch_source_refs_resume() will enqueue the source
+       // on the manager and make the changes to `ds_timer` above visible.
+       _dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED);
+       os_atomic_store2o(ds, ds_pending_data, 0, relaxed);
+       return data;
 }
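// A tiny sketch of the ds_pending_data encoding used here: the accumulated
// fire count lives in the high bits, and bit 0 is the marker the manager sets
// when it gives up on a suspended timer.
#include <assert.h>

static void
sketch_pending_data_check(void)
{
        unsigned long marker = 1ul;                 // DISPATCH_TIMER_MISSED_MARKER
        unsigned long pending = (7ul << 1) | marker;
        assert((pending >> 1) == 7);                // 7 accumulated fires
        assert(pending & marker);                   // handler must rearm via resume
}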
 
-static void
-_dispatch_kq_deferred_update(const _dispatch_kevent_qos_s *ke)
+static inline void
+_dispatch_timers_run2(dispatch_clock_now_cache_t nows, uint32_t tidx)
 {
-       dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
-       int slot;
-
-       _dispatch_kevent_assert_valid_qos(ke);
-       if (ddi) {
-               if (unlikely(ddi->ddi_nevents == ddi->ddi_maxevents)) {
-                       _dispatch_deferred_items_set(NULL);
-                       _dispatch_kq_update_all(ddi->ddi_eventlist, ddi->ddi_nevents);
-                       ddi->ddi_nevents = 0;
-                       _dispatch_deferred_items_set(ddi);
+       dispatch_timer_source_refs_t dr;
+       dispatch_source_t ds;
+       uint64_t data, pending_data;
+       uint64_t now = _dispatch_time_now_cached(DISPATCH_TIMER_CLOCK(tidx), nows);
+
+       while ((dr = _dispatch_timers_heap[tidx].dth_min[DTH_TARGET_ID])) {
+               DISPATCH_TIMER_ASSERT(dr->du_filter, ==, DISPATCH_EVFILT_TIMER,
+                               "invalid filter");
+               DISPATCH_TIMER_ASSERT(dr->du_ident, ==, tidx, "tidx");
+               DISPATCH_TIMER_ASSERT(dr->dt_timer.target, !=, 0, "missing target");
+               ds = _dispatch_source_from_refs(dr);
+               if (dr->dt_timer.target > now) {
+                       // Done running timers for now.
+                       break;
                }
-               if (ke->filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) {
-                       _dispatch_kevent_debug("deferred", ke);
+               if (dr->du_fflags & DISPATCH_TIMER_AFTER) {
+                       _dispatch_trace_timer_fire(dr, 1, 1);
+                       _dispatch_source_merge_evt(dr, EV_ONESHOT, 1, 0, 0);
+                       _dispatch_debug("kevent-source[%p]: fired after timer[%p]", ds, dr);
+                       _dispatch_object_debug(ds, "%s", __func__);
+                       continue;
                }
-               bool needs_enable = false;
-               slot = _dispatch_deferred_event_find_slot(ddi, ke);
-               if (slot == ddi->ddi_nevents) {
-                       ddi->ddi_nevents++;
-               } else if (ke->flags & EV_DELETE) {
-                       // <rdar://problem/26202376> when deleting and an enable is pending,
-                       // we must merge EV_ENABLE to do an immediate deletion
-                       needs_enable = (ddi->ddi_eventlist[slot].flags & EV_ENABLE);
+
+               data = os_atomic_load2o(ds, ds_pending_data, relaxed);
+               if (unlikely(data)) {
+                       // the release barrier is required to make the changes
+                       // to `ds_timer` visible to _dispatch_source_timer_data()
+                       if (os_atomic_cmpxchg2o(ds, ds_pending_data, data,
+                                       data | DISPATCH_TIMER_MISSED_MARKER, release)) {
+                               _dispatch_timers_update(dr, DISPATCH_TIMERS_UNREGISTER);
+                               continue;
+                       }
                }
-               ddi->ddi_eventlist[slot] = *ke;
-               if (needs_enable) {
-                       ddi->ddi_eventlist[slot].flags |= EV_ENABLE;
+
+               data = _dispatch_source_timer_compute_missed(dr, now, 0);
+               _dispatch_timers_update(dr, DISPATCH_TIMERS_RETAIN_2);
+               pending_data = data << 1;
+       if (!_dispatch_unote_registered(dr) && dr->dt_timer.target < INT64_MAX) {
+               // if we unregistered because of suspension, we have to fake that
+               // we missed events.
+                       pending_data |= DISPATCH_TIMER_MISSED_MARKER;
+                       os_atomic_store2o(ds, ds_pending_data, pending_data, release);
+               } else {
+                       os_atomic_store2o(ds, ds_pending_data, pending_data, relaxed);
                }
-       } else {
-               _dispatch_kq_update_one(ke);
+               _dispatch_trace_timer_fire(dr, data, data);
+               _dispatch_debug("kevent-source[%p]: fired timer[%p]", ds, dr);
+               _dispatch_object_debug(ds, "%s", __func__);
+               dx_wakeup(ds, 0, DISPATCH_WAKEUP_MAKE_DIRTY | DISPATCH_WAKEUP_CONSUME_2);
        }
 }
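
The barrier pairing described in the comments above can be modeled with portable C11 atomics. This is a hedged sketch, not the real os_atomic_* implementation; it substitutes an acquire fence for the dependency fence the code uses:

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t pending;   /* models ds_pending_data */
static uint64_t timer_state;       /* models the dt_timer fields */

/* Manager (_dispatch_timers_run2 analogue): mutate timer state, then
 * publish the count with the marker using a release CAS, so the writes
 * above it become visible to whoever observes the marker. */
static void
manager_mark_missed(uint64_t observed)
{
        timer_state++; /* some update that must be visible to the handler */
        (void)atomic_compare_exchange_strong_explicit(&pending, &observed,
                        observed | 1ull /* MISSED_MARKER */,
                        memory_order_release, memory_order_relaxed);
}

/* Handler (_dispatch_source_timer_data analogue): on seeing the marker,
 * order all subsequent reads of timer_state after the load. */
static uint64_t
handler_consume(void)
{
        uint64_t v = atomic_load_explicit(&pending, memory_order_relaxed);
        if (v & 1ull) {
                atomic_thread_fence(memory_order_acquire);
                return timer_state; /* sees the manager's update */
        }
        return v >> 1;
}
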
 
-static long
-_dispatch_kq_immediate_update(_dispatch_kevent_qos_s *ke)
+DISPATCH_NOINLINE
+static void
+_dispatch_timers_run(dispatch_clock_now_cache_t nows)
 {
-       dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
-       int slot, last;
-
-       _dispatch_kevent_assert_valid_qos(ke);
-       if (ddi) {
-               _dispatch_kevent_qos_s *events = ddi->ddi_eventlist;
-               slot = _dispatch_deferred_event_find_slot(ddi, ke);
-               if (slot < ddi->ddi_nevents) {
-                       // <rdar://problem/26202376> when deleting and an enable is pending,
-                       // we must merge EV_ENABLE to do an immediate deletion
-                       if ((ke->flags & EV_DELETE) && (events[slot].flags & EV_ENABLE)) {
-                               ke->flags |= EV_ENABLE;
-                       }
-                       last = --ddi->ddi_nevents;
-                       if (slot != last) {
-                               events[slot] = events[last];
-                       }
+       uint32_t tidx;
+       for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) {
+               if (_dispatch_timers_heap[tidx].dth_count) {
+                       _dispatch_timers_run2(nows, tidx);
                }
        }
-       return _dispatch_kq_update_one(ke);
 }
 
-#pragma mark -
-#pragma mark dispatch_mgr
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mgr_queue_poke(dispatch_queue_t dq DISPATCH_UNUSED,
-               pthread_priority_t pp DISPATCH_UNUSED)
-{
-       static const _dispatch_kevent_qos_s kev = {
-               .ident = 1,
-               .filter = EVFILT_USER,
-               .fflags = NOTE_TRIGGER,
-       };
+#if DISPATCH_HAVE_TIMER_COALESCING
+#define DISPATCH_KEVENT_COALESCING_WINDOW_INIT(qos, ms) \
+               [DISPATCH_TIMER_QOS_##qos] = 2ull * (ms) * NSEC_PER_MSEC
 
-#if DISPATCH_DEBUG && DISPATCH_MGR_QUEUE_DEBUG
-       _dispatch_debug("waking up the dispatch manager queue: %p", dq);
+static const uint64_t _dispatch_kevent_coalescing_window[] = {
+       DISPATCH_KEVENT_COALESCING_WINDOW_INIT(NORMAL, 75),
+#if DISPATCH_HAVE_TIMER_QOS
+       DISPATCH_KEVENT_COALESCING_WINDOW_INIT(CRITICAL, 1),
+       DISPATCH_KEVENT_COALESCING_WINDOW_INIT(BACKGROUND, 100),
 #endif
-       _dispatch_kq_deferred_update(&kev);
-}
+};
+#endif // DISPATCH_HAVE_TIMER_COALESCING
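
Expanded, the initializer above gives each QoS bucket a window of twice its tolerance. A self-contained check of the resulting values (NSEC_PER_MSEC redefined locally so the snippet stands alone):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000ull

int main(void)
{
        uint64_t normal     = 2ull * 75  * NSEC_PER_MSEC; /* 150ms window */
        uint64_t critical   = 2ull * 1   * NSEC_PER_MSEC; /*   2ms window */
        uint64_t background = 2ull * 100 * NSEC_PER_MSEC; /* 200ms window */
        printf("normal=%llu critical=%llu background=%llu\n",
                        (unsigned long long)normal, (unsigned long long)critical,
                        (unsigned long long)background);
        return 0;
}
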
 
-void
-_dispatch_mgr_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
-               dispatch_wakeup_flags_t flags)
+static inline dispatch_timer_delay_s
+_dispatch_timers_get_delay(dispatch_timer_heap_t dth, dispatch_clock_t clock,
+               uint32_t qos, dispatch_clock_now_cache_t nows)
 {
-       if (flags & DISPATCH_WAKEUP_FLUSH) {
-               os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release);
+       uint64_t target = dth->dth_target, deadline = dth->dth_deadline;
+       uint64_t delta = INT64_MAX, dldelta = INT64_MAX;
+       dispatch_timer_delay_s rc;
+
+       dispatch_assert(target <= deadline);
+       if (delta == 0 || target >= INT64_MAX) {
+               goto done;
        }
 
-       if (_dispatch_queue_get_current() == &_dispatch_mgr_q) {
-               return;
+       if (qos < DISPATCH_TIMER_QOS_COUNT && dth->dth_count > 2) {
+#if DISPATCH_HAVE_TIMER_COALESCING
+               // Timer pre-coalescing <rdar://problem/13222034>
+               // When we have several timers with this target/deadline bracket:
+               //
+               //      Target        window  Deadline
+               //        V           <-------V
+               // t1:    [...........|.................]
+               // t2:         [......|.......]
+               // t3:             [..|..........]
+               // t4:                | [.............]
+               //                 ^
+               //          Optimal Target
+               //
+               // Coalescing works better if the Target is delayed to "Optimal", by
+               // picking the latest target that isn't too close to the deadline.
+               uint64_t window = _dispatch_kevent_coalescing_window[qos];
+               if (target + window < deadline) {
+                       uint64_t latest = deadline - window;
+                       target = _dispatch_timer_heap_max_target_before(dth, latest);
+               }
+#endif
        }
 
-       if (!_dispatch_queue_class_probe(&_dispatch_mgr_q)) {
-               return;
+       uint64_t now = _dispatch_time_now_cached(clock, nows);
+       if (target <= now) {
+               delta = 0;
+               dldelta = 0;
+               goto done;
+       }
+
+       uint64_t tmp = target - now;
+       if (clock != DISPATCH_CLOCK_WALL) {
+               tmp = _dispatch_time_mach2nano(tmp);
+       }
+       if (tmp < delta) {
+               delta = tmp;
        }
 
-       _dispatch_mgr_queue_poke(dq, pp);
+       tmp = deadline - now;
+       if (clock != DISPATCH_CLOCK_WALL) {
+               tmp = _dispatch_time_mach2nano(tmp);
+       }
+       if (tmp < dldelta) {
+               dldelta = tmp;
+       }
+
+done:
+       rc.delay = delta;
+       rc.leeway = delta < INT64_MAX ? dldelta - delta : INT64_MAX;
+       return rc;
 }
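
A worked example of the result computed above, for the wall clock so that neither branch goes through _dispatch_time_mach2nano; the numbers are illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t now      = 10000000000ull; /* 10.0s, in nanoseconds */
        uint64_t target   = 12000000000ull; /* 12.0s */
        uint64_t deadline = 12500000000ull; /* 12.5s */
        uint64_t delay  = target - now;             /* 2.0s: rc.delay */
        uint64_t leeway = (deadline - now) - delay; /* 0.5s: rc.leeway */
        printf("delay=%llu leeway=%llu\n",
                        (unsigned long long)delay, (unsigned long long)leeway);
        return 0;
}
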
 
-DISPATCH_NOINLINE
-static void
-_dispatch_event_init(void)
+static bool
+_dispatch_timers_program2(dispatch_clock_now_cache_t nows, uint32_t tidx)
 {
-       _dispatch_kevent_init();
-       _dispatch_timers_init();
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-       _dispatch_mach_recv_msg_buf_init();
-#endif
-       _dispatch_memorypressure_init();
-       _voucher_activity_debug_channel_init();
+       uint32_t qos = DISPATCH_TIMER_QOS(tidx);
+       dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(tidx);
+       dispatch_timer_heap_t heap = &_dispatch_timers_heap[tidx];
+       dispatch_timer_delay_s range;
+
+       range = _dispatch_timers_get_delay(heap, clock, qos, nows);
+       if (range.delay == 0 || range.delay >= INT64_MAX) {
+               _dispatch_trace_next_timer_set(NULL, qos);
+               if (heap->dth_flags & DTH_ARMED) {
+                       _dispatch_event_loop_timer_delete(tidx);
+               }
+               return range.delay == 0;
+       }
+
+       _dispatch_trace_next_timer_set(heap->dth_min[DTH_TARGET_ID], qos);
+       _dispatch_trace_next_timer_program(range.delay, qos);
+       _dispatch_event_loop_timer_arm(tidx, range, nows);
+       return false;
 }
 
-#if DISPATCH_USE_MGR_THREAD
 DISPATCH_NOINLINE
-static void
-_dispatch_mgr_init(void)
+static bool
+_dispatch_timers_program(dispatch_clock_now_cache_t nows)
 {
-       uint64_t owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED;
-       _dispatch_queue_set_current(&_dispatch_mgr_q);
-       if (_dispatch_queue_drain_try_lock(&_dispatch_mgr_q,
-                       DISPATCH_INVOKE_STEALING, NULL) != owned) {
-               DISPATCH_INTERNAL_CRASH(0, "Locking the manager should not fail");
+       bool poll = false;
+       uint32_t tidx, timerm = _dispatch_timers_processing_mask;
+
+       for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) {
+               if (timerm & (1 << tidx)) {
+                       poll |= _dispatch_timers_program2(nows, tidx);
+               }
        }
-       _dispatch_mgr_priority_init();
-       _dispatch_event_init();
+       return poll;
 }
 
 DISPATCH_NOINLINE
 static bool
-_dispatch_mgr_wait_for_event(dispatch_deferred_items_t ddi, bool poll)
+_dispatch_timers_configure(void)
 {
-       int r;
-       dispatch_assert((size_t)ddi->ddi_maxevents < countof(ddi->ddi_eventlist));
-
-retry:
-       r = kevent_qos(_dispatch_kq, ddi->ddi_eventlist, ddi->ddi_nevents,
-                       ddi->ddi_eventlist + ddi->ddi_maxevents, 1, NULL, NULL,
-                       poll ? KEVENT_FLAG_IMMEDIATE : KEVENT_FLAG_NONE);
-       if (slowpath(r == -1)) {
-               int err = errno;
-               switch (err) {
-               case EINTR:
-                       goto retry;
-               case EBADF:
-                       DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors");
-                       break;
-               default:
-                       (void)dispatch_assume_zero(err);
-                       break;
+       // Find out if there is a new target/deadline on the timer lists
+       return _dispatch_timer_heap_has_new_min(_dispatch_timers_heap,
+                       countof(_dispatch_timers_heap), _dispatch_timers_processing_mask);
+}
+
+static inline bool
+_dispatch_mgr_timers(void)
+{
+       dispatch_clock_now_cache_s nows = { };
+       bool expired = _dispatch_timers_expired;
+       if (unlikely(expired)) {
+               _dispatch_timers_run(&nows);
+       }
+       _dispatch_mgr_trace_timers_wakes();
+       bool reconfigure = _dispatch_timers_reconfigure;
+       if (unlikely(reconfigure || expired)) {
+               if (reconfigure) {
+                       reconfigure = _dispatch_timers_configure();
+                       _dispatch_timers_reconfigure = false;
+               }
+               if (reconfigure || expired) {
+                       expired = _dispatch_timers_expired = _dispatch_timers_program(&nows);
+               }
+               _dispatch_timers_processing_mask = 0;
+       }
+       return expired;
+}
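
One concrete pass through the function above, assuming a single repeating timer that just fired: expired is true, so _dispatch_timers_run() delivers the fire and reschedules the timer; _dispatch_timers_configure() then reports that the heap minimum moved; _dispatch_timers_program() re-arms the kernel timer for the new target and returns false because nothing is already due, so the manager clears _dispatch_timers_processing_mask and can block in the event loop again.
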
+
+#pragma mark -
+#pragma mark dispatch_mgr
+
+void
+_dispatch_mgr_queue_push(dispatch_queue_t dq, dispatch_object_t dou,
+               DISPATCH_UNUSED dispatch_qos_t qos)
+{
+       uint64_t dq_state;
+       _dispatch_trace_continuation_push(dq, dou._do);
+       if (unlikely(_dispatch_queue_push_update_tail(dq, dou._do))) {
+               _dispatch_queue_push_update_head(dq, dou._do);
+               dq_state = os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release);
+               if (!_dq_state_drain_locked_by_self(dq_state)) {
+                       _dispatch_event_loop_poke(DISPATCH_WLH_MANAGER, 0, 0);
                }
        }
-       ddi->ddi_nevents = 0;
-       return r > 0;
 }
 
+DISPATCH_NORETURN
+void
+_dispatch_mgr_queue_wakeup(DISPATCH_UNUSED dispatch_queue_t dq,
+               DISPATCH_UNUSED dispatch_qos_t qos,
+               DISPATCH_UNUSED dispatch_wakeup_flags_t flags)
+{
+       DISPATCH_INTERNAL_CRASH(0, "Don't try to wake up or override the manager");
+}
+
+#if DISPATCH_USE_MGR_THREAD
 DISPATCH_NOINLINE DISPATCH_NORETURN
 static void
 _dispatch_mgr_invoke(void)
 {
-       dispatch_deferred_items_s ddi;
+#if DISPATCH_EVENT_BACKEND_KEVENT
+       dispatch_kevent_s evbuf[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT];
+#endif
+       dispatch_deferred_items_s ddi = {
+#if DISPATCH_EVENT_BACKEND_KEVENT
+               .ddi_maxevents = DISPATCH_DEFERRED_ITEMS_EVENT_COUNT,
+               .ddi_eventlist = evbuf,
+#endif
+       };
        bool poll;
 
-       ddi.ddi_magic = DISPATCH_DEFERRED_ITEMS_MAGIC;
-       ddi.ddi_stashed_pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
-       ddi.ddi_nevents = 0;
-       ddi.ddi_maxevents = 1;
-
        _dispatch_deferred_items_set(&ddi);
-
        for (;;) {
                _dispatch_mgr_queue_drain();
                poll = _dispatch_mgr_timers();
                poll = poll || _dispatch_queue_class_probe(&_dispatch_mgr_q);
-               if (_dispatch_mgr_wait_for_event(&ddi, poll)) {
-                       _dispatch_kevent_qos_s *ke = ddi.ddi_eventlist + ddi.ddi_maxevents;
-                       _dispatch_kevent_debug("received", ke);
-                       _dispatch_kevent_drain(ke);
-               }
+               _dispatch_event_loop_drain(poll ? KEVENT_FLAG_IMMEDIATE : 0);
        }
 }
 #endif // DISPATCH_USE_MGR_THREAD
@@ -2898,6 +2334,7 @@ _dispatch_mgr_invoke(void)
 DISPATCH_NORETURN
 void
 _dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED,
+               dispatch_invoke_context_t dic DISPATCH_UNUSED,
                dispatch_invoke_flags_t flags DISPATCH_UNUSED)
 {
 #if DISPATCH_USE_KEVENT_WORKQUEUE
@@ -2907,7 +2344,9 @@ _dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED,
        }
 #endif
 #if DISPATCH_USE_MGR_THREAD
-       _dispatch_mgr_init();
+       _dispatch_queue_set_current(&_dispatch_mgr_q);
+       _dispatch_mgr_priority_init();
+       _dispatch_queue_mgr_lock(&_dispatch_mgr_q);
        // never returns, so burn bridges behind us & clear stack 2k ahead
        _dispatch_clear_stack(2048);
        _dispatch_mgr_invoke();
@@ -2916,18 +2355,19 @@ _dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED,
 
 #if DISPATCH_USE_KEVENT_WORKQUEUE
 
-#define DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER ((pthread_priority_t)(~0ul))
+#define DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER ((dispatch_priority_t)~0u)
+
+_Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN >=
+               DISPATCH_DEFERRED_ITEMS_EVENT_COUNT,
+               "our list should not be longer than the kernel's");
 
 DISPATCH_ALWAYS_INLINE
-static inline pthread_priority_t
-_dispatch_kevent_worker_thread_init(dispatch_deferred_items_t ddi)
+static inline dispatch_priority_t
+_dispatch_wlh_worker_thread_init(dispatch_wlh_t wlh,
+               dispatch_deferred_items_t ddi)
 {
-       uint64_t owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED;
-
-       ddi->ddi_magic = DISPATCH_DEFERRED_ITEMS_MAGIC;
-       ddi->ddi_nevents = 0;
-       ddi->ddi_maxevents = countof(ddi->ddi_eventlist);
-       ddi->ddi_stashed_pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
+       dispatch_assert(wlh);
+       dispatch_priority_t old_dbp;
 
        pthread_priority_t pp = _dispatch_get_priority();
        if (!(pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)) {
@@ -2938,10 +2378,20 @@ _dispatch_kevent_worker_thread_init(dispatch_deferred_items_t ddi)
                // Also add the NEEDS_UNBIND flag so that
                // _dispatch_priority_compute_update knows it has to unbind
                pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK;
-               pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
+               if (wlh == DISPATCH_WLH_ANON) {
+                       pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
+               } else {
+                       // pthread sets the flag when it is an event delivery thread
+                       // so we need to explicitly clear it
+                       pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
+               }
                _dispatch_thread_setspecific(dispatch_priority_key,
-                                       (void *)(uintptr_t)pp);
-               ddi->ddi_stashed_pp = 0;
+                               (void *)(uintptr_t)pp);
+               if (wlh != DISPATCH_WLH_ANON) {
+                       _dispatch_debug("wlh[%p]: handling events", wlh);
+               } else {
+                       ddi->ddi_can_stash = true;
+               }
                return DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER;
        }
 
@@ -2968,3588 +2418,136 @@ _dispatch_kevent_worker_thread_init(dispatch_deferred_items_t ddi)
        _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp);
 
        // ensure kevents registered from this thread are registered at manager QoS
-       pthread_priority_t old_dp = _dispatch_set_defaultpriority(
-                       (pthread_priority_t)_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, NULL);
+       old_dbp = _dispatch_set_basepri(DISPATCH_PRIORITY_FLAG_MANAGER);
        _dispatch_queue_set_current(&_dispatch_mgr_q);
-       if (_dispatch_queue_drain_try_lock(&_dispatch_mgr_q,
-                       DISPATCH_INVOKE_STEALING, NULL) != owned) {
-               DISPATCH_INTERNAL_CRASH(0, "Locking the manager should not fail");
-       }
-       static int event_thread_init;
-       if (!event_thread_init) {
-               event_thread_init = 1;
-               _dispatch_event_init();
-       }
-       return old_dp;
+       _dispatch_queue_mgr_lock(&_dispatch_mgr_q);
+       return old_dbp;
 }
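
The masking above keeps the QoS bits plus at most the overcommit flag, then sets or clears NEEDS_UNBIND depending on the workloop kind. A sketch with made-up bit values (the real _PTHREAD_PRIORITY_* constants differ):

#include <stdint.h>

/* Illustrative flag layout only; not the real pthread values. */
#define PRIO_FLAGS_MASK        0xff000000u
#define PRIO_OVERCOMMIT_FLAG   0x80000000u
#define PRIO_NEEDS_UNBIND_FLAG 0x02000000u

static uint32_t
scrub_priority(uint32_t pp, int anonymous_wlh)
{
        /* drop every flag except overcommit, keep the QoS/priority bits */
        pp &= PRIO_OVERCOMMIT_FLAG | ~PRIO_FLAGS_MASK;
        if (anonymous_wlh) {
                pp |= PRIO_NEEDS_UNBIND_FLAG;  /* unbind later, as above */
        } else {
                pp &= ~PRIO_NEEDS_UNBIND_FLAG; /* event delivery thread */
        }
        return pp;
}
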
 
 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
 static inline bool
-_dispatch_kevent_worker_thread_reset(pthread_priority_t old_dp)
+_dispatch_wlh_worker_thread_reset(dispatch_priority_t old_dbp)
 {
-       dispatch_queue_t dq = &_dispatch_mgr_q;
-       uint64_t orig_dq_state;
-
-       _dispatch_queue_drain_unlock(dq, DISPATCH_QUEUE_SERIAL_DRAIN_OWNED,
-                       &orig_dq_state);
-       _dispatch_reset_defaultpriority(old_dp);
+       bool needs_poll = _dispatch_queue_mgr_unlock(&_dispatch_mgr_q);
+       _dispatch_reset_basepri(old_dbp);
+       _dispatch_reset_basepri_override();
        _dispatch_queue_set_current(NULL);
-       return _dq_state_is_dirty(orig_dq_state);
+       return needs_poll;
 }
 
-DISPATCH_NOINLINE
-void
-_dispatch_kevent_worker_thread(_dispatch_kevent_qos_s **events, int *nevents)
+DISPATCH_ALWAYS_INLINE
+static void
+_dispatch_wlh_worker_thread(dispatch_wlh_t wlh, dispatch_kevent_t events,
+               int *nevents)
 {
        _dispatch_introspection_thread_add();
+       DISPATCH_PERF_MON_VAR_INIT
 
-       if (!events && !nevents) {
-               // events for worker thread request have already been delivered earlier
-               return;
-       }
-
-       _dispatch_kevent_qos_s *ke = *events;
-       int n = *nevents;
-       if (!dispatch_assume(n) || !dispatch_assume(*events)) return;
-
-       dispatch_deferred_items_s ddi;
-       pthread_priority_t old_dp = _dispatch_kevent_worker_thread_init(&ddi);
+       dispatch_deferred_items_s ddi = {
+               .ddi_eventlist = events,
+       };
+       dispatch_priority_t old_dbp;
 
-       _dispatch_deferred_items_set(&ddi);
-       for (int i = 0; i < n; i++) {
-               _dispatch_kevent_debug("received", ke);
-               _dispatch_kevent_drain(ke++);
+       old_dbp = _dispatch_wlh_worker_thread_init(wlh, &ddi);
+       if (old_dbp == DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER) {
+               _dispatch_perfmon_start_impl(true);
+       } else {
+               dispatch_assert(wlh == DISPATCH_WLH_ANON);
+               wlh = DISPATCH_WLH_ANON;
        }
+       _dispatch_deferred_items_set(&ddi);
+       _dispatch_event_loop_merge(events, *nevents);
 
-       if (old_dp != DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER) {
+       if (old_dbp != DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER) {
                _dispatch_mgr_queue_drain();
                bool poll = _dispatch_mgr_timers();
-               if (_dispatch_kevent_worker_thread_reset(old_dp)) {
+               if (_dispatch_wlh_worker_thread_reset(old_dbp)) {
                        poll = true;
                }
-               if (poll) _dispatch_mgr_queue_poke(&_dispatch_mgr_q, 0);
-       }
-       _dispatch_deferred_items_set(NULL);
-
-       if (ddi.ddi_stashed_pp & _PTHREAD_PRIORITY_PRIORITY_MASK) {
-               *nevents = 0;
-               if (ddi.ddi_nevents) {
-                       _dispatch_kq_update_all(ddi.ddi_eventlist, ddi.ddi_nevents);
-               }
-               ddi.ddi_stashed_pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-               return _dispatch_root_queue_drain_deferred_item(ddi.ddi_stashed_dq,
-                               ddi.ddi_stashed_dou, ddi.ddi_stashed_pp);
-#ifndef WORKQ_KEVENT_EVENT_BUFFER_LEN
-       } else if (ddi.ddi_nevents > *nevents) {
-               *nevents = 0;
-               _dispatch_kq_update_all(ddi.ddi_eventlist, ddi.ddi_nevents);
-#endif
-       } else {
-               *nevents = ddi.ddi_nevents;
-               dispatch_static_assert(__builtin_types_compatible_p(typeof(**events),
-                               typeof(*ddi.ddi_eventlist)));
-               memcpy(*events, ddi.ddi_eventlist,
-                        (size_t)ddi.ddi_nevents * sizeof(*ddi.ddi_eventlist));
-       }
-}
-#endif // DISPATCH_USE_KEVENT_WORKQUEUE
-
-#pragma mark -
-#pragma mark dispatch_memorypressure
-
-#if DISPATCH_USE_MEMORYPRESSURE_SOURCE
-#define DISPATCH_MEMORYPRESSURE_SOURCE_TYPE DISPATCH_SOURCE_TYPE_MEMORYPRESSURE
-#define DISPATCH_MEMORYPRESSURE_SOURCE_MASK ( \
-               DISPATCH_MEMORYPRESSURE_NORMAL | \
-               DISPATCH_MEMORYPRESSURE_WARN | \
-               DISPATCH_MEMORYPRESSURE_CRITICAL | \
-               DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \
-               DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL)
-#define DISPATCH_MEMORYPRESSURE_MALLOC_MASK ( \
-               DISPATCH_MEMORYPRESSURE_WARN | \
-               DISPATCH_MEMORYPRESSURE_CRITICAL | \
-               DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \
-               DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL)
-#endif
-
-#if DISPATCH_USE_MEMORYPRESSURE_SOURCE
-static dispatch_source_t _dispatch_memorypressure_source;
-
-static void
-_dispatch_memorypressure_handler(void *context DISPATCH_UNUSED)
-{
-#if DISPATCH_USE_MEMORYPRESSURE_SOURCE
-       unsigned long memorypressure;
-       memorypressure = dispatch_source_get_data(_dispatch_memorypressure_source);
-
-       if (memorypressure & DISPATCH_MEMORYPRESSURE_NORMAL) {
-               _dispatch_memory_warn = false;
-               _dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT;
-#if VOUCHER_USE_MACH_VOUCHER
-               if (_firehose_task_buffer) {
-                       firehose_buffer_clear_bank_flags(_firehose_task_buffer,
-                                       FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY);
-               }
-#endif
-       }
-       if (memorypressure & DISPATCH_MEMORYPRESSURE_WARN) {
-               _dispatch_memory_warn = true;
-               _dispatch_continuation_cache_limit =
-                               DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN;
-#if VOUCHER_USE_MACH_VOUCHER
-               if (_firehose_task_buffer) {
-                       firehose_buffer_set_bank_flags(_firehose_task_buffer,
-                                       FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY);
+               if (poll) _dispatch_event_loop_poke(DISPATCH_WLH_MANAGER, 0, 0);
+       } else if (ddi.ddi_stashed_dou._do) {
+               _dispatch_debug("wlh[%p]: draining deferred item %p", wlh,
+                               ddi.ddi_stashed_dou._do);
+               if (wlh == DISPATCH_WLH_ANON) {
+                       dispatch_assert(ddi.ddi_nevents == 0);
+                       _dispatch_deferred_items_set(NULL);
+                       _dispatch_root_queue_drain_deferred_item(&ddi
+                                       DISPATCH_PERF_MON_ARGS);
+               } else {
+                       _dispatch_root_queue_drain_deferred_wlh(&ddi
+                                       DISPATCH_PERF_MON_ARGS);
                }
-#endif
        }
-       if (memorypressure & DISPATCH_MEMORYPRESSURE_MALLOC_MASK) {
-               malloc_memory_event_handler(memorypressure & DISPATCH_MEMORYPRESSURE_MALLOC_MASK);
-       }
-#endif
-}
-
-static void
-_dispatch_memorypressure_init(void)
-{
-       _dispatch_memorypressure_source = dispatch_source_create(
-                       DISPATCH_MEMORYPRESSURE_SOURCE_TYPE, 0,
-                       DISPATCH_MEMORYPRESSURE_SOURCE_MASK,
-                       _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true));
-       dispatch_source_set_event_handler_f(_dispatch_memorypressure_source,
-                       _dispatch_memorypressure_handler);
-       dispatch_activate(_dispatch_memorypressure_source);
-}
-#else
-static inline void _dispatch_memorypressure_init(void) {}
-#endif // DISPATCH_USE_MEMORYPRESSURE_SOURCE
-
-#pragma mark -
-#pragma mark dispatch_mach
-
-#if HAVE_MACH
-
-#if DISPATCH_DEBUG && DISPATCH_MACHPORT_DEBUG
-#define _dispatch_debug_machport(name) \
-               dispatch_debug_machport((name), __func__)
-#else
-#define _dispatch_debug_machport(name) ((void)(name))
-#endif
-
-// Flags for all notifications that are registered/unregistered when a
-// send-possible notification is requested/delivered
-#define _DISPATCH_MACH_SP_FLAGS (DISPATCH_MACH_SEND_POSSIBLE| \
-               DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_DELETED)
-#define _DISPATCH_MACH_RECV_FLAGS (DISPATCH_MACH_RECV_MESSAGE| \
-               DISPATCH_MACH_RECV_MESSAGE_DIRECT| \
-               DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE)
-#define _DISPATCH_MACH_RECV_DIRECT_FLAGS ( \
-               DISPATCH_MACH_RECV_MESSAGE_DIRECT| \
-               DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE)
-
-#define _DISPATCH_IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v)
-#define _DISPATCH_HASH(x, y) (_DISPATCH_IS_POWER_OF_TWO(y) ? \
-               (MACH_PORT_INDEX(x) & ((y) - 1)) : (MACH_PORT_INDEX(x) % (y)))
-
-#define _DISPATCH_MACHPORT_HASH_SIZE 32
-#define _DISPATCH_MACHPORT_HASH(x) \
-               _DISPATCH_HASH((x), _DISPATCH_MACHPORT_HASH_SIZE)
-
-#ifndef MACH_RCV_VOUCHER
-#define MACH_RCV_VOUCHER 0x00000800
-#endif
-#define DISPATCH_MACH_RCV_TRAILER MACH_RCV_TRAILER_CTX
-#define DISPATCH_MACH_RCV_OPTIONS ( \
-               MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | \
-               MACH_RCV_TRAILER_ELEMENTS(DISPATCH_MACH_RCV_TRAILER) | \
-               MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0)) | \
-               MACH_RCV_VOUCHER
-
-#define DISPATCH_MACH_NOTIFICATION_ARMED(dk) ((dk)->dk_kevent.ext[0])
-
-static void _dispatch_kevent_mach_msg_recv(_dispatch_kevent_qos_s *ke,
-               mach_msg_header_t *hdr);
-static void _dispatch_kevent_mach_msg_destroy(_dispatch_kevent_qos_s *ke,
-               mach_msg_header_t *hdr);
-static void _dispatch_source_merge_mach_msg(dispatch_source_t ds,
-               dispatch_source_refs_t dr, dispatch_kevent_t dk,
-               _dispatch_kevent_qos_s *ke, mach_msg_header_t *hdr,
-               mach_msg_size_t siz);
-static kern_return_t _dispatch_mach_notify_update(dispatch_kevent_t dk,
-               uint32_t new_flags, uint32_t del_flags, uint32_t mask,
-               mach_msg_id_t notify_msgid, mach_port_mscount_t notify_sync);
-static void _dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr);
-static void _dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm,
-               dispatch_mach_reply_refs_t dmr, unsigned int options);
-static void _dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm);
-static void _dispatch_mach_msg_recv(dispatch_mach_t dm,
-               dispatch_mach_reply_refs_t dmr, _dispatch_kevent_qos_s *ke,
-               mach_msg_header_t *hdr, mach_msg_size_t siz);
-static void _dispatch_mach_merge_notification_kevent(dispatch_mach_t dm,
-               const _dispatch_kevent_qos_s *ke);
-static inline mach_msg_option_t _dispatch_mach_checkin_options(void);
-
-static const size_t _dispatch_mach_recv_msg_size =
-               DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE;
-static const size_t dispatch_mach_trailer_size =
-               sizeof(dispatch_mach_trailer_t);
-static mach_port_t _dispatch_mach_notify_port;
-static dispatch_source_t _dispatch_mach_notify_source;
-
-static inline void*
-_dispatch_kevent_mach_msg_buf(_dispatch_kevent_qos_s *ke)
-{
-       return (void*)ke->ext[0];
-}
-
-static inline mach_msg_size_t
-_dispatch_kevent_mach_msg_size(_dispatch_kevent_qos_s *ke)
-{
-       // buffer size in the successful receive case, but message size (like
-       // msgh_size) in the MACH_RCV_TOO_LARGE case, i.e. the caller must add
-       // the trailer size.
-       return (mach_msg_size_t)ke->ext[1];
-}
-
-static void
-_dispatch_source_type_mach_recv_direct_init(dispatch_source_t ds,
-       dispatch_source_type_t type DISPATCH_UNUSED,
-       uintptr_t handle DISPATCH_UNUSED,
-       unsigned long mask DISPATCH_UNUSED)
-{
-       ds->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE_DIRECT;
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-       if (_dispatch_evfilt_machport_direct_enabled) return;
-       ds->ds_dkev->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT;
-       ds->ds_dkev->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED);
-       ds->ds_is_direct_kevent = false;
-#endif
-}
-
-static const
-struct dispatch_source_type_s _dispatch_source_type_mach_recv_direct = {
-       .ke = {
-               .filter = EVFILT_MACHPORT,
-               .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC,
-               .fflags = DISPATCH_MACH_RCV_OPTIONS,
-       },
-       .init = _dispatch_source_type_mach_recv_direct_init,
-};
-
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-static mach_port_t _dispatch_mach_portset,  _dispatch_mach_recv_portset;
-static _dispatch_kevent_qos_s _dispatch_mach_recv_kevent = {
-       .filter = EVFILT_MACHPORT,
-       .flags = EV_ADD|EV_ENABLE|EV_DISPATCH,
-       .fflags = DISPATCH_MACH_RCV_OPTIONS,
-};
 
-static void
-_dispatch_mach_recv_msg_buf_init(void)
-{
-       if (_dispatch_evfilt_machport_direct_enabled) return;
-       mach_vm_size_t vm_size = mach_vm_round_page(
-                       _dispatch_mach_recv_msg_size + dispatch_mach_trailer_size);
-       mach_vm_address_t vm_addr = vm_page_size;
-       kern_return_t kr;
-
-       while (slowpath(kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size,
-                       VM_FLAGS_ANYWHERE))) {
-               if (kr != KERN_NO_SPACE) {
-                       DISPATCH_CLIENT_CRASH(kr,
-                                       "Could not allocate mach msg receive buffer");
-               }
-               _dispatch_temporary_resource_shortage();
-               vm_addr = vm_page_size;
+       _dispatch_deferred_items_set(NULL);
+       if (old_dbp == DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER &&
+                       !ddi.ddi_stashed_dou._do) {
+               _dispatch_perfmon_end(perfmon_thread_event_no_steal);
        }
-       _dispatch_mach_recv_kevent.ext[0] = (uintptr_t)vm_addr;
-       _dispatch_mach_recv_kevent.ext[1] = vm_size;
+       _dispatch_debug("returning %d deferred kevents", ddi.ddi_nevents);
+       *nevents = ddi.ddi_nevents;
 }
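
For context on ddi_stashed_dou: while merging kernel events, the first runnable root-queue object can be parked in the deferred items instead of being re-enqueued, then drained directly once merging completes. A simplified sketch with stand-in types and a hypothetical run_item():

typedef struct deferred_items_s {
        void *stashed_dou; /* first work item found while merging events */
        int nevents;       /* kevents handed back to the kernel on return */
} deferred_items_s;

extern void run_item(void *dou); /* hypothetical drain entry point */

static void
drain_after_merge(deferred_items_s *ddi)
{
        if (ddi->stashed_dou) {
                /* run the stolen item on this thread instead of waking another
                 * worker, saving a round-trip through the global root queue */
                run_item(ddi->stashed_dou);
        }
}
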
-#endif
 
 DISPATCH_NOINLINE
-static void
-_dispatch_source_merge_mach_msg_direct(dispatch_source_t ds,
-               _dispatch_kevent_qos_s *ke, mach_msg_header_t *hdr)
-{
-       dispatch_continuation_t dc = _dispatch_source_get_event_handler(ds->ds_refs);
-       dispatch_queue_t cq = _dispatch_queue_get_current();
-
-       // see firehose_client_push_notify_async
-       _dispatch_queue_set_current(ds->_as_dq);
-       dc->dc_func(hdr);
-       _dispatch_queue_set_current(cq);
-       if (hdr != _dispatch_kevent_mach_msg_buf(ke)) {
-               free(hdr);
-       }
-}
-
-dispatch_source_t
-_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp,
-               const struct dispatch_continuation_s *dc)
-{
-       dispatch_source_t ds;
-       ds = dispatch_source_create(&_dispatch_source_type_mach_recv_direct,
-                       recvp, 0, &_dispatch_mgr_q);
-       os_atomic_store(&ds->ds_refs->ds_handler[DS_EVENT_HANDLER],
-                       (dispatch_continuation_t)dc, relaxed);
-       return ds;
-}
-
-static void
-_dispatch_mach_notify_port_init(void *context DISPATCH_UNUSED)
-{
-       kern_return_t kr;
-#if HAVE_MACH_PORT_CONSTRUCT
-       mach_port_options_t opts = { .flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT };
-#ifdef __LP64__
-       const mach_port_context_t guard = 0xfeed09071f1ca7edull;
-#else
-       const mach_port_context_t guard = 0xff1ca7edull;
-#endif
-       kr = mach_port_construct(mach_task_self(), &opts, guard,
-                       &_dispatch_mach_notify_port);
-#else
-       kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
-                       &_dispatch_mach_notify_port);
-#endif
-       DISPATCH_VERIFY_MIG(kr);
-       if (slowpath(kr)) {
-               DISPATCH_CLIENT_CRASH(kr,
-                               "mach_port_construct() failed: cannot create receive right");
-       }
-
-       static const struct dispatch_continuation_s dc = {
-               .dc_func = (void*)_dispatch_mach_notify_source_invoke,
-       };
-       _dispatch_mach_notify_source = _dispatch_source_create_mach_msg_direct_recv(
-                       _dispatch_mach_notify_port, &dc);
-       dispatch_assert(_dispatch_mach_notify_source);
-       dispatch_activate(_dispatch_mach_notify_source);
-}
-
-static mach_port_t
-_dispatch_get_mach_notify_port(void)
-{
-       static dispatch_once_t pred;
-       dispatch_once_f(&pred, NULL, _dispatch_mach_notify_port_init);
-       return _dispatch_mach_notify_port;
-}
-
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-static void
-_dispatch_mach_recv_portset_init(void *context DISPATCH_UNUSED)
+void
+_dispatch_kevent_worker_thread(dispatch_kevent_t *events, int *nevents)
 {
-       kern_return_t kr;
-
-       kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET,
-                       &_dispatch_mach_recv_portset);
-       DISPATCH_VERIFY_MIG(kr);
-       if (slowpath(kr)) {
-               DISPATCH_CLIENT_CRASH(kr,
-                               "mach_port_allocate() failed: cannot create port set");
-       }
-       _dispatch_kevent_qos_s *ke = &_dispatch_mach_recv_kevent;
-       dispatch_assert(_dispatch_kevent_mach_msg_buf(ke));
-       dispatch_assert(dispatch_mach_trailer_size ==
-                       REQUESTED_TRAILER_SIZE_NATIVE(MACH_RCV_TRAILER_ELEMENTS(
-                       DISPATCH_MACH_RCV_TRAILER)));
-       ke->ident = _dispatch_mach_recv_portset;
-#if DISPATCH_USE_KEVENT_WORKQUEUE
-       if (_dispatch_kevent_workqueue_enabled) {
-               ke->qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
+       if (!events && !nevents) {
+               // events for the worker thread request have already been delivered earlier
+               return;
        }
-#endif
-       _dispatch_kq_immediate_update(&_dispatch_mach_recv_kevent);
-}
-
-static mach_port_t
-_dispatch_get_mach_recv_portset(void)
-{
-       static dispatch_once_t pred;
-       dispatch_once_f(&pred, NULL, _dispatch_mach_recv_portset_init);
-       return _dispatch_mach_recv_portset;
+       if (!dispatch_assume(*nevents && *events)) return;
+       _dispatch_adopt_wlh_anon();
+       _dispatch_wlh_worker_thread(DISPATCH_WLH_ANON, *events, nevents);
+       _dispatch_reset_wlh();
 }
 
-static void
-_dispatch_mach_portset_init(void *context DISPATCH_UNUSED)
-{
-       _dispatch_kevent_qos_s kev = {
-               .filter = EVFILT_MACHPORT,
-               .flags = EV_ADD,
-       };
-#if DISPATCH_USE_KEVENT_WORKQUEUE
-       if (_dispatch_kevent_workqueue_enabled) {
-               kev.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
-       }
-#endif
-
-       kern_return_t kr;
 
-       kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET,
-                       &_dispatch_mach_portset);
-       DISPATCH_VERIFY_MIG(kr);
-       if (slowpath(kr)) {
-               DISPATCH_CLIENT_CRASH(kr,
-                               "mach_port_allocate() failed: cannot create port set");
-       }
-       kev.ident = _dispatch_mach_portset;
-       _dispatch_kq_immediate_update(&kev);
-}
+#endif // DISPATCH_USE_KEVENT_WORKQUEUE
+#pragma mark -
+#pragma mark dispatch_source_debug
 
-static mach_port_t
-_dispatch_get_mach_portset(void)
+static size_t
+_dispatch_source_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz)
 {
-       static dispatch_once_t pred;
-       dispatch_once_f(&pred, NULL, _dispatch_mach_portset_init);
-       return _dispatch_mach_portset;
+       dispatch_queue_t target = ds->do_targetq;
+       dispatch_source_refs_t dr = ds->ds_refs;
+       return dsnprintf(buf, bufsiz, "target = %s[%p], ident = 0x%x, "
+                       "mask = 0x%x, pending_data = 0x%llx, registered = %d, "
+                       "armed = %d, deleted = %d%s, canceled = %d, ",
+                       target && target->dq_label ? target->dq_label : "", target,
+                       dr->du_ident, dr->du_fflags, (unsigned long long)ds->ds_pending_data,
+                       ds->ds_is_installed, (bool)(ds->dq_atomic_flags & DSF_ARMED),
+                       (bool)(ds->dq_atomic_flags & DSF_DELETED),
+                       (ds->dq_atomic_flags & DSF_DEFERRED_DELETE) ? " (pending)" : "",
+                       (bool)(ds->dq_atomic_flags & DSF_CANCELED));
 }
 
-static kern_return_t
-_dispatch_mach_portset_update(dispatch_kevent_t dk, mach_port_t mps)
+static size_t
+_dispatch_timer_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz)
 {
-       mach_port_t mp = (mach_port_t)dk->dk_kevent.ident;
-       kern_return_t kr;
-
-       _dispatch_debug_machport(mp);
-       kr = mach_port_move_member(mach_task_self(), mp, mps);
-       if (slowpath(kr)) {
-               DISPATCH_VERIFY_MIG(kr);
-               switch (kr) {
-               case KERN_INVALID_RIGHT:
-                       if (mps) {
-                               _dispatch_bug_mach_client("_dispatch_kevent_machport_enable: "
-                                               "mach_port_move_member() failed ", kr);
-                               break;
-                       }
-                       //fall through
-               case KERN_INVALID_NAME:
-#if DISPATCH_DEBUG
-                       _dispatch_log("Corruption: Mach receive right 0x%x destroyed "
-                                       "prematurely", mp);
-#endif
-                       break;
-               default:
-                       (void)dispatch_assume_zero(kr);
-                       break;
-               }
-       }
-       return mps ? kr : 0;
+       dispatch_timer_source_refs_t dr = ds->ds_timer_refs;
+       return dsnprintf(buf, bufsiz, "timer = { target = 0x%llx, deadline = 0x%llx"
+                       ", interval = 0x%llx, flags = 0x%x }, ",
+                       (unsigned long long)dr->dt_timer.target,
+                       (unsigned long long)dr->dt_timer.deadline,
+                       (unsigned long long)dr->dt_timer.interval, dr->du_fflags);
 }
 
-static kern_return_t
-_dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags,
-               uint32_t del_flags)
+size_t
+_dispatch_source_debug(dispatch_source_t ds, char *buf, size_t bufsiz)
 {
-       kern_return_t kr = 0;
-       dispatch_assert_zero(new_flags & del_flags);
-       if ((new_flags & _DISPATCH_MACH_RECV_FLAGS) ||
-                       (del_flags & _DISPATCH_MACH_RECV_FLAGS)) {
-               mach_port_t mps;
-               if (new_flags & _DISPATCH_MACH_RECV_DIRECT_FLAGS) {
-                       mps = _dispatch_get_mach_recv_portset();
-               } else if ((new_flags & DISPATCH_MACH_RECV_MESSAGE) ||
-                               ((del_flags & _DISPATCH_MACH_RECV_DIRECT_FLAGS) &&
-                               (dk->dk_kevent.fflags & DISPATCH_MACH_RECV_MESSAGE))) {
-                       mps = _dispatch_get_mach_portset();
-               } else {
-                       mps = MACH_PORT_NULL;
-               }
-               kr = _dispatch_mach_portset_update(dk, mps);
+       dispatch_source_refs_t dr = ds->ds_refs;
+       size_t offset = 0;
+       offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
+                       dx_kind(ds), ds);
+       offset += _dispatch_object_debug_attr(ds, &buf[offset], bufsiz - offset);
+       offset += _dispatch_source_debug_attr(ds, &buf[offset], bufsiz - offset);
+       if (dr->du_is_timer) {
+               offset += _dispatch_timer_debug_attr(ds, &buf[offset], bufsiz - offset);
        }
-       return kr;
-}
-#endif // DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-
-static kern_return_t
-_dispatch_kevent_mach_notify_resume(dispatch_kevent_t dk, uint32_t new_flags,
-               uint32_t del_flags)
-{
-       kern_return_t kr = 0;
-       dispatch_assert_zero(new_flags & del_flags);
-       if ((new_flags & _DISPATCH_MACH_SP_FLAGS) ||
-                       (del_flags & _DISPATCH_MACH_SP_FLAGS)) {
-               // Requesting a (delayed) non-sync send-possible notification
-               // registers for both immediate dead-name notification and delayed-arm
-               // send-possible notification for the port.
-               // The send-possible notification is armed when a mach_msg() with
-               // MACH_SEND_NOTIFY to the port times out.
-               // If send-possible is unavailable, fall back to immediate dead-name
-               // registration rdar://problem/2527840&9008724
-               kr = _dispatch_mach_notify_update(dk, new_flags, del_flags,
-                               _DISPATCH_MACH_SP_FLAGS, MACH_NOTIFY_SEND_POSSIBLE,
-                               MACH_NOTIFY_SEND_POSSIBLE == MACH_NOTIFY_DEAD_NAME ? 1 : 0);
-       }
-       return kr;
+       offset += dsnprintf(&buf[offset], bufsiz - offset, "kevent = %p%s, "
+                       "filter = %s }", dr,  dr->du_is_direct ? " (direct)" : "",
+                       dr->du_type->dst_kind);
+       return offset;
 }
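
Put together, the helpers above emit a single-line description. An illustrative example for a direct read source (not captured output; all field values are made up):

	source[0x7f8e4a508610] = { xrefcnt = 0x2, refcnt = 0x2,
	target = com.example.queue[0x7f8e4a407940], ident = 0x4, mask = 0x0,
	pending_data = 0x0, registered = 1, armed = 1, deleted = 0,
	canceled = 0, kevent = 0x7f8e4a508700 (direct), filter = read }
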
-
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-DISPATCH_NOINLINE
-static void
-_dispatch_kevent_machport_drain(_dispatch_kevent_qos_s *ke)
-{
-       mach_port_t name = (mach_port_name_t)ke->data;
-       dispatch_kevent_t dk;
-
-       _dispatch_debug_machport(name);
-       dk = _dispatch_kevent_find(name, EVFILT_MACHPORT);
-       if (!dispatch_assume(dk)) {
-               return;
-       }
-       _dispatch_mach_portset_update(dk, MACH_PORT_NULL); // emulate EV_DISPATCH
-
-       _dispatch_kevent_qos_s kev = {
-               .ident = name,
-               .filter = EVFILT_MACHPORT,
-               .flags = EV_ADD|EV_ENABLE|EV_DISPATCH,
-               .fflags = DISPATCH_MACH_RECV_MESSAGE,
-               .udata = (uintptr_t)dk,
-       };
-       _dispatch_kevent_debug("synthetic", &kev);
-       _dispatch_kevent_merge(&kev);
-}
-#endif
-
-DISPATCH_NOINLINE
-static void
-_dispatch_kevent_mach_msg_drain(_dispatch_kevent_qos_s *ke)
-{
-       mach_msg_header_t *hdr = _dispatch_kevent_mach_msg_buf(ke);
-       mach_msg_size_t siz;
-       mach_msg_return_t kr = (mach_msg_return_t)ke->fflags;
-
-       if (!fastpath(hdr)) {
-               DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message");
-       }
-       if (fastpath(!kr)) {
-               _dispatch_kevent_mach_msg_recv(ke, hdr);
-               goto out;
-       } else if (kr != MACH_RCV_TOO_LARGE) {
-               goto out;
-       } else if (!ke->data) {
-               DISPATCH_INTERNAL_CRASH(0, "MACH_RCV_LARGE_IDENTITY with no identity");
-       }
-       if (slowpath(ke->ext[1] > (UINT_MAX - dispatch_mach_trailer_size))) {
-               DISPATCH_INTERNAL_CRASH(ke->ext[1],
-                               "EVFILT_MACHPORT with overlarge message");
-       }
-       siz = _dispatch_kevent_mach_msg_size(ke) + dispatch_mach_trailer_size;
-       hdr = malloc(siz);
-       if (!dispatch_assume(hdr)) {
-               // Kernel will discard message too large to fit
-               hdr = NULL;
-               siz = 0;
-       }
-       mach_port_t name = (mach_port_name_t)ke->data;
-       const mach_msg_option_t options = ((DISPATCH_MACH_RCV_OPTIONS |
-                       MACH_RCV_TIMEOUT) & ~MACH_RCV_LARGE);
-       kr = mach_msg(hdr, options, 0, siz, name, MACH_MSG_TIMEOUT_NONE,
-                       MACH_PORT_NULL);
-       if (fastpath(!kr)) {
-               _dispatch_kevent_mach_msg_recv(ke, hdr);
-               goto out;
-       } else if (kr == MACH_RCV_TOO_LARGE) {
-               _dispatch_log("BUG in libdispatch client: "
-                               "_dispatch_kevent_mach_msg_drain: dropped message too "
-                               "large to fit in memory: id = 0x%x, size = %u",
-                               hdr->msgh_id, _dispatch_kevent_mach_msg_size(ke));
-               kr = MACH_MSG_SUCCESS;
-       }
-       if (hdr != _dispatch_kevent_mach_msg_buf(ke)) {
-               free(hdr);
-       }
-out:
-       if (slowpath(kr)) {
-               _dispatch_bug_mach_client("_dispatch_kevent_mach_msg_drain: "
-                               "message reception failed", kr);
-       }
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_kevent_merge(_dispatch_kevent_qos_s *ke)
-{
-       if (unlikely(!(ke->flags & EV_UDATA_SPECIFIC))) {
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-               if (ke->ident == _dispatch_mach_recv_portset) {
-                       _dispatch_kevent_mach_msg_drain(ke);
-                       return _dispatch_kq_deferred_update(&_dispatch_mach_recv_kevent);
-               } else if (ke->ident == _dispatch_mach_portset) {
-                       return _dispatch_kevent_machport_drain(ke);
-               }
-#endif
-               return _dispatch_kevent_error(ke);
-       }
-
-       dispatch_kevent_t dk = (dispatch_kevent_t)ke->udata;
-       dispatch_source_refs_t dr = TAILQ_FIRST(&dk->dk_sources);
-       bool is_reply = (dk->dk_kevent.flags & EV_ONESHOT);
-       dispatch_source_t ds = _dispatch_source_from_refs(dr);
-
-       if (_dispatch_kevent_mach_msg_size(ke)) {
-               _dispatch_kevent_mach_msg_drain(ke);
-               if (is_reply) {
-                       // _dispatch_kevent_mach_msg_drain() should have deleted this event
-                       dispatch_assert(ke->flags & EV_DELETE);
-                       return;
-               }
-
-               if (!(ds->dq_atomic_flags & DSF_CANCELED)) {
-                       // re-arm the mach channel
-                       ke->fflags = DISPATCH_MACH_RCV_OPTIONS;
-                       ke->data = 0;
-                       ke->ext[0] = 0;
-                       ke->ext[1] = 0;
-                       return _dispatch_kq_deferred_update(ke);
-               }
-       } else if (is_reply) {
-               DISPATCH_INTERNAL_CRASH(ke->flags, "Unexpected EVFILT_MACHPORT event");
-       }
-       if (unlikely((ke->flags & EV_VANISHED) &&
-                       (dx_type(ds) == DISPATCH_MACH_CHANNEL_TYPE))) {
-               DISPATCH_CLIENT_CRASH(ke->flags,
-                               "Unexpected EV_VANISHED (do not destroy random mach ports)");
-       }
-       return _dispatch_kevent_merge(ke);
-}
-
-static void
-_dispatch_kevent_mach_msg_recv(_dispatch_kevent_qos_s *ke,
-               mach_msg_header_t *hdr)
-{
-       dispatch_source_refs_t dri;
-       dispatch_kevent_t dk;
-       mach_port_t name = hdr->msgh_local_port;
-       mach_msg_size_t siz = hdr->msgh_size + dispatch_mach_trailer_size;
-
-       if (!dispatch_assume(hdr->msgh_size <= UINT_MAX -
-                       dispatch_mach_trailer_size)) {
-               _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: "
-                               "received overlarge message");
-               return _dispatch_kevent_mach_msg_destroy(ke, hdr);
-       }
-       if (!dispatch_assume(name)) {
-               _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: "
-                               "received message with MACH_PORT_NULL port");
-               return _dispatch_kevent_mach_msg_destroy(ke, hdr);
-       }
-       _dispatch_debug_machport(name);
-       if (ke->flags & EV_UDATA_SPECIFIC) {
-               dk = (void*)ke->udata;
-       } else {
-               dk = _dispatch_kevent_find(name, EVFILT_MACHPORT);
-       }
-       if (!dispatch_assume(dk)) {
-               _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: "
-                               "received message with unknown kevent");
-               return _dispatch_kevent_mach_msg_destroy(ke, hdr);
-       }
-       TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) {
-               dispatch_source_t dsi = _dispatch_source_from_refs(dri);
-               if (dsi->ds_pending_data_mask & _DISPATCH_MACH_RECV_DIRECT_FLAGS) {
-                       return _dispatch_source_merge_mach_msg(dsi, dri, dk, ke, hdr, siz);
-               }
-       }
-       _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: "
-                       "received message with no listeners");
-       return _dispatch_kevent_mach_msg_destroy(ke, hdr);
-}
-
-static void
-_dispatch_kevent_mach_msg_destroy(_dispatch_kevent_qos_s *ke,
-               mach_msg_header_t *hdr)
-{
-       if (hdr) {
-               mach_msg_destroy(hdr);
-               if (hdr != _dispatch_kevent_mach_msg_buf(ke)) {
-                       free(hdr);
-               }
-       }
-}
-
-static void
-_dispatch_source_merge_mach_msg(dispatch_source_t ds, dispatch_source_refs_t dr,
-               dispatch_kevent_t dk, _dispatch_kevent_qos_s *ke,
-               mach_msg_header_t *hdr, mach_msg_size_t siz)
-{
-       if (dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE) {
-               return _dispatch_source_merge_mach_msg_direct(ds, ke, hdr);
-       }
-       dispatch_mach_reply_refs_t dmr = NULL;
-       if (dk->dk_kevent.flags & EV_ONESHOT) {
-               dmr = (dispatch_mach_reply_refs_t)dr;
-       }
-       return _dispatch_mach_msg_recv((dispatch_mach_t)ds, dmr, ke, hdr, siz);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_notify_merge(mach_port_t name, uint32_t flag, bool final)
-{
-       dispatch_source_refs_t dri, dr_next;
-       dispatch_kevent_t dk;
-       bool unreg;
-
-       dk = _dispatch_kevent_find(name, DISPATCH_EVFILT_MACH_NOTIFICATION);
-       if (!dk) {
-               return;
-       }
-
-       // Update notification registration state.
-       dk->dk_kevent.data &= ~_DISPATCH_MACH_SP_FLAGS;
-       _dispatch_kevent_qos_s kev = {
-               .ident = name,
-               .filter = DISPATCH_EVFILT_MACH_NOTIFICATION,
-               .flags = EV_ADD|EV_ENABLE,
-               .fflags = flag,
-               .udata = (uintptr_t)dk,
-       };
-       if (final) {
-               // This can never happen again
-               unreg = true;
-       } else {
-               // Re-register for notification before delivery
-               unreg = _dispatch_kevent_resume(dk, flag, 0);
-       }
-       DISPATCH_MACH_NOTIFICATION_ARMED(dk) = 0;
-       TAILQ_FOREACH_SAFE(dri, &dk->dk_sources, dr_list, dr_next) {
-               dispatch_source_t dsi = _dispatch_source_from_refs(dri);
-               if (dx_type(dsi) == DISPATCH_MACH_CHANNEL_TYPE) {
-                       dispatch_mach_t dm = (dispatch_mach_t)dsi;
-                       _dispatch_mach_merge_notification_kevent(dm, &kev);
-                       if (unreg && dm->dm_dkev) {
-                               _dispatch_mach_notification_kevent_unregister(dm);
-                       }
-               } else {
-                       _dispatch_source_merge_kevent(dsi, &kev);
-                       if (unreg) {
-                               _dispatch_source_kevent_unregister(dsi);
-                       }
-               }
-               if (!dr_next || DISPATCH_MACH_NOTIFICATION_ARMED(dk)) {
-                       // current merge is last in list (dk might have been freed)
-                       // or it re-armed the notification
-                       return;
-               }
-       }
-}
-
-static kern_return_t
-_dispatch_mach_notify_update(dispatch_kevent_t dk, uint32_t new_flags,
-               uint32_t del_flags, uint32_t mask, mach_msg_id_t notify_msgid,
-               mach_port_mscount_t notify_sync)
-{
-       mach_port_t previous, port = (mach_port_t)dk->dk_kevent.ident;
-       typeof(dk->dk_kevent.data) prev = dk->dk_kevent.data;
-       kern_return_t kr, krr = 0;
-
-       // Update notification registration state.
-       dk->dk_kevent.data |= (new_flags | dk->dk_kevent.fflags) & mask;
-       dk->dk_kevent.data &= ~(del_flags & mask);
-
-       _dispatch_debug_machport(port);
-       if ((dk->dk_kevent.data & mask) && !(prev & mask)) {
-               _dispatch_debug("machport[0x%08x]: registering for send-possible "
-                               "notification", port);
-               previous = MACH_PORT_NULL;
-               krr = mach_port_request_notification(mach_task_self(), port,
-                               notify_msgid, notify_sync, _dispatch_get_mach_notify_port(),
-                               MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
-               DISPATCH_VERIFY_MIG(krr);
-
-               switch(krr) {
-               case KERN_INVALID_NAME:
-               case KERN_INVALID_RIGHT:
-                       // Suppress errors & clear registration state
-                       dk->dk_kevent.data &= ~mask;
-                       break;
-               default:
-                       // Else, we don't expect any errors from mach. Log any errors
-                       if (dispatch_assume_zero(krr)) {
-                               // log the error & clear registration state
-                               dk->dk_kevent.data &= ~mask;
-                       } else if (dispatch_assume_zero(previous)) {
-                               // Another subsystem has beat libdispatch to requesting the
-                               // specified Mach notification on this port. We should
-                               // technically cache the previous port and message it when the
-                               // kernel messages our port. Or we can just say screw those
-                               // subsystems and deallocate the previous port.
-                               // They should adopt libdispatch :-P
-                               kr = mach_port_deallocate(mach_task_self(), previous);
-                               DISPATCH_VERIFY_MIG(kr);
-                               (void)dispatch_assume_zero(kr);
-                               previous = MACH_PORT_NULL;
-                       }
-               }
-       } else if (!(dk->dk_kevent.data & mask) && (prev & mask)) {
-               _dispatch_debug("machport[0x%08x]: unregistering for send-possible "
-                               "notification", port);
-               previous = MACH_PORT_NULL;
-               kr = mach_port_request_notification(mach_task_self(), port,
-                               notify_msgid, notify_sync, MACH_PORT_NULL,
-                               MACH_MSG_TYPE_MOVE_SEND_ONCE, &previous);
-               DISPATCH_VERIFY_MIG(kr);
-
-               switch (kr) {
-               case KERN_INVALID_NAME:
-               case KERN_INVALID_RIGHT:
-               case KERN_INVALID_ARGUMENT:
-                       break;
-               default:
-                       if (dispatch_assume_zero(kr)) {
-                               // log the error
-                       }
-               }
-       } else {
-               return 0;
-       }
-       if (slowpath(previous)) {
-               // the kernel has not consumed the send-once right yet
-               (void)dispatch_assume_zero(
-                               _dispatch_send_consume_send_once_right(previous));
-       }
-       return krr;
-}
-
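For reference, the kernel primitive behind the bookkeeping above is mach_port_request_notification(). A minimal standalone sketch of arming a send-possible notification (helper name hypothetical, error handling reduced to an assert):

#include <assert.h>
#include <mach/mach.h>
#include <mach/notify.h>

// Arm a send-possible notification for `port`, delivered as a message to
// `notify_port`. Returns the previously registered notify port, if any;
// the caller owns that send-once right and must consume it.
static mach_port_t
arm_send_possible(mach_port_t port, mach_port_t notify_port)
{
	mach_port_t previous = MACH_PORT_NULL;
	kern_return_t kr = mach_port_request_notification(mach_task_self(),
			port, MACH_NOTIFY_SEND_POSSIBLE, 0 /* sync */, notify_port,
			MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
	assert(kr == KERN_SUCCESS);
	return previous;
}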
-static void
-_dispatch_mach_host_notify_update(void *context DISPATCH_UNUSED)
-{
-       static int notify_type = HOST_NOTIFY_CALENDAR_SET;
-       kern_return_t kr;
-       _dispatch_debug("registering for calendar-change notification");
-retry:
-       kr = host_request_notification(_dispatch_get_mach_host_port(),
-                       notify_type, _dispatch_get_mach_notify_port());
-       // Fall back to the older _CHANGE variant when _SET is unsupported;
-       // it fires strictly more often than _SET.
-       if (kr == KERN_INVALID_ARGUMENT &&
-               notify_type != HOST_NOTIFY_CALENDAR_CHANGE) {
-               notify_type = HOST_NOTIFY_CALENDAR_CHANGE;
-               goto retry;
-       }
-       DISPATCH_VERIFY_MIG(kr);
-       (void)dispatch_assume_zero(kr);
-}
-
-static void
-_dispatch_mach_host_calendar_change_register(void)
-{
-       static dispatch_once_t pred;
-       dispatch_once_f(&pred, NULL, _dispatch_mach_host_notify_update);
-}
-
-static void
-_dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr)
-{
-       mig_reply_error_t reply;
-       dispatch_assert(sizeof(mig_reply_error_t) == sizeof(union
-               __ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem));
-       dispatch_assert(sizeof(mig_reply_error_t) < _dispatch_mach_recv_msg_size);
-       boolean_t success = libdispatch_internal_protocol_server(hdr, &reply.Head);
-       if (!success && reply.RetCode == MIG_BAD_ID &&
-                       (hdr->msgh_id == HOST_CALENDAR_SET_REPLYID ||
-                        hdr->msgh_id == HOST_CALENDAR_CHANGED_REPLYID)) {
-               _dispatch_debug("calendar-change notification");
-               _dispatch_timers_calendar_change();
-               _dispatch_mach_host_notify_update(NULL);
-               success = TRUE;
-               reply.RetCode = KERN_SUCCESS;
-       }
-       if (dispatch_assume(success) && reply.RetCode != MIG_NO_REPLY) {
-               (void)dispatch_assume_zero(reply.RetCode);
-       }
-       if (!success || (reply.RetCode && reply.RetCode != MIG_NO_REPLY)) {
-               mach_msg_destroy(hdr);
-       }
-}
-
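_dispatch_mach_notify_source_invoke() above is a MIG demux handler; the general shape of that pattern, sketched with a hypothetical generated server routine and out-of-band fallback, is:

// Generic MIG demux: let the generated server routine try first; when it
// rejects the message id (MIG_BAD_ID), handle known out-of-band messages
// by hand; destroy whatever could not be consumed so its rights and OOL
// memory are released.
static void
demux(mach_msg_header_t *hdr)
{
	mig_reply_error_t reply;
	boolean_t handled = some_subsystem_server(hdr, &reply.Head); // MIG-generated
	if (!handled && reply.RetCode == MIG_BAD_ID) {
		handled = handle_out_of_band(hdr); // hypothetical fallback
	}
	if (!handled) {
		mach_msg_destroy(hdr);
	}
}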
-kern_return_t
-_dispatch_mach_notify_port_deleted(mach_port_t notify DISPATCH_UNUSED,
-               mach_port_name_t name)
-{
-#if DISPATCH_DEBUG
-       _dispatch_log("Corruption: Mach send/send-once/dead-name right 0x%x "
-                       "deleted prematurely", name);
-#endif
-
-       _dispatch_debug_machport(name);
-       _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DELETED, true);
-
-       return KERN_SUCCESS;
-}
-
-kern_return_t
-_dispatch_mach_notify_dead_name(mach_port_t notify DISPATCH_UNUSED,
-               mach_port_name_t name)
-{
-       kern_return_t kr;
-
-       _dispatch_debug("machport[0x%08x]: dead-name notification", name);
-       _dispatch_debug_machport(name);
-       _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DEAD, true);
-
-       // the act of receiving a dead name notification allocates a dead-name
-       // right that must be deallocated
-       kr = mach_port_deallocate(mach_task_self(), name);
-       DISPATCH_VERIFY_MIG(kr);
-       //(void)dispatch_assume_zero(kr);
-
-       return KERN_SUCCESS;
-}
-
-kern_return_t
-_dispatch_mach_notify_send_possible(mach_port_t notify DISPATCH_UNUSED,
-               mach_port_name_t name)
-{
-       _dispatch_debug("machport[0x%08x]: send-possible notification", name);
-       _dispatch_debug_machport(name);
-       _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_POSSIBLE, false);
-
-       return KERN_SUCCESS;
-}
-
-#pragma mark -
-#pragma mark dispatch_mach_t
-
-#define DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT 0x1
-#define DISPATCH_MACH_REGISTER_FOR_REPLY 0x2
-#define DISPATCH_MACH_WAIT_FOR_REPLY 0x4
-#define DISPATCH_MACH_OWNED_REPLY_PORT 0x8
-#define DISPATCH_MACH_OPTIONS_MASK 0xffff
-
-#define DM_SEND_STATUS_SUCCESS 0x1
-#define DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT 0x2
-
-DISPATCH_ENUM(dispatch_mach_send_invoke_flags, uint32_t,
-       DM_SEND_INVOKE_NONE            = 0x0,
-       DM_SEND_INVOKE_FLUSH           = 0x1,
-       DM_SEND_INVOKE_NEEDS_BARRIER   = 0x2,
-       DM_SEND_INVOKE_CANCEL          = 0x4,
-       DM_SEND_INVOKE_CAN_RUN_BARRIER = 0x8,
-       DM_SEND_INVOKE_IMMEDIATE_SEND  = 0x10,
-);
-#define DM_SEND_INVOKE_IMMEDIATE_SEND_MASK \
-               ((dispatch_mach_send_invoke_flags_t)DM_SEND_INVOKE_IMMEDIATE_SEND)
-
-static inline pthread_priority_t _dispatch_mach_priority_propagate(
-               mach_msg_option_t options);
-static mach_port_t _dispatch_mach_msg_get_remote_port(dispatch_object_t dou);
-static mach_port_t _dispatch_mach_msg_get_reply_port(dispatch_object_t dou);
-static void _dispatch_mach_msg_disconnected(dispatch_mach_t dm,
-               mach_port_t local_port, mach_port_t remote_port);
-static inline void _dispatch_mach_msg_reply_received(dispatch_mach_t dm,
-               dispatch_mach_reply_refs_t dmr, mach_port_t local_port);
-static dispatch_mach_msg_t _dispatch_mach_msg_create_reply_disconnected(
-               dispatch_object_t dou, dispatch_mach_reply_refs_t dmr);
-static bool _dispatch_mach_reconnect_invoke(dispatch_mach_t dm,
-               dispatch_object_t dou);
-static inline mach_msg_header_t* _dispatch_mach_msg_get_msg(
-               dispatch_mach_msg_t dmsg);
-static void _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou,
-               pthread_priority_t pp);
-
-static dispatch_mach_t
-_dispatch_mach_create(const char *label, dispatch_queue_t q, void *context,
-               dispatch_mach_handler_function_t handler, bool handler_is_block)
-{
-       dispatch_mach_t dm;
-       dispatch_mach_refs_t dr;
-
-       dm = _dispatch_alloc(DISPATCH_VTABLE(mach),
-                       sizeof(struct dispatch_mach_s));
-       _dispatch_queue_init(dm->_as_dq, DQF_NONE, 1, true);
-
-       dm->dq_label = label;
-       dm->do_ref_cnt++; // the reference _dispatch_mach_cancel_invoke holds
-
-       dr = _dispatch_calloc(1ul, sizeof(struct dispatch_mach_refs_s));
-       dr->dr_source_wref = _dispatch_ptr2wref(dm);
-       dr->dm_handler_func = handler;
-       dr->dm_handler_ctxt = context;
-       dm->ds_refs = dr;
-       dm->dm_handler_is_block = handler_is_block;
-
-       dm->dm_refs = _dispatch_calloc(1ul,
-                       sizeof(struct dispatch_mach_send_refs_s));
-       dm->dm_refs->dr_source_wref = _dispatch_ptr2wref(dm);
-       dm->dm_refs->dm_disconnect_cnt = DISPATCH_MACH_NEVER_CONNECTED;
-       TAILQ_INIT(&dm->dm_refs->dm_replies);
-
-       if (slowpath(!q)) {
-               q = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true);
-       } else {
-               _dispatch_retain(q);
-       }
-       dm->do_targetq = q;
-       _dispatch_object_debug(dm, "%s", __func__);
-       return dm;
-}
-
-dispatch_mach_t
-dispatch_mach_create(const char *label, dispatch_queue_t q,
-               dispatch_mach_handler_t handler)
-{
-       dispatch_block_t bb = _dispatch_Block_copy((void*)handler);
-       return _dispatch_mach_create(label, q, bb,
-                       (dispatch_mach_handler_function_t)_dispatch_Block_invoke(bb), true);
-}
-
-dispatch_mach_t
-dispatch_mach_create_f(const char *label, dispatch_queue_t q, void *context,
-               dispatch_mach_handler_function_t handler)
-{
-       return _dispatch_mach_create(label, q, context, handler, false);
-}
-
-void
-_dispatch_mach_dispose(dispatch_mach_t dm)
-{
-       _dispatch_object_debug(dm, "%s", __func__);
-       dispatch_mach_refs_t dr = dm->ds_refs;
-       if (dm->dm_handler_is_block && dr->dm_handler_ctxt) {
-               Block_release(dr->dm_handler_ctxt);
-       }
-       free(dr);
-       free(dm->dm_refs);
-       _dispatch_queue_destroy(dm->_as_dq);
-}
-
-void
-dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive,
-               mach_port_t send, dispatch_mach_msg_t checkin)
-{
-       dispatch_mach_send_refs_t dr = dm->dm_refs;
-       dispatch_kevent_t dk;
-       uint32_t disconnect_cnt;
-       dispatch_source_type_t type = &_dispatch_source_type_mach_recv_direct;
-
-       dm->ds_is_direct_kevent = (bool)_dispatch_evfilt_machport_direct_enabled;
-       if (MACH_PORT_VALID(receive)) {
-               dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s));
-               dk->dk_kevent = type->ke;
-               dk->dk_kevent.ident = receive;
-               dk->dk_kevent.flags |= EV_ADD|EV_ENABLE|EV_VANISHED;
-               dk->dk_kevent.udata = (uintptr_t)dk;
-               TAILQ_INIT(&dk->dk_sources);
-               dm->ds_dkev = dk;
-               dm->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE_DIRECT;
-               dm->ds_needs_rearm = dm->ds_is_direct_kevent;
-               if (!dm->ds_is_direct_kevent) {
-                       dk->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT;
-                       dk->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED);
-               }
-               _dispatch_retain(dm); // the reference the manager queue holds
-       }
-       dr->dm_send = send;
-       if (MACH_PORT_VALID(send)) {
-               if (checkin) {
-                       dispatch_retain(checkin);
-                       checkin->dmsg_options = _dispatch_mach_checkin_options();
-                       dr->dm_checkin_port = _dispatch_mach_msg_get_remote_port(checkin);
-               }
-               dr->dm_checkin = checkin;
-       }
-       // monitor message reply ports
-       dm->ds_pending_data_mask |= DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE;
-       dispatch_assert(DISPATCH_MACH_NEVER_CONNECTED - 1 ==
-                       DISPATCH_MACH_NEVER_INSTALLED);
-       disconnect_cnt = os_atomic_dec2o(dr, dm_disconnect_cnt, release);
-       if (unlikely(disconnect_cnt != DISPATCH_MACH_NEVER_INSTALLED)) {
-               DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel already connected");
-       }
-       _dispatch_object_debug(dm, "%s", __func__);
-       return dispatch_activate(dm);
-}
-
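Putting dispatch_mach_create() and dispatch_mach_connect() together, typical client usage of this private API looks like the sketch below (recv_port and send_port are assumed to already hold the corresponding receive and send rights):

dispatch_mach_t channel = dispatch_mach_create("com.example.channel",
		dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0),
		^(dispatch_mach_reason_t reason, dispatch_mach_msg_t msg,
				mach_error_t error) {
	switch (reason) {
	case DISPATCH_MACH_MESSAGE_RECEIVED:
		// consume msg
		break;
	case DISPATCH_MACH_DISCONNECTED:
		// tear down or reconnect
		break;
	default:
		break;
	}
});
dispatch_mach_connect(channel, recv_port, send_port, NULL);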
-// assumes low bit of mach port names is always set
-#define DISPATCH_MACH_REPLY_PORT_UNOWNED 0x1u
-
-static inline void
-_dispatch_mach_reply_mark_reply_port_owned(dispatch_mach_reply_refs_t dmr)
-{
-       dmr->dmr_reply &= ~DISPATCH_MACH_REPLY_PORT_UNOWNED;
-}
-
-static inline bool
-_dispatch_mach_reply_is_reply_port_owned(dispatch_mach_reply_refs_t dmr)
-{
-       mach_port_t reply_port = dmr->dmr_reply;
-       if (!reply_port) return false;
-       return !(reply_port & DISPATCH_MACH_REPLY_PORT_UNOWNED);
-}
-
-static inline mach_port_t
-_dispatch_mach_reply_get_reply_port(dispatch_mach_reply_refs_t dmr)
-{
-       mach_port_t reply_port = dmr->dmr_reply;
-       return reply_port ? (reply_port | DISPATCH_MACH_REPLY_PORT_UNOWNED) : 0;
-}
-
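A worked example of the tagging scheme used by the three helpers above (relying on the stated invariant that userspace port names have their low bit set):

// dmr_reply = 0x2603  -> low bit set: borrowed (unowned) thread reply port
// mark_reply_port_owned():  0x2603 & ~0x1 == 0x2602  -> owned
// is_reply_port_owned():    !(0x2602 & 0x1) == true
// get_reply_port():         0x2602 | 0x1  == 0x2603  -> original name back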
-static inline bool
-_dispatch_mach_reply_tryremove(dispatch_mach_t dm,
-               dispatch_mach_reply_refs_t dmr)
-{
-       bool removed;
-       _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock);
-       if ((removed = _TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
-               TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list);
-               _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
-       }
-       _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock);
-       return removed;
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_reply_waiter_unregister(dispatch_mach_t dm,
-               dispatch_mach_reply_refs_t dmr, unsigned int options)
-{
-       dispatch_mach_msg_t dmsgr = NULL;
-       bool disconnected = (options & DKEV_UNREGISTER_DISCONNECTED);
-       if (options & DKEV_UNREGISTER_REPLY_REMOVE) {
-               _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock);
-               if (unlikely(!_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
-                       DISPATCH_INTERNAL_CRASH(0, "Could not find reply registration");
-               }
-               TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list);
-               _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
-               _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock);
-       }
-       if (disconnected) {
-               dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr);
-       } else if (dmr->dmr_voucher) {
-               _voucher_release(dmr->dmr_voucher);
-               dmr->dmr_voucher = NULL;
-       }
-       _dispatch_debug("machport[0x%08x]: unregistering for sync reply%s, ctxt %p",
-                       _dispatch_mach_reply_get_reply_port(dmr),
-                       disconnected ? " (disconnected)" : "", dmr->dmr_ctxt);
-       if (dmsgr) {
-               return _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority);
-       }
-       dispatch_assert(!(options & DKEV_UNREGISTER_WAKEUP));
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm,
-               dispatch_mach_reply_refs_t dmr, unsigned int options)
-{
-       dispatch_mach_msg_t dmsgr = NULL;
-       bool replies_empty = false;
-       bool disconnected = (options & DKEV_UNREGISTER_DISCONNECTED);
-       if (options & DKEV_UNREGISTER_REPLY_REMOVE) {
-               _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock);
-               if (unlikely(!_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
-                       DISPATCH_INTERNAL_CRASH(0, "Could not find reply registration");
-               }
-               TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list);
-               _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
-               replies_empty = TAILQ_EMPTY(&dm->dm_refs->dm_replies);
-               _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock);
-       }
-       if (disconnected) {
-               dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr);
-       } else if (dmr->dmr_voucher) {
-               _voucher_release(dmr->dmr_voucher);
-               dmr->dmr_voucher = NULL;
-       }
-       uint32_t flags = DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE;
-       dispatch_kevent_t dk = dmr->dmr_dkev;
-       _dispatch_debug("machport[0x%08x]: unregistering for reply%s, ctxt %p",
-                       (mach_port_t)dk->dk_kevent.ident,
-                       disconnected ? " (disconnected)" : "", dmr->dmr_ctxt);
-       if (!dm->ds_is_direct_kevent) {
-               dmr->dmr_dkev = NULL;
-               TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dmr, dr_list);
-               _dispatch_kevent_unregister(dk, flags, 0);
-       } else {
-               long r = _dispatch_kevent_unregister(dk, flags, options);
-               if (r == EINPROGRESS) {
-                       _dispatch_debug("machport[0x%08x]: deferred delete kevent[%p]",
-                                       (mach_port_t)dk->dk_kevent.ident, dk);
-                       dispatch_assert(options == DKEV_UNREGISTER_DISCONNECTED);
-                       // dmr must be put back so that the event delivery finds it, the
-                       // replies lock is held by the caller.
-                       TAILQ_INSERT_HEAD(&dm->dm_refs->dm_replies, dmr, dmr_list);
-                       if (dmsgr) {
-                               dmr->dmr_voucher = dmsgr->dmsg_voucher;
-                               dmsgr->dmsg_voucher = NULL;
-                               dispatch_release(dmsgr);
-                       }
-                       return; // deferred unregistration
-               }
-               dispatch_assume_zero(r);
-               dmr->dmr_dkev = NULL;
-               _TAILQ_TRASH_ENTRY(dmr, dr_list);
-       }
-       free(dmr);
-       if (dmsgr) {
-               return _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority);
-       }
-       if ((options & DKEV_UNREGISTER_WAKEUP) && replies_empty &&
-                       (dm->dm_refs->dm_disconnect_cnt ||
-                       (dm->dq_atomic_flags & DSF_CANCELED))) {
-               dx_wakeup(dm, 0, DISPATCH_WAKEUP_FLUSH);
-       }
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_reply_waiter_register(dispatch_mach_t dm,
-               dispatch_mach_reply_refs_t dmr, mach_port_t reply_port,
-               dispatch_mach_msg_t dmsg, mach_msg_option_t msg_opts)
-{
-       dmr->dr_source_wref = _dispatch_ptr2wref(dm);
-       dmr->dmr_dkev = NULL;
-       dmr->dmr_reply = reply_port;
-       if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) {
-               _dispatch_mach_reply_mark_reply_port_owned(dmr);
-       } else {
-               if (dmsg->dmsg_voucher) {
-                       dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher);
-               }
-               dmr->dmr_priority = (dispatch_priority_t)dmsg->dmsg_priority;
-               // make reply context visible to leaks rdar://11777199
-               dmr->dmr_ctxt = dmsg->do_ctxt;
-       }
-
-       _dispatch_debug("machport[0x%08x]: registering for sync reply, ctxt %p",
-                       reply_port, dmsg->do_ctxt);
-       _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock);
-       if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
-               DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev,
-                               "Reply already registered");
-       }
-       TAILQ_INSERT_TAIL(&dm->dm_refs->dm_replies, dmr, dmr_list);
-       _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply_port,
-               dispatch_mach_msg_t dmsg)
-{
-       dispatch_kevent_t dk;
-       dispatch_mach_reply_refs_t dmr;
-       dispatch_source_type_t type = &_dispatch_source_type_mach_recv_direct;
-       pthread_priority_t mp, pp;
-
-       dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s));
-       dk->dk_kevent = type->ke;
-       dk->dk_kevent.ident = reply_port;
-       dk->dk_kevent.flags |= EV_ADD|EV_ENABLE|EV_ONESHOT;
-       dk->dk_kevent.udata = (uintptr_t)dk;
-       TAILQ_INIT(&dk->dk_sources);
-       if (!dm->ds_is_direct_kevent) {
-               dk->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE;
-               dk->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED);
-       }
-
-       dmr = _dispatch_calloc(1ul, sizeof(struct dispatch_mach_reply_refs_s));
-       dmr->dr_source_wref = _dispatch_ptr2wref(dm);
-       dmr->dmr_dkev = dk;
-       dmr->dmr_reply = reply_port;
-       if (dmsg->dmsg_voucher) {
-               dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher);
-       }
-       dmr->dmr_priority = (dispatch_priority_t)dmsg->dmsg_priority;
-       // make reply context visible to leaks rdar://11777199
-       dmr->dmr_ctxt = dmsg->do_ctxt;
-
-       pp = dm->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK;
-       if (pp && dm->ds_is_direct_kevent) {
-               mp = dmsg->dmsg_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK;
-               if (pp < mp) pp = mp;
-               pp |= dm->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
-       } else {
-               pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
-       }
-
-       _dispatch_debug("machport[0x%08x]: registering for reply, ctxt %p",
-                       reply_port, dmsg->do_ctxt);
-       uint32_t flags;
-       bool do_resume = _dispatch_kevent_register(&dmr->dmr_dkev, pp, &flags);
-       TAILQ_INSERT_TAIL(&dmr->dmr_dkev->dk_sources, (dispatch_source_refs_t)dmr,
-                       dr_list);
-       _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock);
-       if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
-               DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev,
-                               "Reply already registered");
-       }
-       TAILQ_INSERT_TAIL(&dm->dm_refs->dm_replies, dmr, dmr_list);
-       _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock);
-       if (do_resume && _dispatch_kevent_resume(dmr->dmr_dkev, flags, 0)) {
-               return _dispatch_mach_reply_kevent_unregister(dm, dmr,
-                               DKEV_UNREGISTER_DISCONNECTED|DKEV_UNREGISTER_REPLY_REMOVE);
-       }
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm)
-{
-       DISPATCH_ASSERT_ON_MANAGER_QUEUE();
-       dispatch_kevent_t dk = dm->dm_dkev;
-       dm->dm_dkev = NULL;
-       TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dm->dm_refs,
-                       dr_list);
-       dm->ds_pending_data_mask &= ~(unsigned long)
-                       (DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD);
-       _dispatch_kevent_unregister(dk,
-                       DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD, 0);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_notification_kevent_register(dispatch_mach_t dm,
-               mach_port_t send)
-{
-       DISPATCH_ASSERT_ON_MANAGER_QUEUE();
-       dispatch_kevent_t dk;
-
-       dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s));
-       dk->dk_kevent = _dispatch_source_type_mach_send.ke;
-       dk->dk_kevent.ident = send;
-       dk->dk_kevent.flags |= EV_ADD|EV_ENABLE;
-       dk->dk_kevent.fflags = DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD;
-       dk->dk_kevent.udata = (uintptr_t)dk;
-       TAILQ_INIT(&dk->dk_sources);
-
-       dm->ds_pending_data_mask |= dk->dk_kevent.fflags;
-
-       uint32_t flags;
-       bool do_resume = _dispatch_kevent_register(&dk,
-                       _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, &flags);
-       TAILQ_INSERT_TAIL(&dk->dk_sources,
-                       (dispatch_source_refs_t)dm->dm_refs, dr_list);
-       dm->dm_dkev = dk;
-       if (do_resume && _dispatch_kevent_resume(dm->dm_dkev, flags, 0)) {
-               _dispatch_mach_notification_kevent_unregister(dm);
-       }
-}
-
-static mach_port_t
-_dispatch_get_thread_reply_port(void)
-{
-       mach_port_t reply_port, mrp = _dispatch_get_thread_mig_reply_port();
-       if (mrp) {
-               reply_port = mrp;
-               _dispatch_debug("machport[0x%08x]: borrowed thread sync reply port",
-                               reply_port);
-       } else {
-               reply_port = mach_reply_port();
-               _dispatch_set_thread_mig_reply_port(reply_port);
-               _dispatch_debug("machport[0x%08x]: allocated thread sync reply port",
-                               reply_port);
-       }
-       _dispatch_debug_machport(reply_port);
-       return reply_port;
-}
-
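This borrows the same caching idea as MIG's mig_get_reply_port(): one receive right per thread, reused across synchronous waits. A minimal sketch using a pthread TLS key (key setup assumed done once at startup; names hypothetical):

#include <mach/mach.h>
#include <pthread.h>

static pthread_key_t reply_port_key; // created once with pthread_key_create()

static mach_port_t
thread_reply_port(void)
{
	mach_port_t port = (mach_port_t)(uintptr_t)
			pthread_getspecific(reply_port_key);
	if (!MACH_PORT_VALID(port)) {
		port = mach_reply_port(); // fresh per-thread receive right
		pthread_setspecific(reply_port_key, (void *)(uintptr_t)port);
	}
	return port;
}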
-static void
-_dispatch_clear_thread_reply_port(mach_port_t reply_port)
-{
-       mach_port_t mrp = _dispatch_get_thread_mig_reply_port();
-       if (reply_port != mrp) {
-               if (mrp) {
-                       _dispatch_debug("machport[0x%08x]: did not clear thread sync reply "
-                                       "port (found 0x%08x)", reply_port, mrp);
-               }
-               return;
-       }
-       _dispatch_set_thread_mig_reply_port(MACH_PORT_NULL);
-       _dispatch_debug_machport(reply_port);
-       _dispatch_debug("machport[0x%08x]: cleared thread sync reply port",
-                       reply_port);
-}
-
-static void
-_dispatch_set_thread_reply_port(mach_port_t reply_port)
-{
-       _dispatch_debug_machport(reply_port);
-       mach_port_t mrp = _dispatch_get_thread_mig_reply_port();
-       if (mrp) {
-               kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port,
-                               MACH_PORT_RIGHT_RECEIVE, -1);
-               DISPATCH_VERIFY_MIG(kr);
-               dispatch_assume_zero(kr);
-               _dispatch_debug("machport[0x%08x]: deallocated sync reply port "
-                               "(found 0x%08x)", reply_port, mrp);
-       } else {
-               _dispatch_set_thread_mig_reply_port(reply_port);
-               _dispatch_debug("machport[0x%08x]: restored thread sync reply port",
-                               reply_port);
-       }
-}
-
-static inline mach_port_t
-_dispatch_mach_msg_get_remote_port(dispatch_object_t dou)
-{
-       mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg);
-       mach_port_t remote = hdr->msgh_remote_port;
-       return remote;
-}
-
-static inline mach_port_t
-_dispatch_mach_msg_get_reply_port(dispatch_object_t dou)
-{
-       mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg);
-       mach_port_t local = hdr->msgh_local_port;
-       if (!MACH_PORT_VALID(local) || MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) !=
-                       MACH_MSG_TYPE_MAKE_SEND_ONCE) return MACH_PORT_NULL;
-       return local;
-}
-
-static inline void
-_dispatch_mach_msg_set_reason(dispatch_mach_msg_t dmsg, mach_error_t err,
-               unsigned long reason)
-{
-       dispatch_assert_zero(reason & ~(unsigned long)code_emask);
-       dmsg->dmsg_error = ((err || !reason) ? err :
-                        err_local|err_sub(0x3e0)|(mach_error_t)reason);
-}
-
-static inline unsigned long
-_dispatch_mach_msg_get_reason(dispatch_mach_msg_t dmsg, mach_error_t *err_ptr)
-{
-       mach_error_t err = dmsg->dmsg_error;
-
-       dmsg->dmsg_error = 0;
-       if ((err & system_emask) == err_local && err_get_sub(err) == 0x3e0) {
-               *err_ptr = 0;
-               return err_get_code(err);
-       }
-       *err_ptr = err;
-       return err ? DISPATCH_MACH_MESSAGE_SEND_FAILED : DISPATCH_MACH_MESSAGE_SENT;
-}
-
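The two helpers above smuggle a dispatch reason through the message's mach_error_t slot using the <mach/error.h> packing macros; a worked example of the round trip:

// Packing a reason (no real send error, reason fits in code_emask):
//   err = err_local | err_sub(0x3e0) | DISPATCH_MACH_MESSAGE_NOT_SENT
// Decoding tests the same fields:
//   (err & system_emask) == err_local   -> dispatch-private encoding
//   err_get_sub(err) == 0x3e0           -> dispatch mach "subsystem"
//   err_get_code(err)                   -> the original reason, *err_ptr = 0
// A genuine mach_msg() error is stored unmodified, so decoding yields
// DISPATCH_MACH_MESSAGE_SEND_FAILED with *err_ptr = kr.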
-static void
-_dispatch_mach_msg_recv(dispatch_mach_t dm, dispatch_mach_reply_refs_t dmr,
-               _dispatch_kevent_qos_s *ke, mach_msg_header_t *hdr, mach_msg_size_t siz)
-{
-       _dispatch_debug_machport(hdr->msgh_remote_port);
-       _dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x",
-                       hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port);
-       bool canceled = (dm->dq_atomic_flags & DSF_CANCELED);
-       if (!dmr && canceled) {
-               // message received after cancellation, _dispatch_mach_kevent_merge is
-               // responsible for mach channel source state (e.g. deferred deletion)
-               return _dispatch_kevent_mach_msg_destroy(ke, hdr);
-       }
-       dispatch_mach_msg_t dmsg;
-       voucher_t voucher;
-       pthread_priority_t priority;
-       void *ctxt = NULL;
-       if (dmr) {
-               _voucher_mach_msg_clear(hdr, false); // deallocate reply message voucher
-               voucher = dmr->dmr_voucher;
-               dmr->dmr_voucher = NULL; // transfer reference
-               priority = dmr->dmr_priority;
-               ctxt = dmr->dmr_ctxt;
-               unsigned int options = DKEV_DISPOSE_IMMEDIATE_DELETE;
-               options |= DKEV_UNREGISTER_REPLY_REMOVE;
-               options |= DKEV_UNREGISTER_WAKEUP;
-               if (canceled) options |= DKEV_UNREGISTER_DISCONNECTED;
-               _dispatch_mach_reply_kevent_unregister(dm, dmr, options);
-               ke->flags |= EV_DELETE; // remember that unregister deleted the event
-               if (canceled) return;
-       } else {
-               voucher = voucher_create_with_mach_msg(hdr);
-               priority = _voucher_get_priority(voucher);
-       }
-       dispatch_mach_msg_destructor_t destructor;
-       destructor = (hdr == _dispatch_kevent_mach_msg_buf(ke)) ?
-                       DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT :
-                       DISPATCH_MACH_MSG_DESTRUCTOR_FREE;
-       dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL);
-       if (hdr == _dispatch_kevent_mach_msg_buf(ke)) {
-               _dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move, (uint64_t)hdr,
-                               (uint64_t)dmsg->dmsg_buf);
-       }
-       dmsg->dmsg_voucher = voucher;
-       dmsg->dmsg_priority = priority;
-       dmsg->do_ctxt = ctxt;
-       _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_MESSAGE_RECEIVED);
-       _dispatch_voucher_debug("mach-msg[%p] create", voucher, dmsg);
-       _dispatch_voucher_ktrace_dmsg_push(dmsg);
-       return _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline dispatch_mach_msg_t
-_dispatch_mach_msg_reply_recv(dispatch_mach_t dm,
-               dispatch_mach_reply_refs_t dmr, mach_port_t reply_port)
-{
-       if (slowpath(!MACH_PORT_VALID(reply_port))) {
-               DISPATCH_CLIENT_CRASH(reply_port, "Invalid reply port");
-       }
-       void *ctxt = dmr->dmr_ctxt;
-       mach_msg_header_t *hdr, *hdr2 = NULL;
-       void *hdr_copyout_addr;
-       mach_msg_size_t siz, msgsiz = 0;
-       mach_msg_return_t kr;
-       mach_msg_option_t options;
-       siz = mach_vm_round_page(_dispatch_mach_recv_msg_size +
-                       dispatch_mach_trailer_size);
-       hdr = alloca(siz);
-       // note: cast to an address before adding, so the offset is in bytes
-       // rather than in mach_msg_header_t units
-       for (mach_vm_address_t p =
-                       mach_vm_trunc_page((mach_vm_address_t)hdr + vm_page_size);
-                       p < (mach_vm_address_t)hdr + siz; p += vm_page_size) {
-               *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard
-       }
-       options = DISPATCH_MACH_RCV_OPTIONS & (~MACH_RCV_VOUCHER);
-retry:
-       _dispatch_debug_machport(reply_port);
-       _dispatch_debug("machport[0x%08x]: MACH_RCV_MSG %s", reply_port,
-                       (options & MACH_RCV_TIMEOUT) ? "poll" : "wait");
-       kr = mach_msg(hdr, options, 0, siz, reply_port, MACH_MSG_TIMEOUT_NONE,
-                       MACH_PORT_NULL);
-       hdr_copyout_addr = hdr;
-       _dispatch_debug_machport(reply_port);
-       _dispatch_debug("machport[0x%08x]: MACH_RCV_MSG (size %u, opts 0x%x) "
-                       "returned: %s - 0x%x", reply_port, siz, options,
-                       mach_error_string(kr), kr);
-       switch (kr) {
-       case MACH_RCV_TOO_LARGE:
-               if (!fastpath(hdr->msgh_size <= UINT_MAX -
-                               dispatch_mach_trailer_size)) {
-                       DISPATCH_CLIENT_CRASH(hdr->msgh_size, "Overlarge message");
-               }
-               if (options & MACH_RCV_LARGE) {
-                       msgsiz = hdr->msgh_size + dispatch_mach_trailer_size;
-                       hdr2 = malloc(msgsiz);
-                       if (dispatch_assume(hdr2)) {
-                               hdr = hdr2;
-                               siz = msgsiz;
-                       }
-                       options |= MACH_RCV_TIMEOUT;
-                       options &= ~MACH_RCV_LARGE;
-                       goto retry;
-               }
-               _dispatch_log("BUG in libdispatch client: "
-                               "dispatch_mach_send_and_wait_for_reply: dropped message too "
-                               "large to fit in memory: id = 0x%x, size = %u", hdr->msgh_id,
-                               hdr->msgh_size);
-               break;
-       case MACH_RCV_INVALID_NAME: // rdar://problem/21963848
-       case MACH_RCV_PORT_CHANGED: // rdar://problem/21885327
-       case MACH_RCV_PORT_DIED:
-               // channel was disconnected/canceled and reply port destroyed
-               _dispatch_debug("machport[0x%08x]: sync reply port destroyed, ctxt %p: "
-                               "%s - 0x%x", reply_port, ctxt, mach_error_string(kr), kr);
-               goto out;
-       case MACH_MSG_SUCCESS:
-               if (hdr->msgh_remote_port) {
-                       _dispatch_debug_machport(hdr->msgh_remote_port);
-               }
-               _dispatch_debug("machport[0x%08x]: received msg id 0x%x, size = %u, "
-                               "reply on 0x%08x", hdr->msgh_local_port, hdr->msgh_id,
-                               hdr->msgh_size, hdr->msgh_remote_port);
-               siz = hdr->msgh_size + dispatch_mach_trailer_size;
-               if (hdr2 && siz < msgsiz) {
-                       // shrink the oversized buffer down to the received size
-                       void *shrink = realloc(hdr2, siz);
-                       if (shrink) hdr = hdr2 = shrink;
-               }
-               break;
-       default:
-               dispatch_assume_zero(kr);
-               break;
-       }
-       _dispatch_mach_msg_reply_received(dm, dmr, hdr->msgh_local_port);
-       hdr->msgh_local_port = MACH_PORT_NULL;
-       if (slowpath((dm->dq_atomic_flags & DSF_CANCELED) || kr)) {
-               if (!kr) mach_msg_destroy(hdr);
-               goto out;
-       }
-       dispatch_mach_msg_t dmsg;
-       dispatch_mach_msg_destructor_t destructor = (!hdr2) ?
-                       DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT :
-                       DISPATCH_MACH_MSG_DESTRUCTOR_FREE;
-       dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL);
-       if (!hdr2 || hdr != hdr_copyout_addr) {
-               _dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move,
-                               (uint64_t)hdr_copyout_addr,
-                               (uint64_t)_dispatch_mach_msg_get_msg(dmsg));
-       }
-       dmsg->do_ctxt = ctxt;
-       return dmsg;
-out:
-       free(hdr2);
-       return NULL;
-}
-
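The MACH_RCV_TOO_LARGE handling above is the standard MACH_RCV_LARGE two-step: receive into a fixed buffer, and when the kernel reports the queued message is bigger, grow the buffer to the msgh_size hint and retry. A condensed, self-contained sketch of just that pattern (rcv_port assumed to hold a receive right):

#include <mach/mach.h>
#include <stdlib.h>

// Returns a heap buffer holding one received message (caller frees),
// or NULL on failure.
static mach_msg_header_t *
recv_any_size(mach_port_t rcv_port)
{
	mach_msg_size_t siz = 512;
	mach_msg_header_t *hdr = malloc(siz);
	if (!hdr) return NULL;
	mach_msg_return_t kr = mach_msg(hdr, MACH_RCV_MSG | MACH_RCV_LARGE, 0,
			siz, rcv_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	if (kr == MACH_RCV_TOO_LARGE) {
		// message left queued; kernel filled in its actual size
		siz = hdr->msgh_size + MAX_TRAILER_SIZE;
		mach_msg_header_t *bigger = realloc(hdr, siz);
		if (!bigger) { free(hdr); return NULL; }
		hdr = bigger;
		kr = mach_msg(hdr, MACH_RCV_MSG, 0, siz, rcv_port,
				MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	}
	if (kr != MACH_MSG_SUCCESS) { free(hdr); return NULL; }
	return hdr;
}

Note that _dispatch_mach_msg_reply_recv() additionally sets MACH_RCV_TIMEOUT (with a zero timeout) on the retry, so a message consumed in the meantime becomes a quick poll failure rather than an indefinite block.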
-static inline void
-_dispatch_mach_msg_reply_received(dispatch_mach_t dm,
-               dispatch_mach_reply_refs_t dmr, mach_port_t local_port)
-{
-       bool removed = _dispatch_mach_reply_tryremove(dm, dmr);
-       if (!MACH_PORT_VALID(local_port) || !removed) {
-               // port moved/destroyed during receive, or reply waiter was never
-               // registered or already removed (disconnected)
-               return;
-       }
-       mach_port_t reply_port = _dispatch_mach_reply_get_reply_port(dmr);
-       _dispatch_debug("machport[0x%08x]: unregistered for sync reply, ctxt %p",
-                       reply_port, dmr->dmr_ctxt);
-       if (_dispatch_mach_reply_is_reply_port_owned(dmr)) {
-               _dispatch_set_thread_reply_port(reply_port);
-               if (local_port != reply_port) {
-                       DISPATCH_CLIENT_CRASH(local_port,
-                                       "Reply received on unexpected port");
-               }
-               return;
-       }
-       mach_msg_header_t *hdr;
-       dispatch_mach_msg_t dmsg;
-       dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t),
-                       DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
-       hdr->msgh_local_port = local_port;
-       dmsg->dmsg_voucher = dmr->dmr_voucher;
-       dmr->dmr_voucher = NULL;  // transfer reference
-       dmsg->dmsg_priority = dmr->dmr_priority;
-       dmsg->do_ctxt = dmr->dmr_ctxt;
-       _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_REPLY_RECEIVED);
-       return _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority);
-}
-
-static inline void
-_dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port,
-               mach_port_t remote_port)
-{
-       mach_msg_header_t *hdr;
-       dispatch_mach_msg_t dmsg;
-       dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t),
-                       DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
-       if (local_port) hdr->msgh_local_port = local_port;
-       if (remote_port) hdr->msgh_remote_port = remote_port;
-       _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_DISCONNECTED);
-       _dispatch_debug("machport[0x%08x]: %s right disconnected", local_port ?
-                       local_port : remote_port, local_port ? "receive" : "send");
-       return _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority);
-}
-
-static inline dispatch_mach_msg_t
-_dispatch_mach_msg_create_reply_disconnected(dispatch_object_t dou,
-               dispatch_mach_reply_refs_t dmr)
-{
-       dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr;
-       mach_port_t reply_port = dmsg ? dmsg->dmsg_reply :
-                       _dispatch_mach_reply_get_reply_port(dmr);
-       voucher_t v;
-
-       if (!reply_port) {
-               if (!dmsg) {
-                       v = dmr->dmr_voucher;
-                       dmr->dmr_voucher = NULL; // transfer reference
-                       if (v) _voucher_release(v);
-               }
-               return NULL;
-       }
-
-       if (dmsg) {
-               v = dmsg->dmsg_voucher;
-               if (v) _voucher_retain(v);
-       } else {
-               v = dmr->dmr_voucher;
-               dmr->dmr_voucher = NULL; // transfer reference
-       }
-
-       if ((dmsg && (dmsg->dmsg_options & DISPATCH_MACH_WAIT_FOR_REPLY) &&
-                       (dmsg->dmsg_options & DISPATCH_MACH_OWNED_REPLY_PORT)) ||
-                       (dmr && !dmr->dmr_dkev &&
-                       _dispatch_mach_reply_is_reply_port_owned(dmr))) {
-               if (v) _voucher_release(v);
-               // deallocate owned reply port to break _dispatch_mach_msg_reply_recv
-               // out of waiting in mach_msg(MACH_RCV_MSG)
-               kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port,
-                               MACH_PORT_RIGHT_RECEIVE, -1);
-               DISPATCH_VERIFY_MIG(kr);
-               dispatch_assume_zero(kr);
-               return NULL;
-       }
-
-       mach_msg_header_t *hdr;
-       dmsgr = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t),
-                       DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
-       dmsgr->dmsg_voucher = v;
-       hdr->msgh_local_port = reply_port;
-       if (dmsg) {
-               dmsgr->dmsg_priority = dmsg->dmsg_priority;
-               dmsgr->do_ctxt = dmsg->do_ctxt;
-       } else {
-               dmsgr->dmsg_priority = dmr->dmr_priority;
-               dmsgr->do_ctxt = dmr->dmr_ctxt;
-       }
-       _dispatch_mach_msg_set_reason(dmsgr, 0, DISPATCH_MACH_DISCONNECTED);
-       _dispatch_debug("machport[0x%08x]: reply disconnected, ctxt %p",
-                       hdr->msgh_local_port, dmsgr->do_ctxt);
-       return dmsgr;
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou)
-{
-       dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr;
-       mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg);
-       mach_msg_option_t msg_opts = dmsg->dmsg_options;
-       _dispatch_debug("machport[0x%08x]: not sent msg id 0x%x, ctxt %p, "
-                       "msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x",
-                       msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt,
-                       msg_opts, msg->msgh_voucher_port, dmsg->dmsg_reply);
-       unsigned long reason = (msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY) ?
-                       0 : DISPATCH_MACH_MESSAGE_NOT_SENT;
-       dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL);
-       _dispatch_mach_msg_set_reason(dmsg, 0, reason);
-       _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority);
-       if (dmsgr) _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority);
-}
-
-DISPATCH_NOINLINE
-static uint32_t
-_dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou,
-               dispatch_mach_reply_refs_t dmr, pthread_priority_t pp,
-               dispatch_mach_send_invoke_flags_t send_flags)
-{
-       dispatch_mach_send_refs_t dr = dm->dm_refs;
-       dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr = NULL;
-       voucher_t voucher = dmsg->dmsg_voucher;
-       mach_voucher_t ipc_kvoucher = MACH_VOUCHER_NULL;
-       uint32_t send_status = 0;
-       bool clear_voucher = false, kvoucher_move_send = false;
-       mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg);
-       bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) ==
-                       MACH_MSG_TYPE_MOVE_SEND_ONCE);
-       mach_port_t reply_port = dmsg->dmsg_reply;
-       if (!is_reply) {
-               dr->dm_needs_mgr = 0;
-               if (unlikely(dr->dm_checkin && dmsg != dr->dm_checkin)) {
-                       // send initial checkin message
-                       if (dm->dm_dkev && slowpath(_dispatch_queue_get_current() !=
-                                       &_dispatch_mgr_q)) {
-                               // send kevent must be uninstalled on the manager queue
-                               dr->dm_needs_mgr = 1;
-                               goto out;
-                       }
-                       if (unlikely(!_dispatch_mach_msg_send(dm,
-                                       dr->dm_checkin, NULL, pp, DM_SEND_INVOKE_NONE))) {
-                               goto out;
-                       }
-                       dr->dm_checkin = NULL;
-               }
-       }
-       mach_msg_return_t kr = 0;
-       mach_msg_option_t opts = 0, msg_opts = dmsg->dmsg_options;
-       if (!(msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY)) {
-               mach_msg_priority_t msg_priority = MACH_MSG_PRIORITY_UNSPECIFIED;
-               opts = MACH_SEND_MSG | (msg_opts & ~DISPATCH_MACH_OPTIONS_MASK);
-               if (!is_reply) {
-                       if (dmsg != dr->dm_checkin) {
-                               msg->msgh_remote_port = dr->dm_send;
-                       }
-                       if (_dispatch_queue_get_current() == &_dispatch_mgr_q) {
-                               if (slowpath(!dm->dm_dkev)) {
-                                       _dispatch_mach_notification_kevent_register(dm,
-                                                       msg->msgh_remote_port);
-                               }
-                               if (fastpath(dm->dm_dkev)) {
-                                       if (DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev)) {
-                                               goto out;
-                                       }
-                                       opts |= MACH_SEND_NOTIFY;
-                               }
-                       }
-                       opts |= MACH_SEND_TIMEOUT;
-                       if (dmsg->dmsg_priority != _voucher_get_priority(voucher)) {
-                               ipc_kvoucher = _voucher_create_mach_voucher_with_priority(
-                                               voucher, dmsg->dmsg_priority);
-                       }
-                       _dispatch_voucher_debug("mach-msg[%p] msg_set", voucher, dmsg);
-                       if (ipc_kvoucher) {
-                               kvoucher_move_send = true;
-                               clear_voucher = _voucher_mach_msg_set_mach_voucher(msg,
-                                               ipc_kvoucher, kvoucher_move_send);
-                       } else {
-                               clear_voucher = _voucher_mach_msg_set(msg, voucher);
-                       }
-                       if (pp && _dispatch_evfilt_machport_direct_enabled) {
-                               opts |= MACH_SEND_OVERRIDE;
-                               msg_priority = (mach_msg_priority_t)pp;
-                       }
-               }
-               _dispatch_debug_machport(msg->msgh_remote_port);
-               if (reply_port) _dispatch_debug_machport(reply_port);
-               if (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) {
-                       if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) {
-                               _dispatch_clear_thread_reply_port(reply_port);
-                       }
-                       _dispatch_mach_reply_waiter_register(dm, dmr, reply_port, dmsg,
-                                       msg_opts);
-               }
-               kr = mach_msg(msg, opts, msg->msgh_size, 0, MACH_PORT_NULL, 0,
-                               msg_priority);
-               _dispatch_debug("machport[0x%08x]: sent msg id 0x%x, ctxt %p, "
-                               "opts 0x%x, msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x: "
-                               "%s - 0x%x", msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt,
-                               opts, msg_opts, msg->msgh_voucher_port, reply_port,
-                               mach_error_string(kr), kr);
-               if (unlikely(kr && (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY))) {
-                       _dispatch_mach_reply_waiter_unregister(dm, dmr,
-                                       DKEV_UNREGISTER_REPLY_REMOVE);
-               }
-               if (clear_voucher) {
-                       if (kr == MACH_SEND_INVALID_VOUCHER && msg->msgh_voucher_port) {
-                               DISPATCH_CLIENT_CRASH(kr, "Voucher port corruption");
-                       }
-                       mach_voucher_t kv;
-                       kv = _voucher_mach_msg_clear(msg, kvoucher_move_send);
-                       if (kvoucher_move_send) ipc_kvoucher = kv;
-               }
-       }
-       if (kr == MACH_SEND_TIMED_OUT && (opts & MACH_SEND_TIMEOUT)) {
-               if (opts & MACH_SEND_NOTIFY) {
-                       _dispatch_debug("machport[0x%08x]: send-possible notification "
-                                       "armed", (mach_port_t)dm->dm_dkev->dk_kevent.ident);
-                       DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev) = 1;
-               } else {
-                       // send kevent must be installed on the manager queue
-                       dr->dm_needs_mgr = 1;
-               }
-               if (ipc_kvoucher) {
-                       _dispatch_kvoucher_debug("reuse on re-send", ipc_kvoucher);
-                       voucher_t ipc_voucher;
-                       ipc_voucher = _voucher_create_with_priority_and_mach_voucher(
-                                       voucher, dmsg->dmsg_priority, ipc_kvoucher);
-                       _dispatch_voucher_debug("mach-msg[%p] replace voucher[%p]",
-                                       ipc_voucher, dmsg, voucher);
-                       if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher);
-                       dmsg->dmsg_voucher = ipc_voucher;
-               }
-               goto out;
-       } else if (ipc_kvoucher && (kr || !kvoucher_move_send)) {
-               _voucher_dealloc_mach_voucher(ipc_kvoucher);
-       }
-       if (!(msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) && !kr && reply_port &&
-                       !(dm->ds_dkev && dm->ds_dkev->dk_kevent.ident == reply_port)) {
-               if (!dm->ds_is_direct_kevent &&
-                               _dispatch_queue_get_current() != &_dispatch_mgr_q) {
-                       // reply receive kevent must be installed on the manager queue
-                       dr->dm_needs_mgr = 1;
-                       dmsg->dmsg_options = msg_opts | DISPATCH_MACH_REGISTER_FOR_REPLY;
-                       goto out;
-               }
-               _dispatch_mach_reply_kevent_register(dm, reply_port, dmsg);
-       }
-       if (unlikely(!is_reply && dmsg == dr->dm_checkin && dm->dm_dkev)) {
-               _dispatch_mach_notification_kevent_unregister(dm);
-       }
-       if (slowpath(kr)) {
-               // Send failed, so reply was never registered <rdar://problem/14309159>
-               dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL);
-       }
-       _dispatch_mach_msg_set_reason(dmsg, kr, 0);
-       if ((send_flags & DM_SEND_INVOKE_IMMEDIATE_SEND) &&
-                       (msg_opts & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT)) {
-               // Return sent message synchronously <rdar://problem/25947334>
-               send_status |= DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT;
-       } else {
-               _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority);
-       }
-       if (dmsgr) _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority);
-       send_status |= DM_SEND_STATUS_SUCCESS;
-out:
-       return send_status;
-}
-
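The pseudo-blocking send above combines two Mach options: MACH_SEND_TIMEOUT with a zero timeout turns a full message queue into MACH_SEND_TIMED_OUT instead of blocking, and MACH_SEND_NOTIFY asks the kernel to arm the send-possible notification previously registered for the destination port. A fragment showing the shape of that call (`msg` hypothetical):

kern_return_t kr = mach_msg(&msg->header,
		MACH_SEND_MSG | MACH_SEND_TIMEOUT | MACH_SEND_NOTIFY,
		msg->header.msgh_size, 0, MACH_PORT_NULL,
		MACH_MSG_TIMEOUT_NONE /* == 0: fail immediately */, MACH_PORT_NULL);
if (kr == MACH_SEND_TIMED_OUT) {
	// queue full: park the message and retry when the
	// DISPATCH_MACH_SEND_POSSIBLE notification fires
}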
-#pragma mark -
-#pragma mark dispatch_mach_send_refs_t
-
-static void _dispatch_mach_cancel(dispatch_mach_t dm);
-static void _dispatch_mach_send_barrier_drain_push(dispatch_mach_t dm,
-               pthread_priority_t pp);
-
-DISPATCH_ALWAYS_INLINE
-static inline pthread_priority_t
-_dm_state_get_override(uint64_t dm_state)
-{
-       dm_state &= DISPATCH_MACH_STATE_OVERRIDE_MASK;
-       return (pthread_priority_t)(dm_state >> 32);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline uint64_t
-_dm_state_override_from_priority(pthread_priority_t pp)
-{
-       uint64_t pp_state = pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-       return pp_state << 32;
-}
-
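These helpers pack the channel's send state into one 64-bit word; a summary of the layout they imply:

// dm_state, one 64-bit word:
//   bits 63..32  highest override seen: (pp & QOS_CLASS_MASK) << 32
//   bits 31..0   owner tid plus DIRTY / PENDING_BARRIER / ... flag bits
// _dm_state_get_override() shifts the override back down;
// _dm_state_merge_override() below only ever raises it (a running max),
// setting DIRTY so an in-flight drain loops around and notices.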
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dm_state_needs_override(uint64_t dm_state, uint64_t pp_state)
-{
-       return (pp_state > (dm_state & DISPATCH_MACH_STATE_OVERRIDE_MASK));
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline uint64_t
-_dm_state_merge_override(uint64_t dm_state, uint64_t pp_state)
-{
-       if (_dm_state_needs_override(dm_state, pp_state)) {
-               dm_state &= ~DISPATCH_MACH_STATE_OVERRIDE_MASK;
-               dm_state |= pp_state;
-               dm_state |= DISPATCH_MACH_STATE_DIRTY;
-               dm_state |= DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
-       }
-       return dm_state;
-}
-
-#define _dispatch_mach_send_push_update_tail(dr, tail) \
-               os_mpsc_push_update_tail(dr, dm, tail, do_next)
-#define _dispatch_mach_send_push_update_head(dr, head) \
-               os_mpsc_push_update_head(dr, dm, head)
-#define _dispatch_mach_send_get_head(dr) \
-               os_mpsc_get_head(dr, dm)
-#define _dispatch_mach_send_unpop_head(dr, dc, dc_next) \
-               os_mpsc_undo_pop_head(dr, dm, dc, dc_next, do_next)
-#define _dispatch_mach_send_pop_head(dr, head) \
-               os_mpsc_pop_head(dr, dm, head, do_next)
-
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_mach_send_push_inline(dispatch_mach_send_refs_t dr,
-               dispatch_object_t dou)
-{
-       if (_dispatch_mach_send_push_update_tail(dr, dou._do)) {
-               _dispatch_mach_send_push_update_head(dr, dou._do);
-               return true;
-       }
-       return false;
-}
-
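The os_mpsc macros wrapped above implement the classic intrusive multi-producer single-consumer queue: producers atomically swap the tail and then link the old tail forward; only the single draining thread ever walks from the head. A generic C11 sketch of the push side (types hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct node { _Atomic(struct node *) next; } node_t;
typedef struct {
	_Atomic(node_t *) tail;
	_Atomic(node_t *) head;
} mpsc_t;

// Returns true when the queue was previously empty, mirroring
// _dispatch_mach_send_push_inline() above (the caller then knows it
// must wake up the drainer).
static bool
mpsc_push(mpsc_t *q, node_t *n)
{
	atomic_store_explicit(&n->next, NULL, memory_order_relaxed);
	node_t *prev = atomic_exchange_explicit(&q->tail, n,
			memory_order_acq_rel);
	if (prev == NULL) {
		atomic_store_explicit(&q->head, n, memory_order_release);
		return true;
	}
	atomic_store_explicit(&prev->next, n, memory_order_release);
	return false;
}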
-DISPATCH_NOINLINE
-static bool
-_dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags,
-               dispatch_mach_send_invoke_flags_t send_flags)
-{
-       dispatch_mach_send_refs_t dr = dm->dm_refs;
-       dispatch_mach_reply_refs_t dmr;
-       dispatch_mach_msg_t dmsg;
-       struct dispatch_object_s *dc = NULL, *next_dc = NULL;
-       pthread_priority_t pp = _dm_state_get_override(dr->dm_state);
-       uint64_t old_state, new_state;
-       uint32_t send_status;
-       bool needs_mgr, disconnecting, returning_send_result = false;
-
-again:
-       needs_mgr = false; disconnecting = false;
-       while (dr->dm_tail) {
-               dc = _dispatch_mach_send_get_head(dr);
-               do {
-                       dispatch_mach_send_invoke_flags_t sf = send_flags;
-                       // Only request immediate send result for the first message
-                       send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK;
-                       next_dc = _dispatch_mach_send_pop_head(dr, dc);
-                       if (_dispatch_object_has_type(dc,
-                                       DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) {
-                               if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) {
-                                       goto partial_drain;
-                               }
-                               _dispatch_continuation_pop(dc, dm->_as_dq, flags);
-                               continue;
-                       }
-                       if (_dispatch_object_is_slow_item(dc)) {
-                               dmsg = ((dispatch_continuation_t)dc)->dc_data;
-                               dmr = ((dispatch_continuation_t)dc)->dc_other;
-                       } else if (_dispatch_object_has_vtable(dc)) {
-                               dmsg = (dispatch_mach_msg_t)dc;
-                               dmr = NULL;
-                       } else {
-                               if ((dm->dm_dkev || !dm->ds_is_direct_kevent) &&
-                                               (_dispatch_queue_get_current() != &_dispatch_mgr_q)) {
-                                       // send kevent must be uninstalled on the manager queue
-                                       needs_mgr = true;
-                                       goto partial_drain;
-                               }
-                               if (unlikely(!_dispatch_mach_reconnect_invoke(dm, dc))) {
-                                       disconnecting = true;
-                                       goto partial_drain;
-                               }
-                               continue;
-                       }
-                       _dispatch_voucher_ktrace_dmsg_pop(dmsg);
-                       if (unlikely(dr->dm_disconnect_cnt ||
-                                       (dm->dq_atomic_flags & DSF_CANCELED))) {
-                               _dispatch_mach_msg_not_sent(dm, dmsg);
-                               continue;
-                       }
-                       send_status = _dispatch_mach_msg_send(dm, dmsg, dmr, pp, sf);
-                       if (unlikely(!send_status)) {
-                               goto partial_drain;
-                       }
-                       if (send_status & DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT) {
-                               returning_send_result = true;
-                       }
-               } while ((dc = next_dc));
-       }
-
-       os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, {
-               if (old_state & DISPATCH_MACH_STATE_DIRTY) {
-                       new_state = old_state;
-                       new_state &= ~DISPATCH_MACH_STATE_DIRTY;
-                       new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
-                       new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
-               } else {
-                       // unlock
-                       new_state = 0;
-               }
-       });
-       goto out;
-
-partial_drain:
-       // if this is not a complete drain, we must undo some things
-       _dispatch_mach_send_unpop_head(dr, dc, next_dc);
-
-       if (_dispatch_object_has_type(dc,
-                       DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) {
-               os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, {
-                       new_state = old_state;
-                       new_state |= DISPATCH_MACH_STATE_DIRTY;
-                       new_state |= DISPATCH_MACH_STATE_PENDING_BARRIER;
-                       new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK;
-               });
-       } else {
-               os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, {
-                       new_state = old_state;
-                       if (old_state & (DISPATCH_MACH_STATE_DIRTY |
-                                       DISPATCH_MACH_STATE_RECEIVED_OVERRIDE)) {
-                               new_state &= ~DISPATCH_MACH_STATE_DIRTY;
-                               new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
-                               new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
-                       } else {
-                               new_state |= DISPATCH_MACH_STATE_DIRTY;
-                               new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK;
-                       }
-               });
-       }
-
-out:
-       if (old_state & DISPATCH_MACH_STATE_RECEIVED_OVERRIDE) {
-               // Ensure that the root queue sees that this thread was overridden.
-               _dispatch_set_defaultpriority_override();
-       }
-
-       if (unlikely(new_state & DISPATCH_MACH_STATE_UNLOCK_MASK)) {
-               os_atomic_thread_fence(acquire);
-               pp = _dm_state_get_override(new_state);
-               goto again;
-       }
-
-       if (new_state & DISPATCH_MACH_STATE_PENDING_BARRIER) {
-               pp = _dm_state_get_override(new_state);
-               _dispatch_mach_send_barrier_drain_push(dm, pp);
-       } else {
-               if (needs_mgr || dr->dm_needs_mgr) {
-                       pp = _dm_state_get_override(new_state);
-               } else {
-                       pp = 0;
-               }
-               if (!disconnecting) dx_wakeup(dm, pp, DISPATCH_WAKEUP_FLUSH);
-       }
-       return returning_send_result;
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_send_invoke(dispatch_mach_t dm,
-               dispatch_invoke_flags_t flags,
-               dispatch_mach_send_invoke_flags_t send_flags)
-{
-       dispatch_lock_owner tid_self = _dispatch_tid_self();
-       uint64_t old_state, new_state;
-       pthread_priority_t pp_floor;
-
-       uint64_t canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK;
-       uint64_t canlock_state = 0;
-
-       if (send_flags & DM_SEND_INVOKE_NEEDS_BARRIER) {
-               canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER;
-               canlock_state = DISPATCH_MACH_STATE_PENDING_BARRIER;
-       } else if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) {
-               canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER;
-       }
-
-       if (flags & DISPATCH_INVOKE_MANAGER_DRAIN) {
-               pp_floor = 0;
-       } else {
-               // _dispatch_queue_class_invoke will have applied the queue override
-               // (if any) before we get here. Otherwise, fall back to the default
-               // base priority as an estimate of the priority we already asked for.
-               pp_floor = dm->_as_dq->dq_override;
-               if (!pp_floor) {
-                       pp_floor = _dispatch_get_defaultpriority();
-                       pp_floor &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-               }
-       }
-
-retry:
-       os_atomic_rmw_loop2o(dm->dm_refs, dm_state, old_state, new_state, acquire, {
-               new_state = old_state;
-               if (unlikely((old_state & canlock_mask) != canlock_state)) {
-                       if (!(send_flags & DM_SEND_INVOKE_FLUSH)) {
-                               os_atomic_rmw_loop_give_up(break);
-                       }
-                       new_state |= DISPATCH_MACH_STATE_DIRTY;
-               } else {
-                       if (likely(pp_floor)) {
-                               pthread_priority_t pp = _dm_state_get_override(old_state);
-                               if (unlikely(pp > pp_floor)) {
-                                       os_atomic_rmw_loop_give_up({
-                                               _dispatch_wqthread_override_start(tid_self, pp);
-                                               // Ensure that the root queue sees
-                                               // that this thread was overridden.
-                                               _dispatch_set_defaultpriority_override();
-                                               pp_floor = pp;
-                                               goto retry;
-                                       });
-                               }
-                       }
-                       new_state |= tid_self;
-                       new_state &= ~DISPATCH_MACH_STATE_DIRTY;
-                       new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
-                       new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
-               }
-       });
-
-       if (unlikely((old_state & canlock_mask) != canlock_state)) {
-               return;
-       }
-       if (send_flags & DM_SEND_INVOKE_CANCEL) {
-               _dispatch_mach_cancel(dm);
-       }
-       _dispatch_mach_send_drain(dm, flags, send_flags);
-}
-
-DISPATCH_NOINLINE
-void
-_dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc,
-               dispatch_invoke_flags_t flags)
-{
-       dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current();
-       uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT;
-       dispatch_thread_frame_s dtf;
-
-       DISPATCH_COMPILER_CAN_ASSUME(dc->dc_priority == DISPATCH_NO_PRIORITY);
-       DISPATCH_COMPILER_CAN_ASSUME(dc->dc_voucher == DISPATCH_NO_VOUCHER);
-       // hide the mach channel (see _dispatch_mach_barrier_invoke comment)
-       _dispatch_thread_frame_stash(&dtf);
-       _dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, dc_flags, {
-               _dispatch_mach_send_invoke(dm, flags,
-                               DM_SEND_INVOKE_NEEDS_BARRIER | DM_SEND_INVOKE_CAN_RUN_BARRIER);
-       });
-       _dispatch_thread_frame_unstash(&dtf);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_send_barrier_drain_push(dispatch_mach_t dm,
-               pthread_priority_t pp)
-{
-       dispatch_continuation_t dc = _dispatch_continuation_alloc();
-
-       dc->do_vtable = DC_VTABLE(MACH_SEND_BARRRIER_DRAIN);
-       dc->dc_func = NULL;
-       dc->dc_ctxt = NULL;
-       dc->dc_voucher = DISPATCH_NO_VOUCHER;
-       dc->dc_priority = DISPATCH_NO_PRIORITY;
-       return _dispatch_queue_push(dm->_as_dq, dc, pp);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_continuation_t dc,
-               pthread_priority_t pp)
-{
-       dispatch_mach_send_refs_t dr = dm->dm_refs;
-       uint64_t pp_state, old_state, new_state, state_flags = 0;
-       dispatch_lock_owner owner;
-       bool wakeup;
-
-       // <rdar://problem/25896179> when pushing a send barrier that destroys
-       // the last reference to this channel, and the send queue is already
-       // draining on another thread, the send barrier may run as soon as
-       // _dispatch_mach_send_push_inline() returns.
-       _dispatch_retain(dm);
-       pp_state = _dm_state_override_from_priority(pp);
-
-       wakeup = _dispatch_mach_send_push_inline(dr, dc);
-       if (wakeup) {
-               state_flags = DISPATCH_MACH_STATE_DIRTY;
-               if (dc->do_vtable == DC_VTABLE(MACH_SEND_BARRIER)) {
-                       state_flags |= DISPATCH_MACH_STATE_PENDING_BARRIER;
-               }
-       }
-
-       if (state_flags) {
-               os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, {
-                       new_state = _dm_state_merge_override(old_state, pp_state);
-                       new_state |= state_flags;
-               });
-       } else {
-               os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, relaxed, {
-                       new_state = _dm_state_merge_override(old_state, pp_state);
-                       if (old_state == new_state) {
-                               os_atomic_rmw_loop_give_up(break);
-                       }
-               });
-       }
-
-       pp = _dm_state_get_override(new_state);
-       owner = _dispatch_lock_owner((dispatch_lock)old_state);
-       if (owner) {
-               if (_dm_state_needs_override(old_state, pp_state)) {
-                       _dispatch_wqthread_override_start_check_owner(owner, pp,
-                                       &dr->dm_state_lock.dul_lock);
-               }
-               return _dispatch_release_tailcall(dm);
-       }
-
-       dispatch_wakeup_flags_t wflags = 0;
-       if (state_flags & DISPATCH_MACH_STATE_PENDING_BARRIER) {
-               _dispatch_mach_send_barrier_drain_push(dm, pp);
-       } else if (wakeup || dr->dm_disconnect_cnt ||
-                       (dm->dq_atomic_flags & DSF_CANCELED)) {
-               wflags = DISPATCH_WAKEUP_FLUSH | DISPATCH_WAKEUP_CONSUME;
-       } else if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) {
-               wflags = DISPATCH_WAKEUP_OVERRIDING | DISPATCH_WAKEUP_CONSUME;
-       }
-       if (wflags) {
-               return dx_wakeup(dm, pp, wflags);
-       }
-       return _dispatch_release_tailcall(dm);
-}
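
The _dispatch_retain(dm) above (see the rdar comment) is an instance of the
retain-before-publish rule: once the continuation is visible on the send
queue, a concurrent drainer may run it and drop the channel's last
reference before this function returns. A standalone sketch of the rule
with a hypothetical refcounted type:

        #include <stdatomic.h>
        #include <stdlib.h>

        typedef struct obj {
                atomic_uint refcnt; // starts at 1 for the creator
        } obj_t;

        static void
        obj_retain(obj_t *o)
        {
                atomic_fetch_add_explicit(&o->refcnt, 1, memory_order_relaxed);
        }

        static void
        obj_release(obj_t *o)
        {
                // acq_rel so the freeing thread observes all prior writes to *o
                if (atomic_fetch_sub_explicit(&o->refcnt, 1,
                                memory_order_acq_rel) == 1) {
                        free(o);
                }
        }

The push path takes the extra reference first, publishes the work, and then
balances the reference on every exit, either via dx_wakeup() with
DISPATCH_WAKEUP_CONSUME or via _dispatch_release_tailcall().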
-
-DISPATCH_NOINLINE
-static bool
-_dispatch_mach_send_push_and_trydrain(dispatch_mach_t dm,
-               dispatch_object_t dou, pthread_priority_t pp,
-               dispatch_mach_send_invoke_flags_t send_flags)
-{
-       dispatch_mach_send_refs_t dr = dm->dm_refs;
-       dispatch_lock_owner tid_self = _dispatch_tid_self();
-       uint64_t pp_state, old_state, new_state, canlock_mask, state_flags = 0;
-       dispatch_lock_owner owner;
-
-       pp_state = _dm_state_override_from_priority(pp);
-       bool wakeup = _dispatch_mach_send_push_inline(dr, dou);
-       if (wakeup) {
-               state_flags = DISPATCH_MACH_STATE_DIRTY;
-       }
-
-       if (unlikely(dr->dm_disconnect_cnt ||
-                       (dm->dq_atomic_flags & DSF_CANCELED))) {
-               os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, {
-                       new_state = _dm_state_merge_override(old_state, pp_state);
-                       new_state |= state_flags;
-               });
-               dx_wakeup(dm, pp, DISPATCH_WAKEUP_FLUSH);
-               return false;
-       }
-
-       canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK |
-                       DISPATCH_MACH_STATE_PENDING_BARRIER;
-       if (state_flags) {
-               os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, seq_cst, {
-                       new_state = _dm_state_merge_override(old_state, pp_state);
-                       new_state |= state_flags;
-                       if (likely((old_state & canlock_mask) == 0)) {
-                               new_state |= tid_self;
-                               new_state &= ~DISPATCH_MACH_STATE_DIRTY;
-                               new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
-                               new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
-                       }
-               });
-       } else {
-               os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, acquire, {
-                       new_state = _dm_state_merge_override(old_state, pp_state);
-                       if (new_state == old_state) {
-                               os_atomic_rmw_loop_give_up(return false);
-                       }
-                       if (likely((old_state & canlock_mask) == 0)) {
-                               new_state |= tid_self;
-                               new_state &= ~DISPATCH_MACH_STATE_DIRTY;
-                               new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
-                               new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
-                       }
-               });
-       }
-
-       owner = _dispatch_lock_owner((dispatch_lock)old_state);
-       if (owner) {
-               if (_dm_state_needs_override(old_state, pp_state)) {
-                       _dispatch_wqthread_override_start_check_owner(owner, pp,
-                                       &dr->dm_state_lock.dul_lock);
-               }
-               return false;
-       }
-
-       if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) {
-               dx_wakeup(dm, pp, DISPATCH_WAKEUP_OVERRIDING);
-               return false;
-       }
-
-       // Ensure our message is still at the head of the queue and has not already
-       // been dequeued by another thread that raced us to the send queue lock.
-       // A plain load of the head and comparison against our object pointer is
-       // sufficient.
-       if (unlikely(!(wakeup && dou._do == dr->dm_head))) {
-               // Don't request immediate send result for messages we don't own
-               send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK;
-       }
-       return _dispatch_mach_send_drain(dm, DISPATCH_INVOKE_NONE, send_flags);
-}
-
-static void
-_dispatch_mach_merge_notification_kevent(dispatch_mach_t dm,
-               const _dispatch_kevent_qos_s *ke)
-{
-       if (!(ke->fflags & dm->ds_pending_data_mask)) {
-               return;
-       }
-       _dispatch_mach_send_invoke(dm, DISPATCH_INVOKE_MANAGER_DRAIN,
-                       DM_SEND_INVOKE_FLUSH);
-}
-
-#pragma mark -
-#pragma mark dispatch_mach_t
-
-static inline mach_msg_option_t
-_dispatch_mach_checkin_options(void)
-{
-       mach_msg_option_t options = 0;
-#if DISPATCH_USE_CHECKIN_NOIMPORTANCE
-       options = MACH_SEND_NOIMPORTANCE; // <rdar://problem/16996737>
-#endif
-       return options;
-}
-
-static inline mach_msg_option_t
-_dispatch_mach_send_options(void)
-{
-       mach_msg_option_t options = 0;
-       return options;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline pthread_priority_t
-_dispatch_mach_priority_propagate(mach_msg_option_t options)
-{
-#if DISPATCH_USE_NOIMPORTANCE_QOS
-       if (options & MACH_SEND_NOIMPORTANCE) return 0;
-#else
-       (void)options;
-#endif
-       return _dispatch_priority_propagate();
-}
-
-DISPATCH_NOINLINE
-static bool
-_dispatch_mach_send_msg(dispatch_mach_t dm, dispatch_mach_msg_t dmsg,
-               dispatch_continuation_t dc_wait, mach_msg_option_t options)
-{
-       dispatch_mach_send_refs_t dr = dm->dm_refs;
-       if (slowpath(dmsg->do_next != DISPATCH_OBJECT_LISTLESS)) {
-               DISPATCH_CLIENT_CRASH(dmsg->do_next, "Message already enqueued");
-       }
-       dispatch_retain(dmsg);
-       pthread_priority_t priority = _dispatch_mach_priority_propagate(options);
-       options |= _dispatch_mach_send_options();
-       dmsg->dmsg_options = options;
-       mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg);
-       dmsg->dmsg_reply = _dispatch_mach_msg_get_reply_port(dmsg);
-       bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) ==
-                       MACH_MSG_TYPE_MOVE_SEND_ONCE);
-       dmsg->dmsg_priority = priority;
-       dmsg->dmsg_voucher = _voucher_copy();
-       _dispatch_voucher_debug("mach-msg[%p] set", dmsg->dmsg_voucher, dmsg);
-
-       uint32_t send_status;
-       bool returning_send_result = false;
-       dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE;
-       if (options & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT) {
-               send_flags = DM_SEND_INVOKE_IMMEDIATE_SEND;
-       }
-       if (is_reply && !dmsg->dmsg_reply && !dr->dm_disconnect_cnt &&
-                       !(dm->dq_atomic_flags & DSF_CANCELED)) {
-               // replies are sent to a send-once right and don't need the send queue
-               dispatch_assert(!dc_wait);
-               send_status = _dispatch_mach_msg_send(dm, dmsg, NULL, 0, send_flags);
-               dispatch_assert(send_status);
-               returning_send_result = !!(send_status &
-                               DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT);
-       } else {
-               _dispatch_voucher_ktrace_dmsg_push(dmsg);
-               priority &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-               dispatch_object_t dou = { ._dmsg = dmsg };
-               if (dc_wait) dou._dc = dc_wait;
-               returning_send_result = _dispatch_mach_send_push_and_trydrain(dm, dou,
-                               priority, send_flags);
-       }
-       if (returning_send_result) {
-               _dispatch_voucher_debug("mach-msg[%p] clear", dmsg->dmsg_voucher, dmsg);
-               if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher);
-               dmsg->dmsg_voucher = NULL;
-               dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
-               dispatch_release(dmsg);
-       }
-       return returning_send_result;
-}
-
-DISPATCH_NOINLINE
-void
-dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg,
-               mach_msg_option_t options)
-{
-       dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
-       options &= ~DISPATCH_MACH_OPTIONS_MASK;
-       bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,
-                       options);
-       dispatch_assert(!returned_send_result);
-}
-
-DISPATCH_NOINLINE
-void
-dispatch_mach_send_with_result(dispatch_mach_t dm, dispatch_mach_msg_t dmsg,
-               mach_msg_option_t options, dispatch_mach_send_flags_t send_flags,
-               dispatch_mach_reason_t *send_result, mach_error_t *send_error)
-{
-       if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) {
-               DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags");
-       }
-       dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
-       options &= ~DISPATCH_MACH_OPTIONS_MASK;
-       options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT;
-       bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,
-                       options);
-       unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND;
-       mach_error_t err = 0;
-       if (returned_send_result) {
-               reason = _dispatch_mach_msg_get_reason(dmsg, &err);
-       }
-       *send_result = reason;
-       *send_error = err;
-}
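
A usage sketch for this entry point, assuming a connected channel dm and a
message dmsg built with dispatch_mach_msg_create():

        dispatch_mach_reason_t reason;
        mach_error_t error;
        dispatch_mach_send_with_result(dm, dmsg, 0, DISPATCH_MACH_SEND_DEFAULT,
                        &reason, &error);
        if (reason == DISPATCH_MACH_NEEDS_DEFERRED_SEND) {
                // the message could not be sent synchronously; its send
                // result will be delivered through the channel handler
        }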
-
-static inline
-dispatch_mach_msg_t
-_dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm,
-               dispatch_mach_msg_t dmsg, mach_msg_option_t options,
-               bool *returned_send_result)
-{
-       mach_port_t reply_port = _dispatch_mach_msg_get_reply_port(dmsg);
-       if (!reply_port) {
-               // use per-thread mach reply port <rdar://24597802>
-               reply_port = _dispatch_get_thread_reply_port();
-               mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg);
-               dispatch_assert(MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) ==
-                               MACH_MSG_TYPE_MAKE_SEND_ONCE);
-               hdr->msgh_local_port = reply_port;
-               options |= DISPATCH_MACH_OWNED_REPLY_PORT;
-       }
-
-       dispatch_mach_reply_refs_t dmr;
-#if DISPATCH_DEBUG
-       dmr = _dispatch_calloc(1, sizeof(*dmr));
-#else
-       struct dispatch_mach_reply_refs_s dmr_buf = { };
-       dmr = &dmr_buf;
-#endif
-       struct dispatch_continuation_s dc_wait = {
-               .dc_flags = DISPATCH_OBJ_SYNC_SLOW_BIT,
-               .dc_data = dmsg,
-               .dc_other = dmr,
-               .dc_priority = DISPATCH_NO_PRIORITY,
-               .dc_voucher = DISPATCH_NO_VOUCHER,
-       };
-       dmr->dmr_ctxt = dmsg->do_ctxt;
-       *returned_send_result = _dispatch_mach_send_msg(dm, dmsg, &dc_wait,
-                       options);
-       if (options & DISPATCH_MACH_OWNED_REPLY_PORT) {
-               _dispatch_clear_thread_reply_port(reply_port);
-       }
-       dmsg = _dispatch_mach_msg_reply_recv(dm, dmr, reply_port);
-#if DISPATCH_DEBUG
-       free(dmr);
-#endif
-       return dmsg;
-}
-
-DISPATCH_NOINLINE
-dispatch_mach_msg_t
-dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm,
-               dispatch_mach_msg_t dmsg, mach_msg_option_t options)
-{
-       bool returned_send_result;
-       dispatch_mach_msg_t reply;
-       dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
-       options &= ~DISPATCH_MACH_OPTIONS_MASK;
-       options |= DISPATCH_MACH_WAIT_FOR_REPLY;
-       reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options,
-                       &returned_send_result);
-       dispatch_assert(!returned_send_result);
-       return reply;
-}
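
A usage sketch for the synchronous wait, under the same assumptions as
above; when the message presets no reply port, its header must carry a
MACH_MSG_TYPE_MAKE_SEND_ONCE local disposition so the per-thread reply
port can be installed:

        dispatch_mach_msg_t reply =
                        dispatch_mach_send_and_wait_for_reply(dm, dmsg, 0);
        if (reply) {
                size_t size;
                mach_msg_header_t *hdr = dispatch_mach_msg_get_msg(reply, &size);
                // ... consume the reply, then balance the returned reference
                dispatch_release(reply);
        }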
-
-DISPATCH_NOINLINE
-dispatch_mach_msg_t
-dispatch_mach_send_with_result_and_wait_for_reply(dispatch_mach_t dm,
-               dispatch_mach_msg_t dmsg, mach_msg_option_t options,
-               dispatch_mach_send_flags_t send_flags,
-               dispatch_mach_reason_t *send_result, mach_error_t *send_error)
-{
-       if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) {
-               DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags");
-       }
-       bool returned_send_result;
-       dispatch_mach_msg_t reply;
-       dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
-       options &= ~DISPATCH_MACH_OPTIONS_MASK;
-       options |= DISPATCH_MACH_WAIT_FOR_REPLY;
-       options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT;
-       reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options,
-                       &returned_send_result);
-       unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND;
-       mach_error_t err = 0;
-       if (returned_send_result) {
-               reason = _dispatch_mach_msg_get_reason(dmsg, &err);
-       }
-       *send_result = reason;
-       *send_error = err;
-       return reply;
-}
-
-DISPATCH_NOINLINE
-static bool
-_dispatch_mach_disconnect(dispatch_mach_t dm)
-{
-       dispatch_mach_send_refs_t dr = dm->dm_refs;
-       bool disconnected;
-       if (dm->dm_dkev) {
-               _dispatch_mach_notification_kevent_unregister(dm);
-       }
-       if (MACH_PORT_VALID(dr->dm_send)) {
-               _dispatch_mach_msg_disconnected(dm, MACH_PORT_NULL, dr->dm_send);
-       }
-       dr->dm_send = MACH_PORT_NULL;
-       if (dr->dm_checkin) {
-               _dispatch_mach_msg_not_sent(dm, dr->dm_checkin);
-               dr->dm_checkin = NULL;
-       }
-       _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock);
-       dispatch_mach_reply_refs_t dmr, tmp;
-       TAILQ_FOREACH_SAFE(dmr, &dm->dm_refs->dm_replies, dmr_list, tmp) {
-               TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list);
-               _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
-               if (dmr->dmr_dkev) {
-                       _dispatch_mach_reply_kevent_unregister(dm, dmr,
-                                       DKEV_UNREGISTER_DISCONNECTED);
-               } else {
-                       _dispatch_mach_reply_waiter_unregister(dm, dmr,
-                                       DKEV_UNREGISTER_DISCONNECTED);
-               }
-       }
-       disconnected = TAILQ_EMPTY(&dm->dm_refs->dm_replies);
-       _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock);
-       return disconnected;
-}
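
The reply-table teardown above leans on TAILQ_FOREACH_SAFE, which caches
the successor so the current element can be unlinked mid-iteration. A
minimal standalone shape of the idiom with the BSD-derived <sys/queue.h>
this codebase builds against:

        #include <stdlib.h>
        #include <sys/queue.h>

        struct node {
                TAILQ_ENTRY(node) link;
        };
        TAILQ_HEAD(node_list, node);

        static void
        drain(struct node_list *head)
        {
                struct node *n, *tmp;
                TAILQ_FOREACH_SAFE(n, head, link, tmp) {
                        TAILQ_REMOVE(head, n, link);
                        free(n); // safe: the successor was cached in tmp
                }
        }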
-
-static void
-_dispatch_mach_cancel(dispatch_mach_t dm)
-{
-       _dispatch_object_debug(dm, "%s", __func__);
-       if (!_dispatch_mach_disconnect(dm)) return;
-       if (dm->ds_dkev) {
-               mach_port_t local_port = (mach_port_t)dm->ds_dkev->dk_kevent.ident;
-               _dispatch_source_kevent_unregister(dm->_as_ds);
-               if ((dm->dq_atomic_flags & DSF_STATE_MASK) == DSF_DELETED) {
-                       _dispatch_mach_msg_disconnected(dm, local_port, MACH_PORT_NULL);
-               }
-       } else {
-               _dispatch_queue_atomic_flags_set_and_clear(dm->_as_dq, DSF_DELETED,
-                               DSF_ARMED | DSF_DEFERRED_DELETE);
-       }
-}
-
-DISPATCH_NOINLINE
-static bool
-_dispatch_mach_reconnect_invoke(dispatch_mach_t dm, dispatch_object_t dou)
-{
-       if (!_dispatch_mach_disconnect(dm)) return false;
-       dispatch_mach_send_refs_t dr = dm->dm_refs;
-       dr->dm_checkin = dou._dc->dc_data;
-       dr->dm_send = (mach_port_t)dou._dc->dc_other;
-       _dispatch_continuation_free(dou._dc);
-       (void)os_atomic_dec2o(dr, dm_disconnect_cnt, relaxed);
-       _dispatch_object_debug(dm, "%s", __func__);
-       _dispatch_release(dm); // <rdar://problem/26266265>
-       return true;
-}
-
-DISPATCH_NOINLINE
-void
-dispatch_mach_reconnect(dispatch_mach_t dm, mach_port_t send,
-               dispatch_mach_msg_t checkin)
-{
-       dispatch_mach_send_refs_t dr = dm->dm_refs;
-       (void)os_atomic_inc2o(dr, dm_disconnect_cnt, relaxed);
-       if (MACH_PORT_VALID(send) && checkin) {
-               dispatch_retain(checkin);
-               checkin->dmsg_options = _dispatch_mach_checkin_options();
-               dr->dm_checkin_port = _dispatch_mach_msg_get_remote_port(checkin);
-       } else {
-               checkin = NULL;
-               dr->dm_checkin_port = MACH_PORT_NULL;
-       }
-       dispatch_continuation_t dc = _dispatch_continuation_alloc();
-       dc->dc_flags = DISPATCH_OBJ_CONSUME_BIT;
-       // actually called manually in _dispatch_mach_send_drain
-       dc->dc_func = (void*)_dispatch_mach_reconnect_invoke;
-       dc->dc_ctxt = dc;
-       dc->dc_data = checkin;
-       dc->dc_other = (void*)(uintptr_t)send;
-       dc->dc_voucher = DISPATCH_NO_VOUCHER;
-       dc->dc_priority = DISPATCH_NO_PRIORITY;
-       _dispatch_retain(dm); // <rdar://problem/26266265>
-       return _dispatch_mach_send_push(dm, dc, 0);
-}
-
-DISPATCH_NOINLINE
-mach_port_t
-dispatch_mach_get_checkin_port(dispatch_mach_t dm)
-{
-       dispatch_mach_send_refs_t dr = dm->dm_refs;
-       if (slowpath(dm->dq_atomic_flags & DSF_CANCELED)) {
-               return MACH_PORT_DEAD;
-       }
-       return dr->dm_checkin_port;
-}
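
Caller-side, MACH_PORT_DEAD is the sentinel for a canceled channel, as
distinct from MACH_PORT_NULL for a channel that simply has no check-in
message; a short sketch:

        mach_port_t checkin = dispatch_mach_get_checkin_port(dm);
        if (checkin == MACH_PORT_DEAD) {
                // the channel was canceled; no further check-in will be sent
        }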
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_connect_invoke(dispatch_mach_t dm)
-{
-       dispatch_mach_refs_t dr = dm->ds_refs;
-       _dispatch_client_callout4(dr->dm_handler_ctxt,
-                       DISPATCH_MACH_CONNECTED, NULL, 0, dr->dm_handler_func);
-       dm->dm_connect_handler_called = 1;
-}
-
-DISPATCH_NOINLINE
-void
-_dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg,
-               dispatch_invoke_flags_t flags)
-{
-       dispatch_thread_frame_s dtf;
-       dispatch_mach_refs_t dr;
-       dispatch_mach_t dm;
-       mach_error_t err;
-       unsigned long reason = _dispatch_mach_msg_get_reason(dmsg, &err);
-       _dispatch_thread_set_self_t adopt_flags = DISPATCH_PRIORITY_ENFORCE|
-                       DISPATCH_VOUCHER_CONSUME|DISPATCH_VOUCHER_REPLACE;
-
-       // hide mach channel
-       dm = (dispatch_mach_t)_dispatch_thread_frame_stash(&dtf);
-       dr = dm->ds_refs;
-       dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
-       _dispatch_voucher_ktrace_dmsg_pop(dmsg);
-       _dispatch_voucher_debug("mach-msg[%p] adopt", dmsg->dmsg_voucher, dmsg);
-       (void)_dispatch_adopt_priority_and_set_voucher(dmsg->dmsg_priority,
-                       dmsg->dmsg_voucher, adopt_flags);
-       dmsg->dmsg_voucher = NULL;
-       dispatch_invoke_with_autoreleasepool(flags, {
-               if (slowpath(!dm->dm_connect_handler_called)) {
-                       _dispatch_mach_connect_invoke(dm);
-               }
-               _dispatch_client_callout4(dr->dm_handler_ctxt, reason, dmsg, err,
-                               dr->dm_handler_func);
-       });
-       _dispatch_thread_frame_unstash(&dtf);
-       _dispatch_introspection_queue_item_complete(dmsg);
-       dispatch_release(dmsg);
-}
-
-DISPATCH_NOINLINE
-void
-_dispatch_mach_barrier_invoke(dispatch_continuation_t dc,
-               dispatch_invoke_flags_t flags)
-{
-       dispatch_thread_frame_s dtf;
-       dispatch_mach_t dm = dc->dc_other;
-       dispatch_mach_refs_t dr;
-       uintptr_t dc_flags = (uintptr_t)dc->dc_data;
-       unsigned long type = dc_type(dc);
-
-       // hide mach channel from clients
-       if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) {
-               // on the send queue, the mach channel isn't the current queue:
-               // its target queue is already the current one
-               _dispatch_thread_frame_stash(&dtf);
-       }
-       dr = dm->ds_refs;
-       DISPATCH_COMPILER_CAN_ASSUME(dc_flags & DISPATCH_OBJ_CONSUME_BIT);
-       _dispatch_continuation_pop_forwarded(dc, dm->dq_override_voucher, dc_flags, {
-               dispatch_invoke_with_autoreleasepool(flags, {
-                       if (slowpath(!dm->dm_connect_handler_called)) {
-                               _dispatch_mach_connect_invoke(dm);
-                       }
-                       _dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
-                       _dispatch_client_callout4(dr->dm_handler_ctxt,
-                                       DISPATCH_MACH_BARRIER_COMPLETED, NULL, 0,
-                                       dr->dm_handler_func);
-               });
-       });
-       if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) {
-               _dispatch_thread_frame_unstash(&dtf);
-       }
-}
-
-DISPATCH_NOINLINE
-void
-dispatch_mach_send_barrier_f(dispatch_mach_t dm, void *context,
-               dispatch_function_t func)
-{
-       dispatch_continuation_t dc = _dispatch_continuation_alloc();
-       uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT;
-       pthread_priority_t pp;
-
-       _dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags);
-       dc->dc_data = (void *)dc->dc_flags;
-       dc->dc_other = dm;
-       dc->do_vtable = DC_VTABLE(MACH_SEND_BARRIER);
-       _dispatch_trace_continuation_push(dm->_as_dq, dc);
-       pp = _dispatch_continuation_get_override_priority(dm->_as_dq, dc);
-       return _dispatch_mach_send_push(dm, dc, pp);
-}
-
-DISPATCH_NOINLINE
-void
-dispatch_mach_send_barrier(dispatch_mach_t dm, dispatch_block_t barrier)
-{
-       dispatch_continuation_t dc = _dispatch_continuation_alloc();
-       uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT;
-       pthread_priority_t pp;
-
-       _dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags);
-       dc->dc_data = (void *)dc->dc_flags;
-       dc->dc_other = dm;
-       dc->do_vtable = DC_VTABLE(MACH_SEND_BARRIER);
-       _dispatch_trace_continuation_push(dm->_as_dq, dc);
-       pp = _dispatch_continuation_get_override_priority(dm->_as_dq, dc);
-       return _dispatch_mach_send_push(dm, dc, pp);
-}
-
-DISPATCH_NOINLINE
-void
-dispatch_mach_receive_barrier_f(dispatch_mach_t dm, void *context,
-               dispatch_function_t func)
-{
-       dispatch_continuation_t dc = _dispatch_continuation_alloc();
-       uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT;
-
-       _dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags);
-       dc->dc_data = (void *)dc->dc_flags;
-       dc->dc_other = dm;
-       dc->do_vtable = DC_VTABLE(MACH_RECV_BARRIER);
-       return _dispatch_continuation_async(dm->_as_dq, dc);
-}
-
-DISPATCH_NOINLINE
-void
-dispatch_mach_receive_barrier(dispatch_mach_t dm, dispatch_block_t barrier)
-{
-       dispatch_continuation_t dc = _dispatch_continuation_alloc();
-       uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT;
-
-       _dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags);
-       dc->dc_data = (void *)dc->dc_flags;
-       dc->dc_other = dm;
-       dc->do_vtable = DC_VTABLE(MACH_RECV_BARRIER);
-       return _dispatch_continuation_async(dm->_as_dq, dc);
-}
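
A usage sketch contrasting the two barrier flavors above; per
_dispatch_mach_barrier_invoke(), each one is followed by a
DISPATCH_MACH_BARRIER_COMPLETED callout to the channel handler:

        dispatch_mach_send_barrier(dm, ^{
                // ordered relative to messages previously passed to
                // dispatch_mach_send() on this channel
        });
        dispatch_mach_receive_barrier(dm, ^{
                // ordered relative to message deliveries on the
                // channel's target queue
        });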
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_cancel_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags)
-{
-       dispatch_mach_refs_t dr = dm->ds_refs;
-
-       dispatch_invoke_with_autoreleasepool(flags, {
-               if (slowpath(!dm->dm_connect_handler_called)) {
-                       _dispatch_mach_connect_invoke(dm);
-               }
-               _dispatch_client_callout4(dr->dm_handler_ctxt,
-                               DISPATCH_MACH_CANCELED, NULL, 0, dr->dm_handler_func);
-       });
-       dm->dm_cancel_handler_called = 1;
-       _dispatch_release(dm); // the retain is done at creation time
-}
-
-DISPATCH_NOINLINE
-void
-dispatch_mach_cancel(dispatch_mach_t dm)
-{
-       dispatch_source_cancel(dm->_as_ds);
-}
-
-static void
-_dispatch_mach_install(dispatch_mach_t dm, pthread_priority_t pp)
-{
-       uint32_t disconnect_cnt;
-
-       if (dm->ds_dkev) {
-               _dispatch_source_kevent_register(dm->_as_ds, pp);
-       }
-       if (dm->ds_is_direct_kevent) {
-               pp &= (~_PTHREAD_PRIORITY_FLAGS_MASK |
-                               _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG |
-                               _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
-               // _dispatch_mach_reply_kevent_register assumes this has been done.
-               // Unlike regular sources or queues, the DEFAULTQUEUE flag is used
-               // so that the priority of the channel doesn't act as a QoS floor
-               // for incoming messages (26761457)
-               dm->dq_priority = (dispatch_priority_t)pp;
-       }
-       dm->ds_is_installed = true;
-       if (unlikely(!os_atomic_cmpxchgv2o(dm->dm_refs, dm_disconnect_cnt,
-                       DISPATCH_MACH_NEVER_INSTALLED, 0, &disconnect_cnt, release))) {
-               DISPATCH_INTERNAL_CRASH(disconnect_cnt, "Channel already installed");
-       }
-}
-
-void
-_dispatch_mach_finalize_activation(dispatch_mach_t dm)
-{
-       if (dm->ds_is_direct_kevent && !dm->ds_is_installed) {
-               dispatch_source_t ds = dm->_as_ds;
-               pthread_priority_t pp = _dispatch_source_compute_kevent_priority(ds);
-               if (pp) _dispatch_mach_install(dm, pp);
-       }
-
-       // call "super"
-       _dispatch_queue_finalize_activation(dm->_as_dq);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline dispatch_queue_t
-_dispatch_mach_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags,
-               uint64_t *owned, struct dispatch_object_s **dc_ptr DISPATCH_UNUSED)
-{
-       dispatch_mach_t dm = dou._dm;
-       dispatch_queue_t retq = NULL;
-       dispatch_queue_t dq = _dispatch_queue_get_current();
-
-       // This function performs all mach channel actions. Each action is
-       // responsible for verifying that it takes place on the appropriate queue.
-       // If the current queue is not the correct queue for this action, the
-       // correct queue will be returned and the invoke will be re-driven on that
-       // queue.
-
-       // The order of tests here in invoke and in wakeup should be consistent.
-
-       dispatch_mach_send_refs_t dr = dm->dm_refs;
-       dispatch_queue_t dkq = &_dispatch_mgr_q;
-
-       if (dm->ds_is_direct_kevent) {
-               dkq = dm->do_targetq;
-       }
-
-       if (slowpath(!dm->ds_is_installed)) {
-               // The channel needs to be installed on the kevent queue.
-               if (dq != dkq) {
-                       return dkq;
-               }
-               _dispatch_mach_install(dm, _dispatch_get_defaultpriority());
-       }
-
-       if (_dispatch_queue_class_probe(dm)) {
-               if (dq == dm->do_targetq) {
-                       retq = _dispatch_queue_serial_drain(dm->_as_dq, flags, owned, NULL);
-               } else {
-                       retq = dm->do_targetq;
-               }
-       }
-
-       dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
-
-       if (dr->dm_tail) {
-               bool requires_mgr = dr->dm_needs_mgr || (dr->dm_disconnect_cnt &&
-                               (dm->dm_dkev || !dm->ds_is_direct_kevent));
-               if (!(dm->dm_dkev && DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev)) ||
-                               (dqf & DSF_CANCELED) || dr->dm_disconnect_cnt) {
-                       // The channel has pending messages to send.
-                       if (unlikely(requires_mgr && dq != &_dispatch_mgr_q)) {
-                               return retq ? retq : &_dispatch_mgr_q;
-                       }
-                       dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE;
-                       if (dq != &_dispatch_mgr_q) {
-                               send_flags |= DM_SEND_INVOKE_CAN_RUN_BARRIER;
-                       }
-                       _dispatch_mach_send_invoke(dm, flags, send_flags);
-               }
-       } else if (dqf & DSF_CANCELED) {
-               // The channel has been cancelled and needs to be uninstalled from the
-               // manager queue. After uninstallation, the cancellation handler needs
-               // to be delivered to the target queue.
-               if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) {
-                       // waiting for the delivery of a deferred delete event
-                       return retq;
-               }
-               if ((dqf & DSF_STATE_MASK) != DSF_DELETED) {
-                       if (dq != &_dispatch_mgr_q) {
-                               return retq ? retq : &_dispatch_mgr_q;
-                       }
-                       _dispatch_mach_send_invoke(dm, flags, DM_SEND_INVOKE_CANCEL);
-                       dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
-                       if (unlikely((dqf & DSF_STATE_MASK) != DSF_DELETED)) {
-                               // waiting for the delivery of a deferred delete event
-                               // or deletion didn't happen because send_invoke couldn't
-                               // acquire the send lock
-                               return retq;
-                       }
-               }
-               if (!dm->dm_cancel_handler_called) {
-                       if (dq != dm->do_targetq) {
-                               return retq ? retq : dm->do_targetq;
-                       }
-                       _dispatch_mach_cancel_invoke(dm, flags);
-               }
-       }
-
-       return retq;
-}
-
-DISPATCH_NOINLINE
-void
-_dispatch_mach_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags)
-{
-       _dispatch_queue_class_invoke(dm, flags, _dispatch_mach_invoke2);
-}
-
-void
-_dispatch_mach_wakeup(dispatch_mach_t dm, pthread_priority_t pp,
-               dispatch_wakeup_flags_t flags)
-{
-       // This function determines whether the mach channel needs to be invoked.
-       // The order of tests here in probe and in invoke should be consistent.
-
-       dispatch_mach_send_refs_t dr = dm->dm_refs;
-       dispatch_queue_wakeup_target_t dkq = DISPATCH_QUEUE_WAKEUP_MGR;
-       dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE;
-       dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
-
-       if (dm->ds_is_direct_kevent) {
-               dkq = DISPATCH_QUEUE_WAKEUP_TARGET;
-       }
-
-       if (!dm->ds_is_installed) {
-               // The channel needs to be installed on the kevent queue.
-               tq = dkq;
-               goto done;
-       }
-
-       if (_dispatch_queue_class_probe(dm)) {
-               tq = DISPATCH_QUEUE_WAKEUP_TARGET;
-               goto done;
-       }
-
-       if (_dispatch_lock_is_locked(dr->dm_state_lock.dul_lock)) {
-               // Sending and uninstallation below require the send lock; the channel
-               // will be woken up when the lock is dropped <rdar://15132939&15203957>
-               _dispatch_queue_reinstate_override_priority(dm, (dispatch_priority_t)pp);
-               goto done;
-       }
-
-       if (dr->dm_tail) {
-               bool requires_mgr = dr->dm_needs_mgr || (dr->dm_disconnect_cnt &&
-                               (dm->dm_dkev || !dm->ds_is_direct_kevent));
-               if (!(dm->dm_dkev && DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev)) ||
-                               (dqf & DSF_CANCELED) || dr->dm_disconnect_cnt) {
-                       if (unlikely(requires_mgr)) {
-                               tq = DISPATCH_QUEUE_WAKEUP_MGR;
-                       } else {
-                               tq = DISPATCH_QUEUE_WAKEUP_TARGET;
-                       }
-               } else {
-                       // this can happen when we can't send because the port is
-                       // full, but we should not lose the override
-                       _dispatch_queue_reinstate_override_priority(dm,
-                                       (dispatch_priority_t)pp);
-               }
-       } else if (dqf & DSF_CANCELED) {
-               if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) {
-                       // waiting for the delivery of a deferred delete event
-               } else if ((dqf & DSF_STATE_MASK) != DSF_DELETED) {
-                       // The channel needs to be uninstalled from the manager queue
-                       tq = DISPATCH_QUEUE_WAKEUP_MGR;
-               } else if (!dm->dm_cancel_handler_called) {
-                       // the cancellation handler needs to be delivered to the target
-                       // queue.
-                       tq = DISPATCH_QUEUE_WAKEUP_TARGET;
-               }
-       }
-
-done:
-       if (tq) {
-               return _dispatch_queue_class_wakeup(dm->_as_dq, pp, flags, tq);
-       } else if (pp) {
-               return _dispatch_queue_class_override_drainer(dm->_as_dq, pp, flags);
-       } else if (flags & DISPATCH_WAKEUP_CONSUME) {
-               return _dispatch_release_tailcall(dm);
-       }
-}
-
-#pragma mark -
-#pragma mark dispatch_mach_msg_t
-
-dispatch_mach_msg_t
-dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size,
-               dispatch_mach_msg_destructor_t destructor, mach_msg_header_t **msg_ptr)
-{
-       if (slowpath(size < sizeof(mach_msg_header_t)) ||
-                       slowpath(destructor && !msg)) {
-               DISPATCH_CLIENT_CRASH(size, "Empty message");
-       }
-       dispatch_mach_msg_t dmsg = _dispatch_alloc(DISPATCH_VTABLE(mach_msg),
-                       sizeof(struct dispatch_mach_msg_s) +
-                       (destructor ? 0 : size - sizeof(dmsg->dmsg_msg)));
-       if (destructor) {
-               dmsg->dmsg_msg = msg;
-       } else if (msg) {
-               memcpy(dmsg->dmsg_buf, msg, size);
-       }
-       dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
-       dmsg->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,
-                       false);
-       dmsg->dmsg_destructor = destructor;
-       dmsg->dmsg_size = size;
-       if (msg_ptr) {
-               *msg_ptr = _dispatch_mach_msg_get_msg(dmsg);
-       }
-       return dmsg;
-}
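
A usage sketch: passing a NULL msg with the default destructor allocates an
inline buffer of the requested size and hands it back through msg_ptr for
the caller to fill in (the msgh_id value is hypothetical):

        mach_msg_header_t *hdr;
        dispatch_mach_msg_t msg = dispatch_mach_msg_create(NULL,
                        sizeof(mach_msg_header_t),
                        DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
        hdr->msgh_id = 0x1234; // hypothetical message id
        // ... fill in the remaining header fields, hand the message to
        // dispatch_mach_send() (which takes its own reference), then:
        dispatch_release(msg);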
-
-void
-_dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg)
-{
-       if (dmsg->dmsg_voucher) {
-               _voucher_release(dmsg->dmsg_voucher);
-               dmsg->dmsg_voucher = NULL;
-       }
-       switch (dmsg->dmsg_destructor) {
-       case DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT:
-               break;
-       case DISPATCH_MACH_MSG_DESTRUCTOR_FREE:
-               free(dmsg->dmsg_msg);
-               break;
-       case DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE: {
-               mach_vm_size_t vm_size = dmsg->dmsg_size;
-               mach_vm_address_t vm_addr = (uintptr_t)dmsg->dmsg_msg;
-               (void)dispatch_assume_zero(mach_vm_deallocate(mach_task_self(),
-                               vm_addr, vm_size));
-               break;
-       }}
-}
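
The VM_DEALLOCATE case above pairs with messages wrapped around a buffer
from mach_vm_allocate(); releasing the last reference then returns the
pages to the kernel. A sketch, with a hypothetical two-page size:

        mach_vm_address_t addr = 0;
        mach_vm_size_t size = 2 * vm_page_size;
        if (mach_vm_allocate(mach_task_self(), &addr, size,
                        VM_FLAGS_ANYWHERE) == KERN_SUCCESS) {
                dispatch_mach_msg_t msg = dispatch_mach_msg_create(
                                (mach_msg_header_t *)(uintptr_t)addr, (size_t)size,
                                DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE, NULL);
                // ... build and send the message; the final release runs the
                // destructor above, which mach_vm_deallocate()s the buffer
                dispatch_release(msg);
        }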
-
-static inline mach_msg_header_t*
-_dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg)
-{
-       return dmsg->dmsg_destructor ? dmsg->dmsg_msg :
-                       (mach_msg_header_t*)dmsg->dmsg_buf;
-}
-
-mach_msg_header_t*
-dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg, size_t *size_ptr)
-{
-       if (size_ptr) {
-               *size_ptr = dmsg->dmsg_size;
-       }
-       return _dispatch_mach_msg_get_msg(dmsg);
-}
-
-size_t
-_dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz)
-{
-       size_t offset = 0;
-       offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
-                       dx_kind(dmsg), dmsg);
-       offset += dsnprintf(&buf[offset], bufsiz - offset, "xrefcnt = 0x%x, "
-                       "refcnt = 0x%x, ", dmsg->do_xref_cnt + 1, dmsg->do_ref_cnt + 1);
-       offset += dsnprintf(&buf[offset], bufsiz - offset, "opts/err = 0x%x, "
-                       "msgh[%p] = { ", dmsg->dmsg_options, dmsg->dmsg_buf);
-       mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg);
-       if (hdr->msgh_id) {
-               offset += dsnprintf(&buf[offset], bufsiz - offset, "id 0x%x, ",
-                               hdr->msgh_id);
-       }
-       if (hdr->msgh_size) {
-               offset += dsnprintf(&buf[offset], bufsiz - offset, "size %u, ",
-                               hdr->msgh_size);
-       }
-       if (hdr->msgh_bits) {
-               offset += dsnprintf(&buf[offset], bufsiz - offset, "bits <l %u, r %u",
-                               MACH_MSGH_BITS_LOCAL(hdr->msgh_bits),
-                               MACH_MSGH_BITS_REMOTE(hdr->msgh_bits));
-               if (MACH_MSGH_BITS_OTHER(hdr->msgh_bits)) {
-                       offset += dsnprintf(&buf[offset], bufsiz - offset, ", o 0x%x",
-                                       MACH_MSGH_BITS_OTHER(hdr->msgh_bits));
-               }
-               offset += dsnprintf(&buf[offset], bufsiz - offset, ">, ");
-       }
-       if (hdr->msgh_local_port && hdr->msgh_remote_port) {
-               offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x, "
-                               "remote 0x%x", hdr->msgh_local_port, hdr->msgh_remote_port);
-       } else if (hdr->msgh_local_port) {
-               offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x",
-                               hdr->msgh_local_port);
-       } else if (hdr->msgh_remote_port) {
-               offset += dsnprintf(&buf[offset], bufsiz - offset, "remote 0x%x",
-                               hdr->msgh_remote_port);
-       } else {
-               offset += dsnprintf(&buf[offset], bufsiz - offset, "no ports");
-       }
-       offset += dsnprintf(&buf[offset], bufsiz - offset, " } }");
-       return offset;
-}
-
-#pragma mark -
-#pragma mark dispatch_mig_server
-
-mach_msg_return_t
-dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz,
-               dispatch_mig_callback_t callback)
-{
-       mach_msg_options_t options = MACH_RCV_MSG | MACH_RCV_TIMEOUT
-               | MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX)
-               | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | MACH_RCV_VOUCHER;
-       mach_msg_options_t tmp_options;
-       mig_reply_error_t *bufTemp, *bufRequest, *bufReply;
-       mach_msg_return_t kr = 0;
-       uint64_t assertion_token = 0;
-       unsigned int cnt = 1000; // do not stall out serial queues
-       boolean_t demux_success;
-       bool received = false;
-       size_t rcv_size = maxmsgsz + MAX_TRAILER_SIZE;
-
-       bufRequest = alloca(rcv_size);
-       bufRequest->RetCode = 0;
-       for (mach_vm_address_t p = mach_vm_trunc_page(
-                       (mach_vm_address_t)bufRequest + vm_page_size);
-                       p < (mach_vm_address_t)bufRequest + rcv_size; p += vm_page_size) {
-               *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard
-       }
-
-       bufReply = alloca(rcv_size);
-       bufReply->Head.msgh_size = 0;
-       for (mach_vm_address_t p = mach_vm_trunc_page(
-                       (mach_vm_address_t)bufReply + vm_page_size);
-                       p < (mach_vm_address_t)bufReply + rcv_size; p += vm_page_size) {
-               *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard
-       }
-
-#if DISPATCH_DEBUG
-       options |= MACH_RCV_LARGE; // rdar://problem/8422992
-#endif
-       tmp_options = options;
-       // XXX FIXME -- change this to not starve out the target queue
-       for (;;) {
-               if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || (--cnt == 0)) {
-                       options &= ~MACH_RCV_MSG;
-                       tmp_options &= ~MACH_RCV_MSG;
-
-                       if (!(tmp_options & MACH_SEND_MSG)) {
-                               goto out;
-                       }
-               }
-               kr = mach_msg(&bufReply->Head, tmp_options, bufReply->Head.msgh_size,
-                       (mach_msg_size_t)rcv_size, (mach_port_t)ds->ds_ident_hack, 0, 0);
-
-               tmp_options = options;
-
-               if (slowpath(kr)) {
-                       switch (kr) {
-                       case MACH_SEND_INVALID_DEST:
-                       case MACH_SEND_TIMED_OUT:
-                               if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) {
-                                       mach_msg_destroy(&bufReply->Head);
-                               }
-                               break;
-                       case MACH_RCV_TIMED_OUT:
-                               // Don't return an error if a message was sent this time or
-                               // a message was successfully received previously
-                               // rdar://problems/7363620&7791738
-                               if (bufReply->Head.msgh_remote_port || received) {
-                                       kr = MACH_MSG_SUCCESS;
-                               }
-                               break;
-                       case MACH_RCV_INVALID_NAME:
-                               break;
-#if DISPATCH_DEBUG
-                       case MACH_RCV_TOO_LARGE:
-                               // receive messages that are too large and log their id and size
-                               // rdar://problem/8422992
-                               tmp_options &= ~MACH_RCV_LARGE;
-                               size_t large_size = bufReply->Head.msgh_size + MAX_TRAILER_SIZE;
-                               void *large_buf = malloc(large_size);
-                               if (large_buf) {
-                                       rcv_size = large_size;
-                                       bufReply = large_buf;
-                               }
-                               if (!mach_msg(&bufReply->Head, tmp_options, 0,
-                                               (mach_msg_size_t)rcv_size,
-                                               (mach_port_t)ds->ds_ident_hack, 0, 0)) {
-                                       _dispatch_log("BUG in libdispatch client: "
-                                                       "dispatch_mig_server received message larger than "
-                                                       "requested size %zd: id = 0x%x, size = %d",
-                                                       maxmsgsz, bufReply->Head.msgh_id,
-                                                       bufReply->Head.msgh_size);
-                               }
-                               if (large_buf) {
-                                       free(large_buf);
-                               }
-                               // fall through
-#endif
-                       default:
-                               _dispatch_bug_mach_client(
-                                               "dispatch_mig_server: mach_msg() failed", kr);
-                               break;
-                       }
-                       goto out;
-               }
-
-               if (!(tmp_options & MACH_RCV_MSG)) {
-                       goto out;
-               }
-
-               if (assertion_token) {
-#if DISPATCH_USE_IMPORTANCE_ASSERTION
-                       int r = proc_importance_assertion_complete(assertion_token);
-                       (void)dispatch_assume_zero(r);
-#endif
-                       assertion_token = 0;
-               }
-               received = true;
-
-               bufTemp = bufRequest;
-               bufRequest = bufReply;
-               bufReply = bufTemp;
-
-#if DISPATCH_USE_IMPORTANCE_ASSERTION
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated-declarations"
-               int r = proc_importance_assertion_begin_with_msg(&bufRequest->Head,
-                               NULL, &assertion_token);
-               if (r && slowpath(r != EIO)) {
-                       (void)dispatch_assume_zero(r);
-               }
-#pragma clang diagnostic pop
-#endif
-               _voucher_replace(voucher_create_with_mach_msg(&bufRequest->Head));
-               demux_success = callback(&bufRequest->Head, &bufReply->Head);
-
-               if (!demux_success) {
-                       // destroy the request - but not the reply port
-                       bufRequest->Head.msgh_remote_port = 0;
-                       mach_msg_destroy(&bufRequest->Head);
-               } else if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
-                       // if MACH_MSGH_BITS_COMPLEX is _not_ set, then bufReply->RetCode
-                       // is present
-                       if (slowpath(bufReply->RetCode)) {
-                               if (bufReply->RetCode == MIG_NO_REPLY) {
-                                       continue;
-                               }
-
-                               // destroy the request - but not the reply port
-                               bufRequest->Head.msgh_remote_port = 0;
-                               mach_msg_destroy(&bufRequest->Head);
-                       }
-               }
-
-               if (bufReply->Head.msgh_remote_port) {
-                       tmp_options |= MACH_SEND_MSG;
-                       if (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) !=
-                                       MACH_MSG_TYPE_MOVE_SEND_ONCE) {
-                               tmp_options |= MACH_SEND_TIMEOUT;
-                       }
-               }
-       }
-
-out:
-       if (assertion_token) {
-#if DISPATCH_USE_IMPORTANCE_ASSERTION
-               int r = proc_importance_assertion_complete(assertion_token);
-               (void)dispatch_assume_zero(r);
-#endif
-       }
-
-       return kr;
-}
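
dispatch_mig_server() is normally driven from the event handler of a
MACH_RECV dispatch source; recv_port, queue, max_request_size and demux
below are placeholders for the receive right, the handling queue, and the
MIG-generated maximum request size and demux routine:

        dispatch_source_t ds = dispatch_source_create(
                        DISPATCH_SOURCE_TYPE_MACH_RECV, recv_port, 0, queue);
        dispatch_source_set_event_handler(ds, ^{
                mach_msg_return_t mr = dispatch_mig_server(ds,
                                max_request_size, demux);
                if (mr != MACH_MSG_SUCCESS) {
                        // handle the failure (hypothetical policy)
                }
        });
        dispatch_resume(ds);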
-
-#endif /* HAVE_MACH */
-
-#pragma mark -
-#pragma mark dispatch_source_debug
-
-DISPATCH_NOINLINE
-static const char *
-_evfiltstr(short filt)
-{
-       switch (filt) {
-#define _evfilt2(f) case (f): return #f
-       _evfilt2(EVFILT_READ);
-       _evfilt2(EVFILT_WRITE);
-       _evfilt2(EVFILT_AIO);
-       _evfilt2(EVFILT_VNODE);
-       _evfilt2(EVFILT_PROC);
-       _evfilt2(EVFILT_SIGNAL);
-       _evfilt2(EVFILT_TIMER);
-#if HAVE_MACH
-       _evfilt2(EVFILT_MACHPORT);
-       _evfilt2(DISPATCH_EVFILT_MACH_NOTIFICATION);
-#endif
-       _evfilt2(EVFILT_FS);
-       _evfilt2(EVFILT_USER);
-#ifdef EVFILT_SOCK
-       _evfilt2(EVFILT_SOCK);
-#endif
-#ifdef EVFILT_MEMORYSTATUS
-       _evfilt2(EVFILT_MEMORYSTATUS);
-#endif
-
-       _evfilt2(DISPATCH_EVFILT_TIMER);
-       _evfilt2(DISPATCH_EVFILT_CUSTOM_ADD);
-       _evfilt2(DISPATCH_EVFILT_CUSTOM_OR);
-       default:
-               return "EVFILT_missing";
-       }
-}
-
-#if DISPATCH_DEBUG
-static const char *
-_evflagstr2(uint16_t *flagsp)
-{
-#define _evflag2(f) \
-       if ((*flagsp & (f)) == (f) && (f)) { \
-               *flagsp &= ~(f); \
-               return #f "|"; \
-       }
-       _evflag2(EV_ADD);
-       _evflag2(EV_DELETE);
-       _evflag2(EV_ENABLE);
-       _evflag2(EV_DISABLE);
-       _evflag2(EV_ONESHOT);
-       _evflag2(EV_CLEAR);
-       _evflag2(EV_RECEIPT);
-       _evflag2(EV_DISPATCH);
-       _evflag2(EV_UDATA_SPECIFIC);
-#ifdef EV_POLL
-       _evflag2(EV_POLL);
-#endif
-#ifdef EV_OOBAND
-       _evflag2(EV_OOBAND);
-#endif
-       _evflag2(EV_ERROR);
-       _evflag2(EV_EOF);
-       _evflag2(EV_VANISHED);
-       *flagsp = 0;
-       return "EV_UNKNOWN ";
-}
-
-DISPATCH_NOINLINE
-static const char *
-_evflagstr(uint16_t flags, char *str, size_t strsize)
-{
-       str[0] = 0;
-       while (flags) {
-               strlcat(str, _evflagstr2(&flags), strsize);
-       }
-       size_t sz = strlen(str);
-       if (sz) str[sz-1] = 0;
-       return str;
-}
-#endif
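
A quick check of the decoder pair above (DISPATCH_DEBUG builds only):
_evflagstr2() consumes one known flag per call and appends a trailing
separator, which _evflagstr() strips at the end:

        char buf[64];
        _evflagstr(EV_ADD | EV_CLEAR, buf, sizeof(buf));
        // buf now holds "EV_ADD|EV_CLEAR"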
-
-static size_t
-_dispatch_source_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz)
-{
-       dispatch_queue_t target = ds->do_targetq;
-       return dsnprintf(buf, bufsiz, "target = %s[%p], ident = 0x%lx, "
-                       "mask = 0x%lx, pending_data = 0x%lx, registered = %d, "
-                       "armed = %d, deleted = %d%s, canceled = %d, ",
-                       target && target->dq_label ? target->dq_label : "", target,
-                       ds->ds_ident_hack, ds->ds_pending_data_mask, ds->ds_pending_data,
-                       ds->ds_is_installed, (bool)(ds->dq_atomic_flags & DSF_ARMED),
-                       (bool)(ds->dq_atomic_flags & DSF_DELETED),
-                       (ds->dq_atomic_flags & DSF_DEFERRED_DELETE) ? " (pending)" : "",
-                       (bool)(ds->dq_atomic_flags & DSF_CANCELED));
-}
-
-static size_t
-_dispatch_timer_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz)
-{
-       dispatch_source_refs_t dr = ds->ds_refs;
-       return dsnprintf(buf, bufsiz, "timer = { target = 0x%llx, deadline = 0x%llx"
-                       ", last_fire = 0x%llx, interval = 0x%llx, flags = 0x%lx }, ",
-                       (unsigned long long)ds_timer(dr).target,
-                       (unsigned long long)ds_timer(dr).deadline,
-                       (unsigned long long)ds_timer(dr).last_fire,
-                       (unsigned long long)ds_timer(dr).interval, ds_timer(dr).flags);
-}
-
-size_t
-_dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz)
-{
-       size_t offset = 0;
-       offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
-                       dx_kind(ds), ds);
-       offset += _dispatch_object_debug_attr(ds, &buf[offset], bufsiz - offset);
-       offset += _dispatch_source_debug_attr(ds, &buf[offset], bufsiz - offset);
-       if (ds->ds_is_timer) {
-               offset += _dispatch_timer_debug_attr(ds, &buf[offset], bufsiz - offset);
-       }
-       const char *filter;
-       if (!ds->ds_dkev) {
-               filter = "????";
-       } else if (ds->ds_is_custom_source) {
-               filter = _evfiltstr((int16_t)(uintptr_t)ds->ds_dkev);
-       } else {
-               filter = _evfiltstr(ds->ds_dkev->dk_kevent.filter);
-       }
-       offset += dsnprintf(&buf[offset], bufsiz - offset, "kevent = %p%s, "
-                       "filter = %s }", ds->ds_dkev,  ds->ds_is_direct_kevent ? " (direct)"
-                       : "", filter);
-       return offset;
-}
-
-#if HAVE_MACH
-static size_t
-_dispatch_mach_debug_attr(dispatch_mach_t dm, char* buf, size_t bufsiz)
-{
-       dispatch_queue_t target = dm->do_targetq;
-       return dsnprintf(buf, bufsiz, "target = %s[%p], receive = 0x%x, "
-                       "send = 0x%x, send-possible = 0x%x%s, checkin = 0x%x%s, "
-                       "send state = %016llx, disconnected = %d, canceled = %d ",
-                       target && target->dq_label ? target->dq_label : "", target,
-                       dm->ds_dkev ? (mach_port_t)dm->ds_dkev->dk_kevent.ident : 0,
-                       dm->dm_refs->dm_send,
-                       dm->dm_dkev ? (mach_port_t)dm->dm_dkev->dk_kevent.ident : 0,
-                       dm->dm_dkev && DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev) ?
-                       " (armed)" : "", dm->dm_refs->dm_checkin_port,
-                       dm->dm_refs->dm_checkin ? " (pending)" : "",
-                       dm->dm_refs->dm_state, dm->dm_refs->dm_disconnect_cnt,
-                       (bool)(dm->dq_atomic_flags & DSF_CANCELED));
-}
-
-size_t
-_dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz)
-{
-       size_t offset = 0;
-       offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
-                       dm->dq_label && !dm->dm_cancel_handler_called ? dm->dq_label :
-                       dx_kind(dm), dm);
-       offset += _dispatch_object_debug_attr(dm, &buf[offset], bufsiz - offset);
-       offset += _dispatch_mach_debug_attr(dm, &buf[offset], bufsiz - offset);
-       offset += dsnprintf(&buf[offset], bufsiz - offset, "}");
-       return offset;
-}
-#endif // HAVE_MACH
-
-#if DISPATCH_DEBUG
-DISPATCH_NOINLINE
-static void
-dispatch_kevent_debug(const char *verb, const _dispatch_kevent_qos_s *kev,
-               int i, int n, const char *function, unsigned int line)
-{
-       char flagstr[256];
-       char i_n[31];
-
-       if (n > 1) {
-               snprintf(i_n, sizeof(i_n), "%d/%d ", i + 1, n);
-       } else {
-               i_n[0] = '\0';
-       }
-#if DISPATCH_USE_KEVENT_QOS
-       _dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, "
-                       "flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx, "
-                       "qos = 0x%x, ext[0] = 0x%llx, ext[1] = 0x%llx, ext[2] = 0x%llx, "
-                       "ext[3] = 0x%llx }: %s #%u", verb, kev, i_n, kev->ident,
-                       _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr,
-                       sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata,
-                       kev->qos, kev->ext[0], kev->ext[1], kev->ext[2], kev->ext[3],
-                       function, line);
-#else
-       _dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, "
-                       "flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx, "
-                       "ext[0] = 0x%llx, ext[1] = 0x%llx }: %s #%u", verb, kev, i_n,
-                       kev->ident, _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr,
-                       sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata,
-#ifndef IGNORE_KEVENT64_EXT
-                       kev->ext[0], kev->ext[1],
-#else
-                       0ull, 0ull,
-#endif
-                       function, line);
-#endif
-}
-
-#if HAVE_MACH
-
-#ifndef MACH_PORT_TYPE_SPREQUEST
-#define MACH_PORT_TYPE_SPREQUEST 0x40000000
-#endif
-
-DISPATCH_NOINLINE
-void
-dispatch_debug_machport(mach_port_t name, const char* str)
-{
-       mach_port_type_t type;
-       mach_msg_bits_t ns = 0, nr = 0, nso = 0, nd = 0;
-       unsigned int dnreqs = 0, dnrsiz;
-       kern_return_t kr = mach_port_type(mach_task_self(), name, &type);
-       if (kr) {
-               _dispatch_log("machport[0x%08x] = { error(0x%x) \"%s\" }: %s", name,
-                               kr, mach_error_string(kr), str);
-               return;
-       }
-       if (type & MACH_PORT_TYPE_SEND) {
-               (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
-                               MACH_PORT_RIGHT_SEND, &ns));
-       }
-       if (type & MACH_PORT_TYPE_SEND_ONCE) {
-               (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
-                               MACH_PORT_RIGHT_SEND_ONCE, &nso));
-       }
-       if (type & MACH_PORT_TYPE_DEAD_NAME) {
-               (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
-                               MACH_PORT_RIGHT_DEAD_NAME, &nd));
-       }
-       if (type & (MACH_PORT_TYPE_RECEIVE|MACH_PORT_TYPE_SEND)) {
-               kr = mach_port_dnrequest_info(mach_task_self(), name, &dnrsiz, &dnreqs);
-               if (kr != KERN_INVALID_RIGHT) (void)dispatch_assume_zero(kr);
-       }
-       if (type & MACH_PORT_TYPE_RECEIVE) {
-               mach_port_status_t status = { .mps_pset = 0, };
-               mach_msg_type_number_t cnt = MACH_PORT_RECEIVE_STATUS_COUNT;
-               (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
-                               MACH_PORT_RIGHT_RECEIVE, &nr));
-               (void)dispatch_assume_zero(mach_port_get_attributes(mach_task_self(),
-                               name, MACH_PORT_RECEIVE_STATUS, (void*)&status, &cnt));
-               _dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) "
-                               "dnreqs(%03u) spreq(%s) nsreq(%s) pdreq(%s) srights(%s) "
-                               "sorights(%03u) qlim(%03u) msgcount(%03u) mkscount(%03u) "
-                               "seqno(%03u) }: %s", name, nr, ns, nso, nd, dnreqs,
-                               type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N",
-                               status.mps_nsrequest ? "Y":"N", status.mps_pdrequest ? "Y":"N",
-                               status.mps_srights ? "Y":"N", status.mps_sorights,
-                               status.mps_qlimit, status.mps_msgcount, status.mps_mscount,
-                               status.mps_seqno, str);
-       } else if (type & (MACH_PORT_TYPE_SEND|MACH_PORT_TYPE_SEND_ONCE|
-                       MACH_PORT_TYPE_DEAD_NAME)) {
-               _dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) "
-                               "dnreqs(%03u) spreq(%s) }: %s", name, nr, ns, nso, nd, dnreqs,
-                               type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N", str);
-       } else {
-               _dispatch_log("machport[0x%08x] = { type(0x%08x) }: %s", name, type,
-                               str);
-       }
-}
-
-#endif // HAVE_MACH
-
-#endif // DISPATCH_DEBUG
index a9bf1c5ba3d346d22a99882501ef1e52ec07b239..55b81e78775aaada4adcc34a08cd9ed84338b2e7 100644 (file)
 #include <dispatch/base.h> // for HeaderDoc
 #endif
 
-#define DISPATCH_EVFILT_TIMER          (-EVFILT_SYSCOUNT - 1)
-#define DISPATCH_EVFILT_CUSTOM_ADD     (-EVFILT_SYSCOUNT - 2)
-#define DISPATCH_EVFILT_CUSTOM_OR      (-EVFILT_SYSCOUNT - 3)
-#define DISPATCH_EVFILT_MACH_NOTIFICATION      (-EVFILT_SYSCOUNT - 4)
-#define DISPATCH_EVFILT_SYSCOUNT       ( EVFILT_SYSCOUNT + 4)
-
-#if HAVE_MACH
-// NOTE: dispatch_source_mach_send_flags_t and dispatch_source_mach_recv_flags_t
-//       bit values must not overlap as they share the same kevent fflags !
-
-/*!
- * @enum dispatch_source_mach_send_flags_t
- *
- * @constant DISPATCH_MACH_SEND_DELETED
- * Port-deleted notification. Disabled for source registration.
- */
-enum {
-       DISPATCH_MACH_SEND_DELETED = 0x4,
-};
-/*!
- * @enum dispatch_source_mach_recv_flags_t
- *
- * @constant DISPATCH_MACH_RECV_MESSAGE
- * Receive right has pending messages
- *
- * @constant DISPATCH_MACH_RECV_MESSAGE_DIRECT
- * Receive messages from receive right directly via kevent64()
- *
- * @constant DISPATCH_MACH_RECV_NO_SENDERS
- * Receive right has no more senders. TODO <rdar://problem/8132399>
- */
-enum {
-       DISPATCH_MACH_RECV_MESSAGE = 0x2,
-       DISPATCH_MACH_RECV_MESSAGE_DIRECT = 0x10,
-       DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE = 0x20,
-       DISPATCH_MACH_RECV_NO_SENDERS = 0x40,
-};
-#endif // HAVE_MACH
-
 enum {
        /* DISPATCH_TIMER_STRICT 0x1 */
        /* DISPATCH_TIMER_BACKGROUND = 0x2, */
-       DISPATCH_TIMER_WALL_CLOCK = 0x4,
+       DISPATCH_TIMER_CLOCK_MACH = 0x4,
        DISPATCH_TIMER_INTERVAL = 0x8,
-       DISPATCH_TIMER_WITH_AGGREGATE = 0x10,
+       DISPATCH_TIMER_AFTER = 0x10,
        /* DISPATCH_INTERVAL_UI_ANIMATION = 0x20 */
-       DISPATCH_TIMER_AFTER = 0x40,
 };
 
-#define DISPATCH_TIMER_QOS_NORMAL 0u
-#define DISPATCH_TIMER_QOS_CRITICAL 1u
-#define DISPATCH_TIMER_QOS_BACKGROUND 2u
-#define DISPATCH_TIMER_QOS_COUNT (DISPATCH_TIMER_QOS_BACKGROUND + 1)
-#define DISPATCH_TIMER_QOS(tidx) (((uintptr_t)(tidx) >> 1) & 0x3ul)
-
-#define DISPATCH_TIMER_CLOCK(tidx) ((dispatch_clock_t)((uintptr_t)(tidx) & 1))
-
-#define DISPATCH_TIMER_INDEX(clock, qos) ((qos) << 1 | (clock))
-#define DISPATCH_TIMER_INDEX_DISARM \
-               DISPATCH_TIMER_INDEX(0, DISPATCH_TIMER_QOS_COUNT)
-#define DISPATCH_TIMER_INDEX_COUNT (DISPATCH_TIMER_INDEX_DISARM + 1)
-#define DISPATCH_TIMER_IDENT(flags) ({ unsigned long f = (flags); \
-               DISPATCH_TIMER_INDEX(f & DISPATCH_TIMER_WALL_CLOCK ? \
-               DISPATCH_CLOCK_WALL : DISPATCH_CLOCK_MACH, \
-               f & DISPATCH_TIMER_STRICT ? DISPATCH_TIMER_QOS_CRITICAL : \
-               f & DISPATCH_TIMER_BACKGROUND ? DISPATCH_TIMER_QOS_BACKGROUND : \
-               DISPATCH_TIMER_QOS_NORMAL); })
-
-struct dispatch_kevent_s {
-       TAILQ_ENTRY(dispatch_kevent_s) dk_list;
-       TAILQ_HEAD(, dispatch_source_refs_s) dk_sources;
-       _dispatch_kevent_qos_s dk_kevent;
-};
-
-typedef struct dispatch_kevent_s *dispatch_kevent_t;
-
-typedef typeof(((dispatch_kevent_t)NULL)->dk_kevent.udata) _dispatch_kevent_qos_udata_t;
-
-#define DISPATCH_KEV_CUSTOM_ADD ((dispatch_kevent_t)DISPATCH_EVFILT_CUSTOM_ADD)
-#define DISPATCH_KEV_CUSTOM_OR  ((dispatch_kevent_t)DISPATCH_EVFILT_CUSTOM_OR)
-
-struct dispatch_source_type_s {
-       _dispatch_kevent_qos_s ke;
-       uint64_t mask;
-       void (*init)(dispatch_source_t ds, dispatch_source_type_t type,
-                       uintptr_t handle, unsigned long mask);
-};
-
-struct dispatch_timer_source_s {
-       uint64_t target;
-       uint64_t deadline;
-       uint64_t last_fire;
-       uint64_t interval;
-       uint64_t leeway;
-       unsigned long flags; // dispatch_timer_flags_t
-       unsigned long missed;
-};
-
-enum {
-       DS_EVENT_HANDLER = 0,
-       DS_CANCEL_HANDLER,
-       DS_REGISTN_HANDLER,
-};
-
-// Source state which may contain references to the source object
-// Separately allocated so that 'leaks' can see sources <rdar://problem/9050566>
-typedef struct dispatch_source_refs_s {
-       TAILQ_ENTRY(dispatch_source_refs_s) dr_list;
-       uintptr_t dr_source_wref; // "weak" backref to dispatch_source_t
-       dispatch_continuation_t volatile ds_handler[3];
-} *dispatch_source_refs_t;
-
-typedef struct dispatch_timer_source_refs_s {
-       struct dispatch_source_refs_s _ds_refs;
-       struct dispatch_timer_source_s _ds_timer;
-       TAILQ_ENTRY(dispatch_timer_source_refs_s) dt_list;
-} *dispatch_timer_source_refs_t;
-
-typedef struct dispatch_timer_source_aggregate_refs_s {
-       struct dispatch_timer_source_refs_s _dsa_refs;
-       TAILQ_ENTRY(dispatch_timer_source_aggregate_refs_s) dra_list;
-       TAILQ_ENTRY(dispatch_timer_source_aggregate_refs_s) dta_list;
-} *dispatch_timer_source_aggregate_refs_t;
-
-#define _dispatch_ptr2wref(ptr) (~(uintptr_t)(ptr))
-#define _dispatch_wref2ptr(ref) ((void*)~(ref))
-#define _dispatch_source_from_refs(dr) \
-               ((dispatch_source_t)_dispatch_wref2ptr((dr)->dr_source_wref))
-#define ds_timer(dr) \
-               (((dispatch_timer_source_refs_t)(dr))->_ds_timer)
-#define ds_timer_aggregate(ds) \
-               ((dispatch_timer_aggregate_t)((ds)->dq_specific_q))
-
 DISPATCH_ALWAYS_INLINE
 static inline unsigned int
-_dispatch_source_timer_idx(dispatch_source_refs_t dr)
+_dispatch_source_timer_idx(dispatch_unote_t du)
 {
-       return DISPATCH_TIMER_IDENT(ds_timer(dr).flags);
+       uint32_t clock, qos = 0, fflags = du._dt->du_fflags;
+
+       dispatch_assert(DISPATCH_CLOCK_MACH == 1);
+       dispatch_assert(DISPATCH_CLOCK_WALL == 0);
+       clock = (fflags & DISPATCH_TIMER_CLOCK_MACH) / DISPATCH_TIMER_CLOCK_MACH;
+
+#if DISPATCH_HAVE_TIMER_QOS
+       dispatch_assert(DISPATCH_TIMER_STRICT == DISPATCH_TIMER_QOS_CRITICAL);
+       dispatch_assert(DISPATCH_TIMER_BACKGROUND == DISPATCH_TIMER_QOS_BACKGROUND);
+       qos = fflags & (DISPATCH_TIMER_STRICT | DISPATCH_TIMER_BACKGROUND);
+       // flags are normalized so this should never happen
+       dispatch_assert(qos < DISPATCH_TIMER_QOS_COUNT);
+#endif
+
+       return DISPATCH_TIMER_INDEX(clock, qos);
 }
 
 #define _DISPATCH_SOURCE_HEADER(refs) \
        DISPATCH_QUEUE_HEADER(refs); \
-       /* LP64: fills 32bit hole in QUEUE_HEADER */ \
        unsigned int \
-               ds_is_level:1, \
-               ds_is_adder:1, \
                ds_is_installed:1, \
-               ds_is_direct_kevent:1, \
-               ds_is_custom_source:1, \
-               ds_needs_rearm:1, \
-               ds_is_timer:1, \
-               ds_vmpressure_override:1, \
-               ds_memorypressure_override:1, \
-               dm_handler_is_block:1, \
+               dm_needs_mgr:1, \
                dm_connect_handler_called:1, \
-               dm_cancel_handler_called:1; \
-       dispatch_kevent_t ds_dkev; \
-       dispatch_##refs##_refs_t ds_refs; \
-       unsigned long ds_pending_data_mask;
+               dm_uninstalled:1, \
+               dm_cancel_handler_called:1, \
+               dm_is_xpc:1
 
 #define DISPATCH_SOURCE_HEADER(refs) \
        struct dispatch_source_s _as_ds[0]; \
@@ -199,150 +79,53 @@ _dispatch_source_timer_idx(dispatch_source_refs_t dr)
 DISPATCH_CLASS_DECL_BARE(source);
 _OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(dispatch_source, dispatch_object);
 
-#if DISPATCH_PURE_C
+#ifndef __cplusplus
 struct dispatch_source_s {
        _DISPATCH_SOURCE_HEADER(source);
-       unsigned long ds_ident_hack;
-       unsigned long ds_data;
-       unsigned long ds_pending_data;
-} DISPATCH_QUEUE_ALIGN;
-#endif
-
-#if HAVE_MACH
-// Mach channel state which may contain references to the channel object
-// layout must match dispatch_source_refs_s
-struct dispatch_mach_refs_s {
-       TAILQ_ENTRY(dispatch_mach_refs_s) dr_list;
-       uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t
-       dispatch_mach_handler_function_t dm_handler_func;
-       void *dm_handler_ctxt;
-};
-typedef struct dispatch_mach_refs_s *dispatch_mach_refs_t;
-
-struct dispatch_mach_reply_refs_s {
-       TAILQ_ENTRY(dispatch_mach_reply_refs_s) dr_list;
-       uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t
-       dispatch_kevent_t dmr_dkev;
-       void *dmr_ctxt;
-       mach_port_t dmr_reply;
-       dispatch_priority_t dmr_priority;
-       voucher_t dmr_voucher;
-       TAILQ_ENTRY(dispatch_mach_reply_refs_s) dmr_list;
-};
-typedef struct dispatch_mach_reply_refs_s *dispatch_mach_reply_refs_t;
-
-#define _DISPATCH_MACH_STATE_UNUSED_MASK_2       0xff00000000000000ull
-#define DISPATCH_MACH_STATE_OVERRIDE_MASK        0x00ffff0000000000ull
-#define _DISPATCH_MACH_STATE_UNUSED_MASK_1       0x000000f000000000ull
-#define DISPATCH_MACH_STATE_DIRTY                0x0000000800000000ull
-#define DISPATCH_MACH_STATE_RECEIVED_OVERRIDE    0x0000000400000000ull
-#define _DISPATCH_MACH_STATE_UNUSED_MASK_0       0x0000000200000000ull
-#define DISPATCH_MACH_STATE_PENDING_BARRIER      0x0000000100000000ull
-#define DISPATCH_MACH_STATE_UNLOCK_MASK          0x00000000ffffffffull
+       uint64_t ds_data DISPATCH_ATOMIC64_ALIGN;
+       uint64_t ds_pending_data DISPATCH_ATOMIC64_ALIGN;
+} DISPATCH_ATOMIC64_ALIGN;
 
-struct dispatch_mach_send_refs_s {
-       TAILQ_ENTRY(dispatch_mach_send_refs_s) dr_list;
-       uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t
-       dispatch_mach_msg_t dm_checkin;
-       TAILQ_HEAD(, dispatch_mach_reply_refs_s) dm_replies;
-       dispatch_unfair_lock_s dm_replies_lock;
-#define DISPATCH_MACH_DISCONNECT_MAGIC_BASE (0x80000000)
-#define DISPATCH_MACH_NEVER_INSTALLED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 0)
-#define DISPATCH_MACH_NEVER_CONNECTED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 1)
-       uint32_t volatile dm_disconnect_cnt;
-       union {
-               uint64_t volatile dm_state;
-               DISPATCH_STRUCT_LITTLE_ENDIAN_2(
-                       dispatch_unfair_lock_s dm_state_lock,
-                       uint32_t dm_state_bits
-               );
-       };
-       unsigned int dm_needs_mgr:1;
-       struct dispatch_object_s *volatile dm_tail;
-       struct dispatch_object_s *volatile dm_head;
-       mach_port_t dm_send, dm_checkin_port;
-};
-typedef struct dispatch_mach_send_refs_s *dispatch_mach_send_refs_t;
+// Extracts source data from the ds_data field
+#define DISPATCH_SOURCE_GET_DATA(d) ((d) & 0xFFFFFFFF)
 
-DISPATCH_CLASS_DECL(mach);
-#if DISPATCH_PURE_C
-struct dispatch_mach_s {
-       DISPATCH_SOURCE_HEADER(mach);
-       dispatch_kevent_t dm_dkev;
-       dispatch_mach_send_refs_t dm_refs;
-} DISPATCH_QUEUE_ALIGN;
-#endif
+// Extracts status from the ds_data field
+#define DISPATCH_SOURCE_GET_STATUS(d) ((d) >> 32)
 
-DISPATCH_CLASS_DECL(mach_msg);
-struct dispatch_mach_msg_s {
-       DISPATCH_OBJECT_HEADER(mach_msg);
-       union {
-               mach_msg_option_t dmsg_options;
-               mach_error_t dmsg_error;
-       };
-       mach_port_t dmsg_reply;
-       pthread_priority_t dmsg_priority;
-       voucher_t dmsg_voucher;
-       dispatch_mach_msg_destructor_t dmsg_destructor;
-       size_t dmsg_size;
-       union {
-               mach_msg_header_t *dmsg_msg;
-               char dmsg_buf[0];
-       };
-};
-#endif // HAVE_MACH
+// Combine data and status for the ds_data field
+#define DISPATCH_SOURCE_COMBINE_DATA_AND_STATUS(data, status) \
+               ((((uint64_t)(status)) << 32) | (data))
 
-extern const struct dispatch_source_type_s _dispatch_source_type_after;
+#endif // __cplusplus
 
-#if TARGET_OS_EMBEDDED
-#define DSL_HASH_SIZE  64u // must be a power of two
-#else
-#define DSL_HASH_SIZE 256u // must be a power of two
-#endif
-
-dispatch_source_t
-_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp,
-               const struct dispatch_continuation_s *dc);
+void _dispatch_source_refs_register(dispatch_source_t ds,
+               dispatch_wlh_t wlh, dispatch_priority_t bp);
+void _dispatch_source_refs_unregister(dispatch_source_t ds, uint32_t options);
 void _dispatch_source_xref_dispose(dispatch_source_t ds);
-void _dispatch_source_dispose(dispatch_source_t ds);
-void _dispatch_source_finalize_activation(dispatch_source_t ds);
-void _dispatch_source_invoke(dispatch_source_t ds, dispatch_invoke_flags_t flags);
-void _dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp,
+void _dispatch_source_dispose(dispatch_source_t ds, bool *allow_free);
+void _dispatch_source_finalize_activation(dispatch_source_t ds,
+               bool *allow_resume);
+void _dispatch_source_invoke(dispatch_source_t ds,
+               dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags);
+void _dispatch_source_wakeup(dispatch_source_t ds, dispatch_qos_t qos,
                dispatch_wakeup_flags_t flags);
+void _dispatch_source_merge_evt(dispatch_unote_t du, uint32_t flags,
+               uintptr_t data, uintptr_t status, pthread_priority_t pp);
 size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz);
-void _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval);
-void _dispatch_source_set_event_handler_continuation(dispatch_source_t ds,
-               dispatch_continuation_t dc);
+
 DISPATCH_EXPORT // for firehose server
 void _dispatch_source_merge_data(dispatch_source_t ds, pthread_priority_t pp,
                unsigned long val);
 
-#if HAVE_MACH
-void _dispatch_mach_dispose(dispatch_mach_t dm);
-void _dispatch_mach_finalize_activation(dispatch_mach_t dm);
-void _dispatch_mach_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags);
-void _dispatch_mach_wakeup(dispatch_mach_t dm, pthread_priority_t pp,
+void _dispatch_mgr_queue_push(dispatch_queue_t dq, dispatch_object_t dou,
+               dispatch_qos_t qos);
+void _dispatch_mgr_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
                dispatch_wakeup_flags_t flags);
-size_t _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz);
-
-void _dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg);
-void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg,
+void _dispatch_mgr_thread(dispatch_queue_t dq, dispatch_invoke_context_t dic,
                dispatch_invoke_flags_t flags);
-size_t _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf,
-               size_t bufsiz);
-
-void _dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc,
-               dispatch_invoke_flags_t flags);
-void _dispatch_mach_barrier_invoke(dispatch_continuation_t dc,
-               dispatch_invoke_flags_t flags);
-#endif // HAVE_MACH
-
-void _dispatch_mgr_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
-               dispatch_wakeup_flags_t flags);
-void _dispatch_mgr_thread(dispatch_queue_t dq, dispatch_invoke_flags_t flags);
 #if DISPATCH_USE_KEVENT_WORKQUEUE
-void _dispatch_kevent_worker_thread(_dispatch_kevent_qos_s **events,
+void _dispatch_kevent_worker_thread(dispatch_kevent_t *events,
                int *nevents);
-#endif
+#endif // DISPATCH_USE_KEVENT_WORKQUEUE
 
 #endif /* __DISPATCH_SOURCE_INTERNAL__ */
index 982411f06b080d10bcb92747c51e8e611d5e4cfc..1e7350463e9532181bfe22e3b0ca8f488993c36b 100644 (file)
@@ -49,8 +49,22 @@ public struct DispatchData : RandomAccessCollection {
        /// Initialize a `Data` with copied memory content.
        ///
        /// - parameter bytes: A pointer to the memory. It will be copied.
+       @available(swift, deprecated: 4, message: "Use init(bytes: UnsafeRawBufferPointer) instead")
        public init(bytes buffer: UnsafeBufferPointer<UInt8>) {
-               let d = dispatch_data_create(buffer.baseAddress!, buffer.count, nil, _dispatch_data_destructor_default())
+               let d = buffer.baseAddress == nil ? _swift_dispatch_data_empty()
+                                       : dispatch_data_create(buffer.baseAddress!, buffer.count, nil,
+                                                       _dispatch_data_destructor_default())
+               self.init(data: d)
+       }
+
+       /// Initialize a `Data` with copied memory content.
+       ///
+       /// - parameter bytes: A pointer to the memory. It will be copied.
+       /// - parameter count: The number of bytes to copy.
+       public init(bytes buffer: UnsafeRawBufferPointer) {
+               let d = buffer.baseAddress == nil ? _swift_dispatch_data_empty()
+                                       : dispatch_data_create(buffer.baseAddress!, buffer.count, nil,
+                                                       _dispatch_data_destructor_default())
                self.init(data: d)
        }
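
A minimal usage sketch of the raw-buffer initializer added above, assuming the Swift 4 overlay as shown in this diff; the bytes are copied, and a nil base address yields the empty data object:

	import Dispatch

	let bytes: [UInt8] = [0xDE, 0xAD, 0xBE, 0xEF]
	let data: DispatchData = bytes.withUnsafeBytes { raw in
		// `raw` is an UnsafeRawBufferPointer; its contents are copied.
		DispatchData(bytes: raw)
	}
	assert(data.count == 4)
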
 
@@ -58,9 +72,23 @@ public struct DispatchData : RandomAccessCollection {
        ///
        /// - parameter bytes: A buffer pointer containing the data.
        /// - parameter deallocator: Specifies the mechanism to free the indicated buffer.
+	@available(swift, deprecated: 4, message: "Use init(bytesNoCopy: UnsafeRawBufferPointer, deallocator: Deallocator) instead")
        public init(bytesNoCopy bytes: UnsafeBufferPointer<UInt8>, deallocator: Deallocator = .free) {
                let (q, b) = deallocator._deallocator
-               let d = dispatch_data_create(bytes.baseAddress!, bytes.count, q?.__wrapped, b)
+               let d = bytes.baseAddress == nil ? _swift_dispatch_data_empty()
+                                       : dispatch_data_create(bytes.baseAddress!, bytes.count, q?.__wrapped, b)
+               self.init(data: d)
+       }
+
+       /// Initialize a `Data` without copying the bytes.
+       ///
+       /// - parameter bytes: A pointer to the bytes.
+       /// - parameter count: The size of the bytes.
+       /// - parameter deallocator: Specifies the mechanism to free the indicated buffer.
+       public init(bytesNoCopy bytes: UnsafeRawBufferPointer, deallocator: Deallocator = .free) {
+               let (q, b) = deallocator._deallocator
+               let d = bytes.baseAddress == nil ? _swift_dispatch_data_empty()
+                                       : dispatch_data_create(bytes.baseAddress!, bytes.count, q?.__wrapped, b)
                self.init(data: d)
        }
 
@@ -89,14 +117,20 @@ public struct DispatchData : RandomAccessCollection {
        }
 
        public func enumerateBytes(
-               block: @noescape (_ buffer: UnsafeBufferPointer<UInt8>, _ byteIndex: Int, _ stop: inout Bool) -> Void)
+               block: (_ buffer: UnsafeBufferPointer<UInt8>, _ byteIndex: Int, _ stop: inout Bool) -> Void)
        {
-               _swift_dispatch_data_apply(__wrapped.__wrapped) { (_, offset: Int, ptr: UnsafeRawPointer, size: Int) in
-                       let bytePtr = ptr.bindMemory(to: UInt8.self, capacity: size)
-                       let bp = UnsafeBufferPointer(start: bytePtr, count: size)
-                       var stop = false
-                       block(bp, offset, &stop)
-                       return !stop
+               // we know that capturing block in the closure being created/passed to dispatch_data_apply
+               // does not cause block to escape because dispatch_data_apply does not allow its
+               // block argument to escape.  Therefore, the usage of withoutActuallyEscaping to
+               // bypass the Swift type system is safe.
+               withoutActuallyEscaping(block) { escapableBlock in
+                       _ = CDispatch.dispatch_data_apply(__wrapped.__wrapped) { (_, offset: Int, ptr: UnsafeRawPointer, size: Int) in
+                               let bytePtr = ptr.bindMemory(to: UInt8.self, capacity: size)
+                               let bp = UnsafeBufferPointer(start: bytePtr, count: size)
+                               var stop = false
+                               escapableBlock(bp, offset, &stop)
+                               return !stop
+                       }
                }
        }
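
A short sketch of enumerating the discontiguous regions with the block, which is no longer spelled @noescape but, per the comment above, still cannot actually escape (`data` is any DispatchData):

	var total = 0
	data.enumerateBytes { buffer, byteIndex, stop in
		// Each callback covers one contiguous region starting at byteIndex.
		total += buffer.count
		if total > 1_000_000 { stop = true } // stop early if desired
	}
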
 
@@ -104,11 +138,23 @@ public struct DispatchData : RandomAccessCollection {
        ///
        /// - parameter bytes: A pointer to the bytes to copy in to the data.
        /// - parameter count: The number of bytes to copy.
+       @available(swift, deprecated: 4, message: "Use append(_: UnsafeRawBufferPointer) instead")
        public mutating func append(_ bytes: UnsafePointer<UInt8>, count: Int) {
                let data = dispatch_data_create(bytes, count, nil, _dispatch_data_destructor_default())
                self.append(DispatchData(data: data))
        }
 
+       /// Append bytes to the data.
+       ///
+       /// - parameter bytes: A pointer to the bytes to copy in to the data.
+       /// - parameter count: The number of bytes to copy.
+       public mutating func append(_ bytes: UnsafeRawBufferPointer) {
+               // Nil base address does nothing.
+               guard bytes.baseAddress != nil else { return }
+               let data = dispatch_data_create(bytes.baseAddress!, bytes.count, nil, _dispatch_data_destructor_default())
+               self.append(DispatchData(data: data))
+       }
+
        /// Append data to the data.
        ///
        /// - parameter data: The data to append to this data.
@@ -121,7 +167,7 @@ public struct DispatchData : RandomAccessCollection {
        ///
        /// - parameter buffer: The buffer of bytes to append. The size is calculated from `SourceType` and `buffer.count`.
        public mutating func append<SourceType>(_ buffer : UnsafeBufferPointer<SourceType>) {
-               let count = buffer.count * sizeof(SourceType.self)
+               let count = buffer.count * MemoryLayout<SourceType>.stride;
                buffer.baseAddress?.withMemoryRebound(to: UInt8.self, capacity: count) {
                        self.append($0, count: count)
                }
@@ -129,11 +175,16 @@ public struct DispatchData : RandomAccessCollection {
 
        private func _copyBytesHelper(to pointer: UnsafeMutableRawPointer, from range: CountableRange<Index>) {
                var copiedCount = 0
+               if range.isEmpty { return }
+               let rangeSize = range.count
                _ = CDispatch.dispatch_data_apply(__wrapped.__wrapped) { (data: dispatch_data_t, offset: Int, ptr: UnsafeRawPointer, size: Int) in
-                       let limit = Swift.min((range.endIndex - range.startIndex) - copiedCount, size)
-                       memcpy(pointer + copiedCount, ptr, limit)
-                       copiedCount += limit
-                       return copiedCount < (range.endIndex - range.startIndex)
+                       if offset >= range.endIndex { return false } // This region is after endIndex
+                       let copyOffset = range.startIndex > offset ? range.startIndex - offset : 0 // offset of first byte, in this region
+                       if copyOffset >= size { return true } // This region is before startIndex
+                       let count = Swift.min(rangeSize - copiedCount, size - copyOffset)
+                       memcpy(pointer + copiedCount, ptr + copyOffset, count)
+                       copiedCount += count
+                       return copiedCount < rangeSize
                }
        }
 
@@ -142,19 +193,43 @@ public struct DispatchData : RandomAccessCollection {
        /// - parameter pointer: A pointer to the buffer you wish to copy the bytes into.
        /// - parameter count: The number of bytes to copy.
        /// - warning: This method does not verify that the contents at pointer have enough space to hold `count` bytes.
+       @available(swift, deprecated: 4, message: "Use copyBytes(to: UnsafeMutableRawBufferPointer, count: Int) instead")
        public func copyBytes(to pointer: UnsafeMutablePointer<UInt8>, count: Int) {
                _copyBytesHelper(to: pointer, from: 0..<count)
        }
+
+       /// Copy the contents of the data to a pointer.
+       ///
+       /// - parameter pointer: A pointer to the buffer you wish to copy the bytes into. The buffer must be large
+       ///     enough to hold `count` bytes.
+       /// - parameter count: The number of bytes to copy.
+       public func copyBytes(to pointer: UnsafeMutableRawBufferPointer, count: Int) {
+               assert(count <= pointer.count, "Buffer too small to copy \(count) bytes")
+               guard pointer.baseAddress != nil else { return }
+               _copyBytesHelper(to: pointer.baseAddress!, from: 0..<count)
+       }
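
A sketch of the raw-buffer copy entry point above; the assertion guards the destination size, so the buffer must hold at least `count` bytes (assumes `data` is a DispatchData):

	var out = [UInt8](repeating: 0, count: data.count)
	out.withUnsafeMutableBytes { raw in
		// Copies the first `count` bytes of `data` into `raw`.
		data.copyBytes(to: raw, count: data.count)
	}
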
                
        /// Copy a subset of the contents of the data to a pointer.
        ///
        /// - parameter pointer: A pointer to the buffer you wish to copy the bytes into.
        /// - parameter range: The range in the `Data` to copy.
        /// - warning: This method does not verify that the contents at pointer have enough space to hold the required number of bytes.
+       @available(swift, deprecated: 4, message: "Use copyBytes(to: UnsafeMutableRawBufferPointer, from: CountableRange<Index>) instead")
        public func copyBytes(to pointer: UnsafeMutablePointer<UInt8>, from range: CountableRange<Index>) {
                _copyBytesHelper(to: pointer, from: range)
        }
        
+       /// Copy a subset of the contents of the data to a pointer.
+       ///
+       /// - parameter pointer: A pointer to the buffer you wish to copy the bytes into. The buffer must be large
+       ///     enough to hold `count` bytes.
+       /// - parameter range: The range in the `Data` to copy.
+       public func copyBytes(to pointer: UnsafeMutableRawBufferPointer, from range: CountableRange<Index>) {
+               assert(range.count <= pointer.count, "Buffer too small to copy \(range.count) bytes")
+               guard pointer.baseAddress != nil else { return }
+               _copyBytesHelper(to: pointer.baseAddress!, from: range)
+       }
+
        /// Copy the contents of the data into a buffer.
        ///
        /// This function copies the bytes in `range` from the data into the buffer. If the count of the `range` is greater than `MemoryLayout<DestinationType>.stride * buffer.count` then the first N bytes will be copied into the buffer.
@@ -182,10 +257,7 @@ public struct DispatchData : RandomAccessCollection {
                
                guard !copyRange.isEmpty else { return 0 }
                
-               let bufferCapacity = buffer.count * sizeof(DestinationType.self)
-               buffer.baseAddress?.withMemoryRebound(to: UInt8.self, capacity: bufferCapacity) {
-                       _copyBytesHelper(to: $0, from: copyRange)
-               }
+               _copyBytesHelper(to: buffer.baseAddress!, from: copyRange)
                return copyRange.count
        }
 
@@ -274,11 +346,6 @@ public struct DispatchDataIterator : IteratorProtocol, Sequence {
        internal var _position: DispatchData.Index
 }
 
-typealias _swift_data_applier = @convention(block) (dispatch_data_t, Int, UnsafeRawPointer, Int) -> Bool
-
-@_silgen_name("_swift_dispatch_data_apply")
-internal func _swift_dispatch_data_apply(_ data: dispatch_data_t, _ block: _swift_data_applier)
-
 @_silgen_name("_swift_dispatch_data_empty")
 internal func _swift_dispatch_data_empty() -> dispatch_data_t
 
index 6e804515aaaa1b3bed59dacc93be82e416d335a0..d40bb68a4a014e11286be553eb0313e1635db7ca 100644 (file)
@@ -91,6 +91,8 @@ Protocols:
   SwiftName: DispatchSourceUserDataOr
 - Name: OS_dispatch_source_data_add
   SwiftName: DispatchSourceUserDataAdd
+- Name: OS_dispatch_source_data_replace
+  SwiftName: DispatchSourceUserDataReplace
 - Name: OS_dispatch_source_vnode
   SwiftName: DispatchSourceFileSystemObject
 - Name: OS_dispatch_source_write
index ae8229928c6c110b42c09ae440771fb4fce83ee3..9c667d570a1ceae7a866776a6fe8ae18499dae58 100644 (file)
@@ -26,6 +26,7 @@
 @protocol OS_dispatch_source_timer;
 @protocol OS_dispatch_source_data_add;
 @protocol OS_dispatch_source_data_or;
+@protocol OS_dispatch_source_data_replace;
 @protocol OS_dispatch_source_vnode;
 @protocol OS_dispatch_source_write;
 
@@ -44,6 +45,7 @@ static void _dispatch_overlay_constructor() {
     class_addProtocol(source, @protocol(OS_dispatch_source_timer));
     class_addProtocol(source, @protocol(OS_dispatch_source_data_add));
     class_addProtocol(source, @protocol(OS_dispatch_source_data_or));
+    class_addProtocol(source, @protocol(OS_dispatch_source_data_replace));
     class_addProtocol(source, @protocol(OS_dispatch_source_vnode));
     class_addProtocol(source, @protocol(OS_dispatch_source_write));
   }
@@ -63,6 +65,29 @@ static void _dispatch_overlay_constructor() {
 #define SWIFT_CC_swift
 #endif
 
+extern "C" dispatch_queue_attr_t _swift_dispatch_queue_concurrent(void);
+extern "C" void _swift_dispatch_apply_current(size_t iterations, __attribute__((__noescape__)) void (^block)(size_t));
+extern "C" dispatch_queue_t _swift_dispatch_get_main_queue(void);
+extern "C" dispatch_data_t _swift_dispatch_data_empty(void);
+extern "C" dispatch_block_t _swift_dispatch_data_destructor_default(void);
+extern "C" dispatch_block_t _swift_dispatch_data_destructor_free(void);
+extern "C" dispatch_block_t _swift_dispatch_data_destructor_munmap(void);
+extern "C" dispatch_block_t _swift_dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, dispatch_qos_class_t qos, int relative_priority, dispatch_block_t block);
+extern "C" dispatch_block_t _swift_dispatch_block_create_noescape(dispatch_block_flags_t flags, dispatch_block_t block);
+extern "C" void _swift_dispatch_block_cancel(dispatch_block_t block);
+extern "C" long _swift_dispatch_block_wait(dispatch_block_t block, dispatch_time_t timeout);
+extern "C" void _swift_dispatch_block_notify(dispatch_block_t block, dispatch_queue_t queue, dispatch_block_t notification_block);
+extern "C" long _swift_dispatch_block_testcancel(dispatch_block_t block);
+extern "C" void _swift_dispatch_async(dispatch_queue_t queue, dispatch_block_t block);
+extern "C" void _swift_dispatch_group_async(dispatch_group_t group, dispatch_queue_t queue, dispatch_block_t block);
+extern "C" void _swift_dispatch_sync(dispatch_queue_t queue, dispatch_block_t block);
+extern "C" void _swift_dispatch_release(dispatch_object_t obj);
+extern "C" void _swift_dispatch_retain(dispatch_object_t obj);
+#if !USE_OBJC
+extern "C" void * objc_retainAutoreleasedReturnValue(void *obj);
+#endif
+
+
 SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE
 extern "C" dispatch_queue_attr_t
 _swift_dispatch_queue_concurrent(void) {
@@ -141,12 +166,6 @@ _swift_dispatch_block_testcancel(dispatch_block_t block) {
   return dispatch_block_testcancel(block);
 }
 
-SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE
-extern "C" bool
-_swift_dispatch_data_apply(dispatch_data_t data, bool (^applier)(dispatch_data_t, size_t, const void *, size_t)) {
-  return dispatch_data_apply(data, applier);
-}
-
 SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE
 extern "C" void
 _swift_dispatch_async(dispatch_queue_t queue, dispatch_block_t block) {
@@ -171,13 +190,14 @@ _swift_dispatch_release(dispatch_object_t obj) {
   dispatch_release(obj);
 }
 
-// DISPATCH_RUNTIME_STDLIB_INTERFACE
-// extern "C" dispatch_queue_t
-// _swift_apply_current_root_queue() {
-//     return DISPATCH_APPLY_CURRENT_ROOT_QUEUE;
-// }
+SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE
+extern "C" void
+_swift_dispatch_retain(dispatch_object_t obj) {
+  dispatch_retain(obj);
+}
 
 #define SOURCE(t)                                                              \
+  extern "C" dispatch_source_type_t _swift_dispatch_source_type_##t(void);     \
   SWIFT_CC(swift)                                                              \
   DISPATCH_RUNTIME_STDLIB_INTERFACE extern "C" dispatch_source_type_t  \
   _swift_dispatch_source_type_##t(void) {                                      \
@@ -186,6 +206,7 @@ _swift_dispatch_release(dispatch_object_t obj) {
 
 SOURCE(DATA_ADD)
 SOURCE(DATA_OR)
+SOURCE(DATA_REPLACE)
 #if HAVE_MACH
 SOURCE(MACH_SEND)
 SOURCE(MACH_RECV)
@@ -202,12 +223,25 @@ SOURCE(VNODE)
 #endif
 SOURCE(WRITE)
 
-// See comment in CFFuntime.c explaining why objc_retainAutoreleasedReturnValue is needed.
-extern "C" void swift_release(void *);
+#if !USE_OBJC
+
+// For CF functions with 'Get' semantics, the compiler currently assumes that
+// the result is autoreleased and must be retained. It does so on all platforms
+// by emitting a call to objc_retainAutoreleasedReturnValue. On Darwin, this is
+// implemented by the ObjC runtime. On non-ObjC platforms, there is no runtime,
+// and therefore we have to stub it out here ourselves. The compiler will
+// eventually call swift_release to balance the retain below. This is a
+// workaround until the compiler no longer emits this callout on non-ObjC
+// platforms.
+extern "C" void swift_retain(void *);
+
+SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE
 extern "C" void * objc_retainAutoreleasedReturnValue(void *obj) {
     if (obj) {
-        swift_release(obj);
+        swift_retain(obj);
         return obj;
     }
     else return NULL;
 }
+
+#endif // !USE_OBJC
index 8ce417aa74d984b9f610796949ef91d6ece73e18..d26f6416063ac35b393131a97de3d82e562b522a 100644 (file)
@@ -55,6 +55,7 @@ public extension DispatchIO {
                self.init(__type: type.rawValue, fd: fileDescriptor, queue: queue, handler: cleanupHandler)
        }
 
+       @available(swift, obsoleted: 4)
        public convenience init(
                type: StreamType,
                path: UnsafePointer<Int8>,
@@ -66,6 +67,18 @@ public extension DispatchIO {
                self.init(__type: type.rawValue, path: path, oflag: oflag, mode: mode, queue: queue, handler: cleanupHandler)
        }
 
+       @available(swift, introduced: 4)
+       public convenience init?(
+               type: StreamType,
+               path: UnsafePointer<Int8>,
+               oflag: Int32,
+               mode: mode_t,
+               queue: DispatchQueue,
+               cleanupHandler: @escaping (_ error: Int32) -> Void)
+       {
+               self.init(__type: type.rawValue, path: path, oflag: oflag, mode: mode, queue: queue, handler: cleanupHandler)
+       }
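
A sketch of the Swift 4 failable path-based initializer; the path and queue label here are hypothetical, and `O_RDONLY` comes from the platform module (Darwin or Glibc):

	let queue = DispatchQueue(label: "com.example.io")
	let channel = DispatchIO(type: .stream, path: "/tmp/example.txt",
			oflag: O_RDONLY, mode: 0, queue: queue) { error in
		// Cleanup handler; `error` is an errno-style code.
	}
	// In Swift 4 the result is Optional: nil if the channel could not be created.
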
+
        public convenience init(
                type: StreamType,
                io: DispatchIO,
index 5443b7c07beddf3b89c028fdff275e2e92edc57e..df6a7b336ac529488134f36c1e0ae358a7c1137e 100644 (file)
@@ -110,7 +110,7 @@ public func dispatch_data_copy_region(_ data: dispatch_data_t, _ location: Int,
        fatalError()
 }
 
-@available(*, unavailable, renamed:"DispatchQueue.asynchronously(self:group:qos:flags:execute:)")
+@available(*, unavailable, renamed:"DispatchQueue.async(self:group:qos:flags:execute:)")
 public func dispatch_group_async(_ group: DispatchGroup, _ queue: DispatchQueue, _ block: @escaping () -> Void)
 {
        fatalError()
@@ -140,13 +140,13 @@ public func dispatch_io_set_interval(_ channel: DispatchIO, _ interval: UInt64,
        fatalError()
 }
 
-@available(*, unavailable, renamed:"DispatchQueue.apply(attributes:iterations:execute:)")
+@available(*, unavailable, renamed:"DispatchQueue.concurrentPerform(iterations:execute:)")
 public func dispatch_apply(_ iterations: Int, _ queue: DispatchQueue, _ block: (Int) -> Void)
 {
        fatalError()
 }
 
-@available(*, unavailable, renamed:"DispatchQueue.asynchronously(self:execute:)")
+@available(*, unavailable, renamed:"DispatchQueue.async(self:execute:)")
 public func dispatch_async(_ queue: DispatchQueue, _ block: @escaping () -> Void)
 {
        fatalError()
@@ -158,7 +158,7 @@ public func dispatch_get_global_queue(_ identifier: Int, _ flags: UInt) -> Dispa
        fatalError()
 }
 
-@available(*, unavailable, renamed: "DispatchQueue.main")
+@available(*, unavailable, renamed: "getter:DispatchQueue.main()")
 public func dispatch_get_main_queue() -> DispatchQueue
 {
        fatalError()
index b7628c9cfd37c375624c9cb101ff2913c5b52cc5..bff1bc323894bad434a798bb0858e6fcd90d3153 100644 (file)
@@ -216,11 +216,13 @@ public extension DispatchQueue {
        {
                var result: T?
                var error: Swift.Error?
-               fn {
-                       do {
-                               result = try work()
-                       } catch let e {
-                               error = e
+               withoutActuallyEscaping(work) { _work in
+                       fn {
+                               do {
+                                       result = try _work()
+                               } catch let e {
+                                       error = e
+                               }
                        }
                }
                if let e = error {
@@ -324,12 +326,21 @@ public extension DispatchQueue {
                return nil
        }
 
-       public func setSpecific<T>(key: DispatchSpecificKey<T>, value: T) {
-               let v = _DispatchSpecificValue(value: value)
+       public func setSpecific<T>(key: DispatchSpecificKey<T>, value: T?) {
                let k = Unmanaged.passUnretained(key).toOpaque()
-               let p = Unmanaged.passRetained(v).toOpaque()
+               let v = value.flatMap { _DispatchSpecificValue(value: $0) }
+               let p = v.flatMap { Unmanaged.passRetained($0).toOpaque() }
                dispatch_queue_set_specific(self.__wrapped, k, p, _destructDispatchSpecificValue)
        }
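
With the value parameter now optional, passing nil clears a previously set value (a NULL context removes the key in the underlying dispatch_queue_set_specific). A brief sketch, with a hypothetical label:

	let key = DispatchSpecificKey<String>()
	let q = DispatchQueue(label: "com.example.specific")
	q.setSpecific(key: key, value: "token")
	assert(q.getSpecific(key: key) == "token")
	q.setSpecific(key: key, value: nil)   // removes the value for `key`
	assert(q.getSpecific(key: key) == nil)
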
+
+       #if os(Android)
+       @_silgen_name("_dispatch_install_thread_detach_callback")
+       private static func _dispatch_install_thread_detach_callback(_ cb: @escaping @convention(c) () -> Void)
+
+       public static func setThreadDetachCallback(_ cb: @escaping @convention(c) () -> Void) {
+               _dispatch_install_thread_detach_callback(cb)
+       }
+       #endif
 }
 
 private func _destructDispatchSpecificValue(ptr: UnsafeMutableRawPointer?) {
@@ -344,8 +355,5 @@ internal func _swift_dispatch_queue_concurrent() -> dispatch_queue_attr_t
 @_silgen_name("_swift_dispatch_get_main_queue")
 internal func _swift_dispatch_get_main_queue() -> dispatch_queue_t
 
-@_silgen_name("_swift_dispatch_apply_current_root_queue")
-internal func _swift_dispatch_apply_current_root_queue() -> dispatch_queue_t
-
 @_silgen_name("_swift_dispatch_apply_current")
 internal func _swift_dispatch_apply_current(_ iterations: Int, _ block: @convention(block) (Int) -> Void)
index 9dab8f0a94886274440cca8c2a43ae09f72ebc22..421a6e9bb65415f0a13964284979a1a9710bb2c9 100644 (file)
@@ -112,7 +112,7 @@ public extension DispatchSource {
        }
 #endif
 
-#if !os(Linux)
+#if !os(Linux) && !os(Android)
        public struct ProcessEvent : OptionSet, RawRepresentable {
                public let rawValue: UInt
                public init(rawValue: UInt) { self.rawValue = rawValue }
@@ -170,7 +170,7 @@ public extension DispatchSource {
        }
 #endif
 
-#if !os(Linux)
+#if !os(Linux) && !os(Android)
        public class func makeProcessSource(identifier: pid_t, eventMask: ProcessEvent, queue: DispatchQueue? = nil) -> DispatchSourceProcess {
                let source = dispatch_source_create(_swift_dispatch_source_type_proc(), UInt(identifier), eventMask.rawValue, queue?.__wrapped)
                return DispatchSource(source: source) as DispatchSourceProcess
@@ -201,8 +201,13 @@ public extension DispatchSource {
                let source = dispatch_source_create(_swift_dispatch_source_type_data_or(), 0, 0, queue?.__wrapped)
                return DispatchSource(source: source) as DispatchSourceUserDataOr
        }
+    
+       public class func makeUserDataReplaceSource(queue: DispatchQueue? = nil) -> DispatchSourceUserDataReplace {
+               let source = dispatch_source_create(_swift_dispatch_source_type_data_replace(), 0, 0, queue?.__wrapped)
+               return DispatchSource(source: source) as DispatchSourceUserDataReplace
+       }
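
A sketch of the new replace-semantics user data source, assuming a `replace(data:)` accessor on DispatchSourceUserDataReplace; unlike the add/or variants, the last merged value overwrites any pending data:

	let source = DispatchSource.makeUserDataReplaceSource()
	source.setEventHandler {
		// source.data holds the last value merged since the previous callback.
		print(source.data)
	}
	source.resume()
	source.replace(data: 42)
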
 
-#if !os(Linux)
+#if !os(Linux) && !os(Android)
        public class func makeFileSystemObjectSource(fileDescriptor: Int32, eventMask: FileSystemEvent, queue: DispatchQueue? = nil) -> DispatchSourceFileSystemObject {
                let source = dispatch_source_create(_swift_dispatch_source_type_vnode(), UInt(fileDescriptor), eventMask.rawValue, queue?.__wrapped)
                return DispatchSource(source: source) as DispatchSourceFileSystemObject
@@ -255,7 +260,7 @@ public extension DispatchSourceMemoryPressure {
 }
 #endif
 
-#if !os(Linux)
+#if !os(Linux) && !os(Android)
 public extension DispatchSourceProcess {
        public var handle: pid_t {
                return pid_t(dispatch_source_get_handle(self as! DispatchSource))
@@ -274,32 +279,346 @@ public extension DispatchSourceProcess {
 #endif
 
 public extension DispatchSourceTimer {
+       ///
+       /// Sets the deadline and leeway for a timer event that fires once.
+       ///
+       /// Once this function returns, any pending source data accumulated for the previous timer values
+       /// has been cleared and the next timer event will occur at `deadline`.
+       ///
+       /// Delivery of the timer event may be delayed by the system in order to improve power consumption
+       /// and system performance. The upper limit to the allowable delay may be configured with the `leeway`
+       /// argument; the lower limit is under the control of the system.
+       ///
+       /// The lower limit to the allowable delay may vary with process state such as visibility of the
+       /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system
+       /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller
+       /// than the current lower limit. Note that a minimal amount of delay is to be expected even if
+       /// this flag is specified.
+       ///
+       /// Calling this method has no effect if the timer source has already been canceled.
+       /// - note: Delivery of the timer event does not cancel the timer source.
+       ///
+       /// - parameter deadline: the time at which the timer event will be delivered, subject to the
+       ///     leeway and other considerations described above. The deadline is based on Mach absolute
+       ///     time.
+       /// - parameter leeway: the leeway for the timer.
+       ///
+       @available(swift, deprecated: 4, renamed: "schedule(deadline:repeating:leeway:)")
        public func scheduleOneshot(deadline: DispatchTime, leeway: DispatchTimeInterval = .nanoseconds(0)) {
                dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, ~0, UInt64(leeway.rawValue))
        }
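
A usage sketch for the one-shot spelling above, which Swift 4 deprecates in favor of schedule(deadline:repeating:leeway:); the ~0 interval passed to dispatch_source_set_timer means the timer does not repeat:

	let timer = DispatchSource.makeTimerSource()
	timer.setEventHandler { print("fired") }
	timer.scheduleOneshot(deadline: .now() + .seconds(1), leeway: .milliseconds(10))
	timer.resume()
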
 
+       ///
+       /// Sets the deadline and leeway for a timer event that fires once.
+       ///
+       /// Once this function returns, any pending source data accumulated for the previous timer values
+       /// has been cleared and the next timer event will occur at `wallDeadline`.
+       ///
+       /// Delivery of the timer event may be delayed by the system in order to improve power consumption
+       /// and system performance. The upper limit to the allowable delay may be configured with the `leeway`
+       /// argument; the lower limit is under the control of the system.
+       ///
+       /// The lower limit to the allowable delay may vary with process state such as visibility of the
+       /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system
+       /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller
+       /// than the current lower limit. Note that a minimal amount of delay is to be expected even if
+       /// this flag is specified.
+       ///
+       /// Calling this method has no effect if the timer source has already been canceled.
+       /// - note: Delivery of the timer event does not cancel the timer source.
+       ///
+       /// - parameter wallDeadline: the time at which the timer event will be delivered, subject to the
+       ///     leeway and other considerations described above. The deadline is based on
+       ///     `gettimeofday(3)`.
+       /// - parameter leeway: the leeway for the timer.
+       ///
+       @available(swift, deprecated: 4, renamed: "schedule(wallDeadline:repeating:leeway:)")
        public func scheduleOneshot(wallDeadline: DispatchWallTime, leeway: DispatchTimeInterval = .nanoseconds(0)) {
                dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, ~0, UInt64(leeway.rawValue))
        }
 
+       ///
+       /// Sets the deadline, interval and leeway for a timer event that fires at least once.
+       ///
+       /// Once this function returns, any pending source data accumulated for the previous timer values
+       /// has been cleared. The next timer event will occur at `deadline` and every `interval` units of
+       /// time thereafter until the timer source is canceled.
+       ///
+       /// Delivery of a timer event may be delayed by the system in order to improve power consumption
+       /// and system performance. The upper limit to the allowable delay may be configured with the `leeway`
+       /// argument; the lower limit is under the control of the system.
+       ///
+       /// For the initial timer fire at `deadline`, the upper limit to the allowable delay is set to
+       /// `leeway`. For the subsequent timer fires at `deadline + N * interval`, the upper
+       /// limit is the smaller of `leeway` and `interval/2`.
+       ///
+       /// The lower limit to the allowable delay may vary with process state such as visibility of the
+       /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system
+       /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller
+       /// than the current lower limit. Note that a minimal amount of delay is to be expected even if
+       /// this flag is specified.
+       ///
+       /// Calling this method has no effect if the timer source has already been canceled.
+       ///
+       /// - parameter deadline: the time at which the timer event will be delivered, subject to the
+       ///     leeway and other considerations described above. The deadline is based on Mach absolute
+       ///     time.
+       /// - parameter interval: the interval for the timer.
+       /// - parameter leeway: the leeway for the timer.
+       ///
+       @available(swift, deprecated: 4, renamed: "schedule(deadline:repeating:leeway:)")
        public func scheduleRepeating(deadline: DispatchTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) {
-               dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, UInt64(interval.rawValue), UInt64(leeway.rawValue))
+               dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, interval == .never ? ~0 : UInt64(interval.rawValue), UInt64(leeway.rawValue))
        }
 
+       ///
+       /// Sets the deadline, interval and leeway for a timer event that fires at least once.
+       ///
+       /// Once this function returns, any pending source data accumulated for the previous timer values
+       /// has been cleared. The next timer event will occur at `deadline` and every `interval` seconds
+       /// thereafter until the timer source is canceled.
+       ///
+       /// Delivery of a timer event may be delayed by the system in order to improve power consumption and
+       /// system performance. The upper limit to the allowable delay may be configured with the `leeway`
+       /// argument; the lower limit is under the control of the system.
+       ///
+       /// For the initial timer fire at `deadline`, the upper limit to the allowable delay is set to
+       /// `leeway`. For the subsequent timer fires at `deadline + N * interval`, the upper
+       /// limit is the smaller of `leeway` and `interval/2`.
+       ///
+       /// The lower limit to the allowable delay may vary with process state such as visibility of the
+       /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system
+       /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller
+       /// than the current lower limit. Note that a minimal amount of delay is to be expected even if
+       /// this flag is specified.
+       ///
+       /// Calling this method has no effect if the timer source has already been canceled.
+       ///
+       /// - parameter deadline: the time at which the timer event will be delivered, subject to the
+       ///     leeway and other considerations described above. The deadline is based on Mach absolute
+       ///     time.
+       /// - parameter interval: the interval for the timer in seconds.
+       /// - parameter leeway: the leeway for the timer.
+       ///
+       @available(swift, deprecated: 4, renamed: "schedule(deadline:repeating:leeway:)")
        public func scheduleRepeating(deadline: DispatchTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) {
-               dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue))
+               dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, interval.isInfinite ? ~0 : UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue))
        }
 
+       ///
+       /// Sets the deadline, interval and leeway for a timer event that fires at least once.
+       ///
+       /// Once this function returns, any pending source data accumulated for the previous timer values
+       /// has been cleared. The next timer event will occur at `wallDeadline` and every `interval` units of
+       /// time thereafter until the timer source is canceled.
+       ///
+       /// Delivery of a timer event may be delayed by the system in order to improve power consumption and
+       /// system performance. The upper limit to the allowable delay may be configured with the `leeway`
+       /// argument; the lower limit is under the control of the system.
+       ///
+       /// For the initial timer fire at `wallDeadline`, the upper limit to the allowable delay is set to
+       /// `leeway`. For the subsequent timer fires at `wallDeadline + N * interval`, the upper
+       /// limit is the smaller of `leeway` and `interval/2`.
+       ///
+       /// The lower limit to the allowable delay may vary with process state such as visibility of the
+       /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system
+       /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller
+       /// than the current lower limit. Note that a minimal amount of delay is to be expected even if
+       /// this flag is specified.
+       ///
+       /// Calling this method has no effect if the timer source has already been canceled.
+       ///
+       /// - parameter wallDeadline: the time at which the timer event will be delivered, subject to the
+       ///     leeway and other considerations described above. The deadline is based on
+       ///     `gettimeofday(3)`.
+       /// - parameter interval: the interval for the timer.
+       /// - parameter leeway: the leeway for the timer.
+       ///
+       @available(swift, deprecated: 4, renamed: "schedule(wallDeadline:repeating:leeway:)")
        public func scheduleRepeating(wallDeadline: DispatchWallTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) {
-               dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, UInt64(interval.rawValue), UInt64(leeway.rawValue))
+               dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, interval == .never ? ~0 : UInt64(interval.rawValue), UInt64(leeway.rawValue))
        }
 
+       ///
+       /// Sets the deadline, interval and leeway for a timer event that fires at least once.
+       ///
+       /// Once this function returns, any pending source data accumulated for the previous timer values
+       /// has been cleared. The next timer event will occur at `wallDeadline` and every `interval` seconds
+       /// thereafter until the timer source is canceled.
+       ///
+       /// Delivery of a timer event may be delayed by the system in order to improve power consumption and
+       /// system performance. The upper limit to the allowable delay may be configured with the `leeway`
+       /// argument; the lower limit is under the control of the system.
+       ///
+       /// For the initial timer fire at `wallDeadline`, the upper limit to the allowable delay is set to
+       /// `leeway`. For the subsequent timer fires at `wallDeadline + N * interval`, the upper
+       /// limit is the smaller of `leeway` and `interval/2`.
+       ///
+       /// The lower limit to the allowable delay may vary with process state such as visibility of the
+       /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system
+       /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller
+       /// than the current lower limit. Note that a minimal amount of delay is to be expected even if
+       /// this flag is specified.
+       ///
+       /// Calling this method has no effect if the timer source has already been canceled.
+       ///
+       /// - parameter wallDeadline: the time at which the first timer event will be delivered, subject to the
+       ///     leeway and other considerations described above. The deadline is based on
+       ///     `gettimeofday(3)`.
+       /// - parameter interval: the interval for the timer in seconds.
+       /// - parameter leeway: the leeway for the timer.
+       ///
+       @available(swift, deprecated: 4, renamed: "schedule(wallDeadline:repeating:leeway:)")
        public func scheduleRepeating(wallDeadline: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) {
-               dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue))
+               dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, interval.isInfinite ? ~0 : UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue))
+       }
+
+       ///
+       /// Sets the deadline, repeat interval and leeway for a timer event.
+       ///
+       /// Once this function returns, any pending source data accumulated for the previous timer values
+       /// has been cleared. The next timer event will occur at `deadline` and every `repeating` units of
+       /// time thereafter until the timer source is canceled. If the value of `repeating` is `.never`,
+       /// or is defaulted, the timer fires only once.
+       ///
+       /// Delivery of a timer event may be delayed by the system in order to improve power consumption
+       /// and system performance. The upper limit to the allowable delay may be configured with the `leeway`
+       /// argument; the lower limit is under the control of the system.
+       ///
+       /// For the initial timer fire at `deadline`, the upper limit to the allowable delay is set to
+       /// `leeway`. For the subsequent timer fires at `deadline + N * repeating`, the upper
+       /// limit is the smaller of `leeway` and `repeating/2`.
+       ///
+       /// The lower limit to the allowable delay may vary with process state such as visibility of the
+       /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system
+       /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller
+       /// than the current lower limit. Note that a minimal amount of delay is to be expected even if
+       /// this flag is specified.
+       ///
+       /// Calling this method has no effect if the timer source has already been canceled.
+       ///
+       /// - parameter deadline: the time at which the first timer event will be delivered, subject to the
+       ///     leeway and other considerations described above. The deadline is based on Mach absolute
+       ///     time.
+       /// - parameter repeating: the repeat interval for the timer, or `.never` if the timer should fire
+       ///             only once.
+       /// - parameter leeway: the leeway for the timer.
+       ///
+       @available(swift, introduced: 4)
+       public func schedule(deadline: DispatchTime, repeating interval: DispatchTimeInterval = .never, leeway: DispatchTimeInterval = .nanoseconds(0)) {
+               dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, interval == .never ? ~0 : UInt64(interval.rawValue), UInt64(leeway.rawValue))
+       }
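
For reference, a minimal usage sketch of the new `schedule(deadline:repeating:leeway:)` API above. The queue label, handler body, and timing values are illustrative and not part of this patch.

import Dispatch

let queue = DispatchQueue(label: "com.example.timer") // hypothetical label
let timer = DispatchSource.makeTimerSource(queue: queue)
timer.setEventHandler { print("tick") }
// First fire ~1s from now, then every 2s, allowing up to 100ms of coalescing.
timer.schedule(deadline: .now() + 1, repeating: .seconds(2), leeway: .milliseconds(100))
timer.resume()
dispatchMain() // keep the process alive so the handler can run
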
+
+       ///
+       /// Sets the deadline, repeat interval and leeway for a timer event.
+       ///
+       /// Once this function returns, any pending source data accumulated for the previous timer values
+       /// has been cleared. The next timer event will occur at `deadline` and every `repeating` seconds
+       /// thereafter until the timer source is canceled. If the value of `repeating` is `.infinity`,
+       /// the timer fires only once.
+       ///
+       /// Delivery of a timer event may be delayed by the system in order to improve power consumption
+       /// and system performance. The upper limit to the allowable delay may be configured with the `leeway`
+       /// argument; the lower limit is under the control of the system.
+       ///
+       /// For the initial timer fire at `deadline`, the upper limit to the allowable delay is set to
+       /// `leeway`. For the subsequent timer fires at `deadline + N * repeating`, the upper
+       /// limit is the smaller of `leeway` and `repeating/2`.
+       ///
+       /// The lower limit to the allowable delay may vary with process state such as visibility of the
+       /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system
+       /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller
+       /// than the current lower limit. Note that a minimal amount of delay is to be expected even if
+       /// this flag is specified.
+       ///
+       /// Calling this method has no effect if the timer source has already been canceled.
+       ///
+       /// - parameter deadline: the time at which the first timer event will be delivered, subject to the
+       ///     leeway and other considerations described above. The deadline is based on Mach absolute
+       ///     time.
+       /// - parameter repeating: the repeat interval for the timer in seconds, or `.infinity` if the timer
+       ///             should fire only once.
+       /// - parameter leeway: the leeway for the timer.
+       ///
+       @available(swift, introduced: 4)
+       public func schedule(deadline: DispatchTime, repeating interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) {
+               dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, interval.isInfinite ? ~0 : UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue))
+       }
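
In the Double overload above, an infinite interval maps to the one-shot sentinel (~0), mirroring `.never` in the interval-typed overload. A hedged sketch (values illustrative):

import Dispatch

let oneShot = DispatchSource.makeTimerSource()
oneShot.setEventHandler { print("fired once") }
// Double.infinity is translated to ~0, so no repeat is programmed.
oneShot.schedule(deadline: .now() + 0.5, repeating: Double.infinity)
oneShot.resume()
dispatchMain()
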
+
+       ///
+       /// Sets the deadline, repeat interval and leeway for a timer event.
+       ///
+       /// Once this function returns, any pending source data accumulated for the previous timer values
+       /// has been cleared. The next timer event will occur at `wallDeadline` and every `repeating` units of
+       /// time thereafter until the timer source is canceled. If the value of `repeating` is `.never`,
+       /// or is defaulted, the timer fires only once.
+       ///
+       /// Delivery of a timer event may be delayed by the system in order to improve power consumption and
+       /// system performance. The upper limit to the allowable delay may be configured with the `leeway`
+       /// argument; the lower limit is under the control of the system.
+       ///
+       /// For the initial timer fire at `wallDeadline`, the upper limit to the allowable delay is set to
+       /// `leeway`. For the subsequent timer fires at `wallDeadline + N * repeating`, the upper
+       /// limit is the smaller of `leeway` and `repeating/2`.
+       ///
+       /// The lower limit to the allowable delay may vary with process state such as visibility of the
+       /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system
+       /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller
+       /// than the current lower limit. Note that a minimal amount of delay is to be expected even if
+       /// this flag is specified.
+       ///
+       /// Calling this method has no effect if the timer source has already been canceled.
+       ///
+       /// - parameter wallDeadline: the time at which the first timer event will be delivered, subject to the
+       ///     leeway and other considerations described above. The deadline is based on
+       ///     `gettimeofday(3)`.
+       /// - parameter repeating: the repeat interval for the timer, or `.never` if the timer should fire
+       ///             only once.
+       /// - parameter leeway: the leeway for the timer.
+       ///
+       @available(swift, introduced: 4)
+       public func schedule(wallDeadline: DispatchWallTime, repeating interval: DispatchTimeInterval = .never, leeway: DispatchTimeInterval = .nanoseconds(0)) {
+               dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, interval == .never ? ~0 : UInt64(interval.rawValue), UInt64(leeway.rawValue))
+       }
+
+       ///
+       /// Sets the deadline, repeat interval and leeway for a timer event.
+       ///
+       /// Once this function returns, any pending source data accumulated for the previous timer values
+       /// has been cleared. The next timer event will occur at `wallDeadline` and every `repeating` seconds
+       /// thereafter until the timer source is canceled. If the value of `repeating` is `.infinity`,
+       /// the timer fires only once.
+       ///
+       /// Delivery of a timer event may be delayed by the system in order to improve power consumption
+       /// and system performance. The upper limit to the allowable delay may be configured with the `leeway`
+       /// argument; the lower limit is under the control of the system.
+       ///
+       /// For the initial timer fire at `wallDeadline`, the upper limit to the allowable delay is set to
+       /// `leeway`. For the subsequent timer fires at `wallDeadline + N * repeating`, the upper
+       /// limit is the smaller of `leeway` and `repeating/2`.
+       ///
+       /// The lower limit to the allowable delay may vary with process state such as visibility of the
+       /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system
+       /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller
+       /// than the current lower limit. Note that a minimal amount of delay is to be expected even if
+       /// this flag is specified.
+       ///
+       /// Calling this method has no effect if the timer source has already been canceled.
+       ///
+       /// - parameter wallDeadline: the time at which the first timer event will be delivered, subject to the
+       ///     leeway and other considerations described above. The deadline is based on
+       ///     `gettimeofday(3)`.
+       /// - parameter repeating: the repeat interval for the timer in seconds, or `.infinity` if the timer
+       ///             should fire only once.
+       /// - parameter leeway: the leeway for the timer.
+       ///
+       @available(swift, introduced: 4)
+       public func schedule(wallDeadline: DispatchWallTime, repeating interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) {
+               dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, interval.isInfinite ? ~0 : UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue))
        }
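
A wall-clock sketch for comparison; wall timers follow `gettimeofday(3)` and therefore track system clock adjustments (values illustrative):

import Dispatch

let wallTimer = DispatchSource.makeTimerSource()
wallTimer.setEventHandler { print("wall tick") }
// First fire 5s from now in wall time, then every 60s with up to 1s leeway.
wallTimer.schedule(wallDeadline: .now() + 5, repeating: 60.0, leeway: .seconds(1))
wallTimer.resume()
dispatchMain()
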
 }
 
-#if !os(Linux)
+#if !os(Linux) && !os(Android)
 public extension DispatchSourceFileSystemObject {
        public var handle: Int32 {
                return Int32(dispatch_source_get_handle((self as! DispatchSource).__wrapped))
@@ -318,45 +637,48 @@ public extension DispatchSourceFileSystemObject {
 #endif
 
 public extension DispatchSourceUserDataAdd {
-       /// @function mergeData
+       /// Merges data into a dispatch source of type `DISPATCH_SOURCE_TYPE_DATA_ADD`
+       /// and submits its event handler block to its target queue.
        ///
-       /// @abstract
-       /// Merges data into a dispatch source of type DISPATCH_SOURCE_TYPE_DATA_ADD or
-       /// DISPATCH_SOURCE_TYPE_DATA_OR and submits its event handler block to its
-       /// target queue.
-       ///
-       /// @param value
-       /// The value to coalesce with the pending data using a logical OR or an ADD
-       /// as specified by the dispatch source type. A value of zero has no effect
-       /// and will not result in the submission of the event handler block.
+       /// - parameter data: The value to add to the current pending data. A value of zero
+       ///             has no effect and will not result in the submission of the event handler block.
        public func add(data: UInt) {
                dispatch_source_merge_data((self as! DispatchSource).__wrapped, data)
        }
 }
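
A sketch of ADD-style merging: values passed to `add(data:)` between handler invocations are summed and delivered as one coalesced `data` value (names and values illustrative):

import Dispatch

let adder = DispatchSource.makeUserDataAddSource()
adder.setEventHandler {
    // `data` is the sum of everything merged since the last invocation.
    print("accumulated:", adder.data)
}
adder.resume()
adder.add(data: 1)
adder.add(data: 2) // may coalesce with the 1 into a single fire reporting 3
dispatchMain()
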
 
 public extension DispatchSourceUserDataOr {
-       /// @function mergeData
-       ///
-       /// @abstract
-       /// Merges data into a dispatch source of type DISPATCH_SOURCE_TYPE_DATA_ADD or
-       /// DISPATCH_SOURCE_TYPE_DATA_OR and submits its event handler block to its
-       /// target queue.
+       /// Merges data into a dispatch source of type `DISPATCH_SOURCE_TYPE_DATA_OR` and
+       /// submits its event handler block to its target queue.
        ///
-       /// @param value
-       /// The value to coalesce with the pending data using a logical OR or an ADD
-       /// as specified by the dispatch source type. A value of zero has no effect
-       /// and will not result in the submission of the event handler block.
+       /// - parameter data: The value to OR into the current pending data. A value of zero
+       ///             has no effect and will not result in the submission of the event handler block.
        public func or(data: UInt) {
                dispatch_source_merge_data((self as! DispatchSource).__wrapped, data)
        }
 }
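
OR-style merging suits bit flags raised from multiple threads; the handler observes the bitwise OR of every pending value (flag values illustrative):

import Dispatch

let flags = DispatchSource.makeUserDataOrSource()
flags.setEventHandler {
    let pending = flags.data // bitwise OR of all values merged since last fire
    if pending & 0x1 != 0 { print("flag A") }
    if pending & 0x2 != 0 { print("flag B") }
}
flags.resume()
flags.or(data: 0x1)
flags.or(data: 0x2)
dispatchMain()
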
 
+public extension DispatchSourceUserDataReplace {
+       /// Merges data into a dispatch source of type `DISPATCH_SOURCE_TYPE_DATA_REPLACE`
+       /// and submits its event handler block to its target queue.
+       ///
+       /// - parameter data: The value that will replace the current pending data.
+       ///             A value of zero will be stored but will not result in the submission of the event
+       ///             handler block.
+       public func replace(data: UInt) {
+               dispatch_source_merge_data((self as! DispatchSource).__wrapped, data)
+       }
+}
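
REPLACE-style merging keeps only the most recent value, which suits latest-state publication such as progress updates. A sketch, assuming the `makeUserDataReplaceSource` factory added alongside this protocol elsewhere in this change:

import Dispatch

let progress = DispatchSource.makeUserDataReplaceSource()
progress.setEventHandler {
    print("latest:", progress.data) // only the last replaced value survives
}
progress.resume()
progress.replace(data: 25)
progress.replace(data: 50) // overwrites the 25 if the handler has not run yet
dispatchMain()
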
+
 @_silgen_name("_swift_dispatch_source_type_DATA_ADD")
 internal func _swift_dispatch_source_type_data_add() -> dispatch_source_type_t
 
 @_silgen_name("_swift_dispatch_source_type_DATA_OR")
 internal func _swift_dispatch_source_type_data_or() -> dispatch_source_type_t
 
+@_silgen_name("_swift_dispatch_source_type_DATA_REPLACE")
+internal func _swift_dispatch_source_type_data_replace() -> dispatch_source_type_t
+
 #if HAVE_MACH
 @_silgen_name("_swift_dispatch_source_type_MACH_SEND")
 internal func _swift_dispatch_source_type_mach_send() -> dispatch_source_type_t
@@ -368,7 +690,7 @@ internal func _swift_dispatch_source_type_mach_recv() -> dispatch_source_type_t
 internal func _swift_dispatch_source_type_memorypressure() -> dispatch_source_type_t
 #endif
 
-#if !os(Linux)
+#if !os(Linux) && !os(Android)
 @_silgen_name("_swift_dispatch_source_type_PROC")
 internal func _swift_dispatch_source_type_proc() -> dispatch_source_type_t
 #endif
@@ -382,7 +704,7 @@ internal func _swift_dispatch_source_type_signal() -> dispatch_source_type_t
 @_silgen_name("_swift_dispatch_source_type_TIMER")
 internal func _swift_dispatch_source_type_timer() -> dispatch_source_type_t
 
-#if !os(Linux)
+#if !os(Linux) && !os(Android)
 @_silgen_name("_swift_dispatch_source_type_VNODE")
 internal func _swift_dispatch_source_type_vnode() -> dispatch_source_type_t
 #endif
index a9559fd24a161bf0ea8398e09247d7836e50d023..d7d49c96b92f7e14d5a95f58097efca8b658cec5 100644 (file)
 import CDispatch
 
 public struct DispatchTime : Comparable {
+#if HAVE_MACH
+       private static let timebaseInfo: mach_timebase_info_data_t = {
+               var info = mach_timebase_info_data_t(numer: 1, denom: 1)
+               mach_timebase_info(&info)
+               return info
+       }()
+#endif
        public let rawValue: dispatch_time_t
 
        public static func now() -> DispatchTime {
@@ -37,12 +45,36 @@ public struct DispatchTime : Comparable {
        ///   - uptimeNanoseconds: The number of nanoseconds since boot, excluding
        ///                        time the system spent asleep
        /// - Returns: A new `DispatchTime`
+       /// - Discussion: This clock is the same as the value returned by
+       ///               `mach_absolute_time` when converted into nanoseconds.
+       ///               On some platforms, the nanosecond value is rounded up to a
+       ///               multiple of the Mach timebase, using the conversion factors
+       ///               returned by `mach_timebase_info()`. The nanosecond equivalent
+       ///               of the rounded result can be obtained by reading the
+       ///               `uptimeNanoseconds` property.
+       ///               Note that `DispatchTime(uptimeNanoseconds: 0)` is
+       ///               equivalent to `DispatchTime.now()`, that is, its value
+       ///               represents the number of nanoseconds since boot (excluding
+       ///               system sleep time), not zero nanoseconds since boot.
        public init(uptimeNanoseconds: UInt64) {
-               self.rawValue = dispatch_time_t(uptimeNanoseconds)
+               var rawValue = uptimeNanoseconds
+#if HAVE_MACH
+               if (DispatchTime.timebaseInfo.numer != DispatchTime.timebaseInfo.denom) {
+                       rawValue = (rawValue * UInt64(DispatchTime.timebaseInfo.denom) 
+                               + UInt64(DispatchTime.timebaseInfo.numer - 1)) / UInt64(DispatchTime.timebaseInfo.numer)
+               }
+#endif
+               self.rawValue = dispatch_time_t(rawValue)
        }
 
        public var uptimeNanoseconds: UInt64 {
-               return UInt64(self.rawValue)
+               var result = self.rawValue
+#if HAVE_MACH
+               if (DispatchTime.timebaseInfo.numer != DispatchTime.timebaseInfo.denom) {
+                       result = result * UInt64(DispatchTime.timebaseInfo.numer) / UInt64(DispatchTime.timebaseInfo.denom)
+               }
+#endif
+               return result
        }
 }
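
The rounding described above is observable: nanoseconds are rounded up to whole timebase ticks on entry and truncated back on exit, so the reported value never drops below the requested one. A sketch:

import Dispatch

let t = DispatchTime(uptimeNanoseconds: 10)
// On 1:1 timebases the round trip is exact; otherwise it only rounds up.
assert(t.uptimeNanoseconds >= 10)
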
 
@@ -75,8 +107,12 @@ public struct DispatchWallTime : Comparable {
 }
 
 public func <(a: DispatchWallTime, b: DispatchWallTime) -> Bool {
-       if a.rawValue == ~0 || b.rawValue == ~0 { return false }
-       return -Int64(a.rawValue) < -Int64(b.rawValue)
+       if b.rawValue == ~0 {
+               return a.rawValue != ~0
+       } else if a.rawValue == ~0 {
+               return false
+       }
+       return -Int64(bitPattern: a.rawValue) < -Int64(bitPattern: b.rawValue)
 }
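
The revised comparison treats the ~0 raw value as a distant-future sentinel: greater than every other wall time, never less than itself. A sketch using the overlay's `distantFuture` constant:

import Dispatch

let future = DispatchWallTime.distantFuture // rawValue == ~0
assert(DispatchWallTime.now() < future)
assert(!(future < future))
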
 
 public func ==(a: DispatchWallTime, b: DispatchWallTime) -> Bool {
@@ -88,6 +124,8 @@ public enum DispatchTimeInterval {
        case milliseconds(Int)
        case microseconds(Int)
        case nanoseconds(Int)
+       @_downgrade_exhaustivity_check
+       case never
 
        internal var rawValue: Int64 {
                switch self {
@@ -95,6 +133,16 @@ public enum DispatchTimeInterval {
                case .milliseconds(let ms): return Int64(ms) * Int64(NSEC_PER_MSEC)
                case .microseconds(let us): return Int64(us) * Int64(NSEC_PER_USEC)
                case .nanoseconds(let ns): return Int64(ns)
+               case .never: return Int64.max
+               }
+       }
+
+       public static func ==(lhs: DispatchTimeInterval, rhs: DispatchTimeInterval) -> Bool {
+               switch (lhs, rhs) {
+               case (.never, .never): return true
+               case (.never, _): return false
+               case (_, .never): return false
+               default: return lhs.rawValue == rhs.rawValue
                }
        }
 }
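
The custom `==` matters because `.never` reuses `Int64.max` as its raw value, which collides with `.nanoseconds(Int.max)` on 64-bit platforms; the two must still compare unequal. A sketch:

import Dispatch

assert(DispatchTimeInterval.seconds(1) == DispatchTimeInterval.milliseconds(1000))
// Same raw value (Int64.max), but `.never` only equals itself.
assert(!(DispatchTimeInterval.never == DispatchTimeInterval.nanoseconds(Int.max)))
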
@@ -110,12 +158,16 @@ public func -(time: DispatchTime, interval: DispatchTimeInterval) -> DispatchTim
 }
 
 public func +(time: DispatchTime, seconds: Double) -> DispatchTime {
-       let t = CDispatch.dispatch_time(time.rawValue, Int64(seconds * Double(NSEC_PER_SEC)))
+       let interval = seconds * Double(NSEC_PER_SEC)
+       let t = CDispatch.dispatch_time(time.rawValue,
+               interval.isInfinite || interval.isNaN ? Int64.max : Int64(interval))
        return DispatchTime(rawValue: t)
 }
 
 public func -(time: DispatchTime, seconds: Double) -> DispatchTime {
-       let t = CDispatch.dispatch_time(time.rawValue, Int64(-seconds * Double(NSEC_PER_SEC)))
+       let interval = -seconds * Double(NSEC_PER_SEC)
+       let t = CDispatch.dispatch_time(time.rawValue,
+               interval.isInfinite || interval.isNaN ? Int64.min : Int64(interval))
        return DispatchTime(rawValue: t)
 }
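
With the clamping above, a non-finite offset no longer traps in the Int64 conversion; positive infinity saturates, and dispatch_time() turns the saturated delta into DISPATCH_TIME_FOREVER. A sketch:

import Dispatch

let never = DispatchTime.now() + Double.infinity
// Int64.max overflows the deadline computation into DISPATCH_TIME_FOREVER (~0).
assert(never == DispatchTime.distantFuture)
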
 
@@ -130,11 +182,15 @@ public func -(time: DispatchWallTime, interval: DispatchTimeInterval) -> Dispatc
 }
 
 public func +(time: DispatchWallTime, seconds: Double) -> DispatchWallTime {
-       let t = CDispatch.dispatch_time(time.rawValue, Int64(seconds * Double(NSEC_PER_SEC)))
+       let interval = seconds * Double(NSEC_PER_SEC)
+       let t = CDispatch.dispatch_time(time.rawValue,
+               interval.isInfinite || interval.isNaN ? Int64.max : Int64(interval))
        return DispatchWallTime(rawValue: t)
 }
 
 public func -(time: DispatchWallTime, seconds: Double) -> DispatchWallTime {
-       let t = CDispatch.dispatch_time(time.rawValue, Int64(-seconds * Double(NSEC_PER_SEC)))
+       let interval = -seconds * Double(NSEC_PER_SEC)
+       let t = CDispatch.dispatch_time(time.rawValue,
+               interval.isInfinite || interval.isNaN ? Int64.min : Int64(interval))
        return DispatchWallTime(rawValue: t)
 }
index deb3c6dfec2525a7af76b213d7486f0a9eaadc2b..5a551dfba3ff80c648fcd5c10cacbd6337750c92 100644 (file)
@@ -158,7 +158,7 @@ public class DispatchSource : DispatchObject,
        DispatchSourceProtocol, DispatchSourceRead,
        DispatchSourceSignal, DispatchSourceTimer,
        DispatchSourceUserDataAdd, DispatchSourceUserDataOr,
-       DispatchSourceWrite {
+       DispatchSourceUserDataReplace, DispatchSourceWrite {
        internal let __wrapped:dispatch_source_t
 
        final internal override func wrapped() -> dispatch_object_t {
@@ -180,7 +180,7 @@ extension DispatchSource : DispatchSourceMachSend,
 }
 #endif
 
-#if !os(Linux)
+#if !os(Linux) && !os(Android)
 extension DispatchSource : DispatchSourceProcess,
        DispatchSourceFileSystemObject {
 }
@@ -188,7 +188,6 @@ extension DispatchSource : DispatchSourceProcess,
 
 internal class __DispatchData : DispatchObject {
        internal let __wrapped:dispatch_data_t
-       internal let __owned:Bool
 
        final internal override func wrapped() -> dispatch_object_t {
                return unsafeBitCast(__wrapped, to: dispatch_object_t.self)
@@ -196,13 +195,13 @@ internal class __DispatchData : DispatchObject {
 
        internal init(data:dispatch_data_t, owned:Bool) {
                __wrapped = data
-               __owned = owned
+               if !owned {
+                       _swift_dispatch_retain(unsafeBitCast(data, to: dispatch_object_t.self))
+               }
        }
 
        deinit {
-               if __owned {
-                       _swift_dispatch_release(wrapped())
-               }
+               _swift_dispatch_release(wrapped())
        }
 }
 
@@ -244,6 +243,10 @@ public protocol DispatchSourceUserDataOr : DispatchSourceProtocol {
        func or(data: UInt)
 }
 
+public protocol DispatchSourceUserDataReplace : DispatchSourceProtocol {
+       func replace(data: UInt)
+}
+
 #if HAVE_MACH
 public protocol DispatchSourceMachSend : DispatchSourceProtocol {
        public var handle: mach_port_t { get }
@@ -268,7 +271,7 @@ public protocol DispatchSourceMemoryPressure : DispatchSourceProtocol {
 }
 #endif
 
-#if !os(Linux)
+#if !os(Linux) && !os(Android)
 public protocol DispatchSourceProcess : DispatchSourceProtocol {
        var handle: pid_t { get }
 
@@ -298,7 +301,7 @@ public protocol DispatchSourceTimer : DispatchSourceProtocol {
        func scheduleRepeating(wallDeadline: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval)
 }
 
-#if !os(Linux)
+#if !os(Linux) && !os(Android)
 public protocol DispatchSourceFileSystemObject : DispatchSourceProtocol {
        var handle: Int32 { get }
 
@@ -335,3 +338,6 @@ internal enum _OSQoSClass : UInt32  {
 
 @_silgen_name("_swift_dispatch_release")
 internal func _swift_dispatch_release(_ obj: dispatch_object_t) -> Void
+
+@_silgen_name("_swift_dispatch_retain")
+internal func _swift_dispatch_retain(_ obj: dispatch_object_t) -> Void
index 6db48806a4e6e21dbbcd66c539df28bff2950bf4..5b0bab0bfc22a4fa621eed92ee1003e5279ac84f 100644 (file)
 
 #include "internal.h"
 
-#if !(defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME) \
-               || TARGET_OS_WIN32
-DISPATCH_CACHELINE_ALIGN _dispatch_host_time_data_s _dispatch_host_time_data = {
-       .ratio_1_to_1 = true,
-};
+#if DISPATCH_USE_HOST_TIME
+typedef struct _dispatch_host_time_data_s {
+       long double frac;
+       bool ratio_1_to_1;
+} _dispatch_host_time_data_s;
+
+DISPATCH_CACHELINE_ALIGN
+static _dispatch_host_time_data_s _dispatch_host_time_data;
+
+uint64_t (*_dispatch_host_time_mach2nano)(uint64_t machtime);
+uint64_t (*_dispatch_host_time_nano2mach)(uint64_t nsec);
+
+static uint64_t
+_dispatch_mach_host_time_mach2nano(uint64_t machtime)
+{
+       _dispatch_host_time_data_s *const data = &_dispatch_host_time_data;
+
+       if (unlikely(!machtime || data->ratio_1_to_1)) {
+               return machtime;
+       }
+       if (machtime >= INT64_MAX) {
+               return INT64_MAX;
+       }
+       long double big_tmp = ((long double)machtime * data->frac) + .5L;
+       if (unlikely(big_tmp >= INT64_MAX)) {
+               return INT64_MAX;
+       }
+       return (uint64_t)big_tmp;
+}
+
+static uint64_t
+_dispatch_mach_host_time_nano2mach(uint64_t nsec)
+{
+       _dispatch_host_time_data_s *const data = &_dispatch_host_time_data;
+
+       if (unlikely(!nsec || data->ratio_1_to_1)) {
+               return nsec;
+       }
+       if (nsec >= INT64_MAX) {
+               return INT64_MAX;
+       }
+       long double big_tmp = ((long double)nsec / data->frac) + .5L;
+       if (unlikely(big_tmp >= INT64_MAX)) {
+               return INT64_MAX;
+       }
+       return (uint64_t)big_tmp;
+}
+
+static void
+_dispatch_host_time_init(mach_timebase_info_data_t *tbi)
+{
+       _dispatch_host_time_data.frac = tbi->numer;
+       _dispatch_host_time_data.frac /= tbi->denom;
+       _dispatch_host_time_data.ratio_1_to_1 = (tbi->numer == tbi->denom);
+       _dispatch_host_time_mach2nano = _dispatch_mach_host_time_mach2nano;
+       _dispatch_host_time_nano2mach = _dispatch_mach_host_time_nano2mach;
+}
+#endif // DISPATCH_USE_HOST_TIME
 
 void
-_dispatch_get_host_time_init(void *context DISPATCH_UNUSED)
+_dispatch_time_init(void)
 {
-#if !TARGET_OS_WIN32
+#if DISPATCH_USE_HOST_TIME
        mach_timebase_info_data_t tbi;
        (void)dispatch_assume_zero(mach_timebase_info(&tbi));
-       _dispatch_host_time_data.frac = tbi.numer;
-       _dispatch_host_time_data.frac /= tbi.denom;
-       _dispatch_host_time_data.ratio_1_to_1 = (tbi.numer == tbi.denom);
-#else
-       LARGE_INTEGER freq;
-       dispatch_assume(QueryPerformanceFrequency(&freq));
-       _dispatch_host_time_data.frac = (long double)NSEC_PER_SEC /
-                       (long double)freq.QuadPart;
-       _dispatch_host_time_data.ratio_1_to_1 = (freq.QuadPart == 1);
-#endif /* TARGET_OS_WIN32 */
+       _dispatch_host_time_init(&tbi);
+#endif // DISPATCH_USE_HOST_TIME
 }
-#endif
 
 dispatch_time_t
 dispatch_time(dispatch_time_t inval, int64_t delta)
index c49689338fd1eaece71a519d6b9487e38afc95c0..c670f60b7944e1ee4593b29a4203d0f45c6a632b 100644 (file)
 
 #if DISPATCH_PURE_C
 
-#if DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION
-typedef struct dispatch_trace_timer_params_s {
-       int64_t deadline, interval, leeway;
-} *dispatch_trace_timer_params_t;
-
-#include "provider.h"
-#endif // DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION
-
 #if DISPATCH_USE_DTRACE_INTROSPECTION
 #define _dispatch_trace_callout(_c, _f, _dcc) do { \
                if (slowpath(DISPATCH_CALLOUT_ENTRY_ENABLED()) || \
@@ -108,7 +100,7 @@ _dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t))
                } else { \
                        _dc = (void*)_do; \
                        _ctxt = _dc->dc_ctxt; \
-                       if (_dc->dc_flags & DISPATCH_OBJ_SYNC_SLOW_BIT) { \
+                       if (_dc->dc_flags & DISPATCH_OBJ_SYNC_WAITER_BIT) { \
                                _kind = "semaphore"; \
                                _func = (dispatch_function_t)dispatch_semaphore_signal; \
                        } else if (_dc->dc_flags & DISPATCH_OBJ_BLOCK_BIT) { \
@@ -131,8 +123,8 @@ _dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t))
 #if DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_trace_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head,
-               dispatch_object_t _tail, pthread_priority_t pp, unsigned int n)
+_dispatch_trace_root_queue_push_list(dispatch_queue_t dq,
+               dispatch_object_t _head, dispatch_object_t _tail, int n)
 {
        if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) {
                struct dispatch_object_s *dou = _head._do;
@@ -141,20 +133,20 @@ _dispatch_trace_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head,
                } while (dou != _tail._do && (dou = dou->do_next));
        }
        _dispatch_introspection_queue_push_list(dq, _head, _tail);
-       _dispatch_queue_push_list(dq, _head, _tail, pp, n);
+       _dispatch_root_queue_push_inline(dq, _head, _tail, n);
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_trace_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _tail,
-               pthread_priority_t pp, dispatch_wakeup_flags_t flags)
+               dispatch_qos_t qos)
 {
        if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) {
                struct dispatch_object_s *dou = _tail._do;
                _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH);
        }
        _dispatch_introspection_queue_push(dq, _tail);
-       _dispatch_queue_push_inline(dq, _tail, pp, flags);
+       _dispatch_queue_push_inline(dq, _tail, qos);
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -168,7 +160,7 @@ _dispatch_trace_continuation_push(dispatch_queue_t dq, dispatch_object_t _tail)
        _dispatch_introspection_queue_push(dq, _tail);
 }
 
-#define _dispatch_queue_push_list _dispatch_trace_queue_push_list
+#define _dispatch_root_queue_push_inline _dispatch_trace_root_queue_push_list
 #define _dispatch_queue_push_inline _dispatch_trace_queue_push_inline
 
 DISPATCH_ALWAYS_INLINE
@@ -189,7 +181,7 @@ _dispatch_trace_continuation_pop(dispatch_queue_t dq, dispatch_object_t dou)
 
 #if DISPATCH_USE_DTRACE
 static inline dispatch_function_t
-_dispatch_trace_timer_function(dispatch_source_refs_t dr)
+_dispatch_trace_timer_function(dispatch_timer_source_refs_t dr)
 {
        dispatch_continuation_t dc;
        dc = os_atomic_load(&dr->ds_handler[DS_EVENT_HANDLER], relaxed);
@@ -216,8 +208,9 @@ _dispatch_trace_timer_params(dispatch_clock_t clock,
                params->deadline = _dispatch_trace_time2nano2(values->target,
                                values->target < now ? 0 : values->target - now);
        }
+       uint64_t leeway = values->deadline - values->target;
        params->interval = _dispatch_trace_time2nano(values->interval);
-       params->leeway = _dispatch_trace_time2nano(values->leeway);
+       params->leeway = _dispatch_trace_time2nano(leeway);
        return params;
 }
 
@@ -233,22 +226,23 @@ static inline void
 _dispatch_trace_timer_configure(dispatch_source_t ds, dispatch_clock_t clock,
                struct dispatch_timer_source_s *values)
 {
+       dispatch_timer_source_refs_t dr = ds->ds_timer_refs;
        struct dispatch_trace_timer_params_s params;
-       DISPATCH_TIMER_CONFIGURE(ds, _dispatch_trace_timer_function(ds->ds_refs),
+       DISPATCH_TIMER_CONFIGURE(ds, _dispatch_trace_timer_function(dr),
                        _dispatch_trace_timer_params(clock, values, 0, &params));
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_trace_timer_program(dispatch_source_refs_t dr, uint64_t deadline)
+_dispatch_trace_timer_program(dispatch_timer_source_refs_t dr, uint64_t deadline)
 {
        if (slowpath(DISPATCH_TIMER_PROGRAM_ENABLED())) {
                if (deadline && dr) {
                        dispatch_source_t ds = _dispatch_source_from_refs(dr);
-                       dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(ds->ds_ident_hack);
+                       dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(dr->du_ident);
                        struct dispatch_trace_timer_params_s params;
                        DISPATCH_TIMER_PROGRAM(ds, _dispatch_trace_timer_function(dr),
-                                       _dispatch_trace_timer_params(clock, &ds_timer(dr),
+                                       _dispatch_trace_timer_params(clock, &dr->dt_timer,
                                        deadline, &params));
                }
        }
@@ -256,7 +250,7 @@ _dispatch_trace_timer_program(dispatch_source_refs_t dr, uint64_t deadline)
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_trace_timer_wake(dispatch_source_refs_t dr)
+_dispatch_trace_timer_wake(dispatch_timer_source_refs_t dr)
 {
        if (slowpath(DISPATCH_TIMER_WAKE_ENABLED())) {
                if (dr) {
@@ -268,8 +262,8 @@ _dispatch_trace_timer_wake(dispatch_source_refs_t dr)
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_trace_timer_fire(dispatch_source_refs_t dr, unsigned long data,
-               unsigned long missed)
+_dispatch_trace_timer_fire(dispatch_timer_source_refs_t dr, uint64_t data,
+               uint64_t missed)
 {
        if (slowpath(DISPATCH_TIMER_FIRE_ENABLED())) {
                if (!(data - missed) && dr) {
index 9c474c83b7b8e0d4bc163205cd062cda7c088425..458e2f0a479915da3caf4f01e1c63ceafe2e9037 100644 (file)
@@ -85,6 +85,7 @@ voucher_create(voucher_recipe_t recipe)
        if (extra) {
                memcpy(_voucher_extra_recipes(voucher), recipe->vr_data, extra);
        }
+       _voucher_trace(CREATE, voucher, MACH_PORT_NULL, 0);
        return voucher;
 }
 #endif
@@ -165,24 +166,69 @@ _voucher_thread_cleanup(void *voucher)
        _voucher_release(voucher);
 }
 
+#pragma mark -
+#pragma mark voucher_hash
+
 DISPATCH_CACHELINE_ALIGN
-static TAILQ_HEAD(, voucher_s) _vouchers[VL_HASH_SIZE];
-#define _vouchers_head(kv) (&_vouchers[VL_HASH((kv))])
-static dispatch_unfair_lock_s _vouchers_lock;
-#define _vouchers_lock_lock() _dispatch_unfair_lock_lock(&_vouchers_lock)
-#define _vouchers_lock_unlock() _dispatch_unfair_lock_unlock(&_vouchers_lock)
+static voucher_hash_head_s _voucher_hash[VL_HASH_SIZE];
+
+#define _voucher_hash_head(kv)   (&_voucher_hash[VL_HASH((kv))])
+static dispatch_unfair_lock_s _voucher_hash_lock;
+#define _voucher_hash_lock_lock() \
+               _dispatch_unfair_lock_lock(&_voucher_hash_lock)
+#define _voucher_hash_lock_unlock() \
+               _dispatch_unfair_lock_unlock(&_voucher_hash_lock)
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_hash_head_init(voucher_hash_head_s *head)
+{
+       _voucher_hash_set_next(&head->vhh_first, VOUCHER_NULL);
+       _voucher_hash_set_prev_ptr(&head->vhh_last_ptr, &head->vhh_first);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_hash_enqueue(mach_voucher_t kv, voucher_t v)
+{
+       // same as TAILQ_INSERT_TAIL
+       voucher_hash_head_s *head = _voucher_hash_head(kv);
+       uintptr_t prev_ptr = head->vhh_last_ptr;
+       _voucher_hash_set_next(&v->v_list.vhe_next, VOUCHER_NULL);
+       v->v_list.vhe_prev_ptr = prev_ptr;
+       _voucher_hash_store_to_prev_ptr(prev_ptr, v);
+       _voucher_hash_set_prev_ptr(&head->vhh_last_ptr, &v->v_list.vhe_next);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_hash_remove(mach_voucher_t kv, voucher_t v)
+{
+       // same as TAILQ_REMOVE
+       voucher_hash_head_s *head = _voucher_hash_head(kv);
+       voucher_t next = _voucher_hash_get_next(v->v_list.vhe_next);
+       uintptr_t prev_ptr = v->v_list.vhe_prev_ptr;
+       if (next) {
+               next->v_list.vhe_prev_ptr = prev_ptr;
+       } else {
+               head->vhh_last_ptr = prev_ptr;
+       }
+       _voucher_hash_store_to_prev_ptr(prev_ptr, next);
+       _voucher_hash_mark_not_enqueued(v);
+}
 
 static voucher_t
 _voucher_find_and_retain(mach_voucher_t kv)
 {
-       voucher_t v;
        if (!kv) return NULL;
-       _vouchers_lock_lock();
-       TAILQ_FOREACH(v, _vouchers_head(kv), v_list) {
+       _voucher_hash_lock_lock();
+       voucher_hash_head_s *head = _voucher_hash_head(kv);
+       voucher_t v = _voucher_hash_get_next(head->vhh_first);
+       while (v) {
                if (v->v_ipc_kvoucher == kv) {
                        int xref_cnt = os_atomic_inc2o(v, os_obj_xref_cnt, relaxed);
                        _dispatch_voucher_debug("retain  -> %d", v, xref_cnt + 1);
-                       if (slowpath(xref_cnt < 0)) {
+                       if (unlikely(xref_cnt < 0)) {
                                _dispatch_voucher_debug("over-release", v);
                                _OS_OBJECT_CLIENT_CRASH("Voucher over-release");
                        }
@@ -192,8 +238,9 @@ _voucher_find_and_retain(mach_voucher_t kv)
                        }
                        break;
                }
+               v = _voucher_hash_get_next(v->v_list.vhe_next);
        }
-       _vouchers_lock_unlock();
+       _voucher_hash_lock_unlock();
        return v;
 }
 
@@ -202,35 +249,35 @@ _voucher_insert(voucher_t v)
 {
        mach_voucher_t kv = v->v_ipc_kvoucher;
        if (!kv) return;
-       _vouchers_lock_lock();
-       if (slowpath(_TAILQ_IS_ENQUEUED(v, v_list))) {
+       _voucher_hash_lock_lock();
+       if (unlikely(_voucher_hash_is_enqueued(v))) {
                _dispatch_voucher_debug("corruption", v);
-               DISPATCH_CLIENT_CRASH(v->v_list.tqe_prev, "Voucher corruption");
+               DISPATCH_CLIENT_CRASH(0, "Voucher corruption");
        }
-       TAILQ_INSERT_TAIL(_vouchers_head(kv), v, v_list);
-       _vouchers_lock_unlock();
+       _voucher_hash_enqueue(kv, v);
+       _voucher_hash_lock_unlock();
 }
 
 static void
 _voucher_remove(voucher_t v)
 {
        mach_voucher_t kv = v->v_ipc_kvoucher;
-       if (!_TAILQ_IS_ENQUEUED(v, v_list)) return;
-       _vouchers_lock_lock();
-       if (slowpath(!kv)) {
+       if (!_voucher_hash_is_enqueued(v)) return;
+       _voucher_hash_lock_lock();
+       if (unlikely(!kv)) {
                _dispatch_voucher_debug("corruption", v);
                DISPATCH_CLIENT_CRASH(0, "Voucher corruption");
        }
        // check for resurrection race with _voucher_find_and_retain
-       if (os_atomic_load2o(v, os_obj_xref_cnt, ordered) < 0 &&
-                       _TAILQ_IS_ENQUEUED(v, v_list)) {
-               TAILQ_REMOVE(_vouchers_head(kv), v, v_list);
-               _TAILQ_MARK_NOT_ENQUEUED(v, v_list);
-               v->v_list.tqe_next = (void*)~0ull;
+       if (os_atomic_load2o(v, os_obj_xref_cnt, ordered) < 0) {
+               if (_voucher_hash_is_enqueued(v)) _voucher_hash_remove(kv, v);
        }
-       _vouchers_lock_unlock();
+       _voucher_hash_lock_unlock();
 }
 
+#pragma mark -
+#pragma mark mach_voucher_t
+
 void
 _voucher_dealloc_mach_voucher(mach_voucher_t kv)
 {
@@ -313,18 +360,11 @@ voucher_replace_default_voucher(void)
 #define _voucher_mach_recipe_size(payload_size) \
        (sizeof(mach_voucher_attr_recipe_data_t) + (payload_size))
 
-#if VOUCHER_USE_MACH_VOUCHER_PRIORITY
 #define _voucher_mach_recipe_alloca(v) ((mach_voucher_attr_recipe_t)alloca(\
                _voucher_mach_recipe_size(0) + \
                _voucher_mach_recipe_size(sizeof(ipc_pthread_priority_value_t)) + \
                _voucher_mach_recipe_size(sizeof(_voucher_mach_udata_s)) + \
                _voucher_extra_size(v)))
-#else
-#define _voucher_mach_recipe_alloca(v) ((mach_voucher_attr_recipe_t)alloca(\
-               _voucher_mach_recipe_size(0) + \
-               _voucher_mach_recipe_size(sizeof(_voucher_mach_udata_s)) + \
-               _voucher_extra_size(v)))
-#endif
 
 DISPATCH_ALWAYS_INLINE
 static inline mach_voucher_attr_recipe_size_t
@@ -345,7 +385,6 @@ _voucher_mach_recipe_init(mach_voucher_attr_recipe_t mvar_buf, voucher_s *v,
        };
        size += _voucher_mach_recipe_size(0);
 
-#if VOUCHER_USE_MACH_VOUCHER_PRIORITY
        if (pp) {
                ipc_pthread_priority_value_t value = (ipc_pthread_priority_value_t)pp;
                *mvar_buf++ = (mach_voucher_attr_recipe_data_t){
@@ -356,7 +395,6 @@ _voucher_mach_recipe_init(mach_voucher_attr_recipe_t mvar_buf, voucher_s *v,
                mvar_buf = _dispatch_memappend(mvar_buf, &value);
                size += _voucher_mach_recipe_size(sizeof(value));
        }
-#endif // VOUCHER_USE_MACH_VOUCHER_PRIORITY
 
        if ((v && v->v_activity) || pp) {
                _voucher_mach_udata_s *udata_buf;
@@ -419,7 +457,7 @@ _voucher_get_mach_voucher(voucher_t voucher)
 
        size = _voucher_mach_recipe_init(mvar, voucher, kvb, voucher->v_priority);
        kr = _voucher_create_mach_voucher(mvar, size, &kv);
-       if (dispatch_assume_zero(kr) || !kv){
+       if (dispatch_assume_zero(kr) || !kv) {
                return MACH_VOUCHER_NULL;
        }
        if (!os_atomic_cmpxchgv2o(voucher, v_ipc_kvoucher, MACH_VOUCHER_NULL,
@@ -453,7 +491,7 @@ _voucher_create_mach_voucher_with_priority(voucher_t voucher,
 
        size = _voucher_mach_recipe_init(mvar, voucher, kvb, priority);
        kr = _voucher_create_mach_voucher(mvar, size, &kv);
-       if (dispatch_assume_zero(kr) || !kv){
+       if (dispatch_assume_zero(kr) || !kv) {
                return MACH_VOUCHER_NULL;
        }
        _dispatch_kvoucher_debug("create with priority from voucher[%p]", kv,
@@ -471,29 +509,6 @@ _voucher_create_with_mach_voucher(mach_voucher_t kv, mach_msg_bits_t msgh_bits)
        mach_voucher_attr_recipe_size_t kvr_size = 0;
        mach_voucher_attr_content_size_t udata_sz = 0;
        _voucher_mach_udata_s *udata = NULL;
-#if !VOUCHER_USE_BANK_AUTOREDEEM
-       mach_voucher_t rkv;
-       const mach_voucher_attr_recipe_data_t redeem_recipe[] = {
-               [0] = {
-                       .key = MACH_VOUCHER_ATTR_KEY_ALL,
-                       .command = MACH_VOUCHER_ATTR_COPY,
-                       .previous_voucher = kv,
-               },
-               [1] = {
-                       .key = MACH_VOUCHER_ATTR_KEY_BANK,
-                       .command = MACH_VOUCHER_ATTR_REDEEM,
-               },
-       };
-       kr = _voucher_create_mach_voucher(redeem_recipe, sizeof(redeem_recipe),
-                       &rkv);
-       if (!dispatch_assume_zero(kr)) {
-               _voucher_dealloc_mach_voucher(kv);
-               _dispatch_kvoucher_debug("redeemed from 0x%08x", rkv, kv);
-               kv = rkv;
-       } else {
-               _dispatch_voucher_debug_machport(kv);
-       }
-#endif
        voucher_t v = _voucher_find_and_retain(kv);
        if (v) {
                _dispatch_voucher_debug("kvoucher[0x%08x] found", v, kv);
@@ -548,15 +563,12 @@ _voucher_create_with_mach_voucher(mach_voucher_t kv, mach_msg_bits_t msgh_bits)
                                .key = MACH_VOUCHER_ATTR_KEY_USER_DATA,
                                .command = MACH_VOUCHER_ATTR_REMOVE,
                        },
-#if VOUCHER_USE_MACH_VOUCHER_PRIORITY
                        [2] = {
                                .key = MACH_VOUCHER_ATTR_KEY_PTHPRIORITY,
                                .command = MACH_VOUCHER_ATTR_REMOVE,
                        },
-#endif
                };
                mach_voucher_attr_recipe_size_t size = sizeof(remove_userdata_recipe);
-
                kr = _voucher_create_mach_voucher(remove_userdata_recipe, size, &nkv);
                if (!dispatch_assume_zero(kr)) {
                        _dispatch_voucher_debug("kvoucher[0x%08x] udata removal "
@@ -574,6 +586,7 @@ _voucher_create_with_mach_voucher(mach_voucher_t kv, mach_msg_bits_t msgh_bits)
                }
        }
 
+       _voucher_trace(CREATE, v, v->v_kvoucher, v->v_activity);
        _voucher_insert(v);
        _dispatch_voucher_debug("kvoucher[0x%08x] create", v, kv);
        return v;
@@ -608,6 +621,7 @@ _voucher_create_with_priority_and_mach_voucher(voucher_t ov,
                                "voucher[%p]", v, kv, ov);
                _dispatch_voucher_debug_machport(kv);
        }
+       _voucher_trace(CREATE, v, v->v_kvoucher, v->v_activity);
        return v;
 }
 
@@ -635,7 +649,7 @@ _voucher_create_without_importance(voucher_t ov)
        };
        kr = _voucher_create_mach_voucher(importance_remove_recipe,
                        sizeof(importance_remove_recipe), &kv);
-       if (dispatch_assume_zero(kr) || !kv){
+       if (dispatch_assume_zero(kr) || !kv) {
                if (ov->v_ipc_kvoucher) return NULL;
                kv = MACH_VOUCHER_NULL;
        }
@@ -665,6 +679,7 @@ _voucher_create_without_importance(voucher_t ov)
                _dispatch_voucher_debug("kvoucher[0x%08x] create without importance "
                                "from voucher[%p]", v, kv, ov);
        }
+       _voucher_trace(CREATE, v, v->v_kvoucher, v->v_activity);
        return v;
 }
 
@@ -684,7 +699,7 @@ _voucher_create_accounting_voucher(voucher_t ov)
        };
        kr = _voucher_create_mach_voucher(&accounting_copy_recipe,
                        sizeof(accounting_copy_recipe), &kv);
-       if (dispatch_assume_zero(kr) || !kv){
+       if (dispatch_assume_zero(kr) || !kv) {
                return NULL;
        }
        voucher_t v = _voucher_find_and_retain(kv);
@@ -700,6 +715,7 @@ _voucher_create_accounting_voucher(voucher_t ov)
                v->v_kvbase = _voucher_retain(ov);
                _voucher_dealloc_mach_voucher(kv); // borrow base reference
        }
+       _voucher_trace(CREATE, v, kv, v->v_activity);
        _voucher_insert(v);
        _dispatch_voucher_debug("kvoucher[0x%08x] create accounting voucher "
                        "from voucher[%p]", v, kv, ov);
@@ -757,18 +773,19 @@ _voucher_xref_dispose(voucher_t voucher)
 {
        _dispatch_voucher_debug("xref_dispose", voucher);
        _voucher_remove(voucher);
-       return _os_object_release_internal_inline((_os_object_t)voucher);
+       return _os_object_release_internal_n_inline((_os_object_t)voucher, 1);
 }
 
 void
 _voucher_dispose(voucher_t voucher)
 {
+       _voucher_trace(DISPOSE, voucher);
        _dispatch_voucher_debug("dispose", voucher);
-       if (slowpath(_TAILQ_IS_ENQUEUED(voucher, v_list))) {
+       if (slowpath(_voucher_hash_is_enqueued(voucher))) {
                _dispatch_voucher_debug("corruption", voucher);
-               DISPATCH_CLIENT_CRASH(voucher->v_list.tqe_prev, "Voucher corruption");
+               DISPATCH_CLIENT_CRASH(0, "Voucher corruption");
        }
-       voucher->v_list.tqe_next = DISPATCH_OBJECT_LISTLESS;
+       _voucher_hash_mark_not_enqueued(voucher);
        if (voucher->v_ipc_kvoucher) {
                if (voucher->v_ipc_kvoucher != voucher->v_kvoucher) {
                        _voucher_dealloc_mach_voucher(voucher->v_ipc_kvoucher);
@@ -823,6 +840,7 @@ _voucher_activity_debug_channel_init(void)
        if (dbgp) {
                dm = dispatch_mach_create_f("com.apple.debug-channel",
                                DISPATCH_TARGET_QUEUE_DEFAULT, NULL, handler);
+               dm->dm_recv_refs->du_can_be_wlh = false; // 29906118
                dispatch_mach_connect(dm, dbgp, MACH_PORT_NULL, NULL);
                // will force the DISPATCH_MACH_CONNECTED event
                dispatch_mach_send_barrier_f(dm, NULL,
@@ -1004,7 +1022,7 @@ _voucher_init(void)
        _voucher_libkernel_init();
        unsigned int i;
        for (i = 0; i < VL_HASH_SIZE; i++) {
-               TAILQ_INIT(&_vouchers[i]);
+               _voucher_hash_head_init(&_voucher_hash[i]);
        }
 }
 
@@ -1053,6 +1071,12 @@ _voucher_activity_id_allocate(firehose_activity_flags_t flags)
        return FIREHOSE_ACTIVITY_ID_MAKE(aid, flags);
 }
 
+firehose_activity_id_t
+voucher_activity_id_allocate(firehose_activity_flags_t flags)
+{
+       return _voucher_activity_id_allocate(flags);
+}
+
 #define _voucher_activity_tracepoint_reserve(stamp, stream, pub, priv, privbuf) \
                firehose_buffer_tracepoint_reserve(_firehose_task_buffer, stamp, \
                                stream, pub, priv, privbuf)
@@ -1073,7 +1097,13 @@ _firehose_task_buffer_init(void *ctx OS_UNUSED)
        info_size = proc_pidinfo(getpid(), PROC_PIDUNIQIDENTIFIERINFO, 1,
                        &p_uniqinfo, PROC_PIDUNIQIDENTIFIERINFO_SIZE);
        if (slowpath(info_size != PROC_PIDUNIQIDENTIFIERINFO_SIZE)) {
-               DISPATCH_INTERNAL_CRASH(info_size, "Unable to get the unique pid");
+               if (info_size == 0) {
+                       DISPATCH_INTERNAL_CRASH(errno,
+                               "Unable to get the unique pid (error)");
+               } else {
+                       DISPATCH_INTERNAL_CRASH(info_size,
+                               "Unable to get the unique pid (size)");
+               }
        }
        _voucher_unique_pid = p_uniqinfo.p_uniqueid;
 
@@ -1152,10 +1182,6 @@ voucher_activity_create_with_data(firehose_tracepoint_id_t *trace_id,
        if (base == VOUCHER_CURRENT) {
                base = ov;
        }
-       if (_voucher_activity_disabled()) {
-               *trace_id = 0;
-               return base ? _voucher_retain(base) : VOUCHER_NULL;
-       }
 
        FIREHOSE_TRACE_ID_CLEAR_FLAG(ftid, base, has_unique_pid);
        if (ov && (current_id = ov->v_activity)) {
@@ -1185,6 +1211,10 @@ voucher_activity_create_with_data(firehose_tracepoint_id_t *trace_id,
        v->v_activity_creator = _voucher_unique_pid;
        v->v_parent_activity = parent_id;
 
+       if (_voucher_activity_disabled()) {
+               goto done;
+       }
+
        static const firehose_stream_t streams[2] = {
                firehose_stream_metadata,
                firehose_stream_persist,
@@ -1211,7 +1241,9 @@ voucher_activity_create_with_data(firehose_tracepoint_id_t *trace_id,
                pubptr = _dispatch_mempcpy(pubptr, pubdata, publen);
                _voucher_activity_tracepoint_flush(ft, ftid);
        }
+done:
        *trace_id = ftid.ftid_value;
+       _voucher_trace(CREATE, v, v->v_kvoucher, va_id);
        return v;
 }
 
@@ -1404,7 +1436,7 @@ _voucher_debug(voucher_t v, char* buf, size_t bufsiz)
        size_t offset = 0;
        #define bufprintf(...) \
                        offset += dsnprintf(&buf[offset], bufsiz - offset, ##__VA_ARGS__)
-       bufprintf("voucher[%p] = { xrefcnt = 0x%x, refcnt = 0x%x", v,
+       bufprintf("voucher[%p] = { xref = %d, ref = %d", v,
                        v->os_obj_xref_cnt + 1, v->os_obj_ref_cnt + 1);
 
        if (v->v_kvbase) {
@@ -1440,7 +1472,7 @@ voucher_create(voucher_recipe_t recipe)
        (void)recipe;
        return NULL;
 }
-#endif
+#endif // VOUCHER_ENABLE_RECIPE_OBJECTS
 
 voucher_t
 voucher_adopt(voucher_t voucher)
@@ -1525,12 +1557,14 @@ _voucher_create_accounting_voucher(voucher_t voucher)
        return NULL;
 }
 
+#if HAVE_MACH
 voucher_t
 voucher_create_with_mach_msg(mach_msg_header_t *msg)
 {
        (void)msg;
        return NULL;
 }
+#endif
 
 #if VOUCHER_ENABLE_GET_MACH_VOUCHER
 mach_voucher_t
@@ -1539,7 +1573,7 @@ voucher_get_mach_voucher(voucher_t voucher)
        (void)voucher;
        return 0;
 }
-#endif
+#endif // VOUCHER_ENABLE_GET_MACH_VOUCHER
 
 void
 _voucher_xref_dispose(voucher_t voucher)
@@ -1573,7 +1607,7 @@ voucher_get_current_persona_proximate_info(struct proc_persona_info *persona_inf
        (void)persona_info;
        return -1;
 }
-#endif
+#endif // VOUCHER_EXPORT_PERSONA_SPI
 
 void
 _voucher_activity_debug_channel_init(void)
index 449f4ba5c45cf3e8db4880bd34c290837b9c6874..772c8c43467ecccabb081c179da5968beb62af73 100644 (file)
@@ -54,7 +54,7 @@ OS_OBJECT_DECL_CLASS(voucher_recipe);
  * @result
  * The newly created voucher object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
 voucher_t
 voucher_create(voucher_recipe_t recipe);
@@ -78,7 +78,7 @@ voucher_create(voucher_recipe_t recipe);
  * @result
  * A mach voucher port.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW
 mach_voucher_t
 voucher_get_mach_voucher(voucher_t voucher);
@@ -123,9 +123,7 @@ void voucher_release(voucher_t voucher);
 #define DISPATCH_VOUCHER_ACTIVITY_DEBUG 1
 #endif
 
-#if VOUCHER_USE_MACH_VOUCHER_PRIORITY
 #include <voucher/ipc_pthread_priority_types.h>
-#endif
 
 typedef uint32_t _voucher_magic_t;
 typedef uint32_t _voucher_priority_t;
@@ -160,7 +158,10 @@ typedef struct voucher_s {
        struct voucher_vtable_s *os_obj_isa,
        os_obj_ref_cnt,
        os_obj_xref_cnt);
-       TAILQ_ENTRY(voucher_s) v_list;
+       struct voucher_hash_entry_s {
+               uintptr_t vhe_next;
+               uintptr_t vhe_prev_ptr;
+       } v_list;
        mach_voucher_t v_kvoucher, v_ipc_kvoucher; // if equal, only one reference
        voucher_t v_kvbase; // if non-NULL, v_kvoucher is a borrowed reference
        firehose_activity_id_t v_activity;
@@ -174,6 +175,54 @@ typedef struct voucher_s {
 #endif
 } voucher_s;
 
+typedef struct voucher_hash_head_s {
+       uintptr_t vhh_first;
+       uintptr_t vhh_last_ptr;
+} voucher_hash_head_s;
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_voucher_hash_is_enqueued(const struct voucher_s *v)
+{
+       return v->v_list.vhe_prev_ptr != 0;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_hash_mark_not_enqueued(struct voucher_s *v)
+{
+       v->v_list.vhe_prev_ptr = 0;
+       v->v_list.vhe_next = (uintptr_t)DISPATCH_OBJECT_LISTLESS;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_hash_set_next(uintptr_t *next, struct voucher_s *v)
+{
+       *next = ~(uintptr_t)v;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline voucher_t
+_voucher_hash_get_next(uintptr_t next)
+{
+       return (voucher_t)~next;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_hash_set_prev_ptr(uintptr_t *prev_ptr, uintptr_t *addr)
+{
+       *prev_ptr = ~(uintptr_t)addr;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_hash_store_to_prev_ptr(uintptr_t prev_ptr, struct voucher_s *v)
+{
+       *(uintptr_t *)~prev_ptr = ~(uintptr_t)v;
+}
+
 #if VOUCHER_ENABLE_RECIPE_OBJECTS
 #define _voucher_extra_size(v) ((v)->v_recipe_extra_size)
 #define _voucher_extra_recipes(v) ((char*)(v) + (v)->v_recipe_extra_offset)
@@ -206,48 +255,54 @@ typedef struct voucher_recipe_s {
                _dispatch_debug("voucher[%p]: " msg, v, ##__VA_ARGS__)
 #define _dispatch_kvoucher_debug(msg, kv, ...) \
                _dispatch_debug("kvoucher[0x%08x]: " msg, kv, ##__VA_ARGS__)
-#if DISPATCH_MACHPORT_DEBUG
-#define _dispatch_voucher_debug_machport(name) \
-               dispatch_debug_machport((name), __func__)
-#else
-#define _dispatch_voucher_debug_machport(name) ((void)(name))
-#endif
+#define _dispatch_voucher_debug_machport(name) _dispatch_debug_machport(name)
 #else
 #define _dispatch_voucher_debug(msg, v, ...)
 #define _dispatch_kvoucher_debug(msg, kv, ...)
 #define _dispatch_voucher_debug_machport(name) ((void)(name))
 #endif
 
-#if DISPATCH_PURE_C
+#if DISPATCH_USE_DTRACE
+#define _voucher_trace(how, ...)  ({ \
+               if (unlikely(VOUCHER_##how##_ENABLED())) { \
+                       VOUCHER_##how(__VA_ARGS__); \
+               } \
+       })
+#else
+#define _voucher_trace(how, ...) ((void)0)
+#endif
+
+#ifndef DISPATCH_VOUCHER_OBJC_DEBUG
+#if DISPATCH_INTROSPECTION || DISPATCH_DEBUG
+#define DISPATCH_VOUCHER_OBJC_DEBUG 1
+#else
+#define DISPATCH_VOUCHER_OBJC_DEBUG 0
+#endif
+#endif // DISPATCH_VOUCHER_OBJC_DEBUG
 
 DISPATCH_ALWAYS_INLINE
-static inline voucher_t
-_voucher_retain(voucher_t voucher)
+static inline struct voucher_s *
+_voucher_retain_inline(struct voucher_s *voucher)
 {
-#if !DISPATCH_VOUCHER_OBJC_DEBUG
        // not using _os_object_refcnt* because we don't need barriers:
        // vouchers are immutable and are in a hash table with a lock
        int xref_cnt = os_atomic_inc2o(voucher, os_obj_xref_cnt, relaxed);
+       _voucher_trace(RETAIN, (voucher_t)voucher, xref_cnt + 1);
        _dispatch_voucher_debug("retain  -> %d", voucher, xref_cnt + 1);
        if (unlikely(xref_cnt <= 0)) {
                _OS_OBJECT_CLIENT_CRASH("Voucher resurrection");
        }
-#else
-       os_retain(voucher);
-       _dispatch_voucher_debug("retain  -> %d", voucher,
-                       voucher->os_obj_xref_cnt + 1);
-#endif // DISPATCH_DEBUG
        return voucher;
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_voucher_release(voucher_t voucher)
+_voucher_release_inline(struct voucher_s *voucher)
 {
-#if !DISPATCH_VOUCHER_OBJC_DEBUG
        // not using _os_object_refcnt* because we don't need barriers:
        // vouchers are immutable and are in a hash table with a lock
        int xref_cnt = os_atomic_dec2o(voucher, os_obj_xref_cnt, relaxed);
+       _voucher_trace(RELEASE, (voucher_t)voucher, xref_cnt + 1);
        _dispatch_voucher_debug("release -> %d", voucher, xref_cnt + 1);
        if (likely(xref_cnt >= 0)) {
                return;
@@ -256,10 +311,31 @@ _voucher_release(voucher_t voucher)
                _OS_OBJECT_CLIENT_CRASH("Voucher over-release");
        }
        return _os_object_xref_dispose((_os_object_t)voucher);
+}
+
+#if DISPATCH_PURE_C
+
+DISPATCH_ALWAYS_INLINE
+static inline voucher_t
+_voucher_retain(voucher_t voucher)
+{
+#if DISPATCH_VOUCHER_OBJC_DEBUG
+       os_retain(voucher);
 #else
-       _dispatch_voucher_debug("release -> %d", voucher, voucher->os_obj_xref_cnt);
-       return os_release(voucher);
-#endif // DISPATCH_DEBUG
+       _voucher_retain_inline(voucher);
+#endif // DISPATCH_VOUCHER_OBJC_DEBUG
+       return voucher;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_release(voucher_t voucher)
+{
+#if DISPATCH_VOUCHER_OBJC_DEBUG
+       os_release(voucher);
+#else
+       _voucher_release_inline(voucher);
+#endif // DISPATCH_VOUCHER_OBJC_DEBUG
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -270,13 +346,13 @@ _voucher_release_no_dispose(voucher_t voucher)
        // not using _os_object_refcnt* because we don't need barriers:
        // vouchers are immutable and are in a hash table with a lock
        int xref_cnt = os_atomic_dec2o(voucher, os_obj_xref_cnt, relaxed);
+       _voucher_trace(RELEASE, voucher, xref_cnt + 1);
        _dispatch_voucher_debug("release -> %d", voucher, xref_cnt + 1);
        if (likely(xref_cnt >= 0)) {
                return;
        }
        _OS_OBJECT_CLIENT_CRASH("Voucher over-release");
 #else
-       _dispatch_voucher_debug("release -> %d", voucher, voucher->os_obj_xref_cnt);
        return os_release(voucher);
 #endif // DISPATCH_DEBUG
 }
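
As the comments in the retain/release paths note, the reference count uses relaxed atomics: vouchers are immutable and the hash table is protected by a lock, so only counter integrity matters, not publication ordering. A stdatomic sketch of the same biased-count pattern (os_atomic_inc2o/os_atomic_dec2o are libdispatch wrappers; every name below is hypothetical):

    #include <stdatomic.h>
    #include <stdlib.h>

    typedef struct ref_obj_s {
        atomic_int xref_cnt;   // biased: 0 means one outstanding reference
    } ref_obj_s;

    static void ref_retain(ref_obj_s *o)
    {
        int prev = atomic_fetch_add_explicit(&o->xref_cnt, 1,
                memory_order_relaxed);
        if (prev < 0) abort();   // resurrection: retain after count went negative
    }

    static void ref_release(ref_obj_s *o)
    {
        int prev = atomic_fetch_sub_explicit(&o->xref_cnt, 1,
                memory_order_relaxed);
        if (prev > 0) return;    // still referenced
        if (prev < 0) abort();   // over-release
        free(o);                 // prev == 0: last reference just went away
    }
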
@@ -319,8 +395,10 @@ static inline mach_voucher_t
 _voucher_swap_and_get_mach_voucher(voucher_t ov, voucher_t voucher)
 {
        if (ov == voucher) return VOUCHER_NO_MACH_VOUCHER;
-       _dispatch_voucher_debug("swap from voucher[%p]", voucher, ov);
+       if (ov) _voucher_trace(ORPHAN, ov);
        _dispatch_thread_setspecific(dispatch_voucher_key, voucher);
+       if (voucher) _voucher_trace(ADOPT, voucher);
+       _dispatch_voucher_debug("swap from voucher[%p]", voucher, ov);
        mach_voucher_t kv = voucher ? voucher->v_kvoucher : MACH_VOUCHER_NULL;
        mach_voucher_t okv = ov ? ov->v_kvoucher : MACH_VOUCHER_NULL;
 #if OS_VOUCHER_ACTIVITY_GENERATE_SWAPS
@@ -502,21 +580,13 @@ _dispatch_continuation_voucher_set(dispatch_continuation_t dc,
 {
        voucher_t v = NULL;
 
+       (void)dqu;
        // _dispatch_continuation_voucher_set is never called for blocks with
        // private data or with the DISPATCH_BLOCK_HAS_VOUCHER flag set.
        // only _dispatch_continuation_init_slow handles this bit.
        dispatch_assert(!(flags & DISPATCH_BLOCK_HAS_VOUCHER));
 
-       if (dqu._oq->oq_override_voucher != DISPATCH_NO_VOUCHER) {
-               // if the queue has an override voucher, we should not capture anything
-               //
-               // if the continuation is enqueued before the queue is activated, then
-               // this optimization fails and we do capture whatever is current
-               //
-               // _dispatch_continuation_voucher_adopt() would do the right thing
-               // but using DISPATCH_NO_VOUCHER here is more efficient.
-               v = DISPATCH_NO_VOUCHER;
-       } else if (!(flags & DISPATCH_BLOCK_NO_VOUCHER)) {
+       if (!(flags & DISPATCH_BLOCK_NO_VOUCHER)) {
                v = _voucher_copy();
        }
        dc->dc_voucher = v;
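
With the override-voucher special case removed, the rule above becomes simply: capture the current voucher unless the caller opted out. On the public API side that opt-out is the DISPATCH_BLOCK_NO_VOUCHER flag; a minimal usage sketch (queue and function names are illustrative):

    #include <Block.h>
    #include <dispatch/dispatch.h>

    static void submit_unattributed_work(dispatch_queue_t q)
    {
        // The block does not capture the voucher current at creation time.
        dispatch_block_t b = dispatch_block_create(DISPATCH_BLOCK_NO_VOUCHER, ^{
            // runs without the submitter's voucher adopted
        });
        dispatch_async(q, b);
        Block_release(b);
    }
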
@@ -532,7 +602,7 @@ _dispatch_continuation_voucher_adopt(dispatch_continuation_t dc,
                voucher_t ov, uintptr_t dc_flags)
 {
        voucher_t v = dc->dc_voucher;
-       _dispatch_thread_set_self_t consume = (dc_flags & DISPATCH_OBJ_CONSUME_BIT);
+       dispatch_thread_set_self_t consume = (dc_flags & DISPATCH_OBJ_CONSUME_BIT);
        dispatch_assert(DISPATCH_OBJ_CONSUME_BIT == DISPATCH_VOUCHER_CONSUME);
 
        if (consume) {
@@ -544,7 +614,7 @@ _dispatch_continuation_voucher_adopt(dispatch_continuation_t dc,
 
                if (likely(!(dc_flags & DISPATCH_OBJ_ENFORCE_VOUCHER))) {
                        if (unlikely(ov != DISPATCH_NO_VOUCHER && v != ov)) {
-                               if (consume) _voucher_release(v);
+                               if (consume && v) _voucher_release(v);
                                consume = 0;
                                v = ov;
                        }
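
The dispatch_assert in the adopt path pins down why the raw bit test works: DISPATCH_OBJ_CONSUME_BIT and DISPATCH_VOUCHER_CONSUME must be the same numeric bit, so the masked flags value can be passed straight through as the "consume" mode for the layer below. A reduced sketch of that idiom, with hypothetical flag values:

    #include <assert.h>

    #define OBJ_CONSUME_BIT  0x4u   // hypothetical values, for illustration
    #define SELF_CONSUME     0x4u   // must be numerically identical

    static unsigned consume_mode(unsigned dc_flags)
    {
        // One assert documents the coupling; the mask result can then be
        // reused directly as the mode bit without any translation.
        static_assert(OBJ_CONSUME_BIT == SELF_CONSUME, "flag bits must match");
        return dc_flags & OBJ_CONSUME_BIT;
    }
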
diff --git a/tools/voucher_trace.d b/tools/voucher_trace.d
new file mode 100755 (executable)
index 0000000..890198e
--- /dev/null
@@ -0,0 +1,78 @@
+#!/usr/sbin/dtrace -s
+
+/*
+ * Copyright (c) 2017 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+/*
+ * Usage: voucher_trace.d -p [pid]
+ *        traced process must have been executed with
+ *        DYLD_LIBRARY_PATH=/usr/lib/system/introspection or with
+ *        DYLD_IMAGE_SUFFIX=_profile or DYLD_IMAGE_SUFFIX=_debug
+ */
+
+#pragma D option quiet
+#pragma D option zdefs
+#pragma D option bufsize=16m
+
+BEGIN {
+       printf("Starting to trace voucher operations...\n");
+}
+
+voucher$target:libdispatch*.dylib::create
+{
+       printf("ALLOC   voucher 0x%p, thread %#llx, ref 1, port %#x, aid %#llx", arg0, tid, arg1, arg2);
+       ustack(10);
+       printf("\n")
+}
+
+voucher$target:libdispatch*.dylib::dispose
+{
+       printf("FREE    voucher 0x%p, thread %#llx, ref 0", arg0, tid);
+       ustack(10);
+       printf("\n")
+}
+
+voucher$target:libdispatch*.dylib::retain
+{
+       printf("RETAIN  voucher 0x%p, thread %#llx, ref %d", arg0, tid, arg1);
+       ustack(10);
+       printf("\n")
+}
+
+voucher$target:libdispatch*.dylib::release
+{
+       printf("RELEASE voucher 0x%p, thread %#llx, ref %d", arg0, tid, arg1);
+       ustack(10);
+       printf("\n")
+}
+
+voucher$target:libdispatch*.dylib::adopt
+{
+       printf("ADOPT   voucher 0x%p, thread %#llx", arg0, tid);
+       ustack(10);
+       printf("\n")
+}
+
+voucher$target:libdispatch*.dylib::orphan
+{
+       printf("ORPHAN  voucher 0x%p, thread %#llx", arg0, tid);
+       ustack(10);
+       printf("\n")
+}
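
The new script consumes the USDT probes emitted by the _voucher_trace() macro added above. Per its own header comment, the target must run a libdispatch variant with the probes compiled in, so a plausible session (PID illustrative) is: launch the process with DYLD_IMAGE_SUFFIX=_profile (or DYLD_LIBRARY_PATH=/usr/lib/system/introspection), then run sudo ./tools/voucher_trace.d -p 1234 to stream ALLOC/FREE/RETAIN/RELEASE/ADOPT/ORPHAN events, each with a ten-frame user stack.
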
index aabda625bc8f0b3772582b131cec7772b45d304b..dd1814db9acac7baedc435037c7ba46a30678fb4 100644 (file)
 // @APPLE_APACHE_LICENSE_HEADER_END@
 //
 
-OTHER_LDFLAGS =
-BUILD_VARIANTS = normal
-GCC_PREPROCESSOR_DEFINITIONS = $(inherited) DISPATCH_VARIANT_STATIC=1 DISPATCH_VARIANT_DYLD_STUB=1 USE_OBJC=0 DISPATCH_USE_DTRACE=0
 PRODUCT_NAME = libdispatch_dyld_stub
 INSTALL_PATH = /usr/local/lib/dyld_stub
-EXCLUDED_SOURCE_FILE_NAMES = *
-INCLUDED_SOURCE_FILE_NAMES = voucher.c // it's minimal with DISPATCH_VARIANT_DYLD_STUB
+BUILD_VARIANTS = normal
+GCC_PREPROCESSOR_DEFINITIONS = $(inherited) DISPATCH_VARIANT_DYLD_STUB=1 $(STATICLIB_PREPROCESSOR_DEFINITIONS)
+OTHER_LDFLAGS =
 VERSIONING_SYSTEM =
+EXCLUDED_SOURCE_FILE_NAMES = *
+INCLUDED_SOURCE_FILE_NAMES = voucher.c // minimal with DISPATCH_VARIANT_DYLD_STUB
index 1f0eddc4cc508d2519e82fd108076d54731d0d19..af3715f1eeeb0b2c78f0b4d30f3be525d6ea3f09 100644 (file)
 // @APPLE_APACHE_LICENSE_HEADER_END@
 //
 
-OTHER_LDFLAGS =
-BUILD_VARIANTS = normal debug
-GCC_PREPROCESSOR_DEFINITIONS = $(inherited) DISPATCH_VARIANT_STATIC=1 USE_OBJC=0 DISPATCH_USE_DTRACE=0
-PRODUCT_NAME = libdispatch
-INSTALL_PATH = /usr/local/lib/system
-
 // skip simulator
 SUPPORTED_PLATFORMS = macosx iphoneos appletvos watchos
+PRODUCT_NAME = libdispatch
+INSTALL_PATH = /usr/local/lib/system
+BUILD_VARIANTS = normal debug
+GCC_PREPROCESSOR_DEFINITIONS = $(inherited) $(STATICLIB_PREPROCESSOR_DEFINITIONS)
+OTHER_LDFLAGS =
 SKIP_INSTALL[sdk=*simulator*] = YES
 EXCLUDED_SOURCE_FILE_NAMES[sdk=*simulator*] = *
index a42add8ef7a82aa6d12d88d665faed486aa2efe9..2f2e273e1bf80811afdd06ad2726fa6aa9046d19 100644 (file)
@@ -23,3 +23,4 @@ PRODUCT_NAME = libdispatch_$(DISPATCH_RESOLVED_VARIANT)
 OTHER_LDFLAGS =
 SKIP_INSTALL = YES
 VERSIONING_SYSTEM =
+EXCLUDED_SOURCE_FILE_NAMES = *
diff --git a/xcodeconfig/libdispatch-resolver_iphoneos.order b/xcodeconfig/libdispatch-resolver_iphoneos.order
deleted file mode 100644 (file)
index eea9845..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Copyright (c) 2013 Apple Inc. All rights reserved.
-#
-# @APPLE_APACHE_LICENSE_HEADER_START@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# @APPLE_APACHE_LICENSE_HEADER_END@
-#
-
index 0ece6354e3d18b652048163dbdcd993b3fcb2520..170c5b356e963a63c7e78ed992b1d9b27ee3d025 100644 (file)
 // @APPLE_APACHE_LICENSE_HEADER_END@
 //
 
-OTHER_LDFLAGS =
+// skip simulator
+SUPPORTED_PLATFORMS = macosx iphoneos appletvos watchos
+PRODUCT_NAME = libdispatch_up
 BUILD_VARIANTS = normal
+GCC_PREPROCESSOR_DEFINITIONS = $(inherited) DISPATCH_HW_CONFIG_UP=1 $(STATICLIB_PREPROCESSOR_DEFINITIONS)
+OTHER_LDFLAGS =
 SKIP_INSTALL = YES
-EXCLUDED_SOURCE_FILE_NAMES = *
-GCC_PREPROCESSOR_DEFINITIONS = $(inherited) USE_OBJC=0 DISPATCH_USE_DTRACE=0
+EXCLUDED_SOURCE_FILE_NAMES[sdk=*simulator*] = *
index 65dfd04f96c1532128362263d21db7c3ef98dd95..d8a5113a23fd6986787e5b97b2c62d748e56f5ca 100644 (file)
@@ -19,8 +19,9 @@
 #
 
 __dispatch_data_destructor_vm_deallocate __dispatch_data_destructor_munmap
-__dispatch_source_type_memorypressure __dispatch_source_type_memorystatus
 __dispatch_queue_attrs __dispatch_queue_attr_concurrent
+__dispatch_source_type_memorypressure __dispatch_source_type_memorystatus
 _dispatch_assert_queue$V2 _dispatch_assert_queue
 _dispatch_assert_queue_not$V2 _dispatch_assert_queue_not
 _dispatch_queue_create_with_target$V2 _dispatch_queue_create_with_target
+_dispatch_source_set_timer __dispatch_source_set_runloop_timer_4CF
index d5b08d6dd54ca89c88679ab9a3f95045f2d01a33..643e1d38b91ce733de1eb90c28c636d9ce797c8a 100644 (file)
@@ -40,6 +40,7 @@ ONLY_ACTIVE_ARCH = NO
 CLANG_LINK_OBJC_RUNTIME = NO
 GCC_C_LANGUAGE_STANDARD = gnu11
 CLANG_CXX_LANGUAGE_STANDARD = gnu++11
+ENABLE_STRICT_OBJC_MSGSEND = YES
 GCC_ENABLE_CPP_EXCEPTIONS = NO
 GCC_STRICT_ALIASING = YES
 GCC_SYMBOLS_PRIVATE_EXTERN = YES
@@ -49,24 +50,40 @@ GCC_WARN_64_TO_32_BIT_CONVERSION = YES
 GCC_WARN_ABOUT_RETURN_TYPE = YES
 GCC_WARN_ABOUT_MISSING_PROTOTYPES = YES
 GCC_WARN_ABOUT_MISSING_NEWLINE = YES
-GCC_WARN_UNUSED_VARIABLE = YES
-GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = YES
 GCC_WARN_ABOUT_MISSING_FIELD_INITIALIZERS = YES
+GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = YES
 GCC_WARN_SIGN_COMPARE = YES
+GCC_WARN_STRICT_SELECTOR_MATCH = YES
+GCC_WARN_UNDECLARED_SELECTOR = YES
 GCC_WARN_UNINITIALIZED_AUTOS = YES
+GCC_WARN_UNKNOWN_PRAGMAS = YES
+GCC_WARN_UNUSED_FUNCTION = YES
+GCC_WARN_UNUSED_LABEL = YES
+GCC_WARN_UNUSED_PARAMETER = YES
+GCC_WARN_UNUSED_VARIABLE = YES
+CLANG_WARN_ASSIGN_ENUM = YES
+CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES
+CLANG_WARN_DOCUMENTATION_COMMENTS = YES
+CLANG_WARN__DUPLICATE_METHOD_MATCH = YES
 CLANG_WARN_EMPTY_BODY = YES
 CLANG_WARN_IMPLICIT_SIGN_CONVERSION = YES
+CLANG_WARN_INFINITE_RECURSION = YES
+CLANG_WARN_OBJC_IMPLICIT_ATOMIC_PROPERTIES = YES
+CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS = YES
 CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES
-CLANG_WARN_DOCUMENTATION_COMMENTS = YES
+CLANG_WARN_SUSPICIOUS_MOVE = YES
+CLANG_WARN_UNREACHABLE_CODE = YES
 GCC_TREAT_WARNINGS_AS_ERRORS = YES
 GCC_OPTIMIZATION_LEVEL = s
-GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1 $(DISPATCH_PREPROCESSOR_DEFINITIONS)
 GCC_NO_COMMON_BLOCKS = YES
-WARNING_CFLAGS = -Wall -Wextra -Waggregate-return -Wfloat-equal -Wpacked -Wmissing-declarations -Wstrict-overflow=4 -Wstrict-aliasing=2 -Wno-packed -Wno-unknown-warning-option
-OTHER_CFLAGS = -fverbose-asm -isystem $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders
+GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1 $(DISPATCH_PREPROCESSOR_DEFINITIONS)
+STATICLIB_PREPROCESSOR_DEFINITIONS = DISPATCH_VARIANT_STATIC=1 USE_OBJC=0 DISPATCH_USE_DTRACE=0
+WARNING_CFLAGS = -Wall -Wextra -Warray-bounds-pointer-arithmetic -Watomic-properties -Wcomma -Wconditional-uninitialized -Wcovered-switch-default -Wdate-time -Wdeprecated -Wdouble-promotion -Wduplicate-enum -Wexpansion-to-defined -Wfloat-equal -Widiomatic-parentheses -Wignored-qualifiers -Wimplicit-fallthrough -Wnullable-to-nonnull-conversion -Wobjc-interface-ivars -Wover-aligned -Wpacked -Wpointer-arith -Wselector -Wstatic-in-inline -Wsuper-class-method-mismatch -Wswitch-enum -Wtautological-compare -Wunguarded-availability -Wunused -Wno-unknown-warning-option $(NO_WARNING_CFLAGS)
+NO_WARNING_CFLAGS = -Wno-pedantic -Wno-bad-function-cast -Wno-c++-compat -Wno-c++98-compat -Wno-c++98-compat-pedantic -Wno-cast-align -Wno-cast-qual -Wno-disabled-macro-expansion -Wno-documentation-unknown-command -Wno-format-nonliteral -Wno-missing-variable-declarations -Wno-old-style-cast -Wno-padded -Wno-reserved-id-macro -Wno-shift-sign-overflow -Wno-undef -Wno-unreachable-code-aggressive -Wno-unused-macros -Wno-used-but-marked-unused -Wno-vla
+OTHER_CFLAGS = -fverbose-asm -isystem $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders $(PLATFORM_CFLAGS)
 OTHER_CFLAGS[arch=i386][sdk=macosx*] = $(OTHER_CFLAGS) -fno-unwind-tables -fno-asynchronous-unwind-tables -fno-exceptions
 OTHER_CFLAGS_normal = -momit-leaf-frame-pointer
-OTHER_CFLAGS_profile = $(OTHER_CFLAGS_normal) -DDISPATCH_PROFILE=1
+OTHER_CFLAGS_profile = $(OTHER_CFLAGS_normal) -DDISPATCH_PROFILE=1 -DDISPATCH_PERF_MON=1
 OTHER_CFLAGS_debug = -fstack-protector -fno-inline -O0 -DDISPATCH_DEBUG=1 -DOS_DEBUG=1
 GENERATE_PROFILING_CODE = NO
 DYLIB_CURRENT_VERSION = $(CURRENT_PROJECT_VERSION)
diff --git a/xcodeconfig/libdispatch_iphoneos.order b/xcodeconfig/libdispatch_iphoneos.order
deleted file mode 100644 (file)
index eea9845..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Copyright (c) 2013 Apple Inc. All rights reserved.
-#
-# @APPLE_APACHE_LICENSE_HEADER_START@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# @APPLE_APACHE_LICENSE_HEADER_END@
-#
-
index 07a8b9ac14b58552234cbf62ccdde45784f357c4..4c711994c5fe49c777691eed2812bcf33a2e391c 100644 (file)
 // @APPLE_APACHE_LICENSE_HEADER_END@
 //
 
-OTHER_MIGFLAGS = -novouchers
-OTHER_LDFLAGS =
 SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator appletvos appletvsimulator watchos watchsimulator
 PRODUCT_NAME = $(TARGET_NAME)
 INSTALL_PATH = /usr/local/lib/
+GCC_PREPROCESSOR_DEFINITIONS = $(inherited) FIREHOSE_SERVER=1 DISPATCH_USE_DTRACE=0
+OTHER_MIGFLAGS = -novouchers
+OTHER_LDFLAGS =
 PUBLIC_HEADERS_FOLDER_PATH = /usr/include/os
 PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/os
 STRIP_INSTALLED_PRODUCT = NO
 COPY_PHASE_STRIP = NO
 SEPARATE_STRIP = NO
-GCC_PREPROCESSOR_DEFINITIONS = $(inherited) FIREHOSE_SERVER=1 DISPATCH_USE_DTRACE=0
-
 VALID_ARCHS[sdk=macosx*] = $(NATIVE_ARCH_ACTUAL)
 
 COPY_HEADERS_RUN_UNIFDEF = YES
index f6b2a99f68acf7ba0f89e2338ede47b618769ee0..c572f80e759b52084cbdb65854a6c18e867d989b 100644 (file)
 
 #include "libfirehose.xcconfig"
 
-OTHER_CFLAGS = -mkernel -nostdinc -Wno-packed
-// LLVM_LTO = YES
+SUPPORTED_PLATFORMS = macosx iphoneos appletvos watchos
 PRODUCT_NAME = $(TARGET_NAME)
 INSTALL_PATH = /usr/local/lib/kernel/
+GCC_PREPROCESSOR_DEFINITIONS = $(inherited) KERNEL=1 DISPATCH_USE_DTRACE=0
+OTHER_CFLAGS = -mkernel -nostdinc -Wno-packed
+// LLVM_LTO = YES
 PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/kernel/os
-SUPPORTED_PLATFORMS = macosx iphoneos appletvos watchos
-
 HEADER_SEARCH_PATHS = $(PROJECT_DIR) $(SDKROOT)/System/Library/Frameworks/Kernel.framework/PrivateHeaders $(SDKROOT)/System/Library/Frameworks/Kernel.framework/Headers $(SDKROOT)/usr/local/include/os $(SDKROOT)/usr/local/include/firehose
 
-GCC_PREPROCESSOR_DEFINITIONS = $(inherited) KERNEL=1 DISPATCH_USE_DTRACE=0
-
 COPY_HEADERS_RUN_UNIFDEF = YES
 COPY_HEADERS_UNIFDEF_FLAGS = -DKERNEL=1 -DOS_FIREHOSE_SPI=1 -DOS_VOUCHER_ACTIVITY_SPI_TYPES=1 -UOS_VOUCHER_ACTIVITY_SPI